icsDataValidation 1.0.415__tar.gz → 1.0.419__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- icsdatavalidation-1.0.419/PKG-INFO +20 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/comparison_service.py +26 -11
- icsdatavalidation-1.0.419/icsDataValidation.egg-info/PKG-INFO +20 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/SOURCES.txt +0 -1
- icsdatavalidation-1.0.415/PKG-INFO +0 -298
- icsdatavalidation-1.0.415/README.md +0 -277
- icsdatavalidation-1.0.415/icsDataValidation.egg-info/PKG-INFO +0 -298
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/MANIFEST.in +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/configuration.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/azure_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/databricks_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/exasol_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/oracle_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/snowflake_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/sqlserver_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/connection_setups/teradata_connection_setup.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/core/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/core/database_objects.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/core/object_comparison.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/input_parameters/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/input_parameters/testing_tool_params.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/main.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/output_parameters/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/output_parameters/result_params.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/azure_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/databricks_hive_metastore_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/databricks_unity_catalog_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/exasol_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/oracle_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/snowflake_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/sqlserver_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/database_services/teradata_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/initialization_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/result_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/system_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/testset_service.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/__init__.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/file_util.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/logger_util.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/pandas_util.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/parallelization_util.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/sql_util.py +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/dependency_links.txt +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/not-zip-safe +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/requires.txt +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/top_level.txt +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/pyproject.toml +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/setup.cfg +0 -0
- {icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/setup.py +0 -0
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: icsDataValidation
|
|
3
|
+
Version: 1.0.419
|
|
4
|
+
Summary: Add your description here
|
|
5
|
+
Author-email: initions <ICSMC_EXT_PYPIORG@accenture.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.11
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: azure-storage-blob==12.13.1
|
|
10
|
+
Requires-Dist: boto3==1.26.154
|
|
11
|
+
Requires-Dist: cloe-util-snowflake-connector==1.0.5
|
|
12
|
+
Requires-Dist: databricks-sdk==0.29.0
|
|
13
|
+
Requires-Dist: databricks-sql-connector==3.0.1
|
|
14
|
+
Requires-Dist: numpy==1.26.3
|
|
15
|
+
Requires-Dist: oracledb==2.5.0
|
|
16
|
+
Requires-Dist: pandas==2.2.2
|
|
17
|
+
Requires-Dist: pyexasol==0.24.0
|
|
18
|
+
Requires-Dist: pyodbc
|
|
19
|
+
Requires-Dist: python-dotenv>=1.0.1
|
|
20
|
+
Requires-Dist: teradatasql==17.20.0.10
|
|
@@ -4,7 +4,7 @@ import datetime
|
|
|
4
4
|
import numpy as np
|
|
5
5
|
|
|
6
6
|
from pandas._testing import assert_frame_equal
|
|
7
|
-
from decimal import Decimal
|
|
7
|
+
from decimal import Decimal, InvalidOperation, getcontext
|
|
8
8
|
|
|
9
9
|
from icsDataValidation.utils.logger_util import configure_dev_ops_logger
|
|
10
10
|
from icsDataValidation.utils.pandas_util import get_diff_dataframes, get_diff_dict_from_diff_dataframes
|
|
@@ -166,16 +166,31 @@ class ComparisonService(TestingToolParams):
|
|
|
166
166
|
del trgt_columns_aggregate['TESTATM_ERRORS']
|
|
167
167
|
|
|
168
168
|
if self.result_params.src_row_count != 0 and self.result_params.trgt_row_count != 0:
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
169
|
+
try:
|
|
170
|
+
aggregation_differences_trgt_minus_src_not_boolean = {
|
|
171
|
+
k: round(Decimal(trgt_columns_aggregate[k][1])
|
|
172
|
+
- Decimal(src_columns_aggregate[k][1]), self.numeric_scale)
|
|
173
|
+
for k in src_columns_aggregate.keys()
|
|
174
|
+
if k in trgt_columns_aggregate
|
|
175
|
+
and str(src_columns_aggregate[k][1]) != str(trgt_columns_aggregate[k][1])
|
|
176
|
+
and src_columns_aggregate[k][1] != trgt_columns_aggregate[k][1]
|
|
177
|
+
and src_columns_aggregate[k][0].upper() != 'AGGREGATEBOOLEAN'
|
|
178
|
+
and trgt_columns_aggregate[k][0].upper() != 'AGGREGATEBOOLEAN'
|
|
179
|
+
}
|
|
180
|
+
except InvalidOperation as e:
|
|
181
|
+
getcontext().prec = 100 # sets the precision of Decimal to a higher value - due to the limitations of the decimal module when handling such large numbers with high precision
|
|
182
|
+
aggregation_differences_trgt_minus_src_not_boolean = {
|
|
183
|
+
k: round(Decimal(trgt_columns_aggregate[k][1])
|
|
184
|
+
- Decimal(src_columns_aggregate[k][1]), self.numeric_scale)
|
|
185
|
+
for k in src_columns_aggregate.keys()
|
|
186
|
+
if k in trgt_columns_aggregate
|
|
187
|
+
and str(src_columns_aggregate[k][1]) != str(trgt_columns_aggregate[k][1])
|
|
188
|
+
and src_columns_aggregate[k][1] != trgt_columns_aggregate[k][1]
|
|
189
|
+
and src_columns_aggregate[k][0].upper() != 'AGGREGATEBOOLEAN'
|
|
190
|
+
and trgt_columns_aggregate[k][0].upper() != 'AGGREGATEBOOLEAN'
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
|
|
179
194
|
aggregation_differences_trgt_minus_src_boolean = {
|
|
180
195
|
k: str(
|
|
181
196
|
int(trgt_columns_aggregate[k][1].split('_',1)[0])
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: icsDataValidation
|
|
3
|
+
Version: 1.0.419
|
|
4
|
+
Summary: Add your description here
|
|
5
|
+
Author-email: initions <ICSMC_EXT_PYPIORG@accenture.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.11
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: azure-storage-blob==12.13.1
|
|
10
|
+
Requires-Dist: boto3==1.26.154
|
|
11
|
+
Requires-Dist: cloe-util-snowflake-connector==1.0.5
|
|
12
|
+
Requires-Dist: databricks-sdk==0.29.0
|
|
13
|
+
Requires-Dist: databricks-sql-connector==3.0.1
|
|
14
|
+
Requires-Dist: numpy==1.26.3
|
|
15
|
+
Requires-Dist: oracledb==2.5.0
|
|
16
|
+
Requires-Dist: pandas==2.2.2
|
|
17
|
+
Requires-Dist: pyexasol==0.24.0
|
|
18
|
+
Requires-Dist: pyodbc
|
|
19
|
+
Requires-Dist: python-dotenv>=1.0.1
|
|
20
|
+
Requires-Dist: teradatasql==17.20.0.10
|
|
@@ -1,298 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.4
|
|
2
|
-
Name: icsDataValidation
|
|
3
|
-
Version: 1.0.415
|
|
4
|
-
Summary: Add your description here
|
|
5
|
-
Author-email: initions <ICSMC_EXT_PYPIORG@accenture.com>
|
|
6
|
-
License: MIT
|
|
7
|
-
Requires-Python: >=3.11
|
|
8
|
-
Description-Content-Type: text/markdown
|
|
9
|
-
Requires-Dist: azure-storage-blob==12.13.1
|
|
10
|
-
Requires-Dist: boto3==1.26.154
|
|
11
|
-
Requires-Dist: cloe-util-snowflake-connector==1.0.5
|
|
12
|
-
Requires-Dist: databricks-sdk==0.29.0
|
|
13
|
-
Requires-Dist: databricks-sql-connector==3.0.1
|
|
14
|
-
Requires-Dist: numpy==1.26.3
|
|
15
|
-
Requires-Dist: oracledb==2.5.0
|
|
16
|
-
Requires-Dist: pandas==2.2.2
|
|
17
|
-
Requires-Dist: pyexasol==0.24.0
|
|
18
|
-
Requires-Dist: pyodbc
|
|
19
|
-
Requires-Dist: python-dotenv>=1.0.1
|
|
20
|
-
Requires-Dist: teradatasql==17.20.0.10
|
|
21
|
-
|
|
22
|
-
# icsDV - initions Data Validation Tool
|
|
23
|
-
|
|
24
|
-
## Introduction
|
|
25
|
-
|
|
26
|
-
The icsDataValidation tool identifies data mismatches between two databases.
|
|
27
|
-
The functionalities are specifically geared to support migration projects.
|
|
28
|
-
It helps to find data issues in tables and views in comparison of a source and a target system.
|
|
29
|
-
|
|
30
|
-
### What is "generic" about the tool?
|
|
31
|
-
|
|
32
|
-
The icsDataValidation tool (icsDV) is in particular structered in a way that it is easily expandable.
|
|
33
|
-
The main code is used by all different database options.
|
|
34
|
-
Specifics for each supported database are implemented in a database service per database.
|
|
35
|
-
|
|
36
|
-
The different database services are very similar.
|
|
37
|
-
They hold the same methods with the same input and output parameters.
|
|
38
|
-
Each method is aligned with the syntax and the settings of the database it is created for.
|
|
39
|
-
Each core implementation includes connections setup, object comparison functionality and the result preparation.
|
|
40
|
-
|
|
41
|
-
### Supported Databases
|
|
42
|
-
|
|
43
|
-
The icsDV supports comparisons between the following databases:
|
|
44
|
-
|
|
45
|
-
- Snowflake
|
|
46
|
-
- Teradata
|
|
47
|
-
- Azure SQL Server
|
|
48
|
-
- Exasol
|
|
49
|
-
- Oracle
|
|
50
|
-
- Databricks with and without Unity Catalog
|
|
51
|
-
|
|
52
|
-
Comparison results can be written to either Snowflake or Databricks.
|
|
53
|
-
|
|
54
|
-
### Features
|
|
55
|
-
|
|
56
|
-
The key features of the tool are:
|
|
57
|
-
|
|
58
|
-
- Comparison of tables and views between a source and a target system.
|
|
59
|
-
- Pipeline integration in Azure DevOps or GitLab
|
|
60
|
-
- Multiple verification/comparison steps:
|
|
61
|
-
- Row count comparison
|
|
62
|
-
- Column names comparison
|
|
63
|
-
- Aggregation comparison (depending on data type)
|
|
64
|
-
- "group by" comparison
|
|
65
|
-
- Pandas DataFrame comparison (with a threshold for the size of the object)
|
|
66
|
-
- Pandas DataFrame sample comparison (with a random sample of the object)
|
|
67
|
-
- Detailed representation of the comparison result
|
|
68
|
-
- "high-level" result (for each pipeline/execution)
|
|
69
|
-
- "object-level" result (for each table/view)
|
|
70
|
-
- "column-level" result (for each column)
|
|
71
|
-
- Parallelization for performance enhancement of the comparison of a large number of objects
|
|
72
|
-
- Input testsets (white-listing of objects)
|
|
73
|
-
- Object filter (black-listing of objects)
|
|
74
|
-
- Object mappings between the source and the target system
|
|
75
|
-
- Comparison result saved and displayed in multiple instances
|
|
76
|
-
- saved as JSON files in the repository
|
|
77
|
-
- export to result tables in the target system (Snowflake or Databricks)
|
|
78
|
-
- export to Azure Blob Storage or AWS S3 Bucket
|
|
79
|
-
|
|
80
|
-
### Repository Structure
|
|
81
|
-
|
|
82
|
-
The repository is structured in the following sections:
|
|
83
|
-
|
|
84
|
-
- **icsDataValidation**
|
|
85
|
-
> This is where all code files are stored.
|
|
86
|
-
|
|
87
|
-
- **icsDataValidation/main.py**
|
|
88
|
-
> Entry point for python.
|
|
89
|
-
|
|
90
|
-
- **icsDataValidation/core**
|
|
91
|
-
> Main code files for the parts independent on the source and target system.
|
|
92
|
-
|
|
93
|
-
- **icsDataValidation/services/database_services**
|
|
94
|
-
> Database services for all supported systems can be found here.
|
|
95
|
-
Each file contains a class that is identically structured in comparison to the other database service classes.
|
|
96
|
-
Each database service class contains methods to query metadata, create aggregations, and retrieve data for the comparison step.
|
|
97
|
-
|
|
98
|
-
- **icsDataValidation/connection_setups**
|
|
99
|
-
> The connection setups are database dependent.
|
|
100
|
-
They define how the credentials for the database connections are retrieved.
|
|
101
|
-
|
|
102
|
-
- **examples/comparison_results**
|
|
103
|
-
> The comparison results are saved here.
|
|
104
|
-
One JSON file with all results is saved for each execution/pipeline run.
|
|
105
|
-
Additionally there are live comparison results saved for each compared object as a failsafe.
|
|
106
|
-
|
|
107
|
-
- **examples**
|
|
108
|
-
> This folder contains all files defining a specific validation setup.
|
|
109
|
-
- A file named `migration_config.json` contains configurations about the source system, the target system and the mapping of objects between both. It contains the blacklists and "group by" aggregation settings.
|
|
110
|
-
- A file named `ics_data_validation_config.json` specifies the source system, the target system and the results system. Most importantly, this includes the names of the results tables and the connection configurations (Server, Port, Secrets) of source and target system.
|
|
111
|
-
- A file named `manual_execution_params.py` is only relevant for local execution of the code. It contains settings which would otherwise be defined in the pipeline setup, i.e. limits on the size of objects to compare and the numeric precision.
|
|
112
|
-
- The folder `testsets` contains JSON files specifying whitelists of objects to compare.
|
|
113
|
-
|
|
114
|
-
For all the files here, empty `*.template.*` files are available and may serve as a starting point.
|
|
115
|
-
This repo stores only template files.
|
|
116
|
-
The actual files used for each setup should not be committed here.
|
|
117
|
-
They are stored in [a separate repository.](https://dev.azure.com/initions-consulting/icsDataValidation/_git/icsDataValidation%20-%20workflow%20demo).
|
|
118
|
-
|
|
119
|
-
- **examples/pipeline**
|
|
120
|
-
> Files defining the pipelines that execute the icsDV are stored here. For example, YML files for Azure DevOps pipelines.
|
|
121
|
-
|
|
122
|
-
## icsDV - Execution Manual
|
|
123
|
-
|
|
124
|
-
## icsDV - Input Parameters
|
|
125
|
-
|
|
126
|
-
There are four types of input parameters:
|
|
127
|
-
|
|
128
|
-
1. Pipeline Parameters - which are defined as input parameters of a pipeline (Azure DevOps Pipeline or Gitlab Pipeline).
|
|
129
|
-
2. Manual Execution Parameters - defined in the code (testing_tool.py).
|
|
130
|
-
They correspond to the Pipeline Parameters and are used when executing the code directly without a pipeline instead of the Pipeline Parameters.
|
|
131
|
-
3. Global Parameters - directly defined in the TestingToolParams class. They are used in pipeline runs and for manual executions.
|
|
132
|
-
4. Environmental Parameters - Stored either in Azure DevOps in a variable group, in Gitlab, or, for manual executions, in a `*.env` file in a location that can be specified in the `manual_execution_params.py`.
|
|
133
|
-
|
|
134
|
-
Additionally the parameters can be categorized into 3 groups:
|
|
135
|
-
|
|
136
|
-
1. Setup Parameters - these are parameters which are usually just set once when setting up the icsDV.
|
|
137
|
-
2. Configuration Parameters - are used to configure the general settings but can be adjusted to the conditions of the workload on the fly.
|
|
138
|
-
3. Execution Parameters - are set individually for each execution of the icsDV, e.g. the selection of objects to be tested.
|
|
139
|
-
|
|
140
|
-
### Setup Parameters
|
|
141
|
-
|
|
142
|
-
Stored in `ics_data_validation_config.json`:
|
|
143
|
-
|
|
144
|
-
| Parameter | Description | Input Type |
|
|
145
|
-
|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
|
|
146
|
-
| source_system_selection | Name of the source system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
147
|
-
| target_system_selection | Name of the target system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
148
|
-
| result_system_selection | Name of the result system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
149
|
-
| azure_devops_pipeline | Azure DevOps Pipeline support. Set to "True" to push the changes of a run to the GIT repository. | Global Parameter - TestingToolParams |
|
|
150
|
-
| gitlab_pipeline | Gitlab Pipeline support. Set to "True" to push the changes of a run to the GIT repository. | Global Parameter - TestingToolParams |
|
|
151
|
-
| result_database_name | Name of the database or catalog the results are written to | Global Parameter - TestingToolParams |
|
|
152
|
-
| result_schema_name | Name of the schema the results are written to | Global Parameter - TestingToolParams |
|
|
153
|
-
| result_table_highlevel_name | Name of the high-level results table | Global Parameter - TestingToolParams |
|
|
154
|
-
| result_table_objectlevel_name | Name of the object-level results table | Global Parameter - TestingToolParams |
|
|
155
|
-
| result_table_columnlevel_name | Name of the column-level results table | Global Parameter - TestingToolParams |
|
|
156
|
-
| result_meta_data_schema_name | Name of the schema the full results are written to | Global Parameter - TestingToolParams |
|
|
157
|
-
| result_table_name | Name of the table the full results are written to | Global Parameter - TestingToolParams |
|
|
158
|
-
| result_live_table_name | Name of the table the live results are written to | Global Parameter - TestingToolParams |
|
|
159
|
-
| results_folder_name | Folder that in which the results are stored in JSON format. Default: `examples/comparison_results/` | Global Parameter - TestingToolParams |
|
|
160
|
-
| remaining_mapping_objects_folder_name | Output folder that holds information about source system objects which are not covered by the mapping and are therefor not included in the comparison. Default: `examples/remaining_mapping_objects/` | Global Parameter - TestingToolParams |
|
|
161
|
-
| testset_folder_name | Folder that holds the test set files in JSON format. Default: `examples/testsets/` | Global Parameter - TestingToolParams |
|
|
162
|
-
| stage_schema | Name of the Snowflake Schema where the stage is created to upload the comparison results to Snowflake. Only needed if the `upload_result_to_result_database` functionality is used with Snowflake as target system. | Global Parameter - TestingToolParams |
|
|
163
|
-
| stage_name_prefix | Prefix of the name of the Snowflake Stage which is used to upload the comparison results to Snowflake. The name is complemented by a run_guid which is a unique uuid for each icsDV execution. Only needed if the `upload_result_to_result_database` functionality is used. | Global Parameter - TestingToolParams |
|
|
164
|
-
| container_name | Name of the Azure Storage Container to upload the comparison results into the blob storage. Note: Only needed if the `upload_result_to_blob` functionality is used. | Global Parameter - TestingToolParams |
|
|
165
|
-
| bucket_name | Name of the AWS S3 Bucket to upload the comparison results into the AWS. Note: Only needed if the `upload_result_to_bucket` functionality is used. | Global Parameter - TestingToolParams |
|
|
166
|
-
|
|
167
|
-
### Configuration Parameters
|
|
168
|
-
|
|
169
|
-
Stored in `manual_execution_params.py`:
|
|
170
|
-
|
|
171
|
-
| Parameter | Description | Input Type |
|
|
172
|
-
|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|
|
|
173
|
-
| ENV_FILEPATH | Absolute path to the `*.env` file containing secrets, passwords and tokens. | Pipeline Parameter or Manual Execution Parameters |
|
|
174
|
-
| UPLOAD_RESULT_TO_BLOB | Set to "True" to upload the comparison results to an Azure Blob Storage. An `azure_storage_connection_string` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameters |
|
|
175
|
-
| UPLOAD_RESULT_TO_BUCKET | Set to "True" to upload the comparison results to an AWS S3 Bucket. An `aws_bucket_access_key` and an `aws_bucket_secret_key` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameter |
|
|
176
|
-
| UPLOAD_RESULT_TO_RESULT_DATABASE | Set to "True" to upload the comparison results to Snowflake or Databricks. A `result_system_selection` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameter |
|
|
177
|
-
| MAX_OBJECT_SIZE | Limits Pandas comparison to objects of a size smaller than `MAX_OBJECT_SIZE` bytes. Data type is String. Default: `str(-1)`, no limit. | Pipeline Parameter or Manual Execution Parameter |
|
|
178
|
-
| MAX_ROW_NUMBER | Limits Pandas comparison to objects with less than `MAX_ROW_NUMBER` rows. Data type is String. Default: `str(-1)`, no limit. | Pipeline Parameter or Manual Execution Parameter |
|
|
179
|
-
| EXECUTE_GROUP_BY_COMPARISON | Set to "True" to execute group-by comparisons. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
180
|
-
| USE_GROUP_BY_COLUMNS | Set to "True" to activate group-by columns. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
181
|
-
| MIN_GROUP_BY_COUNT_DISTINCT | Minimum expected number of group-by counts. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
182
|
-
| MAX_GROUP_BY_COUNT_DISTINCT | Maximum expected number of group-by counts. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
183
|
-
| MAX_GROUP_BY_SIZE | Maximum size of the group-by query. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
184
|
-
| NUMERIC_SCALE | Number of digits to compare. Data type is String. Default: `str(2)`, i.e. deviations below 0.01 are tolerated. | Pipeline Parameter or Manual Execution Parameter |
|
|
185
|
-
|
|
186
|
-
### Execution Parameters
|
|
187
|
-
|
|
188
|
-
Stored in `manual_execution_params.py`:
|
|
189
|
-
|
|
190
|
-
| Parameter | Description | Input Type |
|
|
191
|
-
|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
|
|
192
|
-
| DATABASE_NAME | Filters the test set on a specific database/catalog. For no filter set "None" as a Manual Execution Parameter and leave it empty as a Pipeline Parameter. | Pipeline Parameter or Manual Execution Parameter |
|
|
193
|
-
| SCHEMA_NAME | Filters the test set on a specific schema. For no filter set "None" as a Manual Execution Parameter and leave it empty as a Pipeline Parameter. | Pipeline Parameter or Manual Execution Parameter |
|
|
194
|
-
| TESTSET_FILE_NAMES | File names of the test set as defined in the folder testset_folder_name (see Setup Parameters) as JSON files. | Pipeline Parameter or Manual Execution Parameter |
|
|
195
|
-
| OBJECT_TYPE_RESTRICTION | Filters the testset to only tables (`"include_only_tables"`), only views (`"include_only_views"`) or all tables and views (`"include_all"`). | Pipeline Parameter or Manual Execution Parameter |
|
|
196
|
-
| MAX_NUMBER_OF_THREADS | Maximum number of threads used. Values larget than the default, `str(1)`, activate parallelization. | Pipeline Parameter or Manual Execution Parameter |
|
|
197
|
-
|
|
198
|
-
## icsDV - Configuration
|
|
199
|
-
|
|
200
|
-
### Blacklists
|
|
201
|
-
|
|
202
|
-
### Whitelists (Testsets)
|
|
203
|
-
|
|
204
|
-
### Mapping
|
|
205
|
-
|
|
206
|
-
### Group-By-Aggregation
|
|
207
|
-
|
|
208
|
-
The Group-By-Aggregation is a feature to pinpoint the differences in the data.
|
|
209
|
-
It can be activiated by setting the parameter `EXECUTE_GROUP_BY_COMPARISON` to TRUE.
|
|
210
|
-
If activated an additional comparison step is performed.
|
|
211
|
-
Each table is queried with a group-by-statement including aggregations depending on the data type.
|
|
212
|
-
Those aggregations are consequently compared.
|
|
213
|
-
As a result the differences in the data can be narrowed down to certain grouping values.
|
|
214
|
-
|
|
215
|
-
There are three options to define the column over which the group-by is executed.
|
|
216
|
-
|
|
217
|
-
1. "group-by-columns-per-table" defined as multiple lists for specific tables. Activated with the `USE_GROUP_BY_COLUMNS` parameter and `GROUP_BY_COLUMNS_PER_TABLE` defined in the `migration_config.json`.
|
|
218
|
-
2. "group-by-columns" from a predifined list for all tables by a validation. Activated with the `USE_GROUP_BY_COLUMNS` parameter and `GROUP_BY_COLUMNS` defined in the `migration_config.json`.
|
|
219
|
-
3. "group-by-columns" evaluated from all existing columns by a validation
|
|
220
|
-
|
|
221
|
-
The validation consists of a number of tests and can be configured by a number of parameters to either easily find columns to group by over or to only select columns which add a definite value for pinpointing the differences in the data.
|
|
222
|
-
|
|
223
|
-
The validation tests for the "group-by-columns" are:
|
|
224
|
-
|
|
225
|
-
1. Number of distinct values of the column is more than 1.
|
|
226
|
-
2. Number of distinct values of the column is less than the rowcount of the table.
|
|
227
|
-
3. Number of distinct values of the column exceeds the `MIN_GROUP_BY_COUNT_DISTINCT` parameter.
|
|
228
|
-
4. Number of distinct values of the column is below the `MAX_GROUP_BY_COUNT_DISTINCT` parameter.
|
|
229
|
-
5. The size of the expected result of the group-by-query is below the `MAX_GROUP_BY_SIZE` parameter.
|
|
230
|
-
(The size is defined by "Number of distinct values" * "Number of columns")
|
|
231
|
-
|
|
232
|
-
All tests are executed on source and target.
|
|
233
|
-
|
|
234
|
-
> Note: The group by comparison can be activated by setting the `execute_group_by_comparison` parameter to TRUE.
|
|
235
|
-
The `migration_config.json` has to include the follwing keys when the parameter use_group_by_columns is set to TRUE.
|
|
236
|
-
|
|
237
|
-
"GROUP_BY_AGGREGATION":{
|
|
238
|
-
"GROUP_BY_COLUMNS_PER_TABLE": {},
|
|
239
|
-
"GROUP_BY_COLUMNS":[]
|
|
240
|
-
}
|
|
241
|
-
The values of those keys can be empty.
|
|
242
|
-
|
|
243
|
-
## icsDV - Comparison Results
|
|
244
|
-
|
|
245
|
-
### JSON Results
|
|
246
|
-
|
|
247
|
-
- Complete Comparison Result JSONs
|
|
248
|
-
- Live Comparison Result JSONs
|
|
249
|
-
|
|
250
|
-
### Target System Result Tables
|
|
251
|
-
|
|
252
|
-
- High-Level Result
|
|
253
|
-
- Object-Level Result
|
|
254
|
-
- Column-Level Result
|
|
255
|
-
|
|
256
|
-
### Result Export in a File Storage
|
|
257
|
-
|
|
258
|
-
## icsDV - Setup
|
|
259
|
-
|
|
260
|
-
### Code setup
|
|
261
|
-
|
|
262
|
-
- To handle the code, we recommend using VS Code.
|
|
263
|
-
- The code is written in python. The tool is compatible with version 3.11
|
|
264
|
-
- It is recommended to use a project-specific python environment.
|
|
265
|
-
You can create one with `python -m venv .env` in the root folder of this repo.
|
|
266
|
-
After creating it, you should activate it (`source .env/bin/activate`), select the python binary `.env/bin/python` therein as your python interpreter in VSC and make sure that python libraries are read from and installed to this environment, i.e. `export PYTHONPATH=$(pwd)/.env/lib/python3.8/site-packages`.
|
|
267
|
-
- In this environment, install the packages listed in the `requirements.txt` and the `requirements-dev.txt`. i.e. run `pip install -r requirements.txt`.
|
|
268
|
-
|
|
269
|
-
### Setup for manual execution
|
|
270
|
-
|
|
271
|
-
### Setup as Azure DevOps pipeline
|
|
272
|
-
|
|
273
|
-
### Setup as GitLab pipeline
|
|
274
|
-
|
|
275
|
-
## authentication
|
|
276
|
-
|
|
277
|
-
The following auth methods to snowflake are supported:
|
|
278
|
-
|
|
279
|
-
- password, provided via PASSWORD_NAME
|
|
280
|
-
- private key with/without encryption, provided via PRIVATE_KEY_NAME with/without PRIVATE_KEY_PASSPHRASE_NAME
|
|
281
|
-
- path to private key file with/without encryption, provided via PRIVATE_KEY_FILE_PATH with/without PRIVATE_KEY_FILE_PASSWORD
|
|
282
|
-
|
|
283
|
-
## devcontainer
|
|
284
|
-
|
|
285
|
-
run with uv as follows in devcontainer:
|
|
286
|
-
```bash
|
|
287
|
-
uv run -s icsDataValidation/main.py
|
|
288
|
-
```
|
|
289
|
-
|
|
290
|
-
Inside the [devcontainer config](.devcontainer/devcontainer.json) the mounts setting is used to bring a .env from the host system into the devcontainer.
|
|
291
|
-
|
|
292
|
-
```bash
|
|
293
|
-
"mounts": [
|
|
294
|
-
"source=/home/Documents/Generic_Testing_Tool/generic_testing_tool_password.env,target=/workspaces/icsDataValidation/examples/generic_testing_tool_password.env,type=bind"
|
|
295
|
-
]
|
|
296
|
-
```
|
|
297
|
-
|
|
298
|
-
To use this feature either create the .env under the source path on your host or adjust this path to another path on the host system. The target path do no need adjustment!
|
|
@@ -1,277 +0,0 @@
|
|
|
1
|
-
# icsDV - initions Data Validation Tool
|
|
2
|
-
|
|
3
|
-
## Introduction
|
|
4
|
-
|
|
5
|
-
The icsDataValidation tool identifies data mismatches between two databases.
|
|
6
|
-
The functionalities are specifically geared to support migration projects.
|
|
7
|
-
It helps to find data issues in tables and views in comparison of a source and a target system.
|
|
8
|
-
|
|
9
|
-
### What is "generic" about the tool?
|
|
10
|
-
|
|
11
|
-
The icsDataValidation tool (icsDV) is in particular structured in a way that it is easily expandable.
|
|
12
|
-
The main code is used by all different database options.
|
|
13
|
-
Specifics for each supported database are implemented in a database service per database.
|
|
14
|
-
|
|
15
|
-
The different database services are very similar.
|
|
16
|
-
They hold the same methods with the same input and output parameters.
|
|
17
|
-
Each method is aligned with the syntax and the settings of the database it is created for.
|
|
18
|
-
Each core implementation includes connections setup, object comparison functionality and the result preparation.
|
|
19
|
-
|
|
20
|
-
### Supported Databases
|
|
21
|
-
|
|
22
|
-
The icsDV supports comparisons between the following databases:
|
|
23
|
-
|
|
24
|
-
- Snowflake
|
|
25
|
-
- Teradata
|
|
26
|
-
- Azure SQL Server
|
|
27
|
-
- Exasol
|
|
28
|
-
- Oracle
|
|
29
|
-
- Databricks with and without Unity Catalog
|
|
30
|
-
|
|
31
|
-
Comparison results can be written to either Snowflake or Databricks.
|
|
32
|
-
|
|
33
|
-
### Features
|
|
34
|
-
|
|
35
|
-
The key features of the tool are:
|
|
36
|
-
|
|
37
|
-
- Comparison of tables and views between a source and a target system.
|
|
38
|
-
- Pipeline integration in Azure DevOps or GitLab
|
|
39
|
-
- Multiple verification/comparison steps:
|
|
40
|
-
- Row count comparison
|
|
41
|
-
- Column names comparison
|
|
42
|
-
- Aggregation comparison (depending on data type)
|
|
43
|
-
- "group by" comparison
|
|
44
|
-
- Pandas DataFrame comparison (with a threshold for the size of the object)
|
|
45
|
-
- Pandas DataFrame sample comparison (with a random sample of the object)
|
|
46
|
-
- Detailed representation of the comparison result
|
|
47
|
-
- "high-level" result (for each pipeline/execution)
|
|
48
|
-
- "object-level" result (for each table/view)
|
|
49
|
-
- "column-level" result (for each column)
|
|
50
|
-
- Parallelization for performance enhancement of the comparison of a large number of objects
|
|
51
|
-
- Input testsets (white-listing of objects)
|
|
52
|
-
- Object filter (black-listing of objects)
|
|
53
|
-
- Object mappings between the source and the target system
|
|
54
|
-
- Comparison result saved and displayed in multiple instances
|
|
55
|
-
- saved as JSON files in the repository
|
|
56
|
-
- export to result tables in the target system (Snowflake or Databricks)
|
|
57
|
-
- export to Azure Blob Storage or AWS S3 Bucket
|
|
58
|
-
|
|
59
|
-
### Repository Structure
|
|
60
|
-
|
|
61
|
-
The repository is structured in the following sections:
|
|
62
|
-
|
|
63
|
-
- **icsDataValidation**
|
|
64
|
-
> This is where all code files are stored.
|
|
65
|
-
|
|
66
|
-
- **icsDataValidation/main.py**
|
|
67
|
-
> Entry point for python.
|
|
68
|
-
|
|
69
|
-
- **icsDataValidation/core**
|
|
70
|
-
> Main code files for the parts independent on the source and target system.
|
|
71
|
-
|
|
72
|
-
- **icsDataValidation/services/database_services**
|
|
73
|
-
> Database services for all supported systems can be found here.
|
|
74
|
-
Each file contains a class that is identically structured in comparison to the other database service classes.
|
|
75
|
-
Each database service class contains methods to query metadata, create aggregations, and retrieve data for the comparison step.
|
|
76
|
-
|
|
77
|
-
- **icsDataValidation/connection_setups**
|
|
78
|
-
> The connection setups are database dependent.
|
|
79
|
-
They define how the credentials for the database connections are retrieved.
|
|
80
|
-
|
|
81
|
-
- **examples/comparison_results**
|
|
82
|
-
> The comparison results are saved here.
|
|
83
|
-
One JSON file with all results is saved for each execution/pipeline run.
|
|
84
|
-
Additionally there are live comparison results saved for each compared object as a failsafe.
|
|
85
|
-
|
|
86
|
-
- **examples**
|
|
87
|
-
> This folder contains all files defining a specific validation setup.
|
|
88
|
-
- A file named `migration_config.json` contains configurations about the source system, the target system and the mapping of objects between both. It contains the blacklists and "group by" aggregation settings.
|
|
89
|
-
- A file named `ics_data_validation_config.json` specifies the source system, the target system and the results system. Most importantly, this includes the names of the results tables and the connection configurations (Server, Port, Secrets) of source and target system.
|
|
90
|
-
- A file named `manual_execution_params.py` is only relevant for local execution of the code. It contains settings which would otherwise be defined in the pipeline setup, i.e. limits on the size of objects to compare and the numeric precision.
|
|
91
|
-
- The folder `testsets` contains JSON files specifying whitelists of objects to compare.
|
|
92
|
-
|
|
93
|
-
For all the files here, empty `*.template.*` files are available and may serve as a starting point.
|
|
94
|
-
This repo stores only template files.
|
|
95
|
-
The actual files used for each setup should not be committed here.
|
|
96
|
-
They are stored in [a separate repository.](https://dev.azure.com/initions-consulting/icsDataValidation/_git/icsDataValidation%20-%20workflow%20demo).
|
|
97
|
-
|
|
98
|
-
- **examples/pipeline**
|
|
99
|
-
> Files defining the pipelines that execute the icsDV are stored here. For example, YML files for Azure DevOps pipelines.
|
|
100
|
-
|
|
101
|
-
## icsDV - Execution Manual
|
|
102
|
-
|
|
103
|
-
## icsDV - Input Parameters
|
|
104
|
-
|
|
105
|
-
There are four types of input parameters:
|
|
106
|
-
|
|
107
|
-
1. Pipeline Parameters - which are defined as input parameters of a pipeline (Azure DevOps Pipeline or Gitlab Pipeline).
|
|
108
|
-
2. Manual Execution Parameters - defined in the code (testing_tool.py).
|
|
109
|
-
They correspond to the Pipeline Parameters and are used when executing the code directly without a pipeline instead of the Pipeline Parameters.
|
|
110
|
-
3. Global Parameters - directly defined in the TestingToolParams class. They are used in pipeline runs and for manual executions.
|
|
111
|
-
4. Environmental Parameters - Stored either in Azure DevOps in a variable group, in Gitlab, or, for manual executions, in a `*.env` file in a location that can be specified in the `manual_execution_params.py`.
|
|
112
|
-
|
|
113
|
-
Additionally the parameters can be categorized into 3 groups:
|
|
114
|
-
|
|
115
|
-
1. Setup Parameters - these are parameters which are usually just set once when setting up the icsDV.
|
|
116
|
-
2. Configuration Parameters - are used to configure the general settings but can be adjusted to the conditions of the workload on the fly.
|
|
117
|
-
3. Execution Parameters - are set individually for each execution of the icsDV, e.g. the selection of objects to be tested.
|
|
118
|
-
|
|
119
|
-
### Setup Parameters
|
|
120
|
-
|
|
121
|
-
Stored in `ics_data_validation_config.json`:
|
|
122
|
-
|
|
123
|
-
| Parameter | Description | Input Type |
|
|
124
|
-
|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
|
|
125
|
-
| source_system_selection | Name of the source system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
126
|
-
| target_system_selection | Name of the target system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
127
|
-
| result_system_selection | Name of the result system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
128
|
-
| azure_devops_pipeline | Azure DevOps Pipeline support. Set to "True" to push the changes of a run to the GIT repository. | Global Parameter - TestingToolParams |
|
|
129
|
-
| gitlab_pipeline | Gitlab Pipeline support. Set to "True" to push the changes of a run to the GIT repository. | Global Parameter - TestingToolParams |
|
|
130
|
-
| result_database_name | Name of the database or catalog the results are written to | Global Parameter - TestingToolParams |
|
|
131
|
-
| result_schema_name | Name of the schema the results are written to | Global Parameter - TestingToolParams |
|
|
132
|
-
| result_table_highlevel_name | Name of the high-level results table | Global Parameter - TestingToolParams |
|
|
133
|
-
| result_table_objectlevel_name | Name of the object-level results table | Global Parameter - TestingToolParams |
|
|
134
|
-
| result_table_columnlevel_name | Name of the column-level results table | Global Parameter - TestingToolParams |
|
|
135
|
-
| result_meta_data_schema_name | Name of the schema the full results are written to | Global Parameter - TestingToolParams |
|
|
136
|
-
| result_table_name | Name of the table the full results are written to | Global Parameter - TestingToolParams |
|
|
137
|
-
| result_live_table_name | Name of the table the live results are written to | Global Parameter - TestingToolParams |
|
|
138
|
-
| results_folder_name                   | Folder in which the results are stored in JSON format. Default: `examples/comparison_results/`                                                                                                                                                                            | Global Parameter - TestingToolParams             |
|
|
139
|
-
| remaining_mapping_objects_folder_name | Output folder that holds information about source system objects which are not covered by the mapping and are therefore not included in the comparison. Default: `examples/remaining_mapping_objects/`                                                                     | Global Parameter - TestingToolParams             |
|
|
140
|
-
| testset_folder_name | Folder that holds the test set files in JSON format. Default: `examples/testsets/` | Global Parameter - TestingToolParams |
|
|
141
|
-
| stage_schema | Name of the Snowflake Schema where the stage is created to upload the comparison results to Snowflake. Only needed if the `upload_result_to_result_database` functionality is used with Snowflake as target system. | Global Parameter - TestingToolParams |
|
|
142
|
-
| stage_name_prefix | Prefix of the name of the Snowflake Stage which is used to upload the comparison results to Snowflake. The name is complemented by a run_guid which is a unique uuid for each icsDV execution. Only needed if the `upload_result_to_result_database` functionality is used. | Global Parameter - TestingToolParams |
|
|
143
|
-
| container_name | Name of the Azure Storage Container to upload the comparison results into the blob storage. Note: Only needed if the `upload_result_to_blob` functionality is used. | Global Parameter - TestingToolParams |
|
|
144
|
-
| bucket_name | Name of the AWS S3 Bucket to upload the comparison results into the AWS. Note: Only needed if the `upload_result_to_bucket` functionality is used. | Global Parameter - TestingToolParams |
|
|
145
|
-
|
|
146
|
-
### Configuration Parameters
|
|
147
|
-
|
|
148
|
-
Stored in `manual_execution_params.py`:
|
|
149
|
-
|
|
150
|
-
| Parameter | Description | Input Type |
|
|
151
|
-
|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|
|
|
152
|
-
| ENV_FILEPATH | Absolute path to the `*.env` file containing secrets, passwords and tokens. | Pipeline Parameter or Manual Execution Parameters |
|
|
153
|
-
| UPLOAD_RESULT_TO_BLOB | Set to "True" to upload the comparison results to an Azure Blob Storage. An `azure_storage_connection_string` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameters |
|
|
154
|
-
| UPLOAD_RESULT_TO_BUCKET | Set to "True" to upload the comparison results to an AWS S3 Bucket. An `aws_bucket_access_key` and an `aws_bucket_secret_key` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameter |
|
|
155
|
-
| UPLOAD_RESULT_TO_RESULT_DATABASE | Set to "True" to upload the comparison results to Snowflake or Databricks. A `result_system_selection` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameter |
|
|
156
|
-
| MAX_OBJECT_SIZE | Limits Pandas comparison to objects of a size smaller than `MAX_OBJECT_SIZE` bytes. Data type is String. Default: `str(-1)`, no limit. | Pipeline Parameter or Manual Execution Parameter |
|
|
157
|
-
| MAX_ROW_NUMBER | Limits Pandas comparison to objects with less than `MAX_ROW_NUMBER` rows. Data type is String. Default: `str(-1)`, no limit. | Pipeline Parameter or Manual Execution Parameter |
|
|
158
|
-
| EXECUTE_GROUP_BY_COMPARISON | Set to "True" to execute group-by comparisons. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
159
|
-
| USE_GROUP_BY_COLUMNS | Set to "True" to activate group-by columns. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
160
|
-
| MIN_GROUP_BY_COUNT_DISTINCT | Minimum expected number of group-by counts. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
161
|
-
| MAX_GROUP_BY_COUNT_DISTINCT | Maximum expected number of group-by counts. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
162
|
-
| MAX_GROUP_BY_SIZE | Maximum size of the group-by query. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
163
|
-
| NUMERIC_SCALE | Number of digits to compare. Data type is String. Default: `str(2)`, i.e. deviations below 0.01 are tolerated. | Pipeline Parameter or Manual Execution Parameter |
|
|
164
|
-
|
|
165
|
-
### Execution Parameters
|
|
166
|
-
|
|
167
|
-
Stored in `manual_execution_params.py`:
|
|
168
|
-
|
|
169
|
-
| Parameter | Description | Input Type |
|
|
170
|
-
|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
|
|
171
|
-
| DATABASE_NAME | Filters the test set on a specific database/catalog. For no filter set "None" as a Manual Execution Parameter and leave it empty as a Pipeline Parameter. | Pipeline Parameter or Manual Execution Parameter |
|
|
172
|
-
| SCHEMA_NAME | Filters the test set on a specific schema. For no filter set "None" as a Manual Execution Parameter and leave it empty as a Pipeline Parameter. | Pipeline Parameter or Manual Execution Parameter |
|
|
173
|
-
| TESTSET_FILE_NAMES | File names of the test set as defined in the folder testset_folder_name (see Setup Parameters) as JSON files. | Pipeline Parameter or Manual Execution Parameter |
|
|
174
|
-
| OBJECT_TYPE_RESTRICTION | Filters the testset to only tables (`"include_only_tables"`), only views (`"include_only_views"`) or all tables and views (`"include_all"`). | Pipeline Parameter or Manual Execution Parameter |
|
|
175
|
-
| MAX_NUMBER_OF_THREADS   | Maximum number of threads used. Values larger than the default, `str(1)`, activate parallelization.                                                       | Pipeline Parameter or Manual Execution Parameter |
|
|
176
|
-
|
|
177
|
-
## icsDV - Configuration
|
|
178
|
-
|
|
179
|
-
### Blacklists
|
|
180
|
-
|
|
181
|
-
### Whitelists (Testsets)
|
|
182
|
-
|
|
183
|
-
### Mapping
|
|
184
|
-
|
|
185
|
-
### Group-By-Aggregation
|
|
186
|
-
|
|
187
|
-
The Group-By-Aggregation is a feature to pinpoint the differences in the data.
|
|
188
|
-
It can be activated by setting the parameter `EXECUTE_GROUP_BY_COMPARISON` to TRUE.
|
|
189
|
-
If activated an additional comparison step is performed.
|
|
190
|
-
Each table is queried with a group-by-statement including aggregations depending on the data type.
|
|
191
|
-
Those aggregations are consequently compared.
|
|
192
|
-
As a result the differences in the data can be narrowed down to certain grouping values.
|
|
193
|
-
|
|
194
|
-
There are three options to define the column over which the group-by is executed.
|
|
195
|
-
|
|
196
|
-
1. "group-by-columns-per-table" defined as multiple lists for specific tables. Activated with the `USE_GROUP_BY_COLUMNS` parameter and `GROUP_BY_COLUMNS_PER_TABLE` defined in the `migration_config.json`.
|
|
197
|
-
2. "group-by-columns" from a predefined list for all tables by a validation. Activated with the `USE_GROUP_BY_COLUMNS` parameter and `GROUP_BY_COLUMNS` defined in the `migration_config.json`.
|
|
198
|
-
3. "group-by-columns" evaluated from all existing columns by a validation
|
|
199
|
-
|
|
200
|
-
The validation consists of a number of tests and can be configured by a number of parameters to either easily find columns to group by over or to only select columns which add a definite value for pinpointing the differences in the data.
|
|
201
|
-
|
|
202
|
-
The validation tests for the "group-by-columns" are:
|
|
203
|
-
|
|
204
|
-
1. Number of distinct values of the column is more than 1.
|
|
205
|
-
2. Number of distinct values of the column is less than the rowcount of the table.
|
|
206
|
-
3. Number of distinct values of the column exceeds the `MIN_GROUP_BY_COUNT_DISTINCT` parameter.
|
|
207
|
-
4. Number of distinct values of the column is below the `MAX_GROUP_BY_COUNT_DISTINCT` parameter.
|
|
208
|
-
5. The size of the expected result of the group-by-query is below the `MAX_GROUP_BY_SIZE` parameter.
|
|
209
|
-
(The size is defined by "Number of distinct values" * "Number of columns")
|
|
210
|
-
|
|
211
|
-
All tests are executed on source and target.
|
|
212
|
-
|
|
213
|
-
> Note: The group by comparison can be activated by setting the `execute_group_by_comparison` parameter to TRUE.
|
|
214
|
-
The `migration_config.json` has to include the following keys when the parameter use_group_by_columns is set to TRUE.
|
|
215
|
-
|
|
216
|
-
"GROUP_BY_AGGREGATION":{
|
|
217
|
-
"GROUP_BY_COLUMNS_PER_TABLE": {},
|
|
218
|
-
"GROUP_BY_COLUMNS":[]
|
|
219
|
-
}
|
|
220
|
-
The values of those keys can be empty.
|
|
221
|
-
|
|
222
|
-
## icsDV - Comparison Results
|
|
223
|
-
|
|
224
|
-
### JSON Results
|
|
225
|
-
|
|
226
|
-
- Complete Comparison Result JSONs
|
|
227
|
-
- Live Comparison Result JSONs
|
|
228
|
-
|
|
229
|
-
### Target System Result Tables
|
|
230
|
-
|
|
231
|
-
- High-Level Result
|
|
232
|
-
- Object-Level Result
|
|
233
|
-
- Column-Level Result
|
|
234
|
-
|
|
235
|
-
### Result Export in a File Storage
|
|
236
|
-
|
|
237
|
-
## icsDV - Setup
|
|
238
|
-
|
|
239
|
-
### Code setup
|
|
240
|
-
|
|
241
|
-
- To handle the code, we recommend using VS Code.
|
|
242
|
-
- The code is written in python. The tool is compatible with version 3.11
|
|
243
|
-
- It is recommended to use a project-specific python environment.
|
|
244
|
-
You can create one with `python -m venv .env` in the root folder of this repo.
|
|
245
|
-
After creating it, you should activate it (`source .env/bin/activate`), select the python binary `.env/bin/python` therein as your python interpreter in VSC and make sure that python libraries are read from and installed to this environment, i.e. `export PYTHONPATH=$(pwd)/.env/lib/python3.8/site-packages`.
|
|
246
|
-
- In this environment, install the packages listed in the `requirements.txt` and the `requirements-dev.txt`. i.e. run `pip install -r requirements.txt`.
|
|
247
|
-
|
|
248
|
-
### Setup for manual execution
|
|
249
|
-
|
|
250
|
-
### Setup as Azure DevOps pipeline
|
|
251
|
-
|
|
252
|
-
### Setup as GitLab pipeline
|
|
253
|
-
|
|
254
|
-
## authentication
|
|
255
|
-
|
|
256
|
-
The following auth methods to snowflake are supported:
|
|
257
|
-
|
|
258
|
-
- password, provided via PASSWORD_NAME
|
|
259
|
-
- private key with/without encryption, provided via PRIVATE_KEY_NAME with/without PRIVATE_KEY_PASSPHRASE_NAME
|
|
260
|
-
- path to private key file with/without encryption, provided via PRIVATE_KEY_FILE_PATH with/without PRIVATE_KEY_FILE_PASSWORD
|
|
261
|
-
|
|
262
|
-
## devcontainer
|
|
263
|
-
|
|
264
|
-
run with uv as follows in devcontainer:
|
|
265
|
-
```bash
|
|
266
|
-
uv run -s icsDataValidation/main.py
|
|
267
|
-
```
|
|
268
|
-
|
|
269
|
-
Inside the [devcontainer config](.devcontainer/devcontainer.json) the mounts setting is used to bring a .env from the host system into the devcontainer.
|
|
270
|
-
|
|
271
|
-
```bash
|
|
272
|
-
"mounts": [
|
|
273
|
-
"source=/home/Documents/Generic_Testing_Tool/generic_testing_tool_password.env,target=/workspaces/icsDataValidation/examples/generic_testing_tool_password.env,type=bind"
|
|
274
|
-
]
|
|
275
|
-
```
|
|
276
|
-
|
|
277
|
-
To use this feature either create the .env under the source path on your host or adjust this path to another path on the host system. The target path does not need adjustment!
|
|
@@ -1,298 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.4
|
|
2
|
-
Name: icsDataValidation
|
|
3
|
-
Version: 1.0.415
|
|
4
|
-
Summary: Add your description here
|
|
5
|
-
Author-email: initions <ICSMC_EXT_PYPIORG@accenture.com>
|
|
6
|
-
License: MIT
|
|
7
|
-
Requires-Python: >=3.11
|
|
8
|
-
Description-Content-Type: text/markdown
|
|
9
|
-
Requires-Dist: azure-storage-blob==12.13.1
|
|
10
|
-
Requires-Dist: boto3==1.26.154
|
|
11
|
-
Requires-Dist: cloe-util-snowflake-connector==1.0.5
|
|
12
|
-
Requires-Dist: databricks-sdk==0.29.0
|
|
13
|
-
Requires-Dist: databricks-sql-connector==3.0.1
|
|
14
|
-
Requires-Dist: numpy==1.26.3
|
|
15
|
-
Requires-Dist: oracledb==2.5.0
|
|
16
|
-
Requires-Dist: pandas==2.2.2
|
|
17
|
-
Requires-Dist: pyexasol==0.24.0
|
|
18
|
-
Requires-Dist: pyodbc
|
|
19
|
-
Requires-Dist: python-dotenv>=1.0.1
|
|
20
|
-
Requires-Dist: teradatasql==17.20.0.10
|
|
21
|
-
|
|
22
|
-
# icsDV - initions Data Validation Tool
|
|
23
|
-
|
|
24
|
-
## Introduction
|
|
25
|
-
|
|
26
|
-
The icsDataValidation tool identifies data mismatches between two databases.
|
|
27
|
-
The functionalities are specifically geared to support migration projects.
|
|
28
|
-
It helps to find data issues in tables and views in comparison of a source and a target system.
|
|
29
|
-
|
|
30
|
-
### What is "generic" about the tool?
|
|
31
|
-
|
|
32
|
-
The icsDataValidation tool (icsDV) is in particular structured in a way that it is easily expandable.
|
|
33
|
-
The main code is used by all different database options.
|
|
34
|
-
Specifics for each supported database are implemented in a database service per database.
|
|
35
|
-
|
|
36
|
-
The different database services are very similar.
|
|
37
|
-
They hold the same methods with the same input and output parameters.
|
|
38
|
-
Each method is aligned with the syntax and the settings of the database it is created for.
|
|
39
|
-
Each core implementation includes connections setup, object comparison functionality and the result preparation.
|
|
40
|
-
|
|
41
|
-
### Supported Databases
|
|
42
|
-
|
|
43
|
-
The icsDV supports comparisons between the following databases:
|
|
44
|
-
|
|
45
|
-
- Snowflake
|
|
46
|
-
- Teradata
|
|
47
|
-
- Azure SQL Server
|
|
48
|
-
- Exasol
|
|
49
|
-
- Oracle
|
|
50
|
-
- Databricks with and without Unity Catalog
|
|
51
|
-
|
|
52
|
-
Comparison results can be written to either Snowflake or Databricks.
|
|
53
|
-
|
|
54
|
-
### Features
|
|
55
|
-
|
|
56
|
-
The key features of the tool are:
|
|
57
|
-
|
|
58
|
-
- Comparison of tables and views between a source and a target system.
|
|
59
|
-
- Pipeline integration in Azure DevOps or GitLab
|
|
60
|
-
- Multiple verification/comparison steps:
|
|
61
|
-
- Row count comparison
|
|
62
|
-
- Column names comparison
|
|
63
|
-
- Aggregation comparison (depending on data type)
|
|
64
|
-
- "group by" comparison
|
|
65
|
-
- Pandas DataFrame comparison (with a threshold for the size of the object)
|
|
66
|
-
- Pandas DataFrame sample comparison (with a random sample of the object)
|
|
67
|
-
- Detailed representation of the comparison result
|
|
68
|
-
- "high-level" result (for each pipeline/execution)
|
|
69
|
-
- "object-level" result (for each table/view)
|
|
70
|
-
- "column-level" result (for each column)
|
|
71
|
-
- Parallelization for performance enhancement of the comparison of a large number of objects
|
|
72
|
-
- Input testsets (white-listing of objects)
|
|
73
|
-
- Object filter (black-listing of objects)
|
|
74
|
-
- Object mappings between the source and the target system
|
|
75
|
-
- Comparison result saved and displayed in multiple instances
|
|
76
|
-
- saved as JSON files in the repository
|
|
77
|
-
- export to result tables in the target system (Snowflake or Databricks)
|
|
78
|
-
- export to Azure Blob Storage or AWS S3 Bucket
|
|
79
|
-
|
|
80
|
-
### Repository Structure
|
|
81
|
-
|
|
82
|
-
The repository is structured in the following sections:
|
|
83
|
-
|
|
84
|
-
- **icsDataValidation**
|
|
85
|
-
> This is where all code files are stored.
|
|
86
|
-
|
|
87
|
-
- **icsDataValidation/main.py**
|
|
88
|
-
> Entry point for python.
|
|
89
|
-
|
|
90
|
-
- **icsDataValidation/core**
|
|
91
|
-
> Main code files for the parts independent on the source and target system.
|
|
92
|
-
|
|
93
|
-
- **icsDataValidation/services/database_services**
|
|
94
|
-
> Database services for all supported systems can be found here.
|
|
95
|
-
Each file contains a class that is identically structured in comparison to the other database service classes.
|
|
96
|
-
Each database service class contains methods to query metadata, create aggregations, and retrieve data for the comparison step.
|
|
97
|
-
|
|
98
|
-
- **icsDataValidation/connection_setups**
|
|
99
|
-
> The connection setups are database dependent.
|
|
100
|
-
They define how the credentials for the database connections are retrieved.
|
|
101
|
-
|
|
102
|
-
- **examples/comparison_results**
|
|
103
|
-
> The comparison results are saved here.
|
|
104
|
-
One JSON file with all results is saved for each execution/pipeline run.
|
|
105
|
-
Additionally there are live comparison results saved for each compared object as a failsafe.
|
|
106
|
-
|
|
107
|
-
- **examples**
|
|
108
|
-
> This folder contains all files defining a specific validation setup.
|
|
109
|
-
- A file named `migration_config.json` contains configurations about the source system, the target system and the mapping of objects between both. It contains the blacklists and "group by" aggregation settings.
|
|
110
|
-
- A file named `ics_data_validation_config.json` specifies the source system, the target system and the results system. Most importantly, this includes the names of the results tables and the connection configurations (Server, Port, Secrets) of source and target system.
|
|
111
|
-
- A file named `manual_execution_params.py` is only relevant for local execution of the code. It contains settings which would otherwise be defined in the pipeline setup, i.e. limits on the size of objects to compare and the numeric precision.
|
|
112
|
-
- The folder `testsets` contains JSON files specifying whitelists of objects to compare.
|
|
113
|
-
|
|
114
|
-
For all the files here, empty `*.template.*` files are available and may serve as a starting point.
|
|
115
|
-
This repo stores only template files.
|
|
116
|
-
The actual files used for each setup should not be committed here.
|
|
117
|
-
They are stored in [a separate repository.](https://dev.azure.com/initions-consulting/icsDataValidation/_git/icsDataValidation%20-%20workflow%20demo).
|
|
118
|
-
|
|
119
|
-
- **examples/pipeline**
|
|
120
|
-
> Files defining the pipelines that execute the icsDV are stored here. For example, YML files for Azure DevOps pipelines.
|
|
121
|
-
|
|
122
|
-
## icsDV - Execution Manual
|
|
123
|
-
|
|
124
|
-
## icsDV - Input Parameters
|
|
125
|
-
|
|
126
|
-
There are four types of input parameters:
|
|
127
|
-
|
|
128
|
-
1. Pipeline Parameters - which are defined as input parameters of a pipeline (Azure DevOps Pipeline or Gitlab Pipeline).
|
|
129
|
-
2. Manual Execution Parameters - defined in the code (testing_tool.py).
|
|
130
|
-
They correspond to the Pipeline Parameters and are used when executing the code directly without a pipeline instead of the Pipeline Parameters.
|
|
131
|
-
3. Global Parameters - directly defined in the TestingToolParams class. They are used in pipeline runs and for manual executions.
|
|
132
|
-
4. Environmental Parameters - Stored either in Azure DevOps in a variable group, in Gitlab, or, for manual executions, in a `*.env` file in a location that can be specified in the `manual_execution_params.py`.
|
|
133
|
-
|
|
134
|
-
Additionally the parameters can be categorized into 3 groups:
|
|
135
|
-
|
|
136
|
-
1. Setup Parameters - these are parameters which are usually just set once when setting up the icsDV.
|
|
137
|
-
2. Configuration Parameters - are used to configure the general settings but can be adjusted to the conditions of the workload on the fly.
|
|
138
|
-
3. Execution Parameters - are set individually for each execution of the icsDV, e.g. the selection of objects to be tested.
|
|
139
|
-
|
|
140
|
-
### Setup Parameters
|
|
141
|
-
|
|
142
|
-
Stored in `ics_data_validation_config.json`:
|
|
143
|
-
|
|
144
|
-
| Parameter | Description | Input Type |
|
|
145
|
-
|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
|
|
146
|
-
| source_system_selection | Name of the source system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
147
|
-
| target_system_selection | Name of the target system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
148
|
-
| result_system_selection | Name of the result system as defined in the database_config.json as a key. | Pipeline Parameter or Manual Execution Parameter |
|
|
149
|
-
| azure_devops_pipeline | Azure DevOps Pipeline support. Set to "True" to push the changes of a run to the GIT repository. | Global Parameter - TestingToolParams |
|
|
150
|
-
| gitlab_pipeline | Gitlab Pipeline support. Set to "True" to push the changes of a run to the GIT repository. | Global Parameter - TestingToolParams |
|
|
151
|
-
| result_database_name | Name of the database or catalog the results are written to | Global Parameter - TestingToolParams |
|
|
152
|
-
| result_schema_name | Name of the schema the results are written to | Global Parameter - TestingToolParams |
|
|
153
|
-
| result_table_highlevel_name | Name of the high-level results table | Global Parameter - TestingToolParams |
|
|
154
|
-
| result_table_objectlevel_name | Name of the object-level results table | Global Parameter - TestingToolParams |
|
|
155
|
-
| result_table_columnlevel_name | Name of the column-level results table | Global Parameter - TestingToolParams |
|
|
156
|
-
| result_meta_data_schema_name | Name of the schema the full results are written to | Global Parameter - TestingToolParams |
|
|
157
|
-
| result_table_name | Name of the table the full results are written to | Global Parameter - TestingToolParams |
|
|
158
|
-
| result_live_table_name | Name of the table the live results are written to | Global Parameter - TestingToolParams |
|
|
159
|
-
| results_folder_name | Folder in which the results are stored in JSON format. Default: `examples/comparison_results/` | Global Parameter - TestingToolParams |
|
|
160
|
-
| remaining_mapping_objects_folder_name | Output folder that holds information about source system objects which are not covered by the mapping and are therefore not included in the comparison. Default: `examples/remaining_mapping_objects/` | Global Parameter - TestingToolParams |
|
|
161
|
-
| testset_folder_name | Folder that holds the test set files in JSON format. Default: `examples/testsets/` | Global Parameter - TestingToolParams |
|
|
162
|
-
| stage_schema | Name of the Snowflake Schema where the stage is created to upload the comparison results to Snowflake. Only needed if the `upload_result_to_result_database` functionality is used with Snowflake as target system. | Global Parameter - TestingToolParams |
|
|
163
|
-
| stage_name_prefix | Prefix of the name of the Snowflake Stage which is used to upload the comparison results to Snowflake. The name is complemented by a run_guid which is a unique uuid for each icsDV execution. Only needed if the `upload_result_to_result_database` functionality is used. | Global Parameter - TestingToolParams |
|
|
164
|
-
| container_name | Name of the Azure Storage Container to upload the comparison results into the blob storage. Note: Only needed if the `upload_result_to_blob` functionality is used. | Global Parameter - TestingToolParams |
|
|
165
|
-
| bucket_name | Name of the AWS S3 Bucket to upload the comparison results into the AWS. Note: Only needed if the `upload_result_to_bucket` functionality is used. | Global Parameter - TestingToolParams |
|
|
166
|
-
|
|
167
|
-
### Configuration Parameters
|
|
168
|
-
|
|
169
|
-
Stored in `manual_execution_params.py`:
|
|
170
|
-
|
|
171
|
-
| Parameter | Description | Input Type |
|
|
172
|
-
|----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------|
|
|
173
|
-
| ENV_FILEPATH | Absolute path to the `*.env` file containing secrets, passwords and tokens. | Pipeline Parameter or Manual Execution Parameters |
|
|
174
|
-
| UPLOAD_RESULT_TO_BLOB | Set to "True" to upload the comparison results to an Azure Blob Storage. An `azure_storage_connection_string` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameters |
|
|
175
|
-
| UPLOAD_RESULT_TO_BUCKET | Set to "True" to upload the comparison results to an AWS S3 Bucket. An `aws_bucket_access_key` and an `aws_bucket_secret_key` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameter |
|
|
176
|
-
| UPLOAD_RESULT_TO_RESULT_DATABASE | Set to "True" to upload the comparison results to Snowflake or Databricks. A `result_system_selection` is needed if set to "True". | Pipeline Parameter or Manual Execution Parameter |
|
|
177
|
-
| MAX_OBJECT_SIZE | Limits Pandas comparison to objects of a size smaller than `MAX_OBJECT_SIZE` bytes. Data type is String. Default: `str(-1)`, no limit. | Pipeline Parameter or Manual Execution Parameter |
|
|
178
|
-
| MAX_ROW_NUMBER | Limits Pandas comparison to objects with less than `MAX_ROW_NUMBER` rows. Data type is String. Default: `str(-1)`, no limit. | Pipeline Parameter or Manual Execution Parameter |
|
|
179
|
-
| EXECUTE_GROUP_BY_COMPARISON | Set to "True" to execute group-by comparisons. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
180
|
-
| USE_GROUP_BY_COLUMNS | Set to "True" to activate group-by columns. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
181
|
-
| MIN_GROUP_BY_COUNT_DISTINCT | Minimum expected number of group-by counts. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
182
|
-
| MAX_GROUP_BY_COUNT_DISTINCT | Maximum expected number of group-by counts. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
183
|
-
| MAX_GROUP_BY_SIZE | Maximum size of the group-by query. See sec. "Group-By-Aggregation" for details. | Pipeline Parameter or Manual Execution Parameter |
|
|
184
|
-
| NUMERIC_SCALE | Number of digits to compare. Data type is String. Default: `str(2)`, i.e. deviations below 0.01 are tolerated. | Pipeline Parameter or Manual Execution Parameter |
|
|
185
|
-
|
|
186
|
-
### Execution Parameters
|
|
187
|
-
|
|
188
|
-
Stored in `manual_execution_params.py`:
|
|
189
|
-
|
|
190
|
-
| Parameter | Description | Input Type |
|
|
191
|
-
|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------|
|
|
192
|
-
| DATABASE_NAME | Filters the test set on a specific database/catalog. For no filter set "None" as a Manual Execution Parameter and leave it empty as a Pipeline Parameter. | Pipeline Parameter or Manual Execution Parameter |
|
|
193
|
-
| SCHEMA_NAME | Filters the test set on a specific schema. For no filter set "None" as a Manual Execution Parameter and leave it empty as a Pipeline Parameter. | Pipeline Parameter or Manual Execution Parameter |
|
|
194
|
-
| TESTSET_FILE_NAMES | File names of the test set as defined in the folder testset_folder_name (see Setup Parameters) as JSON files. | Pipeline Parameter or Manual Execution Parameter |
|
|
195
|
-
| OBJECT_TYPE_RESTRICTION | Filters the testset to only tables (`"include_only_tables"`), only views (`"include_only_views"`) or all tables and views (`"include_all"`). | Pipeline Parameter or Manual Execution Parameter |
|
|
196
|
-
| MAX_NUMBER_OF_THREADS | Maximum number of threads used. Values larger than the default, `str(1)`, activate parallelization. | Pipeline Parameter or Manual Execution Parameter |
|
|
197
|
-
|
|
198
|
-
## icsDV - Configuration
|
|
199
|
-
|
|
200
|
-
### Blacklists
|
|
201
|
-
|
|
202
|
-
### Whitelists (Testsets)
|
|
203
|
-
|
|
204
|
-
### Mapping
|
|
205
|
-
|
|
206
|
-
### Group-By-Aggregation
|
|
207
|
-
|
|
208
|
-
The Group-By-Aggregation is a feature to pinpoint the differences in the data.
|
|
209
|
-
It can be activated by setting the parameter `EXECUTE_GROUP_BY_COMPARISON` to TRUE.
|
|
210
|
-
If activated an additional comparison step is performed.
|
|
211
|
-
Each table is queried with a group-by-statement including aggregations depending on the data type.
|
|
212
|
-
Those aggregations are consequently compared.
|
|
213
|
-
As a result the differences in the data can be narrowed down to certain grouping values.
|
|
214
|
-
|
|
215
|
-
There are three options to define the column over which the group-by is executed.
|
|
216
|
-
|
|
217
|
-
1. "group-by-columns-per-table" defined as multiple lists for specific tables. Activated with the `USE_GROUP_BY_COLUMNS` parameter and `GROUP_BY_COLUMNS_PER_TABLE` defined in the `migration_config.json`.
|
|
218
|
-
2. "group-by-columns" from a predefined list for all tables by a validation. Activated with the `USE_GROUP_BY_COLUMNS` parameter and `GROUP_BY_COLUMNS` defined in the `migration_config.json`.
|
|
219
|
-
3. "group-by-columns" evaluated from all existing columns by a validation
|
|
220
|
-
|
|
221
|
-
The validation consists of a number of tests and can be configured by a number of parameters to either easily find columns to group by over or to only select columns which add a definite value for pinpointing the differences in the data.
|
|
222
|
-
|
|
223
|
-
The validation tests for the "group-by-columns" are:
|
|
224
|
-
|
|
225
|
-
1. Number of distinct values of the column is more than 1.
|
|
226
|
-
2. Number of distinct values of the column is less than the rowcount of the table.
|
|
227
|
-
3. Number of distinct values of the column exceeds the `MIN_GROUP_BY_COUNT_DISTINCT` parameter.
|
|
228
|
-
4. Number of distinct values of the column is below the `MAX_GROUP_BY_COUNT_DISTINCT` parameter.
|
|
229
|
-
5. The size of the expected result of the group-by-query is below the `MAX_GROUP_BY_SIZE` parameter.
|
|
230
|
-
(The size is defined by "Number of distinct values" * "Number of columns")
|
|
231
|
-
|
|
232
|
-
All tests are executed on source and target.
|
|
233
|
-
|
|
234
|
-
> Note: The group by comparison can be activated by setting the `execute_group_by_comparison` parameter to TRUE.
|
|
235
|
-
The `migration_config.json` has to include the following keys when the parameter `use_group_by_columns` is set to TRUE.
|
|
236
|
-
|
|
237
|
-
"GROUP_BY_AGGREGATION":{
|
|
238
|
-
"GROUP_BY_COLUMNS_PER_TABLE": {},
|
|
239
|
-
"GROUP_BY_COLUMNS":[]
|
|
240
|
-
}
|
|
241
|
-
The values of those keys can be empty.
|
|
242
|
-
|
|
243
|
-
## icsDV - Comparison Results
|
|
244
|
-
|
|
245
|
-
### JSON Results
|
|
246
|
-
|
|
247
|
-
- Complete Comparison Result JSONs
|
|
248
|
-
- Live Comparison Result JSONs
|
|
249
|
-
|
|
250
|
-
### Target System Result Tables
|
|
251
|
-
|
|
252
|
-
- High-Level Result
|
|
253
|
-
- Object-Level Result
|
|
254
|
-
- Column-Level Result
|
|
255
|
-
|
|
256
|
-
### Result Export in a File Storage
|
|
257
|
-
|
|
258
|
-
## icsDV - Setup
|
|
259
|
-
|
|
260
|
-
### Code setup
|
|
261
|
-
|
|
262
|
-
- To handle the code, we recommend using VS Code.
|
|
263
|
-
- The code is written in python. The tool is compatible with version 3.11
|
|
264
|
-
- It is recommended to use a project-specific python environment.
|
|
265
|
-
You can create one with `python -m venv .env` in the root folder of this repo.
|
|
266
|
-
After creating it, you should activate it (`source .env/bin/activate`), select the python binary `.env/bin/python` therein as your python interpreter in VSC and make sure that python libraries are read from and installed to this environment, i.e. `export PYTHONPATH=$(pwd)/.env/lib/python3.8/site-packages`.
|
|
267
|
-
- In this environment, install the packages listed in the `requirements.txt` and the `requirements-dev.txt`. i.e. run `pip install -r requirements.txt`.
|
|
268
|
-
|
|
269
|
-
### Setup for manual execution
|
|
270
|
-
|
|
271
|
-
### Setup as Azure DevOps pipeline
|
|
272
|
-
|
|
273
|
-
### Setup as GitLab pipeline
|
|
274
|
-
|
|
275
|
-
## authentication
|
|
276
|
-
|
|
277
|
-
The following auth methods to snowflake are supported:
|
|
278
|
-
|
|
279
|
-
- password, provided via PASSWORD_NAME
|
|
280
|
-
- private key with/without encryption, provided via PRIVATE_KEY_NAME with/without PRIVATE_KEY_PASSPHRASE_NAME
|
|
281
|
-
- path to private key file with/without encryption, provided via PRIVATE_KEY_FILE_PATH with/without PRIVATE_KEY_FILE_PASSWORD
|
|
282
|
-
|
|
283
|
-
## devcontainer
|
|
284
|
-
|
|
285
|
-
run with uv as follows in devcontainer:
|
|
286
|
-
```bash
|
|
287
|
-
uv run -s icsDataValidation/main.py
|
|
288
|
-
```
|
|
289
|
-
|
|
290
|
-
Inside the [devcontainer config](.devcontainer/devcontainer.json) the mounts setting is used to bring a .env from the host system into the devcontainer.
|
|
291
|
-
|
|
292
|
-
```bash
|
|
293
|
-
"mounts": [
|
|
294
|
-
"source=/home/Documents/Generic_Testing_Tool/generic_testing_tool_password.env,target=/workspaces/icsDataValidation/examples/generic_testing_tool_password.env,type=bind"
|
|
295
|
-
]
|
|
296
|
-
```
|
|
297
|
-
|
|
298
|
-
To use this feature, either create the .env under the source path on your host or adjust this path to another path on the host system. The target path does not need adjustment!
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/core/database_objects.py
RENAMED
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/core/object_comparison.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/result_service.py
RENAMED
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/services/system_service.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/file_util.py
RENAMED
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/logger_util.py
RENAMED
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation/utils/pandas_util.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/not-zip-safe
RENAMED
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/requires.txt
RENAMED
|
File without changes
|
{icsdatavalidation-1.0.415 → icsdatavalidation-1.0.419}/icsDataValidation.egg-info/top_level.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|