informatica-python 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- informatica_python/__init__.py +4 -0
- informatica_python/cli.py +83 -0
- informatica_python/converter.py +285 -0
- informatica_python/generators/__init__.py +0 -0
- informatica_python/generators/config_gen.py +159 -0
- informatica_python/generators/error_log_gen.py +140 -0
- informatica_python/generators/helper_gen.py +693 -0
- informatica_python/generators/mapping_gen.py +649 -0
- informatica_python/generators/sql_gen.py +132 -0
- informatica_python/generators/workflow_gen.py +234 -0
- informatica_python/models.py +281 -0
- informatica_python/parser.py +468 -0
- informatica_python/utils/__init__.py +0 -0
- informatica_python/utils/datatype_map.py +105 -0
- informatica_python/utils/expression_converter.py +128 -0
- informatica_python-1.0.0.dist-info/METADATA +118 -0
- informatica_python-1.0.0.dist-info/RECORD +20 -0
- informatica_python-1.0.0.dist-info/WHEEL +5 -0
- informatica_python-1.0.0.dist-info/entry_points.txt +2 -0
- informatica_python-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
import sys
|
|
3
|
+
import json
|
|
4
|
+
from informatica_python.converter import InformaticaConverter
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def main():
    """Command-line entry point for the informatica-python converter.

    Parses CLI arguments, then either dumps the parsed workflow XML as JSON
    (``--json`` / ``--json-file``) or generates Python/PySpark code into a
    directory or a zip archive. Exits with status 1 on any conversion error.
    """
    parser = argparse.ArgumentParser(
        prog="informatica-python",
        description="Convert Informatica PowerCenter workflow XML to Python/PySpark code",
    )
    parser.add_argument(
        "input_file",
        help="Path to Informatica workflow XML file",
    )
    parser.add_argument(
        "-o", "--output",
        default="output",
        help="Output directory for generated files (default: output)",
    )
    parser.add_argument(
        "-z", "--zip",
        default=None,
        help="Output as zip file (provide zip file path)",
    )
    parser.add_argument(
        "--data-lib",
        choices=["pandas", "dask", "polars", "vaex", "modin"],
        default="pandas",
        help="Data manipulation library to use (default: pandas)",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        dest="output_json",
        help="Output parsed XML as JSON (no code generation)",
    )
    parser.add_argument(
        "--json-file",
        default=None,
        help="Save parsed JSON to a file",
    )

    args = parser.parse_args()

    converter = InformaticaConverter(data_lib=args.data_lib)

    try:
        if args.output_json or args.json_file:
            # JSON mode: parse only, no code generation.
            result = converter.parse_file(args.input_file)
            json_str = json.dumps(result, indent=2, ensure_ascii=False)
            if args.json_file:
                with open(args.json_file, "w", encoding="utf-8") as f:
                    f.write(json_str)
                print(f"JSON saved to: {args.json_file}")
            else:
                print(json_str)
        else:
            output_path = converter.convert(
                args.input_file,
                output_dir=args.output,
                output_zip=args.zip,
            )
            print(f"Conversion complete! Output: {output_path}")
            # Plain string here: the original used an f-string with no
            # placeholders (lint F541); the printed text is unchanged.
            print("Files generated:")
            _print_generated_files(output_path, as_zip=bool(args.zip))
    except Exception as e:
        # CLI boundary: report any failure briefly and exit non-zero.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


def _print_generated_files(output_path, as_zip=False):
    """Print the generated file names: zip members or directory entries.

    Imports are kept local (as in the original inline code) so the common
    code paths do not pay for them.
    """
    if as_zip:
        import zipfile
        with zipfile.ZipFile(output_path, "r") as zf:
            for name in zf.namelist():
                print(f" - {name}")
    else:
        import os
        for f in sorted(os.listdir(output_path)):
            print(f" - {f}")


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import json
|
|
3
|
+
import zipfile
|
|
4
|
+
import tempfile
|
|
5
|
+
from typing import Optional
|
|
6
|
+
from informatica_python.parser import InformaticaParser
|
|
7
|
+
from informatica_python.models import PowermartDef, FolderDef
|
|
8
|
+
from informatica_python.generators.helper_gen import generate_helper_functions
|
|
9
|
+
from informatica_python.generators.mapping_gen import generate_mapping_code
|
|
10
|
+
from informatica_python.generators.workflow_gen import generate_workflow_code
|
|
11
|
+
from informatica_python.generators.config_gen import generate_config
|
|
12
|
+
from informatica_python.generators.sql_gen import generate_sql_file
|
|
13
|
+
from informatica_python.generators.error_log_gen import generate_error_log
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class InformaticaConverter:
    """Orchestrates parsing of a PowerCenter XML export and code generation.

    The converter parses the XML into model objects, then emits per-folder
    artifacts (helper module, numbered mapping scripts, workflow driver,
    config.yml, SQL dump, conversion log) into a directory or a zip archive.
    """

    def __init__(self, data_lib: str = "pandas"):
        # data_lib selects which data-manipulation library the generated
        # code targets (pandas, dask, polars, vaex, modin).
        self.data_lib = data_lib
        self.parser = InformaticaParser()
        # Last parsed document; populated by parse_file/parse_string/convert*.
        self.powermart = None

    def parse_file(self, file_path: str) -> dict:
        """Parse an XML file and return its JSON-serializable dict form."""
        self.powermart = self.parser.parse_file(file_path)
        return self.to_json()

    def parse_string(self, xml_string: str) -> dict:
        """Parse an XML string and return its JSON-serializable dict form."""
        self.powermart = self.parser.parse_string(xml_string)
        return self.to_json()

    def to_json(self) -> dict:
        """Return the last parsed document as a plain dict ({} if none)."""
        if not self.powermart:
            return {}
        return self._powermart_to_dict(self.powermart)

    def convert(self, file_path: str, output_dir: str = "output",
                output_zip: Optional[str] = None) -> str:
        """Parse *file_path* and generate code; returns the output path."""
        self.powermart = self.parser.parse_file(file_path)
        return self._convert_all(output_dir, output_zip, "XML file")

    def convert_string(self, xml_string: str, output_dir: str = "output",
                       output_zip: Optional[str] = None) -> str:
        """Parse *xml_string* and generate code; returns the output path."""
        self.powermart = self.parser.parse_string(xml_string)
        return self._convert_all(output_dir, output_zip, "XML")

    def _convert_all(self, output_dir: str, output_zip: Optional[str],
                     source_desc: str) -> str:
        """Shared folder-dispatch logic behind convert()/convert_string().

        Raises ValueError when the parsed document has no repositories or
        no folders. *source_desc* preserves the historical error-message
        wording of the two public entry points ("XML file" vs "XML").
        """
        if not self.powermart.repositories:
            raise ValueError(f"No repository found in {source_desc}")

        all_folders = [folder
                       for repo in self.powermart.repositories
                       for folder in repo.folders]

        if not all_folders:
            raise ValueError(f"No folder found in {source_desc}")

        # Single folder: write directly to the requested location.
        if len(all_folders) == 1:
            return self._convert_folder(all_folders[0], output_dir, output_zip)

        # Multiple folders: one sub-directory (or name-suffixed zip) each.
        result_path = output_dir if not output_zip else os.path.dirname(output_zip) or "."
        for folder in all_folders:
            folder_dir = os.path.join(output_dir, folder.name)
            folder_zip = None
            if output_zip:
                base, ext = os.path.splitext(output_zip)
                folder_zip = f"{base}_{folder.name}{ext}"
            self._convert_folder(folder, folder_dir, folder_zip)
        return result_path

    def _convert_folder(self, folder: FolderDef, output_dir: str,
                        output_zip: Optional[str] = None) -> str:
        """Generate every artifact for one folder and write them out.

        Returns the directory path or zip path that was written.
        """
        files = {}

        files["helper_functions.py"] = generate_helper_functions(folder, self.data_lib)

        # One script per mapping, numbered in document order (1-based).
        for i, mapping in enumerate(folder.mappings, 1):
            files[f"mapping_{i}.py"] = generate_mapping_code(mapping, folder, self.data_lib, i)

        files["workflow.py"] = generate_workflow_code(folder)
        files["config.yml"] = generate_config(folder, self.data_lib)
        files["all_sql_queries.sql"] = generate_sql_file(folder)
        files["error_log.txt"] = generate_error_log(
            folder,
            parser_errors=self.parser.errors,
            parser_warnings=self.parser.warnings,
        )

        if output_zip:
            return self._write_zip(files, output_zip)
        return self._write_files(files, output_dir)

    def _write_files(self, files: dict, output_dir: str) -> str:
        """Write *files* (name -> content) into *output_dir*; returns the dir."""
        os.makedirs(output_dir, exist_ok=True)
        for filename, content in files.items():
            filepath = os.path.join(output_dir, filename)
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(content)
        return output_dir

    def _write_zip(self, files: dict, zip_path: str) -> str:
        """Write *files* (name -> content) into a fresh zip; returns its path."""
        os.makedirs(os.path.dirname(zip_path) or ".", exist_ok=True)
        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
            for filename, content in files.items():
                zf.writestr(filename, content)
        return zip_path

    def _powermart_to_dict(self, pm: PowermartDef) -> dict:
        """Convert the POWERMART root (repositories and folders) to a dict."""
        return {
            "creation_date": pm.creation_date,
            "repository_version": pm.repository_version,
            "repositories": [
                {
                    "name": repo.name,
                    "version": repo.version,
                    "codepage": repo.codepage,
                    "database_type": repo.database_type,
                    "folders": [self._folder_to_dict(f) for f in repo.folders],
                }
                for repo in pm.repositories
            ],
        }

    def _folder_to_dict(self, folder: FolderDef) -> dict:
        """Convert one folder and all object collections it owns to a dict."""
        return {
            "name": folder.name,
            "owner": folder.owner,
            "description": folder.description,
            "sources": [self._source_to_dict(s) for s in folder.sources],
            "targets": [self._target_to_dict(t) for t in folder.targets],
            "mappings": [self._mapping_to_dict(m) for m in folder.mappings],
            "sessions": [{"name": s.name, "mapping_name": s.mapping_name} for s in folder.sessions],
            "workflows": [self._workflow_to_dict(w) for w in folder.workflows],
            "tasks": [{"name": t.name, "type": t.type} for t in folder.tasks],
            "configs": [{"name": c.name} for c in folder.configs],
            "schedulers": [{"name": s.name} for s in folder.schedulers],
            "shortcuts": [{"name": s.name, "reference": s.reference_name} for s in folder.shortcuts],
            "mapplets": [{"name": m.name} for m in folder.mapplets],
        }

    @staticmethod
    def _field_to_dict(f) -> dict:
        """Convert a source/target field definition to a dict.

        Shared by _source_to_dict/_target_to_dict, which previously carried
        two identical copies of this literal.
        """
        return {
            "name": f.name,
            "datatype": f.datatype,
            "precision": f.precision,
            "scale": f.scale,
            "nullable": f.nullable,
            "keytype": f.keytype,
        }

    def _source_to_dict(self, src):
        """Convert a source definition (connection info plus fields)."""
        return {
            "name": src.name,
            "database_type": src.database_type,
            "db_name": src.db_name,
            "owner_name": src.owner_name,
            "fields": [self._field_to_dict(f) for f in src.fields],
        }

    def _target_to_dict(self, tgt):
        """Convert a target definition (database type plus fields)."""
        return {
            "name": tgt.name,
            "database_type": tgt.database_type,
            "fields": [self._field_to_dict(f) for f in tgt.fields],
        }

    def _mapping_to_dict(self, mapping):
        """Convert one mapping: transformations, connectors, instances, variables."""
        return {
            "name": mapping.name,
            "description": mapping.description,
            "is_valid": mapping.is_valid,
            "transformations": [
                {
                    "name": tx.name,
                    "type": tx.type,
                    "fields": [
                        {
                            "name": f.name,
                            "datatype": f.datatype,
                            "expression": f.expression,
                            "porttype": f.porttype,
                        }
                        for f in tx.fields
                    ],
                    "attributes": [
                        {"name": a.name, "value": a.value}
                        for a in tx.attributes
                    ],
                }
                for tx in mapping.transformations
            ],
            "connectors": [
                {
                    "from_field": c.from_field,
                    "from_instance": c.from_instance,
                    "to_field": c.to_field,
                    "to_instance": c.to_instance,
                }
                for c in mapping.connectors
            ],
            "instances": [
                {
                    "name": i.name,
                    "type": i.type,
                    "transformation_name": i.transformation_name,
                }
                for i in mapping.instances
            ],
            "variables": [
                {
                    "name": v.name,
                    "datatype": v.datatype,
                    "default_value": v.default_value,
                }
                for v in mapping.variables
            ],
        }

    def _workflow_to_dict(self, wf):
        """Convert one workflow: task instances, links, variables."""
        return {
            "name": wf.name,
            "description": wf.description,
            "is_valid": wf.is_valid,
            "task_instances": [
                {
                    "name": t.name,
                    "task_name": t.task_name,
                    "task_type": t.task_type,
                }
                for t in wf.task_instances
            ],
            "links": [
                {
                    "from": l.from_instance,
                    "to": l.to_instance,
                    "condition": l.condition,
                }
                for l in wf.links
            ],
            "variables": [
                {
                    "name": v.name,
                    "datatype": v.datatype,
                    "default_value": v.default_value,
                }
                for v in wf.variables
            ],
        }
|
|
File without changes
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
from typing import List, Dict
|
|
3
|
+
from informatica_python.models import FolderDef, SessionDef
|
|
4
|
+
from informatica_python.utils.datatype_map import get_db_type
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def generate_config(folder: FolderDef, data_lib: str = "pandas") -> str:
    """Render a config.yml document for one converted folder.

    Builds a YAML config describing the workflow, source/target schemas,
    database connections (with ${...} placeholders for credentials) and
    mapping/workflow variables, then serializes it with key order preserved.

    Returns the YAML text.
    """
    config = {
        "workflow": {
            "name": "",
            "folder": folder.name,
        },
        "data_library": data_lib,
        "connections": {},
        "sources": {},
        "targets": {},
        "variables": {},
    }

    # Only the first workflow's name is recorded (matches original behavior).
    if folder.workflows:
        config["workflow"]["name"] = folder.workflows[0].name

    # Tracks connection keys already registered, to avoid duplicates.
    connection_names = set()

    for src in folder.sources:
        src_config = {
            "database_type": src.database_type or "unknown",
            "db_name": src.db_name or "",
            "owner": src.owner_name or "dbo",
            "fields": [_field_dict(fld) for fld in src.fields],
        }
        config["sources"][src.name] = src_config

        if src.database_type and src.database_type != "Flat File":
            # Relational source: register one connection per db_name.
            conn_key = src.db_name or "default"
            if conn_key not in connection_names:
                connection_names.add(conn_key)
                config["connections"][conn_key] = {
                    "type": get_db_type(src.database_type),
                    "host": "${DB_HOST}",
                    "port": _get_default_port(src.database_type),
                    "database": src.db_name or "${DB_NAME}",
                    "username": "${DB_USER}",
                    "password": "${DB_PASSWORD}",
                    "schema": src.owner_name or "dbo",
                }
        else:
            # Flat file (or unknown) source: file-based read settings.
            src_config["file_path"] = f"${{INPUT_DIR}}/{src.name}"
            src_config["delimiter"] = ","
            src_config["header"] = True
            src_config["encoding"] = "utf-8"

    for tgt in folder.targets:
        tgt_config = {
            "database_type": tgt.database_type or "unknown",
            "fields": [_field_dict(fld, include_keytype=True) for fld in tgt.fields],
        }
        config["targets"][tgt.name] = tgt_config

        if tgt.database_type and tgt.database_type != "Flat File":
            # All relational targets share a single "target" connection.
            conn_key = "target"
            if conn_key not in connection_names:
                connection_names.add(conn_key)
                config["connections"][conn_key] = {
                    "type": get_db_type(tgt.database_type),
                    "host": "${DB_HOST}",
                    "port": _get_default_port(tgt.database_type),
                    "database": "${TARGET_DB_NAME}",
                    "username": "${DB_USER}",
                    "password": "${DB_PASSWORD}",
                    "schema": "dbo",
                }
        else:
            config["targets"][tgt.name]["file_path"] = f"${{OUTPUT_DIR}}/{tgt.name}"

    _extract_session_connections(folder, config, connection_names)

    # Mapping and workflow variables share the same registration shape;
    # the original carried two identical loop bodies.
    for mapping in folder.mappings:
        _register_variables(config, mapping.variables)
    for wf in folder.workflows:
        _register_variables(config, wf.variables)

    # Guarantee at least one connection entry so generated code has a target.
    if not config["connections"]:
        config["connections"]["default"] = {
            "type": "mssql",
            "host": "${DB_HOST}",
            "port": 1433,
            "database": "${DB_NAME}",
            "username": "${DB_USER}",
            "password": "${DB_PASSWORD}",
            "schema": "dbo",
        }

    return yaml.dump(config, default_flow_style=False, sort_keys=False, allow_unicode=True)


def _field_dict(fld, include_keytype=False):
    """Serialize one field definition; targets additionally record keytype."""
    d = {
        "name": fld.name,
        "datatype": fld.datatype,
        "precision": fld.precision,
        "scale": fld.scale,
        "nullable": fld.nullable,
    }
    if include_keytype:
        d["keytype"] = fld.keytype
    return d


def _register_variables(config, variables):
    """Record variables under config['variables'], stripping the '$$' prefix."""
    for var in variables:
        var_name = var.name.replace("$$", "")
        config["variables"][var_name] = {
            "datatype": var.datatype,
            "default_value": var.default_value or "",
            "is_persistent": var.is_persistent,
        }
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def _get_default_port(database_type):
    """Return the conventional TCP port for a known database type.

    Unrecognized (or None) types fall back to 1433, the SQL Server default.
    """
    known_ports = (
        ("Microsoft SQL Server", 1433),
        ("Oracle", 1521),
        ("Sybase", 5000),
        ("DB2", 50000),
        ("Teradata", 1025),
        ("Informix", 9088),
    )
    for db_type, port in known_ports:
        if db_type == database_type:
            return port
    return 1433
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _extract_session_connections(folder, config, connection_names):
    """Register connections referenced by session transformation instances.

    Mutates ``config['connections']`` and ``connection_names`` in place;
    names already registered (e.g. from source/target scanning) are skipped.
    Host/port/credential fields are emitted as ${...} placeholders.
    """
    for session in folder.sessions:
        for instance in session.transform_instances:
            for ref in instance.connections:
                # Prefer the explicit connection name, then the variable,
                # then a generic fallback key.
                key = ref.connection_name or ref.variable or "default"
                if not key or key in connection_names:
                    continue
                connection_names.add(key)
                config["connections"][key] = {
                    "type": ref.connection_type or "relational",
                    "connection_name": ref.connection_name,
                    "connection_subtype": ref.connection_subtype,
                    "host": "${DB_HOST}",
                    "port": 1433,
                    "database": "${DB_NAME}",
                    "username": "${DB_USER}",
                    "password": "${DB_PASSWORD}",
                }
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
from informatica_python.models import FolderDef
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def generate_error_log(folder: FolderDef, parser_errors=None, parser_warnings=None) -> str:
    """Build the human-readable conversion log for one folder.

    The log inventories every parsed object (sources, targets, mappings,
    sessions, workflows, mapplets, shortcuts, configs, schedulers), echoes
    parser errors/warnings, flags transformations that need manual review,
    and closes with a conversion-coverage summary.

    Returns the complete log as a single newline-joined string.
    """
    out = []
    emit = out.append

    # --- header ------------------------------------------------------------
    emit("=" * 70)
    emit("Informatica-Python Conversion Log")
    emit(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    emit(f"Folder: {folder.name}")
    emit("=" * 70)
    emit("")

    # --- object inventory ----------------------------------------------------
    emit(f"Sources found: {len(folder.sources)}")
    for src in folder.sources:
        emit(f" - {src.name} ({src.database_type}, {len(src.fields)} fields)")
    emit("")

    emit(f"Targets found: {len(folder.targets)}")
    for tgt in folder.targets:
        emit(f" - {tgt.name} ({tgt.database_type}, {len(tgt.fields)} fields)")
    emit("")

    emit(f"Mappings found: {len(folder.mappings)}")
    for mapping in folder.mappings:
        emit(f" - {mapping.name} (valid={mapping.is_valid})")
        emit(f" Transformations: {len(mapping.transformations)}")
        type_counts = {}
        for tx in mapping.transformations:
            type_counts[tx.type] = type_counts.get(tx.type, 0) + 1
        for tx_type, count in sorted(type_counts.items()):
            emit(f" {tx_type}: {count}")
        emit(f" Connectors: {len(mapping.connectors)}")
        emit(f" Instances: {len(mapping.instances)}")
        emit(f" Variables: {len(mapping.variables)}")
        emit("")

    emit(f"Sessions found: {len(folder.sessions)}")
    for session in folder.sessions:
        emit(f" - {session.name} (mapping: {session.mapping_name})")
    emit("")

    emit(f"Workflows found: {len(folder.workflows)}")
    for wf in folder.workflows:
        emit(f" - {wf.name} (valid={wf.is_valid})")
        emit(f" Task Instances: {len(wf.task_instances)}")
        emit(f" Links: {len(wf.links)}")
        emit(f" Variables: {len(wf.variables)}")
        emit("")

    # Optional sections: only emitted when the folder has such objects.
    if folder.mapplets:
        emit(f"Mapplets found: {len(folder.mapplets)}")
        for m in folder.mapplets:
            emit(f" - {m.name}")
        emit("")

    if folder.shortcuts:
        emit(f"Shortcuts found: {len(folder.shortcuts)}")
        for s in folder.shortcuts:
            emit(f" - {s.name} -> {s.reference_name}")
        emit("")

    if folder.configs:
        emit(f"Configs found: {len(folder.configs)}")
        for c in folder.configs:
            emit(f" - {c.name}")
        emit("")

    if folder.schedulers:
        emit(f"Schedulers found: {len(folder.schedulers)}")
        for s in folder.schedulers:
            emit(f" - {s.name}")
        emit("")

    # --- warnings and detections --------------------------------------------
    emit("-" * 70)
    emit("WARNINGS AND DETECTIONS")
    emit("-" * 70)
    emit("")

    for err in (parser_errors or []):
        emit(f"[ERROR] {err}")
    if parser_errors:
        emit("")

    for warn in (parser_warnings or []):
        emit(f"[WARNING] {warn}")
    if parser_warnings:
        emit("")

    for mapping in folder.mappings:
        for tx in mapping.transformations:
            # Transformation types that cannot be translated automatically.
            if tx.type in ("Custom Transformation", "Java", "Stored Procedure"):
                emit(f"[WARNING] Mapping '{mapping.name}': Transformation '{tx.name}' is type '{tx.type}' - manual review needed")

            if tx.type == "Lookup Procedure":
                if any(a.name == "Lookup Sql Override" and a.value for a in tx.attributes):
                    emit(f"[INFO] Mapping '{mapping.name}': Lookup '{tx.name}' has SQL override")

            if tx.type == "Source Qualifier":
                for attr in tx.attributes:
                    if attr.name == "Sql Query" and attr.value:
                        emit(f"[INFO] Mapping '{mapping.name}': Source Qualifier '{tx.name}' has SQL override")

            for fld in tx.fields:
                if not fld.expression:
                    continue
                expr_upper = fld.expression.upper()
                if "ERROR(" in expr_upper or "ABORT(" in expr_upper:
                    emit(f"[WARNING] Mapping '{mapping.name}': Field '{fld.name}' in '{tx.name}' contains ERROR/ABORT function")
                if ":LKP." in fld.expression:
                    emit(f"[INFO] Mapping '{mapping.name}': Field '{fld.name}' uses inline lookup")

    # --- summary -------------------------------------------------------------
    emit("")
    emit("-" * 70)
    emit("CONVERSION SUMMARY")
    emit("-" * 70)
    emit("")

    unsupported_types = ("Custom Transformation", "Java", "Stored Procedure",
                         "External Procedure", "HTTP Transformation",
                         "Web Service Consumer")
    total_transforms = sum(len(m.transformations) for m in folder.mappings)
    unsupported = sum(1 for m in folder.mappings
                      for tx in m.transformations
                      if tx.type in unsupported_types)
    supported = total_transforms - unsupported
    pct = (supported / total_transforms * 100) if total_transforms > 0 else 100

    emit(f"Total transformations: {total_transforms}")
    emit(f"Supported/converted: {supported}")
    emit(f"Needs manual review: {unsupported}")
    emit(f"Conversion coverage: {pct:.1f}%")
    emit("")

    return "\n".join(out)
|