informatica-python 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. informatica_python-1.0.0/PKG-INFO +118 -0
  2. informatica_python-1.0.0/README.md +106 -0
  3. informatica_python-1.0.0/informatica_python/__init__.py +4 -0
  4. informatica_python-1.0.0/informatica_python/cli.py +83 -0
  5. informatica_python-1.0.0/informatica_python/converter.py +285 -0
  6. informatica_python-1.0.0/informatica_python/generators/__init__.py +0 -0
  7. informatica_python-1.0.0/informatica_python/generators/config_gen.py +159 -0
  8. informatica_python-1.0.0/informatica_python/generators/error_log_gen.py +140 -0
  9. informatica_python-1.0.0/informatica_python/generators/helper_gen.py +693 -0
  10. informatica_python-1.0.0/informatica_python/generators/mapping_gen.py +649 -0
  11. informatica_python-1.0.0/informatica_python/generators/sql_gen.py +132 -0
  12. informatica_python-1.0.0/informatica_python/generators/workflow_gen.py +234 -0
  13. informatica_python-1.0.0/informatica_python/models.py +281 -0
  14. informatica_python-1.0.0/informatica_python/parser.py +468 -0
  15. informatica_python-1.0.0/informatica_python/utils/__init__.py +0 -0
  16. informatica_python-1.0.0/informatica_python/utils/datatype_map.py +105 -0
  17. informatica_python-1.0.0/informatica_python/utils/expression_converter.py +128 -0
  18. informatica_python-1.0.0/informatica_python.egg-info/PKG-INFO +118 -0
  19. informatica_python-1.0.0/informatica_python.egg-info/SOURCES.txt +24 -0
  20. informatica_python-1.0.0/informatica_python.egg-info/dependency_links.txt +1 -0
  21. informatica_python-1.0.0/informatica_python.egg-info/entry_points.txt +2 -0
  22. informatica_python-1.0.0/informatica_python.egg-info/requires.txt +5 -0
  23. informatica_python-1.0.0/informatica_python.egg-info/top_level.txt +1 -0
  24. informatica_python-1.0.0/pyproject.toml +24 -0
  25. informatica_python-1.0.0/setup.cfg +4 -0
  26. informatica_python-1.0.0/tests/test_converter.py +260 -0
@@ -0,0 +1,118 @@
1
+ Metadata-Version: 2.4
2
+ Name: informatica-python
3
+ Version: 1.0.0
4
+ Summary: Convert Informatica PowerCenter workflow XML to Python/PySpark code
5
+ License: MIT
6
+ Requires-Python: >=3.8
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: lxml>=4.9.0
9
+ Requires-Dist: pyyaml>=6.0
10
+ Provides-Extra: dev
11
+ Requires-Dist: pytest>=7.0; extra == "dev"
12
+
13
+ # informatica-python
14
+
15
+ Convert Informatica PowerCenter workflow XML files to Python/PySpark code.
16
+
17
+ ## Installation
18
+
19
+ ```bash
20
+ pip install informatica-python
21
+ ```
22
+
23
+ ## Quick Start
24
+
25
+ ### Command Line
26
+
27
+ ```bash
28
+ # Convert XML to Python files in a directory
29
+ informatica-python workflow.xml -o output_dir
30
+
31
+ # Convert XML to a zip file
32
+ informatica-python workflow.xml -z output.zip
33
+
34
+ # Use a different data library (pandas, dask, polars, vaex, modin)
35
+ informatica-python workflow.xml -o output_dir --data-lib polars
36
+
37
+ # Parse XML to JSON (no code generation)
38
+ informatica-python workflow.xml --json
39
+
40
+ # Save parsed JSON to file
41
+ informatica-python workflow.xml --json-file parsed.json
42
+ ```
43
+
44
+ ### Python API
45
+
46
+ ```python
47
+ from informatica_python import InformaticaConverter
48
+
49
+ # Convert XML to Python files
50
+ converter = InformaticaConverter(data_lib="pandas")
51
+ converter.convert("workflow.xml", output_dir="output")
52
+
53
+ # Convert to zip
54
+ converter.convert("workflow.xml", output_zip="output.zip")
55
+
56
+ # Parse XML to JSON dict
57
+ result = converter.parse_file("workflow.xml")
58
+
59
+ # Parse XML string
60
+ result = converter.parse_string(xml_string)
61
+ ```
62
+
63
+ ## Generated Output Files
64
+
65
+ | File | Description |
66
+ |------|-------------|
67
+ | `helper_functions.py` | Database/file I/O functions plus Python equivalents for 50+ Informatica expression functions |
68
+ | `mapping_N.py` | One file per mapping with full transformation logic |
69
+ | `workflow.py` | Task orchestration with topological ordering |
70
+ | `config.yml` | Connection configs, source/target metadata, variables |
71
+ | `all_sql_queries.sql` | All extracted SQL queries (source qualifiers, lookups, pre/post SQL) |
72
+ | `error_log.txt` | Conversion summary, warnings, and coverage statistics |
73
+
74
+ ## Supported Transformation Types
75
+
76
+ - Source Qualifier / Application Source Qualifier
77
+ - Expression
78
+ - Filter
79
+ - Aggregator
80
+ - Sorter
81
+ - Joiner
82
+ - Lookup Procedure
83
+ - Router
84
+ - Union
85
+ - Update Strategy
86
+ - Sequence Generator
87
+ - Normalizer
88
+ - Rank
89
+ - Stored Procedure (placeholder)
90
+ - Custom Transformation (placeholder)
91
+ - Java Transformation (placeholder)
92
+ - SQL Transformation
93
+
94
+ ## Supported Data Libraries
95
+
96
+ Choose your preferred data manipulation library with `--data-lib`:
97
+
98
+ - **pandas** (default) — Standard Python data analysis
99
+ - **dask** — Parallel computing with pandas-like API
100
+ - **polars** — Fast DataFrame library written in Rust
101
+ - **vaex** — Out-of-core DataFrames for large datasets
102
+ - **modin** — Drop-in pandas replacement with parallel execution
103
+
104
+ ## Informatica Expression Functions
105
+
106
+ The generated `helper_functions.py` includes Python equivalents for:
107
+
108
+ `IIF`, `DECODE`, `NVL`, `NVL2`, `ISNULL`, `LTRIM`, `RTRIM`, `UPPER`, `LOWER`, `SUBSTR`, `LPAD`, `RPAD`, `TO_CHAR`, `TO_DATE`, `TO_INTEGER`, `TO_BIGINT`, `TO_FLOAT`, `TO_DECIMAL`, `REPLACECHR`, `REPLACESTR`, `INSTR`, `LENGTH`, `CONCAT`, `REG_EXTRACT`, `REG_MATCH`, `REG_REPLACE`, `GET_DATE_PART`, `ADD_TO_DATE`, `IS_DATE`, `IS_NUMBER`, `IS_SPACES`, `SYSDATE`, `ERROR`, `ABORT`, and more.
109
+
110
+ ## Requirements
111
+
112
+ - Python >= 3.8
113
+ - lxml >= 4.9.0
114
+ - PyYAML >= 6.0
115
+
116
+ ## License
117
+
118
+ MIT
@@ -0,0 +1,106 @@
1
+ # informatica-python
2
+
3
+ Convert Informatica PowerCenter workflow XML files to Python/PySpark code.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install informatica-python
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ### Command Line
14
+
15
+ ```bash
16
+ # Convert XML to Python files in a directory
17
+ informatica-python workflow.xml -o output_dir
18
+
19
+ # Convert XML to a zip file
20
+ informatica-python workflow.xml -z output.zip
21
+
22
+ # Use a different data library (pandas, dask, polars, vaex, modin)
23
+ informatica-python workflow.xml -o output_dir --data-lib polars
24
+
25
+ # Parse XML to JSON (no code generation)
26
+ informatica-python workflow.xml --json
27
+
28
+ # Save parsed JSON to file
29
+ informatica-python workflow.xml --json-file parsed.json
30
+ ```
31
+
32
+ ### Python API
33
+
34
+ ```python
35
+ from informatica_python import InformaticaConverter
36
+
37
+ # Convert XML to Python files
38
+ converter = InformaticaConverter(data_lib="pandas")
39
+ converter.convert("workflow.xml", output_dir="output")
40
+
41
+ # Convert to zip
42
+ converter.convert("workflow.xml", output_zip="output.zip")
43
+
44
+ # Parse XML to JSON dict
45
+ result = converter.parse_file("workflow.xml")
46
+
47
+ # Parse XML string
48
+ result = converter.parse_string(xml_string)
49
+ ```
50
+
51
+ ## Generated Output Files
52
+
53
+ | File | Description |
54
+ |------|-------------|
55
+ | `helper_functions.py` | Database/file I/O functions plus Python equivalents for 50+ Informatica expression functions |
56
+ | `mapping_N.py` | One file per mapping with full transformation logic |
57
+ | `workflow.py` | Task orchestration with topological ordering |
58
+ | `config.yml` | Connection configs, source/target metadata, variables |
59
+ | `all_sql_queries.sql` | All extracted SQL queries (source qualifiers, lookups, pre/post SQL) |
60
+ | `error_log.txt` | Conversion summary, warnings, and coverage statistics |
61
+
62
+ ## Supported Transformation Types
63
+
64
+ - Source Qualifier / Application Source Qualifier
65
+ - Expression
66
+ - Filter
67
+ - Aggregator
68
+ - Sorter
69
+ - Joiner
70
+ - Lookup Procedure
71
+ - Router
72
+ - Union
73
+ - Update Strategy
74
+ - Sequence Generator
75
+ - Normalizer
76
+ - Rank
77
+ - Stored Procedure (placeholder)
78
+ - Custom Transformation (placeholder)
79
+ - Java Transformation (placeholder)
80
+ - SQL Transformation
81
+
82
+ ## Supported Data Libraries
83
+
84
+ Choose your preferred data manipulation library with `--data-lib`:
85
+
86
+ - **pandas** (default) — Standard Python data analysis
87
+ - **dask** — Parallel computing with pandas-like API
88
+ - **polars** — Fast DataFrame library written in Rust
89
+ - **vaex** — Out-of-core DataFrames for large datasets
90
+ - **modin** — Drop-in pandas replacement with parallel execution
91
+
92
+ ## Informatica Expression Functions
93
+
94
+ The generated `helper_functions.py` includes Python equivalents for:
95
+
96
+ `IIF`, `DECODE`, `NVL`, `NVL2`, `ISNULL`, `LTRIM`, `RTRIM`, `UPPER`, `LOWER`, `SUBSTR`, `LPAD`, `RPAD`, `TO_CHAR`, `TO_DATE`, `TO_INTEGER`, `TO_BIGINT`, `TO_FLOAT`, `TO_DECIMAL`, `REPLACECHR`, `REPLACESTR`, `INSTR`, `LENGTH`, `CONCAT`, `REG_EXTRACT`, `REG_MATCH`, `REG_REPLACE`, `GET_DATE_PART`, `ADD_TO_DATE`, `IS_DATE`, `IS_NUMBER`, `IS_SPACES`, `SYSDATE`, `ERROR`, `ABORT`, and more.
97
+
98
+ ## Requirements
99
+
100
+ - Python >= 3.8
101
+ - lxml >= 4.9.0
102
+ - PyYAML >= 6.0
103
+
104
+ ## License
105
+
106
+ MIT
@@ -0,0 +1,4 @@
1
"""informatica-python: convert Informatica PowerCenter workflow XML to Python code.

Public API: :class:`InformaticaConverter`.
"""

from informatica_python.converter import InformaticaConverter

__version__ = "1.0.0"
__all__ = ["InformaticaConverter"]
@@ -0,0 +1,83 @@
1
+ import argparse
2
+ import sys
3
+ import json
4
+ from informatica_python.converter import InformaticaConverter
5
+
6
+
7
def main():
    """CLI entry point: parse arguments, then convert or dump parsed JSON.

    Exits with status 1 and a message on stderr if parsing or conversion
    raises any exception (top-level CLI boundary).
    """
    parser = argparse.ArgumentParser(
        prog="informatica-python",
        description="Convert Informatica PowerCenter workflow XML to Python/PySpark code",
    )

    parser.add_argument(
        "input_file",
        help="Path to Informatica workflow XML file",
    )
    parser.add_argument(
        "-o", "--output",
        default="output",
        help="Output directory for generated files (default: output)",
    )
    parser.add_argument(
        "-z", "--zip",
        default=None,
        help="Output as zip file (provide zip file path)",
    )
    parser.add_argument(
        "--data-lib",
        choices=["pandas", "dask", "polars", "vaex", "modin"],
        default="pandas",
        help="Data manipulation library to use (default: pandas)",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        dest="output_json",
        help="Output parsed XML as JSON (no code generation)",
    )
    parser.add_argument(
        "--json-file",
        default=None,
        help="Save parsed JSON to a file",
    )

    args = parser.parse_args()

    converter = InformaticaConverter(data_lib=args.data_lib)

    try:
        if args.output_json or args.json_file:
            # JSON-only mode: parse and emit, no code generation.
            result = converter.parse_file(args.input_file)
            json_str = json.dumps(result, indent=2, ensure_ascii=False)
            if args.json_file:
                with open(args.json_file, "w", encoding="utf-8") as f:
                    f.write(json_str)
                print(f"JSON saved to: {args.json_file}")
            else:
                print(json_str)
        else:
            output_path = converter.convert(
                args.input_file,
                output_dir=args.output,
                output_zip=args.zip,
            )
            print(f"Conversion complete! Output: {output_path}")
            # Fix: plain string literal — the original used an f-string
            # with no placeholders here.
            print("Files generated:")
            if args.zip:
                # Imported lazily: only needed to list the archive contents.
                import zipfile
                with zipfile.ZipFile(output_path, "r") as zf:
                    for name in zf.namelist():
                        print(f"  - {name}")
            else:
                import os
                for f in sorted(os.listdir(output_path)):
                    print(f"  - {f}")

    except Exception as e:
        # Top-level CLI boundary: report the failure and exit non-zero.
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -0,0 +1,285 @@
1
+ import os
2
+ import json
3
+ import zipfile
4
+ import tempfile
5
+ from typing import Optional
6
+ from informatica_python.parser import InformaticaParser
7
+ from informatica_python.models import PowermartDef, FolderDef
8
+ from informatica_python.generators.helper_gen import generate_helper_functions
9
+ from informatica_python.generators.mapping_gen import generate_mapping_code
10
+ from informatica_python.generators.workflow_gen import generate_workflow_code
11
+ from informatica_python.generators.config_gen import generate_config
12
+ from informatica_python.generators.sql_gen import generate_sql_file
13
+ from informatica_python.generators.error_log_gen import generate_error_log
14
+
15
+
16
class InformaticaConverter:
    """Convert Informatica PowerCenter workflow XML into generated Python files.

    The converter parses a workflow XML export and, per folder, emits a set of
    files (helper functions, one module per mapping, workflow orchestration,
    config, extracted SQL, and an error log) either into a directory or a zip
    archive. It can also serialize the parsed document to a JSON-friendly dict.
    """

    def __init__(self, data_lib: str = "pandas"):
        # data_lib selects the DataFrame backend used by the code generators
        # (pandas, dask, polars, vaex, modin).
        self.data_lib = data_lib
        self.parser = InformaticaParser()
        # Last parsed document; set by parse_file/parse_string/convert*.
        self.powermart = None

    def parse_file(self, file_path: str) -> dict:
        """Parse an XML file and return its JSON-serializable dict form."""
        self.powermart = self.parser.parse_file(file_path)
        return self.to_json()

    def parse_string(self, xml_string: str) -> dict:
        """Parse an XML string and return its JSON-serializable dict form."""
        self.powermart = self.parser.parse_string(xml_string)
        return self.to_json()

    def to_json(self) -> dict:
        """Return the last parsed document as a dict, or {} if none parsed."""
        if not self.powermart:
            return {}
        return self._powermart_to_dict(self.powermart)

    def convert(self, file_path: str, output_dir: str = "output",
                output_zip: Optional[str] = None) -> str:
        """Parse *file_path* and generate code for every folder it contains.

        Returns the directory (or zip path) that was written.
        Raises ValueError if the document has no repository or no folder.
        """
        self.powermart = self.parser.parse_file(file_path)
        return self._convert_parsed(output_dir, output_zip, source_desc="XML file")

    def convert_string(self, xml_string: str, output_dir: str = "output",
                       output_zip: Optional[str] = None) -> str:
        """Like :meth:`convert`, but for an in-memory XML string."""
        self.powermart = self.parser.parse_string(xml_string)
        return self._convert_parsed(output_dir, output_zip, source_desc="XML")

    def _convert_parsed(self, output_dir: str, output_zip: Optional[str],
                        source_desc: str) -> str:
        """Generate output for the already-parsed document in self.powermart.

        Shared implementation for convert()/convert_string(); *source_desc*
        only affects error-message wording ("XML file" vs "XML") so the
        historical messages of both entry points are preserved.
        """
        if not self.powermart.repositories:
            raise ValueError(f"No repository found in {source_desc}")

        all_folders = []
        for repo in self.powermart.repositories:
            all_folders.extend(repo.folders)

        if not all_folders:
            raise ValueError(f"No folder found in {source_desc}")

        # Single folder: write straight to the requested location.
        if len(all_folders) == 1:
            return self._convert_folder(all_folders[0], output_dir, output_zip)

        # Multiple folders: one sub-directory (or name-suffixed zip) each.
        result_path = output_dir if not output_zip else os.path.dirname(output_zip) or "."
        for folder in all_folders:
            folder_dir = os.path.join(output_dir, folder.name)
            folder_zip = None
            if output_zip:
                base, ext = os.path.splitext(output_zip)
                folder_zip = f"{base}_{folder.name}{ext}"
            self._convert_folder(folder, folder_dir, folder_zip)
        return result_path

    def _convert_folder(self, folder: "FolderDef", output_dir: str,
                        output_zip: Optional[str] = None) -> str:
        """Generate every output file for one folder and write it out."""
        files = {}

        # Shared helper library: I/O plus Informatica expression functions.
        files["helper_functions.py"] = generate_helper_functions(folder, self.data_lib)

        # One module per mapping, numbered from 1.
        for i, mapping in enumerate(folder.mappings, 1):
            code = generate_mapping_code(mapping, folder, self.data_lib, i)
            files[f"mapping_{i}.py"] = code

        files["workflow.py"] = generate_workflow_code(folder)
        files["config.yml"] = generate_config(folder, self.data_lib)
        files["all_sql_queries.sql"] = generate_sql_file(folder)
        files["error_log.txt"] = generate_error_log(
            folder,
            parser_errors=self.parser.errors,
            parser_warnings=self.parser.warnings,
        )

        if output_zip:
            return self._write_zip(files, output_zip)
        return self._write_files(files, output_dir)

    def _write_files(self, files: dict, output_dir: str) -> str:
        """Write mapping of {filename: content} into output_dir; return it."""
        os.makedirs(output_dir, exist_ok=True)
        for filename, content in files.items():
            filepath = os.path.join(output_dir, filename)
            with open(filepath, "w", encoding="utf-8") as f:
                f.write(content)
        return output_dir

    def _write_zip(self, files: dict, zip_path: str) -> str:
        """Write mapping of {filename: content} into a zip; return its path."""
        os.makedirs(os.path.dirname(zip_path) or ".", exist_ok=True)
        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
            for filename, content in files.items():
                zf.writestr(filename, content)
        return zip_path

    # --- dict serialization helpers (used by to_json) -----------------------

    def _powermart_to_dict(self, pm: "PowermartDef") -> dict:
        """Serialize the top-level POWERMART element and its repositories."""
        return {
            "creation_date": pm.creation_date,
            "repository_version": pm.repository_version,
            "repositories": [
                {
                    "name": repo.name,
                    "version": repo.version,
                    "codepage": repo.codepage,
                    "database_type": repo.database_type,
                    "folders": [self._folder_to_dict(f) for f in repo.folders],
                }
                for repo in pm.repositories
            ],
        }

    def _folder_to_dict(self, folder: "FolderDef") -> dict:
        """Serialize one folder: its sources, targets, mappings, tasks, etc."""
        return {
            "name": folder.name,
            "owner": folder.owner,
            "description": folder.description,
            "sources": [self._source_to_dict(s) for s in folder.sources],
            "targets": [self._target_to_dict(t) for t in folder.targets],
            "mappings": [self._mapping_to_dict(m) for m in folder.mappings],
            "sessions": [{"name": s.name, "mapping_name": s.mapping_name} for s in folder.sessions],
            "workflows": [self._workflow_to_dict(w) for w in folder.workflows],
            "tasks": [{"name": t.name, "type": t.type} for t in folder.tasks],
            "configs": [{"name": c.name} for c in folder.configs],
            "schedulers": [{"name": s.name} for s in folder.schedulers],
            "shortcuts": [{"name": s.name, "reference": s.reference_name} for s in folder.shortcuts],
            "mapplets": [{"name": m.name} for m in folder.mapplets],
        }

    @staticmethod
    def _field_to_dict(f) -> dict:
        """Serialize one source/target field definition (shared shape)."""
        return {
            "name": f.name,
            "datatype": f.datatype,
            "precision": f.precision,
            "scale": f.scale,
            "nullable": f.nullable,
            "keytype": f.keytype,
        }

    def _source_to_dict(self, src):
        """Serialize a source definition with its fields."""
        return {
            "name": src.name,
            "database_type": src.database_type,
            "db_name": src.db_name,
            "owner_name": src.owner_name,
            "fields": [self._field_to_dict(f) for f in src.fields],
        }

    def _target_to_dict(self, tgt):
        """Serialize a target definition with its fields."""
        return {
            "name": tgt.name,
            "database_type": tgt.database_type,
            "fields": [self._field_to_dict(f) for f in tgt.fields],
        }

    def _mapping_to_dict(self, mapping):
        """Serialize a mapping: transformations, connectors, instances, vars."""
        return {
            "name": mapping.name,
            "description": mapping.description,
            "is_valid": mapping.is_valid,
            "transformations": [
                {
                    "name": tx.name,
                    "type": tx.type,
                    "fields": [
                        {
                            "name": f.name,
                            "datatype": f.datatype,
                            "expression": f.expression,
                            "porttype": f.porttype,
                        }
                        for f in tx.fields
                    ],
                    "attributes": [
                        {"name": a.name, "value": a.value}
                        for a in tx.attributes
                    ],
                }
                for tx in mapping.transformations
            ],
            "connectors": [
                {
                    "from_field": c.from_field,
                    "from_instance": c.from_instance,
                    "to_field": c.to_field,
                    "to_instance": c.to_instance,
                }
                for c in mapping.connectors
            ],
            "instances": [
                {
                    "name": i.name,
                    "type": i.type,
                    "transformation_name": i.transformation_name,
                }
                for i in mapping.instances
            ],
            "variables": [
                {
                    "name": v.name,
                    "datatype": v.datatype,
                    "default_value": v.default_value,
                }
                for v in mapping.variables
            ],
        }

    def _workflow_to_dict(self, wf):
        """Serialize a workflow: task instances, links, and variables."""
        return {
            "name": wf.name,
            "description": wf.description,
            "is_valid": wf.is_valid,
            "task_instances": [
                {
                    "name": t.name,
                    "task_name": t.task_name,
                    "task_type": t.task_type,
                }
                for t in wf.task_instances
            ],
            "links": [
                {
                    "from": l.from_instance,
                    "to": l.to_instance,
                    "condition": l.condition,
                }
                for l in wf.links
            ],
            "variables": [
                {
                    "name": v.name,
                    "datatype": v.datatype,
                    "default_value": v.default_value,
                }
                for v in wf.variables
            ],
        }