informatica-python 1.8.2.tar.gz → 1.9.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. {informatica_python-1.8.2 → informatica_python-1.9.1}/PKG-INFO +6 -2
  2. {informatica_python-1.8.2 → informatica_python-1.9.1}/README.md +5 -1
  3. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/__init__.py +1 -1
  4. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/converter.py +10 -1
  5. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/mapping_gen.py +34 -24
  6. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/workflow_gen.py +2 -2
  7. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python.egg-info/PKG-INFO +6 -2
  8. {informatica_python-1.8.2 → informatica_python-1.9.1}/pyproject.toml +1 -1
  9. {informatica_python-1.8.2 → informatica_python-1.9.1}/tests/test_converter.py +6 -1
  10. {informatica_python-1.8.2 → informatica_python-1.9.1}/tests/test_integration.py +79 -4
  11. {informatica_python-1.8.2 → informatica_python-1.9.1}/LICENSE +0 -0
  12. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/cli.py +0 -0
  13. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/__init__.py +0 -0
  14. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/config_gen.py +0 -0
  15. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/error_log_gen.py +0 -0
  16. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/helper_gen.py +0 -0
  17. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/generators/sql_gen.py +0 -0
  18. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/models.py +0 -0
  19. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/parser.py +0 -0
  20. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/utils/__init__.py +0 -0
  21. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/utils/datatype_map.py +0 -0
  22. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/utils/expression_converter.py +0 -0
  23. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/utils/lib_adapters.py +0 -0
  24. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python/utils/sql_dialect.py +0 -0
  25. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python.egg-info/SOURCES.txt +0 -0
  26. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python.egg-info/dependency_links.txt +0 -0
  27. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python.egg-info/entry_points.txt +0 -0
  28. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python.egg-info/requires.txt +0 -0
  29. {informatica_python-1.8.2 → informatica_python-1.9.1}/informatica_python.egg-info/top_level.txt +0 -0
  30. {informatica_python-1.8.2 → informatica_python-1.9.1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: informatica-python
3
- Version: 1.8.2
3
+ Version: 1.9.1
4
4
  Summary: Convert Informatica PowerCenter workflow XML to Python/PySpark code
5
5
  Author: Nick
6
6
  License: MIT
@@ -97,7 +97,7 @@ converter.convert_to_files("workflow_export.xml", "output_dir", data_lib="polars
97
97
  | File | Description |
98
98
  |------|-------------|
99
99
  | `helper_functions.py` | Database/file I/O helpers, Informatica expression equivalents (80+ functions), window/analytic functions, stored procedure execution, state persistence |
100
- | `mapping_N.py` | One per mapping — transformation logic with row-count logging, source reads, target writes, inline documentation |
100
+ | `mapping_{name}.py` | One per mapping, named after the real Informatica mapping name — transformation logic with row-count logging, source reads, target writes, inline documentation |
101
101
  | `workflow.py` | Task orchestration with topological ordering, decision branching, worklet calls, and error handling |
102
102
  | `config.yml` | Connection configs, source/target metadata, runtime parameters |
103
103
  | `all_sql_queries.sql` | All SQL extracted from Source Qualifiers, Lookups, SQL transforms (with ANSI-translated variants) |
@@ -304,6 +304,10 @@ Converts Informatica expressions to Python equivalents:
304
304
 
305
305
  ## Changelog
306
306
 
307
+ ### v1.9.x (Phase 8)
308
+ - Mapping output files now use real mapping names (e.g., `mapping_m_customer_load.py`) instead of generic numeric indices (`mapping_1.py`)
309
+ - Workflow imports automatically match the named mapping files
310
+
307
311
  ### v1.8.x (Phase 7)
308
312
  - Row-count logging at every pipeline step (source reads, transforms, target writes)
309
313
  - Backend-safe logging (try/except wrapped for Dask/lazy backends)
@@ -70,7 +70,7 @@ converter.convert_to_files("workflow_export.xml", "output_dir", data_lib="polars
70
70
  | File | Description |
71
71
  |------|-------------|
72
72
  | `helper_functions.py` | Database/file I/O helpers, Informatica expression equivalents (80+ functions), window/analytic functions, stored procedure execution, state persistence |
73
- | `mapping_N.py` | One per mapping — transformation logic with row-count logging, source reads, target writes, inline documentation |
73
+ | `mapping_{name}.py` | One per mapping, named after the real Informatica mapping name — transformation logic with row-count logging, source reads, target writes, inline documentation |
74
74
  | `workflow.py` | Task orchestration with topological ordering, decision branching, worklet calls, and error handling |
75
75
  | `config.yml` | Connection configs, source/target metadata, runtime parameters |
76
76
  | `all_sql_queries.sql` | All SQL extracted from Source Qualifiers, Lookups, SQL transforms (with ANSI-translated variants) |
@@ -277,6 +277,10 @@ Converts Informatica expressions to Python equivalents:
277
277
 
278
278
  ## Changelog
279
279
 
280
+ ### v1.9.x (Phase 8)
281
+ - Mapping output files now use real mapping names (e.g., `mapping_m_customer_load.py`) instead of generic numeric indices (`mapping_1.py`)
282
+ - Workflow imports automatically match the named mapping files
283
+
280
284
  ### v1.8.x (Phase 7)
281
285
  - Row-count logging at every pipeline step (source reads, transforms, target writes)
282
286
  - Backend-safe logging (try/except wrapped for Dask/lazy backends)
@@ -7,7 +7,7 @@ Licensed under the MIT License.
7
7
 
8
8
  from informatica_python.converter import InformaticaConverter
9
9
 
10
- __version__ = "1.8.2"
10
+ __version__ = "1.9.1"
11
11
  __author__ = "Nick"
12
12
  __license__ = "MIT"
13
13
  __all__ = ["InformaticaConverter"]
@@ -19,6 +19,14 @@ class InformaticaConverter:
19
19
  self.parser = InformaticaParser()
20
20
  self.powermart = None
21
21
 
22
+ @staticmethod
23
+ def _safe_name(name):
24
+ import re
25
+ safe = re.sub(r'[^a-zA-Z0-9_]', '_', name)
26
+ if safe and safe[0].isdigit():
27
+ safe = '_' + safe
28
+ return safe.lower()
29
+
22
30
  def parse_file(self, file_path: str) -> dict:
23
31
  self.powermart = self.parser.parse_file(file_path)
24
32
  return self.to_json()
@@ -102,7 +110,8 @@ class InformaticaConverter:
102
110
 
103
111
  for i, mapping in enumerate(folder.mappings, 1):
104
112
  code = generate_mapping_code(mapping, folder, self.data_lib, i, validate_casts=validate_casts)
105
- files[f"mapping_{i}.py"] = code
113
+ safe_name = self._safe_name(mapping.name)
114
+ files[f"mapping_{safe_name}.py"] = code
106
115
 
107
116
  files["workflow.py"] = generate_workflow_code(folder)
108
117
 
@@ -601,8 +601,19 @@ def _generate_source_qualifier(lines, sq, source_map, source_dfs, connector_grap
601
601
  lines.append(f" execute_sql(config, '''{pre_sql}''')")
602
602
  lines.append("")
603
603
 
604
- if sql_override:
605
- src_name = next(iter(connected_sources)) if connected_sources else "source"
604
+ if not connected_sources:
605
+ sq_src_name = sq.name[3:] if sq.name.upper().startswith("SQ_") else sq.name
606
+ if sql_override:
607
+ lines.append(f" sql_{sq_safe} = '''")
608
+ for sql_line in sql_override.strip().split("\n"):
609
+ lines.append(f" {sql_line}")
610
+ lines.append(f" '''")
611
+ lines.append(f" df_{sq_safe} = read_from_db(config, sql_{sq_safe}, 'default')")
612
+ else:
613
+ lines.append(f" df_{sq_safe} = read_file(config.get('sources', {{}}).get('{sq_src_name}', {{}}).get('file_path', '{sq_src_name}'),")
614
+ lines.append(f" config.get('sources', {{}}).get('{sq_src_name}', {{}}))")
615
+ elif sql_override:
616
+ src_name = next(iter(connected_sources))
606
617
  src_def = source_map.get(src_name, SourceDef(name=src_name))
607
618
  sq_override = (session_overrides or {}).get(sq.name, {}) or (session_overrides or {}).get(src_name, {})
608
619
  conn_name = sq_override.get("connection_name") or (_safe_name(src_def.db_name) if src_def.db_name else "default")
@@ -612,36 +623,35 @@ def _generate_source_qualifier(lines, sq, source_map, source_dfs, connector_grap
612
623
  lines.append(f" {sql_line}")
613
624
  lines.append(f" '''")
614
625
  lines.append(f" df_{sq_safe} = read_from_db(config, sql_{sq_safe}, '{conn_name}')")
626
+ elif len(connected_sources) == 1:
627
+ src_name = next(iter(connected_sources))
628
+ src_def = source_map.get(src_name, SourceDef(name=src_name))
629
+ safe_src = _safe_name(src_name)
630
+ src_override = (session_overrides or {}).get(sq.name, {}) or (session_overrides or {}).get(src_name, {})
631
+ if src_def.database_type and src_def.database_type != "Flat File":
632
+ conn_name = src_override.get("connection_name") or (_safe_name(src_def.db_name) if src_def.db_name else "default")
633
+ schema = src_def.owner_name or "dbo"
634
+ cols = ", ".join(f.name for f in src_def.fields) if src_def.fields else "*"
635
+ lines.append(f" df_{sq_safe} = read_from_db(config, 'SELECT {cols} FROM {schema}.{src_def.name}', '{conn_name}')")
636
+ elif src_def.flatfile:
637
+ _emit_flatfile_read(lines, sq_safe, src_def)
638
+ else:
639
+ lines.append(f" df_{sq_safe} = read_file(config.get('sources', {{}}).get('{src_def.name}', {{}}).get('file_path', '{src_def.name}'),")
640
+ lines.append(f" config.get('sources', {{}}).get('{src_def.name}', {{}}))")
615
641
  else:
616
- if len(connected_sources) == 1:
617
- src_name = next(iter(connected_sources))
642
+ for src_name in connected_sources:
618
643
  src_def = source_map.get(src_name, SourceDef(name=src_name))
619
644
  safe_src = _safe_name(src_name)
620
- src_override = (session_overrides or {}).get(sq.name, {}) or (session_overrides or {}).get(src_name, {})
621
645
  if src_def.database_type and src_def.database_type != "Flat File":
622
- conn_name = src_override.get("connection_name") or (_safe_name(src_def.db_name) if src_def.db_name else "default")
646
+ conn_name = _safe_name(src_def.db_name) if src_def.db_name else "default"
623
647
  schema = src_def.owner_name or "dbo"
624
- cols = ", ".join(f.name for f in src_def.fields) if src_def.fields else "*"
625
- lines.append(f" df_{sq_safe} = read_from_db(config, 'SELECT {cols} FROM {schema}.{src_def.name}', '{conn_name}')")
648
+ lines.append(f" df_{safe_src} = read_from_db(config, 'SELECT * FROM {schema}.{src_def.name}', '{conn_name}')")
626
649
  elif src_def.flatfile:
627
- _emit_flatfile_read(lines, sq_safe, src_def)
650
+ _emit_flatfile_read(lines, safe_src, src_def)
628
651
  else:
629
- lines.append(f" df_{sq_safe} = read_file(config.get('sources', {{}}).get('{src_def.name}', {{}}).get('file_path', '{src_def.name}'),")
652
+ lines.append(f" df_{safe_src} = read_file(config.get('sources', {{}}).get('{src_def.name}', {{}}).get('file_path', '{src_def.name}'),")
630
653
  lines.append(f" config.get('sources', {{}}).get('{src_def.name}', {{}}))")
631
- else:
632
- for src_name in connected_sources:
633
- src_def = source_map.get(src_name, SourceDef(name=src_name))
634
- safe_src = _safe_name(src_name)
635
- if src_def.database_type and src_def.database_type != "Flat File":
636
- conn_name = _safe_name(src_def.db_name) if src_def.db_name else "default"
637
- schema = src_def.owner_name or "dbo"
638
- lines.append(f" df_{safe_src} = read_from_db(config, 'SELECT * FROM {schema}.{src_def.name}', '{conn_name}')")
639
- elif src_def.flatfile:
640
- _emit_flatfile_read(lines, safe_src, src_def)
641
- else:
642
- lines.append(f" df_{safe_src} = read_file(config.get('sources', {{}}).get('{src_def.name}', {{}}).get('file_path', '{src_def.name}'),")
643
- lines.append(f" config.get('sources', {{}}).get('{src_def.name}', {{}}))")
644
- lines.append(f" df_{sq_safe} = df_{_safe_name(next(iter(connected_sources)))}")
654
+ lines.append(f" df_{sq_safe} = df_{_safe_name(next(iter(connected_sources)))}")
645
655
 
646
656
  source_dfs[sq.name] = f"df_{sq_safe}"
647
657
  lines.append(f" try:")
@@ -25,9 +25,9 @@ def generate_workflow_code(folder: FolderDef) -> str:
25
25
  lines.append("from helper_functions import load_config, logger, load_persistent_state, save_persistent_state, get_persistent_variable, set_persistent_variable")
26
26
  lines.append("")
27
27
 
28
- for i, mapping in enumerate(folder.mappings, 1):
28
+ for mapping in folder.mappings:
29
29
  safe_name = _safe_name(mapping.name)
30
- lines.append(f"from mapping_{i} import run_{safe_name}")
30
+ lines.append(f"from mapping_{safe_name} import run_{safe_name}")
31
31
  lines.append("")
32
32
  lines.append("")
33
33
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: informatica-python
3
- Version: 1.8.2
3
+ Version: 1.9.1
4
4
  Summary: Convert Informatica PowerCenter workflow XML to Python/PySpark code
5
5
  Author: Nick
6
6
  License: MIT
@@ -97,7 +97,7 @@ converter.convert_to_files("workflow_export.xml", "output_dir", data_lib="polars
97
97
  | File | Description |
98
98
  |------|-------------|
99
99
  | `helper_functions.py` | Database/file I/O helpers, Informatica expression equivalents (80+ functions), window/analytic functions, stored procedure execution, state persistence |
100
- | `mapping_N.py` | One per mapping — transformation logic with row-count logging, source reads, target writes, inline documentation |
100
+ | `mapping_{name}.py` | One per mapping, named after the real Informatica mapping name — transformation logic with row-count logging, source reads, target writes, inline documentation |
101
101
  | `workflow.py` | Task orchestration with topological ordering, decision branching, worklet calls, and error handling |
102
102
  | `config.yml` | Connection configs, source/target metadata, runtime parameters |
103
103
  | `all_sql_queries.sql` | All SQL extracted from Source Qualifiers, Lookups, SQL transforms (with ANSI-translated variants) |
@@ -304,6 +304,10 @@ Converts Informatica expressions to Python equivalents:
304
304
 
305
305
  ## Changelog
306
306
 
307
+ ### v1.9.x (Phase 8)
308
+ - Mapping output files now use real mapping names (e.g., `mapping_m_customer_load.py`) instead of generic numeric indices (`mapping_1.py`)
309
+ - Workflow imports automatically match the named mapping files
310
+
307
311
  ### v1.8.x (Phase 7)
308
312
  - Row-count logging at every pipeline step (source reads, transforms, target writes)
309
313
  - Backend-safe logging (try/except wrapped for Dask/lazy backends)
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "informatica-python"
7
- version = "1.8.2"
7
+ version = "1.9.1"
8
8
  description = "Convert Informatica PowerCenter workflow XML to Python/PySpark code"
9
9
  readme = "README.md"
10
10
  license = {text = "MIT"}
@@ -104,7 +104,6 @@ def test_convert_to_files():
104
104
 
105
105
  expected_files = [
106
106
  "helper_functions.py",
107
- "mapping_1.py",
108
107
  "workflow.py",
109
108
  "config.yml",
110
109
  "all_sql_queries.sql",
@@ -118,6 +117,12 @@ def test_convert_to_files():
118
117
  assert size > 0, f"{f} should not be empty"
119
118
  print(f" {f}: {size} bytes")
120
119
 
120
+ mapping_files = [f for f in os.listdir(output_dir) if f.startswith("mapping_") and f.endswith(".py")]
121
+ assert len(mapping_files) > 0, "At least one mapping file should exist"
122
+ for mf in mapping_files:
123
+ assert mf != "mapping_1.py", "Mapping files should use real mapping names, not numeric indices"
124
+ print(f" {mf}: {os.path.getsize(os.path.join(output_dir, mf))} bytes")
125
+
121
126
  print(f"PASS: test_convert_to_files")
122
127
 
123
128
 
@@ -3,6 +3,7 @@ import sys
3
3
  import csv
4
4
  import tempfile
5
5
  import shutil
6
+ import unittest
6
7
  import pytest
7
8
  from informatica_python.converter import InformaticaConverter
8
9
  from informatica_python.utils.expression_converter import (
@@ -337,7 +338,7 @@ class TestCodeGeneration:
337
338
  converter = InformaticaConverter(data_lib="pandas")
338
339
  output = converter.convert_string(MINIMAL_XML, output_dir=self.tmpdir)
339
340
 
340
- mapping_path = os.path.join(output, "mapping_1.py")
341
+ mapping_path = os.path.join(output, "mapping_m_test_expr.py")
341
342
  assert os.path.exists(mapping_path)
342
343
 
343
344
  with open(mapping_path) as f:
@@ -350,7 +351,7 @@ class TestCodeGeneration:
350
351
  converter = InformaticaConverter(data_lib="pandas")
351
352
  output = converter.convert_string(FILTER_XML, output_dir=self.tmpdir)
352
353
 
353
- mapping_path = os.path.join(output, "mapping_1.py")
354
+ mapping_path = os.path.join(output, "mapping_m_test_filter.py")
354
355
  with open(mapping_path) as f:
355
356
  code = f.read()
356
357
  assert 'Filter' in code
@@ -378,7 +379,7 @@ class TestCodeGeneration:
378
379
  output = converter.convert_string(MINIMAL_XML, output_dir=self.tmpdir)
379
380
 
380
381
  expected_files = [
381
- "helper_functions.py", "mapping_1.py", "workflow.py",
382
+ "helper_functions.py", "mapping_m_test_expr.py", "workflow.py",
382
383
  "config.yml", "all_sql_queries.sql", "error_log.txt",
383
384
  ]
384
385
  for fname in expected_files:
@@ -398,7 +399,7 @@ class TestCodeGeneration:
398
399
  converter = InformaticaConverter(data_lib="pandas")
399
400
  output = converter.convert_string(MINIMAL_XML, output_dir=self.tmpdir)
400
401
 
401
- mapping_path = os.path.join(output, "mapping_1.py")
402
+ mapping_path = os.path.join(output, "mapping_m_test_expr.py")
402
403
  with open(mapping_path) as f:
403
404
  code = f.read()
404
405
  compile(code, mapping_path, "exec")
@@ -1536,3 +1537,77 @@ class TestGeneratedCodeDocumentation:
1536
1537
  folder = FolderDef(name="TestFolder", mappings=[mapping])
1537
1538
  code = generate_mapping_code(mapping, folder, "pandas", 1)
1538
1539
  assert "..." in code
1540
+
1541
+
1542
+ class TestSourceQualifierNoSources(unittest.TestCase):
1543
+
1544
+ def test_sq_with_no_connected_sources_and_empty_source_map(self):
1545
+ from informatica_python.models import (
1546
+ FolderDef, MappingDef, TransformationDef,
1547
+ InstanceDef, ConnectorDef, FieldDef, TableAttribute,
1548
+ )
1549
+ from informatica_python.generators.mapping_gen import generate_mapping_code
1550
+ mapping = MappingDef(
1551
+ name="m_orphan_sq",
1552
+ transformations=[
1553
+ TransformationDef(name="SQ_ORPHAN", type="Source Qualifier", fields=[], attributes=[]),
1554
+ ],
1555
+ connectors=[],
1556
+ instances=[
1557
+ InstanceDef(name="SQ_ORPHAN", type="Source Qualifier", transformation_name="SQ_ORPHAN"),
1558
+ InstanceDef(name="TGT1", type="Target Definition", transformation_name="TGT1"),
1559
+ ],
1560
+ )
1561
+ folder = FolderDef(name="TestFolder", sources=[], targets=[], mappings=[mapping])
1562
+ code = generate_mapping_code(mapping, folder, "pandas", 1)
1563
+ assert "df_sq_orphan" in code
1564
+ assert "read_file" in code or "read_from_db" in code
1565
+
1566
+ def test_sq_with_no_connectors_but_sources_exist(self):
1567
+ from informatica_python.models import (
1568
+ FolderDef, MappingDef, TransformationDef,
1569
+ InstanceDef, ConnectorDef, FieldDef, TableAttribute,
1570
+ )
1571
+ from informatica_python.generators.mapping_gen import generate_mapping_code
1572
+ mapping = MappingDef(
1573
+ name="m_disconnect",
1574
+ transformations=[
1575
+ TransformationDef(name="SQ_SRC1", type="Source Qualifier", fields=[], attributes=[]),
1576
+ ],
1577
+ connectors=[],
1578
+ instances=[
1579
+ InstanceDef(name="SRC1", type="Source Definition", transformation_name="SRC1"),
1580
+ InstanceDef(name="SQ_SRC1", type="Source Qualifier", transformation_name="SQ_SRC1"),
1581
+ InstanceDef(name="TGT1", type="Target Definition", transformation_name="TGT1"),
1582
+ ],
1583
+ )
1584
+ folder = FolderDef(name="TestFolder", sources=[], targets=[], mappings=[mapping])
1585
+ code = generate_mapping_code(mapping, folder, "pandas", 1)
1586
+ assert "df_sq_src1" in code
1587
+
1588
+ def test_sq_with_sql_override_no_sources(self):
1589
+ from informatica_python.models import (
1590
+ FolderDef, MappingDef, TransformationDef,
1591
+ InstanceDef, ConnectorDef, FieldDef, TableAttribute,
1592
+ )
1593
+ from informatica_python.generators.mapping_gen import generate_mapping_code
1594
+ mapping = MappingDef(
1595
+ name="m_sql_only",
1596
+ transformations=[
1597
+ TransformationDef(
1598
+ name="SQ_SQL",
1599
+ type="Source Qualifier",
1600
+ fields=[],
1601
+ attributes=[TableAttribute(name="Sql Query", value="SELECT * FROM my_table")],
1602
+ ),
1603
+ ],
1604
+ connectors=[],
1605
+ instances=[
1606
+ InstanceDef(name="SQ_SQL", type="Source Qualifier", transformation_name="SQ_SQL"),
1607
+ InstanceDef(name="TGT1", type="Target Definition", transformation_name="TGT1"),
1608
+ ],
1609
+ )
1610
+ folder = FolderDef(name="TestFolder", sources=[], targets=[], mappings=[mapping])
1611
+ code = generate_mapping_code(mapping, folder, "pandas", 1)
1612
+ assert "df_sq_sql" in code
1613
+ assert "read_from_db" in code