neo4j-etl-lib 0.0.2__tar.gz → 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. neo4j_etl_lib-0.1.0/PKG-INFO +54 -0
  2. neo4j_etl_lib-0.1.0/README.md +14 -0
  3. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/pyproject.toml +2 -1
  4. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/__init__.py +1 -1
  5. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/cli/run_tools.py +2 -2
  6. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/BatchProcessor.py +1 -2
  7. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/ETLContext.py +31 -7
  8. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/ProgressReporter.py +4 -4
  9. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/Task.py +0 -3
  10. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/ValidationBatchProcessor.py +1 -1
  11. neo4j_etl_lib-0.1.0/src/etl_lib/core/utils.py +28 -0
  12. neo4j_etl_lib-0.1.0/src/etl_lib/data_sink/CSVBatchSink.py +57 -0
  13. neo4j_etl_lib-0.0.2/src/etl_lib/data_sink/CypherBatchProcessor.py → neo4j_etl_lib-0.1.0/src/etl_lib/data_sink/CypherBatchSink.py +5 -5
  14. neo4j_etl_lib-0.0.2/src/etl_lib/data_source/CSVBatchProcessor.py → neo4j_etl_lib-0.1.0/src/etl_lib/data_source/CSVBatchSource.py +16 -24
  15. neo4j_etl_lib-0.1.0/src/etl_lib/data_source/CypherBatchSource.py +47 -0
  16. neo4j_etl_lib-0.1.0/src/etl_lib/task/CreateReportingConstraintsTask.py +17 -0
  17. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/task/ExecuteCypherTask.py +5 -0
  18. neo4j_etl_lib-0.1.0/src/etl_lib/task/data_loading/CSVLoad2Neo4jTask.py +57 -0
  19. neo4j_etl_lib-0.1.0/src/etl_lib/test_utils/__init__.py +0 -0
  20. neo4j_etl_lib-0.1.0/src/etl_lib/test_utils/utils.py +153 -0
  21. neo4j_etl_lib-0.0.2/.env.sample +0 -7
  22. neo4j_etl_lib-0.0.2/.gitignore +0 -9
  23. neo4j_etl_lib-0.0.2/PKG-INFO +0 -126
  24. neo4j_etl_lib-0.0.2/README.md +0 -88
  25. neo4j_etl_lib-0.0.2/dashboard.json +0 -190
  26. neo4j_etl_lib-0.0.2/docs/Makefile +0 -23
  27. neo4j_etl_lib-0.0.2/docs/README.md +0 -16
  28. neo4j_etl_lib-0.0.2/docs/_static/images/schema.json +0 -510
  29. neo4j_etl_lib-0.0.2/docs/_static/images/schema.png +0 -0
  30. neo4j_etl_lib-0.0.2/docs/_static/pydata-custom.css +0 -10
  31. neo4j_etl_lib-0.0.2/docs/_static/readthedocs-custom.css +0 -30
  32. neo4j_etl_lib-0.0.2/docs/_templates/custom-class-template.rst +0 -34
  33. neo4j_etl_lib-0.0.2/docs/_templates/custom-module-template.rst +0 -66
  34. neo4j_etl_lib-0.0.2/docs/api.rst +0 -12
  35. neo4j_etl_lib-0.0.2/docs/conf.py +0 -95
  36. neo4j_etl_lib-0.0.2/docs/index.rst +0 -24
  37. neo4j_etl_lib-0.0.2/pytest.ini +0 -10
  38. neo4j_etl_lib-0.0.2/src/etl_lib/core/utils.py +0 -7
  39. neo4j_etl_lib-0.0.2/src/etl_lib/task/data_loading/CSVLoad2Neo4jTask.py +0 -41
  40. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/LICENSE +0 -0
  41. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/cli/__init__.py +0 -0
  42. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/ClosedLoopBatchProcessor.py +0 -0
  43. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/core/__init__.py +0 -0
  44. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/data_sink/__init__.py +0 -0
  45. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/data_source/__init__.py +0 -0
  46. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/task/GDSTask.py +0 -0
  47. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/task/__init__.py +0 -0
  48. {neo4j_etl_lib-0.0.2 → neo4j_etl_lib-0.1.0}/src/etl_lib/task/data_loading/__init__.py +0 -0
@@ -0,0 +1,54 @@
+ Metadata-Version: 2.4
+ Name: neo4j-etl-lib
+ Version: 0.1.0
+ Summary: Building blocks for ETL pipelines.
+ Keywords: etl,graph,database
+ Author-email: Bert Radke <bert.radke@pm.me>
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Topic :: Database
+ Classifier: Development Status :: 4 - Beta
+ License-File: LICENSE
+ Requires-Dist: pydantic>=2.10.5; python_version >= '3.8'
+ Requires-Dist: neo4j>=5.27.0; python_version >= '3.7'
+ Requires-Dist: python-dotenv>=1.0.1; python_version >= '3.8'
+ Requires-Dist: tabulate>=0.9.0; python_version >= '3.7'
+ Requires-Dist: click>=8.1.8; python_version >= '3.7'
+ Requires-Dist: pytest>=8.3.0 ; extra == "dev" and ( python_version >= '3.8')
+ Requires-Dist: testcontainers[neo4j]==4.9.0 ; extra == "dev" and ( python_version >= '3.9' and python_version < '4.0')
+ Requires-Dist: pytest-cov ; extra == "dev"
+ Requires-Dist: bumpver ; extra == "dev"
+ Requires-Dist: isort ; extra == "dev"
+ Requires-Dist: pip-tools ; extra == "dev"
+ Requires-Dist: sphinx ; extra == "dev"
+ Requires-Dist: sphinx-rtd-theme ; extra == "dev"
+ Requires-Dist: pydata-sphinx-theme ; extra == "dev"
+ Requires-Dist: sphinx-autodoc-typehints ; extra == "dev"
+ Requires-Dist: sphinxcontrib-napoleon ; extra == "dev"
+ Requires-Dist: sphinx-autoapi ; extra == "dev"
+ Requires-Dist: graphdatascience>=1.13 ; extra == "gds" and ( python_version >= '3.9')
+ Project-URL: Documentation, https://neo-technology-field.github.io/python-etl-lib/index.html
+ Project-URL: Home, https://github.com/neo-technology-field/python-etl-lib
+ Provides-Extra: dev
+ Provides-Extra: gds
+
+ # Neo4j ETL Toolbox
+
+ A Python library of building blocks to assemble etl pipelines.
+
+ Complete documentation can be found on https://neo-technology-field.github.io/python-etl-lib/index.html
+
+ See https://github.com/neo-technology-field/python-etl-lib/tree/main/examples/gtfs for an example project.
+
+
+ The library can be installed via
+
+ ```bash
+ pip install neo4j-etl-lib
+ ```
+
@@ -0,0 +1,14 @@
+ # Neo4j ETL Toolbox
+
+ A Python library of building blocks to assemble etl pipelines.
+
+ Complete documentation can be found on https://neo-technology-field.github.io/python-etl-lib/index.html
+
+ See https://github.com/neo-technology-field/python-etl-lib/tree/main/examples/gtfs for an example project.
+
+
+ The library can be installed via
+
+ ```bash
+ pip install neo4j-etl-lib
+ ```
@@ -34,7 +34,8 @@ dev = [
      "pytest>=8.3.0; python_version >= '3.8'",
      "testcontainers[neo4j]==4.9.0; python_version >= '3.9' and python_version < '4.0'",
      "pytest-cov", "bumpver", "isort", "pip-tools",
-     "sphinx", "sphinx-rtd-theme", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-napoleon"
+     "sphinx", "sphinx-rtd-theme", "pydata-sphinx-theme", "sphinx-autodoc-typehints",
+     "sphinxcontrib-napoleon", "sphinx-autoapi"
  ]
  gds = ["graphdatascience>=1.13; python_version >= '3.9'"]
 
@@ -1,4 +1,4 @@
  """
  Building blocks for ETL pipelines.
  """
- __version__ = "0.0.2"
+ __version__ = "0.1.0"
@@ -98,7 +98,7 @@ def cli(ctx, neo4j_uri, neo4j_user, neo4j_password, log_file, database_name):
  @click.pass_context
  def query(ctx, number_runs):
      """
-     Retrieve the list of the last x etl runs from the database and display them.
+     Retrieve the list of the last x ETL runs from the database and display them.
      """
      print(f"Listing runs in database '{ctx.obj['database_name']}'")
      with __driver(ctx) as driver:
@@ -155,7 +155,7 @@ def detail(ctx, run_id, details):
              "status": record["status"],
              "batches": record["batches"],
              "duration": __duration_from_start_end(record["startTime"], record["endTime"]),
-             "changes": sum(record.get("stats", {}).values())
+             "changes": record.get("changes", 0)
          }
          for record in records
      ]
@@ -4,7 +4,6 @@ import sys
  from dataclasses import dataclass, field
  from typing import Generator
 
- from etl_lib.core.ETLContext import ETLContext
  from etl_lib.core.Task import Task
  from etl_lib.core.utils import merge_summery
 
@@ -53,7 +52,7 @@ class BatchProcessor:
      and returned in batches to the caller. Usage of `Generators` ensure that not all data must be loaded at once.
      """
 
-     def __init__(self, context: ETLContext, task: Task, predecessor=None):
+     def __init__(self, context, task: Task = None, predecessor=None):
          """
          Constructs a new :py:class:`etl_lib.core.BatchProcessor` instance.
 
@@ -2,7 +2,7 @@ import logging
  from typing import NamedTuple, Any
 
  from graphdatascience import GraphDataScience
- from neo4j import Driver, GraphDatabase, WRITE_ACCESS, SummaryCounters
+ from neo4j import GraphDatabase, WRITE_ACCESS, SummaryCounters
 
  from etl_lib.core.ProgressReporter import get_reporter
 
@@ -20,18 +20,19 @@ def append_results(r1: QueryResult, r2: QueryResult) -> QueryResult:
 
 
  class Neo4jContext:
-     uri: str
-     auth: (str, str)
-     driver: Driver
-     database: str
+     """
+     Holds the connection to the neo4j database and provides facilities to execute queries.
+     """
 
      def __init__(self, env_vars: dict):
          """
          Create a new Neo4j context.
+
          Reads the following env_vars keys:
          - `NEO4J_URI`,
          - `NEO4J_USERNAME`,
          - `NEO4J_PASSWORD`.
+         - `NEO4J_DATABASE`,
          """
          self.logger = logging.getLogger(self.__class__.__name__)
          self.uri = env_vars["NEO4J_URI"]
@@ -43,6 +44,10 @@ class Neo4jContext:
      def query_database(self, session, query, **kwargs) -> QueryResult:
          """
          Executes a Cypher query on the Neo4j database.
+
+         Args:
+             session: Neo4j database session.
+             query: Cypher query either as a single query or as a list.
          """
          if isinstance(query, list):
              results = []
@@ -78,12 +83,33 @@ class Neo4jContext:
          }
 
      def session(self, database=None):
+         """
+         Create a new Neo4j session in write mode, caller is responsible to close the session.
+
+         Args:
+             database: name of the database to use for this session. If not provided, the database name provided during
+                 construction will be used.
+
+         Returns:
+             newly created Neo4j session.
+
+         """
          if database is None:
              return self.driver.session(database=self.database, default_access_mode=WRITE_ACCESS)
          else:
              return self.driver.session(database=database, default_access_mode=WRITE_ACCESS)
 
      def gds(self, database=None) -> GraphDataScience:
+         """
+         Creates a new GraphDataScience client.
+
+         Args:
+             database: Name of the database to use for this dgs client.
+                 If not provided, the database name provided during construction will be used.
+
+         Returns:
+             gds client.
+         """
          if database is None:
              return GraphDataScience.from_neo4j_driver(driver=self.driver, database=self.database)
          else:
@@ -104,8 +130,6 @@ class ETLContext:
      Will be passed to all :py:class:`etl_lib.core.Task` to provide access to environment variables and functionally
      deemed general enough that all parts of the ETL pipeline would need it.
      """
-     neo4j: Neo4jContext
-     __env_vars: dict
 
      def __init__(self, env_vars: dict):
          """
@@ -66,7 +66,7 @@ class ProgressReporter:
          task.success = success
          task.summery = summery
 
-         report = f"{'\t' * task.depth}finished {task.task_name()} with success: {success}"
+         report = f"{'\t' * task.depth} finished {task.task_name()} in {task.end_time - task.start_time} with success: {success}"
          if error is not None:
              report += f", error: \n{error}"
          else:
@@ -197,10 +197,10 @@ def get_reporter(context) -> ProgressReporter:
      """
      Returns a ProgressReporter instance.
 
-     If the :py:class:`ETLContext <etl_lib.core.ETLContext>` env holds the key `REPORTER_DATABASE` then
-     a :py:class:`Neo4jProgressReporter` instance is created with the given database name.
+     If the :class:`ETLContext <etl_lib.core.ETLContext>` env holds the key `REPORTER_DATABASE` then
+     a :class:`Neo4jProgressReporter` instance is created with the given database name.
 
-     Otherwise, a :py:class:`ProgressReporter` (no logging to database) instance will be created.
+     Otherwise, a :class:`ProgressReporter` (no logging to database) instance will be created.
      """
 
      db = context.env("REPORTER_DATABASE")
@@ -78,9 +78,6 @@ class Task:
          """Time when the :py:func:`~execute` has finished., `None` before."""
          self.success: bool
          """True if the task has finished successful. False otherwise, `None` before the task has finished."""
-         self.summery: dict # TODO: still in use?
-         """Summery statistics about the task performed, such as rows inserted, updated."""
-         self.error: str # TODO: still in use?
          self.depth: int = 0
          """Level or depth of the task in the hierarchy. The root task is depth 0. Updated by the Reporter"""
 
@@ -47,7 +47,7 @@ class ValidationBatchProcessor(BatchProcessor):
              for row in batch.chunk:
                  try:
                      # Validate and transform the row
-                     validated_row = self.model(**row).model_dump()
+                     validated_row = json.loads(self.model(**row).model_dump_json())
                      valid_rows.append(validated_row)
                  except ValidationError as e:
                      # Collect invalid rows with errors
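
A plausible reading of this change: `model_dump()` keeps rich Python types (dates, UUIDs, ...), while `model_dump_json()` followed by `json.loads()` yields plain JSON primitives. A small sketch with a hypothetical model that is not part of the library:

```python
import json
from datetime import date

from pydantic import BaseModel


class Stop(BaseModel):  # hypothetical row model, for illustration only
    name: str
    opened: date


row = Stop(name="Central", opened=date(2020, 1, 1))
print(row.model_dump())                   # {'name': 'Central', 'opened': datetime.date(2020, 1, 1)}
print(json.loads(row.model_dump_json()))  # {'name': 'Central', 'opened': '2020-01-01'}
```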
@@ -0,0 +1,28 @@
+ import logging
+
+
+ def merge_summery(summery_1: dict, summery_2: dict) -> dict:
+     """
+     Helper function to merge dicts. Assuming that values are numbers.
+     If a key exists in both dicts, then the result will contain a key with the added values.
+     """
+     return {i: summery_1.get(i, 0) + summery_2.get(i, 0)
+             for i in set(summery_1).union(summery_2)}
+
+
+ def setup_logging(log_file=None):
+     """
+     Set up logging to console and optionally to a log file.
+
+     :param log_file: Path to the log file
+     :type log_file: str, optional
+     """
+     handlers = [logging.StreamHandler()]
+     if log_file:
+         handlers.append(logging.FileHandler(log_file))
+
+     logging.basicConfig(
+         level=logging.INFO,
+         format='%(asctime)s - %(levelname)s - %(message)s',
+         handlers=handlers
+     )
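
A quick illustration of the new helpers, assuming the package is installed (key order in the merged dict may vary):

```python
from etl_lib.core.utils import merge_summery, setup_logging

setup_logging()  # console handler only; pass a file path to also write to a log file

# Keys present in both dicts are summed; all other keys are carried over.
stats = merge_summery({"nodes_created": 10, "rows": 100}, {"rows": 50, "relationships_created": 5})
print(stats)  # e.g. {'rows': 150, 'nodes_created': 10, 'relationships_created': 5}
```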
@@ -0,0 +1,57 @@
+ import csv
+ from pathlib import Path
+ from typing import Generator
+
+ from etl_lib.core.ETLContext import ETLContext
+ from etl_lib.core.BatchProcessor import BatchProcessor, BatchResults, append_result
+ from etl_lib.core.Task import Task
+
+
+ class CSVBatchSink(BatchProcessor):
+     """
+     BatchProcessor to write batches of data to a CSV file.
+     """
+
+     def __init__(self, context: ETLContext, task: Task, predecessor: BatchProcessor, file_path: Path, **kwargs):
+         """
+         Constructs a new CSVBatchSink.
+
+         Args:
+             context: :class:`etl_lib.core.ETLContext.ETLContext` instance.
+             task: :class:`etl_lib.core.Task.Task` instance owning this batchProcessor.
+             predecessor: BatchProcessor which :func:`~get_batch` function will be called to receive batches to process.
+             file_path: Path to the CSV file where data will be written. If the file exists, data will be appended.
+             **kwargs: Additional arguments passed to `csv.DictWriter` to allow tuning the csv creation.
+         """
+         super().__init__(context, task, predecessor)
+         self.file_path = file_path
+         self.file_initialized = False
+         self.csv_kwargs = kwargs
+
+     def get_batch(self, batch_size: int) -> Generator[BatchResults, None, None]:
+         assert self.predecessor is not None
+
+         for batch_result in self.predecessor.get_batch(batch_size):
+             self._write_to_csv(batch_result.chunk)
+             yield append_result(batch_result, {"rows_written": len(batch_result.chunk)})
+
+     def _write_to_csv(self, data: list[dict]):
+         """
+         Writes a batch of data to the CSV file.
+
+         Args:
+             data: A list of dictionaries representing rows of data.
+         """
+         if not data:
+             return
+
+         fieldnames = data[0].keys()
+         write_header = not self.file_initialized or not self.file_path.exists()
+
+         with self.file_path.open(mode="a", newline="", encoding="utf-8") as csvfile:
+             writer = csv.DictWriter(csvfile, fieldnames=fieldnames, **self.csv_kwargs)
+             if write_header:
+                 writer.writeheader()
+             writer.writerows(data)
+
+         self.file_initialized = True
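
A rough sketch of how the new sink slots into the processor chain: copy a CSV in batches from source to sink. File names are made up, `context` is assumed to be an existing `ETLContext`, and `task` is left as `None`, which the relaxed `BatchProcessor` constructor above appears to permit:

```python
from pathlib import Path

from etl_lib.data_sink.CSVBatchSink import CSVBatchSink
from etl_lib.data_source.CSVBatchSource import CSVBatchSource

# The source reads the (possibly gzipped) CSV in batches, the sink appends them to a new file.
source = CSVBatchSource(Path("stops.csv.gz"), context)
sink = CSVBatchSink(context, None, source, Path("stops-copy.csv"))

# Pulling batches from the last processor drives the whole chain.
for batch in sink.get_batch(1000):
    print(batch.statistics)  # e.g. {'csv_lines_read': 1000, 'rows_written': 1000}
```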
@@ -5,19 +5,19 @@ from etl_lib.core.BatchProcessor import BatchProcessor, BatchResults, append_res
  from etl_lib.core.Task import Task
 
 
- class CypherBatchProcessor(BatchProcessor):
+ class CypherBatchSink(BatchProcessor):
      """
      BatchProcessor to write batches of data to a Neo4j database.
      """
 
      def __init__(self, context: ETLContext, task: Task, predecessor: BatchProcessor, query: str):
          """
-         Constructs a new CypherBatchProcessor.
+         Constructs a new CypherBatchSink.
 
          Args:
-             context: :py:class:`etl_lib.core.ETLContext.ETLContext` instance.
-             task: :py:class:`etl_lib.core.Task.Task` instance owning this batchProcessor.
-             predecessor: BatchProcessor which :py:func:`~get_batch` function will be called to receive batches to process.
+             context: :class:`etl_lib.core.ETLContext.ETLContext` instance.
+             task: :class:`etl_lib.core.Task.Task` instance owning this batchProcessor.
+             predecessor: BatchProcessor which :func:`~get_batch` function will be called to receive batches to process.
              query: Cypher to write the query to Neo4j.
                  Data will be passed as `batch` parameter.
                  Therefor, the query should start with a `UNWIND $batch AS row`.
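
The renamed sink keeps the contract described in the docstring: rows arrive in the `batch` query parameter, so the statement should start by unwinding it. A hedged sketch with a made-up query; `context`, `task` and `validator` are assumed to already exist:

```python
from etl_lib.data_sink.CypherBatchSink import CypherBatchSink

# Hypothetical MERGE query; the incoming rows are bound to $batch.
query = """
UNWIND $batch AS row
MERGE (s:Stop {id: row.stop_id})
SET s.name = row.stop_name
"""

sink = CypherBatchSink(context, task, predecessor=validator, query=query)
```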
@@ -4,11 +4,10 @@ from pathlib import Path
  from typing import Generator
 
  from etl_lib.core.BatchProcessor import BatchProcessor, BatchResults
- from etl_lib.core.ETLContext import ETLContext
  from etl_lib.core.Task import Task
 
 
- class CSVBatchProcessor(BatchProcessor):
+ class CSVBatchSource(BatchProcessor):
      """
      BatchProcessor that reads a CSV file using the `csv` package.
 
@@ -17,13 +16,13 @@ class CSVBatchProcessor(BatchProcessor):
      starting with 0.
      """
 
-     def __init__(self, csv_file: Path, context: ETLContext, task: Task, **kwargs):
+     def __init__(self, csv_file: Path, context, task: Task = None, **kwargs):
          """
-         Constructs a new CSVBatchProcessor.
+         Constructs a new CSVBatchSource.
 
          Args:
              csv_file: Path to the CSV file.
-             context: :py:class:`etl_lib.core.ETLContext.ETLContext` instance.
+             context: :class:`etl_lib.core.ETLContext.ETLContext` instance.
              kwargs: Will be passed on to the `csv.DictReader` providing a way to customise the reading to different
                  csv formats.
          """
@@ -32,10 +31,10 @@ class CSVBatchProcessor(BatchProcessor):
          self.kwargs = kwargs
 
      def get_batch(self, max_batch__size: int) -> Generator[BatchResults]:
-         for batch_size, chunks_ in self.read_csv(self.csv_file, batch_size=max_batch__size, **self.kwargs):
+         for batch_size, chunks_ in self.__read_csv(self.csv_file, batch_size=max_batch__size, **self.kwargs):
              yield BatchResults(chunk=chunks_, statistics={"csv_lines_read": batch_size}, batch_size=batch_size)
 
-     def read_csv(self, file: Path, batch_size: int, **kwargs):
+     def __read_csv(self, file: Path, batch_size: int, **kwargs):
          if file.suffix == ".gz":
              with gzip.open(file, "rt", encoding='utf-8-sig') as f:
                  yield from self.__parse_csv(batch_size, file=f, **kwargs)
@@ -44,30 +43,23 @@ class CSVBatchProcessor(BatchProcessor):
              yield from self.__parse_csv(batch_size, file=f, **kwargs)
 
      def __parse_csv(self, batch_size, file, **kwargs):
-         csv_file = csv.DictReader(file, **kwargs)
-         yield from self.__split_to_batches(csv_file, batch_size)
+         """Read CSV in batches without loading the entire file at once."""
+         csv_reader = csv.DictReader(file, **kwargs)
 
-     def __split_to_batches(self, source: [dict], batch_size):
-         """
-         Splits the provided source into batches.
-
-         Args:
-             source: Anything that can be loop over, ideally, this should also be a generator
-             batch_size: desired batch size
-
-         Returns:
-             generator object to loop over the batches. Each batch is an Array.
-         """
          cnt = 0
          batch_ = []
-         for i in source:
-             i["_row"] = cnt
+
+         for row in csv_reader:
+             row["_row"] = cnt
              cnt += 1
-             batch_.append(self.__clean_dict(i))
+             batch_.append(self.__clean_dict(row))
+
              if len(batch_) == batch_size:
                  yield len(batch_), batch_
                  batch_ = []
-         if len(batch_) > 0:
+
+         # Yield any remaining data
+         if batch_:
              yield len(batch_), batch_
 
      def __clean_dict(self, input_dict):
@@ -0,0 +1,47 @@
+ from typing import Generator
+
+ from etl_lib.core.BatchProcessor import BatchResults, BatchProcessor
+ from etl_lib.core.ETLContext import ETLContext
+ from etl_lib.core.Task import Task
+
+
+ class CypherBatchSource(BatchProcessor):
+
+     def __init__(self, context: ETLContext, task: Task, query: str, **kwargs):
+         """
+         Constructs a new CypherBatchSource.
+
+         Args:
+             context: :class:`etl_lib.core.ETLContext.ETLContext` instance.
+             task: :class:`etl_lib.core.Task.Task` instance owning this batchProcessor.
+             query: Cypher query to execute.
+             kwargs: Arguments passed as parameters with the query.
+         """
+         super().__init__(context, task)
+         self.query = query
+         self.kwargs = kwargs
+
+     def __read_records(self, tx, batch_size):
+         batch_ = []
+         result = tx.run(self.query, **self.kwargs)
+
+         for record in result:
+             batch_.append(record.data())
+             if len(batch_) == batch_size:
+                 yield batch_
+                 batch_ = []
+
+         if batch_:
+             yield batch_
+
+     def get_batch(self, max_batch_size: int) -> Generator[BatchResults, None, None]:
+         # not using managed tx on purpose. First of, we want to keep the tx open while delivering batches
+         # automatic retry logic would help, as we do not want to start the query again
+         with self.context.neo4j.session() as session:
+             with session.begin_transaction() as tx:
+                 for chunk in self.__read_records(tx, max_batch_size):
+                     yield BatchResults(
+                         chunk=chunk,
+                         statistics={"cypher_rows_read": len(chunk)},
+                         batch_size=len(chunk)
+                     )
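
Since the new source and sink share the `BatchProcessor` interface, they can be combined into a small export pipeline. A sketch under the assumption that `context` and `task` already exist; the query and file name are invented:

```python
from pathlib import Path

from etl_lib.data_sink.CSVBatchSink import CSVBatchSink
from etl_lib.data_source.CypherBatchSource import CypherBatchSource

# Stream query results out of Neo4j and append them to a CSV file.
source = CypherBatchSource(context, task, query="MATCH (s:Stop) RETURN s.id AS id, s.name AS name")
sink = CSVBatchSink(context, task, source, Path("stops-export.csv"))

for batch in sink.get_batch(1000):
    print(batch.statistics)  # e.g. {'cypher_rows_read': 1000, 'rows_written': 1000}
```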
@@ -0,0 +1,17 @@
+ from etl_lib.core.Task import Task, TaskReturn
+
+
+ class CreateReportingConstraintsTask(Task):
+     """Creates the constraint in the REPORTER_DATABASE database."""
+
+     def __init__(self, config):
+         super().__init__(config)
+
+     def run_internal(self, **kwargs) -> TaskReturn:
+         database = self.context.env("REPORTER_DATABASE")
+         assert database is not None, "REPORTER_DATABASE needs to be set in order to run this task"
+
+         with self.context.neo4j.session(database) as session:
+             result = self.context.neo4j.query_database(session=session,
+                 query="CREATE CONSTRAINT IF NOT EXISTS FOR (n:ETLTask) REQUIRE n.uuid IS UNIQUE")
+         return TaskReturn(True, result.summery)
@@ -6,7 +6,12 @@ from etl_lib.core.utils import merge_summery
 
 
  class ExecuteCypherTask(Task):
+     """
+     Execute cypher (write) as a Task.
 
+     This task is for data refinement jobs, as it does not return cypher results.
+     Parameters can be passed as keyword arguments to the constructor and will be available as parameters inside cypher.
+     """
      def __init__(self, context: ETLContext):
          super().__init__(context)
          self.context = context
@@ -0,0 +1,57 @@
+ import abc
+ import logging
+ from pathlib import Path
+ from typing import Type
+
+ from pydantic import BaseModel
+
+ from etl_lib.core.ETLContext import ETLContext
+ from etl_lib.core.ClosedLoopBatchProcessor import ClosedLoopBatchProcessor
+ from etl_lib.core.Task import Task, TaskReturn
+ from etl_lib.core.ValidationBatchProcessor import ValidationBatchProcessor
+ from etl_lib.data_sink.CypherBatchSink import CypherBatchSink
+ from etl_lib.data_source.CSVBatchSource import CSVBatchSource
+
+
+ class CSVLoad2Neo4jTask(Task):
+     """
+     Loads the specified CSV file to Neo4j.
+
+     Uses BatchProcessors to read, validate and write to Neo4j.
+     The validation step is using pydantic, hence a Pydantic model needs to be provided.
+     Rows that fail the validation, will be written to en error file. The location of the error file is determined as
+     follows:
+
+     If the context env vars hold an entry `ETL_ERROR_PATH` the file will be place there, with the name set to name
+     of the provided filename appended with `.error.json`
+
+     If `ETL_ERROR_PATH` is not set, the file will be placed in the same directory as the CSV file.
+     """
+     def __init__(self, context: ETLContext, model: Type[BaseModel], file: Path, batch_size: int = 5000):
+         super().__init__(context)
+         self.batch_size = batch_size
+         self.model = model
+         self.logger = logging.getLogger(self.__class__.__name__)
+         self.file = file
+
+     def run_internal(self, **kwargs) -> TaskReturn:
+         error_path = self.context.env("ETL_ERROR_PATH")
+         if error_path is None:
+             error_file = self.file.with_suffix(".error.json")
+         else:
+             error_file = error_path / self.file.with_name(self.file.stem + ".error.json").name
+
+         csv = CSVBatchSource(self.file, self.context, self)
+         validator = ValidationBatchProcessor(self.context, self, csv, self.model, error_file)
+         cypher = CypherBatchSink(self.context, self, validator, self._query())
+         end = ClosedLoopBatchProcessor(self.context, self, cypher)
+         result = next(end.get_batch(self.batch_size))
+
+         return TaskReturn(True, result.statistics)
+
+     def __repr__(self):
+         return f"{self.__class__.__name__}({self.file})"
+
+     @abc.abstractmethod
+     def _query(self):
+         pass
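
`_query()` is abstract, so the task is meant to be subclassed with a concrete Pydantic model and Cypher statement. A minimal sketch; the model, file name and query are invented for illustration:

```python
from pathlib import Path

from pydantic import BaseModel

from etl_lib.task.data_loading.CSVLoad2Neo4jTask import CSVLoad2Neo4jTask


class Stop(BaseModel):  # hypothetical row model used for validation
    stop_id: str
    stop_name: str


class LoadStopsTask(CSVLoad2Neo4jTask):
    def __init__(self, context):
        super().__init__(context, model=Stop, file=Path("stops.csv"))

    def _query(self):
        # Rows are delivered in the $batch parameter by CypherBatchSink.
        return """
        UNWIND $batch AS row
        MERGE (s:Stop {id: row.stop_id})
        SET s.name = row.stop_name
        """
```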