dfflow 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dfflow-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Suresh K
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
dfflow-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,184 @@
1
+ Metadata-Version: 2.4
2
+ Name: dfflow
3
+ Version: 0.1.0
4
+ Summary: Lightweight pandas DataFrame logging and flow tracker
5
+ Home-page: https://github.com/suressssz/dfflow
6
+ Author: Suresh K
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: License :: OSI Approved :: MIT License
9
+ Classifier: Operating System :: OS Independent
10
+ Requires-Python: >=3.7
11
+ Description-Content-Type: text/markdown
12
+ License-File: LICENSE
13
+ Requires-Dist: pandas>=1.3
14
+ Dynamic: author
15
+ Dynamic: classifier
16
+ Dynamic: description
17
+ Dynamic: description-content-type
18
+ Dynamic: home-page
19
+ Dynamic: license-file
20
+ Dynamic: requires-dist
21
+ Dynamic: requires-python
22
+ Dynamic: summary
23
+
24
+ # dfflow
25
+
26
+ Lightweight pandas DataFrame logging and flow pipeline tracker.
27
+
28
+ ---
29
+
30
+ ## 📌 What is this?
31
+
32
+ **dfflow** is a simple, easy-to-use Python package for:
33
+ - Logging pandas DataFrame transformations step by step
34
+ - Saving logs in text or JSON format
35
+ - Building reusable pipeline-style data flows
36
+
37
+ ---
38
+
39
+ ## 📌 Features
40
+
41
+ ✅ Automatic DataFrame transformation logging
42
+ ✅ Text and JSON log formats supported
43
+ ✅ Easy pipeline-style processing
44
+ ✅ Customizable logging
45
+
46
+ ---
47
+
48
+ ## 📌 Installation
49
+
50
+ ```bash
51
+ pip install dfflow
52
+ ```
53
+
54
+ ---
55
+
56
+ ## 📌 Requirements
57
+
58
+ - Python >=3.7
59
+ - pandas >=1.3
60
+
61
+ To install requirements:
62
+
63
+ ```bash
64
+ pip install -r requirements.txt
65
+ ```
66
+
67
+ ---
68
+
69
+ ## 📌 Usage Example 1
70
+
71
+ ```python
72
+ import pandas as pd
73
+ from dfflow import DFLogger, FlowPipeline, drop_nulls
74
+ import os
75
+ os.makedirs('examples', exist_ok=True)
76
+
77
+ logger = DFLogger(log_file='examples/dfflow_log.txt')
78
+ pipe = FlowPipeline(logger=logger)
79
+
80
+ pipe.add_step("Drop Nulls", drop_nulls)
81
+
82
+ df = pd.DataFrame({
83
+ 'name': ['Suresh', None, 'Jerry', 'Sunil', 'Linga'],
84
+ 'age': [23, 26, None, 23, 24]
85
+ })
86
+
87
+ pipe.run(df)
88
+
89
+ ```
90
+ ---
91
+ ## 📌 Usage Example 2
92
+
93
+ ```python
94
+ import pandas as pd
95
+ from dfflow import DFLogger, FlowPipeline
96
+ from dfflow.cleaning import drop_nulls, lowercase_columns
97
+ from dfflow.profile import profile_summary
98
+
99
+ # Create a sample DataFrame
100
+ data = {
101
+ "Name": ["Suresh", None, "Sunil", "Shiva", "Linga"],
102
+ "Age": [23, 26, 23, 24, None]
103
+ }
104
+ df = pd.DataFrame(data)
105
+
106
+ print("Original DataFrame:")
107
+ print(df)
108
+ print("\n")
109
+
110
+ # Setup the DFLogger
111
+ logger = DFLogger(
112
+ log_file='dfflow_log2.txt', # Log file path
113
+ mode='text' # 'text' or 'json'
114
+ )
115
+
116
+ # Create the FlowPipeline
117
+ pipe = FlowPipeline(logger=logger)
118
+
119
+ # Add cleaning steps
120
+ pipe.add_step("Drop Nulls", drop_nulls)
121
+ pipe.add_step("Lowercase Columns", lowercase_columns)
122
+
123
+ # Run the pipeline
124
+ result_df = pipe.run(df)
125
+
126
+ print("Final Cleaned DataFrame:")
127
+ print(result_df)
128
+ print("\n")
129
+
130
+ # Profile Summary
131
+ summary = profile_summary(result_df)
132
+ print("Profile Summary:")
133
+ print(summary)
134
+
135
+ ```
136
+ ---
137
+
138
+ ## 📌 Project Structure
139
+
140
+ ```
141
+ dfflow/ # Main package folder
142
+
143
+ ├── __init__.py
144
+ ├── logger.py # Core logging class (DFLogger)
145
+ ├── decorators.py # log_step decorator
146
+ ├── flow.py # FlowPipeline logic
147
+ ├── cleaning.py # Cleaning steps (drop_nulls, lowercase_columns)
148
+ ├── profile.py # Profile utilities
149
+ ├── utils.py # Helper functions
150
+
151
+ examples/ # Usage demos
152
+ │ ├── dfflow_log.txt
153
+ │ ├── dfflow_log2.txt
154
+ │ ├── flow_demo.py
155
+ │ └── flow_demo2.py
156
+
157
+ tests/ # Unit tests
158
+ │ ├── __init__.py
159
+ │ ├── test_cleaning.py
160
+ │ ├── test_flow.py
161
+ │ ├── test_logger.py
162
+ │ └── test_profile.py
163
+
164
+ LICENSE # MIT License
165
+ README.md # Project description
166
+ requirements.txt # Dependencies
167
+ setup.py # Packaging configuration
168
+
169
+ ```
170
+
171
+
172
+ ---
173
+
174
+ ## 📌 Author
175
+
176
+ Maintained by **Suresh K**
177
+ 📧 Email: sureshstr38@gmail.com
178
+ 🌐 GitHub: [suressssz](https://github.com/suressssz)
179
+
180
+ ---
181
+
182
+ ## 📌 License
183
+
184
+ This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
dfflow-0.1.0/README.md ADDED
@@ -0,0 +1,161 @@
1
+ # dfflow
2
+
3
+ Lightweight pandas DataFrame logging and flow pipeline tracker.
4
+
5
+ ---
6
+
7
+ ## 📌 What is this?
8
+
9
+ **dfflow** is a simple, easy-to-use Python package for:
10
+ - Logging pandas DataFrame transformations step by step
11
+ - Saving logs in text or JSON format
12
+ - Building reusable pipeline-style data flows
13
+
14
+ ---
15
+
16
+ ## 📌 Features
17
+
18
+ ✅ Automatic DataFrame transformation logging
19
+ ✅ Text and JSON log formats supported
20
+ ✅ Easy pipeline-style processing
21
+ ✅ Customizable logging
22
+
23
+ ---
24
+
25
+ ## 📌 Installation
26
+
27
+ ```bash
28
+ pip install dfflow
29
+ ```
30
+
31
+ ---
32
+
33
+ ## 📌 Requirements
34
+
35
+ - Python >=3.7
36
+ - pandas >=1.3
37
+
38
+ To install requirements:
39
+
40
+ ```bash
41
+ pip install -r requirements.txt
42
+ ```
43
+
44
+ ---
45
+
46
+ ## 📌 Usage Example 1
47
+
48
+ ```python
49
+ import pandas as pd
50
+ from dfflow import DFLogger, FlowPipeline, drop_nulls
51
+ import os
52
+ os.makedirs('examples', exist_ok=True)
53
+
54
+ logger = DFLogger(log_file='examples/dfflow_log.txt')
55
+ pipe = FlowPipeline(logger=logger)
56
+
57
+ pipe.add_step("Drop Nulls", drop_nulls)
58
+
59
+ df = pd.DataFrame({
60
+ 'name': ['Suresh', None, 'Jerry', 'Sunil', 'Linga'],
61
+ 'age': [23, 26, None, 23, 24]
62
+ })
63
+
64
+ pipe.run(df)
65
+
66
+ ```
67
+ ---
68
+ ## 📌 Usage Example 2
69
+
70
+ ```python
71
+ import pandas as pd
72
+ from dfflow import DFLogger, FlowPipeline
73
+ from dfflow.cleaning import drop_nulls, lowercase_columns
74
+ from dfflow.profile import profile_summary
75
+
76
+ # Create a sample DataFrame
77
+ data = {
78
+ "Name": ["Suresh", None, "Sunil", "Shiva", "Linga"],
79
+ "Age": [23, 26, 23, 24, None]
80
+ }
81
+ df = pd.DataFrame(data)
82
+
83
+ print("Original DataFrame:")
84
+ print(df)
85
+ print("\n")
86
+
87
+ # Setup the DFLogger
88
+ logger = DFLogger(
89
+ log_file='dfflow_log2.txt', # Log file path
90
+ mode='text' # 'text' or 'json'
91
+ )
92
+
93
+ # Create the FlowPipeline
94
+ pipe = FlowPipeline(logger=logger)
95
+
96
+ # Add cleaning steps
97
+ pipe.add_step("Drop Nulls", drop_nulls)
98
+ pipe.add_step("Lowercase Columns", lowercase_columns)
99
+
100
+ # Run the pipeline
101
+ result_df = pipe.run(df)
102
+
103
+ print("Final Cleaned DataFrame:")
104
+ print(result_df)
105
+ print("\n")
106
+
107
+ # Profile Summary
108
+ summary = profile_summary(result_df)
109
+ print("Profile Summary:")
110
+ print(summary)
111
+
112
+ ```
113
+ ---
114
+
115
+ ## 📌 Project Structure
116
+
117
+ ```
118
+ dfflow/ # Main package folder
119
+
120
+ ├── __init__.py
121
+ ├── logger.py # Core logging class (DFLogger)
122
+ ├── decorators.py # log_step decorator
123
+ ├── flow.py # FlowPipeline logic
124
+ ├── cleaning.py # Cleaning steps (drop_nulls, lowercase_columns)
125
+ ├── profile.py # Profile utilities
126
+ ├── utils.py # Helper functions
127
+
128
+ examples/ # Usage demos
129
+ │ ├── dfflow_log.txt
130
+ │ ├── dfflow_log2.txt
131
+ │ ├── flow_demo.py
132
+ │ └── flow_demo2.py
133
+
134
+ tests/ # Unit tests
135
+ │ ├── __init__.py
136
+ │ ├── test_cleaning.py
137
+ │ ├── test_flow.py
138
+ │ ├── test_logger.py
139
+ │ └── test_profile.py
140
+
141
+ LICENSE # MIT License
142
+ README.md # Project description
143
+ requirements.txt # Dependencies
144
+ setup.py # Packaging configuration
145
+
146
+ ```
147
+
148
+
149
+ ---
150
+
151
+ ## 📌 Author
152
+
153
+ Maintained by **Suresh K**
154
+ 📧 Email: sureshstr38@gmail.com
155
+ 🌐 GitHub: [suressssz](https://github.com/suressssz)
156
+
157
+ ---
158
+
159
+ ## 📌 License
160
+
161
+ This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,5 @@
1
+ from .logger import DFLogger
2
+ from .decorators import log_step
3
+ from .flow import FlowPipeline
4
+ from .cleaning import drop_nulls, lowercase_columns
5
+ from .profile import profile_summary
@@ -0,0 +1,10 @@
1
from .decorators import log_step


@log_step("Drop Nulls")
def drop_nulls(df):
    """Return a new DataFrame with every row containing a null removed.

    The input DataFrame is left untouched (``dropna`` returns a copy).
    """
    return df.dropna()


@log_step("Lowercase Columns")
def lowercase_columns(df):
    """Return a copy of *df* whose column labels are lowercased.

    Bug fix: the previous ``df.columns = [...]`` assignment mutated the
    caller's DataFrame in place as a side effect. ``rename`` with a
    callable mapper produces a new frame and leaves the input unchanged,
    which matches ``drop_nulls``'s copy semantics.
    """
    return df.rename(columns=str.lower)
@@ -0,0 +1,8 @@
1
import functools


def log_step(step_name):
    """Decorator factory: announce completion of a DataFrame transform.

    Parameters
    ----------
    step_name : str
        Human-readable label printed after the wrapped function returns.

    Returns
    -------
    callable
        A decorator that passes all arguments through unchanged and
        returns the wrapped function's result.
    """
    def decorator(func):
        # functools.wraps preserves __name__/__doc__ so decorated steps
        # remain introspectable (e.g. in logs, debuggers, pickling).
        @functools.wraps(func)
        def wrapper(df, *args, **kwargs):
            result = func(df, *args, **kwargs)
            print(f"[dfflow] Step: {step_name} completed.")
            return result
        return wrapper
    return decorator
@@ -0,0 +1,14 @@
1
class FlowPipeline:
    """Ordered sequence of named DataFrame transformation steps.

    Steps are executed in registration order by :meth:`run`; after each
    step the (optional) logger records the step name and the resulting
    DataFrame.
    """

    def __init__(self, logger=None):
        """Create an empty pipeline, optionally attached to a DFLogger."""
        self.steps = []
        self.logger = logger

    def add_step(self, name, func):
        """Register transformation *func* under the label *name*."""
        self.steps.append((name, func))

    def run(self, df):
        """Feed *df* through every registered step and return the result."""
        for step_name, transform in self.steps:
            df = transform(df)
            if self.logger:
                # Log the intermediate frame right after the step ran.
                self.logger.log(f"Step: {step_name}", df)
        return df
@@ -0,0 +1,34 @@
1
import datetime
import json


class DFLogger:
    """Append-only logger that records a message plus a DataFrame snapshot.

    Parameters
    ----------
    log_file : str
        Path of the log file. Entries are appended; the file is never
        truncated.
    mode : str
        ``'json'`` writes one JSON object per line (JSON Lines); any
        other value (default ``'text'``) writes a human-readable block.
    """

    def __init__(self, log_file='dfflow_log.txt', mode='text'):
        self.log_file = log_file
        self.mode = mode

    def log(self, message, df, level="INFO"):
        """Write one log entry for *df* with *message* at *level*."""
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        if self.mode == 'json':
            self._log_json(timestamp, message, df, level)
        else:
            self._log_text(timestamp, message, df, level)

    def _log_text(self, timestamp, message, df, level):
        """Append a banner-delimited, human-readable entry."""
        # Explicit UTF-8: the platform default encoding can raise or
        # mangle non-ASCII cell values (names, labels) on some systems.
        with open(self.log_file, 'a', encoding='utf-8') as f:
            f.write("="*50 + "\n")
            f.write(f"Timestamp : {timestamp}\n")
            f.write(f"Level : {level}\n")
            f.write(f"Message : {message}\n")
            f.write("DataFrame :\n")
            f.write(df.to_string())
            f.write("\n\n")

    def _log_json(self, timestamp, message, df, level):
        """Append a single JSON line describing the entry."""
        log_entry = {
            "timestamp": timestamp,
            "level": level,
            "message": message,
            "dataframe": df.to_dict(orient="records")
        }
        # ensure_ascii=False keeps non-ASCII data readable; pairs with
        # the explicit UTF-8 file encoding below.
        with open(self.log_file, 'a', encoding='utf-8') as f:
            f.write(json.dumps(log_entry, ensure_ascii=False) + "\n")
@@ -0,0 +1,6 @@
1
def profile_summary(df):
    """Return a quick profile of *df*.

    The summary dict contains the frame's ``shape`` tuple, its column
    labels as a list, and a per-column count of null values.
    """
    null_counts = df.isnull().sum().to_dict()
    column_names = list(df.columns)
    return {
        "shape": df.shape,
        "columns": column_names,
        "null_counts": null_counts,
    }
@@ -0,0 +1,4 @@
1
from datetime import datetime


def current_timestamp():
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
@@ -0,0 +1,184 @@
1
+ Metadata-Version: 2.4
2
+ Name: dfflow
3
+ Version: 0.1.0
4
+ Summary: Lightweight pandas DataFrame logging and flow tracker
5
+ Home-page: https://github.com/suressssz/dfflow
6
+ Author: Suresh K
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: License :: OSI Approved :: MIT License
9
+ Classifier: Operating System :: OS Independent
10
+ Requires-Python: >=3.7
11
+ Description-Content-Type: text/markdown
12
+ License-File: LICENSE
13
+ Requires-Dist: pandas>=1.3
14
+ Dynamic: author
15
+ Dynamic: classifier
16
+ Dynamic: description
17
+ Dynamic: description-content-type
18
+ Dynamic: home-page
19
+ Dynamic: license-file
20
+ Dynamic: requires-dist
21
+ Dynamic: requires-python
22
+ Dynamic: summary
23
+
24
+ # dfflow
25
+
26
+ Lightweight pandas DataFrame logging and flow pipeline tracker.
27
+
28
+ ---
29
+
30
+ ## 📌 What is this?
31
+
32
+ **dfflow** is a simple, easy-to-use Python package for:
33
+ - Logging pandas DataFrame transformations step by step
34
+ - Saving logs in text or JSON format
35
+ - Building reusable pipeline-style data flows
36
+
37
+ ---
38
+
39
+ ## 📌 Features
40
+
41
+ ✅ Automatic DataFrame transformation logging
42
+ ✅ Text and JSON log formats supported
43
+ ✅ Easy pipeline-style processing
44
+ ✅ Customizable logging
45
+
46
+ ---
47
+
48
+ ## 📌 Installation
49
+
50
+ ```bash
51
+ pip install dfflow
52
+ ```
53
+
54
+ ---
55
+
56
+ ## 📌 Requirements
57
+
58
+ - Python >=3.7
59
+ - pandas >=1.3
60
+
61
+ To install requirements:
62
+
63
+ ```bash
64
+ pip install -r requirements.txt
65
+ ```
66
+
67
+ ---
68
+
69
+ ## 📌 Usage Example 1
70
+
71
+ ```python
72
+ import pandas as pd
73
+ from dfflow import DFLogger, FlowPipeline, drop_nulls
74
+ import os
75
+ os.makedirs('examples', exist_ok=True)
76
+
77
+ logger = DFLogger(log_file='examples/dfflow_log.txt')
78
+ pipe = FlowPipeline(logger=logger)
79
+
80
+ pipe.add_step("Drop Nulls", drop_nulls)
81
+
82
+ df = pd.DataFrame({
83
+ 'name': ['Suresh', None, 'Jerry', 'Sunil', 'Linga'],
84
+ 'age': [23, 26, None, 23, 24]
85
+ })
86
+
87
+ pipe.run(df)
88
+
89
+ ```
90
+ ---
91
+ ## 📌 Usage Example 2
92
+
93
+ ```python
94
+ import pandas as pd
95
+ from dfflow import DFLogger, FlowPipeline
96
+ from dfflow.cleaning import drop_nulls, lowercase_columns
97
+ from dfflow.profile import profile_summary
98
+
99
+ # Create a sample DataFrame
100
+ data = {
101
+ "Name": ["Suresh", None, "Sunil", "Shiva", "Linga"],
102
+ "Age": [23, 26, 23, 24, None]
103
+ }
104
+ df = pd.DataFrame(data)
105
+
106
+ print("Original DataFrame:")
107
+ print(df)
108
+ print("\n")
109
+
110
+ # Setup the DFLogger
111
+ logger = DFLogger(
112
+ log_file='dfflow_log2.txt', # Log file path
113
+ mode='text' # 'text' or 'json'
114
+ )
115
+
116
+ # Create the FlowPipeline
117
+ pipe = FlowPipeline(logger=logger)
118
+
119
+ # Add cleaning steps
120
+ pipe.add_step("Drop Nulls", drop_nulls)
121
+ pipe.add_step("Lowercase Columns", lowercase_columns)
122
+
123
+ # Run the pipeline
124
+ result_df = pipe.run(df)
125
+
126
+ print("Final Cleaned DataFrame:")
127
+ print(result_df)
128
+ print("\n")
129
+
130
+ # Profile Summary
131
+ summary = profile_summary(result_df)
132
+ print("Profile Summary:")
133
+ print(summary)
134
+
135
+ ```
136
+ ---
137
+
138
+ ## 📌 Project Structure
139
+
140
+ ```
141
+ dfflow/ # Main package folder
142
+
143
+ ├── __init__.py
144
+ ├── logger.py # Core logging class (DFLogger)
145
+ ├── decorators.py # log_step decorator
146
+ ├── flow.py # FlowPipeline logic
147
+ ├── cleaning.py # Cleaning steps (drop_nulls, lowercase_columns)
148
+ ├── profile.py # Profile utilities
149
+ ├── utils.py # Helper functions
150
+
151
+ examples/ # Usage demos
152
+ │ ├── dfflow_log.txt
153
+ │ ├── dfflow_log2.txt
154
+ │ ├── flow_demo.py
155
+ │ └── flow_demo2.py
156
+
157
+ tests/ # Unit tests
158
+ │ ├── __init__.py
159
+ │ ├── test_cleaning.py
160
+ │ ├── test_flow.py
161
+ │ ├── test_logger.py
162
+ │ └── test_profile.py
163
+
164
+ LICENSE # MIT License
165
+ README.md # Project description
166
+ requirements.txt # Dependencies
167
+ setup.py # Packaging configuration
168
+
169
+ ```
170
+
171
+
172
+ ---
173
+
174
+ ## 📌 Author
175
+
176
+ Maintained by **Suresh K**
177
+ 📧 Email: sureshstr38@gmail.com
178
+ 🌐 GitHub: [suressssz](https://github.com/suressssz)
179
+
180
+ ---
181
+
182
+ ## 📌 License
183
+
184
+ This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
@@ -0,0 +1,20 @@
1
+ LICENSE
2
+ README.md
3
+ setup.py
4
+ dfflow/__init__.py
5
+ dfflow/cleaning.py
6
+ dfflow/decorators.py
7
+ dfflow/flow.py
8
+ dfflow/logger.py
9
+ dfflow/profile.py
10
+ dfflow/utils.py
11
+ dfflow.egg-info/PKG-INFO
12
+ dfflow.egg-info/SOURCES.txt
13
+ dfflow.egg-info/dependency_links.txt
14
+ dfflow.egg-info/requires.txt
15
+ dfflow.egg-info/top_level.txt
16
+ tests/__init__.py
17
+ tests/test_cleaning.py
18
+ tests/test_flow.py
19
+ tests/test_logger.py
20
+ tests/test_profile.py
@@ -0,0 +1 @@
1
+ pandas>=1.3
@@ -0,0 +1,2 @@
1
+ dfflow
2
+ tests
dfflow-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
dfflow-0.1.0/setup.py ADDED
@@ -0,0 +1,21 @@
1
from setuptools import setup, find_packages


def _read_long_description():
    """Read README.md as UTF-8.

    The README contains emoji, so relying on the platform default
    encoding raises UnicodeDecodeError on e.g. cp1252 systems; the
    ``with`` block also closes the handle instead of leaking it.
    """
    with open("README.md", encoding="utf-8") as fh:
        return fh.read()


setup(
    name="dfflow",
    version="0.1.0",
    author="Suresh K",
    description="Lightweight pandas DataFrame logging and flow tracker",
    long_description=_read_long_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/suressssz/dfflow",
    packages=find_packages(),
    install_requires=[
        "pandas>=1.3"
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
)
File without changes
@@ -0,0 +1,21 @@
1
# tests/test_cleaning.py

import pandas as pd
from dfflow.cleaning import drop_nulls, lowercase_columns


def test_drop_nulls():
    frame = pd.DataFrame({
        'A': [1, None, 3],
        'B': [4, 5, None],
    })
    cleaned = drop_nulls(frame)
    # Every remaining cell must be non-null.
    assert cleaned.isnull().sum().sum() == 0


def test_lowercase_columns():
    frame = pd.DataFrame({
        'FirstName': [1, 2],
        'AGE': [3, 4],
    })
    renamed = lowercase_columns(frame)
    # All column labels come back fully lowercased.
    assert all(col.islower() for col in renamed.columns)
@@ -0,0 +1,30 @@
1
# tests/test_flow.py

import pandas as pd
from dfflow import DFLogger, FlowPipeline, drop_nulls, lowercase_columns


def test_flow_pipeline(tmp_path):
    log_path = tmp_path / "flow_log.txt"
    logger = DFLogger(log_file=str(log_path), mode='text')

    frame = pd.DataFrame({
        'Name': ['Tom', None],
        'Age': [25, 22],
    })

    pipeline = FlowPipeline(logger=logger)
    pipeline.add_step("Drop Nulls", drop_nulls)
    pipeline.add_step("Lowercase Columns", lowercase_columns)

    result = pipeline.run(frame)

    # Nulls were removed by the first step.
    assert result.isnull().sum().sum() == 0
    # Column labels were lowercased by the second step.
    assert all(col.islower() for col in result.columns)

    # The logger recorded each step to the file.
    content = log_path.read_text()
    assert "Drop Nulls" in content
@@ -0,0 +1,28 @@
1
# tests/test_logger.py

import pandas as pd
import os
from dfflow.logger import DFLogger


def test_text_logging(tmp_path):
    log_path = tmp_path / "test_log.txt"
    logger = DFLogger(log_file=str(log_path), mode='text')
    logger.log("Test log message", pd.DataFrame({'A': [1, 2]}))

    assert log_path.exists()
    content = log_path.read_text()
    # Both the message and the DataFrame dump appear in the entry.
    assert "Test log message" in content
    assert "A" in content


def test_json_logging(tmp_path):
    log_path = tmp_path / "test_log.json"
    logger = DFLogger(log_file=str(log_path), mode='json')
    logger.log("JSON log message", pd.DataFrame({'B': [3, 4]}))

    assert log_path.exists()
    assert "JSON log message" in log_path.read_text()
@@ -0,0 +1,15 @@
1
# tests/test_profile.py

import pandas as pd
from dfflow.profile import profile_summary


def test_profile_summary():
    frame = pd.DataFrame({
        'X': [1, None],
        'Y': [3, 4],
    })
    summary = profile_summary(frame)
    # All three summary sections are present.
    assert {'shape', 'columns', 'null_counts'} <= set(summary)
    # Column X has exactly one null.
    assert summary['null_counts']['X'] == 1