datapipelab 0.3.4__tar.gz → 0.3.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. datapipelab-0.3.5/PKG-INFO +313 -0
  2. datapipelab-0.3.5/datapipelab.egg-info/PKG-INFO +313 -0
  3. {datapipelab-0.3.4 → datapipelab-0.3.5}/setup.py +3 -1
  4. datapipelab-0.3.4/PKG-INFO +0 -8
  5. datapipelab-0.3.4/datapipelab.egg-info/PKG-INFO +0 -8
  6. {datapipelab-0.3.4 → datapipelab-0.3.5}/MANIFEST.in +0 -0
  7. {datapipelab-0.3.4 → datapipelab-0.3.5}/README.md +0 -0
  8. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/__init__.py +0 -0
  9. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/__init__.py +0 -0
  10. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/connector_node/__init__.py +0 -0
  11. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/__init__.py +0 -0
  12. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/custom_node.py +0 -0
  13. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/__init__.py +0 -0
  14. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/bigquery_api_node.py +0 -0
  15. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/bigquery_spark_node.py +0 -0
  16. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/custom_node.py +0 -0
  17. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/gcp_bucket_node.py +0 -0
  18. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/shell_node.py +0 -0
  19. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/processor/spark_node.py +0 -0
  20. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/__init__.py +0 -0
  21. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/csv_node.py +0 -0
  22. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/delta_node.py +0 -0
  23. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/hive_node.py +0 -0
  24. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/pandas_csv_node.py +0 -0
  25. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/spark_api_node.py +0 -0
  26. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/spark_node.py +0 -0
  27. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/sink/teams_notification_node.py +0 -0
  28. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/source/__init__.py +0 -0
  29. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/source/delta_node.py +0 -0
  30. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/source/hive_node.py +0 -0
  31. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/source/spark_api_node.py +0 -0
  32. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/source/spark_node.py +0 -0
  33. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/node/tnode.py +0 -0
  34. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/wrapper/__init__.py +0 -0
  35. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/app/wrapper/source/__init__.py +0 -0
  36. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/engine.py +0 -0
  37. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/logger.py +0 -0
  38. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/pipeline.py +0 -0
  39. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/pipeline_config.py +0 -0
  40. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab/pipeline_handler.py +0 -0
  41. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab.egg-info/SOURCES.txt +0 -0
  42. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab.egg-info/dependency_links.txt +0 -0
  43. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab.egg-info/requires.txt +0 -0
  44. {datapipelab-0.3.4 → datapipelab-0.3.5}/datapipelab.egg-info/top_level.txt +0 -0
  45. {datapipelab-0.3.4 → datapipelab-0.3.5}/setup.cfg +0 -0
@@ -0,0 +1,313 @@
+ Metadata-Version: 2.4
+ Name: datapipelab
+ Version: 0.3.5
+ Summary: A data pipeline library with connectors, sources, processors, and sinks.
+ Description-Content-Type: text/markdown
+ Requires-Dist: json5
+ Requires-Dist: loguru
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: requires-dist
+ Dynamic: summary
+
+ # Datapipelab
+
+ ## Overview
+ `datapipelab` is a lightweight, flexible data pipeline framework for building and orchestrating complex data workflows. It uses a modular, node-based architecture that lets users plug in source, processor, and sink nodes built on technologies such as Apache Spark, Google BigQuery, Hive, Delta Lake, and Microsoft Teams.
+
+
+ ## Installation
+ Clone the repository and install the required dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ Or install it from PyPI to use it as a library:
+ ```bash
+ pip install datapipelab
+ ```
+
+
+ ## Usage Guide
+
+ To run a pipeline, you typically follow these steps:
+
+ 1. **Define your pipeline configuration** using a Python list or a JSON config.
+ 2. **Instantiate and execute the engine**.
+
+ Example:
+
+ ```python
+ from datapipelab.engine import Engine
+
+ config = [
+     {
+         "type": "source",
+         "format": "hive",
+         "name": "load_customer_accounts",
+         "options": {
+             "query": "SELECT customer_id, enrollment_date FROM customer_account"
+         }
+     },
+     {
+         "type": "processor",
+         "format": "spark",
+         "name": "aggregate_active_users",
+         "options": {
+             "parents": ["load_customer_accounts"],
+             "query": """
+                 SELECT
+                     YEAR(enrollment_date) AS enrollment_year,
+                     COUNT(*) AS active_user_count
+                 FROM load_customer_accounts
+                 GROUP BY enrollment_year
+             """
+         }
+     },
+     {
+         "type": "sink",
+         "format": "hive",
+         "name": "store_active_user_report",
+         "options": {
+             "parents": ["aggregate_active_users"],
+             "table": "report.active_user_summary"
+         }
+     },
+     {
+         "type": "sink",
+         "format": "teams_notification",
+         "name": "notify_report_ready",
+         "options": {
+             "parents": ["store_active_user_report"],
+             "webhook_url": "{{{WEBHOOK_URL}}}",
+             "message": "Active user report has been updated in Hive."
+         }
+     }
+ ]
+
+ params = {"WEBHOOK_URL": "https://outlook.office.com/webhook/..."}
+ engine = Engine(config, spark, params)
+ engine.running_travelers()
+ ```
+
+
+ ## Pipeline Configuration
+
+ Pipelines are defined using structured configuration objects or files (a minimal loading sketch follows this list). Each definition specifies:
+
+ * Nodes (source, processor, sink)
+ * Dependencies and execution order via `parents`
+ * Parameters for each node, e.g., SQL queries, table names, and paths
+
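For orientation, here is a minimal sketch of loading such a config from a JSON5 file and handing it to the engine. The file name, the Spark session setup, and passing the parsed list straight to `Engine` (as in the Usage Guide example above) are illustrative assumptions rather than a prescribed API.

```python
# Minimal sketch: load a pipeline definition from a JSON5 file and run it.
# The file name is hypothetical; Engine(config, spark, params) and
# engine.running_travelers() follow the Usage Guide example above.
import json5
from pyspark.sql import SparkSession

from datapipelab.engine import Engine

spark = SparkSession.builder.appName("datapipelab-example").getOrCreate()

with open("pipeline_config.json5") as f:   # hypothetical file name
    config = json5.load(f)                 # a list of node definitions

params = {"WEBHOOK_URL": "https://outlook.office.com/webhook/..."}
engine = Engine(config, spark, params)
engine.running_travelers()
```

If the library's own `pipeline_config.py` provides a loader, prefer that; this sketch relies only on the `Engine(config, spark, params)` call shown above.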
+ ## Available Node Types
+
+ ### Source Nodes
+
+ * **`spark_node`**
+ * Executes a Spark SQL query to read data into the pipeline.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "source",
+     "format": "spark",
+     "source": "spark",
+     "options": {
+         "query": "SELECT * FROM database_name.table_name"
+     }
+ }
+ ```
+
+
+ * **`hive_node`**
+ * Reads data from a Hive table.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "source",
+     "format": "hive",
+     "source": "hive",
+     "options": {
+         "query": "SELECT * FROM database_name.table_name"
+     }
+ }
+ ```
+
+ ### Processor Nodes
+
+ * **`bigquery_api_node`**
+ * Executes a query via the BigQuery API.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "processor",
+     "format": "bigquery_api",
+     "options": {
+         "credentials_path": "credentials.json",
+         "return_as_spark_df": false,
+         "return_as_python_list": false,
+         "return_as_is": true,
+         "project_name": "project_name",
+         "query": "select * from `project_name.dataset_name.table_name`"
+     }
+ }
+ ```
+ - `return_as_python_list` and `return_as_is` are optional.
+ - `query` can be any valid BigQuery SQL statement, including SELECT, DDL, DML, scripting, control-flow, stored procedure call, and temporary-table statements.
+
+
+ * **`gcp_bucket_api_node`**
+ * Deletes a bucket, or a directory within a GCP bucket.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "processor",
+     "format": "gcp_bucket_api",
+     "options": {
+         "credentials_path": "credentials.json",
+         "project_name": "project_name",
+         "bucket_name": "bucket_name",
+         "subdirectory": "path/to/subdirectory"
+     }
+ }
+ ```
+ - `subdirectory` is optional and can be used to target a specific subdirectory within the bucket.
+
+
+ * **`bigquery_spark_node`**
+ * Reads data from BigQuery using the Spark BigQuery connector.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "processor",
+     "format": "bigquery_spark",
+     "options": {
+         "parent_project": "parent_project_name",
+         "materialization_dataset": "materialization_dataset_name",
+         "query": "select * from `project_name.dataset_name.table_name`"
+     }
+ }
+ ```
+ - `query` does not support DDL, DML, scripting, control-flow, stored procedure call, or temporary-table statements; only SELECT statements are supported.
+
+
+ * **`shell_node`**
+ * Executes a shell command or script.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "processor",
+     "format": "shell",
+     "options": {
+         "query": "echo 'Hello, World!'"
+     }
+ }
+ ```
+
+
+ * **`custom_node`**
+ * Runs custom logic written by the user.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "processor",
+     "format": "custom",
+     "options": {
+         "module_name": "CustomModuleName",
+         "module_path": "path/to/custom_module",
+         "class_name": "CustomNodeClassName",
+         "optional_param": "value"
+     }
+ }
+ ```
+
+
+ ### Sink Nodes
+
+ * **`hive_node`**
+ * Writes output to a Hive table.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "sink",
+     "format": "hive",
+     "options": {
+         "parents": ["parent_node_name"],
+         "database": "database_name",
+         "table": "table_name"
+     }
+ }
+ ```
+
+
+ * **`spark_node`**
+ * Writes output to a Hive table.
+ * Example:
+
+ ```json
+ {
+     "name": "node_name",
+     "type": "sink",
+     "format": "spark",
+     "options": {
+         "parents": ["parent_node_name"],
+         "database": "database_name",
+         "table": "table_name"
+     }
+ }
+ ```
+
+
+ * **`teams_notification_node`**
+ * Sends a message to a Microsoft Teams channel.
+ * Example:
+
+ ```json
+ {
+     "type": "sink",
+     "format": "teams_notification",
+     "name": "notify_report_ready",
+     "options": {
+         "parents": ["store_active_user_report"],
+         "webhook_url": "{{{WEBHOOK_URL}}}",
+         "message": "Active user report has been updated in Hive."
+     }
+ }
+ ```
+
+
+ ## Extending the Framework
+
+ To create a custom node (a minimal sketch follows this list):
+
+ 1. Subclass `TNode` from `app/node/tnode.py`.
+ 2. Implement the required methods (`run`, `validate`, etc.).
+ 3. Register your node in the pipeline factory or configuration.
+
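As a rough illustration of these steps, here is a hedged sketch of a custom node. The import path is inferred from the package layout, and the constructor signature, stored attributes, and the `table` option are assumptions to be aligned with the real `TNode` base class.

```python
# Hypothetical custom node; the constructor arguments, the self.spark /
# self.table attributes, and the "table" option are assumptions — align
# them with what TNode in app/node/tnode.py actually defines.
from datapipelab.app.node.tnode import TNode  # path inferred from the package layout


class RowCountNode(TNode):
    def __init__(self, spark, tnode_config):  # assumed signature
        super().__init__(spark, tnode_config)
        self.spark = spark
        self.table = tnode_config["options"].get("table")

    def validate(self):
        # Fail fast if the node is misconfigured.
        if not self.table:
            raise ValueError("RowCountNode requires an 'options.table' entry")

    def run(self):
        # Return a small DataFrame for downstream nodes to consume.
        return self.spark.sql(f"SELECT COUNT(*) AS row_count FROM {self.table}")
```

Once registered, such a node can be wired into a pipeline config and connected to other nodes via `parents` like any built-in node.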
+ ## Logging and Monitoring
+
+ Logging is centralized in `logger.py`. Logs are categorized by node and execution stage to assist with debugging and auditing.
+
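A hedged usage sketch follows; it assumes `datapipelab/logger.py` wraps loguru (a declared dependency) and uses loguru's global logger as a stand-in for whatever object `logger.py` actually exports.

```python
# Illustrative only: log with node and stage context so entries can be
# grouped per node and execution stage, as described above.
from loguru import logger  # stand-in for the object exported by datapipelab.logger

logger.info("node={node} stage={stage} starting", node="aggregate_active_users", stage="run")
logger.error("node={node} stage={stage} write failed", node="store_active_user_report", stage="sink")
```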
+ ## Troubleshooting
+
+ ---
+
+ For more advanced examples or integration guides, refer to the `examples/` folder or reach out to the maintainers.
@@ -0,0 +1,313 @@
(The new datapipelab.egg-info/PKG-INFO is identical to the PKG-INFO content shown above.)
@@ -6,10 +6,12 @@ long_description = (this_directory / "README.md").read_text()
  
  setup(
      name='datapipelab',
-     version='0.3.4',
+     version='0.3.5',
      description='A data pipeline library with connectors, sources, processors, and sinks.',
      packages=find_packages(),
      include_package_data=True,
+     long_description=long_description,
+     long_description_content_type='text/markdown',
      install_requires=[
          'json5',
          'loguru',
@@ -1,8 +0,0 @@
- Metadata-Version: 2.4
- Name: datapipelab
- Version: 0.3.4
- Summary: A data pipeline library with connectors, sources, processors, and sinks.
- Requires-Dist: json5
- Requires-Dist: loguru
- Dynamic: requires-dist
- Dynamic: summary
@@ -1,8 +0,0 @@
- Metadata-Version: 2.4
- Name: datapipelab
- Version: 0.3.4
- Summary: A data pipeline library with connectors, sources, processors, and sinks.
- Requires-Dist: json5
- Requires-Dist: loguru
- Dynamic: requires-dist
- Dynamic: summary