acryl-datahub-actions 1.3.1.5rc9__py3-none-any.whl → 1.3.1.5rc10__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.

--- acryl_datahub_actions-1.3.1.5rc9.dist-info/METADATA
+++ acryl_datahub_actions-1.3.1.5rc10.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: acryl-datahub-actions
- Version: 1.3.1.5rc9
+ Version: 1.3.1.5rc10
  Summary: An action framework to work with DataHub real time changes.
  Home-page: https://docs.datahub.com/
  License: Apache-2.0
@@ -21,201 +21,216 @@ Classifier: Environment :: MacOS X
  Classifier: Topic :: Software Development
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
- Requires-Dist: pydantic<3.0.0,>=2.0.0
- Requires-Dist: toml>=0.10.0
  Requires-Dist: h11>=0.16
+ Requires-Dist: PyYAML
+ Requires-Dist: typing-inspect
+ Requires-Dist: entrypoints
  Requires-Dist: stackprinter
- Requires-Dist: click>=6.0.0
- Requires-Dist: click-default-group
- Requires-Dist: azure-identity==1.21.0
+ Requires-Dist: httpcore>=1.0.9
+ Requires-Dist: prometheus-client
+ Requires-Dist: python-dateutil>=2.8.0
+ Requires-Dist: acryl-datahub[datahub-kafka]==1.3.1.5rc10
  Requires-Dist: aws-msk-iam-sasl-signer-python==1.0.2
+ Requires-Dist: click>=6.0.0
  Requires-Dist: progressbar2
- Requires-Dist: entrypoints
- Requires-Dist: PyYAML
- Requires-Dist: python-dateutil>=2.8.0
- Requires-Dist: httpcore>=1.0.9
- Requires-Dist: acryl-datahub[datahub-kafka]==1.3.1.5rc9
  Requires-Dist: tenacity
- Requires-Dist: prometheus-client
+ Requires-Dist: toml>=0.10.0
+ Requires-Dist: click-default-group
+ Requires-Dist: pydantic<3.0.0,>=2.0.0
  Requires-Dist: ratelimit
- Requires-Dist: typing-inspect
+ Requires-Dist: azure-identity==1.21.0
  Provides-Extra: base
- Requires-Dist: toml>=0.10.0; extra == "base"
+ Requires-Dist: PyYAML; extra == "base"
+ Requires-Dist: entrypoints; extra == "base"
  Requires-Dist: stackprinter; extra == "base"
+ Requires-Dist: prometheus-client; extra == "base"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "base"
  Requires-Dist: click>=6.0.0; extra == "base"
+ Requires-Dist: toml>=0.10.0; extra == "base"
  Requires-Dist: click-default-group; extra == "base"
  Requires-Dist: progressbar2; extra == "base"
- Requires-Dist: entrypoints; extra == "base"
- Requires-Dist: PyYAML; extra == "base"
- Requires-Dist: python-dateutil>=2.8.0; extra == "base"
  Requires-Dist: tenacity; extra == "base"
- Requires-Dist: prometheus-client; extra == "base"
  Provides-Extra: kafka
- Requires-Dist: toml>=0.10.0; extra == "kafka"
+ Requires-Dist: PyYAML; extra == "kafka"
+ Requires-Dist: entrypoints; extra == "kafka"
  Requires-Dist: stackprinter; extra == "kafka"
+ Requires-Dist: prometheus-client; extra == "kafka"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "kafka"
  Requires-Dist: click>=6.0.0; extra == "kafka"
+ Requires-Dist: toml>=0.10.0; extra == "kafka"
  Requires-Dist: click-default-group; extra == "kafka"
- Requires-Dist: progressbar2; extra == "kafka"
- Requires-Dist: entrypoints; extra == "kafka"
- Requires-Dist: PyYAML; extra == "kafka"
  Requires-Dist: confluent-kafka[schemaregistry]; extra == "kafka"
- Requires-Dist: python-dateutil>=2.8.0; extra == "kafka"
+ Requires-Dist: progressbar2; extra == "kafka"
  Requires-Dist: tenacity; extra == "kafka"
- Requires-Dist: prometheus-client; extra == "kafka"
  Provides-Extra: executor
- Requires-Dist: toml>=0.10.0; extra == "executor"
+ Requires-Dist: PyYAML; extra == "executor"
+ Requires-Dist: entrypoints; extra == "executor"
  Requires-Dist: stackprinter; extra == "executor"
+ Requires-Dist: prometheus-client; extra == "executor"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "executor"
  Requires-Dist: acryl-executor==0.3.2; extra == "executor"
  Requires-Dist: click>=6.0.0; extra == "executor"
+ Requires-Dist: toml>=0.10.0; extra == "executor"
  Requires-Dist: click-default-group; extra == "executor"
  Requires-Dist: progressbar2; extra == "executor"
- Requires-Dist: entrypoints; extra == "executor"
- Requires-Dist: PyYAML; extra == "executor"
- Requires-Dist: python-dateutil>=2.8.0; extra == "executor"
  Requires-Dist: tenacity; extra == "executor"
- Requires-Dist: prometheus-client; extra == "executor"
  Provides-Extra: slack
- Requires-Dist: slack-bolt>=1.15.5; extra == "slack"
- Requires-Dist: toml>=0.10.0; extra == "slack"
+ Requires-Dist: PyYAML; extra == "slack"
+ Requires-Dist: entrypoints; extra == "slack"
  Requires-Dist: stackprinter; extra == "slack"
+ Requires-Dist: prometheus-client; extra == "slack"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "slack"
  Requires-Dist: click>=6.0.0; extra == "slack"
+ Requires-Dist: toml>=0.10.0; extra == "slack"
  Requires-Dist: click-default-group; extra == "slack"
  Requires-Dist: progressbar2; extra == "slack"
- Requires-Dist: entrypoints; extra == "slack"
- Requires-Dist: PyYAML; extra == "slack"
- Requires-Dist: python-dateutil>=2.8.0; extra == "slack"
+ Requires-Dist: slack-bolt>=1.15.5; extra == "slack"
  Requires-Dist: tenacity; extra == "slack"
- Requires-Dist: prometheus-client; extra == "slack"
  Provides-Extra: teams
- Requires-Dist: toml>=0.10.0; extra == "teams"
+ Requires-Dist: PyYAML; extra == "teams"
+ Requires-Dist: entrypoints; extra == "teams"
  Requires-Dist: stackprinter; extra == "teams"
+ Requires-Dist: prometheus-client; extra == "teams"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "teams"
+ Requires-Dist: pymsteams>=0.2.2; extra == "teams"
  Requires-Dist: click>=6.0.0; extra == "teams"
+ Requires-Dist: toml>=0.10.0; extra == "teams"
  Requires-Dist: click-default-group; extra == "teams"
  Requires-Dist: progressbar2; extra == "teams"
- Requires-Dist: entrypoints; extra == "teams"
- Requires-Dist: PyYAML; extra == "teams"
- Requires-Dist: python-dateutil>=2.8.0; extra == "teams"
  Requires-Dist: tenacity; extra == "teams"
- Requires-Dist: pymsteams>=0.2.2; extra == "teams"
- Requires-Dist: prometheus-client; extra == "teams"
  Provides-Extra: tag-propagation
- Requires-Dist: toml>=0.10.0; extra == "tag-propagation"
+ Requires-Dist: PyYAML; extra == "tag-propagation"
+ Requires-Dist: entrypoints; extra == "tag-propagation"
  Requires-Dist: stackprinter; extra == "tag-propagation"
+ Requires-Dist: prometheus-client; extra == "tag-propagation"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "tag-propagation"
  Requires-Dist: click>=6.0.0; extra == "tag-propagation"
+ Requires-Dist: toml>=0.10.0; extra == "tag-propagation"
  Requires-Dist: click-default-group; extra == "tag-propagation"
  Requires-Dist: progressbar2; extra == "tag-propagation"
- Requires-Dist: entrypoints; extra == "tag-propagation"
- Requires-Dist: PyYAML; extra == "tag-propagation"
- Requires-Dist: python-dateutil>=2.8.0; extra == "tag-propagation"
  Requires-Dist: tenacity; extra == "tag-propagation"
- Requires-Dist: prometheus-client; extra == "tag-propagation"
  Provides-Extra: term-propagation
- Requires-Dist: toml>=0.10.0; extra == "term-propagation"
+ Requires-Dist: PyYAML; extra == "term-propagation"
+ Requires-Dist: entrypoints; extra == "term-propagation"
  Requires-Dist: stackprinter; extra == "term-propagation"
+ Requires-Dist: prometheus-client; extra == "term-propagation"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "term-propagation"
  Requires-Dist: click>=6.0.0; extra == "term-propagation"
+ Requires-Dist: toml>=0.10.0; extra == "term-propagation"
  Requires-Dist: click-default-group; extra == "term-propagation"
  Requires-Dist: progressbar2; extra == "term-propagation"
- Requires-Dist: entrypoints; extra == "term-propagation"
- Requires-Dist: PyYAML; extra == "term-propagation"
- Requires-Dist: python-dateutil>=2.8.0; extra == "term-propagation"
  Requires-Dist: tenacity; extra == "term-propagation"
- Requires-Dist: prometheus-client; extra == "term-propagation"
  Provides-Extra: snowflake-tag-propagation
- Requires-Dist: toml>=0.10.0; extra == "snowflake-tag-propagation"
+ Requires-Dist: PyYAML; extra == "snowflake-tag-propagation"
+ Requires-Dist: entrypoints; extra == "snowflake-tag-propagation"
  Requires-Dist: stackprinter; extra == "snowflake-tag-propagation"
- Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc9; extra == "snowflake-tag-propagation"
+ Requires-Dist: prometheus-client; extra == "snowflake-tag-propagation"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "snowflake-tag-propagation"
  Requires-Dist: click>=6.0.0; extra == "snowflake-tag-propagation"
+ Requires-Dist: toml>=0.10.0; extra == "snowflake-tag-propagation"
  Requires-Dist: click-default-group; extra == "snowflake-tag-propagation"
+ Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc10; extra == "snowflake-tag-propagation"
  Requires-Dist: progressbar2; extra == "snowflake-tag-propagation"
- Requires-Dist: entrypoints; extra == "snowflake-tag-propagation"
- Requires-Dist: PyYAML; extra == "snowflake-tag-propagation"
- Requires-Dist: python-dateutil>=2.8.0; extra == "snowflake-tag-propagation"
  Requires-Dist: tenacity; extra == "snowflake-tag-propagation"
- Requires-Dist: prometheus-client; extra == "snowflake-tag-propagation"
  Provides-Extra: doc-propagation
- Requires-Dist: toml>=0.10.0; extra == "doc-propagation"
+ Requires-Dist: PyYAML; extra == "doc-propagation"
+ Requires-Dist: entrypoints; extra == "doc-propagation"
  Requires-Dist: stackprinter; extra == "doc-propagation"
+ Requires-Dist: prometheus-client; extra == "doc-propagation"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "doc-propagation"
  Requires-Dist: click>=6.0.0; extra == "doc-propagation"
+ Requires-Dist: toml>=0.10.0; extra == "doc-propagation"
  Requires-Dist: click-default-group; extra == "doc-propagation"
  Requires-Dist: progressbar2; extra == "doc-propagation"
- Requires-Dist: entrypoints; extra == "doc-propagation"
- Requires-Dist: PyYAML; extra == "doc-propagation"
- Requires-Dist: python-dateutil>=2.8.0; extra == "doc-propagation"
  Requires-Dist: tenacity; extra == "doc-propagation"
- Requires-Dist: prometheus-client; extra == "doc-propagation"
+ Provides-Extra: observability
+ Requires-Dist: PyYAML; extra == "observability"
+ Requires-Dist: entrypoints; extra == "observability"
+ Requires-Dist: stackprinter; extra == "observability"
+ Requires-Dist: prometheus-client; extra == "observability"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "observability"
+ Requires-Dist: opentelemetry-sdk>=1.20.0; extra == "observability"
+ Requires-Dist: click>=6.0.0; extra == "observability"
+ Requires-Dist: toml>=0.10.0; extra == "observability"
+ Requires-Dist: click-default-group; extra == "observability"
+ Requires-Dist: progressbar2; extra == "observability"
+ Requires-Dist: opentelemetry-api>=1.20.0; extra == "observability"
+ Requires-Dist: tenacity; extra == "observability"
  Provides-Extra: all
- Requires-Dist: slack-bolt>=1.15.5; extra == "all"
- Requires-Dist: toml>=0.10.0; extra == "all"
+ Requires-Dist: PyYAML; extra == "all"
+ Requires-Dist: entrypoints; extra == "all"
  Requires-Dist: stackprinter; extra == "all"
+ Requires-Dist: prometheus-client; extra == "all"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "all"
  Requires-Dist: acryl-executor==0.3.2; extra == "all"
- Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc9; extra == "all"
+ Requires-Dist: pymsteams>=0.2.2; extra == "all"
+ Requires-Dist: opentelemetry-sdk>=1.20.0; extra == "all"
  Requires-Dist: click>=6.0.0; extra == "all"
+ Requires-Dist: toml>=0.10.0; extra == "all"
  Requires-Dist: click-default-group; extra == "all"
- Requires-Dist: progressbar2; extra == "all"
- Requires-Dist: entrypoints; extra == "all"
- Requires-Dist: PyYAML; extra == "all"
  Requires-Dist: confluent-kafka[schemaregistry]; extra == "all"
- Requires-Dist: python-dateutil>=2.8.0; extra == "all"
+ Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc10; extra == "all"
+ Requires-Dist: progressbar2; extra == "all"
+ Requires-Dist: opentelemetry-api>=1.20.0; extra == "all"
+ Requires-Dist: slack-bolt>=1.15.5; extra == "all"
  Requires-Dist: tenacity; extra == "all"
- Requires-Dist: pymsteams>=0.2.2; extra == "all"
- Requires-Dist: prometheus-client; extra == "all"
  Provides-Extra: dev
- Requires-Dist: toml>=0.10.0; extra == "dev"
- Requires-Dist: types-PyYAML; extra == "dev"
- Requires-Dist: acryl-executor==0.3.2; extra == "dev"
- Requires-Dist: sqlalchemy-stubs; extra == "dev"
- Requires-Dist: click-default-group; extra == "dev"
- Requires-Dist: types-python-dateutil; extra == "dev"
- Requires-Dist: pytest-docker>=0.10.3; extra == "dev"
- Requires-Dist: types-toml; extra == "dev"
- Requires-Dist: deepdiff; extra == "dev"
+ Requires-Dist: h11>=0.16; extra == "dev"
+ Requires-Dist: freezegun; extra == "dev"
+ Requires-Dist: prometheus-client; extra == "dev"
+ Requires-Dist: types-freezegun; extra == "dev"
+ Requires-Dist: requests-mock; extra == "dev"
+ Requires-Dist: types-dataclasses; extra == "dev"
+ Requires-Dist: confluent-kafka[schemaregistry]; extra == "dev"
+ Requires-Dist: pytest-dependency>=0.5.1; extra == "dev"
+ Requires-Dist: types-cachetools; extra == "dev"
+ Requires-Dist: ratelimit; extra == "dev"
+ Requires-Dist: tenacity; extra == "dev"
+ Requires-Dist: PyYAML; extra == "dev"
  Requires-Dist: types-requests; extra == "dev"
+ Requires-Dist: types-setuptools; extra == "dev"
+ Requires-Dist: python-dateutil>=2.8.0; extra == "dev"
+ Requires-Dist: types-pytz; extra == "dev"
+ Requires-Dist: httpcore>=1.0.9; extra == "dev"
+ Requires-Dist: types-click==0.1.12; extra == "dev"
+ Requires-Dist: aws-msk-iam-sasl-signer-python==1.0.2; extra == "dev"
+ Requires-Dist: mypy==1.17.1; extra == "dev"
  Requires-Dist: types-six; extra == "dev"
- Requires-Dist: types-dataclasses; extra == "dev"
+ Requires-Dist: build; extra == "dev"
+ Requires-Dist: acryl-datahub[datahub-kafka]==1.3.1.5rc10; extra == "dev"
  Requires-Dist: types-PyMySQL; extra == "dev"
+ Requires-Dist: typing-inspect; extra == "dev"
+ Requires-Dist: entrypoints; extra == "dev"
  Requires-Dist: stackprinter; extra == "dev"
- Requires-Dist: build; extra == "dev"
- Requires-Dist: aws-msk-iam-sasl-signer-python==1.0.2; extra == "dev"
- Requires-Dist: PyYAML; extra == "dev"
- Requires-Dist: confluent-kafka[schemaregistry]; extra == "dev"
- Requires-Dist: pytest>=6.2.2; extra == "dev"
+ Requires-Dist: pymsteams>=0.2.2; extra == "dev"
+ Requires-Dist: ruff==0.11.7; extra == "dev"
+ Requires-Dist: click>=6.0.0; extra == "dev"
+ Requires-Dist: types-python-dateutil; extra == "dev"
+ Requires-Dist: toml>=0.10.0; extra == "dev"
+ Requires-Dist: click-default-group; extra == "dev"
+ Requires-Dist: pydantic<3.0.0,>=2.0.0; extra == "dev"
+ Requires-Dist: progressbar2; extra == "dev"
  Requires-Dist: pytest-cov>=2.8.1; extra == "dev"
- Requires-Dist: python-dateutil>=2.8.0; extra == "dev"
+ Requires-Dist: types-toml; extra == "dev"
+ Requires-Dist: pytest>=6.2.2; extra == "dev"
+ Requires-Dist: acryl-executor==0.3.2; extra == "dev"
+ Requires-Dist: deepdiff; extra == "dev"
  Requires-Dist: tox; extra == "dev"
- Requires-Dist: types-setuptools; extra == "dev"
- Requires-Dist: coverage>=5.1; extra == "dev"
  Requires-Dist: twine; extra == "dev"
- Requires-Dist: types-click==0.1.12; extra == "dev"
  Requires-Dist: jsonpickle; extra == "dev"
- Requires-Dist: types-pytz; extra == "dev"
- Requires-Dist: pytest-dependency>=0.5.1; extra == "dev"
- Requires-Dist: prometheus-client; extra == "dev"
- Requires-Dist: pydantic<3.0.0,>=2.0.0; extra == "dev"
+ Requires-Dist: pytest-docker>=0.10.3; extra == "dev"
+ Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc10; extra == "dev"
+ Requires-Dist: types-PyYAML; extra == "dev"
+ Requires-Dist: coverage>=5.1; extra == "dev"
  Requires-Dist: slack-bolt>=1.15.5; extra == "dev"
- Requires-Dist: ruff==0.11.7; extra == "dev"
- Requires-Dist: types-cachetools; extra == "dev"
- Requires-Dist: click>=6.0.0; extra == "dev"
- Requires-Dist: progressbar2; extra == "dev"
- Requires-Dist: entrypoints; extra == "dev"
- Requires-Dist: requests-mock; extra == "dev"
- Requires-Dist: mypy==1.17.1; extra == "dev"
- Requires-Dist: acryl-datahub[datahub-kafka]==1.3.1.5rc9; extra == "dev"
- Requires-Dist: types-freezegun; extra == "dev"
- Requires-Dist: ratelimit; extra == "dev"
- Requires-Dist: typing-inspect; extra == "dev"
- Requires-Dist: h11>=0.16; extra == "dev"
- Requires-Dist: freezegun; extra == "dev"
- Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc9; extra == "dev"
+ Requires-Dist: sqlalchemy-stubs; extra == "dev"
  Requires-Dist: azure-identity==1.21.0; extra == "dev"
- Requires-Dist: httpcore>=1.0.9; extra == "dev"
- Requires-Dist: tenacity; extra == "dev"
- Requires-Dist: pymsteams>=0.2.2; extra == "dev"
  Provides-Extra: integration-tests
- Requires-Dist: slack-bolt>=1.15.5; extra == "integration-tests"
  Requires-Dist: acryl-executor==0.3.2; extra == "integration-tests"
- Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc9; extra == "integration-tests"
- Requires-Dist: confluent-kafka[schemaregistry]; extra == "integration-tests"
  Requires-Dist: pymsteams>=0.2.2; extra == "integration-tests"
+ Requires-Dist: confluent-kafka[schemaregistry]; extra == "integration-tests"
+ Requires-Dist: acryl-datahub[snowflake-slim]==1.3.1.5rc10; extra == "integration-tests"
+ Requires-Dist: slack-bolt>=1.15.5; extra == "integration-tests"
  Dynamic: classifier
  Dynamic: description
  Dynamic: description-content-type
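
The headline metadata change is the new observability extra, which layers opentelemetry-api>=1.20.0 and opentelemetry-sdk>=1.20.0 on top of the base action-framework dependencies; the all extra inherits the OpenTelemetry pins as well. A hedged install sketch, assuming standard pip extras syntax (the command is illustrative, not part of the package):

    pip install 'acryl-datahub-actions[observability]==1.3.1.5rc10'

The rest of the churn in this hunk is dependency reordering plus the rc9 → rc10 bump of the acryl-datahub pins.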

--- acryl_datahub_actions-1.3.1.5rc9.dist-info/RECORD
+++ acryl_datahub_actions-1.3.1.5rc10.dist-info/RECORD
@@ -1,5 +1,5 @@
  datahub_actions/__init__.py,sha256=Pn9UTDbqYPt6jY_acE7MQIveX_Nzdfl5oGmi-Ze8CHs,647
- datahub_actions/_version.py,sha256=eWryIWAU9j5gdTM9B7DMIXhXxXht0_2bc4gKRc9cU60,339
+ datahub_actions/_version.py,sha256=yYoKMr5I5pxzVQxB11xhjKWxbFgE0iycOeBEygdOepA,340
  datahub_actions/entrypoints.py,sha256=_6NOpKhlfXuSUdPhDpPya7d9kJmwoRGrunxcNPMQE9k,4743
  datahub_actions/action/__init__.py,sha256=KYWPHGi7sDM0DXrrXmhlR6_zhym1qNbtFhjYk1Ug6ss,579
  datahub_actions/action/action.py,sha256=ET1fpeRn6KVD9diJ9ZOObsojrN9y6Vfn4tK7jzBQKHg,1537
@@ -13,6 +13,8 @@ datahub_actions/event/__init__.py,sha256=KYWPHGi7sDM0DXrrXmhlR6_zhym1qNbtFhjYk1U
  datahub_actions/event/event.py,sha256=2Eyizwzbf3fXsUV4n5p7gsYZS_CjEE5y9m1YvkmKOKU,990
  datahub_actions/event/event_envelope.py,sha256=x1QfDetMM7k5SLecD0Nb-duxMxKWU0rmeLroScvkicY,2258
  datahub_actions/event/event_registry.py,sha256=bWV2n9u1n8p9Onu9G2AVgZIfOxCjaBT0pKg2eOQdaig,4663
+ datahub_actions/observability/__init__.py,sha256=llKqfushdo7d6RNAtIx9ofS8fqFltea-BHkkzZCUyAA,643
+ datahub_actions/observability/kafka_lag_monitor.py,sha256=UrGSC18iUqvKCpxN8HzZ5gpvDjdNtAwuDgXgd77pfpE,7865
  datahub_actions/pipeline/__init__.py,sha256=KYWPHGi7sDM0DXrrXmhlR6_zhym1qNbtFhjYk1Ug6ss,579
  datahub_actions/pipeline/pipeline.py,sha256=6Bod5W3QJNAV0kXymooBxxJVuvAYv3mpvAa6zp-9u5c,12194
  datahub_actions/pipeline/pipeline_config.py,sha256=6mJKK0J9EuXaxPVjl7UuE0qzACpPGjQLr6yNv_-O7Xg,2347
@@ -55,7 +57,7 @@ datahub_actions/plugin/source/acryl/datahub_cloud_events_ack_manager.py,sha256=k
  datahub_actions/plugin/source/acryl/datahub_cloud_events_consumer.py,sha256=1j6s-uTyZcRlF74toHTrdNeVNq5MMh9_Fp8FOsV-8a0,7116
  datahub_actions/plugin/source/acryl/datahub_cloud_events_consumer_offsets_store.py,sha256=5m_VR_5yHjt4_YZEi1die5sr0ngl9dVobX10AjIodvA,3969
  datahub_actions/plugin/source/kafka/__init__.py,sha256=KYWPHGi7sDM0DXrrXmhlR6_zhym1qNbtFhjYk1Ug6ss,579
- datahub_actions/plugin/source/kafka/kafka_event_source.py,sha256=0SunE49cKmqEGaazyYTYMVZ-NsIDP89QcBa0Xs-JkdQ,11594
+ datahub_actions/plugin/source/kafka/kafka_event_source.py,sha256=5Nj4coUbboK7ZFtH0JzIvzzacdqWd9a1nHLBtpx3Uog,13380
  datahub_actions/plugin/source/kafka/utils.py,sha256=EEqBnv8Zd05zSg9T3f2FHaARaStD2j2M_xiSeaQBplA,758
  datahub_actions/plugin/transform/__init__.py,sha256=KYWPHGi7sDM0DXrrXmhlR6_zhym1qNbtFhjYk1Ug6ss,579
  datahub_actions/plugin/transform/filter/__init__.py,sha256=KYWPHGi7sDM0DXrrXmhlR6_zhym1qNbtFhjYk1Ug6ss,579
@@ -75,8 +77,8 @@ datahub_actions/utils/event_util.py,sha256=VluTOeyFcot48moK9qLmYL1ADAjsau0346Ngi
  datahub_actions/utils/kafka_msk_iam.py,sha256=JWg0MBEMcsG2AmW4yXiHvH_dnnsQDIRASdlvDXGTVcI,1013
  datahub_actions/utils/name_resolver.py,sha256=uXICSpy1IUe5uyFUiRk4vDQ9_G0JytPgKPSnqMA6fZk,10540
  datahub_actions/utils/social_util.py,sha256=FI_3qDjayX9LKlDjf43QHafnOznQk3v5Vp3Xyhq-lno,5271
- acryl_datahub_actions-1.3.1.5rc9.dist-info/METADATA,sha256=eBH8Q0aKNcELG4XylRTmF-cKWhZN47F_c0r4fjZw9_Y,18124
- acryl_datahub_actions-1.3.1.5rc9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- acryl_datahub_actions-1.3.1.5rc9.dist-info/entry_points.txt,sha256=Gbvj36kOFWrsJ1meJVFB7zYgrKbIGgufOpZDurJbehU,866
- acryl_datahub_actions-1.3.1.5rc9.dist-info/top_level.txt,sha256=93StcIqRM0PfcJoT06TFhcCjPnIw-CyFgBaF-4vqCKY,16
- acryl_datahub_actions-1.3.1.5rc9.dist-info/RECORD,,
+ acryl_datahub_actions-1.3.1.5rc10.dist-info/METADATA,sha256=dLcgOWiBJxMdiQqcG-pkqoufuxW4hG8wQka-9wJbzbs,18960
+ acryl_datahub_actions-1.3.1.5rc10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ acryl_datahub_actions-1.3.1.5rc10.dist-info/entry_points.txt,sha256=Gbvj36kOFWrsJ1meJVFB7zYgrKbIGgufOpZDurJbehU,866
+ acryl_datahub_actions-1.3.1.5rc10.dist-info/top_level.txt,sha256=93StcIqRM0PfcJoT06TFhcCjPnIw-CyFgBaF-4vqCKY,16
+ acryl_datahub_actions-1.3.1.5rc10.dist-info/RECORD,,

--- datahub_actions/_version.py
+++ datahub_actions/_version.py
@@ -1,6 +1,6 @@
  # Published at https://pypi.org/project/acryl-datahub-actions/.
  __package_name__ = "acryl-datahub-actions"
- __version__ = "1.3.1.5rc9"
+ __version__ = "1.3.1.5rc10"


  def is_dev_mode() -> bool:

--- /dev/null
+++ datahub_actions/observability/__init__.py
@@ -0,0 +1,15 @@
+ # Copyright 2021 Acryl Data, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Observability and metrics utilities for datahub-actions."""

--- /dev/null
+++ datahub_actions/observability/kafka_lag_monitor.py
@@ -0,0 +1,230 @@
+ # Copyright 2021 Acryl Data, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Background thread for monitoring Kafka consumer lag.
+
+ This module provides a KafkaLagMonitor class that periodically calculates
+ and reports Kafka consumer lag metrics to Prometheus.
+ """
+
+ import logging
+ import threading
+ from dataclasses import dataclass
+ from typing import Dict, Optional
+
+ from confluent_kafka import Consumer, KafkaException, TopicPartition
+ from prometheus_client import Gauge
+
+ logger = logging.getLogger(__name__)
+
+ # Prometheus metrics
+ KAFKA_LAG_GAUGE = Gauge(
+     name="kafka_consumer_lag",
+     documentation="Kafka consumer lag aggregated per topic",
+     labelnames=["topic", "pipeline_name"],
+ )
+
+
+ @dataclass
+ class LagStats:
+     """Statistics for a topic's consumer lag."""
+
+     topic: str
+     total_lag: int
+     partition_lags: Dict[int, int]  # partition_id -> lag
+
+
+ class KafkaLagMonitor:
+     """Background thread that periodically reports Kafka consumer lag.
+
+     This monitor:
+     1. Queries assigned partitions from the Kafka consumer
+     2. Gets high water marks for each partition
+     3. Gets committed offsets for each partition
+     4. Calculates lag = high_water_mark - committed_offset
+     5. Aggregates per-topic lag (sum across partitions)
+     6. Updates Prometheus Gauge metrics
+     7. Optionally updates OpenTelemetry metrics if available
+     """
+
+     def __init__(
+         self,
+         consumer: Consumer,
+         pipeline_name: str,
+         interval_seconds: float = 30.0,
+         timeout_seconds: float = 5.0,
+     ):
+         """Initialize lag monitor.
+
+         Args:
+             consumer: confluent_kafka.Consumer instance to monitor
+             pipeline_name: Name of the action pipeline (for metric labels)
+             interval_seconds: How often to report lag (default: 30s)
+             timeout_seconds: Timeout for Kafka API calls (default: 5s)
+         """
+         self.consumer = consumer
+         self.pipeline_name = pipeline_name
+         self.interval_seconds = interval_seconds
+         self.timeout_seconds = timeout_seconds
+
+         self._stop_event = threading.Event()
+         self._thread: Optional[threading.Thread] = None
+
+     def start(self) -> None:
+         """Start the background monitoring thread."""
+         if self._thread is not None:
+             logger.warning("Lag monitor already started")
+             return
+
+         self._stop_event.clear()
+         self._thread = threading.Thread(
+             target=self._monitor_loop,
+             name=f"kafka-lag-monitor-{self.pipeline_name}",
+             daemon=True,  # Daemon thread exits when main thread exits
+         )
+         self._thread.start()
+         logger.info(
+             f"Kafka lag monitor started for pipeline '{self.pipeline_name}' "
+             f"(interval={self.interval_seconds}s)"
+         )
+
+     def stop(self) -> None:
+         """Stop the background monitoring thread."""
+         if self._thread is None:
+             return
+
+         logger.info(f"Stopping Kafka lag monitor for pipeline '{self.pipeline_name}'")
+         self._stop_event.set()
+         self._thread.join(timeout=10.0)
+         self._thread = None
+
+     def _monitor_loop(self) -> None:
+         """Main monitoring loop that runs in background thread."""
+         while not self._stop_event.is_set():
+             try:
+                 self._collect_and_report_lag()
+             except Exception as e:
+                 # Log error but don't crash - monitoring should be resilient
+                 logger.error(
+                     f"Error collecting lag for pipeline '{self.pipeline_name}': {e}",
+                     exc_info=True,
+                 )
+
+             # Sleep with interrupt support
+             self._stop_event.wait(timeout=self.interval_seconds)
+
+     def _collect_and_report_lag(self) -> None:
+         """Collect lag statistics and update metrics."""
+         # Get assigned partitions
+         assignment = self.consumer.assignment()
+         if not assignment:
+             logger.debug(f"No partitions assigned to pipeline '{self.pipeline_name}'")
+             return
+
+         # Group partitions by topic
+         topic_partitions: Dict[str, list[TopicPartition]] = {}
+         for tp in assignment:
+             if tp.topic not in topic_partitions:
+                 topic_partitions[tp.topic] = []
+             topic_partitions[tp.topic].append(tp)
+
+         # Calculate lag per topic
+         for topic, partitions in topic_partitions.items():
+             lag_stats = self._calculate_topic_lag(topic, partitions)
+             if lag_stats:
+                 self._report_lag(lag_stats)
+
+     def _calculate_topic_lag(
+         self, topic: str, partitions: list[TopicPartition]
+     ) -> Optional[LagStats]:
+         """Calculate lag for all partitions of a topic.
+
+         Args:
+             topic: Topic name
+             partitions: List of TopicPartition objects for this topic
+
+         Returns:
+             LagStats with aggregated lag, or None if calculation failed
+         """
+         partition_lags: Dict[int, int] = {}
+
+         # Get committed offsets for all partitions at once
+         try:
+             committed_partitions = self.consumer.committed(
+                 partitions, timeout=self.timeout_seconds
+             )
+         except KafkaException as e:
+             logger.warning(f"Failed to get committed offsets for topic '{topic}': {e}")
+             return None
+
+         # Calculate lag for each partition
+         for tp in committed_partitions:
+             try:
+                 # Get high water mark
+                 watermarks = self.consumer.get_watermark_offsets(
+                     tp, timeout=self.timeout_seconds, cached=False
+                 )
+                 if watermarks is None:
+                     logger.warning(
+                         f"Failed to get watermarks for {topic}[{tp.partition}]"
+                     )
+                     continue
+
+                 low, high = watermarks
+
+                 # Calculate lag
+                 if tp.offset < 0:
+                     # No committed offset yet - show total available messages as lag
+                     lag = high - low
+                 else:
+                     # Normal case: lag = high water mark - committed offset
+                     lag = high - tp.offset
+
+                 # Ensure non-negative lag
+                 lag = max(0, lag)
+                 partition_lags[tp.partition] = lag
+
+             except KafkaException as e:
+                 logger.warning(
+                     f"Error calculating lag for {topic}[{tp.partition}]: {e}"
+                 )
+                 continue
+
+         if not partition_lags:
+             return None
+
+         total_lag = sum(partition_lags.values())
+         return LagStats(
+             topic=topic,
+             total_lag=total_lag,
+             partition_lags=partition_lags,
+         )
+
+     def _report_lag(self, lag_stats: LagStats) -> None:
+         """Report lag statistics to metrics backends.
+
+         Args:
+             lag_stats: Lag statistics to report
+         """
+         # Always update Prometheus (base requirement)
+         KAFKA_LAG_GAUGE.labels(
+             topic=lag_stats.topic,
+             pipeline_name=self.pipeline_name,
+         ).set(lag_stats.total_lag)
+
+         logger.debug(
+             f"Pipeline '{self.pipeline_name}' topic '{lag_stats.topic}': "
+             f"lag={lag_stats.total_lag} "
+             f"(partitions: {lag_stats.partition_lags})"
+         )
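
The new monitor is self-contained: it needs only a live confluent_kafka.Consumer and a pipeline name, and it publishes a single kafka_consumer_lag Prometheus gauge labeled by topic and pipeline_name. A minimal usage sketch against the API added above; the broker address, group id, and topic are placeholders, not values taken from the package:

    from confluent_kafka import Consumer

    from datahub_actions.observability.kafka_lag_monitor import KafkaLagMonitor

    # Placeholder consumer configuration, for illustration only.
    consumer = Consumer(
        {
            "bootstrap.servers": "localhost:9092",
            "group.id": "lag-monitor-demo",
        }
    )
    consumer.subscribe(["example-topic"])

    monitor = KafkaLagMonitor(
        consumer=consumer,
        pipeline_name="demo-pipeline",
        interval_seconds=30.0,  # the module's defaults, shown explicitly
        timeout_seconds=5.0,
    )
    monitor.start()  # daemon thread; refreshes kafka_consumer_lag every interval
    # ... poll/consume messages here ...
    monitor.stop()
    consumer.close()

Because the thread is a daemon and every Kafka call is wrapped in exception handling, a failure in lag collection degrades to a logged error rather than taking the consumer down.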

--- datahub_actions/plugin/source/kafka/kafka_event_source.py
+++ datahub_actions/plugin/source/kafka/kafka_event_source.py
@@ -13,6 +13,7 @@
  # limitations under the License.

  import logging
+ import os
  from dataclasses import dataclass
  from typing import Any, Callable, Dict, Iterable, Optional

@@ -41,6 +42,7 @@ from datahub_actions.event.event_registry import (
  )

  # May or may not need these.
+ from datahub_actions.observability.kafka_lag_monitor import KafkaLagMonitor
  from datahub_actions.pipeline.pipeline_context import PipelineContext
  from datahub_actions.plugin.source.kafka.utils import with_retry
  from datahub_actions.source.event_source import EventSource
@@ -124,6 +126,7 @@ def kafka_messages_observer(pipeline_name: str) -> Callable:
  class KafkaEventSource(EventSource):
      running = False
      source_config: KafkaEventSourceConfig
+     _lag_monitor: Optional[KafkaLagMonitor] = None

      def __init__(self, config: KafkaEventSourceConfig, ctx: PipelineContext):
          self.source_config = config
@@ -159,6 +162,41 @@ class KafkaEventSource(EventSource):
          )
          self._observe_message: Callable = kafka_messages_observer(ctx.pipeline_name)

+         # Initialize lag monitoring (if enabled)
+         if self._is_lag_monitoring_enabled():
+             lag_interval = float(
+                 os.environ.get("DATAHUB_ACTIONS_KAFKA_LAG_INTERVAL_SECONDS", "30")
+             )
+             lag_timeout = float(
+                 os.environ.get("DATAHUB_ACTIONS_KAFKA_LAG_TIMEOUT_SECONDS", "5")
+             )
+             self._lag_monitor = KafkaLagMonitor(
+                 consumer=self.consumer,
+                 pipeline_name=ctx.pipeline_name,
+                 interval_seconds=lag_interval,
+                 timeout_seconds=lag_timeout,
+             )
+             logger.info(
+                 f"Kafka lag monitoring enabled for '{ctx.pipeline_name}' "
+                 f"(interval={lag_interval}s, timeout={lag_timeout}s)"
+             )
+         else:
+             logger.debug(
+                 f"Kafka lag monitoring disabled for pipeline '{ctx.pipeline_name}'"
+             )
+
+     @staticmethod
+     def _is_lag_monitoring_enabled() -> bool:
+         """Check if Kafka lag monitoring should be enabled.
+
+         Lag monitoring is enabled if:
+         1. DATAHUB_ACTIONS_KAFKA_LAG_ENABLED=true (case-insensitive)
+
+         Default: False (conservative default for OSS rollout)
+         """
+         enabled_str = os.environ.get("DATAHUB_ACTIONS_KAFKA_LAG_ENABLED", "false")
+         return enabled_str.lower() in ("true", "1", "yes")
+
      @classmethod
      def create(cls, config_dict: dict, ctx: PipelineContext) -> "EventSource":
          config = KafkaEventSourceConfig.model_validate(config_dict)
@@ -169,6 +207,11 @@ class KafkaEventSource(EventSource):
          topics_to_subscribe = list(topic_routes.values())
          logger.debug(f"Subscribing to the following topics: {topics_to_subscribe}")
          self.consumer.subscribe(topics_to_subscribe)
+
+         # Start lag monitoring after subscription
+         if self._lag_monitor is not None:
+             self._lag_monitor.start()
+
          self.running = True
          while self.running:
              try:
@@ -229,6 +272,11 @@ class KafkaEventSource(EventSource):
          yield EventEnvelope(RELATIONSHIP_CHANGE_EVENT_V1_TYPE, rce, kafka_meta)

      def close(self) -> None:
+         # Stop lag monitoring first
+         if self._lag_monitor is not None:
+             self._lag_monitor.stop()
+
+         # Then close consumer
          if self.consumer:
              self.running = False
              self.consumer.close()
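
As the hunks above show, lag monitoring is off by default and gated entirely by environment variables read when the event source is constructed. A minimal sketch of enabling it before the actions process starts; only the variable names and defaults come from the code above, the override values are illustrative:

    import os

    # _is_lag_monitoring_enabled() accepts "true", "1", or "yes" (case-insensitive).
    os.environ["DATAHUB_ACTIONS_KAFKA_LAG_ENABLED"] = "true"
    # Optional overrides; the source defaults to a 30s interval and 5s timeout.
    os.environ["DATAHUB_ACTIONS_KAFKA_LAG_INTERVAL_SECONDS"] = "15"
    os.environ["DATAHUB_ACTIONS_KAFKA_LAG_TIMEOUT_SECONDS"] = "5"

The monitor starts only after the consumer subscribes and is stopped before the consumer is closed, so the background thread never polls a closed consumer.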