apache-airflow-providers-databricks 7.3.1rc1__py3-none-any.whl → 7.3.2rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of apache-airflow-providers-databricks might be problematic.
- airflow/providers/databricks/__init__.py +1 -1
- airflow/providers/databricks/hooks/databricks.py +3 -6
- airflow/providers/databricks/hooks/databricks_base.py +14 -15
- airflow/providers/databricks/hooks/databricks_sql.py +3 -5
- airflow/providers/databricks/operators/databricks.py +26 -7
- airflow/providers/databricks/operators/databricks_workflow.py +9 -9
- airflow/providers/databricks/sensors/databricks_partition.py +2 -3
- airflow/providers/databricks/utils/databricks.py +6 -7
- {apache_airflow_providers_databricks-7.3.1rc1.dist-info → apache_airflow_providers_databricks-7.3.2rc1.dist-info}/METADATA +6 -6
- {apache_airflow_providers_databricks-7.3.1rc1.dist-info → apache_airflow_providers_databricks-7.3.2rc1.dist-info}/RECORD +12 -12
- {apache_airflow_providers_databricks-7.3.1rc1.dist-info → apache_airflow_providers_databricks-7.3.2rc1.dist-info}/WHEEL +0 -0
- {apache_airflow_providers_databricks-7.3.1rc1.dist-info → apache_airflow_providers_databricks-7.3.2rc1.dist-info}/entry_points.txt +0 -0
airflow/providers/databricks/__init__.py

@@ -29,7 +29,7 @@ from airflow import __version__ as airflow_version
 
 __all__ = ["__version__"]
 
-__version__ = "7.3.1"
+__version__ = "7.3.2"
 
 if packaging.version.parse(packaging.version.parse(airflow_version).base_version) < packaging.version.parse(
     "2.9.0"
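For context, the unchanged guard below the version bump is the provider's minimum-Airflow gate. A small illustrative sketch of why the version string is parsed twice (only the 2.9.0 floor comes from the diff; the sample value is made up):

    from packaging.version import parse

    airflow_version = "2.9.3.dev0"  # illustrative value, not from the diff

    # parse(...).base_version strips dev/rc/post suffixes ("2.9.3.dev0" -> "2.9.3"),
    # and the outer parse() turns that back into a comparable Version object.
    if parse(parse(airflow_version).base_version) < parse("2.9.0"):
        raise RuntimeError("This provider requires Apache Airflow 2.9.0+")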
airflow/providers/databricks/hooks/databricks.py

@@ -384,8 +384,7 @@ class DatabricksHook(BaseDatabricksHook):
 
         if not matching_jobs:
             return None
-        else:
-            return matching_jobs[0]["job_id"]
+        return matching_jobs[0]["job_id"]
 
     def list_pipelines(
         self, batch_size: int = 25, pipeline_name: str | None = None, notebook_path: str | None = None

@@ -445,8 +444,7 @@ class DatabricksHook(BaseDatabricksHook):
 
         if not pipeline_name or len(matching_pipelines) == 0:
             return None
-        else:
-            return matching_pipelines[0]["pipeline_id"]
+        return matching_pipelines[0]["pipeline_id"]
 
     def get_run_page_url(self, run_id: int) -> str:
         """

@@ -640,8 +638,7 @@ class DatabricksHook(BaseDatabricksHook):
         repair_history = response["repair_history"]
         if len(repair_history) == 1:
             return None
-        else:
-            return repair_history[-1]["id"]
+        return repair_history[-1]["id"]
 
     def get_cluster_state(self, cluster_id: str) -> ClusterState:
         """
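All three hunks above are the same style-only cleanup: an else branch that follows a return is dropped and its body de-indented, so the hook's behaviour is unchanged. A minimal before/after sketch of the pattern, with generic names rather than the provider's code:

    def first_id_before(items):
        if not items:
            return None
        else:  # redundant: the if-branch always returns
            return items[0]["id"]


    def first_id_after(items):
        if not items:
            return None
        return items[0]["id"]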
airflow/providers/databricks/hooks/databricks_base.py

@@ -197,9 +197,8 @@ class BaseDatabricksHook(BaseHook):
         if urlparse_host:
             # In this case, host = https://xx.cloud.databricks.com
             return urlparse_host
-        else:
-            # In this case, host = xx.cloud.databricks.com
-            return host
+        # In this case, host = xx.cloud.databricks.com
+        return host
 
     def _get_retry_object(self) -> Retrying:
         """

@@ -555,27 +554,27 @@ class BaseDatabricksHook(BaseHook):
                 "Using token auth. For security reasons, please set token in Password field instead of extra"
             )
             return self.databricks_conn.extra_dejson["token"]
-        elif not self.databricks_conn.login and self.databricks_conn.password:
+        if not self.databricks_conn.login and self.databricks_conn.password:
             self.log.debug("Using token auth.")
             return self.databricks_conn.password
-        elif "azure_tenant_id" in self.databricks_conn.extra_dejson:
+        if "azure_tenant_id" in self.databricks_conn.extra_dejson:
             if self.databricks_conn.login == "" or self.databricks_conn.password == "":
                 raise AirflowException("Azure SPN credentials aren't provided")
             self.log.debug("Using AAD Token for SPN.")
             return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
-        elif self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
+        if self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
             self.log.debug("Using AAD Token for managed identity.")
             self._check_azure_metadata_service()
             return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
-        elif self.databricks_conn.extra_dejson.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
+        if self.databricks_conn.extra_dejson.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
             self.log.debug("Using default Azure Credential authentication.")
             return self._get_aad_token_for_default_az_credential(DEFAULT_DATABRICKS_SCOPE)
-        elif self.databricks_conn.extra_dejson.get("service_principal_oauth", False):
+        if self.databricks_conn.extra_dejson.get("service_principal_oauth", False):
             if self.databricks_conn.login == "" or self.databricks_conn.password == "":
                 raise AirflowException("Service Principal credentials aren't provided")
             self.log.debug("Using Service Principal Token.")
             return self._get_sp_token(OIDC_TOKEN_SERVICE_URL.format(self.databricks_conn.host))
-        elif raise_error:
+        if raise_error:
             raise AirflowException("Token authentication isn't configured")
 
         return None

@@ -586,28 +585,28 @@ class BaseDatabricksHook(BaseHook):
                 "Using token auth. For security reasons, please set token in Password field instead of extra"
             )
             return self.databricks_conn.extra_dejson["token"]
-        elif not self.databricks_conn.login and self.databricks_conn.password:
+        if not self.databricks_conn.login and self.databricks_conn.password:
             self.log.debug("Using token auth.")
             return self.databricks_conn.password
-        elif "azure_tenant_id" in self.databricks_conn.extra_dejson:
+        if "azure_tenant_id" in self.databricks_conn.extra_dejson:
             if self.databricks_conn.login == "" or self.databricks_conn.password == "":
                 raise AirflowException("Azure SPN credentials aren't provided")
             self.log.debug("Using AAD Token for SPN.")
             return await self._a_get_aad_token(DEFAULT_DATABRICKS_SCOPE)
-        elif self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
+        if self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
             self.log.debug("Using AAD Token for managed identity.")
             await self._a_check_azure_metadata_service()
             return await self._a_get_aad_token(DEFAULT_DATABRICKS_SCOPE)
-        elif self.databricks_conn.extra_dejson.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
+        if self.databricks_conn.extra_dejson.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
             self.log.debug("Using AzureDefaultCredential for authentication.")
 
             return await self._a_get_aad_token_for_default_az_credential(DEFAULT_DATABRICKS_SCOPE)
-        elif self.databricks_conn.extra_dejson.get("service_principal_oauth", False):
+        if self.databricks_conn.extra_dejson.get("service_principal_oauth", False):
             if self.databricks_conn.login == "" or self.databricks_conn.password == "":
                 raise AirflowException("Service Principal credentials aren't provided")
             self.log.debug("Using Service Principal Token.")
             return await self._a_get_sp_token(OIDC_TOKEN_SERVICE_URL.format(self.databricks_conn.host))
-        elif raise_error:
+        if raise_error:
             raise AirflowException("Token authentication isn't configured")
 
         return None
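The two _get_token hunks likewise only swap elif for if; because every branch ends in a return or raise, the credential precedence is unchanged. A standalone sketch of that precedence, using a plain dict in place of the Airflow connection (the function name and the key constant below are illustrative stand-ins, not the hook's API):

    # Stand-in for the constant defined in databricks_base.py; the real extra key may differ.
    DEFAULT_AZURE_CREDENTIAL_SETTING_KEY = "use_azure_default_credential"  # assumed value


    def resolve_auth_method(extra: dict, login: str | None, password: str | None) -> str:
        """Mirror the order of checks visible in _get_token: first match wins."""
        if "token" in extra:
            return "token from extra (discouraged; put it in the Password field)"
        if not login and password:
            return "token from password"
        if "azure_tenant_id" in extra:
            return "Azure AD service principal (needs login and password)"
        if extra.get("use_azure_managed_identity", False):
            return "Azure managed identity"
        if extra.get(DEFAULT_AZURE_CREDENTIAL_SETTING_KEY, False):
            return "Azure default credential"
        if extra.get("service_principal_oauth", False):
            return "Databricks service principal OAuth (needs login and password)"
        return "no token auth configured"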
airflow/providers/databricks/hooks/databricks_sql.py

@@ -283,8 +283,7 @@ class DatabricksSqlHook(BaseDatabricksHook, DbApiHook):
             return None
         if return_single_query_results(sql, return_last, split_statements):
             return results[-1]
-        else:
-            return results
+        return results
 
     def _make_common_data_structure(self, result: T | Sequence[T]) -> tuple[Any, ...] | list[tuple[Any, ...]]:
         """Transform the databricks Row objects into namedtuple."""

@@ -297,12 +296,11 @@ class DatabricksSqlHook(BaseDatabricksHook, DbApiHook):
             rows_fields = tuple(rows[0].__fields__)
             rows_object = namedtuple("Row", rows_fields, rename=True)  # type: ignore
             return cast("list[tuple[Any, ...]]", [rows_object(*row) for row in rows])
-        elif isinstance(result, Row):
+        if isinstance(result, Row):
             row_fields = tuple(result.__fields__)
             row_object = namedtuple("Row", row_fields, rename=True)  # type: ignore
             return cast("tuple[Any, ...]", row_object(*result))
-        else:
-            raise TypeError(f"Expected Sequence[Row] or Row, but got {type(result)}")
+        raise TypeError(f"Expected Sequence[Row] or Row, but got {type(result)}")
 
     def bulk_dump(self, table, tmp_file):
         raise NotImplementedError()
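The _make_common_data_structure hunk is the same elif/else cleanup; the conversion of Databricks Row objects into plain namedtuples is untouched. A rough standalone sketch of that conversion, with a stand-in class instead of the real Row type (the diff only shows that __fields__ holds the column names):

    from collections import namedtuple
    from typing import Any, Sequence


    class StubRow:
        """Stand-in for a Databricks Row: column names in __fields__, values positionally."""

        def __init__(self, fields: Sequence[str], values: Sequence[Any]) -> None:
            self.__fields__ = list(fields)
            self._values = list(values)

        def __iter__(self):
            return iter(self._values)


    def rows_to_namedtuples(rows: Sequence[StubRow]) -> list[tuple[Any, ...]]:
        if not rows:
            return []
        # rename=True guards against column names that are not valid Python identifiers
        row_cls = namedtuple("Row", tuple(rows[0].__fields__), rename=True)
        return [row_cls(*row) for row in rows]


    print(rows_to_namedtuples([StubRow(["id", "name"], [1, "a"]), StubRow(["id", "name"], [2, "b"])]))
    # [Row(id=1, name='a'), Row(id=2, name='b')]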
airflow/providers/databricks/operators/databricks.py

@@ -1261,14 +1261,27 @@ class DatabricksTaskBaseOperator(BaseOperator, ABC):
     def databricks_task_key(self) -> str:
         return self._generate_databricks_task_key()
 
-    def _generate_databricks_task_key(self, task_id: str | None = None) -> str:
+    def _generate_databricks_task_key(
+        self, task_id: str | None = None, task_dict: dict[str, BaseOperator] | None = None
+    ) -> str:
         """Create a databricks task key using the hash of dag_id and task_id."""
+        if task_id:
+            if not task_dict:
+                raise ValueError(
+                    "Must pass task_dict if task_id is provided in _generate_databricks_task_key."
+                )
+            _task = task_dict.get(task_id)
+            if _task and hasattr(_task, "databricks_task_key"):
+                _databricks_task_key = _task.databricks_task_key
+            else:
+                task_key = f"{self.dag_id}__{task_id}".encode()
+                _databricks_task_key = hashlib.md5(task_key).hexdigest()
+            return _databricks_task_key
         if not self._databricks_task_key or len(self._databricks_task_key) > 100:
             self.log.info(
                 "databricks_task_key has not be provided or the provided one exceeds 100 characters and will be truncated by the Databricks API. This will cause failure when trying to monitor the task. A task_key will be generated using the hash value of dag_id+task_id"
             )
-            task_id = task_id or self.task_id
-            task_key = f"{self.dag_id}__{task_id}".encode()
+            task_key = f"{self.dag_id}__{self.task_id}".encode()
             self._databricks_task_key = hashlib.md5(task_key).hexdigest()
             self.log.info("Generated databricks task_key: %s", self._databricks_task_key)
         return self._databricks_task_key

@@ -1354,14 +1367,17 @@ class DatabricksTaskBaseOperator(BaseOperator, ABC):
         return {task["task_key"]: task for task in sorted_task_runs}[self.databricks_task_key]
 
     def _convert_to_databricks_workflow_task(
-        self, relevant_upstreams: list[BaseOperator], context: Context | None = None
+        self,
+        relevant_upstreams: list[BaseOperator],
+        task_dict: dict[str, BaseOperator],
+        context: Context | None = None,
     ) -> dict[str, object]:
         """Convert the operator to a Databricks workflow task that can be a task in a workflow."""
         base_task_json = self._get_task_base_json()
         result = {
             "task_key": self.databricks_task_key,
             "depends_on": [
-                {"task_key": self._generate_databricks_task_key(task_id)}
+                {"task_key": self._generate_databricks_task_key(task_id, task_dict)}
                 for task_id in self.upstream_task_ids
                 if task_id in relevant_upstreams
             ],

@@ -1571,7 +1587,10 @@ class DatabricksNotebookOperator(DatabricksTaskBaseOperator):
             self.notebook_packages.append(task_group_package)
 
     def _convert_to_databricks_workflow_task(
-        self, relevant_upstreams: list[BaseOperator], context: Context | None = None
+        self,
+        relevant_upstreams: list[BaseOperator],
+        task_dict: dict[str, BaseOperator],
+        context: Context | None = None,
     ) -> dict[str, object]:
         """Convert the operator to a Databricks workflow task that can be a task in a workflow."""
         databricks_workflow_task_group = self._databricks_workflow_task_group

@@ -1589,7 +1608,7 @@ class DatabricksNotebookOperator(DatabricksTaskBaseOperator):
             **databricks_workflow_task_group.notebook_params,
         }
 
-        return super()._convert_to_databricks_workflow_task(relevant_upstreams, context=context)
+        return super()._convert_to_databricks_workflow_task(relevant_upstreams, task_dict, context=context)
 
 
 class DatabricksTaskOperator(DatabricksTaskBaseOperator):
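This file carries the substantive fix of the release: when building depends_on entries, upstream task keys are now resolved through task_dict, so an upstream operator that has an explicit databricks_task_key keeps that key instead of always getting a fresh hash of dag_id and task_id. A simplified sketch of the resolution logic (a plain function with illustrative names, not the operator's method):

    import hashlib


    def resolve_task_key(dag_id: str, task_id: str, task_dict: dict) -> str:
        """Prefer the upstream operator's own databricks_task_key, else hash dag_id__task_id."""
        task = task_dict.get(task_id)
        if task is not None and getattr(task, "databricks_task_key", None):
            return task.databricks_task_key
        return hashlib.md5(f"{dag_id}__{task_id}".encode()).hexdigest()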
airflow/providers/databricks/operators/databricks_workflow.py

@@ -88,7 +88,7 @@ class _CreateDatabricksWorkflowOperator(BaseOperator):
     :param max_concurrent_runs: The maximum number of concurrent runs for the workflow.
     :param notebook_params: A dictionary of notebook parameters to pass to the workflow. These parameters
         will be passed to all notebooks in the workflow.
-    :param tasks_to_convert: A list of tasks to convert to a Databricks workflow. This list can also be
+    :param tasks_to_convert: A dict of tasks to convert to a Databricks workflow. This list can also be
         populated after instantiation using the `add_task` method.
     """
 

@@ -105,7 +105,7 @@ class _CreateDatabricksWorkflowOperator(BaseOperator):
         job_clusters: list[dict[str, object]] | None = None,
         max_concurrent_runs: int = 1,
         notebook_params: dict | None = None,
-        tasks_to_convert: list[BaseOperator] | None = None,
+        tasks_to_convert: dict[str, BaseOperator] | None = None,
         **kwargs,
     ):
         self.databricks_conn_id = databricks_conn_id

@@ -114,7 +114,7 @@ class _CreateDatabricksWorkflowOperator(BaseOperator):
         self.job_clusters = job_clusters or []
         self.max_concurrent_runs = max_concurrent_runs
         self.notebook_params = notebook_params or {}
-        self.tasks_to_convert = tasks_to_convert or []
+        self.tasks_to_convert = tasks_to_convert or {}
         self.relevant_upstreams = [task_id]
         self.workflow_run_metadata: WorkflowRunMetadata | None = None
         super().__init__(task_id=task_id, **kwargs)

@@ -129,9 +129,9 @@ class _CreateDatabricksWorkflowOperator(BaseOperator):
     def _hook(self) -> DatabricksHook:
         return self._get_hook(caller=self.caller)
 
-    def add_task(self, task: BaseOperator) -> None:
-        """Add a task to the list of tasks to convert to a Databricks workflow."""
-        self.tasks_to_convert.append(task)
+    def add_task(self, task_id, task: BaseOperator) -> None:
+        """Add a task to the dict of tasks to convert to a Databricks workflow."""
+        self.tasks_to_convert[task_id] = task
 
     @property
     def job_name(self) -> str:

@@ -143,9 +143,9 @@ class _CreateDatabricksWorkflowOperator(BaseOperator):
         """Create a workflow json to be used in the Databricks API."""
         task_json = [
             task._convert_to_databricks_workflow_task(  # type: ignore[attr-defined]
-                relevant_upstreams=self.relevant_upstreams, context=context
+                relevant_upstreams=self.relevant_upstreams, task_dict=self.tasks_to_convert, context=context
             )
-            for task in self.tasks_to_convert
+            for task_id, task in self.tasks_to_convert.items()
         ]
 
         default_json = {

@@ -334,7 +334,7 @@ class DatabricksWorkflowTaskGroup(TaskGroup):
 
             task.workflow_run_metadata = create_databricks_workflow_task.output
             create_databricks_workflow_task.relevant_upstreams.append(task.task_id)
-            create_databricks_workflow_task.add_task(task)
+            create_databricks_workflow_task.add_task(task.task_id, task)
 
         for root_task in roots:
             root_task.set_upstream(create_databricks_workflow_task)
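The workflow-side counterpart of the fix: tasks_to_convert changes from a list to a dict keyed by task_id, which is what gets handed to _convert_to_databricks_workflow_task as task_dict above. A toy sketch of the new bookkeeping (illustrative class, not the operator itself):

    class WorkflowDraft:
        """Toy stand-in for the _CreateDatabricksWorkflowOperator task bookkeeping."""

        def __init__(self) -> None:
            self.tasks_to_convert: dict[str, object] = {}

        def add_task(self, task_id: str, task: object) -> None:
            # keyed by task_id so upstream tasks can later be looked up by id
            self.tasks_to_convert[task_id] = task

        def convert_all(self) -> list[str]:
            # mirrors "for task_id, task in self.tasks_to_convert.items()" in the diff
            return [f"converted {task_id}" for task_id, _task in self.tasks_to_convert.items()]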
airflow/providers/databricks/sensors/databricks_partition.py

@@ -226,6 +226,5 @@ class DatabricksPartitionSensor(BaseSensorOperator):
         self.log.debug("Partition sensor result: %s", partition_result)
         if partition_result:
             return True
-        else:
-            message = f"Specified partition(s): {self.partitions} were not found."
-            raise AirflowException(message)
+        message = f"Specified partition(s): {self.partitions} were not found."
+        raise AirflowException(message)
airflow/providers/databricks/utils/databricks.py

@@ -36,17 +36,16 @@ def normalise_json_content(content, json_path: str = "json") -> str | bool | list | dict:
     normalise = normalise_json_content
     if isinstance(content, (str, bool)):
         return content
-    elif isinstance(content, (int, float)):
+    if isinstance(content, (int, float)):
         # Databricks can tolerate either numeric or string types in the API backend.
         return str(content)
-    elif isinstance(content, (list, tuple)):
+    if isinstance(content, (list, tuple)):
         return [normalise(e, f"{json_path}[{i}]") for i, e in enumerate(content)]
-    elif isinstance(content, dict):
+    if isinstance(content, dict):
         return {k: normalise(v, f"{json_path}[{k}]") for k, v in content.items()}
-    else:
-        param_type = type(content)
-        msg = f"Type {param_type} used for parameter {json_path} is not a number or a string"
-        raise AirflowException(msg)
+    param_type = type(content)
+    msg = f"Type {param_type} used for parameter {json_path} is not a number or a string"
+    raise AirflowException(msg)
 
 
 def validate_trigger_event(event: dict):
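The normalise_json_content hunk is again the elif/else cleanup only; the function still coerces numbers to strings, recurses into lists and dicts, and rejects anything else. A small usage sketch (the payload values are made up; the commented output follows the logic visible in the diff):

    from airflow.providers.databricks.utils.databricks import normalise_json_content

    payload = {
        "new_cluster": {"num_workers": 2, "spark_version": "13.3.x-scala2.12"},
        "timeout_seconds": 3600.0,
        "tags": ["prod", 7],
    }

    print(normalise_json_content(payload))
    # {'new_cluster': {'num_workers': '2', 'spark_version': '13.3.x-scala2.12'},
    #  'timeout_seconds': '3600.0', 'tags': ['prod', '7']}
    # Unsupported values (e.g. None) raise AirflowException naming the offending json_path.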
{apache_airflow_providers_databricks-7.3.1rc1.dist-info → apache_airflow_providers_databricks-7.3.2rc1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: apache-airflow-providers-databricks
-Version: 7.3.1rc1
+Version: 7.3.2rc1
 Summary: Provider package apache-airflow-providers-databricks for Apache Airflow
 Keywords: airflow-provider,databricks,airflow,integration
 Author-email: Apache Software Foundation <dev@airflow.apache.org>

@@ -33,8 +33,8 @@ Requires-Dist: apache-airflow-providers-fab ; extra == "fab"
 Requires-Dist: databricks-sdk==0.10.0 ; extra == "sdk"
 Requires-Dist: apache-airflow-providers-standard ; extra == "standard"
 Project-URL: Bug Tracker, https://github.com/apache/airflow/issues
-Project-URL: Changelog, https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.1/changelog.html
-Project-URL: Documentation, https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.1
+Project-URL: Changelog, https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.2/changelog.html
+Project-URL: Documentation, https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.2
 Project-URL: Mastodon, https://fosstodon.org/@airflow
 Project-URL: Slack Chat, https://s.apache.org/airflow-slack
 Project-URL: Source Code, https://github.com/apache/airflow

@@ -69,7 +69,7 @@ Provides-Extra: standard
 
 Package ``apache-airflow-providers-databricks``
 
-Release: ``7.3.1``
+Release: ``7.3.2``
 
 
 `Databricks <https://databricks.com/>`__

@@ -82,7 +82,7 @@ This is a provider package for ``databricks`` provider. All classes for this pro
 are in ``airflow.providers.databricks`` python package.
 
 You can find package information and changelog for the provider
-in the `documentation <https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.1/>`_.
+in the `documentation <https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.2/>`_.
 
 Installation
 ------------

@@ -130,5 +130,5 @@ Dependent package
 ============================================================================================================ ==============
 
 The changelog for the provider package can be found in the
-`changelog <https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.1/changelog.html>`_.
+`changelog <https://airflow.apache.org/docs/apache-airflow-providers-databricks/7.3.2/changelog.html>`_.
 
{apache_airflow_providers_databricks-7.3.1rc1.dist-info → apache_airflow_providers_databricks-7.3.2rc1.dist-info}/RECORD

@@ -1,27 +1,27 @@
 airflow/providers/databricks/LICENSE,sha256=gXPVwptPlW1TJ4HSuG5OMPg-a3h43OGMkZRR1rpwfJA,10850
-airflow/providers/databricks/__init__.py,sha256=
+airflow/providers/databricks/__init__.py,sha256=1iYKoYjdiEI3gbSdzOWztaFJUwgD8AKuHKP04iHxV8o,1497
 airflow/providers/databricks/exceptions.py,sha256=85RklmLOI_PnTzfXNIUd5fAu2aMMUhelwumQAX0wANE,1261
 airflow/providers/databricks/get_provider_info.py,sha256=qNMX4Lft-NItPhFewFBSCi8n0_ISid_MQeETKQ67vdo,5573
 airflow/providers/databricks/version_compat.py,sha256=aHg90_DtgoSnQvILFICexMyNlHlALBdaeWqkX3dFDug,1605
 airflow/providers/databricks/hooks/__init__.py,sha256=mlJxuZLkd5x-iq2SBwD3mvRQpt3YR7wjz_nceyF1IaI,787
-airflow/providers/databricks/hooks/databricks.py,sha256=
-airflow/providers/databricks/hooks/databricks_base.py,sha256=
-airflow/providers/databricks/hooks/databricks_sql.py,sha256=
+airflow/providers/databricks/hooks/databricks.py,sha256=FIoiKWIc9AP3s8Av3Av9yleTg1kI0norwW5CAc6jTQc,28867
+airflow/providers/databricks/hooks/databricks_base.py,sha256=D7-_74QgQaZm1NfHKl_UOXbVAXRo2xjnOx_r1MI-rWI,34871
+airflow/providers/databricks/hooks/databricks_sql.py,sha256=fdxjjeR1u-1dSlbVEBYX0v3XAb8jTT74BVMi3mYY2OE,13092
 airflow/providers/databricks/operators/__init__.py,sha256=mlJxuZLkd5x-iq2SBwD3mvRQpt3YR7wjz_nceyF1IaI,787
-airflow/providers/databricks/operators/databricks.py,sha256=
+airflow/providers/databricks/operators/databricks.py,sha256=E8fgk3Z67uOTSvWvbF23Miv6EruSGOTdFvHn7pGVWp0,80138
 airflow/providers/databricks/operators/databricks_repos.py,sha256=m_72OnnU9df7UB-8SK2Tp5VjfNyjYeAnil3dCKs9SbA,13282
 airflow/providers/databricks/operators/databricks_sql.py,sha256=thBHpt9_LMLJZ0PN-eLCI3AaT8IFq3NAHLDWDFP-Jiw,17031
-airflow/providers/databricks/operators/databricks_workflow.py,sha256=
+airflow/providers/databricks/operators/databricks_workflow.py,sha256=9WNQR9COa90fbqb9qSzut34K9Z1S_ZdpNHAfIcuH454,14227
 airflow/providers/databricks/plugins/__init__.py,sha256=9hdXHABrVpkbpjZgUft39kOFL2xSGeG4GEua0Hmelus,785
 airflow/providers/databricks/plugins/databricks_workflow.py,sha256=1UpsodBLRrTah9zBGBzfM7n1pdkzTo7yilt6QxASspQ,17460
 airflow/providers/databricks/sensors/__init__.py,sha256=9hdXHABrVpkbpjZgUft39kOFL2xSGeG4GEua0Hmelus,785
-airflow/providers/databricks/sensors/databricks_partition.py,sha256=
+airflow/providers/databricks/sensors/databricks_partition.py,sha256=2zWdnqVaSSd7PFTZadfvtbsR7zOI4GwfZFOuEnXRLSM,10023
 airflow/providers/databricks/sensors/databricks_sql.py,sha256=jIA9oGBUCAlXzyrqigxlg7JQDsBFuNIF8ZUEJM8gPxg,5766
 airflow/providers/databricks/triggers/__init__.py,sha256=mlJxuZLkd5x-iq2SBwD3mvRQpt3YR7wjz_nceyF1IaI,787
 airflow/providers/databricks/triggers/databricks.py,sha256=dSogx6GlcJfZ4CFhtlMeWs9sYFEYthP82S_U8-tM2Tk,9240
 airflow/providers/databricks/utils/__init__.py,sha256=9hdXHABrVpkbpjZgUft39kOFL2xSGeG4GEua0Hmelus,785
-airflow/providers/databricks/utils/databricks.py,sha256=
-apache_airflow_providers_databricks-7.3.1rc1.dist-info/entry_points.txt,sha256=
-apache_airflow_providers_databricks-7.3.1rc1.dist-info/WHEEL,sha256=
-apache_airflow_providers_databricks-7.3.1rc1.dist-info/METADATA,sha256=
-apache_airflow_providers_databricks-7.3.1rc1.dist-info/RECORD,,
+airflow/providers/databricks/utils/databricks.py,sha256=s0qEr_DsFhKW4uUiq2VQbtqcj52isYIplPZsUcxGPrI,2862
+apache_airflow_providers_databricks-7.3.2rc1.dist-info/entry_points.txt,sha256=hjmZm3ab2cteTR4t9eE28oKixHwNIKtLCThd6sx3XRQ,227
+apache_airflow_providers_databricks-7.3.2rc1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+apache_airflow_providers_databricks-7.3.2rc1.dist-info/METADATA,sha256=-vW0I-mfTB60WU9-Qk80dGC29S8C97ULEuGW8_GRA1s,6088
+apache_airflow_providers_databricks-7.3.2rc1.dist-info/RECORD,,