databricks-sdk 0.57.0__py3-none-any.whl → 0.58.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of databricks-sdk might be problematic; refer to the package registry's advisory page for more details.

Files changed (30)
  1. databricks/sdk/__init__.py +25 -4
  2. databricks/sdk/service/aibuilder.py +0 -36
  3. databricks/sdk/service/apps.py +1 -3
  4. databricks/sdk/service/billing.py +53 -23
  5. databricks/sdk/service/catalog.py +1692 -150
  6. databricks/sdk/service/cleanrooms.py +3 -22
  7. databricks/sdk/service/compute.py +245 -322
  8. databricks/sdk/service/dashboards.py +129 -162
  9. databricks/sdk/service/database.py +612 -97
  10. databricks/sdk/service/iam.py +3 -3
  11. databricks/sdk/service/jobs.py +6 -129
  12. databricks/sdk/service/marketplace.py +3 -2
  13. databricks/sdk/service/ml.py +713 -262
  14. databricks/sdk/service/oauth2.py +0 -1
  15. databricks/sdk/service/pipelines.py +12 -29
  16. databricks/sdk/service/provisioning.py +7 -125
  17. databricks/sdk/service/qualitymonitorv2.py +0 -18
  18. databricks/sdk/service/serving.py +39 -13
  19. databricks/sdk/service/settings.py +11 -128
  20. databricks/sdk/service/sharing.py +3 -9
  21. databricks/sdk/service/sql.py +94 -74
  22. databricks/sdk/service/vectorsearch.py +0 -19
  23. databricks/sdk/service/workspace.py +2 -6
  24. databricks/sdk/version.py +1 -1
  25. {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.58.0.dist-info}/METADATA +1 -1
  26. {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.58.0.dist-info}/RECORD +30 -30
  27. {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.58.0.dist-info}/WHEEL +0 -0
  28. {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.58.0.dist-info}/licenses/LICENSE +0 -0
  29. {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.58.0.dist-info}/licenses/NOTICE +0 -0
  30. {databricks_sdk-0.57.0.dist-info → databricks_sdk-0.58.0.dist-info}/top_level.txt +0 -0
@@ -21,7 +21,6 @@ class AccessControlRequest:
21
21
  """name of the group"""
22
22
 
23
23
  permission_level: Optional[PermissionLevel] = None
24
- """Permission level"""
25
24
 
26
25
  service_principal_name: Optional[str] = None
27
26
  """application ID of a service principal"""
@@ -1220,7 +1219,6 @@ class Permission:
1220
1219
  inherited_from_object: Optional[List[str]] = None
1221
1220
 
1222
1221
  permission_level: Optional[PermissionLevel] = None
1223
- """Permission level"""
1224
1222
 
1225
1223
  def as_dict(self) -> dict:
1226
1224
  """Serializes the Permission into a dictionary suitable for use as a JSON request body."""
@@ -1387,7 +1385,6 @@ class PermissionsDescription:
1387
1385
  description: Optional[str] = None
1388
1386
 
1389
1387
  permission_level: Optional[PermissionLevel] = None
1390
- """Permission level"""
1391
1388
 
1392
1389
  def as_dict(self) -> dict:
1393
1390
  """Serializes the PermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -3264,6 +3261,7 @@ class CurrentUserAPI:
3264
3261
  def me(self) -> User:
3265
3262
  """Get details about the current method caller's identity.
3266
3263
 
3264
+
3267
3265
  :returns: :class:`User`
3268
3266
  """
3269
3267
 
@@ -4147,6 +4145,7 @@ class UsersAPI:
4147
4145
  def get_permission_levels(self) -> GetPasswordPermissionLevelsResponse:
4148
4146
  """Gets the permission levels that a user can have on an object.
4149
4147
 
4148
+
4150
4149
  :returns: :class:`GetPasswordPermissionLevelsResponse`
4151
4150
  """
4152
4151
 
@@ -4160,6 +4159,7 @@ class UsersAPI:
4160
4159
  def get_permissions(self) -> PasswordPermissions:
4161
4160
  """Gets the permissions of all passwords. Passwords can inherit permissions from their root object.
4162
4161
 
4162
+
4163
4163
  :returns: :class:`PasswordPermissions`
4164
4164
  """
4165
4165
 
@@ -219,11 +219,6 @@ class BaseRun:
219
219
  """The URL to the detail page of the run."""
220
220
 
221
221
  run_type: Optional[RunType] = None
222
- """The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. *
223
- `WORKFLOW_RUN`: Workflow run. A run created with [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit
224
- run. A run created with :method:jobs/submit.
225
-
226
- [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow"""
227
222
 
228
223
  schedule: Optional[CronSchedule] = None
229
224
  """The cron schedule that triggered this run if it was triggered by the periodic scheduler."""
@@ -244,7 +239,6 @@ class BaseRun:
244
239
  """Deprecated. Please use the `status` field instead."""
245
240
 
246
241
  status: Optional[RunStatus] = None
247
- """The current status of the run"""
248
242
 
249
243
  tasks: Optional[List[RunTask]] = None
250
244
  """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -253,19 +247,8 @@ class BaseRun:
253
247
  root to determine if more results are available."""
254
248
 
255
249
  trigger: Optional[TriggerType] = None
256
- """The type of trigger that fired this run.
257
-
258
- * `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`:
259
- One time triggers that fire a single run. This occurs you triggered a single run on demand
260
- through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
261
- previously failed run. This occurs when you request to re-run the job in case of failures. *
262
- `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
263
- Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
264
- triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
265
- restart a continuous job run."""
266
250
 
267
251
  trigger_info: Optional[TriggerInfo] = None
268
- """Additional details about what triggered the run"""
269
252
 
270
253
  def as_dict(self) -> dict:
271
254
  """Serializes the BaseRun into a dictionary suitable for use as a JSON request body."""
@@ -1004,7 +987,6 @@ class CreateJob:
1004
987
  are used, `git_source` must be defined on the job."""
1005
988
 
1006
989
  health: Optional[JobsHealthRules] = None
1007
- """An optional set of health rules that can be defined for this job."""
1008
990
 
1009
991
  job_clusters: Optional[List[JobCluster]] = None
1010
992
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
@@ -1043,10 +1025,6 @@ class CreateJob:
1043
1025
  """The queue settings of the job."""
1044
1026
 
1045
1027
  run_as: Optional[JobRunAs] = None
1046
- """Write-only setting. Specifies the user or service principal that the job runs as. If not
1047
- specified, the job runs as the user who created the job.
1048
-
1049
- Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown."""
1050
1028
 
1051
1029
  schedule: Optional[CronSchedule] = None
1052
1030
  """An optional periodic schedule for this job. The default behavior is that the job only runs when
@@ -2557,8 +2535,6 @@ class GitSource:
2557
2535
  with git_branch or git_tag."""
2558
2536
 
2559
2537
  git_snapshot: Optional[GitSnapshot] = None
2560
- """Read-only state of the remote repository at the time the job was run. This field is only
2561
- included on job runs."""
2562
2538
 
2563
2539
  git_tag: Optional[str] = None
2564
2540
  """Name of the tag to be checked out and used by this job. This field cannot be specified in
@@ -2731,7 +2707,6 @@ class JobAccessControlRequest:
2731
2707
  """name of the group"""
2732
2708
 
2733
2709
  permission_level: Optional[JobPermissionLevel] = None
2734
- """Permission level"""
2735
2710
 
2736
2711
  service_principal_name: Optional[str] = None
2737
2712
  """application ID of a service principal"""
@@ -3054,9 +3029,6 @@ class JobEnvironment:
3054
3029
  """The key of an environment. It has to be unique within a job."""
3055
3030
 
3056
3031
  spec: Optional[compute.Environment] = None
3057
- """The environment entity used to preserve serverless environment side panel, jobs' environment for
3058
- non-notebook task, and DLT's environment for classic and serverless pipelines. In this minimal
3059
- environment spec, only pip dependencies are supported."""
3060
3032
 
3061
3033
  def as_dict(self) -> dict:
3062
3034
  """Serializes the JobEnvironment into a dictionary suitable for use as a JSON request body."""
@@ -3197,7 +3169,6 @@ class JobPermission:
3197
3169
  inherited_from_object: Optional[List[str]] = None
3198
3170
 
3199
3171
  permission_level: Optional[JobPermissionLevel] = None
3200
- """Permission level"""
3201
3172
 
3202
3173
  def as_dict(self) -> dict:
3203
3174
  """Serializes the JobPermission into a dictionary suitable for use as a JSON request body."""
@@ -3285,7 +3256,6 @@ class JobPermissionsDescription:
3285
3256
  description: Optional[str] = None
3286
3257
 
3287
3258
  permission_level: Optional[JobPermissionLevel] = None
3288
- """Permission level"""
3289
3259
 
3290
3260
  def as_dict(self) -> dict:
3291
3261
  """Serializes the JobPermissionsDescription into a dictionary suitable for use as a JSON request body."""
@@ -3435,7 +3405,6 @@ class JobSettings:
3435
3405
  are used, `git_source` must be defined on the job."""
3436
3406
 
3437
3407
  health: Optional[JobsHealthRules] = None
3438
- """An optional set of health rules that can be defined for this job."""
3439
3408
 
3440
3409
  job_clusters: Optional[List[JobCluster]] = None
3441
3410
  """A list of job cluster specifications that can be shared and reused by tasks of this job.
@@ -3474,10 +3443,6 @@ class JobSettings:
3474
3443
  """The queue settings of the job."""
3475
3444
 
3476
3445
  run_as: Optional[JobRunAs] = None
3477
- """Write-only setting. Specifies the user or service principal that the job runs as. If not
3478
- specified, the job runs as the user who created the job.
3479
-
3480
- Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown."""
3481
3446
 
3482
3447
  schedule: Optional[CronSchedule] = None
3483
3448
  """An optional periodic schedule for this job. The default behavior is that the job only runs when
@@ -3736,18 +3701,8 @@ class JobsHealthOperator(Enum):
3736
3701
  @dataclass
3737
3702
  class JobsHealthRule:
3738
3703
  metric: JobsHealthMetric
3739
- """Specifies the health metric that is being evaluated for a particular health rule.
3740
-
3741
- * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * `STREAMING_BACKLOG_BYTES`:
3742
- An estimate of the maximum bytes of data waiting to be consumed across all streams. This metric
3743
- is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag
3744
- across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An estimate
3745
- of the maximum consumer delay across all streams. This metric is in Public Preview. *
3746
- `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of outstanding files across all
3747
- streams. This metric is in Public Preview."""
3748
3704
 
3749
3705
  op: JobsHealthOperator
3750
- """Specifies the operator used to compare the health metric value with the specified threshold."""
3751
3706
 
3752
3707
  value: int
3753
3708
  """Specifies the threshold value that the health metric should obey to satisfy the health rule."""
@@ -4453,11 +4408,6 @@ class PythonWheelTask:
4453
4408
  @dataclass
4454
4409
  class QueueDetails:
4455
4410
  code: Optional[QueueDetailsCodeCode] = None
4456
- """The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was queued due to
4457
- reaching the workspace limit of active task runs. * `MAX_CONCURRENT_RUNS_REACHED`: The run was
4458
- queued due to reaching the per-job limit of concurrent job runs. *
4459
- `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching the workspace limit of
4460
- active run job tasks."""
4461
4411
 
4462
4412
  message: Optional[str] = None
4463
4413
  """A descriptive message with the queuing details. This field is unstructured, and its exact format
@@ -4548,7 +4498,6 @@ class RepairHistoryItem:
4548
4498
  """Deprecated. Please use the `status` field instead."""
4549
4499
 
4550
4500
  status: Optional[RunStatus] = None
4551
- """The current status of the run"""
4552
4501
 
4553
4502
  task_run_ids: Optional[List[int]] = None
4554
4503
  """The run IDs of the task runs that ran as part of this repair history item."""
@@ -5292,11 +5241,6 @@ class Run:
5292
5241
  """The URL to the detail page of the run."""
5293
5242
 
5294
5243
  run_type: Optional[RunType] = None
5295
- """The type of a run. * `JOB_RUN`: Normal job run. A run created with :method:jobs/runNow. *
5296
- `WORKFLOW_RUN`: Workflow run. A run created with [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit
5297
- run. A run created with :method:jobs/submit.
5298
-
5299
- [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow"""
5300
5244
 
5301
5245
  schedule: Optional[CronSchedule] = None
5302
5246
  """The cron schedule that triggered this run if it was triggered by the periodic scheduler."""
@@ -5317,7 +5261,6 @@ class Run:
5317
5261
  """Deprecated. Please use the `status` field instead."""
5318
5262
 
5319
5263
  status: Optional[RunStatus] = None
5320
- """The current status of the run"""
5321
5264
 
5322
5265
  tasks: Optional[List[RunTask]] = None
5323
5266
  """The list of tasks performed by the run. Each task has its own `run_id` which you can use to call
@@ -5326,19 +5269,8 @@ class Run:
5326
5269
  root to determine if more results are available."""
5327
5270
 
5328
5271
  trigger: Optional[TriggerType] = None
5329
- """The type of trigger that fired this run.
5330
-
5331
- * `PERIODIC`: Schedules that periodically trigger runs, such as a cron scheduler. * `ONE_TIME`:
5332
- One time triggers that fire a single run. This occurs you triggered a single run on demand
5333
- through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
5334
- previously failed run. This occurs when you request to re-run the job in case of failures. *
5335
- `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
5336
- Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
5337
- triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
5338
- restart a continuous job run."""
5339
5272
 
5340
5273
  trigger_info: Optional[TriggerInfo] = None
5341
- """Additional details about what triggered the run"""
5342
5274
 
5343
5275
  def as_dict(self) -> dict:
5344
5276
  """Serializes the Run into a dictionary suitable for use as a JSON request body."""
@@ -6482,7 +6414,6 @@ class RunStatus:
6482
6414
  """If the run was queued, details about the reason for queuing the run."""
6483
6415
 
6484
6416
  state: Optional[RunLifecycleStateV2State] = None
6485
- """The current state of the run."""
6486
6417
 
6487
6418
  termination_details: Optional[TerminationDetails] = None
6488
6419
  """If the run is in a TERMINATING or TERMINATED state, details about the reason for terminating the
@@ -6719,7 +6650,6 @@ class RunTask:
6719
6650
  """Deprecated. Please use the `status` field instead."""
6720
6651
 
6721
6652
  status: Optional[RunStatus] = None
6722
- """The current status of the run"""
6723
6653
 
6724
6654
  timeout_seconds: Optional[int] = None
6725
6655
  """An optional timeout applied to each run of this job task. A value of `0` means no timeout."""
@@ -7154,10 +7084,6 @@ class SparkSubmitTask:
7154
7084
  @dataclass
7155
7085
  class SqlAlertOutput:
7156
7086
  alert_state: Optional[SqlAlertState] = None
7157
- """The state of the SQL alert.
7158
-
7159
- * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not fulfill trigger
7160
- conditions * TRIGGERED: alert evaluated and fulfilled trigger conditions"""
7161
7087
 
7162
7088
  output_link: Optional[str] = None
7163
7089
  """The link to find the output results."""
@@ -7790,7 +7716,6 @@ class SubmitRun:
7790
7716
  are used, `git_source` must be defined on the job."""
7791
7717
 
7792
7718
  health: Optional[JobsHealthRules] = None
7793
- """An optional set of health rules that can be defined for this job."""
7794
7719
 
7795
7720
  idempotency_token: Optional[str] = None
7796
7721
  """An optional token that can be used to guarantee the idempotency of job run requests. If a run
@@ -7999,7 +7924,6 @@ class SubmitTask:
7999
7924
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
8000
7925
 
8001
7926
  health: Optional[JobsHealthRules] = None
8002
- """An optional set of health rules that can be defined for this job."""
8003
7927
 
8004
7928
  libraries: Optional[List[compute.Library]] = None
8005
7929
  """An optional list of libraries to be installed on the cluster. The default value is an empty
@@ -8428,7 +8352,6 @@ class Task:
8428
8352
  gen_ai_compute_task: Optional[GenAiComputeTask] = None
8429
8353
 
8430
8354
  health: Optional[JobsHealthRules] = None
8431
- """An optional set of health rules that can be defined for this job."""
8432
8355
 
8433
8356
  job_cluster_key: Optional[str] = None
8434
8357
  """If job_cluster_key, this task is executed reusing the cluster specified in
@@ -8940,55 +8863,12 @@ class TerminationCodeCode(Enum):
8940
8863
  @dataclass
8941
8864
  class TerminationDetails:
8942
8865
  code: Optional[TerminationCodeCode] = None
8943
- """The code indicates why the run was terminated. Additional codes might be introduced in future
8944
- releases. * `SUCCESS`: The run was completed successfully. * `SUCCESS_WITH_FAILURES`: The run
8945
- was completed successfully but some child runs failed. * `USER_CANCELED`: The run was
8946
- successfully canceled during execution by a user. * `CANCELED`: The run was canceled during
8947
- execution by the Databricks platform; for example, if the maximum run duration was exceeded. *
8948
- `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency
8949
- type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The
8950
- run encountered an unexpected error. Refer to the state message for further details. *
8951
- `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. *
8952
- `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further
8953
- details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when
8954
- communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because
8955
- it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The
8956
- workspace has reached the quota for the maximum number of concurrent active runs. Consider
8957
- scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it
8958
- tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The
8959
- number of cluster creation, start, and upsize requests have exceeded the allotted rate limit.
8960
- Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run
8961
- failed due to an error when accessing the customer blob storage. Refer to the state message for
8962
- further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more
8963
- details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to
8964
- a permission issue while accessing a resource. Refer to the state message for further details. *
8965
- `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer
8966
- to the state message for further details. The causes might include, but are not limited to: The
8967
- provided library is invalid, there are insufficient permissions to install the library, and so
8968
- forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum
8969
- concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a
8970
- cluster that has already reached the maximum number of contexts it is configured to create. See:
8971
- [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to
8972
- the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an
8973
- invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The
8974
- run failed due to a cloud provider issue. Refer to the state message for further details. *
8975
- `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
8976
- limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user.
8977
-
8978
- [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now"""
8979
8866
 
8980
8867
  message: Optional[str] = None
8981
8868
  """A descriptive message with the termination details. This field is unstructured and the format
8982
8869
  might change."""
8983
8870
 
8984
8871
  type: Optional[TerminationTypeType] = None
8985
- """* `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An error occurred in the
8986
- Databricks platform. Please look at the [status page] or contact support if the issue persists.
8987
- * `CLIENT_ERROR`: The run was terminated because of an error caused by user input or the job
8988
- configuration. * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud
8989
- provider.
8990
-
8991
- [status page]: https://status.databricks.com/"""
8992
8872
 
8993
8873
  def as_dict(self) -> dict:
8994
8874
  """Serializes the TerminationDetails into a dictionary suitable for use as a JSON request body."""
@@ -9154,10 +9034,13 @@ class TriggerType(Enum):
9154
9034
  through the UI or the API. * `RETRY`: Indicates a run that is triggered as a retry of a
9155
9035
  previously failed run. This occurs when you request to re-run the job in case of failures. *
9156
9036
  `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`:
9157
- Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is
9158
- triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually
9159
- restart a continuous job run."""
9037
+ Indicates a run that is triggered by a file arrival. * `CONTINUOUS`: Indicates a run that is
9038
+ triggered by a continuous job. * `TABLE`: Indicates a run that is triggered by a table update. *
9039
+ `CONTINUOUS_RESTART`: Indicates a run created by user to manually restart a continuous job run.
9040
+ * `MODEL`: Indicates a run that is triggered by a model update."""
9160
9041
 
9042
+ CONTINUOUS = "CONTINUOUS"
9043
+ CONTINUOUS_RESTART = "CONTINUOUS_RESTART"
9161
9044
  FILE_ARRIVAL = "FILE_ARRIVAL"
9162
9045
  ONE_TIME = "ONE_TIME"
9163
9046
  PERIODIC = "PERIODIC"
@@ -9587,7 +9470,6 @@ class JobsAPI:
9587
9470
  Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are
9588
9471
  used, `git_source` must be defined on the job.
9589
9472
  :param health: :class:`JobsHealthRules` (optional)
9590
- An optional set of health rules that can be defined for this job.
9591
9473
  :param job_clusters: List[:class:`JobCluster`] (optional)
9592
9474
  A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries
9593
9475
  cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
@@ -9617,10 +9499,6 @@ class JobsAPI:
9617
9499
  :param queue: :class:`QueueSettings` (optional)
9618
9500
  The queue settings of the job.
9619
9501
  :param run_as: :class:`JobRunAs` (optional)
9620
- Write-only setting. Specifies the user or service principal that the job runs as. If not specified,
9621
- the job runs as the user who created the job.
9622
-
9623
- Either `user_name` or `service_principal_name` should be specified. If not, an error is thrown.
9624
9502
  :param schedule: :class:`CronSchedule` (optional)
9625
9503
  An optional periodic schedule for this job. The default behavior is that the job only runs when
9626
9504
  triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.
@@ -10515,7 +10393,6 @@ class JobsAPI:
10515
10393
  Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are
10516
10394
  used, `git_source` must be defined on the job.
10517
10395
  :param health: :class:`JobsHealthRules` (optional)
10518
- An optional set of health rules that can be defined for this job.
10519
10396
  :param idempotency_token: str (optional)
10520
10397
  An optional token that can be used to guarantee the idempotency of job run requests. If a run with
10521
10398
  the provided token already exists, the request does not create a new run but returns the ID of the
@@ -2282,7 +2282,6 @@ class ListingSummary:
2282
2282
  share: Optional[ShareInfo] = None
2283
2283
 
2284
2284
  status: Optional[ListingStatus] = None
2285
- """Enums"""
2286
2285
 
2287
2286
  subtitle: Optional[str] = None
2288
2287
 
@@ -2462,7 +2461,6 @@ class PersonalizationRequest:
2462
2461
  comment: Optional[str] = None
2463
2462
 
2464
2463
  contact_info: Optional[ContactInfo] = None
2465
- """contact info for the consumer requesting data or performing a listing installation"""
2466
2464
 
2467
2465
  created_at: Optional[int] = None
2468
2466
 
@@ -4687,6 +4685,7 @@ class ProviderProviderAnalyticsDashboardsAPI:
4687
4685
  """Create provider analytics dashboard. Returns Marketplace specific `id`. Not to be confused with the
4688
4686
  Lakeview dashboard id.
4689
4687
 
4688
+
4690
4689
  :returns: :class:`ProviderAnalyticsDashboard`
4691
4690
  """
4692
4691
 
@@ -4700,6 +4699,7 @@ class ProviderProviderAnalyticsDashboardsAPI:
4700
4699
  def get(self) -> ListProviderAnalyticsDashboardResponse:
4701
4700
  """Get provider analytics dashboard.
4702
4701
 
4702
+
4703
4703
  :returns: :class:`ListProviderAnalyticsDashboardResponse`
4704
4704
  """
4705
4705
 
@@ -4713,6 +4713,7 @@ class ProviderProviderAnalyticsDashboardsAPI:
4713
4713
  def get_latest_version(self) -> GetLatestVersionProviderAnalyticsDashboardResponse:
4714
4714
  """Get latest version of provider analytics dashboard.
4715
4715
 
4716
+
4716
4717
  :returns: :class:`GetLatestVersionProviderAnalyticsDashboardResponse`
4717
4718
  """
4718
4719