yandexcloud 0.339.0__py3-none-any.whl → 0.340.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
yandexcloud/__init__.py CHANGED
@@ -11,4 +11,4 @@ from yandexcloud._retry_interceptor import RetryInterceptor
  from yandexcloud._retry_policy import RetryPolicy, ThrottlingMode
  from yandexcloud._sdk import SDK
 
- __version__ = "0.339.0"
+ __version__ = "0.340.0"
yandexcloud/_sdk.py CHANGED
@@ -190,6 +190,7 @@ _supported_modules = [
      ("yandex.cloud.serverless.containers", "serverless-containers"),
      ("yandex.cloud.serverless.functions", "serverless-functions"),
      ("yandex.cloud.serverless.triggers", "serverless-triggers"),
+     ("yandex.cloud.spark", "managed-spark"),
      ("yandex.cloud.storage", "storage-api"),
      ("yandex.cloud.vpc", "vpc"),
      ("yandex.cloud.ydb", "ydb"),
yandexcloud/_wrappers/__init__.py CHANGED
@@ -1,6 +1,12 @@
  from typing import TYPE_CHECKING
 
  from yandexcloud._wrappers.dataproc import Dataproc, InitializationAction
+ from yandexcloud._wrappers.spark import (
+     PysparkJobParameters,
+     Spark,
+     SparkClusterParameters,
+     SparkJobParameters,
+ )
 
  if TYPE_CHECKING:
      from yandexcloud._sdk import SDK
@@ -13,3 +19,12 @@ class Wrappers:
          self.Dataproc.sdk = sdk
          # pylint: disable-next=invalid-name
          self.InitializationAction = InitializationAction
+         # pylint: disable-next=invalid-name
+         self.Spark = Spark
+         # pylint: disable-next=invalid-name
+         self.SparkClusterParameters = SparkClusterParameters
+         # pylint: disable-next=invalid-name
+         self.SparkJobParameters = SparkJobParameters
+         # pylint: disable-next=invalid-name
+         self.PysparkJobParameters = PysparkJobParameters
+         self.Spark.sdk = sdk
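The assignments above mirror the existing Dataproc wiring, so the Spark wrapper classes become attributes of the SDK's wrappers object and the class-level Spark.sdk reference is pre-set. A minimal sketch, assuming the SDK exposes this Wrappers instance as sdk.wrappers (as it does for Dataproc); the token, bucket, and file name are placeholders:

    import yandexcloud

    sdk = yandexcloud.SDK(token="<oauth-or-iam-token>")

    # No sdk argument is needed: the Wrappers constructor has already set Spark.sdk.
    spark = sdk.wrappers.Spark()
    job_spec = sdk.wrappers.PysparkJobParameters(
        name="example-job",
        main_python_file_uri="s3a://<bucket>/job.py",
    )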
yandexcloud/_wrappers/spark/__init__.py ADDED
@@ -0,0 +1,603 @@
+ # pylint: disable=no-member
+ # pylint: disable=duplicate-code
+ # mypy: ignore-errors
+ import logging
+ import random
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional
+
+ import yandex.cloud.spark.v1.cluster_pb2 as cluster_pb
+ import yandex.cloud.spark.v1.cluster_service_pb2 as cluster_service_pb
+ import yandex.cloud.spark.v1.cluster_service_pb2_grpc as cluster_service_grpc_pb
+ import yandex.cloud.spark.v1.job_pb2 as job_pb
+ import yandex.cloud.spark.v1.job_service_pb2 as job_service_pb
+ import yandex.cloud.spark.v1.job_service_pb2_grpc as job_service_grpc_pb
+ import yandex.cloud.spark.v1.maintenance_pb2 as maintenance_pb
+
+
+ @dataclass
+ class SparkClusterParameters:
+     """
+     Spark Cluster Parameters.
+
+     Constructor arguments:
+     :param folder_id: ID of the folder in which cluster should be created.
+     :type folder_id: str
+     :param service_account_id: Service account that will be used to access
+         cloud resources.
+     :type service_account_id: str
+     :param name: Name of the Spark cluster. The name must be unique within
+         the folder.
+     :type name: str, optional
+     :param description: Text description of the Spark cluster.
+     :type description: str, optional
+     :param labels: Cluster labels as key:value pairs.
+     :type labels: Dict[str, str], optional
+     :param subnet_ids: Network subnets.
+     :type subnet_ids: List[str]
+     :param security_group_ids: Network security groups.
+     :type security_group_ids: List[str]
+     :param deletion_protection: Deletion Protection inhibits deletion of
+         the cluster.
+     :type deletion_protection: bool
+     :param driver_pool_resource_preset: Resource preset ID for Driver pool.
+     :type driver_pool_resource_preset: str
+     :param driver_pool_size: Node count for Driver pool with fixed size.
+     :type driver_pool_size: int, optional
+     :param driver_pool_min_size: Minimum node count for Driver pool with
+         autoscaling.
+     :type driver_pool_min_size: int, optional
+     :param driver_pool_max_size: Maximum node count for Driver pool with
+         autoscaling.
+     :type driver_pool_max_size: int, optional
+     :param executor_pool_resource_preset: Resource preset ID for Executor
+         pool.
+     :type executor_pool_resource_preset: str
+     :param executor_pool_size: Node count for Executor pool with fixed size.
+     :type executor_pool_size: int, optional
+     :param executor_pool_min_size: Minimum node count for Executor pool
+         with autoscaling.
+     :type executor_pool_min_size: int, optional
+     :param executor_pool_max_size: Maximum node count for Executor pool
+         with autoscaling.
+     :type executor_pool_max_size: int, optional
+     :param logging_enabled: Enable sending logs to Cloud Logging.
+     :type logging_enabled: bool, optional
+     :param log_group_id: Log Group ID in Cloud Logging to store cluster
+         logs.
+     :type log_group_id: str, optional
+     :param log_folder_id: Folder ID to store cluster logs in default log
+         group.
+     :type log_folder_id: str, optional
+     :param history_server_enabled: Enable Spark History Server.
+     :type history_server_enabled: bool, optional
+     :param pip_packages: Python packages that need to be installed using pip
+         (in pip requirement format).
+     :type pip_packages: List[str], optional
+     :param deb_packages: Deb-packages that need to be installed using system
+         package manager.
+     :type deb_packages: List[str], optional
+     :param metastore_cluster_id: Metastore cluster ID for default spark
+         configuration.
+     :type metastore_cluster_id: str, optional
+     :param maintenance_weekday: Weekday number for maintenance operations.
+         From 1 - Monday to 7 - Sunday.
+     :type maintenance_weekday: int, optional
+     :param maintenance_hour: Hour of the day for maintenance operations.
+         From 1 to 24.
+     :type maintenance_hour: int, optional
+     """
+
+     # pylint: disable=too-many-instance-attributes
+
+     folder_id: str
+     service_account_id: str
+     name: Optional[str] = None
+     description: str = ""
+     labels: Optional[Dict[str, str]] = None
+     subnet_ids: Optional[List[str]] = None
+     security_group_ids: Optional[List[str]] = None
+     deletion_protection: bool = False
+     driver_pool_resource_preset: str = ""
+     driver_pool_size: int = 0
+     driver_pool_min_size: int = 0
+     driver_pool_max_size: int = 0
+     executor_pool_resource_preset: str = ""
+     executor_pool_size: int = 0
+     executor_pool_min_size: int = 0
+     executor_pool_max_size: int = 0
+     logging_enabled: bool = True
+     log_group_id: Optional[str] = None
+     log_folder_id: Optional[str] = None
+     history_server_enabled: bool = True
+     pip_packages: Optional[List[str]] = None
+     deb_packages: Optional[List[str]] = None
+     metastore_cluster_id: str = ""
+     maintenance_weekday: Optional[int] = None
+     maintenance_hour: Optional[int] = None
+
+
+ @dataclass
+ class SparkJobParameters:
+     """
+     Spark Job Parameters.
+
+     Constructor arguments:
+     :param name: Name of the job.
+     :type name: str, optional
+     :param main_jar_file_uri: URI of the jar file that contains the main
+         class.
+     :type main_jar_file_uri: str
+     :param main_class: Name of the main class of the job.
+     :type main_class: str, optional
+     :param args: Arguments to be passed to the job.
+     :type args: List[str], optional
+     :param properties: Spark properties for the job.
+     :type properties: Dict[str, str], optional
+     :param packages: List of maven coordinates of jars to include on the
+         driver and executor classpaths.
+     :type packages: List[str], optional
+     :param file_uris: URIs of files to be placed in the working directory.
+     :type file_uris: List[str], optional
+     :param jar_file_uris: URIs of JAR dependencies to be added to the
+         CLASSPATH.
+     :type jar_file_uris: List[str], optional
+     :param archive_uris: URIs of archives to be extracted into the working
+         directory.
+     :type archive_uris: List[str], optional
+     :param repositories: List of additional remote repositories to search
+         for the maven coordinates given with --packages.
+     :type repositories: List[str], optional
+     :param exclude_packages: List of groupId:artifactId, to exclude while
+         resolving the dependencies provided in --packages to avoid
+         dependency conflicts.
+     :type exclude_packages: List[str], optional
+     """
+
+     # pylint: disable=too-many-instance-attributes
+
+     name: str = ""
+     main_jar_file_uri: str = ""
+     main_class: str = ""
+     args: Optional[List[str]] = None
+     properties: Optional[Dict[str, str]] = None
+     packages: Optional[List[str]] = None
+     file_uris: Optional[List[str]] = None
+     jar_file_uris: Optional[List[str]] = None
+     archive_uris: Optional[List[str]] = None
+     repositories: Optional[List[str]] = None
+     exclude_packages: Optional[List[str]] = None
+
+
+ @dataclass
+ class PysparkJobParameters:
+     """
+     Pyspark Job Parameters.
+
+     Constructor arguments:
+     :param name: Name of the job.
+     :type name: str, optional
+     :param main_python_file_uri: URI of the main Python file.
+     :type main_python_file_uri: str
+     :param args: Arguments to be passed to the job.
+     :type args: List[str], optional
+     :param properties: Spark properties for the job.
+     :type properties: Dict[str, str], optional
+     :param packages: List of maven coordinates of jars to include on the
+         driver and executor classpaths.
+     :type packages: List[str], optional
+     :param file_uris: URIs of files to be placed in the working directory.
+     :type file_uris: List[str], optional
+     :param python_file_uris: URIs of python files used in the job.
+     :type python_file_uris: List[str], optional
+     :param jar_file_uris: URIs of JAR dependencies to be added to the
+         CLASSPATH.
+     :type jar_file_uris: List[str], optional
+     :param archive_uris: URIs of archives to be extracted into the working
+         directory.
+     :type archive_uris: List[str], optional
+     :param repositories: List of additional remote repositories to search
+         for the maven coordinates given with --packages.
+     :type repositories: List[str], optional
+     :param exclude_packages: List of groupId:artifactId, to exclude while
+         resolving the dependencies provided in --packages to avoid
+         dependency conflicts.
+     :type exclude_packages: List[str], optional
+     """
+
+     # pylint: disable=too-many-instance-attributes
+
+     name: str = ""
+     main_python_file_uri: str = ""
+     args: Optional[List[str]] = None
+     properties: Optional[Dict[str, str]] = None
+     packages: Optional[List[str]] = None
+     file_uris: Optional[List[str]] = None
+     python_file_uris: Optional[List[str]] = None
+     jar_file_uris: Optional[List[str]] = None
+     archive_uris: Optional[List[str]] = None
+     repositories: Optional[List[str]] = None
+     exclude_packages: Optional[List[str]] = None
+
+
+ class Spark:
+     """
+     A base hook for Yandex.Cloud Managed Service for Apache Spark
+
+     :param logger: Logger object
+     :type logger: Optional[logging.Logger]
+     :param sdk: SDK object. Normally is being set by Wrappers constructor
+     :type sdk: yandexcloud.SDK
+     """
+
+     def __init__(self, logger=None, sdk=None):
+         self.sdk = sdk or self.sdk
+         self.log = logger
+         if not self.log:
+             self.log = logging.getLogger()
+             self.log.addHandler(logging.NullHandler())
+         self.cluster_id = None
+
+     def create_cluster(self, spec: SparkClusterParameters) -> str:
+         """
+         Create cluster.
+
+         :param spec: Cluster parameters.
+         :type spec: SparkClusterParameters
+
+         :return: Operation result
+         :rtype: OperationResult
+         """
+
+         # pylint: disable=too-many-branches
+
+         if not spec.folder_id:
+             raise RuntimeError("Folder ID must be specified to create cluster.")
+
+         if not spec.name:
+             random_int = random.randint(0, 999)
+             spec.name = f"spark-{random_int}"
+
+         if spec.driver_pool_max_size > 0:
+             driver_pool = cluster_pb.ResourcePool(
+                 resource_preset_id=spec.driver_pool_resource_preset,
+                 scale_policy=cluster_pb.ScalePolicy(
+                     auto_scale=cluster_pb.ScalePolicy.AutoScale(
+                         min_size=spec.driver_pool_min_size,
+                         max_size=spec.driver_pool_max_size,
+                     ),
+                 ),
+             )
+         elif spec.driver_pool_size > 0:
+             driver_pool = cluster_pb.ResourcePool(
+                 resource_preset_id=spec.driver_pool_resource_preset,
+                 scale_policy=cluster_pb.ScalePolicy(
+                     fixed_scale=cluster_pb.ScalePolicy.FixedScale(
+                         size=spec.driver_pool_size,
+                     ),
+                 ),
+             )
+         else:
+             raise RuntimeError("Driver pool size is not specified.")
+
+         if spec.executor_pool_max_size > 0:
+             executor_pool = cluster_pb.ResourcePool(
+                 resource_preset_id=spec.executor_pool_resource_preset,
+                 scale_policy=cluster_pb.ScalePolicy(
+                     auto_scale=cluster_pb.ScalePolicy.AutoScale(
+                         min_size=spec.executor_pool_min_size,
+                         max_size=spec.executor_pool_max_size,
+                     ),
+                 ),
+             )
+         elif spec.executor_pool_size > 0:
+             executor_pool = cluster_pb.ResourcePool(
+                 resource_preset_id=spec.executor_pool_resource_preset,
+                 scale_policy=cluster_pb.ScalePolicy(
+                     fixed_scale=cluster_pb.ScalePolicy.FixedScale(
+                         size=spec.executor_pool_size,
+                     ),
+                 ),
+             )
+         else:
+             raise RuntimeError("Executor pool size is not specified.")
+
+         if spec.log_group_id is not None:
+             logging_config = cluster_pb.LoggingConfig(
+                 enabled=True,
+                 log_group_id=spec.log_group_id,
+             )
+         elif spec.log_folder_id is not None:
+             logging_config = cluster_pb.LoggingConfig(
+                 enabled=True,
+                 folder_id=spec.log_folder_id,
+             )
+         elif spec.logging_enabled:
+             logging_config = cluster_pb.LoggingConfig(
+                 enabled=True,
+                 folder_id=spec.folder_id,
+             )
+         else:
+             logging_config = cluster_pb.LoggingConfig(
+                 enabled=False,
+             )
+
+         if spec.maintenance_hour is not None and spec.maintenance_weekday is not None:
+             if not 1 <= spec.maintenance_hour <= 24:
+                 raise RuntimeError("Maintenance hour is not valid.")
+
+             if not 0 <= spec.maintenance_weekday <= 7:
+                 raise RuntimeError("Maintenance weekday is not valid.")
+
+             maintenance_window = maintenance_pb.MaintenanceWindow(
+                 weekly_maintenance_window=maintenance_pb.WeeklyMaintenanceWindow(
+                     day=spec.maintenance_weekday,
+                     hour=spec.maintenance_hour,
+                 ),
+             )
+         else:
+             maintenance_window = maintenance_pb.MaintenanceWindow(
+                 anytime=maintenance_pb.AnytimeMaintenanceWindow(),
+             )
+
+         request = cluster_service_pb.CreateClusterRequest(
+             folder_id=spec.folder_id,
+             name=spec.name,
+             description=spec.description,
+             labels=spec.labels,
+             config=cluster_pb.ClusterConfig(
+                 resource_pools=cluster_pb.ResourcePools(
+                     driver=driver_pool,
+                     executor=executor_pool,
+                 ),
+                 history_server=cluster_pb.HistoryServerConfig(
+                     enabled=spec.history_server_enabled,
+                 ),
+                 dependencies=cluster_pb.Dependencies(
+                     pip_packages=spec.pip_packages,
+                     deb_packages=spec.deb_packages,
+                 ),
+                 metastore=cluster_pb.Metastore(
+                     cluster_id=spec.metastore_cluster_id,
+                 ),
+             ),
+             network=cluster_pb.NetworkConfig(
+                 subnet_ids=spec.subnet_ids,
+                 security_group_ids=spec.security_group_ids,
+             ),
+             deletion_protection=spec.deletion_protection,
+             service_account_id=spec.service_account_id,
+             logging=logging_config,
+             maintenance_window=maintenance_window,
+         )
+
+         self.log.info("Creating Spark cluster. Request: %s.", request)
+
+         result = self.sdk.create_operation_and_get_result(
+             request,
+             service=cluster_service_grpc_pb.ClusterServiceStub,
+             method_name="Create",
+             response_type=cluster_pb.Cluster,
+             meta_type=cluster_service_pb.CreateClusterMetadata,
+         )
+
+         self.cluster_id = result.response.id
+         return result
+
+     def delete_cluster(self, cluster_id: Optional[str] = None):
+         """
+         Delete cluster.
+
+         :param cluster_id: ID of the cluster to remove.
+         :type cluster_id: str, optional
+
+         :return: Operation result
+         :rtype: OperationResult
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         request = cluster_service_pb.DeleteClusterRequest(cluster_id=cluster_id)
+
+         self.log.info("Deleting Spark cluster. Request: %s.", request)
+
+         return self.sdk.create_operation_and_get_result(
+             request,
+             service=cluster_service_grpc_pb.ClusterServiceStub,
+             method_name="Delete",
+             meta_type=cluster_service_pb.DeleteClusterMetadata,
+         )
+
+     def stop_cluster(self, cluster_id: Optional[str] = None):
+         """
+         Stop cluster.
+
+         :param cluster_id: ID of the cluster to stop.
+         :type cluster_id: str, optional
+
+         :return: Operation result
+         :rtype: OperationResult
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         request = cluster_service_pb.StopClusterRequest(cluster_id=cluster_id)
+
+         self.log.info("Stopping Spark cluster. Request: %s.", request)
+
+         return self.sdk.create_operation_and_get_result(
+             request,
+             service=cluster_service_grpc_pb.ClusterServiceStub,
+             method_name="Stop",
+             meta_type=cluster_service_pb.StopClusterMetadata,
+         )
+
+     def start_cluster(self, cluster_id: Optional[str] = None):
+         """
+         Start cluster.
+
+         :param cluster_id: ID of the cluster to start.
+         :type cluster_id: str, optional
+
+         :return: Operation result
+         :rtype: OperationResult
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         request = cluster_service_pb.StartClusterRequest(cluster_id=cluster_id)
+
+         self.log.info("Starting Spark cluster. Request: %s.", request)
+
+         return self.sdk.create_operation_and_get_result(
+             request,
+             service=cluster_service_grpc_pb.ClusterServiceStub,
+             method_name="Start",
+             meta_type=cluster_service_pb.StartClusterMetadata,
+         )
+
+     def create_spark_job(self, spec: SparkJobParameters, cluster_id: Optional[str] = None):
+         """
+         Run spark job.
+
+         :param cluster_id: ID of the cluster.
+         :type cluster_id: str, optional
+         :param spec: Job parameters.
+         :type spec: SparkJobParameters
+
+         :return: Operation result
+         :rtype: OperationResult
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         request = job_service_pb.CreateJobRequest(
+             cluster_id=cluster_id,
+             name=spec.name,
+             spark_job=job_pb.SparkJob(
+                 main_jar_file_uri=spec.main_jar_file_uri,
+                 main_class=spec.main_class,
+                 args=spec.args,
+                 properties=spec.properties,
+                 packages=spec.packages,
+                 file_uris=spec.file_uris,
+                 jar_file_uris=spec.jar_file_uris,
+                 archive_uris=spec.archive_uris,
+                 repositories=spec.repositories,
+                 exclude_packages=spec.exclude_packages,
+             ),
+         )
+
+         self.log.info("Running Spark job. Request: %s.", request)
+
+         return self.sdk.create_operation_and_get_result(
+             request,
+             service=job_service_grpc_pb.JobServiceStub,
+             method_name="Create",
+             response_type=job_pb.Job,
+             meta_type=job_service_pb.CreateJobMetadata,
+         )
+
+     def create_pyspark_job(self, spec: PysparkJobParameters, cluster_id: Optional[str] = None):
+         """
+         Run Pyspark job on the cluster.
+
+         :param cluster_id: ID of the cluster.
+         :type cluster_id: str, optional
+         :param spec: Job parameters.
+         :type spec: PysparkJobParameters
+
+         :return: Operation result
+         :rtype: OperationResult
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         request = job_service_pb.CreateJobRequest(
+             cluster_id=cluster_id,
+             name=spec.name,
+             pyspark_job=job_pb.PysparkJob(
+                 main_python_file_uri=spec.main_python_file_uri,
+                 args=spec.args,
+                 properties=spec.properties,
+                 packages=spec.packages,
+                 file_uris=spec.file_uris,
+                 python_file_uris=spec.python_file_uris,
+                 jar_file_uris=spec.jar_file_uris,
+                 archive_uris=spec.archive_uris,
+                 repositories=spec.repositories,
+                 exclude_packages=spec.exclude_packages,
+             ),
+         )
+
+         self.log.info("Running Pyspark job. Request: %s.", request)
+
+         return self.sdk.create_operation_and_get_result(
+             request,
+             service=job_service_grpc_pb.JobServiceStub,
+             method_name="Create",
+             response_type=job_pb.Job,
+             meta_type=job_service_pb.CreateJobMetadata,
+         )
+
+     def get_job(self, job_id: str, cluster_id: Optional[str] = None):
+         """
+         Get job info.
+
+         :param cluster_id: ID of the cluster.
+         :type cluster_id: str, optional
+         :param job_id: ID of the job.
+         :type job_id: str
+
+         :return: Job info, job status name
+         :rtype: Tuple[Job, str]
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         request = job_service_pb.GetJobRequest(
+             cluster_id=cluster_id,
+             job_id=job_id,
+         )
+         job = self.sdk.client(job_service_grpc_pb.JobServiceStub).Get(request)
+         return job, job_pb.Job.Status.Name(job.status)
+
+     def get_job_log(self, job_id: str, cluster_id: Optional[str] = None):
+         """
+         Get job log.
+
+         :param cluster_id: ID of the cluster.
+         :type cluster_id: str, optional
+         :param job_id: ID of the job.
+         :type job_id: str
+
+         :return: Job log
+         :rtype: List[str]
+         """
+         cluster_id = cluster_id or self.cluster_id
+         if not cluster_id:
+             raise RuntimeError("Cluster id must be specified.")
+
+         result = []
+         page_token = ""
+
+         while True:
+             request = job_service_pb.ListJobLogRequest(
+                 cluster_id=cluster_id,
+                 job_id=job_id,
+                 page_token=page_token,
+             )
+             page = self.sdk.client(job_service_grpc_pb.JobServiceStub).ListLog(request)
+             if page.content:
+                 result.extend(page.content.split("\n"))
+             if page.next_page_token and page_token != page.next_page_token:
+                 page_token = page.next_page_token
+             else:
+                 break
+         return result
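Taken together, the new wrapper covers a create / submit / inspect / delete lifecycle. A hedged end-to-end sketch built only from the methods added above; every ID, resource preset, and URI is a placeholder, and the sdk.wrappers access path is assumed to work as it does for the Dataproc wrapper:

    import yandexcloud

    sdk = yandexcloud.SDK(token="<oauth-or-iam-token>")
    spark = sdk.wrappers.Spark()

    # Fixed-size pools; autoscaling pools would set *_pool_min_size / *_pool_max_size instead.
    spark.create_cluster(
        sdk.wrappers.SparkClusterParameters(
            folder_id="<folder-id>",
            service_account_id="<service-account-id>",
            subnet_ids=["<subnet-id>"],
            security_group_ids=["<security-group-id>"],
            driver_pool_resource_preset="<resource-preset-id>",
            driver_pool_size=1,
            executor_pool_resource_preset="<resource-preset-id>",
            executor_pool_size=2,
        )
    )

    # create_pyspark_job waits for the operation and returns its result; the job ID is read
    # from the response (assuming the Job message carries an id, as Cluster does).
    result = spark.create_pyspark_job(
        sdk.wrappers.PysparkJobParameters(
            name="word-count",
            main_python_file_uri="s3a://<bucket>/word_count.py",
            args=["s3a://<bucket>/input.txt"],
        )
    )
    job_id = result.response.id

    # create_cluster stored the cluster ID on the wrapper, so cluster_id can be omitted here.
    job, status = spark.get_job(job_id=job_id)
    print(status)
    print("\n".join(spark.get_job_log(job_id=job_id)))

    spark.delete_cluster()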
yandexcloud-0.339.0.dist-info/METADATA → yandexcloud-0.340.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: yandexcloud
- Version: 0.339.0
+ Version: 0.340.0
  Summary: The Yandex Cloud official SDK
  Author-email: Yandex LLC <cloud@support.yandex.ru>
  License: MIT
@@ -145,6 +145,7 @@ Check `examples` directory for more examples.
  | yandex.cloud.serverless.containers | serverless-containers |
  | yandex.cloud.serverless.functions | serverless-functions |
  | yandex.cloud.serverless.triggers | serverless-triggers |
+ | yandex.cloud.spark | managed-spark |
  | yandex.cloud.storage | storage-api |
  | yandex.cloud.vpc | vpc |
  | yandex.cloud.ydb | ydb |
yandexcloud-0.339.0.dist-info/RECORD → yandexcloud-0.340.0.dist-info/RECORD RENAMED
@@ -3119,7 +3119,7 @@ yandex/cloud/ydb/v1/storage_type_service_pb2.py,sha256=PFcJngBOukxC-vuzsLm6NJZVo
  yandex/cloud/ydb/v1/storage_type_service_pb2.pyi,sha256=FcS-A3KjCtMJU1fzxtUx2jSrUx53NKRNVJmKMQTF2DE,3315
  yandex/cloud/ydb/v1/storage_type_service_pb2_grpc.py,sha256=Q08BMBwPIgmN4nYUdQ0jnVUpLO-PWcAhy3HJgDxqj2g,5996
  yandex/cloud/ydb/v1/storage_type_service_pb2_grpc.pyi,sha256=8slxgr1UMJs-kkylAgfTPtFdZqc2zCHKNKrOawRjpqw,2650
- yandexcloud/__init__.py,sha256=lAuxRpOnMohPZJvFnp_3mayZLnti-yzOzeVZplwBMzU,428
+ yandexcloud/__init__.py,sha256=S6RmvL_Y7uYElvnoCvzE-ULerDDzf1GInYo2TGRS81g,428
  yandexcloud/_auth_fabric.py,sha256=_JPkJLNoM7FMMeJnY1-hqbl2Wt32u0lxvMiDib0btY8,5008
  yandexcloud/_auth_plugin.py,sha256=QnHsQW0MAIbeW343XgWURcNIILnH55dEHPHkfCZkY1Y,3560
  yandexcloud/_backoff.py,sha256=dXoKZZOFJnEjc2VH35zfHjpJ3hwN2kecy9qzDF-QSso,1246
@@ -3128,15 +3128,16 @@ yandexcloud/_helpers.py,sha256=zC8mc0Kz_1EQ77COT9Q9oP5lTJkKU7tCgC1wAdf3VgY,3030
  yandexcloud/_operation_waiter.py,sha256=5aoDL-uoWRjSCUq8BQphtsF8eO-S80NEwYio-vr1448,4761
  yandexcloud/_retry_interceptor.py,sha256=MvpSN0NrrTlIL_gpZZGC44snIm1HWUXFMWfnhBhHgfk,7784
  yandexcloud/_retry_policy.py,sha256=J8-Yrr1LaKHrvEEBTASH0pFM9tPRGM4mIpY-XyQZn1w,1332
- yandexcloud/_sdk.py,sha256=OPCEsa257MR3SicTRMhlYvRGwBX5H_nhhqWmGOlKMWQ,7815
+ yandexcloud/_sdk.py,sha256=heTQVTnClzlQ46dG1hjLnxoZG0wD33UX-CDjewra4pU,7860
  yandexcloud/auth.py,sha256=SD_IVGUhN_Ny8f-qtnNWtZQ1VPYrGbKypFAbQcaSQ58,1008
  yandexcloud/operations.py,sha256=T7SqdeEKwkLTDLhgAFuAIuNpnII0QUv1xTqLO2nlS14,1091
  yandexcloud/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- yandexcloud/_wrappers/__init__.py,sha256=z19vuf3vgVGWFc-0d1LFsyVXiM-SYlhvvWRQI8l1T_8,428
+ yandexcloud/_wrappers/__init__.py,sha256=gX7KWwxIo5udfNbcct_qsMsHnhq8KUP3kNIUDIU-Rvo,964
  yandexcloud/_wrappers/dataproc/__init__.py,sha256=rpiJ7CO_H9O3pPQfUFYOGFPOkR6_guyPnuG0tax3cfk,34647
- yandexcloud-0.339.0.dist-info/AUTHORS,sha256=c7-HPkP4Ecf3bGm6vlBVtj38IKmSAdVmrDK_gP7Ih8w,232
- yandexcloud-0.339.0.dist-info/LICENSE,sha256=AFcOYhNOyuBQP89lObqyipdScN2KUUS-OuWoUlVo6yE,1077
- yandexcloud-0.339.0.dist-info/METADATA,sha256=rEQYu88A1dd-Qw01ZAZ64ia370LTXREHkzw3CCME97o,12170
- yandexcloud-0.339.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
- yandexcloud-0.339.0.dist-info/top_level.txt,sha256=p6aBMPGD526A1jM2WVnAneI2qO4kGDWeJi6uwYApDqg,19
- yandexcloud-0.339.0.dist-info/RECORD,,
+ yandexcloud/_wrappers/spark/__init__.py,sha256=bDR2jjksM3xJIUqdRBcmzyG9yzTMv8cq9pH2yJm0zx4,22281
+ yandexcloud-0.340.0.dist-info/AUTHORS,sha256=c7-HPkP4Ecf3bGm6vlBVtj38IKmSAdVmrDK_gP7Ih8w,232
+ yandexcloud-0.340.0.dist-info/LICENSE,sha256=AFcOYhNOyuBQP89lObqyipdScN2KUUS-OuWoUlVo6yE,1077
+ yandexcloud-0.340.0.dist-info/METADATA,sha256=AnkBoLqQLcnFElBJtfzJdhmR7QeVPdYUoRaEbp6N-lc,12272
+ yandexcloud-0.340.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+ yandexcloud-0.340.0.dist-info/top_level.txt,sha256=p6aBMPGD526A1jM2WVnAneI2qO4kGDWeJi6uwYApDqg,19
+ yandexcloud-0.340.0.dist-info/RECORD,,