dataproc-spark-connect 1.0.0rc6__py2.py3-none-any.whl → 1.0.1__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/METADATA +65 -17
- {dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/RECORD +6 -6
- google/cloud/dataproc_spark_connect/session.py +110 -33
- {dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/WHEEL +0 -0
- {dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/licenses/LICENSE +0 -0
- {dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/top_level.txt +0 -0
{dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/METADATA
RENAMED
@@ -1,10 +1,11 @@
 Metadata-Version: 2.4
 Name: dataproc-spark-connect
-Version: 1.0.0rc6
+Version: 1.0.1
 Summary: Dataproc client library for Spark Connect
 Home-page: https://github.com/GoogleCloudDataproc/dataproc-spark-connect-python
 Author: Google LLC
 License: Apache 2.0
+Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: google-api-core>=2.19
 Requires-Dist: google-cloud-dataproc>=5.18
@@ -43,39 +44,86 @@ pip uninstall dataproc_spark_connect
 
 This client requires permissions to
 manage [Dataproc Sessions and Session Templates](https://cloud.google.com/dataproc-serverless/docs/concepts/iam).
-If you are running the client outside of Google Cloud, you must set following
-environment variables:
 
+If you are running the client outside of Google Cloud, you need to provide
+authentication credentials. Set the `GOOGLE_APPLICATION_CREDENTIALS` environment
+variable to point to
+your [Application Credentials](https://cloud.google.com/docs/authentication/provide-credentials-adc)
+file.
+
+You can specify the project and region either via environment variables or directly
+in your code using the builder API:
+
+* Environment variables: `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_REGION`
+* Builder API: `.projectId()` and `.location()` methods (recommended)
 
 ## Usage
 
-1. Install the latest version of Dataproc
-Connect modules:
+1. Install the latest version of Dataproc Spark Connect:
 
 ```sh
-pip install
+pip install -U dataproc-spark-connect
 ```
 
 2. Add the required imports into your PySpark application or notebook and start
-a Spark session
+a Spark session using the fluent API:
+
+```python
+from google.cloud.dataproc_spark_connect import DataprocSparkSession
+spark = DataprocSparkSession.builder.getOrCreate()
+```
+
+3. You can configure Spark properties using the `.config()` method:
+
+```python
+from google.cloud.dataproc_spark_connect import DataprocSparkSession
+spark = DataprocSparkSession.builder.config('spark.executor.memory', '4g').config('spark.executor.cores', '2').getOrCreate()
+```
+
+4. For advanced configuration, you can use the `Session` class to customize
+settings like subnetwork or other environment configurations:
 
 ```python
 from google.cloud.dataproc_spark_connect import DataprocSparkSession
 from google.cloud.dataproc_v1 import Session
 session_config = Session()
 session_config.environment_config.execution_config.subnetwork_uri = '<subnet>'
-session_config.runtime_config.version = '
-spark = DataprocSparkSession.builder.dataprocSessionConfig(session_config).getOrCreate()
+session_config.runtime_config.version = '3.0'
+spark = DataprocSparkSession.builder.projectId('my-project').location('us-central1').dataprocSessionConfig(session_config).getOrCreate()
+```
+
+### Reusing Named Sessions Across Notebooks
+
+Named sessions allow you to share a single Spark session across multiple notebooks, improving efficiency by avoiding repeated session startup times and reducing costs.
+
+To create or connect to a named session:
+
+1. Create a session with a custom ID in your first notebook:
+
+```python
+from google.cloud.dataproc_spark_connect import DataprocSparkSession
+session_id = 'my-ml-pipeline-session'
+spark = DataprocSparkSession.builder.dataprocSessionId(session_id).getOrCreate()
+df = spark.createDataFrame([(1, 'data')], ['id', 'value'])
+df.show()
+```
+
+2. Reuse the same session in another notebook by specifying the same session ID:
+
+```python
+from google.cloud.dataproc_spark_connect import DataprocSparkSession
+session_id = 'my-ml-pipeline-session'
+spark = DataprocSparkSession.builder.dataprocSessionId(session_id).getOrCreate()
+df = spark.createDataFrame([(2, 'more-data')], ['id', 'value'])
+df.show()
 ```
 
+3. Session IDs must be 4-63 characters long, start with a lowercase letter, contain only lowercase letters, numbers, and hyphens, and not end with a hyphen.
+
+4. Named sessions persist until explicitly terminated or reach their configured TTL.
+
+5. A session with a given ID that is in a TERMINATED state cannot be reused. It must be deleted before a new session with the same ID can be created.
+
 ### Using Spark SQL Magic Commands (Jupyter Notebooks)
 
 The package supports the [sparksql-magic](https://github.com/cryeo/sparksql-magic) library for executing Spark SQL queries directly in Jupyter notebooks.
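The updated README text above lists two ways to point the client at a project and region. The builder route appears in the README snippets themselves; here is a minimal sketch of the environment-variable route, with placeholder values:

```python
import os

# Placeholder values; substitute your own project and region.
os.environ["GOOGLE_CLOUD_PROJECT"] = "my-project"
os.environ["GOOGLE_CLOUD_REGION"] = "us-central1"

from google.cloud.dataproc_spark_connect import DataprocSparkSession

# With the environment variables set, no explicit projectId()/location() calls are needed.
spark = DataprocSparkSession.builder.getOrCreate()
```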
{dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/RECORD
RENAMED

@@ -1,13 +1,13 @@
-dataproc_spark_connect-1.0.
+dataproc_spark_connect-1.0.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
 google/cloud/dataproc_spark_connect/__init__.py,sha256=dIqHNWVWWrSuRf26x11kX5e9yMKSHCtmI_GBj1-FDdE,1101
 google/cloud/dataproc_spark_connect/environment.py,sha256=o5WRKI1vyIaxZ8S2UhtDer6pdi4CXYRzI9Xdpq5hVkQ,2771
 google/cloud/dataproc_spark_connect/exceptions.py,sha256=iwaHgNabcaxqquOpktGkOWKHMf8hgdPQJUgRnIbTXVs,970
 google/cloud/dataproc_spark_connect/pypi_artifacts.py,sha256=gd-VMwiVP-EJuPp9Vf9Shx8pqps3oSKp0hBcSSZQS-A,1575
-google/cloud/dataproc_spark_connect/session.py,sha256=
+google/cloud/dataproc_spark_connect/session.py,sha256=loEpKA2ssA89EqT9gWphmfPsZwfHjayxd97J2avdQMc,55890
 google/cloud/dataproc_spark_connect/client/__init__.py,sha256=6hCNSsgYlie6GuVpc5gjFsPnyeMTScTpXSPYqp1fplY,615
 google/cloud/dataproc_spark_connect/client/core.py,sha256=GRc4OCTBvIvdagjxOPoDO22vLtt8xDSerdREMRDeUBY,4659
 google/cloud/dataproc_spark_connect/client/proxy.py,sha256=qUZXvVY1yn934vE6nlO495XUZ53AUx9O74a9ozkGI9U,8976
-dataproc_spark_connect-1.0.
-dataproc_spark_connect-1.0.
-dataproc_spark_connect-1.0.
-dataproc_spark_connect-1.0.
+dataproc_spark_connect-1.0.1.dist-info/METADATA,sha256=EubZvtsdEx04pokiRloTqqfohduAgM6sW5mTQ5Th4Ic,6840
+dataproc_spark_connect-1.0.1.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+dataproc_spark_connect-1.0.1.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+dataproc_spark_connect-1.0.1.dist-info/RECORD,,
google/cloud/dataproc_spark_connect/session.py

@@ -14,6 +14,7 @@
 
 import atexit
 import datetime
+import functools
 import json
 import logging
 import os
@@ -25,8 +26,6 @@ import time
 import uuid
 import tqdm
 from packaging import version
-from tqdm import tqdm as cli_tqdm
-from tqdm.notebook import tqdm as notebook_tqdm
 from types import MethodType
 from typing import Any, cast, ClassVar, Dict, Iterable, Optional, Union
 
@@ -67,6 +66,10 @@ SYSTEM_LABELS = {
     "goog-colab-notebook-id",
 }
 
+_DATAPROC_SESSIONS_BASE_URL = (
+    "https://console.cloud.google.com/dataproc/interactive"
+)
+
 
 def _is_valid_label_value(value: str) -> bool:
     """
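The new module-level constant centralizes the console URL that the hunks below interpolate into f-strings. A small illustrative helper (not part of the library) showing the link shape that results:

```python
_DATAPROC_SESSIONS_BASE_URL = "https://console.cloud.google.com/dataproc/interactive"

def session_console_url(region: str, session_id: str, project_id: str) -> str:
    # Hypothetical helper for illustration; mirrors the f-strings in the hunks below.
    return f"{_DATAPROC_SESSIONS_BASE_URL}/{region}/{session_id}?project={project_id}"

# Example link for a session named 'my-ml-pipeline-session' in us-central1.
print(session_console_url("us-central1", "my-ml-pipeline-session", "my-project"))
```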
@@ -494,15 +497,21 @@ class DataprocSparkSession(SparkSession):
         )
 
     def _display_session_link_on_creation(self, session_id):
-        session_url = f"
+        session_url = f"{_DATAPROC_SESSIONS_BASE_URL}/{self._region}/{session_id}?project={self._project_id}"
         plain_message = f"Creating Dataproc Session: {session_url}"
+        if environment.is_colab_enterprise():
+            html_element = f"""
             <div>
             <p>Creating Dataproc Spark Session<p>
-            <p><a href="{session_url}">Dataproc Session</a></p>
             </div>
+            """
+        else:
+            html_element = f"""
+            <div>
+            <p>Creating Dataproc Spark Session<p>
+            <p><a href="{session_url}">Dataproc Session</a></p>
+            </div>
+            """
         self._output_element_or_message(plain_message, html_element)
 
     def _print_session_created_message(self):
@@ -554,7 +563,7 @@ class DataprocSparkSession(SparkSession):
 
         if session_response is not None:
             print(
-                f"Using existing Dataproc Session (configuration changes may not be applied):
+                f"Using existing Dataproc Session (configuration changes may not be applied): {_DATAPROC_SESSIONS_BASE_URL}/{self._region}/{s8s_session_id}?project={self._project_id}"
             )
             self._display_view_session_details_button(s8s_session_id)
         if session is None:
@@ -583,6 +592,16 @@ class DataprocSparkSession(SparkSession):
             session = PySparkSQLSession.builder.getOrCreate()
             return session  # type: ignore
 
+        if self._project_id is None:
+            raise DataprocSparkConnectException(
+                f"Error while creating Dataproc Session: project ID is not set"
+            )
+
+        if self._region is None:
+            raise DataprocSparkConnectException(
+                f"Error while creating Dataproc Session: location is not set"
+            )
+
         # Handle custom session ID by setting it early and letting existing logic handle it
         if self._custom_session_id:
             self._handle_custom_session_id()
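These new guards raise `DataprocSparkConnectException` when no project or location has been resolved. A minimal sketch of satisfying them explicitly through the builder (values are placeholders):

```python
from google.cloud.dataproc_spark_connect import DataprocSparkSession

# Placeholders; use your own project ID and region.
spark = (
    DataprocSparkSession.builder
    .projectId("my-project")
    .location("us-central1")
    .getOrCreate()
)
```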
@@ -711,8 +730,6 @@ class DataprocSparkSession(SparkSession):
             # Merge default configs with existing properties,
             # user configs take precedence
             for k, v in {
-                "spark.datasource.bigquery.viewsEnabled": "true",
-                "spark.datasource.bigquery.writeMethod": "direct",
                 "spark.sql.catalog.spark_catalog": "com.google.cloud.spark.bigquery.BigQuerySparkSessionCatalog",
                 "spark.sql.sources.default": "bigquery",
             }.items():
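Two BigQuery connector defaults are no longer injected by the client. If a workload still relies on them, they can be supplied explicitly; a sketch using the builder's `.config()` method (property names are taken from the removed lines above):

```python
from google.cloud.dataproc_spark_connect import DataprocSparkSession

# Re-apply the formerly defaulted BigQuery properties; only needed if your
# workload depends on them.
spark = (
    DataprocSparkSession.builder
    .config("spark.datasource.bigquery.viewsEnabled", "true")
    .config("spark.datasource.bigquery.writeMethod", "direct")
    .getOrCreate()
)
```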
@@ -734,7 +751,7 @@ class DataprocSparkSession(SparkSession):
 
         # Runtime version to server Python version mapping
         RUNTIME_PYTHON_MAP = {
-            "3.0": (3,
+            "3.0": (3, 12),
         }
 
         client_python = sys.version_info[:2]  # (major, minor)
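The map now pairs runtime `3.0` with a Python 3.12 server. A standalone sketch of the kind of client/server comparison this supports (the warning text is illustrative, not the library's actual message):

```python
import sys
import warnings

RUNTIME_PYTHON_MAP = {"3.0": (3, 12)}  # runtime version -> expected server Python

client_python = sys.version_info[:2]  # (major, minor)
if client_python != RUNTIME_PYTHON_MAP["3.0"]:
    warnings.warn(
        f"Local Python {client_python} differs from the Python "
        f"{RUNTIME_PYTHON_MAP['3.0']} expected by runtime 3.0"
    )
```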
@@ -798,7 +815,7 @@ class DataprocSparkSession(SparkSession):
             return
 
         try:
-            session_url = f"
+            session_url = f"{_DATAPROC_SESSIONS_BASE_URL}/{self._region}/{session_id}?project={self._project_id}"
             from IPython.core.interactiveshell import InteractiveShell
 
             if not InteractiveShell.initialized():
@@ -981,6 +998,28 @@ class DataprocSparkSession(SparkSession):
             clearProgressHandlers_wrapper_method, self
         )
 
+    @staticmethod
+    @functools.lru_cache(maxsize=1)
+    def get_tqdm_bar():
+        """
+        Return a tqdm implementation that works in the current environment.
+
+        - Uses CLI tqdm for interactive terminals.
+        - Uses the notebook tqdm if available, otherwise falls back to CLI tqdm.
+        """
+        from tqdm import tqdm as cli_tqdm
+
+        if environment.is_interactive_terminal():
+            return cli_tqdm
+
+        try:
+            import ipywidgets
+            from tqdm.notebook import tqdm as notebook_tqdm
+
+            return notebook_tqdm
+        except ImportError:
+            return cli_tqdm
+
     def _register_progress_execution_handler(self):
         from pyspark.sql.connect.shell.progress import StageInfo
 
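A short sketch of how the cached picker might be used outside the handler (the loop and task counts are stand-ins, not the handler's real StageInfo data):

```python
import time

from google.cloud.dataproc_spark_connect import DataprocSparkSession

# Resolved once; functools.lru_cache(maxsize=1) returns the same class afterwards.
tqdm_pbar = DataprocSparkSession.get_tqdm_bar()

with tqdm_pbar(total=50, desc="Tasks") as pbar:
    for _ in range(50):
        time.sleep(0.01)  # stand-in for waiting on progress updates
        pbar.update(1)
```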
@@ -1005,9 +1044,12 @@ class DataprocSparkSession(SparkSession):
                 total_tasks += stage.num_tasks
                 completed_tasks += stage.num_completed_tasks
 
-            if
+            # Don't show progress bar till we receive some tasks
+            if total_tasks == 0:
+                return
+
+            # Get correct tqdm (notebook or CLI)
+            tqdm_pbar = self.get_tqdm_bar()
 
             # Use a lock to ensure only one thread can access and modify
             # the shared dictionaries at a time.
@@ -1044,13 +1086,11 @@ class DataprocSparkSession(SparkSession):
     @staticmethod
     def _sql_lazy_transformation(req):
         # Select SQL command
-            return False
+        try:
+            query = req.plan.command.sql_command.input.sql.query
+            return "select" in query.strip().lower().split()
+        except AttributeError:
+            return False
 
     def _repr_html_(self) -> str:
         if not self._active_s8s_session_id:
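The rewritten predicate treats a command as a lazy SELECT only when `select` appears as a whole token in the query text. A standalone illustration of that check (the helper name here is ours, not the library's):

```python
def looks_like_select(query: str) -> bool:
    # Same token test as the new code: "select" must appear as a whole word.
    return "select" in query.strip().lower().split()

assert looks_like_select("SELECT * FROM sales")
assert not looks_like_select("CREATE TABLE sales (id INT)")
assert not looks_like_select("selecting")  # substring matches don't count
```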
@@ -1058,7 +1098,7 @@ class DataprocSparkSession(SparkSession):
             <div>No Active Dataproc Session</div>
             """
 
-        s8s_session = f"
+        s8s_session = f"{_DATAPROC_SESSIONS_BASE_URL}/{self._region}/{self._active_s8s_session_id}"
         ui = f"{s8s_session}/sparkApplications/applications"
         return f"""
         <div>
@@ -1085,7 +1125,7 @@ class DataprocSparkSession(SparkSession):
         )
 
         url = (
-            f"
+            f"{_DATAPROC_SESSIONS_BASE_URL}/{self._region}/"
             f"{self._active_s8s_session_id}/sparkApplications/application;"
             f"associatedSqlOperationId={operation_id}?project={self._project_id}"
         )
@@ -1177,20 +1217,52 @@ class DataprocSparkSession(SparkSession):
     def _get_active_session_file_path():
         return os.getenv("DATAPROC_SPARK_CONNECT_ACTIVE_SESSION_FILE_PATH")
 
-    def stop(self) -> None:
+    def stop(self, terminate: Optional[bool] = None) -> None:
+        """
+        Stop the Spark session and optionally terminate the server-side session.
+
+        Parameters
+        ----------
+        terminate : bool, optional
+            Control server-side termination behavior.
+
+            - None (default): Auto-detect based on session type
+
+              - Managed sessions (auto-generated ID): terminate server
+              - Named sessions (custom ID): client-side cleanup only
+
+            - True: Always terminate the server-side session
+            - False: Never terminate the server-side session (client cleanup only)
+
+        Examples
+        --------
+        Auto-detect termination behavior (existing behavior):
+
+        >>> spark.stop()
+
+        Force terminate a named session:
+
+        >>> spark.stop(terminate=True)
+
+        Prevent termination of a managed session:
+
+        >>> spark.stop(terminate=False)
+        """
         with DataprocSparkSession._lock:
             if DataprocSparkSession._active_s8s_session_id is not None:
-                        f"Stopping unmanaged session {DataprocSparkSession._active_s8s_session_id} without termination"
+                # Determine if we should terminate the server-side session
+                if terminate is None:
+                    # Auto-detect: managed sessions terminate, named sessions don't
+                    should_terminate = (
+                        not DataprocSparkSession._active_session_uses_custom_id
                     )
                 else:
+                    should_terminate = terminate
+
+                if should_terminate:
+                    # Terminate the server-side session
                     logger.debug(
-                        f"Terminating
+                        f"Terminating session {DataprocSparkSession._active_s8s_session_id}"
                     )
                     terminate_s8s_session(
                         DataprocSparkSession._project_id,
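Putting the new `stop(terminate=...)` parameter together with named sessions from the README: a sketch of keeping a named session alive between notebooks and terminating it explicitly at the end (the session ID and ordering are illustrative):

```python
from google.cloud.dataproc_spark_connect import DataprocSparkSession

# Attach to (or create) a named session.
spark = DataprocSparkSession.builder.dataprocSessionId('my-ml-pipeline-session').getOrCreate()

# ... run some work ...

# Client-side cleanup only; the server-side session keeps running for reuse.
spark.stop(terminate=False)

# Later (possibly from another notebook), reattach and shut it down for good.
spark = DataprocSparkSession.builder.dataprocSessionId('my-ml-pipeline-session').getOrCreate()
spark.stop(terminate=True)
```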
@@ -1198,6 +1270,11 @@ class DataprocSparkSession(SparkSession):
                         DataprocSparkSession._active_s8s_session_id,
                         self._client_options,
                     )
+                else:
+                    # Client-side cleanup only
+                    logger.debug(
+                        f"Stopping session {DataprocSparkSession._active_s8s_session_id} without termination"
+                    )
 
             self._remove_stopped_session_from_file()
 

{dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/WHEEL
RENAMED
File without changes

{dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/licenses/LICENSE
RENAMED
File without changes

{dataproc_spark_connect-1.0.0rc6.dist-info → dataproc_spark_connect-1.0.1.dist-info}/top_level.txt
RENAMED
File without changes