salesforce-data-customcode 0.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. datacustomcode/__init__.py +20 -0
  2. datacustomcode/cli.py +215 -0
  3. datacustomcode/client.py +237 -0
  4. datacustomcode/cmd.py +105 -0
  5. datacustomcode/config.py +172 -0
  6. datacustomcode/config.yaml +19 -0
  7. datacustomcode/credentials.py +97 -0
  8. datacustomcode/deploy.py +468 -0
  9. datacustomcode/file/__init__.py +14 -0
  10. datacustomcode/file/base.py +19 -0
  11. datacustomcode/file/path/__init__.py +14 -0
  12. datacustomcode/file/path/default.py +171 -0
  13. datacustomcode/io/__init__.py +14 -0
  14. datacustomcode/io/base.py +28 -0
  15. datacustomcode/io/reader/__init__.py +14 -0
  16. datacustomcode/io/reader/base.py +34 -0
  17. datacustomcode/io/reader/query_api.py +172 -0
  18. datacustomcode/io/writer/__init__.py +14 -0
  19. datacustomcode/io/writer/base.py +49 -0
  20. datacustomcode/io/writer/csv.py +41 -0
  21. datacustomcode/io/writer/print.py +98 -0
  22. datacustomcode/mixin.py +94 -0
  23. datacustomcode/py.typed +0 -0
  24. datacustomcode/run.py +111 -0
  25. datacustomcode/scan.py +286 -0
  26. datacustomcode/spark/__init__.py +20 -0
  27. datacustomcode/spark/base.py +29 -0
  28. datacustomcode/spark/default.py +39 -0
  29. datacustomcode/template.py +36 -0
  30. datacustomcode/templates/.devcontainer/devcontainer.json +10 -0
  31. datacustomcode/templates/Dockerfile +18 -0
  32. datacustomcode/templates/Dockerfile.dependencies +11 -0
  33. datacustomcode/templates/README.md +0 -0
  34. datacustomcode/templates/account.ipynb +86 -0
  35. datacustomcode/templates/build_native_dependencies.sh +9 -0
  36. datacustomcode/templates/examples/employee_hierarchy/employee_data.csv +13 -0
  37. datacustomcode/templates/examples/employee_hierarchy/entrypoint.py +78 -0
  38. datacustomcode/templates/jupyterlab.sh +97 -0
  39. datacustomcode/templates/payload/config.json +1 -0
  40. datacustomcode/templates/payload/entrypoint.py +25 -0
  41. datacustomcode/templates/requirements-dev.txt +10 -0
  42. datacustomcode/templates/requirements.txt +1 -0
  43. datacustomcode/version.py +27 -0
  44. salesforce_data_customcode-0.1.15.dist-info/METADATA +340 -0
  45. salesforce_data_customcode-0.1.15.dist-info/RECORD +48 -0
  46. salesforce_data_customcode-0.1.15.dist-info/WHEEL +4 -0
  47. salesforce_data_customcode-0.1.15.dist-info/entry_points.txt +5 -0
  48. salesforce_data_customcode-0.1.15.dist-info/licenses/LICENSE.txt +206 -0
@@ -0,0 +1,20 @@
1
+ # Copyright (c) 2025, Salesforce, Inc.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from datacustomcode.client import Client
17
+ from datacustomcode.io.reader.query_api import QueryAPIDataCloudReader
18
+ from datacustomcode.io.writer.print import PrintDataCloudWriter
19
+
20
+ __all__ = ["Client", "QueryAPIDataCloudReader", "PrintDataCloudWriter"]
datacustomcode/cli.py ADDED
@@ -0,0 +1,215 @@
1
+ # Copyright (c) 2025, Salesforce, Inc.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from importlib import metadata
16
+ import json
17
+ import os
18
+ import sys
19
+ from typing import List, Union
20
+
21
+ import click
22
+ from loguru import logger
23
+
24
+
25
@click.group()
@click.option("--debug", is_flag=True)
def cli(debug: bool):
    """Root command group; sets up stderr logging for all subcommands."""
    logger.remove()
    # One handler either way; only the verbosity threshold differs.
    log_level = "DEBUG" if debug else "INFO"
    logger.configure(handlers=[{"sink": sys.stderr, "level": log_level}])
33
+
34
+
35
@cli.command()
def version():
    """Display the current version of the package."""
    # Local renamed from `version` so it does not shadow this command function;
    # removed a leftover debug `print(__name__)` that polluted stdout.
    try:
        pkg_version = metadata.version("salesforce-data-customcode")
        click.echo(f"salesforce-data-customcode version: {pkg_version}")
    except metadata.PackageNotFoundError:
        # Package metadata is unavailable (e.g. running from a source checkout).
        click.echo("Version information not available")
44
+
45
+
46
@cli.command()
@click.option("--profile", default="default")
@click.option("--username", prompt=True)
@click.option("--password", prompt=True, hide_input=True)
@click.option("--client-id", prompt=True)
@click.option("--client-secret", prompt=True)
@click.option("--login-url", prompt=True)
def configure(
    username: str,
    password: str,
    client_id: str,
    client_secret: str,
    login_url: str,
    profile: str,
) -> None:
    """Collect Salesforce credentials and persist them under *profile*."""
    from datacustomcode.credentials import Credentials

    credentials = Credentials(
        username=username,
        password=password,
        client_id=client_id,
        client_secret=client_secret,
        login_url=login_url,
    )
    # Write the values into the credentials ini file for later lookup.
    credentials.update_ini(profile=profile)
70
+
71
+
72
@cli.command()
@click.argument("path", default="payload")
@click.option("--network", default="default")
def zip(path: str, network: str):
    """Package the project at PATH into a deployable archive."""
    # Alias the helper so the import does not shadow this command function
    # (whose name must stay `zip` to keep the CLI command name).
    from datacustomcode.deploy import zip as deploy_zip

    logger.debug("Zipping project")
    deploy_zip(path, network)
80
+
81
+
82
@cli.command()
@click.option("--path", default="payload")
@click.option("--name", required=True)
@click.option("--version", default="0.0.1")
@click.option("--description", default="Custom Data Transform Code")
@click.option("--profile", default="default")
@click.option("--network", default="default")
@click.option(
    "--cpu-size",
    default="CPU_2XL",
    help="""CPU size for deployment. Available options:

    \b
    CPU_L - Large CPU instance
    CPU_XL - X-Large CPU instance
    CPU_2XL - 2X-Large CPU instance [DEFAULT]
    CPU_4XL - 4X-Large CPU instance

    Choose based on your workload requirements.""",
)
def deploy(
    path: str,
    name: str,
    version: str,
    description: str,
    cpu_size: str,
    profile: str,
    network: str,
):
    """Deploy the project at PATH as a Data Cloud transformation job.

    Validates the requested CPU size, loads credentials for *profile*, and
    hands everything to :func:`datacustomcode.deploy.deploy_full`.
    """
    # Merged the previously split imports from datacustomcode.deploy.
    from datacustomcode.credentials import Credentials
    from datacustomcode.deploy import (
        COMPUTE_TYPES,
        TransformationJobMetadata,
        deploy_full,
    )

    logger.debug("Deploying project")

    # Validate compute type before building any metadata or touching creds.
    # Membership test on the mapping itself (not `.keys()`) is the idiom.
    if cpu_size not in COMPUTE_TYPES:
        click.secho(
            f"Error: Invalid CPU size '{cpu_size}'. "
            f"Available options: {', '.join(COMPUTE_TYPES.keys())}",
            fg="red",
        )
        raise click.Abort()

    logger.debug(f"Deploying with CPU size: {cpu_size}")

    metadata = TransformationJobMetadata(
        name=name,
        version=version,
        description=description,
        computeType=COMPUTE_TYPES[cpu_size],
    )
    try:
        credentials = Credentials.from_available(profile=profile)
    except ValueError as e:
        click.secho(
            f"Error: {e}",
            fg="red",
        )
        raise click.Abort() from None
    deploy_full(path, metadata, credentials, network)
144
+
145
+
146
@cli.command()
@click.argument("directory", default=".")
def init(directory: str):
    """Bootstrap DIRECTORY with the project template and a generated config."""
    from datacustomcode.scan import dc_config_json_from_file
    from datacustomcode.template import copy_template

    styled_dir = click.style(directory, fg="blue", bold=True)
    click.echo("Copying template to " + styled_dir)
    copy_template(directory)

    entrypoint_path = os.path.join(directory, "payload", "entrypoint.py")
    config_location = os.path.join(os.path.dirname(entrypoint_path), "config.json")
    # Derive config.json from the freshly copied entrypoint template.
    config_json = dc_config_json_from_file(entrypoint_path)
    with open(config_location, "w") as f:
        json.dump(config_json, f, indent=2)

    styled_entrypoint = click.style(entrypoint_path, fg="blue", bold=True)
    click.echo("Start developing by updating the code in " + styled_entrypoint)
    scan_hint = click.style(f"datacustomcode scan {entrypoint_path}", fg="blue", bold=True)
    click.echo(
        "You can run "
        + scan_hint
        + " to automatically update config.json when you make changes to your code"
    )
169
+
170
+
171
@cli.command()
@click.argument("filename")
@click.option("--config")
@click.option("--dry-run", is_flag=True)
@click.option(
    "--no-requirements", is_flag=True, help="Skip generating requirements.txt file"
)
def scan(
    filename: str, config: Union[str, None], dry_run: bool, no_requirements: bool
):
    """Scan FILENAME and write the derived Data Cloud config (and requirements)."""
    from datacustomcode.scan import dc_config_json_from_file, write_requirements_file

    # --config defaults to a config.json sitting next to the scanned file.
    # (annotation corrected: click passes None when the option is omitted)
    config_location = config or os.path.join(os.path.dirname(filename), "config.json")
    click.echo(
        "Dumping scan results to config file: "
        + click.style(config_location, fg="blue", bold=True)
    )
    click.echo("Scanning " + click.style(filename, fg="blue", bold=True) + "...")
    config_json = dc_config_json_from_file(filename)

    # Always show the result; only persist it when not a dry run.
    click.secho(json.dumps(config_json, indent=2), fg="yellow")
    if not dry_run:
        with open(config_location, "w") as f:
            json.dump(config_json, f, indent=2)

    if not no_requirements:
        requirements_path = write_requirements_file(filename)
        click.echo(
            "Generated requirements file: "
            + click.style(requirements_path, fg="blue", bold=True)
        )
200
+
201
+
202
@cli.command()
@click.argument("entrypoint")
@click.option("--config-file", default=None)
# Immutable default for a multiple-value option; click collects the values
# as a tuple anyway, and a literal `[]` default is a mutable-default smell.
@click.option("--dependencies", default=(), multiple=True)
@click.option("--profile", default="default")
def run(
    entrypoint: str,
    config_file: Union[str, None],
    dependencies: List[str],
    profile: str,
):
    """Run ENTRYPOINT locally with the given config, dependencies, and profile."""
    from datacustomcode.run import run_entrypoint

    run_entrypoint(entrypoint, config_file, dependencies, profile)
@@ -0,0 +1,237 @@
1
+ # Copyright (c) 2025, Salesforce, Inc.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from __future__ import annotations
16
+
17
+ from enum import Enum
18
+ from typing import (
19
+ TYPE_CHECKING,
20
+ ClassVar,
21
+ Optional,
22
+ )
23
+
24
+ from datacustomcode.config import config
25
+ from datacustomcode.file.path.default import DefaultFindFilePath
26
+ from datacustomcode.io.reader.base import BaseDataCloudReader
27
+ from datacustomcode.spark.default import DefaultSparkSessionProvider
28
+
29
+ if TYPE_CHECKING:
30
+ from pathlib import Path
31
+
32
+ from pyspark.sql import DataFrame as PySparkDataFrame
33
+
34
+ from datacustomcode.io.reader.base import BaseDataCloudReader
35
+ from datacustomcode.io.writer.base import BaseDataCloudWriter, WriteMode
36
+ from datacustomcode.spark.base import BaseSparkSessionProvider
37
+
38
+
39
class DataCloudObjectType(Enum):
    """The two kinds of Data Cloud objects the client tracks (DLOs and DMOs)."""

    DLO = "dlo"
    DMO = "dmo"
42
+
43
+
44
class DataCloudAccessLayerException(Exception):
    """Exception raised when mixing DMOs and DLOs is detected.

    Args:
        data_layer_history: Names of objects read so far, keyed by object type.
        should_not_contain: The object type whose prior reads make the
            attempted write invalid.
    """

    def __init__(
        self,
        data_layer_history: dict[DataCloudObjectType, set[str]],
        should_not_contain: DataCloudObjectType,
    ) -> None:
        self.data_layer_history = data_layer_history
        self.should_not_contain = should_not_contain

    def __str__(self) -> str:
        # Fixed message typos: "write to to a DLO" and missing article "a DMO".
        msg = (
            "Mixed use of DMOs and DLOs. "
            "You can only read from DMOs to write to DMOs "
            "and read from DLOs to write to DLOs. "
        )
        if self.should_not_contain is DataCloudObjectType.DLO:
            msg += (
                "You have read from the following DLOs: "
                f"{self.data_layer_history[DataCloudObjectType.DLO]} "
                f"and are attempting to write to a DMO. "
            )
        else:
            msg += (
                "You have read from the following DMOs: "
                f"{self.data_layer_history[DataCloudObjectType.DMO]} "
                f"and are attempting to write to a DLO. "
            )
        msg += "Restart to clear history."
        return msg
75
+
76
+
77
class Client:
    """Entrypoint for accessing DataCloud objects.

    This is the object used to access Data Cloud DLOs and DMOs. Accessing DLOs/DMOs
    are tracked and will throw an exception if they are mixed. In other words, you
    can read from DLOs and write to DLOs, read from DMOs and write to DMOs, but you
    cannot read from DLOs and write to DMOs or read from DMOs and write to DLOs.
    Furthermore you cannot mix during merging tables. This class is a singleton to
    prevent accidental mixing of DLOs and DMOs.

    You can provide custom readers and writers to the client for advanced use
    cases, but this is not recommended for testing as they may result in unexpected
    behavior once deployed to Data Cloud. By default, the client intercepts all
    read/write operations and mocks access to Data Cloud. For example, during
    writing, we print to the console instead of writing to Data Cloud.

    Args:
        reader: A custom reader to use for reading Data Cloud objects.
        writer: A custom writer to use for writing Data Cloud objects.
        spark_provider: A custom provider for the Spark session used when a
            reader/writer has to be constructed from config.

    Example:
        >>> client = Client()
        >>> file_path = client.find_file_path("data.csv")
        >>> dlo = client.read_dlo("my_dlo")
        >>> client.write_to_dmo("my_dmo", dlo)
    """

    # Cached singleton instance; created lazily on the first construction.
    _instance: ClassVar[Optional[Client]] = None
    _reader: BaseDataCloudReader
    _writer: BaseDataCloudWriter
    _file: DefaultFindFilePath
    # Names of every DLO/DMO read so far, keyed by object type; consulted on
    # writes to reject mixed DLO/DMO usage.
    _data_layer_history: dict[DataCloudObjectType, set[str]]

    def __new__(
        cls,
        reader: Optional[BaseDataCloudReader] = None,
        writer: Optional["BaseDataCloudWriter"] = None,
        spark_provider: Optional["BaseSparkSessionProvider"] = None,
    ) -> Client:
        # Singleton: the first call builds and caches the instance; subsequent
        # calls return it unchanged and reject attempts to swap reader/writer.
        if cls._instance is None:
            cls._instance = super().__new__(cls)

            # Initialize Readers and Writers from config
            # and/or provided reader and writer
            if reader is None or writer is None:
                # We need a spark because we will initialize readers and writers
                if config.spark_config is None:
                    raise ValueError(
                        "Spark config is required when reader/writer is not provided"
                    )

                # Provider precedence: explicit argument > config > default.
                provider: BaseSparkSessionProvider
                if spark_provider is not None:
                    provider = spark_provider
                elif config.spark_provider_config is not None:
                    provider = config.spark_provider_config.to_object()
                else:
                    provider = DefaultSparkSessionProvider()

                spark = provider.get_session(config.spark_config)

            # A config with force=True overrides a caller-supplied reader/writer.
            # NOTE(review): if both reader and writer are supplied AND a force
            # flag is set, `spark` was never created above — confirm that this
            # combination cannot occur (it would raise NameError here).
            if config.reader_config is None and reader is None:
                raise ValueError(
                    "Reader config is required when reader is not provided"
                )
            elif reader is None or (
                config.reader_config is not None and config.reader_config.force
            ):
                reader_init = config.reader_config.to_object(spark)  # type: ignore
            else:
                reader_init = reader
            if config.writer_config is None and writer is None:
                raise ValueError(
                    "Writer config is required when writer is not provided"
                )
            elif writer is None or (
                config.writer_config is not None and config.writer_config.force
            ):
                writer_init = config.writer_config.to_object(spark)  # type: ignore
            else:
                writer_init = writer
            cls._instance._reader = reader_init
            cls._instance._writer = writer_init
            cls._instance._file = DefaultFindFilePath()
            # Fresh, empty access history for both object types.
            cls._instance._data_layer_history = {
                DataCloudObjectType.DLO: set(),
                DataCloudObjectType.DMO: set(),
            }
        elif (reader is not None or writer is not None) and cls._instance is not None:
            raise ValueError("Cannot set reader or writer after client is initialized")
        return cls._instance

    def read_dlo(self, name: str) -> PySparkDataFrame:
        """Read a DLO from Data Cloud.

        Args:
            name: The name of the DLO to read.

        Returns:
            A PySpark DataFrame containing the DLO data.
        """
        # Record the access first so a later DMO write can be rejected.
        self._record_dlo_access(name)
        return self._reader.read_dlo(name)

    def read_dmo(self, name: str) -> PySparkDataFrame:
        """Read a DMO from Data Cloud.

        Args:
            name: The name of the DMO to read.

        Returns:
            A PySpark DataFrame containing the DMO data.
        """
        # Record the access first so a later DLO write can be rejected.
        self._record_dmo_access(name)
        return self._reader.read_dmo(name)

    def write_to_dlo(
        self, name: str, dataframe: PySparkDataFrame, write_mode: WriteMode, **kwargs
    ) -> None:
        """Write a PySpark DataFrame to a DLO in Data Cloud.

        Args:
            name: The name of the DLO to write to.
            dataframe: The PySpark DataFrame to write.
            write_mode: The write mode to use for writing to the DLO.

        Raises:
            DataCloudAccessLayerException: if any DMO has been read previously.
        """
        self._validate_data_layer_history_does_not_contain(DataCloudObjectType.DMO)
        return self._writer.write_to_dlo(name, dataframe, write_mode, **kwargs)

    def write_to_dmo(
        self, name: str, dataframe: PySparkDataFrame, write_mode: WriteMode, **kwargs
    ) -> None:
        """Write a PySpark DataFrame to a DMO in Data Cloud.

        Args:
            name: The name of the DMO to write to.
            dataframe: The PySpark DataFrame to write.
            write_mode: The write mode to use for writing to the DMO.

        Raises:
            DataCloudAccessLayerException: if any DLO has been read previously.
        """
        self._validate_data_layer_history_does_not_contain(DataCloudObjectType.DLO)
        return self._writer.write_to_dmo(name, dataframe, write_mode, **kwargs)

    def find_file_path(self, file_name: str) -> Path:
        """Return the path for *file_name* resolved by the configured finder."""

        return self._file.find_file_path(file_name)

    def _validate_data_layer_history_does_not_contain(
        self, data_cloud_object_type: DataCloudObjectType
    ) -> None:
        """Raise if any object of *data_cloud_object_type* has been read."""
        if len(self._data_layer_history[data_cloud_object_type]) > 0:
            raise DataCloudAccessLayerException(
                self._data_layer_history, data_cloud_object_type
            )

    def _record_dlo_access(self, name: str) -> None:
        """Remember that DLO *name* was read."""
        self._data_layer_history[DataCloudObjectType.DLO].add(name)

    def _record_dmo_access(self, name: str) -> None:
        """Remember that DMO *name* was read."""
        self._data_layer_history[DataCloudObjectType.DMO].add(name)
datacustomcode/cmd.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright (c) 2025, Salesforce, Inc.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ This module is shamelessly copied from conda to nicely wrap subprocess calls.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ import contextlib
22
+ import subprocess
23
+ from typing import Any, Union
24
+
25
+
26
+ def _force_bytes(exc: Any) -> bytes:
27
+ with contextlib.suppress(TypeError):
28
+ return bytes(exc)
29
+ with contextlib.suppress(Exception):
30
+ return str(exc).encode()
31
+ return f"<unprintable {type(exc).__name__} object>".encode()
32
+
33
+
34
+ def _setdefault_kwargs(kwargs: dict[str, Any]) -> None:
35
+ for arg in ("stdin", "stdout", "stderr"):
36
+ kwargs.setdefault(arg, subprocess.PIPE)
37
+
38
+
39
def _oserror_to_output(e: OSError) -> tuple[int, bytes, None]:
    """Map an OSError from process startup onto a (returncode, stdout, stderr) triple."""
    message = _force_bytes(e).rstrip(b"\n") + b"\n"
    return 1, message, None
41
+
42
+
43
class CalledProcessError(RuntimeError):
    """Subprocess failure carrying the command, exit code, and captured output.

    Renders a readable multi-line report via ``__bytes__``/``__str__``.
    """

    def __init__(
        self,
        returncode: int,
        cmd: tuple[str, ...],
        stdout: bytes,
        stderr: Union[bytes, None],
    ) -> None:
        # Forward everything to RuntimeError so args/repr behave normally.
        super().__init__(returncode, cmd, stdout, stderr)
        self.returncode = returncode
        self.cmd = cmd
        self.stdout = stdout
        self.stderr = stderr

    def __bytes__(self) -> bytes:
        def _block(chunk: Union[bytes, None]) -> bytes:
            # Indent a captured stream one level; show "(none)" when empty.
            if not chunk:
                return b" (none)"
            return b"\n " + chunk.replace(b"\n", b"\n ").rstrip()

        header = f"command: {self.cmd!r}\nreturn code: {self.returncode}\n".encode()
        # NOTE: stdout is intentionally emitted raw; only stderr is indented.
        pieces = [
            header,
            b"stdout:",
            self.stdout,
            b"\n",
            b"stderr:",
            _block(self.stderr),
        ]
        return b"".join(pieces)

    def __str__(self) -> str:
        return self.__bytes__().decode()
80
+
81
+
82
def _cmd_output(
    *cmd: str,
    check: bool = True,
    **kwargs: Any,
) -> tuple[int, bytes, Union[bytes, None]]:
    """Run *cmd* via subprocess and return ``(returncode, stdout, stderr)``.

    OSErrors raised while spawning the process (e.g. executable not found) are
    converted into a synthetic ``(1, message, None)`` result instead of
    propagating.

    Raises:
        CalledProcessError: when *check* is true and the process exits non-zero.
    """
    _setdefault_kwargs(kwargs)
    try:
        # NOTE(review): defaulting to shell=True while passing *cmd* as a
        # sequence is suspicious -- on POSIX only cmd[0] reaches the shell and
        # the remaining elements become positional shell parameters. Callers
        # presumably pass the full command as a single string; confirm before
        # changing this default.
        kwargs.setdefault("shell", True)
        proc = subprocess.Popen(cmd, **kwargs)
    except OSError as e:
        returncode, stdout_b, stderr_b = _oserror_to_output(e)
    else:
        stdout_b, stderr_b = proc.communicate()
        returncode = proc.returncode
    if check and returncode:
        raise CalledProcessError(returncode, cmd, stdout_b, stderr_b)

    return returncode, stdout_b, stderr_b
100
+
101
+
102
def cmd_output(*cmd: str, **kwargs: Any) -> Union[str, None]:
    """Run *cmd* and return its decoded stdout (None when stdout was not captured).

    Keyword arguments are forwarded to ``_cmd_output``/``subprocess.Popen``.
    """
    # Return code and stderr are intentionally discarded here; _cmd_output
    # already raises CalledProcessError on failure when check=True.
    _, stdout_b, _ = _cmd_output(*cmd, **kwargs)
    return stdout_b.decode() if stdout_b is not None else None