lsst-daf-butler 29.2025.4500-py3-none-any.whl → 29.2025.4700-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lsst/daf/butler/__init__.py +0 -3
- lsst/daf/butler/_butler.py +23 -2
- lsst/daf/butler/_dataset_provenance.py +44 -9
- lsst/daf/butler/delegates/arrowtable.py +3 -1
- lsst/daf/butler/dimensions/_schema.py +2 -24
- lsst/daf/butler/direct_butler/_direct_butler.py +19 -15
- lsst/daf/butler/formatters/logs.py +2 -2
- lsst/daf/butler/logging.py +289 -109
- lsst/daf/butler/queries/_expression_strings.py +3 -3
- lsst/daf/butler/{registry/queries → queries}/expressions/__init__.py +0 -2
- lsst/daf/butler/queries/expressions/categorize.py +59 -0
- lsst/daf/butler/registry/_registry.py +1 -2
- lsst/daf/butler/registry/_registry_base.py +1 -2
- lsst/daf/butler/registry/collections/nameKey.py +20 -3
- lsst/daf/butler/registry/collections/synthIntKey.py +20 -6
- lsst/daf/butler/registry/datasets/byDimensions/_dataset_type_cache.py +12 -52
- lsst/daf/butler/registry/datasets/byDimensions/_manager.py +97 -87
- lsst/daf/butler/registry/datasets/byDimensions/tables.py +17 -12
- lsst/daf/butler/registry/interfaces/_collections.py +45 -8
- lsst/daf/butler/registry/interfaces/_datasets.py +10 -8
- lsst/daf/butler/registry/interfaces/_obscore.py +0 -5
- lsst/daf/butler/registry/managers.py +0 -22
- lsst/daf/butler/registry/obscore/_manager.py +0 -9
- lsst/daf/butler/registry/queries/__init__.py +0 -5
- lsst/daf/butler/registry/queries/_query_datasets.py +2 -2
- lsst/daf/butler/registry/sql_registry.py +5 -1
- lsst/daf/butler/registry/tests/_database.py +2 -2
- lsst/daf/butler/registry/tests/_registry.py +110 -3
- lsst/daf/butler/remote_butler/_remote_butler.py +14 -1
- lsst/daf/butler/remote_butler/server/handlers/_external.py +11 -0
- lsst/daf/butler/remote_butler/server_models.py +8 -0
- lsst/daf/butler/tests/hybrid_butler.py +4 -1
- lsst/daf/butler/version.py +1 -1
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/METADATA +1 -2
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/RECORD +52 -65
- lsst/daf/butler/_column_categorization.py +0 -83
- lsst/daf/butler/_column_tags.py +0 -210
- lsst/daf/butler/_column_type_info.py +0 -180
- lsst/daf/butler/registry/queries/_query_backend.py +0 -529
- lsst/daf/butler/registry/queries/_query_context.py +0 -474
- lsst/daf/butler/registry/queries/_readers.py +0 -348
- lsst/daf/butler/registry/queries/_sql_query_backend.py +0 -184
- lsst/daf/butler/registry/queries/_sql_query_context.py +0 -555
- lsst/daf/butler/registry/queries/butler_sql_engine.py +0 -226
- lsst/daf/butler/registry/queries/expressions/_predicate.py +0 -538
- lsst/daf/butler/registry/queries/expressions/categorize.py +0 -339
- lsst/daf/butler/registry/queries/expressions/check.py +0 -540
- lsst/daf/butler/registry/queries/expressions/normalForm.py +0 -1186
- lsst/daf/butler/registry/queries/find_first_dataset.py +0 -102
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/__init__.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/exprTree.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/parser.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/parserLex.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/parserYacc.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/ply/__init__.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/ply/lex.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/ply/yacc.py +0 -0
- /lsst/daf/butler/{registry/queries → queries}/expressions/parser/treeVisitor.py +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/WHEEL +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/entry_points.txt +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/licenses/COPYRIGHT +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/licenses/LICENSE +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/licenses/bsd_license.txt +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/licenses/gpl-v3.0.txt +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/top_level.txt +0 -0
- {lsst_daf_butler-29.2025.4500.dist-info → lsst_daf_butler-29.2025.4700.dist-info}/zip-safe +0 -0
lsst/daf/butler/__init__.py
CHANGED
@@ -41,9 +41,6 @@ from ._butler_config import *
 from ._butler_metrics import *
 from ._butler_repo_index import *
 from ._collection_type import CollectionType
-from ._column_categorization import *
-from ._column_tags import *
-from ._column_type_info import *
 from ._config import *
 from ._config_support import LookupKey
 from ._dataset_association import *
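These three wildcard exports are gone entirely (the backing modules _column_categorization.py, _column_tags.py, and _column_type_info.py are deleted below), so symbols such as DimensionKeyColumnTag no longer resolve from the top-level package. A purely illustrative compatibility sketch; the None fallback is an assumption, not upstream behavior:

# Illustrative only: this top-level name is removed in 29.2025.4700.
try:
    from lsst.daf.butler import DimensionKeyColumnTag  # importable in 29.2025.4500
except ImportError:
    DimensionKeyColumnTag = None  # deleted along with lsst/daf/butler/_column_tags.py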
lsst/daf/butler/_butler.py
CHANGED
@@ -1012,7 +1012,7 @@ class Butler(LimitedButler):  # numpydoc ignore=PR02
     @abstractmethod
     def get_dataset(
         self,
-        id: DatasetId,
+        id: DatasetId | str,
         *,
         storage_class: str | StorageClass | None = None,
         dimension_records: bool = False,
@@ -1023,7 +1023,8 @@ class Butler(LimitedButler):  # numpydoc ignore=PR02
         Parameters
         ----------
         id : `DatasetId`
-            The unique identifier for the dataset
+            The unique identifier for the dataset, as an instance of
+            `uuid.UUID` or a string containing a hexadecimal number.
         storage_class : `str` or `StorageClass` or `None`
             A storage class to use when creating the returned entry. If given
             it must be compatible with the default storage class.
@@ -1040,6 +1041,26 @@ class Butler(LimitedButler):  # numpydoc ignore=PR02
         """
         raise NotImplementedError()

+    @abstractmethod
+    def get_many_datasets(self, ids: Iterable[DatasetId | str]) -> list[DatasetRef]:
+        """Retrieve a list of dataset entries.
+
+        Parameters
+        ----------
+        ids : `~collections.abc.Iterable` [ `DatasetId` or `str` ]
+            The unique identifiers for the datasets, as instances of
+            `uuid.UUID` or strings containing a hexadecimal number.
+
+        Returns
+        -------
+        refs : `list` [ `DatasetRef` ]
+            A list containing a `DatasetRef` for each of the given dataset IDs.
+            If a dataset was not found, no error is thrown -- it is just not
+            included in the list. The returned datasets are in no particular
+            order.
+        """
+        raise NotImplementedError()
+
     @abstractmethod
     def find_dataset(
         self,
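Taken together, the get_dataset and get_many_datasets signatures above define the new lookup contract. A minimal usage sketch, assuming an existing repository at a placeholder path (the UUIDs are the illustrative ones reused from the provenance docstrings):

import uuid
from lsst.daf.butler import Butler

butler = Butler("some/repo")  # placeholder repository path

# IDs may be given as uuid.UUID instances or as hexadecimal strings.
ids = [
    "3dfd7ba5-5e35-4565-9d87-4b33880ed06c",
    uuid.UUID("d93a735b-08f0-477d-bc95-2cc32d6d898b"),
]

# One bulk call instead of one get_dataset() per ID. IDs that are not
# known to the butler are silently omitted, and order is not preserved.
refs = butler.get_many_datasets(ids)
found = {ref.id for ref in refs}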
lsst/daf/butler/_dataset_provenance.py
CHANGED
@@ -112,6 +112,8 @@ class DatasetProvenance(pydantic.BaseModel):
         sep: str = ".",
         simple_types: bool = False,
         use_upper: bool | None = None,
+        max_inputs: int | None = None,
+        store_minimalist_inputs: bool = False,
     ) -> dict[str, _PROV_TYPES]:
         """Return provenance as a flattened dictionary.

@@ -137,6 +139,13 @@ class DatasetProvenance(pydantic.BaseModel):
             character of the prefix (defined by whether `str.isupper()` returns
             true, else they will be lower case). If `False` the case will be
             lower case, and if `True` the case will be upper case.
+        max_inputs : `int` or `None`, optional
+            Maximum number of inputs to be recorded in provenance. `None`
+            results in all inputs being recorded. If the number of inputs
+            exceeds this value no input provenance will be recorded.
+        store_minimalist_inputs : `bool`, optional
+            If `True` only the ID of the input is stored along with explicit
+            extras. If `False` the run and dataset type are also recorded.

         Returns
         -------
@@ -155,7 +164,13 @@ class DatasetProvenance(pydantic.BaseModel):

         Each input dataset will have the ``id``, ``run``, and ``datasettype``
         keys as defined above (but no ``dataid`` key) with an ``input N``
-        prefix where ``N`` starts counting at 0.
+        prefix where ``N`` starts counting at 0. It is possible to drop
+        the ``datasettype`` and ``run`` to save space by using the
+        ``store_minimalist_inputs`` flag.
+
+        If there are too many inputs (see the ``max_inputs`` parameter)
+        no inputs will be recorded. The number of inputs is always recorded
+        to indicate that the inputs were dropped.

         The quantum ID, if present, will use key ``quantum``.
@@ -171,6 +186,7 @@ class DatasetProvenance(pydantic.BaseModel):
                 "lsst.butler.dataid.detector": 10,
                 "lsst.butler.dataid.instrument": "LSSTCam",
                 "lsst.butler.quantum": "d93a735b-08f0-477d-bc95-2cc32d6d898b",
+                "lsst.butler.n_inputs": 2,
                 "lsst.butler.input.0.id": "3dfd7ba5-5e35-4565-9d87-4b33880ed06c",
                 "lsst.butler.input.0.run": "other_run",
                 "lsst.butler.input.0.datasettype": "astropy_parquet",
@@ -206,12 +222,28 @@ class DatasetProvenance(pydantic.BaseModel):
         if self.quantum_id is not None:
             prov[_make_key("quantum")] = self.quantum_id if not simple_types else str(self.quantum_id)

-        for i, input in enumerate(self.inputs):
+        # Record the number of inputs so that people can determine how many
+        # there were even if they were dropped because they exceeded the
+        # allowed maximum. Do not record the count if we have a null provenance
+        # state with no ref and no inputs.
+        if ref is not None or len(self.inputs) > 0:
+            prov[_make_key("n_inputs")] = len(self.inputs)
+
+        # Remove all inputs if the maximum is exceeded. Truncating to the
+        # maximum (or auto switching to minimalist mode and increasing the
+        # maximum by 3) is not preferred.
+        inputs = (
+            self.inputs
+            if max_inputs is None or (max_inputs is not None and len(self.inputs) <= max_inputs)
+            else []
+        )
+        for i, input in enumerate(inputs):
             prov[_make_key("input", i, "id")] = input.id if not simple_types else str(input.id)
-            if input.run is not None:  # for mypy
-                prov[_make_key("input", i, "run")] = input.run
-            if input.datasetType is not None:  # for mypy
-                prov[_make_key("input", i, "datasettype")] = input.datasetType.name
+            if not store_minimalist_inputs:
+                if input.run is not None:  # for mypy
+                    prov[_make_key("input", i, "run")] = input.run
+                if input.datasetType is not None:  # for mypy
+                    prov[_make_key("input", i, "datasettype")] = input.datasetType.name

             if input.id in self.extras:
                 for xk, xv in self.extras[input.id].items():
@@ -369,7 +401,9 @@ class DatasetProvenance(pydantic.BaseModel):
         # Prefix will always include the separator if it is defined.
         prefix += sep

-        core_provenance = tuple(f"{prefix}{k}".lower() for k in ("run", "id", "datasettype", "quantum"))
+        core_provenance = tuple(
+            f"{prefix}{k}".lower() for k in ("run", "id", "datasettype", "quantum", "n_inputs")
+        )

         # Need to escape the prefix and separator for regex usage.
         esc_sep = re.escape(sep)
@@ -445,7 +479,7 @@ class DatasetProvenance(pydantic.BaseModel):

         quantum_id = None
         ref_id = None
-        input_ids = {}
+        input_ids: dict[int, uuid.UUID] = {}
         extras: dict[int, dict[str, Any]] = {}

         for k, standard in prov_keys.items():
@@ -475,8 +509,9 @@ class DatasetProvenance(pydantic.BaseModel):

         provenance = cls(quantum_id=quantum_id)

+        input_refs = {ref.id: ref for ref in butler.get_many_datasets(input_ids.values())}
         for i in sorted(input_ids):
-            input_ref = butler.get_dataset(input_ids[i])
+            input_ref = input_refs.get(input_ids[i])
             if input_ref is None:
                 raise ValueError(f"Input dataset ({input_ids[i]}) is not known to this butler.")
             provenance.add_input(input_ref)
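A short sketch of how the two new to_flat_dict options interact with the n_inputs key, assuming prov is a DatasetProvenance with three recorded inputs and ref is the corresponding output DatasetRef (both placeholders); the arrowtable delegate below uses the same mechanism with max_inputs=2000:

# Default: id, run, and datasettype are written for every input, and
# the new n_inputs key records the count.
flat = prov.to_flat_dict(ref, prefix="lsst.butler", sep=".", simple_types=True)
assert flat["lsst.butler.n_inputs"] == 3

# Minimalist inputs: only each input's ID (plus explicit extras) is kept.
small = prov.to_flat_dict(ref, prefix="lsst.butler", sep=".", store_minimalist_inputs=True)
assert "lsst.butler.input.0.run" not in small

# Capped: three inputs exceed max_inputs=2, so no per-input keys are
# written at all, but n_inputs still reports how many were dropped.
capped = prov.to_flat_dict(ref, prefix="lsst.butler", sep=".", max_inputs=2)
assert capped["lsst.butler.n_inputs"] == 3
assert "lsst.butler.input.0.id" not in capped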
lsst/daf/butler/delegates/arrowtable.py
CHANGED
@@ -229,7 +229,9 @@ def _add_arrow_provenance(
     type_string = _checkArrowCompatibleType(in_memory_dataset)
     if type_string == "astropy":
        provenance = provenance if provenance is not None else DatasetProvenance()
-        prov_dict = provenance.to_flat_dict(ref, prefix="LSST.BUTLER", sep=".", simple_types=True)
+        prov_dict = provenance.to_flat_dict(
+            ref, prefix="LSST.BUTLER", sep=".", simple_types=True, max_inputs=2000
+        )

         # Strip any previous provenance.
         DatasetProvenance.strip_provenance_from_flat_dict(in_memory_dataset.meta)
lsst/daf/butler/dimensions/_schema.py
CHANGED
@@ -29,20 +29,17 @@ from __future__ import annotations
 __all__ = ("DimensionRecordSchema", "addDimensionForeignKey")

 import copy
-from collections.abc import Mapping, Set
+from collections.abc import Set
 from typing import TYPE_CHECKING

-from lsst.utils.classes import cached_getter, immutable
+from lsst.utils.classes import immutable

 from .. import arrow_utils, ddl
-from .._column_tags import DimensionKeyColumnTag, DimensionRecordColumnTag
 from .._named import NamedValueAbstractSet, NamedValueSet
 from ..column_spec import RegionColumnSpec, TimespanColumnSpec
 from ..timespan_database_representation import TimespanDatabaseRepresentation

 if TYPE_CHECKING:  # Imports needed only for type annotations; may be circular.
-    from lsst.daf.relation import ColumnTag
-
     from ._elements import Dimension, DimensionElement, KeyColumnSpec, MetadataColumnSpec
     from ._group import DimensionGroup
@@ -387,25 +384,6 @@ class DimensionElementFields:
         lines.append("    timespan: lsst.daf.butler.Timespan")
         return "\n".join(lines)

-    @property
-    @cached_getter
-    def columns(self) -> Mapping[ColumnTag, str]:
-        """A mapping from `ColumnTag` to field name for all fields in this
-        element's records (`~collections.abc.Mapping`).
-        """
-        result: dict[ColumnTag, str] = {}
-        for dimension_name, field_name in zip(
-            self.element.dimensions.names, self.dimensions.names, strict=True
-        ):
-            result[DimensionKeyColumnTag(dimension_name)] = field_name
-        for field_name in self.facts.names:
-            result[DimensionRecordColumnTag(self.element.name, field_name)] = field_name
-        if self.element.spatial:
-            result[DimensionRecordColumnTag(self.element.name, "region")] = "region"
-        if self.element.temporal:
-            result[DimensionRecordColumnTag(self.element.name, "timespan")] = "timespan"
-        return result
-
     element: DimensionElement
     """The dimension element these fields correspond to.
lsst/daf/butler/direct_butler/_direct_butler.py
CHANGED
@@ -1256,12 +1256,13 @@ class DirectButler(Butler):  # numpydoc ignore=PR02

     def get_dataset(
         self,
-        id: DatasetId,
+        id: DatasetId | str,
         *,
         storage_class: str | StorageClass | None = None,
         dimension_records: bool = False,
         datastore_records: bool = False,
     ) -> DatasetRef | None:
+        id = _to_uuid(id)
         ref = self._registry.getDataset(id)
         if ref is not None:
             if dimension_records:
@@ -1274,6 +1275,10 @@ class DirectButler(Butler):  # numpydoc ignore=PR02
                 ref = self._registry.get_datastore_records(ref)
         return ref

+    def get_many_datasets(self, ids: Iterable[DatasetId | str]) -> list[DatasetRef]:
+        uuids = [_to_uuid(id) for id in ids]
+        return self._registry._managers.datasets.get_dataset_refs(uuids)
+
     def find_dataset(
         self,
         dataset_type: DatasetType | str,
@@ -1411,20 +1416,12 @@ class DirectButler(Butler):  # numpydoc ignore=PR02
         # Docstring inherited.
         existence = {ref: DatasetExistence.UNRECOGNIZED for ref in refs}

-        # Registry does not have a bulk API to check for a ref.
-        for ref in refs:
-            registry_ref = self._registry.getDataset(ref.id)
-            if registry_ref is not None:
-                # It is possible, albeit unlikely, that the given ref does
-                # not match the one in the registry even though the UUID agrees.
-                # When checking a single ref we raise, but it's impolite to
-                # do that when potentially hundreds of refs are being checked.
-                # We could change the API to only accept UUIDs and that would
-                # remove the ability to even check and remove the worry
-                # about differing storage classes. Given the ongoing discussion
-                # on refs vs UUIDs and whether to raise or have a new
-                # private flag, treat this as a private API for now.
-                existence[ref] |= DatasetExistence.RECORDED
+        # Check which refs exist in the registry.
+        id_map = {ref.id: ref for ref in existence.keys()}
+        for registry_ref in self.get_many_datasets(id_map.keys()):
+            # Consistency between the given DatasetRef and the information
+            # recorded in the registry is not verified.
+            existence[id_map[registry_ref.id]] |= DatasetExistence.RECORDED

         # Ask datastore if it knows about these refs.
         knows = self._datastore.knows_these(refs)
@@ -2610,3 +2607,10 @@ class _ImportDatasetsInfo(NamedTuple):

     grouped_refs: defaultdict[_RefGroup, list[DatasetRef]]
     dimension_records: dict[DimensionElement, dict[DataCoordinate, DimensionRecord]]
+
+
+def _to_uuid(id: DatasetId | str) -> uuid.UUID:
+    if isinstance(id, uuid.UUID):
+        return id
+    else:
+        return uuid.UUID(id)
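The module-level _to_uuid helper added at the bottom of the file is what lets the DirectButler methods above accept either identifier form. A small illustration (repository path and dataset ID are placeholders):

import uuid
from lsst.daf.butler import Butler

butler = Butler("some/repo")  # placeholder repository path
hex_id = "3dfd7ba5-5e35-4565-9d87-4b33880ed06c"

# get_dataset() now normalizes its argument through _to_uuid(), so the
# two calls below are equivalent; a malformed hex string raises the
# usual ValueError from uuid.UUID().
ref_from_str = butler.get_dataset(hex_id)
ref_from_uuid = butler.get_dataset(uuid.UUID(hex_id))
assert (ref_from_str is None) == (ref_from_uuid is None)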
lsst/daf/butler/formatters/logs.py
CHANGED
@@ -66,5 +66,5 @@ class ButlerLogRecordsFormatter(FormatterV2):
         # over pre-downloading the whole file (which can be very large).
         return self._get_read_pytype().from_file(path)

-    def to_bytes(self, in_memory_dataset:
-        return in_memory_dataset.
+    def to_bytes(self, in_memory_dataset: ButlerLogRecords) -> bytes:
+        return in_memory_dataset.to_json_data().encode()
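For context, a sketch of the serialization path the rewritten to_bytes takes. The to_json_data() call is taken from the diff above; constructing an empty container via from_records is an assumption about the ButlerLogRecords API rather than something shown in this diff:

from lsst.daf.butler.logging import ButlerLogRecords

# Assumed constructor: build an empty log-record container, then
# serialize it the same way ButlerLogRecordsFormatter.to_bytes now does.
records = ButlerLogRecords.from_records([])
payload = records.to_json_data().encode()
assert isinstance(payload, bytes)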