streamlit-nightly 1.21.1.dev20230423__py2.py3-none-any.whl → 1.21.1.dev20230425__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- streamlit/__init__.py +0 -7
- streamlit/elements/arrow.py +7 -264
- streamlit/elements/data_editor.py +109 -100
- streamlit/elements/file_uploader.py +17 -0
- streamlit/elements/layouts.py +0 -5
- streamlit/elements/lib/column_config_utils.py +371 -0
- streamlit/elements/lib/pandas_styler_utils.py +275 -0
- streamlit/runtime/connection_factory.py +5 -5
- streamlit/static/asset-manifest.json +20 -20
- streamlit/static/index.html +1 -1
- streamlit/static/static/js/{14.a19a6cd8.chunk.js → 14.9399e424.chunk.js} +1 -1
- streamlit/static/static/js/{227.087adf66.chunk.js → 227.9ccac1d5.chunk.js} +1 -1
- streamlit/static/static/js/{242.0daf8b47.chunk.js → 242.1b3289e0.chunk.js} +1 -1
- streamlit/static/static/js/{279.fdac58fc.chunk.js → 279.35b01780.chunk.js} +1 -1
- streamlit/static/static/js/{289.481fd42d.chunk.js → 289.e6157e40.chunk.js} +1 -1
- streamlit/static/static/js/{467.242e14ff.chunk.js → 467.50ac84df.chunk.js} +1 -1
- streamlit/static/static/js/{491.d0b710e9.chunk.js → 491.5a33a8ce.chunk.js} +1 -1
- streamlit/static/static/js/503.15864587.chunk.js +1 -0
- streamlit/static/static/js/{511.9f04ae9e.chunk.js → 511.e6ca580f.chunk.js} +1 -1
- streamlit/static/static/js/{578.ceaadcd5.chunk.js → 578.a65fcea0.chunk.js} +1 -1
- streamlit/static/static/js/{619.365611c8.chunk.js → 619.0325af0e.chunk.js} +1 -1
- streamlit/static/static/js/{628.7f41e2de.chunk.js → 628.9c70196b.chunk.js} +1 -1
- streamlit/static/static/js/{681.a2ba76c7.chunk.js → 681.9e30a8cd.chunk.js} +1 -1
- streamlit/static/static/js/{745.e2bcf16d.chunk.js → 745.e75ba963.chunk.js} +1 -1
- streamlit/static/static/js/{807.6789990f.chunk.js → 807.122f8b05.chunk.js} +1 -1
- streamlit/static/static/js/{828.096c1ad3.chunk.js → 828.0fde3da8.chunk.js} +1 -1
- streamlit/static/static/js/{871.ba625aee.chunk.js → 871.90a7dbae.chunk.js} +1 -1
- streamlit/static/static/js/{main.5e4731c6.js → main.ff35bd72.js} +2 -2
- streamlit/testing/element_tree.py +426 -548
- streamlit/type_util.py +19 -7
- {streamlit_nightly-1.21.1.dev20230423.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/METADATA +1 -1
- {streamlit_nightly-1.21.1.dev20230423.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/RECORD +38 -37
- streamlit/elements/show.py +0 -105
- streamlit/static/static/js/728.82770810.chunk.js +0 -1
- /streamlit/static/static/css/{728.23fa976d.chunk.css → 503.23fa976d.chunk.css} +0 -0
- /streamlit/static/static/js/{main.5e4731c6.js.LICENSE.txt → main.ff35bd72.js.LICENSE.txt} +0 -0
- {streamlit_nightly-1.21.1.dev20230423.data → streamlit_nightly-1.21.1.dev20230425.data}/scripts/streamlit.cmd +0 -0
- {streamlit_nightly-1.21.1.dev20230423.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/WHEEL +0 -0
- {streamlit_nightly-1.21.1.dev20230423.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/entry_points.txt +0 -0
- {streamlit_nightly-1.21.1.dev20230423.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/top_level.txt +0 -0
streamlit/__init__.py
CHANGED
@@ -71,7 +71,6 @@ from streamlit.commands.query_params import (
     get_query_params as _get_query_params,
     set_query_params as _set_query_params,
 )
-from streamlit.elements.show import show as _show
 
 # Modules that the user should have access to. These are imported with "as"
 # syntax pass mypy checking with implicit_reexport disabled.
@@ -201,18 +200,12 @@ session_state = _SessionStateProxy()
 cache_data = _cache_data
 cache_resource = _cache_resource
 
-# Beta APIs
-beta_container = _gather_metrics("beta_container", _main.beta_container)
-beta_expander = _gather_metrics("beta_expander", _main.beta_expander)
-beta_columns = _gather_metrics("beta_columns", _main.beta_columns)
-
 # Experimental APIs
 experimental_user = _UserInfoProxy()
 experimental_singleton = _experimental_singleton
 experimental_memo = _experimental_memo
 experimental_get_query_params = _get_query_params
 experimental_set_query_params = _set_query_params
-experimental_show = _show
 experimental_rerun = _rerun
 experimental_data_editor = _main.experimental_data_editor
 experimental_connection = _connection_factory
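
The removed beta_* aliases and st.experimental_show have stable equivalents that remain available; a minimal migration sketch (illustrative only, not part of this diff, assuming a typical Streamlit script):

    import pandas as pd
    import streamlit as st

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

    # st.beta_container / st.beta_expander / st.beta_columns were aliases
    # for the long-stable layout APIs:
    left, right = st.columns(2)
    with st.container():
        st.write("inside a container")
    with st.expander("details"):
        st.write(df)

    # st.experimental_show was a debugging helper; st.write covers the same use case.
    st.write(df)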
streamlit/elements/arrow.py
CHANGED
@@ -13,31 +13,25 @@
 # limitations under the License.
 
 from collections.abc import Iterable
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    List,
-    Mapping,
-    Optional,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
 
 import pyarrow as pa
 from numpy import ndarray
 from pandas import DataFrame
-from pandas.io.formats.style import Styler
 
 from streamlit import type_util
+from streamlit.elements.lib.pandas_styler_utils import marshall_styler
 from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
 from streamlit.runtime.metrics_util import gather_metrics
 
 if TYPE_CHECKING:
+    from pandas.io.formats.style import Styler
+
     from streamlit.delta_generator import DeltaGenerator
 
-Data = Union[DataFrame, Styler, pa.Table, ndarray, Iterable, Dict[str, List[Any]], None]
+Data = Union[
+    DataFrame, "Styler", pa.Table, ndarray, Iterable, Dict[str, List[Any]], None
+]
 
 
 class ArrowMixin:
@@ -195,254 +189,3 @@ def marshall(proto: ArrowProto, data: Data, default_uuid: Optional[str] = None)
     else:
         df = type_util.convert_anything_to_df(data)
         proto.data = type_util.data_frame_to_bytes(df)
-
-
-def marshall_styler(proto: ArrowProto, styler: Styler, default_uuid: str) -> None:
-    """Marshall pandas.Styler into an Arrow proto.
-
-    Parameters
-    ----------
-    proto : proto.Arrow
-        Output. The protobuf for Streamlit Arrow proto.
-
-    styler : pandas.Styler
-        Helps style a DataFrame or Series according to the data with HTML and CSS.
-
-    default_uuid : str
-        If pandas.Styler uuid is not provided, this value will be used.
-
-    """
-    # pandas.Styler uuid should be set before _compute is called.
-    _marshall_uuid(proto, styler, default_uuid)
-
-    # We're using protected members of pandas.Styler to get styles,
-    # which is not ideal and could break if the interface changes.
-    styler._compute()
-
-    # In Pandas 1.3.0, styler._translate() signature was changed.
-    # 2 arguments were added: sparse_index and sparse_columns.
-    # The functionality that they provide is not yet supported.
-    if type_util.is_pandas_version_less_than("1.3.0"):
-        pandas_styles = styler._translate()
-    else:
-        pandas_styles = styler._translate(False, False)
-
-    _marshall_caption(proto, styler)
-    _marshall_styles(proto, styler, pandas_styles)
-    _marshall_display_values(proto, styler.data, pandas_styles)
-
-
-def _marshall_uuid(proto: ArrowProto, styler: Styler, default_uuid: str) -> None:
-    """Marshall pandas.Styler uuid into an Arrow proto.
-
-    Parameters
-    ----------
-    proto : proto.Arrow
-        Output. The protobuf for Streamlit Arrow proto.
-
-    styler : pandas.Styler
-        Helps style a DataFrame or Series according to the data with HTML and CSS.
-
-    default_uuid : str
-        If pandas.Styler uuid is not provided, this value will be used.
-
-    """
-    if styler.uuid is None:
-        styler.set_uuid(default_uuid)
-
-    proto.styler.uuid = str(styler.uuid)
-
-
-def _marshall_caption(proto: ArrowProto, styler: Styler) -> None:
-    """Marshall pandas.Styler caption into an Arrow proto.
-
-    Parameters
-    ----------
-    proto : proto.Arrow
-        Output. The protobuf for Streamlit Arrow proto.
-
-    styler : pandas.Styler
-        Helps style a DataFrame or Series according to the data with HTML and CSS.
-
-    """
-    if styler.caption is not None:
-        proto.styler.caption = styler.caption
-
-
-def _marshall_styles(
-    proto: ArrowProto, styler: Styler, styles: Mapping[str, Any]
-) -> None:
-    """Marshall pandas.Styler styles into an Arrow proto.
-
-    Parameters
-    ----------
-    proto : proto.Arrow
-        Output. The protobuf for Streamlit Arrow proto.
-
-    styler : pandas.Styler
-        Helps style a DataFrame or Series according to the data with HTML and CSS.
-
-    styles : dict
-        pandas.Styler translated styles.
-
-    """
-    css_rules = []
-
-    if "table_styles" in styles:
-        table_styles = styles["table_styles"]
-        table_styles = _trim_pandas_styles(table_styles)
-        for style in table_styles:
-            # styles in "table_styles" have a space
-            # between the uuid and selector.
-            rule = _pandas_style_to_css(
-                "table_styles", style, styler.uuid, separator=" "
-            )
-            css_rules.append(rule)
-
-    if "cellstyle" in styles:
-        cellstyle = styles["cellstyle"]
-        cellstyle = _trim_pandas_styles(cellstyle)
-        for style in cellstyle:
-            rule = _pandas_style_to_css("cell_style", style, styler.uuid)
-            css_rules.append(rule)
-
-    if len(css_rules) > 0:
-        proto.styler.styles = "\n".join(css_rules)
-
-
-M = TypeVar("M", bound=Mapping[str, Any])
-
-
-def _trim_pandas_styles(styles: List[M]) -> List[M]:
-    """Filter out empty styles.
-
-    Every cell will have a class, but the list of props
-    may just be [['', '']].
-
-    Parameters
-    ----------
-    styles : list
-        pandas.Styler translated styles.
-
-    """
-    return [x for x in styles if any(any(y) for y in x["props"])]
-
-
-def _pandas_style_to_css(
-    style_type: str,
-    style: Mapping[str, Any],
-    uuid: str,
-    separator: str = "",
-) -> str:
-    """Convert pandas.Styler translated style to CSS.
-
-    Parameters
-    ----------
-    style_type : str
-        Either "table_styles" or "cell_style".
-
-    style : dict
-        pandas.Styler translated style.
-
-    uuid : str
-        pandas.Styler uuid.
-
-    separator : str
-        A string separator used between table and cell selectors.
-
-    """
-    declarations = []
-    for css_property, css_value in style["props"]:
-        declaration = css_property.strip() + ": " + css_value.strip()
-        declarations.append(declaration)
-
-    table_selector = f"#T_{uuid}"
-
-    # In pandas < 1.1.0
-    # translated_style["cellstyle"] has the following shape:
-    # [
-    #   {
-    #       "props": [["color", " black"], ["background-color", "orange"], ["", ""]],
-    #       "selector": "row0_col0"
-    #   }
-    #   ...
-    # ]
-    #
-    # In pandas >= 1.1.0
-    # translated_style["cellstyle"] has the following shape:
-    # [
-    #   {
-    #       "props": [("color", " black"), ("background-color", "orange"), ("", "")],
-    #       "selectors": ["row0_col0"]
-    #   }
-    #   ...
-    # ]
-    if style_type == "table_styles" or (
-        style_type == "cell_style" and type_util.is_pandas_version_less_than("1.1.0")
-    ):
-        cell_selectors = [style["selector"]]
-    else:
-        cell_selectors = style["selectors"]
-
-    selectors = []
-    for cell_selector in cell_selectors:
-        selectors.append(table_selector + separator + cell_selector)
-    selector = ", ".join(selectors)
-
-    declaration_block = "; ".join(declarations)
-    rule_set = selector + " { " + declaration_block + " }"
-
-    return rule_set
-
-
-def _marshall_display_values(
-    proto: ArrowProto, df: DataFrame, styles: Mapping[str, Any]
-) -> None:
-    """Marshall pandas.Styler display values into an Arrow proto.
-
-    Parameters
-    ----------
-    proto : proto.Arrow
-        Output. The protobuf for Streamlit Arrow proto.
-
-    df : pandas.DataFrame
-        A dataframe with original values.
-
-    styles : dict
-        pandas.Styler translated styles.
-
-    """
-    new_df = _use_display_values(df, styles)
-    proto.styler.display_values = type_util.data_frame_to_bytes(new_df)
-
-
-def _use_display_values(df: DataFrame, styles: Mapping[str, Any]) -> DataFrame:
-    """Create a new pandas.DataFrame where display values are used instead of original ones.
-
-    Parameters
-    ----------
-    df : pandas.DataFrame
-        A dataframe with original values.
-
-    styles : dict
-        pandas.Styler translated styles.
-
-    """
-    import re
-
-    # If values in a column are not of the same type, Arrow
-    # serialization would fail. Thus, we need to cast all values
-    # of the dataframe to strings before assigning them display values.
-    new_df = df.astype(str)
-
-    cell_selector_regex = re.compile(r"row(\d+)_col(\d+)")
-    if "body" in styles:
-        rows = styles["body"]
-        for row in rows:
-            for cell in row:
-                match = cell_selector_regex.match(cell["id"])
-                if match:
-                    r, c = map(int, match.groups())
-                    new_df.iat[r, c] = str(cell["display_value"])
-
-    return new_df
streamlit/elements/data_editor.py
CHANGED
@@ -14,7 +14,6 @@
 
 from __future__ import annotations
 
-import contextlib
 import json
 from dataclasses import dataclass
 from typing import (
@@ -34,13 +33,20 @@ from typing import (
 
 import pandas as pd
 import pyarrow as pa
-from
-from pandas.io.formats.style import Styler
-from typing_extensions import Final, Literal, TypeAlias, TypedDict
+from typing_extensions import Literal, TypeAlias, TypedDict
 
+from streamlit import logger as _logger
 from streamlit import type_util
-from streamlit.elements.arrow import marshall_styler
 from streamlit.elements.form import current_form_id
+from streamlit.elements.lib.column_config_utils import (
+    INDEX_IDENTIFIER,
+    ColumnConfigMapping,
+    ColumnDataKind,
+    DataframeSchema,
+    determine_dataframe_schema,
+    marshall_column_config,
+)
+from streamlit.elements.lib.pandas_styler_utils import marshall_styler
 from streamlit.errors import StreamlitAPIException
 from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
 from streamlit.runtime.metrics_util import gather_metrics
@@ -55,10 +61,11 @@ from streamlit.type_util import DataFormat, DataFrameGenericAlias, Key, is_type,
 
 if TYPE_CHECKING:
     import numpy as np
+    from pandas.io.formats.style import Styler
 
     from streamlit.delta_generator import DeltaGenerator
 
-
+_LOGGER = _logger.get_logger("root")
 
 # All formats that support direct editing, meaning that these
 # formats will be returned with the same type when used with data_editor.
@@ -81,7 +88,7 @@ EditableData = TypeVar(
 DataTypes: TypeAlias = Union[
     pd.DataFrame,
     pd.Index,
-    Styler,
+    "Styler",
     pa.Table,
     "np.ndarray[Any, np.dtype[np.float64]]",
     Tuple[Any],
@@ -91,25 +98,6 @@ DataTypes: TypeAlias = Union[
 ]
 
 
-class ColumnConfig(TypedDict, total=False):
-    width: Optional[int]
-    title: Optional[str]
-    type: Optional[
-        Literal[
-            "text",
-            "number",
-            "boolean",
-            "list",
-            "categorical",
-        ]
-    ]
-    hidden: Optional[bool]
-    editable: Optional[bool]
-    alignment: Optional[Literal["left", "center", "right"]]
-    metadata: Optional[Dict[str, Any]]
-    column: Optional[Union[str, int]]
-
-
 class EditingState(TypedDict, total=False):
     """
     A dictionary representing the current state of the data editor.
@@ -133,44 +121,6 @@ class EditingState(TypedDict, total=False):
     deleted_rows: List[int]
 
 
-# A mapping of column names/IDs to column configs.
-ColumnConfigMapping: TypeAlias = Dict[Union[int, str], ColumnConfig]
-
-
-def _marshall_column_config(
-    proto: ArrowProto, columns: Optional[Dict[Union[int, str], ColumnConfig]] = None
-) -> None:
-    """Marshall the column config into the proto.
-
-    Parameters
-    ----------
-    proto : ArrowProto
-        The proto to marshall into.
-
-    columns : Optional[ColumnConfigMapping]
-        The column config to marshall.
-    """
-    if columns is None:
-        columns = {}
-
-    # Ignore all None values and prefix columns specified by index
-    def remove_none_values(input_dict: Dict[Any, Any]) -> Dict[Any, Any]:
-        new_dict = {}
-        for key, val in input_dict.items():
-            if isinstance(val, dict):
-                val = remove_none_values(val)
-            if val is not None:
-                new_dict[key] = val
-        return new_dict
-
-    proto.columns = json.dumps(
-        {
-            (f"col:{str(k)}" if isinstance(k, int) else k): v
-            for (k, v) in remove_none_values(columns).items()
-        }
-    )
-
-
 @dataclass
 class DataEditorSerde:
     """DataEditorSerde is used to serialize and deserialize the data editor state."""
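
The removed _marshall_column_config documents the wire format used for column configs: None values are stripped recursively, integer keys are prefixed with "col:", and the result is JSON-encoded into proto.columns. A self-contained sketch of that serialization (local stand-in for illustration, not the replacement marshall_column_config):

    import json
    from typing import Any, Dict

    def remove_none_values(input_dict: Dict[Any, Any]) -> Dict[Any, Any]:
        # Drop None values, recursing into nested dicts.
        new_dict = {}
        for key, val in input_dict.items():
            if isinstance(val, dict):
                val = remove_none_values(val)
            if val is not None:
                new_dict[key] = val
        return new_dict

    columns = {0: {"hidden": True, "title": None}, "price": {"width": 120}}
    encoded = json.dumps(
        {(f"col:{k}" if isinstance(k, int) else k): v
         for k, v in remove_none_values(columns).items()}
    )
    print(encoded)  # {"col:0": {"hidden": true}, "price": {"width": 120}}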
@@ -190,7 +140,10 @@ class DataEditorSerde:
         return json.dumps(editing_state, default=str)
 
 
-def _parse_value(value: Union[str, int, float, bool, None], dtype) -> Any:
+def _parse_value(
+    value: str | int | float | bool | None,
+    column_data_kind: ColumnDataKind,
+) -> Any:
     """Convert a value to the correct type.
 
     Parameters
@@ -198,8 +151,9 @@ def _parse_value(value: Union[str, int, float, bool, None], dtype) -> Any:
     value : str | int | float | bool | None
         The value to convert.
 
-
-        The
+    column_data_kind : ColumnDataKind
+        The determined data kind of the column. The column data kind refers to the
+        shared data type of the values in the column (e.g. integer, float, string).
 
     Returns
     -------
@@ -208,23 +162,53 @@ def _parse_value(value: Union[str, int, float, bool, None], dtype) -> Any:
     if value is None:
         return None
 
-
+    try:
+        if column_data_kind == ColumnDataKind.STRING:
+            return str(value)
 
-
-    # The value is expected to be a ISO 8601 string
-    if is_datetime64_any_dtype(dtype):
-        return pd.to_datetime(value, errors="ignore")
-    elif is_integer_dtype(dtype):
-        with contextlib.suppress(ValueError):
+        if column_data_kind == ColumnDataKind.INTEGER:
             return int(value)
-
-
+
+        if column_data_kind == ColumnDataKind.FLOAT:
             return float(value)
+
+        if column_data_kind == ColumnDataKind.BOOLEAN:
+            return bool(value)
+
+        if column_data_kind in [
+            ColumnDataKind.DATETIME,
+            ColumnDataKind.DATE,
+            ColumnDataKind.TIME,
+        ]:
+            datetime_value = pd.to_datetime(value, utc=False)
+
+            if datetime_value is pd.NaT:
+                return None
+
+            if isinstance(datetime_value, pd.Timestamp):
+                datetime_value = datetime_value.to_pydatetime()
+
+            if column_data_kind == ColumnDataKind.DATETIME:
+                return datetime_value
+
+            if column_data_kind == ColumnDataKind.DATE:
+                return datetime_value.date()
+
+            if column_data_kind == ColumnDataKind.TIME:
+                return datetime_value.time()
+
+    except (ValueError, pd.errors.ParserError) as ex:
+        _LOGGER.warning(
+            "Failed to parse value %s as %s. Exception: %s", value, column_data_kind, ex
+        )
+        return None
     return value
 
 
 def _apply_cell_edits(
-    df: pd.DataFrame,
+    df: pd.DataFrame,
+    edited_cells: Mapping[str, str | int | float | bool | None],
+    dataframe_schema: DataframeSchema,
 ) -> None:
     """Apply cell edits to the provided dataframe (inplace).
 
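
The rewritten _parse_value dispatches on a ColumnDataKind (the shared data kind of a column) instead of a pandas dtype, so cell edits arriving from the frontend as JSON scalars are coerced per column kind. A standalone sketch of the same dispatch pattern, using a local enum for illustration (the real enum lives in streamlit/elements/lib/column_config_utils.py):

    from enum import Enum

    import pandas as pd

    class Kind(Enum):
        STRING = "string"
        INTEGER = "integer"
        FLOAT = "float"
        BOOLEAN = "boolean"
        DATETIME = "datetime"

    def parse_value(value, kind: Kind):
        # Frontend edits arrive as JSON scalars (str/int/float/bool/None)
        # and are coerced to the column's kind, mirroring the diff above.
        if value is None:
            return None
        try:
            if kind is Kind.STRING:
                return str(value)
            if kind is Kind.INTEGER:
                return int(value)
            if kind is Kind.FLOAT:
                return float(value)
            if kind is Kind.BOOLEAN:
                return bool(value)
            if kind is Kind.DATETIME:
                ts = pd.to_datetime(value, utc=False)
                return None if ts is pd.NaT else ts.to_pydatetime()
        except (ValueError, pd.errors.ParserError):
            return None
        return value

    print(parse_value("42", Kind.INTEGER))           # 42
    print(parse_value("2023-04-25", Kind.DATETIME))  # datetime.datetime(2023, 4, 25, 0, 0)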
@@ -237,6 +221,8 @@ def _apply_cell_edits(
         A dictionary of cell edits. The keys are the cell ids in the format
         "row:column" and the values are the new cell values.
 
+    dataframe_schema: DataframeSchema
+        The schema of the dataframe.
     """
     index_count = df.index.nlevels or 0
 
@@ -247,17 +233,21 @@ def _apply_cell_edits(
             # The edited cell is part of the index
             # To support multi-index in the future: use a tuple of values here
             # instead of a single value
-            df.index.values[row_pos] = _parse_value(value,
+            df.index.values[row_pos] = _parse_value(value, dataframe_schema[col_pos])
         else:
             # We need to subtract the number of index levels from col_pos
             # to get the correct column position for Pandas DataFrames
             mapped_column = col_pos - index_count
             df.iat[row_pos, mapped_column] = _parse_value(
-                value,
+                value, dataframe_schema[col_pos]
             )
 
 
-def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) -> None:
+def _apply_row_additions(
+    df: pd.DataFrame,
+    added_rows: List[Dict[str, Any]],
+    dataframe_schema: DataframeSchema,
+) -> None:
     """Apply row additions to the provided dataframe (inplace).
 
     Parameters
@@ -268,6 +258,9 @@ def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) ->
     added_rows : List[Dict[str, Any]]
         A list of row additions. Each row addition is a dictionary with the
         column position as key and the new cell value as value.
+
+    dataframe_schema: DataframeSchema
+        The schema of the dataframe.
     """
     if not added_rows:
         return
@@ -279,7 +272,7 @@ def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) ->
     # combination with loc. As a workaround, we manually track the values here:
     range_index_stop = None
     range_index_step = None
-    if
+    if isinstance(df.index, pd.RangeIndex):
         range_index_stop = df.index.stop
         range_index_step = df.index.step
 
@@ -292,14 +285,12 @@ def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) ->
             if col_pos < index_count:
                 # To support multi-index in the future: use a tuple of values here
                 # instead of a single value
-                index_value = _parse_value(value,
+                index_value = _parse_value(value, dataframe_schema[col_pos])
             else:
                 # We need to subtract the number of index levels from the col_pos
                 # to get the correct column position for Pandas DataFrames
                 mapped_column = col_pos - index_count
-                new_row[mapped_column] = _parse_value(
-                    value, df.iloc[:, mapped_column].dtype
-                )
+                new_row[mapped_column] = _parse_value(value, dataframe_schema[col_pos])
         # Append the new row to the dataframe
         if range_index_stop is not None:
             df.loc[range_index_stop, :] = new_row
@@ -329,7 +320,11 @@ def _apply_row_deletions(df: pd.DataFrame, deleted_rows: List[int]) -> None:
     df.drop(df.index[deleted_rows], inplace=True)
 
 
-def _apply_dataframe_edits(df: pd.DataFrame, data_editor_state: EditingState) -> None:
+def _apply_dataframe_edits(
+    df: pd.DataFrame,
+    data_editor_state: EditingState,
+    dataframe_schema: DataframeSchema,
+) -> None:
     """Apply edits to the provided dataframe (inplace).
 
     This includes cell edits, row additions and row deletions.
@@ -341,12 +336,15 @@ def _apply_dataframe_edits(df: pd.DataFrame, data_editor_state: EditingState) ->
 
     data_editor_state : EditingState
         The editing state of the data editor component.
+
+    dataframe_schema: DataframeSchema
+        The schema of the dataframe.
     """
     if data_editor_state.get("edited_cells"):
-        _apply_cell_edits(df, data_editor_state["edited_cells"])
+        _apply_cell_edits(df, data_editor_state["edited_cells"], dataframe_schema)
 
     if data_editor_state.get("added_rows"):
-        _apply_row_additions(df, data_editor_state["added_rows"])
+        _apply_row_additions(df, data_editor_state["added_rows"], dataframe_schema)
 
     if data_editor_state.get("deleted_rows"):
         _apply_row_deletions(df, data_editor_state["deleted_rows"])
@@ -376,7 +374,7 @@ def _apply_data_specific_configs(
         if type_util.is_colum_type_arrow_incompatible(column_data):
             if column_name not in columns_config:
                 columns_config[column_name] = {}
-            columns_config[column_name]["
+            columns_config[column_name]["disabled"] = True
             # Convert incompatible type to string
             data_df[column_name] = column_data.astype(str)
 
@@ -393,9 +391,9 @@ def _apply_data_specific_configs(
         DataFormat.LIST_OF_ROWS,
         DataFormat.COLUMN_VALUE_MAPPING,
     ]:
-        if
-            columns_config[
-        columns_config[
+        if INDEX_IDENTIFIER not in columns_config:
+            columns_config[INDEX_IDENTIFIER] = {}
+        columns_config[INDEX_IDENTIFIER]["hidden"] = True
 
     # Rename the first column to "value" for some of the data formats
     if data_format in [
@@ -593,13 +591,24 @@ class DataEditorMixin:
 
         # Temporary workaround: We hide range indices if num_rows is dynamic.
        # since the current way of handling this index during editing is a bit confusing.
-        if
-            if
-                columns_config[
-            columns_config[
+        if isinstance(data_df.index, pd.RangeIndex) and num_rows == "dynamic":
+            if INDEX_IDENTIFIER not in columns_config:
+                columns_config[INDEX_IDENTIFIER] = {}
+            columns_config[INDEX_IDENTIFIER]["hidden"] = True
+
+        # Convert the dataframe to an arrow table which is used as the main
+        # serialization format for sending the data to the frontend.
+        # We also utilize the arrow schema to determine the data kinds of every column.
+        arrow_table = pa.Table.from_pandas(data_df)
+
+        # Determine the dataframe schema which is required for parsing edited values
+        # and for checking type compatibilities.
+        dataframe_schema = determine_dataframe_schema(data_df, arrow_table.schema)
 
         proto = ArrowProto()
+
         proto.use_container_width = use_container_width
+
         if width:
             proto.width = width
         if height:
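
determine_dataframe_schema (new in streamlit/elements/lib/column_config_utils.py) derives a per-column data kind from the Arrow schema of pa.Table.from_pandas(data_df). A rough standalone sketch of that idea with a simplified kind mapping (for illustration, not the actual implementation):

    import pandas as pd
    import pyarrow as pa

    df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5], "c": ["x", "y"]})
    table = pa.Table.from_pandas(df)

    def kind_of(field: pa.Field) -> str:
        # Simplified mapping from Arrow types to coarse data kinds.
        t = field.type
        if pa.types.is_integer(t):
            return "integer"
        if pa.types.is_floating(t):
            return "float"
        if pa.types.is_boolean(t):
            return "boolean"
        if pa.types.is_timestamp(t):
            return "datetime"
        return "string"

    print([kind_of(field) for field in table.schema])  # ['integer', 'float', 'string']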
@@ -614,14 +623,14 @@ class DataEditorMixin:
         proto.form_id = current_form_id(self.dg)
 
         if type_util.is_pandas_styler(data):
+            # Pandas styler will only work for non-editable/disabled columns.
             delta_path = self.dg._get_delta_path_str()
             default_uuid = str(hash(delta_path))
             marshall_styler(proto, data, default_uuid)
 
-
-        proto.data = type_util.pyarrow_table_to_bytes(table)
+        proto.data = type_util.pyarrow_table_to_bytes(arrow_table)
 
-
+        marshall_column_config(proto, columns_config)
 
         serde = DataEditorSerde()
 
@@ -637,7 +646,7 @@ class DataEditorMixin:
             ctx=get_script_run_ctx(),
         )
 
-        _apply_dataframe_edits(data_df, widget_state.value)
+        _apply_dataframe_edits(data_df, widget_state.value, dataframe_schema)
         self.dg._enqueue("arrow_data_frame", proto)
         return type_util.convert_df_to_data_format(data_df, data_format)
 
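
Taken together, edits are parsed against the determined schema, applied to the dataframe in place, and the result is returned in the caller's original data format. A minimal usage sketch against this build's public API (data is illustrative):

    import pandas as pd
    import streamlit as st

    df = pd.DataFrame({"item": ["apple", "pear"], "price": [1.2, 0.8]})

    # num_rows="dynamic" enables row additions/deletions; per the change above,
    # a RangeIndex is hidden in that mode.
    edited = st.experimental_data_editor(df, num_rows="dynamic", use_container_width=True)

    st.write("Total:", edited["price"].sum())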