streamlit-nightly 1.37.2.dev20240810__py2.py3-none-any.whl → 1.37.2.dev20240812__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- streamlit/dataframe_util.py +61 -2
- streamlit/delta_generator.py +3 -158
- streamlit/elements/arrow.py +146 -2
- streamlit/elements/deck_gl_json_chart.py +2 -2
- streamlit/elements/image.py +12 -7
- streamlit/elements/lib/built_in_chart_utils.py +12 -5
- streamlit/elements/lib/column_config_utils.py +8 -0
- streamlit/elements/lib/dialog.py +6 -3
- streamlit/elements/lib/mutable_status_container.py +3 -2
- streamlit/elements/media.py +11 -9
- streamlit/elements/metric.py +2 -2
- streamlit/runtime/caching/hashing.py +16 -5
- streamlit/runtime/scriptrunner/__init__.py +2 -0
- streamlit/runtime/scriptrunner/exec_code.py +1 -1
- streamlit/runtime/scriptrunner/script_run_context.py +18 -6
- streamlit/source_util.py +2 -7
- streamlit/static/asset-manifest.json +2 -2
- streamlit/static/index.html +1 -1
- streamlit/static/static/js/{main.80efcd23.js → main.d1c1e1f9.js} +2 -2
- streamlit/type_util.py +5 -0
- {streamlit_nightly-1.37.2.dev20240810.dist-info → streamlit_nightly-1.37.2.dev20240812.dist-info}/METADATA +1 -1
- {streamlit_nightly-1.37.2.dev20240810.dist-info → streamlit_nightly-1.37.2.dev20240812.dist-info}/RECORD +27 -27
- /streamlit/static/static/js/{main.80efcd23.js.LICENSE.txt → main.d1c1e1f9.js.LICENSE.txt} +0 -0
- {streamlit_nightly-1.37.2.dev20240810.data → streamlit_nightly-1.37.2.dev20240812.data}/scripts/streamlit.cmd +0 -0
- {streamlit_nightly-1.37.2.dev20240810.dist-info → streamlit_nightly-1.37.2.dev20240812.dist-info}/WHEEL +0 -0
- {streamlit_nightly-1.37.2.dev20240810.dist-info → streamlit_nightly-1.37.2.dev20240812.dist-info}/entry_points.txt +0 -0
- {streamlit_nightly-1.37.2.dev20240810.dist-info → streamlit_nightly-1.37.2.dev20240812.dist-info}/top_level.txt +0 -0
streamlit/dataframe_util.py
CHANGED
@@ -42,6 +42,7 @@ from typing_extensions import TypeAlias, TypeGuard
 
 from streamlit import config, errors, logger, string_util
 from streamlit.type_util import (
+    NumpyShape,
     has_callable_attr,
     is_custom_dict,
     is_dataclass_instance,
@@ -81,6 +82,11 @@ _SNOWPANDAS_INDEX_TYPE_STR: Final = (
 _POLARS_DATAFRAME: Final = "polars.dataframe.frame.DataFrame"
 _POLARS_SERIES: Final = "polars.series.series.Series"
 _POLARS_LAZYFRAME: Final = "polars.lazyframe.frame.LazyFrame"
+_DASK_DATAFRAME: Final = "dask.dataframe.core.DataFrame"
+_DASK_SERIES: Final = "dask.dataframe.core.Series"
+_DASK_INDEX: Final = "dask.dataframe.core.Index"
+_RAY_MATERIALIZED_DATASET: Final = "ray.data.dataset.MaterializedDataset"
+_RAY_DATASET: Final = "ray.data.dataset.Dataset"
 
 V_co = TypeVar(
     "V_co",
@@ -116,7 +122,7 @@ Data: TypeAlias = Union[
     "Styler",
     "Index",
     "pa.Table",
-    "np.ndarray",
+    "np.ndarray[Any, np.dtype[Any]]",
     Iterable[Any],
     Dict[Any, Any],
     None,
@@ -146,6 +152,8 @@ class DataFormat(Enum):
     POLARS_SERIES = auto()  # polars.series.series.Series
     XARRAY_DATASET = auto()  # xarray.Dataset
     XARRAY_DATA_ARRAY = auto()  # xarray.DataArray
+    DASK_OBJECT = auto()  # dask.dataframe.core.DataFrame, Series, Index
+    RAY_DATASET = auto()  # ray.data.dataset.Dataset, MaterializedDataset
     LIST_OF_RECORDS = auto()  # List[Dict[str, Scalar]]
     LIST_OF_ROWS = auto()  # List[List[Scalar]]
     LIST_OF_VALUES = auto()  # List[Scalar]
@@ -189,6 +197,8 @@ def is_dataframe_like(obj: object) -> bool:
         DataFormat.POLARS_LAZYFRAME,
         DataFormat.XARRAY_DATASET,
         DataFormat.XARRAY_DATA_ARRAY,
+        DataFormat.DASK_OBJECT,
+        DataFormat.RAY_DATASET,
         DataFormat.COLUMN_SERIES_MAPPING,
     ]
 
@@ -201,6 +211,8 @@ def is_unevaluated_data_object(obj: object) -> bool:
     - PySpark DataFrame
     - Modin DataFrame / Series
     - Snowpandas DataFrame / Series / Index
+    - Dask DataFrame / Series / Index
+    - Ray Dataset
     - Polars LazyFrame
     - Generator functions
 
@@ -213,7 +225,9 @@ def is_unevaluated_data_object(obj: object) -> bool:
         or is_pyspark_data_object(obj)
         or is_snowpandas_data_object(obj)
         or is_modin_data_object(obj)
+        or is_ray_dataset(obj)
         or is_polars_lazyframe(obj)
+        or is_dask_object(obj)
         or inspect.isgeneratorfunction(obj)
     )
 
@@ -247,6 +261,15 @@ def is_pyspark_data_object(obj: object) -> bool:
     )
 
 
+def is_dask_object(obj: object) -> bool:
+    """True if obj is a Dask DataFrame, Series, or Index."""
+    return (
+        is_type(obj, _DASK_DATAFRAME)
+        or is_type(obj, _DASK_SERIES)
+        or is_type(obj, _DASK_INDEX)
+    )
+
+
 def is_modin_data_object(obj: object) -> bool:
     """True if obj is of Modin Dataframe or Series"""
     return is_type(obj, _MODIN_DF_TYPE_STR) or is_type(obj, _MODIN_SERIES_TYPE_STR)
@@ -286,6 +309,11 @@ def is_polars_lazyframe(obj: object) -> bool:
     return is_type(obj, _POLARS_LAZYFRAME)
 
 
+def is_ray_dataset(obj: object) -> bool:
+    """True if obj is a Ray Dataset."""
+    return is_type(obj, _RAY_DATASET) or is_type(obj, _RAY_MATERIALIZED_DATASET)
+
+
 def is_pandas_styler(obj: object) -> TypeGuard[Styler]:
     """True if obj is a pandas Styler."""
     return is_type(obj, _PANDAS_STYLER_TYPE_STR)
@@ -436,6 +464,31 @@ def convert_anything_to_pandas_df(
         data = data.copy(deep=True)
         return data.to_series().to_frame()
 
+    if is_dask_object(data):
+        data = data.head(max_unevaluated_rows, compute=True)
+
+        # Dask returns a Pandas object (DataFrame, Series, Index) when
+        # executing operations like `head`.
+        if isinstance(data, (pd.Series, pd.Index)):
+            data = data.to_frame()
+
+        if data.shape[0] == max_unevaluated_rows:
+            _show_data_information(
+                f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} "
+                "rows. Call `compute()` on the data object to show more."
+            )
+        return cast(pd.DataFrame, data)
+
+    if is_ray_dataset(data):
+        data = data.limit(max_unevaluated_rows).to_pandas()
+
+        if data.shape[0] == max_unevaluated_rows:
+            _show_data_information(
+                f"⚠️ Showing only {string_util.simplify_number(max_unevaluated_rows)} "
+                "rows. Call `to_pandas()` on the dataset to show more."
+            )
+        return cast(pd.DataFrame, data)
+
     if is_modin_data_object(data):
         data = data.head(max_unevaluated_rows)._to_pandas()
 
@@ -952,7 +1005,7 @@ def determine_data_format(input_data: Any) -> DataFormat:
     elif isinstance(input_data, pd.DataFrame):
         return DataFormat.PANDAS_DATAFRAME
     elif isinstance(input_data, np.ndarray):
-        if len(input_data.shape) == 1:
+        if len(cast(NumpyShape, input_data.shape)) == 1:
            # For technical reasons, we need to distinguish one
            # one-dimensional numpy array from multidimensional ones.
            return DataFormat.NUMPY_LIST
@@ -985,6 +1038,10 @@ def determine_data_format(input_data: Any) -> DataFormat:
         return DataFormat.XARRAY_DATASET
     elif is_xarray_data_array(input_data):
         return DataFormat.XARRAY_DATA_ARRAY
+    elif is_ray_dataset(input_data):
+        return DataFormat.RAY_DATASET
+    elif is_dask_object(input_data):
+        return DataFormat.DASK_OBJECT
     elif is_snowpark_data_object(input_data) or is_snowpark_row_list(input_data):
         return DataFormat.SNOWPARK_OBJECT
     elif (
@@ -1105,6 +1162,8 @@ def convert_pandas_df_to_data_format(
         DataFormat.PANDAS_ARRAY,
         DataFormat.MODIN_OBJECT,
         DataFormat.SNOWPANDAS_OBJECT,
+        DataFormat.DASK_OBJECT,
+        DataFormat.RAY_DATASET,
     ]:
         return df
     elif data_format == DataFormat.NUMPY_LIST:
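Taken together, these hunks teach dataframe_util to recognize Dask and Ray objects, treat them as unevaluated data, and materialize at most max_unevaluated_rows rows when converting to pandas. A minimal sketch of the resulting behavior (not part of the diff; assumes dask[dataframe] is installed next to this nightly):

    # Sketch: the new Dask code path, as exercised from an app script.
    import dask.dataframe as dd
    import pandas as pd
    import streamlit as st

    ddf = dd.from_pandas(pd.DataFrame({"a": range(10_000)}), npartitions=4)

    # st.dataframe now routes through is_dask_object() and
    # convert_anything_to_pandas_df(), which calls ddf.head(n, compute=True)
    # and shows the "Showing only ... rows" caption when the cap is hit.
    st.dataframe(ddf)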
streamlit/delta_generator.py
CHANGED
@@ -24,7 +24,6 @@ from typing import (
     Any,
     Callable,
     Final,
-    Hashable,
     Iterable,
     Literal,
     NoReturn,
@@ -38,7 +37,6 @@ from streamlit import (
     cli_util,
     config,
     cursor,
-    dataframe_util,
     env_util,
     logger,
     runtime,
@@ -89,26 +87,23 @@ from streamlit.elements.widgets.slider import SliderMixin
 from streamlit.elements.widgets.text_widgets import TextWidgetsMixin
 from streamlit.elements.widgets.time_widgets import TimeWidgetsMixin
 from streamlit.elements.write import WriteMixin
-from streamlit.errors import NoSessionContext, StreamlitAPIException
+from streamlit.errors import StreamlitAPIException
 from streamlit.proto import Block_pb2, ForwardMsg_pb2
 from streamlit.proto.RootContainer_pb2 import RootContainer
 from streamlit.runtime import caching
+from streamlit.runtime.scriptrunner import enqueue_message as _enqueue_message
 from streamlit.runtime.scriptrunner import get_script_run_ctx
 
 if TYPE_CHECKING:
     from google.protobuf.message import Message
-    from numpy import typing as npt
-    from pandas import DataFrame
 
     from streamlit.cursor import Cursor
-    from streamlit.dataframe_util import Data
     from streamlit.elements.lib.built_in_chart_utils import AddRowsMetadata
 
 
 MAX_DELTA_BYTES: Final[int] = 14 * 1024 * 1024  # 14MB
 
 Value = TypeVar("Value")
-DG = TypeVar("DG", bound="DeltaGenerator")
 
 # Type aliases for Ancestor Block Types
 BlockType: TypeAlias = str
@@ -554,121 +549,6 @@ class DeltaGenerator(
 
         return block_dg
 
-    def _arrow_add_rows(
-        self: DG,
-        data: Data = None,
-        **kwargs: (
-            DataFrame | npt.NDArray[Any] | Iterable[Any] | dict[Hashable, Any] | None
-        ),
-    ) -> DG | None:
-        """Concatenate a dataframe to the bottom of the current one.
-
-        Parameters
-        ----------
-        data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
-            Table to concat. Optional.
-
-        **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
-            The named dataset to concat. Optional. You can only pass in 1
-            dataset (including the one in the data parameter).
-
-        Example
-        -------
-        >>> import streamlit as st
-        >>> import pandas as pd
-        >>> import numpy as np
-        >>>
-        >>> df1 = pd.DataFrame(
-        ...     np.random.randn(50, 20), columns=("col %d" % i for i in range(20))
-        ... )
-        >>> my_table = st.table(df1)
-        >>>
-        >>> df2 = pd.DataFrame(
-        ...     np.random.randn(50, 20), columns=("col %d" % i for i in range(20))
-        ... )
-        >>> my_table.add_rows(df2)
-        >>> # Now the table shown in the Streamlit app contains the data for
-        >>> # df1 followed by the data for df2.
-
-        You can do the same thing with plots. For example, if you want to add
-        more data to a line chart:
-
-        >>> # Assuming df1 and df2 from the example above still exist...
-        >>> my_chart = st.line_chart(df1)
-        >>> my_chart.add_rows(df2)
-        >>> # Now the chart shown in the Streamlit app contains the data for
-        >>> # df1 followed by the data for df2.
-
-        And for plots whose datasets are named, you can pass the data with a
-        keyword argument where the key is the name:
-
-        >>> my_chart = st.vega_lite_chart(
-        ...     {
-        ...         "mark": "line",
-        ...         "encoding": {"x": "a", "y": "b"},
-        ...         "datasets": {
-        ...             "some_fancy_name": df1,  # <-- named dataset
-        ...         },
-        ...         "data": {"name": "some_fancy_name"},
-        ...     }
-        ... )
-        >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword
-
-        """
-        if self._root_container is None or self._cursor is None:
-            return self
-
-        if not self._cursor.is_locked:
-            raise StreamlitAPIException("Only existing elements can `add_rows`.")
-
-        # Accept syntax st._arrow_add_rows(df).
-        if data is not None and len(kwargs) == 0:
-            name = ""
-        # Accept syntax st._arrow_add_rows(foo=df).
-        elif len(kwargs) == 1:
-            name, data = kwargs.popitem()
-        # Raise error otherwise.
-        else:
-            raise StreamlitAPIException(
-                "Wrong number of arguments to add_rows()."
-                "Command requires exactly one dataset"
-            )
-
-        # When doing _arrow_add_rows on an element that does not already have data
-        # (for example, st.line_chart() without any args), call the original
-        # st.foo() element with new data instead of doing a _arrow_add_rows().
-        if (
-            "add_rows_metadata" in self._cursor.props
-            and self._cursor.props["add_rows_metadata"]
-            and self._cursor.props["add_rows_metadata"].last_index is None
-        ):
-            st_method = getattr(
-                self, self._cursor.props["add_rows_metadata"].chart_command
-            )
-            st_method(data, **kwargs)
-            return None
-
-        new_data, self._cursor.props["add_rows_metadata"] = _prep_data_for_add_rows(
-            data,
-            self._cursor.props["add_rows_metadata"],
-        )
-
-        msg = ForwardMsg_pb2.ForwardMsg()
-        msg.metadata.delta_path[:] = self._cursor.delta_path
-
-        import streamlit.elements.arrow as arrow_proto
-
-        default_uuid = str(hash(self._get_delta_path_str()))
-        arrow_proto.marshall(msg.delta.arrow_add_rows.data, new_data, default_uuid)
-
-        if name:
-            msg.delta.arrow_add_rows.name = name
-            msg.delta.arrow_add_rows.has_name = True
-
-        _enqueue_message(msg)
-
-        return self
-
 
 main_dg = DeltaGenerator(root_container=RootContainer.MAIN)
 sidebar_dg = DeltaGenerator(root_container=RootContainer.SIDEBAR, parent=main_dg)
@@ -702,42 +582,7 @@ def get_last_dg_added_to_context_stack() -> DeltaGenerator | None:
     return None
 
 
-def _prep_data_for_add_rows(
-    data: Data,
-    add_rows_metadata: AddRowsMetadata | None,
-) -> tuple[Data, AddRowsMetadata | None]:
-    if not add_rows_metadata:
-        if dataframe_util.is_pandas_styler(data):
-            # When calling add_rows on st.table or st.dataframe we want styles to
-            # pass through.
-            return data, None
-        return dataframe_util.convert_anything_to_pandas_df(data), None
-
-    # If add_rows_metadata is set, it indicates that the add_rows used called
-    # on a chart based on our built-in chart commands.
-
-    # For built-in chart commands we have to reshape the data structure
-    # otherwise the input data and the actual data used
-    # by vega_lite will be different, and it will throw an error.
-    from streamlit.elements.lib.built_in_chart_utils import prep_chart_data_for_add_rows
-
-    return prep_chart_data_for_add_rows(data, add_rows_metadata)
-
-
-def _enqueue_message(msg: ForwardMsg_pb2.ForwardMsg) -> None:
-    """Enqueues a ForwardMsg proto to send to the app."""
-    ctx = get_script_run_ctx()
-
-    if ctx is None:
-        raise NoSessionContext()
-
-    if ctx.current_fragment_id and msg.WhichOneof("type") == "delta":
-        msg.delta.fragment_id = ctx.current_fragment_id
-
-    ctx.enqueue(msg)
-
-
-def _writes_directly_to_sidebar(dg: DG) -> bool:
+def _writes_directly_to_sidebar(dg: DeltaGenerator) -> bool:
     in_sidebar = any(a._root_container == RootContainer.SIDEBAR for a in dg._ancestors)
     has_container = bool(len(list(dg._ancestor_block_types)))
     return in_sidebar and not has_container
streamlit/elements/arrow.py
CHANGED
@@ -18,7 +18,9 @@ import json
 from dataclasses import dataclass
 from typing import (
     TYPE_CHECKING,
+    Any,
     Final,
+    Hashable,
     Iterable,
     Literal,
     TypedDict,
@@ -43,14 +45,22 @@ from streamlit.elements.lib.policies import check_widget_policies
 from streamlit.elements.lib.utils import Key, to_key
 from streamlit.errors import StreamlitAPIException
 from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
+from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
 from streamlit.runtime.metrics_util import gather_metrics
-from streamlit.runtime.scriptrunner import get_script_run_ctx
+from streamlit.runtime.scriptrunner.script_run_context import (
+    enqueue_message,
+    get_script_run_ctx,
+)
 from streamlit.runtime.state import WidgetCallback, register_widget
 from streamlit.runtime.state.common import compute_widget_id
 
 if TYPE_CHECKING:
+    from numpy import typing as npt
+    from pandas import DataFrame
+
     from streamlit.dataframe_util import Data
     from streamlit.delta_generator import DeltaGenerator
+    from streamlit.elements.lib.built_in_chart_utils import AddRowsMetadata
 
 
 SelectionMode: TypeAlias = Literal[
@@ -671,7 +681,7 @@ class ArrowMixin:
         >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword
 
         """
-        return self.dg._arrow_add_rows(data, **kwargs)
+        return _arrow_add_rows(self.dg, data, **kwargs)
 
     @property
     def dg(self) -> DeltaGenerator:
@@ -679,6 +689,140 @@ class ArrowMixin:
         return cast("DeltaGenerator", self)
 
 
+def _prep_data_for_add_rows(
+    data: Data,
+    add_rows_metadata: AddRowsMetadata | None,
+) -> tuple[Data, AddRowsMetadata | None]:
+    if not add_rows_metadata:
+        if dataframe_util.is_pandas_styler(data):
+            # When calling add_rows on st.table or st.dataframe we want styles to
+            # pass through.
+            return data, None
+        return dataframe_util.convert_anything_to_pandas_df(data), None
+
+    # If add_rows_metadata is set, it indicates that the add_rows used called
+    # on a chart based on our built-in chart commands.
+
+    # For built-in chart commands we have to reshape the data structure
+    # otherwise the input data and the actual data used
+    # by vega_lite will be different, and it will throw an error.
+    from streamlit.elements.lib.built_in_chart_utils import prep_chart_data_for_add_rows
+
+    return prep_chart_data_for_add_rows(data, add_rows_metadata)
+
+
+def _arrow_add_rows(
+    dg: DeltaGenerator,
+    data: Data = None,
+    **kwargs: (
+        DataFrame | npt.NDArray[Any] | Iterable[Any] | dict[Hashable, Any] | None
+    ),
+) -> DeltaGenerator | None:
+    """Concatenate a dataframe to the bottom of the current one.
+
+    Parameters
+    ----------
+    data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict, or None
+        Table to concat. Optional.
+
+    **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None
+        The named dataset to concat. Optional. You can only pass in 1
+        dataset (including the one in the data parameter).
+
+    Example
+    -------
+    >>> import streamlit as st
+    >>> import pandas as pd
+    >>> import numpy as np
+    >>>
+    >>> df1 = pd.DataFrame(
+    ...     np.random.randn(50, 20), columns=("col %d" % i for i in range(20))
+    ... )
+    >>> my_table = st.table(df1)
+    >>>
+    >>> df2 = pd.DataFrame(
+    ...     np.random.randn(50, 20), columns=("col %d" % i for i in range(20))
+    ... )
+    >>> my_table.add_rows(df2)
+    >>> # Now the table shown in the Streamlit app contains the data for
+    >>> # df1 followed by the data for df2.
+
+    You can do the same thing with plots. For example, if you want to add
+    more data to a line chart:
+
+    >>> # Assuming df1 and df2 from the example above still exist...
+    >>> my_chart = st.line_chart(df1)
+    >>> my_chart.add_rows(df2)
+    >>> # Now the chart shown in the Streamlit app contains the data for
+    >>> # df1 followed by the data for df2.
+
+    And for plots whose datasets are named, you can pass the data with a
+    keyword argument where the key is the name:
+
+    >>> my_chart = st.vega_lite_chart(
+    ...     {
+    ...         "mark": "line",
+    ...         "encoding": {"x": "a", "y": "b"},
+    ...         "datasets": {
+    ...             "some_fancy_name": df1,  # <-- named dataset
+    ...         },
+    ...         "data": {"name": "some_fancy_name"},
+    ...     }
+    ... )
+    >>> my_chart.add_rows(some_fancy_name=df2)  # <-- name used as keyword
+
+    """
+    if dg._root_container is None or dg._cursor is None:
+        return dg
+
+    if not dg._cursor.is_locked:
+        raise StreamlitAPIException("Only existing elements can `add_rows`.")
+
+    # Accept syntax st._arrow_add_rows(df).
+    if data is not None and len(kwargs) == 0:
+        name = ""
+    # Accept syntax st._arrow_add_rows(foo=df).
+    elif len(kwargs) == 1:
+        name, data = kwargs.popitem()
+    # Raise error otherwise.
+    else:
+        raise StreamlitAPIException(
+            "Wrong number of arguments to add_rows()."
+            "Command requires exactly one dataset"
+        )
+
+    # When doing _arrow_add_rows on an element that does not already have data
+    # (for example, st.line_chart() without any args), call the original
+    # st.foo() element with new data instead of doing a _arrow_add_rows().
+    if (
+        "add_rows_metadata" in dg._cursor.props
+        and dg._cursor.props["add_rows_metadata"]
+        and dg._cursor.props["add_rows_metadata"].last_index is None
+    ):
+        st_method = getattr(dg, dg._cursor.props["add_rows_metadata"].chart_command)
+        st_method(data, **kwargs)
+        return None
+
+    new_data, dg._cursor.props["add_rows_metadata"] = _prep_data_for_add_rows(
+        data,
+        dg._cursor.props["add_rows_metadata"],
+    )
+
+    msg = ForwardMsg()
+    msg.metadata.delta_path[:] = dg._cursor.delta_path
+
+    default_uuid = str(hash(dg._get_delta_path_str()))
+    marshall(msg.delta.arrow_add_rows.data, new_data, default_uuid)
+
+    if name:
+        msg.delta.arrow_add_rows.name = name
+        msg.delta.arrow_add_rows.has_name = True
+
+    enqueue_message(msg)
+
+    return dg
+
+
 def marshall(proto: ArrowProto, data: Data, default_uuid: str | None = None) -> None:
     """Marshall pandas.DataFrame into an Arrow proto.
 
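Net effect of the delta_generator.py and arrow.py changes above: the add_rows implementation moves out of DeltaGenerator (which drops its module-level pandas/numpy dependencies) and becomes the free function _arrow_add_rows(dg, ...) next to the Arrow marshalling code, with ArrowMixin.add_rows delegating to it. The public API is unchanged; a short sketch:

    # Sketch: add_rows behaves exactly as before the refactor.
    import pandas as pd
    import streamlit as st

    table = st.table(pd.DataFrame({"a": [1, 2]}))
    # Internally this now calls _arrow_add_rows(self.dg, df) in arrow.py
    # instead of a DeltaGenerator method.
    table.add_rows(pd.DataFrame({"a": [3, 4]}))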
streamlit/elements/deck_gl_json_chart.py
CHANGED
@@ -16,7 +16,7 @@ from __future__ import annotations
 
 import hashlib
 import json
-from typing import TYPE_CHECKING, Any, Final, Mapping, cast
+from typing import TYPE_CHECKING, Any, Dict, Final, Mapping, cast
 
 from streamlit import config
 from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as PydeckProto
@@ -158,7 +158,7 @@ def _get_pydeck_tooltip(pydeck_obj: Deck | None) -> dict[str, str] | None:
     # For details, see: https://github.com/visgl/deck.gl/pull/7125/files
     tooltip = getattr(pydeck_obj, "_tooltip", None)
     if tooltip is not None and isinstance(tooltip, dict):
-        return tooltip
+        return cast(Dict[str, str], tooltip)
 
     return None
 
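For context, the value being cast is pydeck's private Deck._tooltip attribute, which holds the dict passed to the Deck constructor; the cast only informs the type checker and does not change runtime behavior. A hedged illustration (assumes pydeck is installed):

    # Sketch: the dict shape that _get_pydeck_tooltip() casts to Dict[str, str].
    import pydeck

    deck = pydeck.Deck(tooltip={"text": "Elevation: {elevationValue}"})
    # getattr(deck, "_tooltip", None) -> {"text": "Elevation: {elevationValue}"}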
streamlit/elements/image.py
CHANGED
@@ -34,6 +34,7 @@ from streamlit.errors import StreamlitAPIException
 from streamlit.proto.Image_pb2 import ImageList as ImageListProto
 from streamlit.runtime import caching
 from streamlit.runtime.metrics_util import gather_metrics
+from streamlit.type_util import NumpyShape
 
 if TYPE_CHECKING:
     from typing import Any
@@ -259,16 +260,17 @@ def _4d_to_list_3d(array: npt.NDArray[Any]) -> list[npt.NDArray[Any]]:
 
 
 def _verify_np_shape(array: npt.NDArray[Any]) -> npt.NDArray[Any]:
-    if len(array.shape) not in (2, 3):
+    shape: NumpyShape = array.shape
+    if len(shape) not in (2, 3):
         raise StreamlitAPIException("Numpy shape has to be of length 2 or 3.")
-    if len(array.shape) == 3 and array.shape[-1] not in (1, 3, 4):
+    if len(shape) == 3 and shape[-1] not in (1, 3, 4):
         raise StreamlitAPIException(
             "Channel can only be 1, 3, or 4 got %d. Shape is %s"
-            % (array.shape[-1], str(array.shape))
+            % (shape[-1], str(shape))
         )
 
     # If there's only one channel, convert is to x, y
-    if len(array.shape) == 3 and array.shape[-1] == 1:
+    if len(shape) == 3 and shape[-1] == 1:
         array = array[:, :, 0]
 
     return array
@@ -418,7 +420,7 @@ def image_to_url(
     )
 
     if channels == "BGR":
-        if len(image.shape) == 3:
+        if len(cast(NumpyShape, image.shape)) == 3:
             image = image[:, :, [2, 1, 0]]
         else:
             raise StreamlitAPIException(
@@ -509,7 +511,7 @@ def marshall_images(
     images: Sequence[AtomicImage]
     if isinstance(image, list):
         images = image
-    elif isinstance(image, np.ndarray) and len(image.shape) == 4:
+    elif isinstance(image, np.ndarray) and len(cast(NumpyShape, image.shape)) == 4:
         images = _4d_to_list_3d(image)
     else:
         images = [image]
@@ -520,7 +522,10 @@ def marshall_images(
     if isinstance(caption, str):
         captions = [caption]
     # You can pass in a 1-D Numpy array as captions.
-    elif isinstance(caption, np.ndarray) and len(caption.shape) == 1:
+    elif (
+        isinstance(caption, np.ndarray)
+        and len(cast(NumpyShape, caption.shape)) == 1
+    ):
         captions = caption.tolist()
     # If there are no captions then make the captions list the same size
     # as the images list.
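The _verify_np_shape rewrite is behavior-preserving: it only binds array.shape to the NumpyShape alias once so the length and channel checks type-check cleanly. As a reminder of what those checks accept (sketch, not from the diff):

    # Sketch: shapes accepted by _verify_np_shape before and after this change.
    import numpy as np

    np.zeros((64, 64))      # 2-D grayscale: passed through unchanged
    np.zeros((64, 64, 3))   # 3 channels (RGB): accepted
    np.zeros((64, 64, 4))   # 4 channels (RGBA): accepted
    np.zeros((64, 64, 1))   # 1 channel: squeezed to 2-D via array[:, :, 0]
    # any other shape raises StreamlitAPIException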
streamlit/elements/lib/built_in_chart_utils.py
CHANGED
@@ -667,10 +667,15 @@ def _get_offset_encoding(
     x_offset = alt.XOffset()
     y_offset = alt.YOffset()
 
+    # our merge gate does not find the alt.UndefinedType type for some reason
+    _color_column: str | alt.UndefinedType = (  # type: ignore[name-defined]
+        color_column if color_column is not None else alt.utils.Undefined
+    )
+
     if chart_type is ChartType.VERTICAL_BAR:
-        x_offset = alt.XOffset(field=color_column)
+        x_offset = alt.XOffset(field=_color_column)
     elif chart_type is ChartType.HORIZONTAL_BAR:
-        y_offset = alt.YOffset(field=color_column)
+        y_offset = alt.YOffset(field=_color_column)
 
     return x_offset, y_offset
 
@@ -912,11 +917,13 @@ def _get_color_encoding(
         if len(color_values) != len(y_column_list):
             raise StreamlitColorLengthError(color_values, y_column_list)
 
-        if len(color_value) == 1:
+        if len(color_values) == 1:
             return alt.ColorValue(to_css_color(cast(Any, color_value[0])))
         else:
             return alt.Color(
-                field=color_column,
+                field=color_column
+                if color_column is not None
+                else alt.utils.Undefined,
                 scale=alt.Scale(range=[to_css_color(c) for c in color_values]),
                 legend=_COLOR_LEGEND_SETTINGS,
                 type="nominal",
@@ -926,7 +933,7 @@ def _get_color_encoding(
         raise StreamlitInvalidColorError(df, color_from_user)
 
     elif color_column is not None:
-        column_type: str
+        column_type: VegaLiteType
 
         if color_column == _MELTED_COLOR_COLUMN_NAME:
             column_type = "nominal"
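The _color_column indirection exists because Altair encoding fields reject None; Altair's sentinel for "not provided" is alt.utils.Undefined, so an absent color column has to be mapped before use. The pattern in isolation (a sketch, assuming Altair 5):

    # Sketch: mapping an optional column name to Altair's Undefined sentinel.
    import altair as alt

    color_column = None  # e.g. the chart was built without a color grouping
    x_offset = alt.XOffset(
        field=color_column if color_column is not None else alt.utils.Undefined
    )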
streamlit/elements/lib/column_config_utils.py
CHANGED
@@ -494,6 +494,14 @@ def apply_data_specific_configs(
         DataFormat.LIST_OF_RECORDS,
         DataFormat.LIST_OF_ROWS,
         DataFormat.COLUMN_VALUE_MAPPING,
+        # Dataframe-like objects that don't have an index:
+        DataFormat.PANDAS_ARRAY,
+        DataFormat.PANDAS_INDEX,
+        DataFormat.POLARS_DATAFRAME,
+        DataFormat.POLARS_SERIES,
+        DataFormat.POLARS_LAZYFRAME,
+        DataFormat.PYARROW_ARRAY,
+        DataFormat.RAY_DATASET,
     ]:
         update_column_config(columns_config, INDEX_IDENTIFIER, {"hidden": True})
 
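The formats added here are inputs with no meaningful index of their own; pandas invents a RangeIndex when they are converted, so apply_data_specific_configs now hides that synthetic index column by default. Sketch of the visible effect (assumes polars is installed):

    # Sketch: a Polars DataFrame carries no index, so the RangeIndex created
    # during pandas conversion is now hidden in the rendered table.
    import polars as pl
    import streamlit as st

    st.dataframe(pl.DataFrame({"a": [1, 2, 3]}))  # no 0/1/2 index column shown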