streamlit-nightly 1.37.2.dev20240818__py2.py3-none-any.whl → 1.37.2.dev20240820__py2.py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in their public registries.
Files changed (38)
  1. streamlit/dataframe_util.py +70 -71
  2. streamlit/elements/image.py +15 -25
  3. streamlit/elements/lib/built_in_chart_utils.py +1 -1
  4. streamlit/elements/lib/options_selector_utils.py +3 -3
  5. streamlit/elements/lib/policies.py +10 -9
  6. streamlit/elements/widgets/radio.py +2 -2
  7. streamlit/elements/widgets/select_slider.py +2 -2
  8. streamlit/elements/widgets/selectbox.py +2 -2
  9. streamlit/runtime/caching/__init__.py +1 -11
  10. streamlit/runtime/caching/cache_data_api.py +11 -83
  11. streamlit/runtime/caching/cache_errors.py +13 -9
  12. streamlit/runtime/caching/cache_resource_api.py +9 -58
  13. streamlit/runtime/caching/cache_utils.py +7 -12
  14. streamlit/runtime/caching/cached_message_replay.py +29 -185
  15. streamlit/runtime/caching/legacy_cache_api.py +15 -11
  16. streamlit/runtime/scriptrunner_utils/script_run_context.py +9 -4
  17. streamlit/runtime/state/widgets.py +0 -5
  18. streamlit/static/asset-manifest.json +7 -7
  19. streamlit/static/index.html +1 -1
  20. streamlit/static/static/js/1307.74bce9ab.chunk.js +1 -0
  21. streamlit/static/static/js/3599.eaeac234.chunk.js +5 -0
  22. streamlit/static/static/js/6013.fb4531df.chunk.js +5 -0
  23. streamlit/static/static/js/7175.70728640.chunk.js +5 -0
  24. streamlit/static/static/js/8691.93a29403.chunk.js +5 -0
  25. streamlit/static/static/js/main.ff81c7a3.js +28 -0
  26. {streamlit_nightly-1.37.2.dev20240818.dist-info → streamlit_nightly-1.37.2.dev20240820.dist-info}/METADATA +1 -1
  27. {streamlit_nightly-1.37.2.dev20240818.dist-info → streamlit_nightly-1.37.2.dev20240820.dist-info}/RECORD +32 -32
  28. {streamlit_nightly-1.37.2.dev20240818.dist-info → streamlit_nightly-1.37.2.dev20240820.dist-info}/WHEEL +1 -1
  29. streamlit/static/static/js/1307.5225662c.chunk.js +0 -1
  30. streamlit/static/static/js/3599.17480cdf.chunk.js +0 -5
  31. streamlit/static/static/js/6013.fc3867be.chunk.js +0 -5
  32. streamlit/static/static/js/7175.ed4a2f0d.chunk.js +0 -5
  33. streamlit/static/static/js/8691.885f6268.chunk.js +0 -5
  34. streamlit/static/static/js/main.90c4efd0.js +0 -28
  35. /streamlit/static/static/js/{main.90c4efd0.js.LICENSE.txt → main.ff81c7a3.js.LICENSE.txt} +0 -0
  36. {streamlit_nightly-1.37.2.dev20240818.data → streamlit_nightly-1.37.2.dev20240820.data}/scripts/streamlit.cmd +0 -0
  37. {streamlit_nightly-1.37.2.dev20240818.dist-info → streamlit_nightly-1.37.2.dev20240820.dist-info}/entry_points.txt +0 -0
  38. {streamlit_nightly-1.37.2.dev20240818.dist-info → streamlit_nightly-1.37.2.dev20240820.dist-info}/top_level.txt +0 -0

streamlit/dataframe_util.py
@@ -70,29 +70,30 @@ _LOGGER: Final = logger.get_logger(__name__)
 _MAX_UNEVALUATED_DF_ROWS = 10000
 
 _PANDAS_DATA_OBJECT_TYPE_RE: Final = re.compile(r"^pandas.*$")
-_PANDAS_STYLER_TYPE_STR: Final = "pandas.io.formats.style.Styler"
-_XARRAY_DATA_ARRAY_TYPE_STR: Final = "xarray.core.dataarray.DataArray"
-_XARRAY_DATASET_TYPE_STR: Final = "xarray.core.dataset.Dataset"
-_SNOWPARK_DF_TYPE_STR: Final = "snowflake.snowpark.dataframe.DataFrame"
-_SNOWPARK_DF_ROW_TYPE_STR: Final = "snowflake.snowpark.row.Row"
-_SNOWPARK_TABLE_TYPE_STR: Final = "snowflake.snowpark.table.Table"
-_PYSPARK_DF_TYPE_STR: Final = "pyspark.sql.dataframe.DataFrame"
+
+_DASK_DATAFRAME: Final = "dask.dataframe.core.DataFrame"
+_DASK_INDEX: Final = "dask.dataframe.core.Index"
+_DASK_SERIES: Final = "dask.dataframe.core.Series"
+_DUCKDB_RELATION: Final = "duckdb.duckdb.DuckDBPyRelation"
 _MODIN_DF_TYPE_STR: Final = "modin.pandas.dataframe.DataFrame"
 _MODIN_SERIES_TYPE_STR: Final = "modin.pandas.series.Series"
+_PANDAS_STYLER_TYPE_STR: Final = "pandas.io.formats.style.Styler"
+_POLARS_DATAFRAME: Final = "polars.dataframe.frame.DataFrame"
+_POLARS_LAZYFRAME: Final = "polars.lazyframe.frame.LazyFrame"
+_POLARS_SERIES: Final = "polars.series.series.Series"
+_PYSPARK_DF_TYPE_STR: Final = "pyspark.sql.dataframe.DataFrame"
+_RAY_DATASET: Final = "ray.data.dataset.Dataset"
+_RAY_MATERIALIZED_DATASET: Final = "ray.data.dataset.MaterializedDataset"
 _SNOWPANDAS_DF_TYPE_STR: Final = "snowflake.snowpark.modin.pandas.dataframe.DataFrame"
-_SNOWPANDAS_SERIES_TYPE_STR: Final = "snowflake.snowpark.modin.pandas.series.Series"
 _SNOWPANDAS_INDEX_TYPE_STR: Final = (
     "snowflake.snowpark.modin.plugin.extensions.index.Index"
 )
-_POLARS_DATAFRAME: Final = "polars.dataframe.frame.DataFrame"
-_POLARS_SERIES: Final = "polars.series.series.Series"
-_POLARS_LAZYFRAME: Final = "polars.lazyframe.frame.LazyFrame"
-_DASK_DATAFRAME: Final = "dask.dataframe.core.DataFrame"
-_DASK_SERIES: Final = "dask.dataframe.core.Series"
-_DASK_INDEX: Final = "dask.dataframe.core.Index"
-_RAY_MATERIALIZED_DATASET: Final = "ray.data.dataset.MaterializedDataset"
-_RAY_DATASET: Final = "ray.data.dataset.Dataset"
-_DUCKDB_RELATION: Final = "duckdb.duckdb.DuckDBPyRelation"
+_SNOWPANDAS_SERIES_TYPE_STR: Final = "snowflake.snowpark.modin.pandas.series.Series"
+_SNOWPARK_DF_ROW_TYPE_STR: Final = "snowflake.snowpark.row.Row"
+_SNOWPARK_DF_TYPE_STR: Final = "snowflake.snowpark.dataframe.DataFrame"
+_SNOWPARK_TABLE_TYPE_STR: Final = "snowflake.snowpark.table.Table"
+_XARRAY_DATASET_TYPE_STR: Final = "xarray.core.dataset.Dataset"
+_XARRAY_DATA_ARRAY_TYPE_STR: Final = "xarray.core.dataarray.DataArray"
 
 V_co = TypeVar(
     "V_co",
@@ -195,37 +196,38 @@ class DataFormat(Enum):
 
     UNKNOWN = auto()
     EMPTY = auto()  # None
-    PANDAS_DATAFRAME = auto()  # pd.DataFrame
-    PANDAS_SERIES = auto()  # pd.Series
-    PANDAS_INDEX = auto()  # pd.Index
-    PANDAS_ARRAY = auto()  # pd.array
+
+    COLUMN_INDEX_MAPPING = auto()  # {column: {index: value}}
+    COLUMN_SERIES_MAPPING = auto()  # {column: Series(values)}
+    COLUMN_VALUE_MAPPING = auto()  # {column: List[values]}
+    DASK_OBJECT = auto()  # dask.dataframe.core.DataFrame, Series, Index
+    DBAPI_CURSOR = auto()  # DBAPI Cursor (PEP 249)
+    DUCKDB_RELATION = auto()  # DuckDB Relation
+    KEY_VALUE_DICT = auto()  # {index: value}
+    LIST_OF_RECORDS = auto()  # List[Dict[str, Scalar]]
+    LIST_OF_ROWS = auto()  # List[List[Scalar]]
+    LIST_OF_VALUES = auto()  # List[Scalar]
+    MODIN_OBJECT = auto()  # Modin DataFrame, Series
     NUMPY_LIST = auto()  # np.array[Scalar]
     NUMPY_MATRIX = auto()  # np.array[List[Scalar]]
-    PYARROW_TABLE = auto()  # pyarrow.Table
-    PYARROW_ARRAY = auto()  # pyarrow.Array
-    SNOWPARK_OBJECT = auto()  # Snowpark DataFrame, Table, List[Row]
-    PYSPARK_OBJECT = auto()  # pyspark.DataFrame
-    MODIN_OBJECT = auto()  # Modin DataFrame, Series
-    SNOWPANDAS_OBJECT = auto()  # Snowpandas DataFrame, Series
+    PANDAS_ARRAY = auto()  # pd.array
+    PANDAS_DATAFRAME = auto()  # pd.DataFrame
+    PANDAS_INDEX = auto()  # pd.Index
+    PANDAS_SERIES = auto()  # pd.Series
     PANDAS_STYLER = auto()  # pandas Styler
     POLARS_DATAFRAME = auto()  # polars.dataframe.frame.DataFrame
     POLARS_LAZYFRAME = auto()  # polars.lazyframe.frame.LazyFrame
     POLARS_SERIES = auto()  # polars.series.series.Series
-    XARRAY_DATASET = auto()  # xarray.Dataset
-    XARRAY_DATA_ARRAY = auto()  # xarray.DataArray
-    DASK_OBJECT = auto()  # dask.dataframe.core.DataFrame, Series, Index
+    PYARROW_ARRAY = auto()  # pyarrow.Array
+    PYARROW_TABLE = auto()  # pyarrow.Table
+    PYSPARK_OBJECT = auto()  # pyspark.DataFrame
     RAY_DATASET = auto()  # ray.data.dataset.Dataset, MaterializedDataset
-    LIST_OF_RECORDS = auto()  # List[Dict[str, Scalar]]
-    LIST_OF_ROWS = auto()  # List[List[Scalar]]
-    LIST_OF_VALUES = auto()  # List[Scalar]
-    TUPLE_OF_VALUES = auto()  # Tuple[Scalar]
     SET_OF_VALUES = auto()  # Set[Scalar]
-    COLUMN_INDEX_MAPPING = auto()  # {column: {index: value}}
-    COLUMN_VALUE_MAPPING = auto()  # {column: List[values]}
-    COLUMN_SERIES_MAPPING = auto()  # {column: Series(values)}
-    KEY_VALUE_DICT = auto()  # {index: value}
-    DBAPI_CURSOR = auto()  # DBAPI Cursor (PEP 249)
-    DUCKDB_RELATION = auto()  # DuckDB Relation
+    SNOWPANDAS_OBJECT = auto()  # Snowpandas DataFrame, Series
+    SNOWPARK_OBJECT = auto()  # Snowpark DataFrame, Table, List[Row]
+    TUPLE_OF_VALUES = auto()  # Tuple[Scalar]
+    XARRAY_DATASET = auto()  # xarray.Dataset
+    XARRAY_DATA_ARRAY = auto()  # xarray.DataArray
 
 
 def is_pyarrow_version_less_than(v: str) -> bool:
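
Note on the type-string constants above: they are fully qualified type names rather than imported classes, which lets the detection code recognize dataframes from optional dependencies without importing those libraries. A minimal sketch of the matching trick (the helper name here is illustrative, not taken from this diff):

def _full_type_name(obj: object) -> str:
    # Build "module.QualName" for the instance's concrete type.
    t = type(obj)
    return f"{t.__module__}.{t.__qualname__}"

_POLARS_DATAFRAME = "polars.dataframe.frame.DataFrame"

def _is_polars_dataframe(obj: object) -> bool:
    # No `import polars` needed: if the library is absent, no object
    # can carry this type name, so the check simply returns False.
    return _full_type_name(obj) == _POLARS_DATAFRAME
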
@@ -288,30 +290,30 @@ def is_dataframe_like(obj: object) -> bool:
         # return False early to avoid unnecessary checks.
         return False
 
-    return determine_data_format(obj) in [
+    return determine_data_format(obj) in {
+        DataFormat.COLUMN_SERIES_MAPPING,
+        DataFormat.DASK_OBJECT,
+        DataFormat.DBAPI_CURSOR,
+        DataFormat.MODIN_OBJECT,
+        DataFormat.NUMPY_LIST,
+        DataFormat.NUMPY_MATRIX,
+        DataFormat.PANDAS_ARRAY,
         DataFormat.PANDAS_DATAFRAME,
-        DataFormat.PANDAS_SERIES,
         DataFormat.PANDAS_INDEX,
+        DataFormat.PANDAS_SERIES,
         DataFormat.PANDAS_STYLER,
-        DataFormat.PANDAS_ARRAY,
-        DataFormat.NUMPY_LIST,
-        DataFormat.NUMPY_MATRIX,
-        DataFormat.PYARROW_TABLE,
+        DataFormat.POLARS_DATAFRAME,
+        DataFormat.POLARS_LAZYFRAME,
+        DataFormat.POLARS_SERIES,
         DataFormat.PYARROW_ARRAY,
-        DataFormat.SNOWPARK_OBJECT,
+        DataFormat.PYARROW_TABLE,
         DataFormat.PYSPARK_OBJECT,
-        DataFormat.MODIN_OBJECT,
+        DataFormat.RAY_DATASET,
         DataFormat.SNOWPANDAS_OBJECT,
-        DataFormat.POLARS_SERIES,
-        DataFormat.POLARS_DATAFRAME,
-        DataFormat.POLARS_LAZYFRAME,
+        DataFormat.SNOWPARK_OBJECT,
         DataFormat.XARRAY_DATASET,
         DataFormat.XARRAY_DATA_ARRAY,
-        DataFormat.DASK_OBJECT,
-        DataFormat.RAY_DATASET,
-        DataFormat.COLUMN_SERIES_MAPPING,
-        DataFormat.DBAPI_CURSOR,
-    ]
+    }
 
 
 def is_unevaluated_data_object(obj: object) -> bool:
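
Note: replacing the list literal with a set literal turns the membership test into a hashed O(1) lookup (Enum members are hashable), and alphabetizing the entries is safe because a set has no meaningful order. A standalone sketch of the pattern, not code from the diff:

from enum import Enum, auto

class DataFormat(Enum):
    PANDAS_DATAFRAME = auto()
    LIST_OF_VALUES = auto()
    UNKNOWN = auto()

_DATAFRAME_LIKE = {DataFormat.PANDAS_DATAFRAME, DataFormat.LIST_OF_VALUES}

def is_dataframe_like(fmt: DataFormat) -> bool:
    return fmt in _DATAFRAME_LIKE  # hashed lookup, order irrelevant

assert is_dataframe_like(DataFormat.PANDAS_DATAFRAME)
assert not is_dataframe_like(DataFormat.UNKNOWN)
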
@@ -892,7 +894,7 @@ def convert_anything_to_arrow_bytes(
     return convert_pandas_df_to_arrow_bytes(df)
 
 
-def convert_anything_to_sequence(obj: OptionSequence[V_co]) -> list[V_co]:
+def convert_anything_to_list(obj: OptionSequence[V_co]) -> list[V_co]:
     """Try to convert different formats to a list.
 
     If the input is a dataframe-like object, we just select the first
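
Note: the rename from convert_anything_to_sequence to convert_anything_to_list reflects that the helper already returned a concrete list, as the list[V_co] annotation shows. Per the docstring, dataframe-like inputs are reduced to their first column; a hedged usage sketch of this internal helper:

import pandas as pd
from streamlit import dataframe_util

df = pd.DataFrame({"color": ["red", "green"], "code": ["#f00", "#0f0"]})
# Only the first column should survive the conversion.
options = dataframe_util.convert_anything_to_list(df)
assert options == ["red", "green"]
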
@@ -1309,21 +1311,21 @@ def convert_pandas_df_to_data_format(
         The converted dataframe.
     """
 
-    if data_format in [
+    if data_format in {
         DataFormat.EMPTY,
+        DataFormat.DASK_OBJECT,
+        DataFormat.DBAPI_CURSOR,
+        DataFormat.DUCKDB_RELATION,
+        DataFormat.MODIN_OBJECT,
+        DataFormat.PANDAS_ARRAY,
         DataFormat.PANDAS_DATAFRAME,
-        DataFormat.SNOWPARK_OBJECT,
-        DataFormat.PYSPARK_OBJECT,
         DataFormat.PANDAS_INDEX,
         DataFormat.PANDAS_STYLER,
-        DataFormat.PANDAS_ARRAY,
-        DataFormat.MODIN_OBJECT,
-        DataFormat.SNOWPANDAS_OBJECT,
-        DataFormat.DASK_OBJECT,
+        DataFormat.PYSPARK_OBJECT,
         DataFormat.RAY_DATASET,
-        DataFormat.DBAPI_CURSOR,
-        DataFormat.DUCKDB_RELATION,
-    ]:
+        DataFormat.SNOWPANDAS_OBJECT,
+        DataFormat.SNOWPARK_OBJECT,
+    }:
         return df
     elif data_format == DataFormat.NUMPY_LIST:
         import numpy as np
@@ -1347,10 +1349,7 @@ def convert_pandas_df_to_data_format(
         return pa.Array.from_pandas(_pandas_df_to_series(df))
     elif data_format == DataFormat.PANDAS_SERIES:
         return _pandas_df_to_series(df)
-    elif (
-        data_format == DataFormat.POLARS_DATAFRAME
-        or data_format == DataFormat.POLARS_LAZYFRAME
-    ):
+    elif data_format in {DataFormat.POLARS_DATAFRAME, DataFormat.POLARS_LAZYFRAME}:
         import polars as pl  # type: ignore[import-not-found]
 
         return pl.from_pandas(df)

streamlit/elements/image.py
@@ -25,7 +25,7 @@ import io
 import os
 import re
 from enum import IntEnum
-from typing import TYPE_CHECKING, Final, List, Literal, Sequence, Union, cast
+from typing import TYPE_CHECKING, Final, Literal, Sequence, Union, cast
 
 from typing_extensions import TypeAlias
 
@@ -54,7 +54,7 @@ PILImage: TypeAlias = Union[
     "ImageFile.ImageFile", "Image.Image", "GifImagePlugin.GifImageFile"
 ]
 AtomicImage: TypeAlias = Union[PILImage, "npt.NDArray[Any]", io.BytesIO, str, bytes]
-ImageOrImageList: TypeAlias = Union[AtomicImage, List[AtomicImage]]
+ImageOrImageList: TypeAlias = Union[AtomicImage, Sequence[AtomicImage]]
 UseColumnWith: TypeAlias = Union[Literal["auto", "always", "never"], bool, None]
 Channels: TypeAlias = Literal["RGB", "BGR"]
 ImageFormat: TypeAlias = Literal["JPEG", "PNG", "GIF"]
@@ -178,14 +178,11 @@ class ImageMixin:
 
 
 def _image_may_have_alpha_channel(image: PILImage) -> bool:
-    if image.mode in ("RGBA", "LA", "P"):
-        return True
-    else:
-        return False
+    return image.mode in ("RGBA", "LA", "P")
 
 
 def _image_is_gif(image: PILImage) -> bool:
-    return bool(image.format == "GIF")
+    return image.format == "GIF"
 
 
 def _validate_image_format_string(
@@ -199,7 +196,7 @@ def _validate_image_format_string(
     "GIF" if the image is a GIF, and "JPEG" otherwise.
     """
     format = format.upper()
-    if format == "JPEG" or format == "PNG":
+    if format in {"JPEG", "PNG"}:
         return cast(ImageFormat, format)
 
     # We are forgiving on the spelling of JPEG
@@ -509,30 +506,23 @@ def marshall_images(
 
     # Turn single image and caption into one element list.
     images: Sequence[AtomicImage]
-    if isinstance(image, list):
-        images = image
+    if isinstance(image, (list, set, tuple)):
+        images = list(image)
     elif isinstance(image, np.ndarray) and len(cast(NumpyShape, image.shape)) == 4:
         images = _4d_to_list_3d(image)
     else:
-        images = [image]
+        images = [image]  # type: ignore
 
     if isinstance(caption, list):
         captions: Sequence[str | None] = caption
+    elif isinstance(caption, str):
+        captions = [caption]
+    elif isinstance(caption, np.ndarray) and len(cast(NumpyShape, caption.shape)) == 1:
+        captions = caption.tolist()
+    elif caption is None:
+        captions = [None] * len(images)
     else:
-        if isinstance(caption, str):
-            captions = [caption]
-        # You can pass in a 1-D Numpy array as captions.
-        elif (
-            isinstance(caption, np.ndarray)
-            and len(cast(NumpyShape, caption.shape)) == 1
-        ):
-            captions = caption.tolist()
-        # If there are no captions then make the captions list the same size
-        # as the images list.
-        elif caption is None:
-            captions = [None] * len(images)
-        else:
-            captions = [str(caption)]
+        captions = [str(caption)]
 
     assert isinstance(
         captions, list
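
Note: marshall_images now accepts any list, set, or tuple of images, matching the widened ImageOrImageList alias above, and the caption normalization is flattened into a single if/elif chain. A hedged sketch of the behavior these branches imply:

import numpy as np
import streamlit as st

# A tuple of frames now passes the isinstance check; a 1-D numpy
# array still works as the caption list (it is converted via tolist()).
frames = (
    np.zeros((8, 8, 3), dtype=np.uint8),
    np.full((8, 8, 3), 255, dtype=np.uint8),
)
st.image(frames, caption=np.array(["black", "white"]))
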

streamlit/elements/lib/built_in_chart_utils.py
@@ -643,7 +643,7 @@ def _parse_y_columns(
 
     else:
         y_column_list = [
-            str(col) for col in dataframe_util.convert_anything_to_sequence(y_from_user)
+            str(col) for col in dataframe_util.convert_anything_to_list(y_from_user)
         ]
 
     for col in y_column_list:

streamlit/elements/lib/options_selector_utils.py
@@ -19,7 +19,7 @@ from typing import (
     Sequence,
 )
 
-from streamlit.dataframe_util import OptionSequence, convert_anything_to_sequence
+from streamlit.dataframe_util import OptionSequence, convert_anything_to_list
 from streamlit.errors import StreamlitAPIException
 from streamlit.type_util import (
     T,
@@ -34,7 +34,7 @@ def check_and_convert_to_indices(
     if default_values is None:
         return None
 
-    default_values = convert_anything_to_sequence(default_values)
+    default_values = convert_anything_to_list(default_values)
 
     for value in default_values:
         if value not in opt:
@@ -47,7 +47,7 @@
 
 
 def convert_to_sequence_and_check_comparable(options: OptionSequence[T]) -> Sequence[T]:
-    indexable_options = convert_anything_to_sequence(options)
+    indexable_options = convert_anything_to_list(options)
     check_python_comparable(indexable_options)
     return indexable_options
 

streamlit/elements/lib/policies.py
@@ -19,7 +19,10 @@ from typing import TYPE_CHECKING, Any, Final, Sequence
 from streamlit import config, errors, logger, runtime
 from streamlit.elements.form_utils import is_in_form
 from streamlit.errors import StreamlitAPIException, StreamlitAPIWarning
-from streamlit.runtime.scriptrunner_utils.script_run_context import get_script_run_ctx
+from streamlit.runtime.scriptrunner_utils.script_run_context import (
+    get_script_run_ctx,
+    in_cached_function,
+)
 from streamlit.runtime.state import WidgetCallback, get_session_state
 
 if TYPE_CHECKING:
@@ -114,14 +117,12 @@ def check_cache_replay_rules() -> None:
     If there are other similar checks in the future, we could extend this
     function to check for those as well. And rename it to check_widget_usage_rules.
     """
-    if runtime.exists():
-        ctx = get_script_run_ctx()
-        if ctx and ctx.disallow_cached_widget_usage:
-            from streamlit import exception
-
-            # We use an exception here to show a proper stack trace
-            # that indicates to the user where the issue is.
-            exception(CachedWidgetWarning())
+    if in_cached_function.get():
+        from streamlit import exception
+
+        # We use an exception here to show a proper stack trace
+        # that indicates to the user where the issue is.
+        exception(CachedWidgetWarning())
 
 
 _fragment_writes_widget_to_outside_error = (
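
Note: the rewritten check reads a single in_cached_function flag instead of requiring a live runtime and a ScriptRunContext with a disallow_cached_widget_usage attribute. A minimal sketch of the ContextVar pattern this relies on (the plumbing is simplified; only the names that appear in the diff are real):

from contextvars import ContextVar

in_cached_function: ContextVar[bool] = ContextVar("in_cached_function", default=False)

def _call_cached(func, *args):
    # The flag is set for the duration of the cached function's body
    # and restored afterwards, even if the body raises.
    token = in_cached_function.set(True)
    try:
        return func(*args)
    finally:
        in_cached_function.reset(token)

def check_cache_replay_rules() -> None:
    if in_cached_function.get():
        print("CachedWidgetWarning: widget used inside a cached function")
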

streamlit/elements/widgets/radio.py
@@ -18,7 +18,7 @@ from dataclasses import dataclass
 from textwrap import dedent
 from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast
 
-from streamlit.dataframe_util import OptionSequence, convert_anything_to_sequence
+from streamlit.dataframe_util import OptionSequence, convert_anything_to_list
 from streamlit.elements.form_utils import current_form_id
 from streamlit.elements.lib.policies import (
     check_widget_policies,
@@ -270,7 +270,7 @@ class RadioMixin:
         )
         maybe_raise_label_warnings(label, label_visibility)
 
-        opt = convert_anything_to_sequence(options)
+        opt = convert_anything_to_list(options)
         check_python_comparable(opt)
 
         id = compute_widget_id(

streamlit/elements/widgets/select_slider.py
@@ -20,7 +20,7 @@ from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, Tuple, cast
 
 from typing_extensions import TypeGuard
 
-from streamlit.dataframe_util import OptionSequence, convert_anything_to_sequence
+from streamlit.dataframe_util import OptionSequence, convert_anything_to_list
 from streamlit.elements.form_utils import current_form_id
 from streamlit.elements.lib.policies import (
     check_widget_policies,
@@ -287,7 +287,7 @@ class SelectSliderMixin:
         )
         maybe_raise_label_warnings(label, label_visibility)
 
-        opt = convert_anything_to_sequence(options)
+        opt = convert_anything_to_list(options)
         check_python_comparable(opt)
 
         if len(opt) == 0:

streamlit/elements/widgets/selectbox.py
@@ -17,7 +17,7 @@ from dataclasses import dataclass
 from textwrap import dedent
 from typing import TYPE_CHECKING, Any, Callable, Generic, Sequence, cast, overload
 
-from streamlit.dataframe_util import OptionSequence, convert_anything_to_sequence
+from streamlit.dataframe_util import OptionSequence, convert_anything_to_list
 from streamlit.elements.form_utils import current_form_id
 from streamlit.elements.lib.policies import (
     check_widget_policies,
@@ -281,7 +281,7 @@ class SelectboxMixin:
         )
         maybe_raise_label_warnings(label, label_visibility)
 
-        opt = convert_anything_to_sequence(options)
+        opt = convert_anything_to_list(options)
         check_python_comparable(opt)
 
         id = compute_widget_id(

streamlit/runtime/caching/__init__.py
@@ -14,7 +14,7 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING
 
 from streamlit.runtime.caching.cache_data_api import (
     CACHE_DATA_MESSAGE_REPLAY_CTX,
@@ -33,7 +33,6 @@ if TYPE_CHECKING:
     from google.protobuf.message import Message
 
     from streamlit.proto.Block_pb2 import Block
-    from streamlit.runtime.state.common import WidgetMetadata
 
 
 def save_element_message(
@@ -73,14 +72,6 @@ def save_block_message(
     )
 
 
-def save_widget_metadata(metadata: WidgetMetadata[Any]) -> None:
-    """Save a widget's metadata to a thread-local callstack, so the widget
-    can be registered again when that widget is replayed.
-    """
-    CACHE_DATA_MESSAGE_REPLAY_CTX.save_widget_metadata(metadata)
-    CACHE_RESOURCE_MESSAGE_REPLAY_CTX.save_widget_metadata(metadata)
-
-
 def save_media_data(image_data: bytes | str, mimetype: str, image_id: str) -> None:
     CACHE_DATA_MESSAGE_REPLAY_CTX.save_image_data(image_data, mimetype, image_id)
     CACHE_RESOURCE_MESSAGE_REPLAY_CTX.save_image_data(image_data, mimetype, image_id)
@@ -99,7 +90,6 @@ __all__ = [
     "CACHE_DOCS_URL",
     "save_element_message",
     "save_block_message",
-    "save_widget_metadata",
     "save_media_data",
     "get_data_cache_stats_provider",
     "get_resource_cache_stats_provider",

streamlit/runtime/caching/cache_data_api.py
@@ -47,9 +47,7 @@ from streamlit.runtime.caching.cache_utils import (
 from streamlit.runtime.caching.cached_message_replay import (
     CachedMessageReplayContext,
     CachedResult,
-    ElementMsgData,
     MsgData,
-    MultiCacheResults,
     show_widget_replay_deprecation,
 )
 from streamlit.runtime.caching.storage import (
@@ -66,7 +64,6 @@ from streamlit.runtime.caching.storage.dummy_cache_storage import (
     MemoryCacheStorageManager,
 )
 from streamlit.runtime.metrics_util import gather_metrics
-from streamlit.runtime.scriptrunner_utils.script_run_context import get_script_run_ctx
 from streamlit.runtime.stats import CacheStat, CacheStatsProvider, group_stats
 from streamlit.time_util import time_to_seconds
 
@@ -93,13 +90,11 @@ class CachedDataFuncInfo(CachedFuncInfo):
         persist: CachePersistType,
         max_entries: int | None,
         ttl: float | timedelta | str | None,
-        allow_widgets: bool,
         hash_funcs: HashFuncsDict | None = None,
     ):
         super().__init__(
             func,
             show_spinner=show_spinner,
-            allow_widgets=allow_widgets,
             hash_funcs=hash_funcs,
         )
         self.persist = persist
@@ -128,7 +123,6 @@
             max_entries=self.max_entries,
             ttl=self.ttl,
             display_name=self.display_name,
-            allow_widgets=self.allow_widgets,
         )
 
     def validate_params(self) -> None:
@@ -160,7 +154,6 @@ class DataCaches(CacheStatsProvider):
         max_entries: int | None,
         ttl: int | float | timedelta | str | None,
         display_name: str,
-        allow_widgets: bool,
     ) -> DataCache:
         """Return the mem cache for the given key.
 
@@ -220,7 +213,6 @@
             max_entries=max_entries,
             ttl_seconds=ttl_seconds,
             display_name=display_name,
-            allow_widgets=allow_widgets,
         )
         self._function_caches[key] = cache
         return cache
@@ -443,9 +435,11 @@ class CacheDataAPI:
 
         experimental_allow_widgets : bool
             Allow widgets to be used in the cached function. Defaults to False.
-            Support for widgets in cached functions is currently experimental.
-            Setting this parameter to True may lead to excessive memory use since the
-            widget value is treated as an additional input parameter to the cache.
+
+            .. deprecated::
+                The cached widget replay functionality was removed in 1.38. Please
+                remove the ``experimental_allow_widgets`` parameter from your
+                caching decorators.
 
         hash_funcs : dict or None
             Mapping of types or fully qualified names to hash functions.
@@ -455,10 +449,6 @@
             the provided function to generate a hash for it. See below for an example
             of how this can be used.
 
-        .. deprecated::
-            ``experimental_allow_widgets`` is deprecated and will be removed in
-            a later version.
-
         Example
         -------
         >>> import streamlit as st
@@ -574,7 +564,6 @@
                     show_spinner=show_spinner,
                     max_entries=max_entries,
                     ttl=ttl,
-                    allow_widgets=experimental_allow_widgets,
                     hash_funcs=hash_funcs,
                 )
             )
@@ -589,7 +578,6 @@
                 show_spinner=show_spinner,
                 max_entries=max_entries,
                 ttl=ttl,
-                allow_widgets=experimental_allow_widgets,
                 hash_funcs=hash_funcs,
             )
         )
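
Note: both decorator paths now construct CachedDataFuncInfo without forwarding experimental_allow_widgets, so the parameter still exists in the API (show_widget_replay_deprecation remains imported) but no longer has any effect. For callers the migration is just deleting the argument; a hedged before/after sketch:

import streamlit as st

# Before, on 1.37 and earlier:
# @st.cache_data(experimental_allow_widgets=True)

# After, on 1.38+: drop the parameter. Widgets inside cached functions
# are no longer replayed and instead surface a CachedWidgetWarning.
@st.cache_data(ttl="1h")
def load_data(path: str):
    import pandas as pd

    return pd.read_csv(path)
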
@@ -611,7 +599,6 @@ class DataCache(Cache):
         max_entries: int | None,
         ttl_seconds: float | None,
         display_name: str,
-        allow_widgets: bool = False,
     ):
         super().__init__()
         self.key = key
@@ -620,7 +607,6 @@
         self.ttl_seconds = ttl_seconds
         self.max_entries = max_entries
         self.persist = persist
-        self.allow_widgets = allow_widgets
 
     def get_stats(self) -> list[CacheStat]:
         if isinstance(self.storage, CacheStatsProvider):
@@ -641,21 +627,12 @@
 
         try:
             entry = pickle.loads(pickled_entry)
-            if not isinstance(entry, MultiCacheResults):
+            if not isinstance(entry, CachedResult):
                 # Loaded an old cache file format, remove it and let the caller
                 # rerun the function.
                 self.storage.delete(key)
                 raise CacheKeyNotFoundError()
-
-            ctx = get_script_run_ctx()
-            if not ctx:
-                raise CacheKeyNotFoundError()
-
-            widget_key = entry.get_current_widget_key(ctx, CacheType.DATA)
-            if widget_key in entry.results:
-                return entry.results[widget_key]
-            else:
-                raise CacheKeyNotFoundError()
+            return entry
         except pickle.UnpicklingError as exc:
             raise CacheError(f"Failed to unpickle {key}") from exc
 
@@ -664,43 +641,13 @@
         """Write a value and associated messages to the cache.
         The value must be pickleable.
         """
-        ctx = get_script_run_ctx()
-        if ctx is None:
-            return
-
-        main_id = st._main.id
-        sidebar_id = st.sidebar.id
-
-        if self.allow_widgets:
-            widgets = {
-                msg.widget_metadata.widget_id
-                for msg in messages
-                if isinstance(msg, ElementMsgData) and msg.widget_metadata is not None
-            }
-        else:
-            widgets = set()
-
-        multi_cache_results: MultiCacheResults | None = None
-
-        # Try to find in cache storage, then falling back to a new result instance
-        try:
-            multi_cache_results = self._read_multi_results_from_storage(key)
-        except (CacheKeyNotFoundError, pickle.UnpicklingError):
-            pass
-
-        if multi_cache_results is None:
-            multi_cache_results = MultiCacheResults(widget_ids=widgets, results={})
-        multi_cache_results.widget_ids.update(widgets)
-        widget_key = multi_cache_results.get_current_widget_key(ctx, CacheType.DATA)
-
-        result = CachedResult(value, messages, main_id, sidebar_id)
-        multi_cache_results.results[widget_key] = result
-
         try:
-            pickled_entry = pickle.dumps(multi_cache_results)
+            main_id = st._main.id
+            sidebar_id = st.sidebar.id
+            entry = CachedResult(value, messages, main_id, sidebar_id)
+            pickled_entry = pickle.dumps(entry)
         except (pickle.PicklingError, TypeError) as exc:
             raise CacheError(f"Failed to pickle {key}") from exc
-
         self.storage.set(key, pickled_entry)
 
     def _clear(self, key: str | None = None) -> None:
@@ -708,22 +655,3 @@
             self.storage.clear()
         else:
             self.storage.delete(key)
-
-    def _read_multi_results_from_storage(self, key: str) -> MultiCacheResults:
-        """Look up the results from storage and ensure it has the right type.
-
-        Raises a `CacheKeyNotFoundError` if the key has no entry, or if the
-        entry is malformed.
-        """
-        try:
-            pickled = self.storage.get(key)
-        except CacheStorageKeyNotFoundError as e:
-            raise CacheKeyNotFoundError(str(e)) from e
-
-        maybe_results = pickle.loads(pickled)
-
-        if isinstance(maybe_results, MultiCacheResults):
-            return maybe_results
-        else:
-            self.storage.delete(key)
-            raise CacheKeyNotFoundError()
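
Taken together, the DataCache changes replace the widget-aware MultiCacheResults wrapper, which kept one CachedResult per widget-state key, with a single pickled CachedResult per cache key; that is what makes _read_multi_results_from_storage and the get_script_run_ctx dependency removable. A simplified sketch of the new round trip, with storage reduced to a dict (the real code goes through a CacheStorage backend and a richer CachedResult):

import pickle
from dataclasses import dataclass, field

@dataclass
class CachedResult:
    value: object
    messages: list = field(default_factory=list)

storage: dict[str, bytes] = {}

def write_result(key: str, value: object) -> None:
    storage[key] = pickle.dumps(CachedResult(value))

def read_result(key: str) -> object:
    entry = pickle.loads(storage[key])
    if not isinstance(entry, CachedResult):
        # Entry in the old multi-result format: delete it and make the
        # caller recompute, mirroring the CacheKeyNotFoundError path.
        del storage[key]
        raise KeyError(key)
    return entry.value

write_result("answer", 42)
assert read_result("answer") == 42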