streamlit-nightly 1.21.1.dev20230424__py2.py3-none-any.whl → 1.21.1.dev20230425__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. streamlit/__init__.py +0 -7
  2. streamlit/elements/data_editor.py +104 -96
  3. streamlit/elements/file_uploader.py +17 -0
  4. streamlit/elements/layouts.py +0 -5
  5. streamlit/elements/lib/column_config_utils.py +371 -0
  6. streamlit/static/asset-manifest.json +20 -20
  7. streamlit/static/index.html +1 -1
  8. streamlit/static/static/js/{14.a19a6cd8.chunk.js → 14.9399e424.chunk.js} +1 -1
  9. streamlit/static/static/js/{227.087adf66.chunk.js → 227.9ccac1d5.chunk.js} +1 -1
  10. streamlit/static/static/js/{242.0daf8b47.chunk.js → 242.1b3289e0.chunk.js} +1 -1
  11. streamlit/static/static/js/{279.fdac58fc.chunk.js → 279.35b01780.chunk.js} +1 -1
  12. streamlit/static/static/js/{289.481fd42d.chunk.js → 289.e6157e40.chunk.js} +1 -1
  13. streamlit/static/static/js/{467.242e14ff.chunk.js → 467.50ac84df.chunk.js} +1 -1
  14. streamlit/static/static/js/{491.d0b710e9.chunk.js → 491.5a33a8ce.chunk.js} +1 -1
  15. streamlit/static/static/js/503.15864587.chunk.js +1 -0
  16. streamlit/static/static/js/{511.9f04ae9e.chunk.js → 511.e6ca580f.chunk.js} +1 -1
  17. streamlit/static/static/js/{578.ceaadcd5.chunk.js → 578.a65fcea0.chunk.js} +1 -1
  18. streamlit/static/static/js/{619.365611c8.chunk.js → 619.0325af0e.chunk.js} +1 -1
  19. streamlit/static/static/js/{628.7f41e2de.chunk.js → 628.9c70196b.chunk.js} +1 -1
  20. streamlit/static/static/js/{681.a2ba76c7.chunk.js → 681.9e30a8cd.chunk.js} +1 -1
  21. streamlit/static/static/js/{745.e2bcf16d.chunk.js → 745.e75ba963.chunk.js} +1 -1
  22. streamlit/static/static/js/{807.6789990f.chunk.js → 807.122f8b05.chunk.js} +1 -1
  23. streamlit/static/static/js/{828.096c1ad3.chunk.js → 828.0fde3da8.chunk.js} +1 -1
  24. streamlit/static/static/js/{871.ba625aee.chunk.js → 871.90a7dbae.chunk.js} +1 -1
  25. streamlit/static/static/js/{main.e7d9d363.js → main.ff35bd72.js} +2 -2
  26. {streamlit_nightly-1.21.1.dev20230424.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/METADATA +1 -1
  27. {streamlit_nightly-1.21.1.dev20230424.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/RECORD +33 -33
  28. streamlit/elements/show.py +0 -105
  29. streamlit/static/static/js/728.79e35e5d.chunk.js +0 -1
  30. /streamlit/static/static/css/{728.23fa976d.chunk.css → 503.23fa976d.chunk.css} +0 -0
  31. /streamlit/static/static/js/{main.e7d9d363.js.LICENSE.txt → main.ff35bd72.js.LICENSE.txt} +0 -0
  32. {streamlit_nightly-1.21.1.dev20230424.data → streamlit_nightly-1.21.1.dev20230425.data}/scripts/streamlit.cmd +0 -0
  33. {streamlit_nightly-1.21.1.dev20230424.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/WHEEL +0 -0
  34. {streamlit_nightly-1.21.1.dev20230424.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/entry_points.txt +0 -0
  35. {streamlit_nightly-1.21.1.dev20230424.dist-info → streamlit_nightly-1.21.1.dev20230425.dist-info}/top_level.txt +0 -0
streamlit/__init__.py CHANGED
@@ -71,7 +71,6 @@ from streamlit.commands.query_params import (
     get_query_params as _get_query_params,
     set_query_params as _set_query_params,
 )
-from streamlit.elements.show import show as _show
 
 # Modules that the user should have access to. These are imported with "as"
 # syntax pass mypy checking with implicit_reexport disabled.
@@ -201,18 +200,12 @@ session_state = _SessionStateProxy()
 cache_data = _cache_data
 cache_resource = _cache_resource
 
-# Beta APIs
-beta_container = _gather_metrics("beta_container", _main.beta_container)
-beta_expander = _gather_metrics("beta_expander", _main.beta_expander)
-beta_columns = _gather_metrics("beta_columns", _main.beta_columns)
-
 # Experimental APIs
 experimental_user = _UserInfoProxy()
 experimental_singleton = _experimental_singleton
 experimental_memo = _experimental_memo
 experimental_get_query_params = _get_query_params
 experimental_set_query_params = _set_query_params
-experimental_show = _show
 experimental_rerun = _rerun
 experimental_data_editor = _main.experimental_data_editor
 experimental_connection = _connection_factory
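
The hunks above remove the long-deprecated beta_ aliases and st.experimental_show from the package root. A minimal migration sketch, assuming the stable counterparts (which remain exported) are the intended replacements; st.write as the stand-in for experimental_show is an assumption, not stated in this diff:

    import streamlit as st

    # st.beta_container()          ->  st.container()
    # st.beta_expander("Details")  ->  st.expander("Details")
    # st.beta_columns(2)           ->  st.columns(2)
    # st.experimental_show(df)     ->  st.write(df)  # assumption: closest stable equivalent

    left, right = st.columns(2)          # replaces st.beta_columns(2)
    with st.expander("Details"):         # replaces st.beta_expander("Details")
        st.write("Migrated from the beta_ APIs.")
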
streamlit/elements/data_editor.py CHANGED
@@ -14,7 +14,6 @@
 
 from __future__ import annotations
 
-import contextlib
 import json
 from dataclasses import dataclass
 from typing import (
@@ -34,11 +33,19 @@ from typing import (
 
 import pandas as pd
 import pyarrow as pa
-from pandas.api.types import is_datetime64_any_dtype, is_float_dtype, is_integer_dtype
-from typing_extensions import Final, Literal, TypeAlias, TypedDict
+from typing_extensions import Literal, TypeAlias, TypedDict
 
+from streamlit import logger as _logger
 from streamlit import type_util
 from streamlit.elements.form import current_form_id
+from streamlit.elements.lib.column_config_utils import (
+    INDEX_IDENTIFIER,
+    ColumnConfigMapping,
+    ColumnDataKind,
+    DataframeSchema,
+    determine_dataframe_schema,
+    marshall_column_config,
+)
 from streamlit.elements.lib.pandas_styler_utils import marshall_styler
 from streamlit.errors import StreamlitAPIException
 from streamlit.proto.Arrow_pb2 import Arrow as ArrowProto
@@ -58,7 +65,7 @@ if TYPE_CHECKING:
 
     from streamlit.delta_generator import DeltaGenerator
 
-_INDEX_IDENTIFIER: Final = "index"
+_LOGGER = _logger.get_logger("root")
 
 # All formats that support direct editing, meaning that these
 # formats will be returned with the same type when used with data_editor.
@@ -91,25 +98,6 @@ DataTypes: TypeAlias = Union[
 ]
 
 
-class ColumnConfig(TypedDict, total=False):
-    title: Optional[str]
-    width: Optional[Literal["small", "medium", "large"]]
-    hidden: Optional[bool]
-    disabled: Optional[bool]
-    required: Optional[bool]
-    alignment: Optional[Literal["left", "center", "right"]]
-    type: Optional[
-        Literal[
-            "text",
-            "number",
-            "boolean",
-            "list",
-            "categorical",
-        ]
-    ]
-    type_options: Optional[Dict[str, Any]]
-
-
 class EditingState(TypedDict, total=False):
     """
     A dictionary representing the current state of the data editor.
@@ -133,44 +121,6 @@ class EditingState(TypedDict, total=False):
     deleted_rows: List[int]
 
 
-# A mapping of column names/IDs to column configs.
-ColumnConfigMapping: TypeAlias = Dict[Union[int, str], ColumnConfig]
-
-
-def _marshall_column_config(
-    proto: ArrowProto, columns: Optional[Dict[Union[int, str], ColumnConfig]] = None
-) -> None:
-    """Marshall the column config into the proto.
-
-    Parameters
-    ----------
-    proto : ArrowProto
-        The proto to marshall into.
-
-    columns : Optional[ColumnConfigMapping]
-        The column config to marshall.
-    """
-    if columns is None:
-        columns = {}
-
-    # Ignore all None values and prefix columns specified by index
-    def remove_none_values(input_dict: Dict[Any, Any]) -> Dict[Any, Any]:
-        new_dict = {}
-        for key, val in input_dict.items():
-            if isinstance(val, dict):
-                val = remove_none_values(val)
-            if val is not None:
-                new_dict[key] = val
-        return new_dict
-
-    proto.columns = json.dumps(
-        {
-            (f"col:{str(k)}" if isinstance(k, int) else k): v
-            for (k, v) in remove_none_values(columns).items()
-        }
-    )
-
-
 @dataclass
 class DataEditorSerde:
     """DataEditorSerde is used to serialize and deserialize the data editor state."""
@@ -190,7 +140,10 @@ class DataEditorSerde:
         return json.dumps(editing_state, default=str)
 
 
-def _parse_value(value: Union[str, int, float, bool, None], dtype) -> Any:
+def _parse_value(
+    value: str | int | float | bool | None,
+    column_data_kind: ColumnDataKind,
+) -> Any:
     """Convert a value to the correct type.
 
     Parameters
@@ -198,8 +151,9 @@ def _parse_value(value: Union[str, int, float, bool, None], dtype) -> Any:
     value : str | int | float | bool | None
         The value to convert.
 
-    dtype
-        The type of the value.
+    column_data_kind : ColumnDataKind
+        The determined data kind of the column. The column data kind refers to the
+        shared data type of the values in the column (e.g. integer, float, string).
 
     Returns
     -------
@@ -208,23 +162,53 @@ def _parse_value(value: Union[str, int, float, bool, None], dtype) -> Any:
     if value is None:
         return None
 
-    # TODO(lukasmasuch): how to deal with date & time columns?
+    try:
+        if column_data_kind == ColumnDataKind.STRING:
+            return str(value)
 
-    # Datetime values try to parse the value to datetime:
-    # The value is expected to be a ISO 8601 string
-    if is_datetime64_any_dtype(dtype):
-        return pd.to_datetime(value, errors="ignore")
-    elif is_integer_dtype(dtype):
-        with contextlib.suppress(ValueError):
+        if column_data_kind == ColumnDataKind.INTEGER:
             return int(value)
-    elif is_float_dtype(dtype):
-        with contextlib.suppress(ValueError):
+
+        if column_data_kind == ColumnDataKind.FLOAT:
             return float(value)
+
+        if column_data_kind == ColumnDataKind.BOOLEAN:
+            return bool(value)
+
+        if column_data_kind in [
+            ColumnDataKind.DATETIME,
+            ColumnDataKind.DATE,
+            ColumnDataKind.TIME,
+        ]:
+            datetime_value = pd.to_datetime(value, utc=False)
+
+            if datetime_value is pd.NaT:
+                return None
+
+            if isinstance(datetime_value, pd.Timestamp):
+                datetime_value = datetime_value.to_pydatetime()
+
+            if column_data_kind == ColumnDataKind.DATETIME:
+                return datetime_value
+
+            if column_data_kind == ColumnDataKind.DATE:
+                return datetime_value.date()
+
+            if column_data_kind == ColumnDataKind.TIME:
+                return datetime_value.time()
+
+    except (ValueError, pd.errors.ParserError) as ex:
+        _LOGGER.warning(
+            "Failed to parse value %s as %s. Exception: %s", value, column_data_kind, ex
+        )
+        return None
     return value
 
 
 def _apply_cell_edits(
-    df: pd.DataFrame, edited_cells: Mapping[str, str | int | float | bool | None]
+    df: pd.DataFrame,
+    edited_cells: Mapping[str, str | int | float | bool | None],
+    dataframe_schema: DataframeSchema,
 ) -> None:
     """Apply cell edits to the provided dataframe (inplace).
 
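
A short sketch of what the kind-based parsing above does for a few representative inputs (sample values are hypothetical; the real function dispatches on ColumnDataKind):

    import pandas as pd

    # DATETIME/DATE/TIME kinds: ISO 8601 strings go through pd.to_datetime,
    # then .date()/.time() are taken for the narrower kinds.
    ts = pd.to_datetime("2023-04-25T10:30:00", utc=False)
    print(ts.to_pydatetime())    # 2023-04-25 10:30:00
    print(ts.date(), ts.time())  # 2023-04-25 10:30:00

    # INTEGER/FLOAT kinds use the plain constructors; a failed parse is
    # now logged and resolves to None instead of being silently suppressed.
    print(int("42"), float("3.14"))
    try:
        int("not-a-number")
    except ValueError:
        print("would be logged and returned as None")
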
@@ -237,6 +221,8 @@ def _apply_cell_edits(
         A dictionary of cell edits. The keys are the cell ids in the format
         "row:column" and the values are the new cell values.
 
+    dataframe_schema: DataframeSchema
+        The schema of the dataframe.
     """
     index_count = df.index.nlevels or 0
 
@@ -247,17 +233,21 @@ def _apply_cell_edits(
             # The edited cell is part of the index
             # To support multi-index in the future: use a tuple of values here
             # instead of a single value
-            df.index.values[row_pos] = _parse_value(value, df.index.dtype)
+            df.index.values[row_pos] = _parse_value(value, dataframe_schema[col_pos])
         else:
             # We need to subtract the number of index levels from col_pos
             # to get the correct column position for Pandas DataFrames
             mapped_column = col_pos - index_count
             df.iat[row_pos, mapped_column] = _parse_value(
-                value, df.iloc[:, mapped_column].dtype
+                value, dataframe_schema[col_pos]
             )
 
 
-def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) -> None:
+def _apply_row_additions(
+    df: pd.DataFrame,
+    added_rows: List[Dict[str, Any]],
+    dataframe_schema: DataframeSchema,
+) -> None:
     """Apply row additions to the provided dataframe (inplace).
 
     Parameters
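
For reference, edited_cells keys follow the "row:column" convention where the column position counts index levels first. A hypothetical example of applying one edit the way the code above does:

    import pandas as pd

    df = pd.DataFrame({"name": ["Alice", "Bob"], "age": [30, 40]})
    edited_cells = {"1:2": "41"}  # row 1, column position 2 (the "age" column)

    for cell_id, value in edited_cells.items():
        row_pos, col_pos = map(int, cell_id.split(":"))
        index_count = df.index.nlevels  # 1 -> column position 0 is the index
        df.iat[row_pos, col_pos - index_count] = int(value)
    print(df.at[1, "age"])  # 41
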
@@ -268,6 +258,9 @@ def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) ->
     added_rows : List[Dict[str, Any]]
         A list of row additions. Each row addition is a dictionary with the
         column position as key and the new cell value as value.
+
+    dataframe_schema: DataframeSchema
+        The schema of the dataframe.
     """
     if not added_rows:
         return
@@ -279,7 +272,7 @@ def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) ->
     # combination with loc. As a workaround, we manually track the values here:
     range_index_stop = None
     range_index_step = None
-    if type(df.index) == pd.RangeIndex:
+    if isinstance(df.index, pd.RangeIndex):
         range_index_stop = df.index.stop
         range_index_step = df.index.step
 
@@ -292,14 +285,12 @@ def _apply_row_additions(df: pd.DataFrame, added_rows: List[Dict[str, Any]]) ->
         if col_pos < index_count:
             # To support multi-index in the future: use a tuple of values here
             # instead of a single value
-            index_value = _parse_value(value, df.index.dtype)
+            index_value = _parse_value(value, dataframe_schema[col_pos])
         else:
             # We need to subtract the number of index levels from the col_pos
             # to get the correct column position for Pandas DataFrames
             mapped_column = col_pos - index_count
-            new_row[mapped_column] = _parse_value(
-                value, df.iloc[:, mapped_column].dtype
-            )
+            new_row[mapped_column] = _parse_value(value, dataframe_schema[col_pos])
     # Append the new row to the dataframe
     if range_index_stop is not None:
         df.loc[range_index_stop, :] = new_row
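
Why the tracked stop/step values matter: after a loc-based append, pandas may no longer report the index as a RangeIndex, so its stop attribute cannot be read back. A small sketch with hypothetical data:

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3]})  # RangeIndex(start=0, stop=3, step=1)
    stop, step = df.index.stop, df.index.step

    df.loc[stop, :] = [4]   # append at label 3
    stop += step            # track the next label manually
    df.loc[stop, :] = [5]   # append at label 4
    print(list(df.index))   # [0, 1, 2, 3, 4]
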
@@ -329,7 +320,11 @@ def _apply_row_deletions(df: pd.DataFrame, deleted_rows: List[int]) -> None:
     df.drop(df.index[deleted_rows], inplace=True)
 
 
-def _apply_dataframe_edits(df: pd.DataFrame, data_editor_state: EditingState) -> None:
+def _apply_dataframe_edits(
+    df: pd.DataFrame,
+    data_editor_state: EditingState,
+    dataframe_schema: DataframeSchema,
+) -> None:
     """Apply edits to the provided dataframe (inplace).
 
     This includes cell edits, row additions and row deletions.
@@ -341,12 +336,15 @@
 
     data_editor_state : EditingState
         The editing state of the data editor component.
+
+    dataframe_schema: DataframeSchema
+        The schema of the dataframe.
     """
     if data_editor_state.get("edited_cells"):
-        _apply_cell_edits(df, data_editor_state["edited_cells"])
+        _apply_cell_edits(df, data_editor_state["edited_cells"], dataframe_schema)
 
     if data_editor_state.get("added_rows"):
-        _apply_row_additions(df, data_editor_state["added_rows"])
+        _apply_row_additions(df, data_editor_state["added_rows"], dataframe_schema)
 
     if data_editor_state.get("deleted_rows"):
         _apply_row_deletions(df, data_editor_state["deleted_rows"])
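
A hypothetical editing-state payload, showing the order in which the dispatcher above applies it (cell edits first, then row additions, then row deletions):

    editing_state = {
        "edited_cells": {"0:1": "Alice"},         # "row:column" -> new value
        "added_rows": [{"1": "Bob", "2": "42"}],  # column position -> value
        "deleted_rows": [2],                      # row positions to drop
    }
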
@@ -393,9 +391,9 @@ def _apply_data_specific_configs(
         DataFormat.LIST_OF_ROWS,
         DataFormat.COLUMN_VALUE_MAPPING,
     ]:
-        if _INDEX_IDENTIFIER not in columns_config:
-            columns_config[_INDEX_IDENTIFIER] = {}
-        columns_config[_INDEX_IDENTIFIER]["hidden"] = True
+        if INDEX_IDENTIFIER not in columns_config:
+            columns_config[INDEX_IDENTIFIER] = {}
+        columns_config[INDEX_IDENTIFIER]["hidden"] = True
 
     # Rename the first column to "value" for some of the data formats
     if data_format in [
@@ -593,13 +591,24 @@ class DataEditorMixin:
 
         # Temporary workaround: We hide range indices if num_rows is dynamic.
        # since the current way of handling this index during editing is a bit confusing.
-        if type(data_df.index) is pd.RangeIndex and num_rows == "dynamic":
-            if _INDEX_IDENTIFIER not in columns_config:
-                columns_config[_INDEX_IDENTIFIER] = {}
-            columns_config[_INDEX_IDENTIFIER]["hidden"] = True
+        if isinstance(data_df.index, pd.RangeIndex) and num_rows == "dynamic":
+            if INDEX_IDENTIFIER not in columns_config:
+                columns_config[INDEX_IDENTIFIER] = {}
+            columns_config[INDEX_IDENTIFIER]["hidden"] = True
+
+        # Convert the dataframe to an arrow table which is used as the main
+        # serialization format for sending the data to the frontend.
+        # We also utilize the arrow schema to determine the data kinds of every column.
+        arrow_table = pa.Table.from_pandas(data_df)
+
+        # Determine the dataframe schema which is required for parsing edited values
+        # and for checking type compatibilities.
+        dataframe_schema = determine_dataframe_schema(data_df, arrow_table.schema)
 
         proto = ArrowProto()
+
         proto.use_container_width = use_container_width
+
         if width:
             proto.width = width
         if height:
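
The Arrow table is now built once and reused: its schema doubles as the source of per-column data kinds for determine_dataframe_schema. A sketch with hypothetical data (the exact schema rendering depends on the pandas/pyarrow versions):

    import pandas as pd
    import pyarrow as pa

    data_df = pd.DataFrame(
        {"name": ["Alice"], "age": [30], "joined": pd.to_datetime(["2023-04-25"])}
    )
    arrow_table = pa.Table.from_pandas(data_df)
    print(arrow_table.schema)
    # name: string
    # age: int64
    # joined: timestamp[ns]
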
@@ -619,10 +628,9 @@ class DataEditorMixin:
         default_uuid = str(hash(delta_path))
         marshall_styler(proto, data, default_uuid)
 
-        table = pa.Table.from_pandas(data_df)
-        proto.data = type_util.pyarrow_table_to_bytes(table)
+        proto.data = type_util.pyarrow_table_to_bytes(arrow_table)
 
-        _marshall_column_config(proto, columns_config)
+        marshall_column_config(proto, columns_config)
 
         serde = DataEditorSerde()
 
@@ -638,7 +646,7 @@ class DataEditorMixin:
             ctx=get_script_run_ctx(),
         )
 
-        _apply_dataframe_edits(data_df, widget_state.value)
+        _apply_dataframe_edits(data_df, widget_state.value, dataframe_schema)
         self.dg._enqueue("arrow_data_frame", proto)
         return type_util.convert_df_to_data_format(data_df, data_format)
 
streamlit/elements/file_uploader.py CHANGED
@@ -43,6 +43,15 @@ from streamlit.type_util import Key, LabelVisibility, maybe_raise_label_warnings
 SomeUploadedFiles = Optional[Union[UploadedFile, List[UploadedFile]]]
 
 
+TYPE_PAIRS = [
+    (".jpg", ".jpeg"),
+    (".mpg", ".mpeg"),
+    (".mp4", ".mpeg4"),
+    (".tif", ".tiff"),
+    (".htm", ".html"),
+]
+
+
 def _get_file_recs(
     widget_id: str, widget_value: Optional[FileUploaderStateProto]
 ) -> List[UploadedFileRec]:
@@ -390,6 +399,14 @@ class FileUploaderMixin:
             for file_type in type
         ]
 
+        type = [t.lower() for t in type]
+
+        for x, y in TYPE_PAIRS:
+            if x in type and y not in type:
+                type.append(y)
+            if y in type and x not in type:
+                type.append(x)
+
         file_uploader_proto = FileUploaderProto()
         file_uploader_proto.label = label
         file_uploader_proto.type[:] = type if type is not None else []
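
The effect of the new TYPE_PAIRS handling: accepted extensions are lowercased, and common spelling pairs imply each other, so e.g. ".jpg" also admits ".jpeg" files. A standalone sketch (the variable is named type here only to mirror the widget parameter):

    TYPE_PAIRS = [(".jpg", ".jpeg"), (".mpg", ".mpeg"), (".mp4", ".mpeg4"),
                  (".tif", ".tiff"), (".htm", ".html")]

    type = [".JPG", ".html"]
    type = [t.lower() for t in type]
    for x, y in TYPE_PAIRS:
        if x in type and y not in type:
            type.append(y)
        if y in type and x not in type:
            type.append(x)
    print(type)  # ['.jpg', '.html', '.jpeg', '.htm']
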
streamlit/elements/layouts.py CHANGED
@@ -416,8 +416,3 @@ class LayoutsMixin:
     def dg(self) -> "DeltaGenerator":
         """Get our DeltaGenerator."""
         return cast("DeltaGenerator", self)
-
-    # Deprecated beta_ functions
-    beta_container = deprecate_func_name(container, "beta_container", "2021-11-02")
-    beta_expander = deprecate_func_name(expander, "beta_expander", "2021-11-02")
-    beta_columns = deprecate_func_name(columns, "beta_columns", "2021-11-02")