supervisely-6.73.459-py3-none-any.whl → supervisely-6.73.468-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of supervisely might be problematic.

@@ -1021,13 +1021,66 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
 
         return dataset_tree
 
-    def tree(self, project_id: int) -> Generator[Tuple[List[str], DatasetInfo], None, None]:
+    def _yield_tree(
+        self, tree: Dict[DatasetInfo, Dict], path: List[str]
+    ) -> Generator[Tuple[List[str], DatasetInfo], None, None]:
+        """
+        Helper method for recursive tree traversal.
+        Yields tuples of (path, dataset) for all datasets in the tree. For each node (dataset) at the current level,
+        yields its (path, dataset) before recursively traversing and yielding from its children.
+
+        :param tree: Tree structure to yield from.
+        :type tree: Dict[DatasetInfo, Dict]
+        :param path: Current path (used for recursion).
+        :type path: List[str]
+        :return: Generator of tuples of (path, dataset).
+        :rtype: Generator[Tuple[List[str], DatasetInfo], None, None]
+        """
+        for dataset, children in tree.items():
+            yield path, dataset
+            new_path = path + [dataset.name]
+            if children:
+                yield from self._yield_tree(children, new_path)
+
+    def _find_dataset_in_tree(
+        self, tree: Dict[DatasetInfo, Dict], target_id: int, path: List[str] = None
+    ) -> Tuple[Optional[DatasetInfo], Optional[Dict], List[str]]:
+        """Find a specific dataset in the tree and return its subtree and path.
+
+        :param tree: Tree structure to search in.
+        :type tree: Dict[DatasetInfo, Dict]
+        :param target_id: ID of the dataset to find.
+        :type target_id: int
+        :param path: Current path (used for recursion).
+        :type path: List[str], optional
+        :return: Tuple of (found_dataset, its_subtree, path_to_dataset).
+        :rtype: Tuple[Optional[DatasetInfo], Optional[Dict], List[str]]
+        """
+        if path is None:
+            path = []
+
+        for dataset, children in tree.items():
+            if dataset.id == target_id:
+                return dataset, children, path
+            # Search in children
+            if children:
+                found_dataset, found_children, found_path = self._find_dataset_in_tree(
+                    children, target_id, path + [dataset.name]
+                )
+                if found_dataset is not None:
+                    return found_dataset, found_children, found_path
+        return None, None, []
+
+    def tree(self, project_id: int, dataset_id: Optional[int] = None) -> Generator[Tuple[List[str], DatasetInfo], None, None]:
         """Yields tuples of (path, dataset) for all datasets in the project.
         Path of the dataset is a list of parents, e.g. ["ds1", "ds2", "ds3"].
         For root datasets, the path is an empty list.
 
         :param project_id: Project ID in which the Dataset is located.
         :type project_id: int
+        :param dataset_id: Optional Dataset ID to start the tree from. If provided, only yields
+            the subtree starting from this dataset (including the dataset itself and all its children).
+        :type dataset_id: Optional[int]
         :return: Generator of tuples of (path, dataset).
         :rtype: Generator[Tuple[List[str], DatasetInfo], None, None]
         :Usage example:
@@ -1040,11 +1093,17 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
 
            project_id = 123
 
+           # Get all datasets in the project
           for parents, dataset in api.dataset.tree(project_id):
               parents: List[str]
               dataset: sly.DatasetInfo
               print(parents, dataset.name)
 
+           # Get only a specific branch starting from dataset_id = 456
+           for parents, dataset in api.dataset.tree(project_id, dataset_id=456):
+               parents: List[str]
+               dataset: sly.DatasetInfo
+               print(parents, dataset.name)
 
            # Output:
            # [] ds1
@@ -1052,17 +1111,20 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
            # ["ds1", "ds2"] ds3
         """
 
-        def yield_tree(
-            tree: Dict[DatasetInfo, Dict], path: List[str]
-        ) -> Generator[Tuple[List[str], DatasetInfo], None, None]:
-            """Yields tuples of (path, dataset) for all datasets in the tree."""
-            for dataset, children in tree.items():
-                yield path, dataset
-                new_path = path + [dataset.name]
-                if children:
-                    yield from yield_tree(children, new_path)
-
-        yield from yield_tree(self.get_tree(project_id), [])
+        full_tree = self.get_tree(project_id)
+
+        if dataset_id is None:
+            # Return the full tree
+            yield from self._yield_tree(full_tree, [])
+        else:
+            # Find the specific dataset and return only its subtree
+            target_dataset, subtree, dataset_path = self._find_dataset_in_tree(full_tree, dataset_id)
+            if target_dataset is not None:
+                # Yield the target dataset first, then its children
+                yield dataset_path, target_dataset
+                if subtree:
+                    new_path = dataset_path + [target_dataset.name]
+                    yield from self._yield_tree(subtree, new_path)
 
     def get_nested(self, project_id: int, dataset_id: int) -> List[DatasetInfo]:
         """Returns a list of all nested datasets in the specified dataset.
@@ -42,7 +42,7 @@ class CustomStaticFiles(StaticFiles):
     def _get_range_header(range_header: str, file_size: int) -> typing.Tuple[int, int]:
         def _invalid_range():
             return HTTPException(
-                status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE,
+                status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE,  # TODO: change to status.HTTP_416_RANGE_NOT_SATISFIABLE if update starlette to 0.48.0+
                 detail=f"Invalid request range (Range:{range_header!r})",
             )
 
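The TODO could also be sidestepped without pinning a starlette version. A hedged sketch (not what the package does) that resolves whichever constant the installed starlette exposes, falling back to the bare status code:

```python
from starlette import status

# Prefer the newer constant name; fall back to the deprecated one on older
# starlette, and to the plain integer if neither attribute exists.
HTTP_416 = getattr(
    status,
    "HTTP_416_RANGE_NOT_SATISFIABLE",
    getattr(status, "HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE", 416),
)
```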
@@ -221,6 +221,11 @@ class FastTable(Widget):
         self._validate_input_data(data)
         self._source_data = self._prepare_input_data(data)
 
+        # Initialize filtered and searched data for proper initialization
+        self._filtered_data = self._filter(self._filter_value)
+        self._searched_data = self._search(self._search_str)
+        self._sorted_data = self._sort_table_data(self._searched_data)
+
         # prepare parsed_source_data, sliced_data, parsed_active_data
         (
             self._parsed_source_data,
@@ -265,9 +270,7 @@ class FastTable(Widget):
         self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
         self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
         StateJson().send_changes()
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["total"] = self._rows_total
         DataJson().send_changes()
         StateJson()["reactToChanges"] = True
@@ -297,7 +300,7 @@ class FastTable(Widget):
         :rtype: Dict[str, Any]
         """
         return {
-            "data": {i: row for i, row in enumerate(self._parsed_active_data["data"])},
+            "data": list(self._parsed_active_data["data"]),
             "columns": self._parsed_source_data["columns"],
             "projectMeta": self._project_meta,
             "columnsOptions": self._columns_options,
@@ -492,9 +495,7 @@ class FastTable(Widget):
         self._sort_column_idx = None
         self._sort_order = sort.get("order", None)
         self._page_size = init_options.pop("pageSize", 10)
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["columns"] = self._parsed_active_data["columns"]
         DataJson()[self.widget_id]["columnsOptions"] = self._columns_options
         DataJson()[self.widget_id]["options"] = init_options
@@ -523,9 +524,7 @@ class FastTable(Widget):
         self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
         self._parsed_source_data = self._unpack_pandas_table_data(self._source_data)
         self._rows_total = len(self._parsed_source_data["data"])
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["columns"] = self._parsed_active_data["columns"]
         DataJson()[self.widget_id]["total"] = len(self._source_data)
         DataJson().send_changes()
@@ -584,10 +583,17 @@ class FastTable(Widget):
         :rtype: pd.DataFrame
         """
         if active_page is True:
-            temp_parsed_data = [d["items"] for d in self._parsed_active_data["data"]]
+            # Return sliced data directly from source to preserve None/NaN values
+            packed_data = self._sliced_data.copy()
+            # Reset column names to first level only
+            if isinstance(packed_data.columns, pd.MultiIndex):
+                packed_data.columns = packed_data.columns.get_level_values("first")
         else:
-            temp_parsed_data = [d["items"] for d in self._parsed_source_data["data"]]
-        packed_data = pd.DataFrame(data=temp_parsed_data, columns=self._columns_first_idx)
+            # Return source data directly to preserve None/NaN values
+            packed_data = self._source_data.copy()
+            # Reset column names to first level only
+            if isinstance(packed_data.columns, pd.MultiIndex):
+                packed_data.columns = packed_data.columns.get_level_values("first")
         return packed_data
 
     def clear_selection(self) -> None:
@@ -627,8 +633,12 @@ class FastTable(Widget):
         rows = []
         for row in selected_rows:
             row_index = row["idx"]
-            row_data = row.get("row", row.get("items", None))
-            if row_index is None or row_data is None:
+            if row_index is None:
+                continue
+            # Get original data from source_data to preserve None/NaN values
+            try:
+                row_data = self._source_data.loc[row_index].values.tolist()
+            except (KeyError, IndexError):
                 continue
             rows.append(self.ClickedRow(row_data, row_index))
         return rows
@@ -639,8 +649,12 @@ class FastTable(Widget):
         if clicked_row is None:
             return None
         row_index = clicked_row["idx"]
-        row = clicked_row["row"]
-        if row_index is None or row is None:
+        if row_index is None:
+            return None
+        # Get original data from source_data to preserve None/NaN values
+        try:
+            row = self._source_data.loc[row_index].values.tolist()
+        except (KeyError, IndexError):
             return None
         return self.ClickedRow(row, row_index)
 
@@ -650,15 +664,19 @@ class FastTable(Widget):
         :return: Selected cell
         :rtype: ClickedCell
         """
-        cell_data = StateJson()[self.widget_id]["clickedCell"]
+        cell_data = StateJson()[self.widget_id]["selectedCell"]
         if cell_data is None:
             return None
         row_index = cell_data["idx"]
-        row = cell_data["row"]
         column_index = cell_data["column"]
+        if column_index is None or row_index is None:
+            return None
         column_name = self._columns_first_idx[column_index]
-        column_value = row[column_index]
-        if column_index is None or row is None:
+        # Get original data from source_data to preserve None/NaN values
+        try:
+            row = self._source_data.loc[row_index].values.tolist()
+            column_value = row[column_index]
+        except (KeyError, IndexError):
             return None
         return self.ClickedCell(row, column_index, row_index, column_name, column_value)
 
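All three accessors above now re-read the row from `_source_data` by index instead of trusting the payload echoed back from the frontend, because the display payload has NaN/None flattened to `""` (see the `_get_pandas_unpacked_data` hunk further down). A sketch of the difference on a toy frame:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"name": ["lemon", "kiwi"], "size": [102.0, np.nan]})

# What the frontend gets back: NaN flattened to "" for display.
display_rows = df.replace({np.nan: ""}).values.tolist()
print(display_rows[1])            # ['kiwi', '']

# What .loc recovers from the source frame: the original NaN survives.
print(df.loc[1].values.tolist())  # ['kiwi', nan]
```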
@@ -721,9 +739,7 @@ class FastTable(Widget):
             self._parsed_active_data,
         ) = self._prepare_working_data()
         self._rows_total = len(self._parsed_source_data["data"])
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["total"] = self._rows_total
         DataJson().send_changes()
         self._maybe_update_selected_row()
@@ -741,9 +757,7 @@ class FastTable(Widget):
             self._parsed_active_data,
         ) = self._prepare_working_data()
         self._rows_total = len(self._parsed_source_data["data"])
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["total"] = self._rows_total
         DataJson().send_changes()
         self._maybe_update_selected_row()
@@ -771,9 +785,7 @@ class FastTable(Widget):
             self._parsed_active_data,
         ) = self._prepare_working_data()
         self._rows_total = len(self._parsed_source_data["data"])
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["total"] = self._rows_total
         self._maybe_update_selected_row()
         return popped_row
@@ -886,7 +898,11 @@ class FastTable(Widget):
         self._refresh()
 
     def _default_search_function(self, data: pd.DataFrame, search_value: str) -> pd.DataFrame:
-        data = data[data.applymap(lambda x: search_value in str(x)).any(axis=1)]
+        # Use map() for pandas >= 2.1.0, fallback to applymap() for older versions
+        if hasattr(pd.DataFrame, "map"):
+            data = data[data.map(lambda x: search_value in str(x)).any(axis=1)]
+        else:
+            data = data[data.applymap(lambda x: search_value in str(x)).any(axis=1)]
         return data
 
     def _search(self, search_value: str) -> pd.DataFrame:
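
`DataFrame.applymap` was deprecated in favour of `DataFrame.map` in pandas 2.1.0, hence the `hasattr` feature check above. The same shim in isolation, assuming nothing beyond the public pandas API:

```python
import pandas as pd


def contains_value(df: pd.DataFrame, needle: str) -> pd.DataFrame:
    # Elementwise membership test, tolerant of both pandas generations.
    elementwise = df.map if hasattr(pd.DataFrame, "map") else df.applymap
    return df[elementwise(lambda x: needle in str(x)).any(axis=1)]


df = pd.DataFrame({"a": ["cat", "dog"], "b": ["bird", "catfish"]})
print(contains_value(df, "cat"))  # keeps both rows that mention "cat"
```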
@@ -897,8 +913,14 @@ class FastTable(Widget):
         :return: Filtered data
         :rtype: pd.DataFrame
         """
-        filtered_data = self._filtered_data.copy()
+        # Use filtered_data if available, otherwise use source_data directly
+        if self._filtered_data is not None:
+            filtered_data = self._filtered_data.copy()
+        else:
+            filtered_data = self._source_data.copy()
+
         if search_value == "":
+            self._search_str = search_value
             return filtered_data
         if self._search_str != search_value:
             self._active_page = 1
@@ -924,7 +946,24 @@ class FastTable(Widget):
         else:
             ascending = False
         try:
-            data = data.sort_values(by=data.columns[column_idx], ascending=ascending)
+            column = data.columns[column_idx]
+            # Try to convert to numeric for proper sorting
+            numeric_column = pd.to_numeric(data[column], errors="coerce")
+
+            # Check if column contains numeric data (has at least one non-NaN numeric value)
+            if numeric_column.notna().sum() > 0:
+                # Create temporary column for sorting
+                data_copy = data.copy()
+                data_copy["_sort_key"] = numeric_column
+                # Sort by numeric values with NaN at the end
+                data_copy = data_copy.sort_values(
+                    by="_sort_key", ascending=ascending, na_position="last"
+                )
+                # Remove temporary column and return original data in sorted order
+                data = data.loc[data_copy.index]
+            else:
+                # Sort as strings with NaN values at the end
+                data = data.sort_values(by=column, ascending=ascending, na_position="last")
         except IndexError as e:
             e.args = (
                 f"Sorting by column idx = {column_idx} is not possible, your table has only {len(data.columns)} columns with idx from 0 to {len(data.columns) - 1}",
@@ -955,9 +994,7 @@ class FastTable(Widget):
         self._sorted_data = self._sort_table_data(self._searched_data)
         self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
         self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["total"] = self._rows_total
         self._maybe_update_selected_row()
         StateJson().send_changes()
@@ -1062,12 +1099,21 @@ class FastTable(Widget):
     def _get_pandas_unpacked_data(self, data: pd.DataFrame) -> dict:
         if not isinstance(data, pd.DataFrame):
             raise TypeError("Cannot parse input data, please use Pandas Dataframe as input data")
-        data = data.replace({np.nan: None})
-        # data = data.astype(object).replace(np.nan, "-") # TODO: replace None later
+
+        # Create a copy for frontend display to avoid modifying source data
+        display_data = data.copy()
+        # Replace NaN and None with empty string only for display
+        display_data = display_data.replace({np.nan: "", None: ""})
+
+        # Handle MultiIndex columns - extract only the first level
+        if isinstance(display_data.columns, pd.MultiIndex):
+            columns = display_data.columns.get_level_values("first").tolist()
+        else:
+            columns = display_data.columns.to_list()
 
         unpacked_data = {
-            "columns": data.columns.to_list(),
-            "data": data.values.tolist(),
+            "columns": columns,
+            "data": display_data.values.tolist(),
         }
         return unpacked_data
 
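Two things happen in this hunk: NaN/None are blanked only in a display copy, and FastTable's two-level column index is collapsed to its first level for the payload. A sketch of the second half, assuming the levels are named `first`/`second` as the `get_level_values("first")` call implies:

```python
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [("name", 0), ("size", 1)], names=["first", "second"]
)
df = pd.DataFrame([["lemon", 100]], columns=cols)

# Collapse the header to the human-readable first level.
print(df.columns.get_level_values("first").tolist())  # ['name', 'size']
```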
@@ -1238,9 +1284,7 @@ class FastTable(Widget):
 
         self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
         self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
-        DataJson()[self.widget_id]["data"] = {
-            i: row for i, row in enumerate(self._parsed_active_data["data"])
-        }
+        DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
         DataJson()[self.widget_id]["total"] = self._rows_total
         DataJson().send_changes()
         StateJson().send_changes()
@@ -1309,6 +1353,7 @@ class FastTable(Widget):
 
     def select_row_by_value(self, column, value: Any):
         """Selects a row by value in a specific column.
+        The first column with the given name is used in case of duplicate column names.
 
         :param column: Column name to filter by
         :type column: str
@@ -1322,7 +1367,12 @@ class FastTable(Widget):
         if column not in self._columns_first_idx:
             raise ValueError(f"Column '{column}' does not exist in the table.")
 
-        idx = self._source_data[self._source_data[column] == value].index.tolist()
+        # Find the first column index with this name (in case of duplicates)
+        column_idx = self._columns_first_idx.index(column)
+        column_tuple = self._source_data.columns[column_idx]
+
+        # Use column tuple to access the specific column
+        idx = self._source_data[self._source_data[column_tuple] == value].index.tolist()
         if not idx:
             raise ValueError(f"No rows found with {column} = {value}.")
         if len(idx) > 1:
@@ -1333,6 +1383,7 @@ class FastTable(Widget):
 
     def select_rows_by_value(self, column, values: List):
         """Selects rows by value in a specific column.
+        The first column with the given name is used in case of duplicate column names.
 
         :param column: Column name to filter by
         :type column: str
@@ -1346,7 +1397,12 @@ class FastTable(Widget):
         if column not in self._columns_first_idx:
             raise ValueError(f"Column '{column}' does not exist in the table.")
 
-        idxs = self._source_data[self._source_data[column].isin(values)].index.tolist()
+        # Find the first column index with this name (in case of duplicates)
+        column_idx = self._columns_first_idx.index(column)
+        column_tuple = self._source_data.columns[column_idx]
+
+        # Use column tuple to access the specific column
+        idxs = self._source_data[self._source_data[column_tuple].isin(values)].index.tolist()
         self.select_rows(idxs)
 
     def _read_custom_columns(self, columns: List[Union[str, tuple]]) -> None:
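
The duplicate-name fix leans on the same two-level index: first-level names may repeat, but each `(name, position)` tuple is unique, so resolving the name to a positional tuple picks exactly one column. A hedged sketch:

```python
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [("value", 0), ("value", 1)], names=["first", "second"]
)
df = pd.DataFrame([[1, 10], [2, 20]], columns=cols)

first_level = df.columns.get_level_values("first").tolist()
column_tuple = df.columns[first_level.index("value")]  # ('value', 0), the first match

print(df[df[column_tuple] == 2].index.tolist())  # [1]
```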
@@ -7,7 +7,6 @@ from supervisely.app.widgets.checkbox.checkbox import Checkbox
 from supervisely.app.widgets.container.container import Container
 from supervisely.app.widgets.field.field import Field
 from supervisely.app.widgets.select.select import Select
-from supervisely.app.widgets.select_project.select_project import SelectProject
 from supervisely.app.widgets.tree_select.tree_select import TreeSelect
 from supervisely.project.project_type import ProjectType
 
@@ -120,7 +119,15 @@ class SelectDatasetTree(Widget):
         if self._project_id:
             project_info = self._api.project.get_info_by_id(self._project_id)
             if allowed_project_types is not None:
-                if project_info.type not in [pt.value for pt in allowed_project_types]:
+                allowed_values = []
+                if not isinstance(allowed_project_types, list):
+                    allowed_project_types = [allowed_project_types]
+
+                for pt in allowed_project_types:
+                    if isinstance(pt, (ProjectType, str)):
+                        allowed_values.append(str(pt))
+
+                if project_info.type not in allowed_values:
                     self._project_id = None
 
         self._multiselect = multiselect
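
The rewrite makes `allowed_project_types` tolerant of a bare value and of mixed `ProjectType`/`str` entries by normalizing everything through `str()`. A standalone sketch with a local enum standing in for supervisely's `ProjectType` (assumed here to stringify to its value, which the `str(pt)` call implies):

```python
from enum import Enum
from typing import List, Union


class ProjectType(str, Enum):  # hypothetical stand-in for supervisely's enum
    IMAGES = "images"
    VIDEOS = "videos"

    def __str__(self) -> str:
        return self.value


def normalize(allowed: Union[ProjectType, str, List]) -> List[str]:
    # Accept a single value or a list; keep only enum/str entries, as strings.
    if not isinstance(allowed, list):
        allowed = [allowed]
    return [str(pt) for pt in allowed if isinstance(pt, (ProjectType, str))]


print(normalize(ProjectType.IMAGES))              # ['images']
print(normalize([ProjectType.VIDEOS, "images"]))  # ['videos', 'images']
```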
@@ -322,6 +329,7 @@ class SelectDatasetTree(Widget):
         """
         if not self._multiselect:
             raise ValueError("This method can only be called when multiselect is enabled.")
+        self._select_all_datasets_checkbox.uncheck()
         self._select_dataset.set_selected_by_id(dataset_ids)
 
     def team_changed(self, func: Callable) -> Callable:
@@ -1,3 +1,5 @@
+# isort: skip_file
+
 import copy
 import io
 
@@ -54,9 +56,8 @@ class PackerUnpacker:
 
     @staticmethod
     def pandas_unpacker(data: pd.DataFrame):
-        data = data.replace({np.nan: None})
-        # data = data.astype(object).replace(np.nan, "-") # TODO: replace None later
-
+        # Keep None/NaN values in source data, don't replace them
+        # They will be converted to "" only when sending to frontend
         unpacked_data = {
             "columns": data.columns.to_list(),
             "data": data.values.tolist(),
@@ -169,9 +170,35 @@ class Table(Widget):
 
         super().__init__(widget_id=widget_id, file_path=__file__)
 
+    def _prepare_data_for_frontend(self, data_dict):
+        """Convert None and NaN values to empty strings for frontend display.
+        This preserves the original None/NaN values in _parsed_data.
+        """
+        import math
+
+        display_data = copy.deepcopy(data_dict)
+
+        # Convert None/NaN in data rows
+        for row in display_data.get("data", []):
+            for i in range(len(row)):
+                value = row[i]
+                # Check for None or NaN (NaN is a float that doesn't equal itself)
+                if value is None or (isinstance(value, float) and math.isnan(value)):
+                    row[i] = ""
+
+        # Convert None/NaN in summary row if present
+        if "summaryRow" in display_data and display_data["summaryRow"] is not None:
+            summary_row = display_data["summaryRow"]
+            for i in range(len(summary_row)):
+                value = summary_row[i]
+                if value is None or (isinstance(value, float) and math.isnan(value)):
+                    summary_row[i] = ""
+
+        return display_data
+
     def get_json_data(self):
         return {
-            "table_data": self._parsed_data,
+            "table_data": self._prepare_data_for_frontend(self._parsed_data),
             "table_options": {
                 "perPage": self._per_page,
                 "pageSizes": self._page_sizes,
@@ -255,13 +282,17 @@ class Table(Widget):
 
     def read_json(self, value: dict) -> None:
         self._update_table_data(input_data=value)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
         self.clear_selection()
 
     def read_pandas(self, value: pd.DataFrame) -> None:
         self._update_table_data(input_data=value)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
         self.clear_selection()
 
@@ -272,7 +303,9 @@ class Table(Widget):
         index = len(table_data) if index > len(table_data) or index < 0 else index
 
         self._parsed_data["data"].insert(index, data)
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def pop_row(self, index=-1):
@@ -284,7 +317,9 @@ class Table(Widget):
 
         if len(self._parsed_data["data"]) != 0:
             popped_row = self._parsed_data["data"].pop(index)
-            DataJson()[self.widget_id]["table_data"] = self._parsed_data
+            DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+                self._parsed_data
+            )
             DataJson().send_changes()
             return popped_row
 
@@ -382,11 +417,27 @@ class Table(Widget):
         StateJson()[self.widget_id]["selected_row"] = {}
         StateJson().send_changes()
 
+    @staticmethod
+    def _values_equal(val1, val2):
+        """Compare two values, handling NaN specially."""
+        import math
+
+        # Check if both are NaN
+        is_nan1 = isinstance(val1, float) and math.isnan(val1)
+        is_nan2 = isinstance(val2, float) and math.isnan(val2)
+        if is_nan1 and is_nan2:
+            return True
+        # Check if both are None
+        if val1 is None and val2 is None:
+            return True
+        # Regular comparison
+        return val1 == val2
+
     def delete_row(self, key_column_name, key_cell_value):
         col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[col_index] == key_cell_value:
+            if self._values_equal(row[col_index], key_cell_value):
                 row_indices.append(idx)
         if len(row_indices) == 0:
             raise ValueError('Column "{key_column_name}" does not have value "{key_cell_value}"')
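
`_values_equal` exists because `float("nan") == float("nan")` is False, so a plain `==` lookup could never match a NaN cell. A quick demonstration of the case it fixes:

```python
import math


def values_equal(v1, v2):
    # NaN-aware equality, mirroring the helper added above.
    if isinstance(v1, float) and isinstance(v2, float) and math.isnan(v1) and math.isnan(v2):
        return True
    return v1 == v2


nan = float("nan")
print(nan == nan)              # False: NaN never equals itself under ==
print(values_equal(nan, nan))  # True: the helper treats two NaNs as a match
```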
@@ -400,7 +451,7 @@ class Table(Widget):
         key_col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[key_col_index] == key_cell_value:
+            if self._values_equal(row[key_col_index], key_cell_value):
                 row_indices.append(idx)
         if len(row_indices) == 0:
             raise ValueError('Column "{key_column_name}" does not have value "{key_cell_value}"')
@@ -411,20 +462,24 @@ class Table(Widget):
 
         col_index = self._parsed_data["columns"].index(column_name)
         self._parsed_data["data"][row_indices[0]][col_index] = new_value
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def update_matching_cells(self, key_column_name, key_cell_value, column_name, new_value):
         key_col_index = self._parsed_data["columns"].index(key_column_name)
         row_indices = []
         for idx, row in enumerate(self._parsed_data["data"]):
-            if row[key_col_index] == key_cell_value:
+            if self._values_equal(row[key_col_index], key_cell_value):
                 row_indices.append(idx)
 
         col_index = self._parsed_data["columns"].index(column_name)
         for row_idx in row_indices:
             self._parsed_data["data"][row_idx][col_index] = new_value
-        DataJson()[self.widget_id]["table_data"] = self._parsed_data
+        DataJson()[self.widget_id]["table_data"] = self._prepare_data_for_frontend(
+            self._parsed_data
+        )
         DataJson().send_changes()
 
     def sort(self, column_id: int = None, direction: Optional[Literal["asc", "desc"]] = None):
@@ -790,14 +790,20 @@ class InferenceImageCache:
             try:
                 frame = self.get_frame_from_cache(video_id, hash_or_id)
             except Exception as e:
-                logger.error(f"Error retrieving frame from cache: {e}", exc_info=True)
+                logger.error(
+                    f"Error retrieving frame from cache: {repr(e)}. Frame will be re-downloaded",
+                    exc_info=True,
+                )
                 ids_to_load.append(hash_or_id)
                 return pos, None
             return pos, frame
         try:
             image = self._cache.get_image(name_constructor(hash_or_id))
         except Exception as e:
-            logger.error(f"Error retrieving image from cache: {e}", exc_info=True)
+            logger.error(
+                f"Error retrieving image from cache: {repr(e)}. Image will be re-downloaded",
+                exc_info=True,
+            )
             ids_to_load.append(hash_or_id)
             return pos, None
         return pos, image
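
Using `repr(e)` instead of `{e}` in these log messages matters for exceptions whose `str()` drops the class name or quotes oddly; the repr always carries the exception type. A minimal illustration:

```python
e = KeyError("frame_17")
print(f"{e}")        # 'frame_17'            (class name lost, extra quoting)
print(f"{repr(e)}")  # KeyError('frame_17')  (type and message preserved)
```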