supervisely 6.73.461__py3-none-any.whl → 6.73.470__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of supervisely might be problematic. Click here for more details.
- supervisely/api/dataset_api.py +74 -12
- supervisely/app/widgets/__init__.py +1 -0
- supervisely/app/widgets/fast_table/fast_table.py +164 -74
- supervisely/app/widgets/heatmap/__init__.py +0 -0
- supervisely/app/widgets/heatmap/heatmap.py +523 -0
- supervisely/app/widgets/heatmap/script.js +378 -0
- supervisely/app/widgets/heatmap/style.css +227 -0
- supervisely/app/widgets/heatmap/template.html +21 -0
- supervisely/app/widgets/select_dataset_tree/select_dataset_tree.py +10 -2
- supervisely/app/widgets/table/table.py +68 -13
- supervisely/convert/pointcloud/nuscenes_conv/nuscenes_converter.py +27 -16
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_converter.py +58 -22
- supervisely/convert/pointcloud_episodes/nuscenes_conv/nuscenes_helper.py +21 -47
- supervisely/nn/inference/inference.py +266 -9
- supervisely/nn/inference/inference_request.py +3 -9
- supervisely/nn/inference/predict_app/gui/input_selector.py +53 -27
- supervisely/nn/inference/session.py +43 -35
- supervisely/video/sampling.py +41 -21
- supervisely/video/video.py +25 -10
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/METADATA +1 -1
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/RECORD +25 -20
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/LICENSE +0 -0
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/WHEEL +0 -0
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/entry_points.txt +0 -0
- {supervisely-6.73.461.dist-info → supervisely-6.73.470.dist-info}/top_level.txt +0 -0
supervisely/api/dataset_api.py
CHANGED
|
@@ -1021,13 +1021,66 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
|
|
|
1021
1021
|
|
|
1022
1022
|
return dataset_tree
|
|
1023
1023
|
|
|
1024
|
-
def
|
|
1024
|
+
def _yield_tree(
|
|
1025
|
+
self, tree: Dict[DatasetInfo, Dict], path: List[str]
|
|
1026
|
+
) -> Generator[Tuple[List[str], DatasetInfo], None, None]:
|
|
1027
|
+
"""
|
|
1028
|
+
Helper method for recursive tree traversal.
|
|
1029
|
+
Yields tuples of (path, dataset) for all datasets in the tree. For each node (dataset) at the current level,
|
|
1030
|
+
yields its (path, dataset) before recursively traversing and yielding from its children.
|
|
1031
|
+
|
|
1032
|
+
:param tree: Tree structure to yield from.
|
|
1033
|
+
:type tree: Dict[DatasetInfo, Dict]
|
|
1034
|
+
:param path: Current path (used for recursion).
|
|
1035
|
+
:type path: List[str]
|
|
1036
|
+
:return: Generator of tuples of (path, dataset).
|
|
1037
|
+
:rtype: Generator[Tuple[List[str], DatasetInfo], None, None]
|
|
1038
|
+
"""
|
|
1039
|
+
for dataset, children in tree.items():
|
|
1040
|
+
yield path, dataset
|
|
1041
|
+
new_path = path + [dataset.name]
|
|
1042
|
+
if children:
|
|
1043
|
+
yield from self._yield_tree(children, new_path)
|
|
1044
|
+
|
|
1045
|
+
def _find_dataset_in_tree(
|
|
1046
|
+
self, tree: Dict[DatasetInfo, Dict], target_id: int, path: List[str] = None
|
|
1047
|
+
) -> Tuple[Optional[DatasetInfo], Optional[Dict], List[str]]:
|
|
1048
|
+
"""Find a specific dataset in the tree and return its subtree and path.
|
|
1049
|
+
|
|
1050
|
+
:param tree: Tree structure to search in.
|
|
1051
|
+
:type tree: Dict[DatasetInfo, Dict]
|
|
1052
|
+
:param target_id: ID of the dataset to find.
|
|
1053
|
+
:type target_id: int
|
|
1054
|
+
:param path: Current path (used for recursion).
|
|
1055
|
+
:type path: List[str], optional
|
|
1056
|
+
:return: Tuple of (found_dataset, its_subtree, path_to_dataset).
|
|
1057
|
+
:rtype: Tuple[Optional[DatasetInfo], Optional[Dict], List[str]]
|
|
1058
|
+
"""
|
|
1059
|
+
if path is None:
|
|
1060
|
+
path = []
|
|
1061
|
+
|
|
1062
|
+
for dataset, children in tree.items():
|
|
1063
|
+
if dataset.id == target_id:
|
|
1064
|
+
return dataset, children, path
|
|
1065
|
+
# Search in children
|
|
1066
|
+
if children:
|
|
1067
|
+
found_dataset, found_children, found_path = self._find_dataset_in_tree(
|
|
1068
|
+
children, target_id, path + [dataset.name]
|
|
1069
|
+
)
|
|
1070
|
+
if found_dataset is not None:
|
|
1071
|
+
return found_dataset, found_children, found_path
|
|
1072
|
+
return None, None, []
|
|
1073
|
+
|
|
1074
|
+
def tree(self, project_id: int, dataset_id: Optional[int] = None) -> Generator[Tuple[List[str], DatasetInfo], None, None]:
|
|
1025
1075
|
"""Yields tuples of (path, dataset) for all datasets in the project.
|
|
1026
1076
|
Path of the dataset is a list of parents, e.g. ["ds1", "ds2", "ds3"].
|
|
1027
1077
|
For root datasets, the path is an empty list.
|
|
1028
1078
|
|
|
1029
1079
|
:param project_id: Project ID in which the Dataset is located.
|
|
1030
1080
|
:type project_id: int
|
|
1081
|
+
:param dataset_id: Optional Dataset ID to start the tree from. If provided, only yields
|
|
1082
|
+
the subtree starting from this dataset (including the dataset itself and all its children).
|
|
1083
|
+
:type dataset_id: Optional[int]
|
|
1031
1084
|
:return: Generator of tuples of (path, dataset).
|
|
1032
1085
|
:rtype: Generator[Tuple[List[str], DatasetInfo], None, None]
|
|
1033
1086
|
:Usage example:
|
|
@@ -1040,11 +1093,17 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
|
|
|
1040
1093
|
|
|
1041
1094
|
project_id = 123
|
|
1042
1095
|
|
|
1096
|
+
# Get all datasets in the project
|
|
1043
1097
|
for parents, dataset in api.dataset.tree(project_id):
|
|
1044
1098
|
parents: List[str]
|
|
1045
1099
|
dataset: sly.DatasetInfo
|
|
1046
1100
|
print(parents, dataset.name)
|
|
1047
1101
|
|
|
1102
|
+
# Get only a specific branch starting from dataset_id = 456
|
|
1103
|
+
for parents, dataset in api.dataset.tree(project_id, dataset_id=456):
|
|
1104
|
+
parents: List[str]
|
|
1105
|
+
dataset: sly.DatasetInfo
|
|
1106
|
+
print(parents, dataset.name)
|
|
1048
1107
|
|
|
1049
1108
|
# Output:
|
|
1050
1109
|
# [] ds1
|
|
@@ -1052,17 +1111,20 @@ class DatasetApi(UpdateableModule, RemoveableModuleApi):
|
|
|
1052
1111
|
# ["ds1", "ds2"] ds3
|
|
1053
1112
|
"""
|
|
1054
1113
|
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1114
|
+
full_tree = self.get_tree(project_id)
|
|
1115
|
+
|
|
1116
|
+
if dataset_id is None:
|
|
1117
|
+
# Return the full tree
|
|
1118
|
+
yield from self._yield_tree(full_tree, [])
|
|
1119
|
+
else:
|
|
1120
|
+
# Find the specific dataset and return only its subtree
|
|
1121
|
+
target_dataset, subtree, dataset_path = self._find_dataset_in_tree(full_tree, dataset_id)
|
|
1122
|
+
if target_dataset is not None:
|
|
1123
|
+
# Yield the target dataset first, then its children
|
|
1124
|
+
yield dataset_path, target_dataset
|
|
1125
|
+
if subtree:
|
|
1126
|
+
new_path = dataset_path + [target_dataset.name]
|
|
1127
|
+
yield from self._yield_tree(subtree, new_path)
|
|
1066
1128
|
|
|
1067
1129
|
def get_nested(self, project_id: int, dataset_id: int) -> List[DatasetInfo]:
|
|
1068
1130
|
"""Returns a list of all nested datasets in the specified dataset.
|
|
@@ -161,3 +161,4 @@ from supervisely.app.widgets.dropdown_checkbox_selector.dropdown_checkbox_select
|
|
|
161
161
|
from supervisely.app.widgets.ecosystem_model_selector.ecosystem_model_selector import (
|
|
162
162
|
EcosystemModelSelector,
|
|
163
163
|
)
|
|
164
|
+
from supervisely.app.widgets.heatmap.heatmap import Heatmap
|
|
@@ -221,6 +221,11 @@ class FastTable(Widget):
|
|
|
221
221
|
self._validate_input_data(data)
|
|
222
222
|
self._source_data = self._prepare_input_data(data)
|
|
223
223
|
|
|
224
|
+
# Initialize filtered and searched data for proper initialization
|
|
225
|
+
self._filtered_data = self._filter(self._filter_value)
|
|
226
|
+
self._searched_data = self._search(self._search_str)
|
|
227
|
+
self._sorted_data = self._sort_table_data(self._searched_data)
|
|
228
|
+
|
|
224
229
|
# prepare parsed_source_data, sliced_data, parsed_active_data
|
|
225
230
|
(
|
|
226
231
|
self._parsed_source_data,
|
|
@@ -265,9 +270,7 @@ class FastTable(Widget):
|
|
|
265
270
|
self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
|
|
266
271
|
self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
|
|
267
272
|
StateJson().send_changes()
|
|
268
|
-
DataJson()[self.widget_id]["data"] = {
|
|
269
|
-
i: row for i, row in enumerate(self._parsed_active_data["data"])
|
|
270
|
-
}
|
|
273
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
271
274
|
DataJson()[self.widget_id]["total"] = self._rows_total
|
|
272
275
|
DataJson().send_changes()
|
|
273
276
|
StateJson()["reactToChanges"] = True
|
|
@@ -297,7 +300,7 @@ class FastTable(Widget):
|
|
|
297
300
|
:rtype: Dict[str, Any]
|
|
298
301
|
"""
|
|
299
302
|
return {
|
|
300
|
-
"data":
|
|
303
|
+
"data": list(self._parsed_active_data["data"]),
|
|
301
304
|
"columns": self._parsed_source_data["columns"],
|
|
302
305
|
"projectMeta": self._project_meta,
|
|
303
306
|
"columnsOptions": self._columns_options,
|
|
@@ -422,6 +425,8 @@ class FastTable(Widget):
|
|
|
422
425
|
def read_json(self, data: Dict, meta: Dict = None, custom_columns: Optional[List[Union[str, tuple]]] = None) -> None:
|
|
423
426
|
"""Replace table data with options and project meta in the widget
|
|
424
427
|
|
|
428
|
+
More about options in `Developer Portal <https://developer.supervisely.com/app-development/widgets/tables/fasttable#read_json>`_
|
|
429
|
+
|
|
425
430
|
:param data: Table data with options:
|
|
426
431
|
- data: table data
|
|
427
432
|
- columns: list of column names
|
|
@@ -477,12 +482,7 @@ class FastTable(Widget):
|
|
|
477
482
|
table_data = data.get("data", None)
|
|
478
483
|
self._validate_input_data(table_data)
|
|
479
484
|
self._source_data = self._prepare_input_data(table_data)
|
|
480
|
-
|
|
481
|
-
self._parsed_source_data,
|
|
482
|
-
self._sliced_data,
|
|
483
|
-
self._parsed_active_data,
|
|
484
|
-
) = self._prepare_working_data()
|
|
485
|
-
self._rows_total = len(self._parsed_source_data["data"])
|
|
485
|
+
|
|
486
486
|
init_options = DataJson()[self.widget_id]["options"]
|
|
487
487
|
init_options.update(self._table_options)
|
|
488
488
|
sort = init_options.pop("sort", {"column": None, "order": None})
|
|
@@ -491,10 +491,15 @@ class FastTable(Widget):
|
|
|
491
491
|
if self._sort_column_idx is not None and self._sort_column_idx > len(self._columns_first_idx) - 1:
|
|
492
492
|
self._sort_column_idx = None
|
|
493
493
|
self._sort_order = sort.get("order", None)
|
|
494
|
-
self._page_size = init_options.pop("pageSize", 10)
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
494
|
+
self._page_size = init_options.pop("pageSize", 10)
|
|
495
|
+
|
|
496
|
+
# Apply sorting before preparing working data
|
|
497
|
+
self._sorted_data = self._sort_table_data(self._source_data)
|
|
498
|
+
self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
|
|
499
|
+
self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
|
|
500
|
+
self._parsed_source_data = self._unpack_pandas_table_data(self._source_data)
|
|
501
|
+
self._rows_total = len(self._parsed_source_data["data"])
|
|
502
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
498
503
|
DataJson()[self.widget_id]["columns"] = self._parsed_active_data["columns"]
|
|
499
504
|
DataJson()[self.widget_id]["columnsOptions"] = self._columns_options
|
|
500
505
|
DataJson()[self.widget_id]["options"] = init_options
|
|
@@ -523,9 +528,7 @@ class FastTable(Widget):
|
|
|
523
528
|
self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
|
|
524
529
|
self._parsed_source_data = self._unpack_pandas_table_data(self._source_data)
|
|
525
530
|
self._rows_total = len(self._parsed_source_data["data"])
|
|
526
|
-
DataJson()[self.widget_id]["data"] = {
|
|
527
|
-
i: row for i, row in enumerate(self._parsed_active_data["data"])
|
|
528
|
-
}
|
|
531
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
529
532
|
DataJson()[self.widget_id]["columns"] = self._parsed_active_data["columns"]
|
|
530
533
|
DataJson()[self.widget_id]["total"] = len(self._source_data)
|
|
531
534
|
DataJson().send_changes()
|
|
@@ -584,10 +587,17 @@ class FastTable(Widget):
|
|
|
584
587
|
:rtype: pd.DataFrame
|
|
585
588
|
"""
|
|
586
589
|
if active_page is True:
|
|
587
|
-
|
|
590
|
+
# Return sliced data directly from source to preserve None/NaN values
|
|
591
|
+
packed_data = self._sliced_data.copy()
|
|
592
|
+
# Reset column names to first level only
|
|
593
|
+
if isinstance(packed_data.columns, pd.MultiIndex):
|
|
594
|
+
packed_data.columns = packed_data.columns.get_level_values("first")
|
|
588
595
|
else:
|
|
589
|
-
|
|
590
|
-
|
|
596
|
+
# Return source data directly to preserve None/NaN values
|
|
597
|
+
packed_data = self._source_data.copy()
|
|
598
|
+
# Reset column names to first level only
|
|
599
|
+
if isinstance(packed_data.columns, pd.MultiIndex):
|
|
600
|
+
packed_data.columns = packed_data.columns.get_level_values("first")
|
|
591
601
|
return packed_data
|
|
592
602
|
|
|
593
603
|
def clear_selection(self) -> None:
|
|
@@ -627,8 +637,12 @@ class FastTable(Widget):
|
|
|
627
637
|
rows = []
|
|
628
638
|
for row in selected_rows:
|
|
629
639
|
row_index = row["idx"]
|
|
630
|
-
|
|
631
|
-
|
|
640
|
+
if row_index is None:
|
|
641
|
+
continue
|
|
642
|
+
# Get original data from source_data to preserve None/NaN values
|
|
643
|
+
try:
|
|
644
|
+
row_data = self._source_data.loc[row_index].values.tolist()
|
|
645
|
+
except (KeyError, IndexError):
|
|
632
646
|
continue
|
|
633
647
|
rows.append(self.ClickedRow(row_data, row_index))
|
|
634
648
|
return rows
|
|
@@ -639,8 +653,12 @@ class FastTable(Widget):
|
|
|
639
653
|
if clicked_row is None:
|
|
640
654
|
return None
|
|
641
655
|
row_index = clicked_row["idx"]
|
|
642
|
-
|
|
643
|
-
|
|
656
|
+
if row_index is None:
|
|
657
|
+
return None
|
|
658
|
+
# Get original data from source_data to preserve None/NaN values
|
|
659
|
+
try:
|
|
660
|
+
row = self._source_data.loc[row_index].values.tolist()
|
|
661
|
+
except (KeyError, IndexError):
|
|
644
662
|
return None
|
|
645
663
|
return self.ClickedRow(row, row_index)
|
|
646
664
|
|
|
@@ -650,15 +668,19 @@ class FastTable(Widget):
|
|
|
650
668
|
:return: Selected cell
|
|
651
669
|
:rtype: ClickedCell
|
|
652
670
|
"""
|
|
653
|
-
cell_data = StateJson()[self.widget_id]["
|
|
671
|
+
cell_data = StateJson()[self.widget_id]["selectedCell"]
|
|
654
672
|
if cell_data is None:
|
|
655
673
|
return None
|
|
656
674
|
row_index = cell_data["idx"]
|
|
657
|
-
row = cell_data["row"]
|
|
658
675
|
column_index = cell_data["column"]
|
|
676
|
+
if column_index is None or row_index is None:
|
|
677
|
+
return None
|
|
659
678
|
column_name = self._columns_first_idx[column_index]
|
|
660
|
-
|
|
661
|
-
|
|
679
|
+
# Get original data from source_data to preserve None/NaN values
|
|
680
|
+
try:
|
|
681
|
+
row = self._source_data.loc[row_index].values.tolist()
|
|
682
|
+
column_value = row[column_index]
|
|
683
|
+
except (KeyError, IndexError):
|
|
662
684
|
return None
|
|
663
685
|
return self.ClickedCell(row, column_index, row_index, column_name, column_value)
|
|
664
686
|
|
|
@@ -721,9 +743,7 @@ class FastTable(Widget):
|
|
|
721
743
|
self._parsed_active_data,
|
|
722
744
|
) = self._prepare_working_data()
|
|
723
745
|
self._rows_total = len(self._parsed_source_data["data"])
|
|
724
|
-
DataJson()[self.widget_id]["data"] = {
|
|
725
|
-
i: row for i, row in enumerate(self._parsed_active_data["data"])
|
|
726
|
-
}
|
|
746
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
727
747
|
DataJson()[self.widget_id]["total"] = self._rows_total
|
|
728
748
|
DataJson().send_changes()
|
|
729
749
|
self._maybe_update_selected_row()
|
|
@@ -741,9 +761,7 @@ class FastTable(Widget):
|
|
|
741
761
|
self._parsed_active_data,
|
|
742
762
|
) = self._prepare_working_data()
|
|
743
763
|
self._rows_total = len(self._parsed_source_data["data"])
|
|
744
|
-
DataJson()[self.widget_id]["data"] = {
|
|
745
|
-
i: row for i, row in enumerate(self._parsed_active_data["data"])
|
|
746
|
-
}
|
|
764
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
747
765
|
DataJson()[self.widget_id]["total"] = self._rows_total
|
|
748
766
|
DataJson().send_changes()
|
|
749
767
|
self._maybe_update_selected_row()
|
|
@@ -771,9 +789,7 @@ class FastTable(Widget):
|
|
|
771
789
|
self._parsed_active_data,
|
|
772
790
|
) = self._prepare_working_data()
|
|
773
791
|
self._rows_total = len(self._parsed_source_data["data"])
|
|
774
|
-
DataJson()[self.widget_id]["data"] = {
|
|
775
|
-
i: row for i, row in enumerate(self._parsed_active_data["data"])
|
|
776
|
-
}
|
|
792
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
777
793
|
DataJson()[self.widget_id]["total"] = self._rows_total
|
|
778
794
|
self._maybe_update_selected_row()
|
|
779
795
|
return popped_row
|
|
@@ -886,7 +902,11 @@ class FastTable(Widget):
|
|
|
886
902
|
self._refresh()
|
|
887
903
|
|
|
888
904
|
def _default_search_function(self, data: pd.DataFrame, search_value: str) -> pd.DataFrame:
|
|
889
|
-
|
|
905
|
+
# Use map() for pandas >= 2.1.0, fallback to applymap() for older versions
|
|
906
|
+
if hasattr(pd.DataFrame, "map"):
|
|
907
|
+
data = data[data.map(lambda x: search_value in str(x)).any(axis=1)]
|
|
908
|
+
else:
|
|
909
|
+
data = data[data.applymap(lambda x: search_value in str(x)).any(axis=1)]
|
|
890
910
|
return data
|
|
891
911
|
|
|
892
912
|
def _search(self, search_value: str) -> pd.DataFrame:
|
|
@@ -897,8 +917,14 @@ class FastTable(Widget):
|
|
|
897
917
|
:return: Filtered data
|
|
898
918
|
:rtype: pd.DataFrame
|
|
899
919
|
"""
|
|
900
|
-
filtered_data
|
|
920
|
+
# Use filtered_data if available, otherwise use source_data directly
|
|
921
|
+
if self._filtered_data is not None:
|
|
922
|
+
filtered_data = self._filtered_data.copy()
|
|
923
|
+
else:
|
|
924
|
+
filtered_data = self._source_data.copy()
|
|
925
|
+
|
|
901
926
|
if search_value == "":
|
|
927
|
+
self._search_str = search_value
|
|
902
928
|
return filtered_data
|
|
903
929
|
if self._search_str != search_value:
|
|
904
930
|
self._active_page = 1
|
|
@@ -924,7 +950,24 @@ class FastTable(Widget):
|
|
|
924
950
|
else:
|
|
925
951
|
ascending = False
|
|
926
952
|
try:
|
|
927
|
-
|
|
953
|
+
column = data.columns[column_idx]
|
|
954
|
+
# Try to convert to numeric for proper sorting
|
|
955
|
+
numeric_column = pd.to_numeric(data[column], errors="coerce")
|
|
956
|
+
|
|
957
|
+
# Check if column contains numeric data (has at least one non-NaN numeric value)
|
|
958
|
+
if numeric_column.notna().sum() > 0:
|
|
959
|
+
# Create temporary column for sorting
|
|
960
|
+
data_copy = data.copy()
|
|
961
|
+
data_copy["_sort_key"] = numeric_column
|
|
962
|
+
# Sort by numeric values with NaN at the end
|
|
963
|
+
data_copy = data_copy.sort_values(
|
|
964
|
+
by="_sort_key", ascending=ascending, na_position="last"
|
|
965
|
+
)
|
|
966
|
+
# Remove temporary column and return original data in sorted order
|
|
967
|
+
data = data.loc[data_copy.index]
|
|
968
|
+
else:
|
|
969
|
+
# Sort as strings with NaN values at the end
|
|
970
|
+
data = data.sort_values(by=column, ascending=ascending, na_position="last")
|
|
928
971
|
except IndexError as e:
|
|
929
972
|
e.args = (
|
|
930
973
|
f"Sorting by column idx = {column_idx} is not possible, your table has only {len(data.columns)} columns with idx from 0 to {len(data.columns) - 1}",
|
|
@@ -933,31 +976,59 @@ class FastTable(Widget):
|
|
|
933
976
|
return data
|
|
934
977
|
|
|
935
978
|
def sort(
|
|
936
|
-
self,
|
|
979
|
+
self,
|
|
980
|
+
column_idx: Optional[int] = None,
|
|
981
|
+
order: Optional[Literal["asc", "desc"]] = None,
|
|
982
|
+
reset: bool = False,
|
|
937
983
|
) -> None:
|
|
938
984
|
"""Sorts table data by column index and order.
|
|
939
985
|
|
|
940
|
-
:param column_idx: Index of the column to sort by
|
|
986
|
+
:param column_idx: Index of the column to sort by. If None, keeps current column (unless reset=True).
|
|
941
987
|
:type column_idx: Optional[int]
|
|
942
|
-
:param order: Sorting order
|
|
988
|
+
:param order: Sorting order. If None, keeps current order (unless reset=True).
|
|
943
989
|
:type order: Optional[Literal["asc", "desc"]]
|
|
990
|
+
:param reset: If True, clears sorting completely. Default is False.
|
|
991
|
+
:type reset: bool
|
|
992
|
+
|
|
993
|
+
:Usage example:
|
|
994
|
+
|
|
995
|
+
.. code-block:: python
|
|
996
|
+
# Sorting examples
|
|
997
|
+
sort(column_idx=0, order="asc") # sort by column 0 ascending
|
|
998
|
+
sort(column_idx=1) # sort by column 1, keep current order
|
|
999
|
+
sort(order="desc") # keep current column, change order to descending
|
|
1000
|
+
sort(reset=True) # clear sorting completely
|
|
944
1001
|
"""
|
|
945
|
-
|
|
946
|
-
|
|
1002
|
+
# If reset=True, clear sorting completely
|
|
1003
|
+
if reset:
|
|
1004
|
+
self._sort_column_idx = None
|
|
1005
|
+
self._sort_order = None
|
|
1006
|
+
else:
|
|
1007
|
+
# Preserve current values if new ones are not provided
|
|
1008
|
+
if column_idx is not None:
|
|
1009
|
+
self._sort_column_idx = column_idx
|
|
1010
|
+
# else: keep current self._sort_column_idx
|
|
1011
|
+
|
|
1012
|
+
if order is not None:
|
|
1013
|
+
self._sort_order = order
|
|
1014
|
+
# else: keep current self._sort_order
|
|
1015
|
+
|
|
947
1016
|
self._validate_sort_attrs()
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
1017
|
+
|
|
1018
|
+
# Always update StateJson with current values (including None)
|
|
1019
|
+
StateJson()[self.widget_id]["sort"]["column"] = self._sort_column_idx
|
|
1020
|
+
StateJson()[self.widget_id]["sort"]["order"] = self._sort_order
|
|
1021
|
+
|
|
1022
|
+
# Apply filter, search, sort pipeline
|
|
952
1023
|
self._filtered_data = self._filter(self._filter_value)
|
|
953
1024
|
self._searched_data = self._search(self._search_str)
|
|
954
1025
|
self._rows_total = len(self._searched_data)
|
|
955
1026
|
self._sorted_data = self._sort_table_data(self._searched_data)
|
|
956
1027
|
self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
|
|
957
1028
|
self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
1029
|
+
|
|
1030
|
+
# Update DataJson with sorted and paginated data
|
|
1031
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
961
1032
|
DataJson()[self.widget_id]["total"] = self._rows_total
|
|
962
1033
|
self._maybe_update_selected_row()
|
|
963
1034
|
StateJson().send_changes()
|
|
@@ -965,22 +1036,22 @@ class FastTable(Widget):
|
|
|
965
1036
|
def _prepare_json_data(self, data: dict, key: str):
|
|
966
1037
|
if key in ("data", "columns"):
|
|
967
1038
|
default_value = []
|
|
1039
|
+
elif key == "options":
|
|
1040
|
+
default_value = {}
|
|
968
1041
|
else:
|
|
969
1042
|
default_value = None
|
|
1043
|
+
|
|
970
1044
|
source_data = data.get(key, default_value)
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
)
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
if column_idx is not None:
|
|
982
|
-
sort["column"] = sort.get("columnIndex")
|
|
983
|
-
sort.pop("columnIndex")
|
|
1045
|
+
|
|
1046
|
+
# Normalize options format: convert "columnIndex" to "column"
|
|
1047
|
+
if key == "options" and source_data is not None:
|
|
1048
|
+
sort = source_data.get("sort", None)
|
|
1049
|
+
if sort is not None:
|
|
1050
|
+
column_idx = sort.get("columnIndex", None)
|
|
1051
|
+
if column_idx is not None:
|
|
1052
|
+
sort["column"] = column_idx
|
|
1053
|
+
sort.pop("columnIndex")
|
|
1054
|
+
|
|
984
1055
|
return source_data
|
|
985
1056
|
|
|
986
1057
|
def _validate_sort(
|
|
@@ -1062,12 +1133,21 @@ class FastTable(Widget):
|
|
|
1062
1133
|
def _get_pandas_unpacked_data(self, data: pd.DataFrame) -> dict:
|
|
1063
1134
|
if not isinstance(data, pd.DataFrame):
|
|
1064
1135
|
raise TypeError("Cannot parse input data, please use Pandas Dataframe as input data")
|
|
1065
|
-
|
|
1066
|
-
#
|
|
1136
|
+
|
|
1137
|
+
# Create a copy for frontend display to avoid modifying source data
|
|
1138
|
+
display_data = data.copy()
|
|
1139
|
+
# Replace NaN and None with empty string only for display
|
|
1140
|
+
display_data = display_data.replace({np.nan: "", None: ""})
|
|
1141
|
+
|
|
1142
|
+
# Handle MultiIndex columns - extract only the first level
|
|
1143
|
+
if isinstance(display_data.columns, pd.MultiIndex):
|
|
1144
|
+
columns = display_data.columns.get_level_values("first").tolist()
|
|
1145
|
+
else:
|
|
1146
|
+
columns = display_data.columns.to_list()
|
|
1067
1147
|
|
|
1068
1148
|
unpacked_data = {
|
|
1069
|
-
"columns":
|
|
1070
|
-
"data":
|
|
1149
|
+
"columns": columns,
|
|
1150
|
+
"data": display_data.values.tolist(),
|
|
1071
1151
|
}
|
|
1072
1152
|
return unpacked_data
|
|
1073
1153
|
|
|
@@ -1238,9 +1318,7 @@ class FastTable(Widget):
|
|
|
1238
1318
|
|
|
1239
1319
|
self._sliced_data = self._slice_table_data(self._sorted_data, actual_page=self._active_page)
|
|
1240
1320
|
self._parsed_active_data = self._unpack_pandas_table_data(self._sliced_data)
|
|
1241
|
-
DataJson()[self.widget_id]["data"] = {
|
|
1242
|
-
i: row for i, row in enumerate(self._parsed_active_data["data"])
|
|
1243
|
-
}
|
|
1321
|
+
DataJson()[self.widget_id]["data"] = list(self._parsed_active_data["data"])
|
|
1244
1322
|
DataJson()[self.widget_id]["total"] = self._rows_total
|
|
1245
1323
|
DataJson().send_changes()
|
|
1246
1324
|
StateJson().send_changes()
|
|
@@ -1309,6 +1387,7 @@ class FastTable(Widget):
|
|
|
1309
1387
|
|
|
1310
1388
|
def select_row_by_value(self, column, value: Any):
|
|
1311
1389
|
"""Selects a row by value in a specific column.
|
|
1390
|
+
The first column with the given name is used in case of duplicate column names.
|
|
1312
1391
|
|
|
1313
1392
|
:param column: Column name to filter by
|
|
1314
1393
|
:type column: str
|
|
@@ -1322,7 +1401,12 @@ class FastTable(Widget):
|
|
|
1322
1401
|
if column not in self._columns_first_idx:
|
|
1323
1402
|
raise ValueError(f"Column '{column}' does not exist in the table.")
|
|
1324
1403
|
|
|
1325
|
-
|
|
1404
|
+
# Find the first column index with this name (in case of duplicates)
|
|
1405
|
+
column_idx = self._columns_first_idx.index(column)
|
|
1406
|
+
column_tuple = self._source_data.columns[column_idx]
|
|
1407
|
+
|
|
1408
|
+
# Use column tuple to access the specific column
|
|
1409
|
+
idx = self._source_data[self._source_data[column_tuple] == value].index.tolist()
|
|
1326
1410
|
if not idx:
|
|
1327
1411
|
raise ValueError(f"No rows found with {column} = {value}.")
|
|
1328
1412
|
if len(idx) > 1:
|
|
@@ -1333,6 +1417,7 @@ class FastTable(Widget):
|
|
|
1333
1417
|
|
|
1334
1418
|
def select_rows_by_value(self, column, values: List):
|
|
1335
1419
|
"""Selects rows by value in a specific column.
|
|
1420
|
+
The first column with the given name is used in case of duplicate column names.
|
|
1336
1421
|
|
|
1337
1422
|
:param column: Column name to filter by
|
|
1338
1423
|
:type column: str
|
|
@@ -1346,7 +1431,12 @@ class FastTable(Widget):
|
|
|
1346
1431
|
if column not in self._columns_first_idx:
|
|
1347
1432
|
raise ValueError(f"Column '{column}' does not exist in the table.")
|
|
1348
1433
|
|
|
1349
|
-
|
|
1434
|
+
# Find the first column index with this name (in case of duplicates)
|
|
1435
|
+
column_idx = self._columns_first_idx.index(column)
|
|
1436
|
+
column_tuple = self._source_data.columns[column_idx]
|
|
1437
|
+
|
|
1438
|
+
# Use column tuple to access the specific column
|
|
1439
|
+
idxs = self._source_data[self._source_data[column_tuple].isin(values)].index.tolist()
|
|
1350
1440
|
self.select_rows(idxs)
|
|
1351
1441
|
|
|
1352
1442
|
def _read_custom_columns(self, columns: List[Union[str, tuple]]) -> None:
|
|
File without changes
|