dataframe-textual 1.10.1__py3-none-any.whl → 1.16.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dataframe_textual/__main__.py +2 -2
- dataframe_textual/common.py +104 -42
- dataframe_textual/data_frame_table.py +836 -372
- dataframe_textual/data_frame_viewer.py +17 -2
- dataframe_textual/sql_screen.py +3 -9
- dataframe_textual/table_screen.py +102 -54
- dataframe_textual/yes_no_screen.py +26 -22
- {dataframe_textual-1.10.1.dist-info → dataframe_textual-1.16.2.dist-info}/METADATA +202 -205
- dataframe_textual-1.16.2.dist-info/RECORD +14 -0
- {dataframe_textual-1.10.1.dist-info → dataframe_textual-1.16.2.dist-info}/WHEEL +1 -1
- dataframe_textual-1.10.1.dist-info/RECORD +0 -14
- {dataframe_textual-1.10.1.dist-info → dataframe_textual-1.16.2.dist-info}/entry_points.txt +0 -0
- {dataframe_textual-1.10.1.dist-info → dataframe_textual-1.16.2.dist-info}/licenses/LICENSE +0 -0
@@ -3,13 +3,15 @@
 import sys
 from collections import defaultdict, deque
 from dataclasses import dataclass
+from itertools import zip_longest
 from pathlib import Path
 from textwrap import dedent
 from typing import Any
 
 import polars as pl
-from rich.text import Text
+from rich.text import Text, TextType
 from textual import work
+from textual._two_way_dict import TwoWayDict
 from textual.coordinate import Coordinate
 from textual.events import Click
 from textual.reactive import reactive
@@ -18,8 +20,11 @@ from textual.widgets import DataTable, TabPane
 from textual.widgets._data_table import (
     CellDoesNotExist,
     CellKey,
+    CellType,
     ColumnKey,
     CursorType,
+    DuplicateKey,
+    Row,
     RowKey,
 )
 
@@ -29,17 +34,18 @@ from .common import (
     NULL_DISPLAY,
     RIDX,
     SUBSCRIPT_DIGITS,
+    SUPPORTED_FORMATS,
     DtypeConfig,
     format_row,
     get_next_item,
     parse_placeholders,
-
+    round_to_nearest_hundreds,
     sleep_async,
     tentative_expr,
     validate_expr,
 )
 from .sql_screen import AdvancedSqlScreen, SimpleSqlScreen
-from .table_screen import FrequencyScreen, RowDetailScreen, StatisticsScreen
+from .table_screen import FrequencyScreen, MetaColumnScreen, MetaShape, RowDetailScreen, StatisticsScreen
 from .yes_no_screen import (
     AddColumnScreen,
     AddLinkScreen,
@@ -57,6 +63,9 @@ from .yes_no_screen import (
 # Color for highlighting selections and matches
 HIGHLIGHT_COLOR = "red"
 
+# Buffer size for loading rows
+BUFFER_SIZE = 5
+
 # Warning threshold for loading rows
 WARN_ROWS_THRESHOLD = 50_000
 
@@ -115,7 +124,7 @@ class DataFrameTable(DataTable):
     - **g** - ⬆️ Jump to first row
     - **G** - ⬇️ Jump to last row
     - **HOME/END** - 🎯 Jump to first/last column
-    - **Ctrl+HOME/END** - 🎯 Jump to page top/
+    - **Ctrl+HOME/END** - 🎯 Jump to page top/top
     - **Ctrl+F** - 📜 Page down
     - **Ctrl+B** - 📜 Page up
     - **PgUp/PgDn** - 📜 Page up/down
@@ -125,11 +134,13 @@ class DataFrameTable(DataTable):
     - **U** - 🔄 Redo last undone action
     - **Ctrl+U** - 🔁 Reset to initial state
 
-    ## 👁️
+    ## 👁️ Display
     - **Enter** - 📋 Show row details in modal
     - **F** - 📊 Show frequency distribution
     - **s** - 📈 Show statistics for current column
     - **S** - 📊 Show statistics for entire dataframe
+    - **m** - 📐 Show dataframe metadata (row/column counts)
+    - **M** - 📋 Show column metadata (ID, name, type)
     - **h** - 👁️ Hide current column
     - **H** - 👀 Show all hidden rows/columns
     - **_** - 📏 Expand column to full width
@@ -143,31 +154,31 @@ class DataFrameTable(DataTable):
     - **]** - 🔽 Sort column descending
     - *(Multi-column sort supported)*
 
-    ##
-    -
-    -
+    ## ✅ Row Selection
+    - **\\\\** - ✅ Select rows in current column using cursor value
+    - **|** - ✅ Select rows with expression
+    - **'** - ✅ Select/deselect current row
+    - **t** - 💡 Toggle row selection (invert all)
+    - **T** - 🧹 Clear all selections and matches
+    - **{** - ⬆️ Go to previous selected row
+    - **}** - ⬇️ Go to next selected row
+    - *(Supports case-insensitive & whole-word matching)*
+
+    ## 🔎 Find & Replace
     - **/** - 🔎 Find in current column with cursor value
     - **?** - 🔎 Find in current column with expression
     - **;** - 🌐 Global find using cursor value
     - **:** - 🌐 Global find with expression
     - **n** - ⬇️ Go to next match
     - **N** - ⬆️ Go to previous match
-    - **v** - 👁️ View/filter rows by cell or selected rows and hide others
-    - **V** - 🔧 View/filter rows by expression and hide others
-    - *(All search/find support case-insensitive & whole-word matching)*
-
-    ## ✏️ Replace
     - **r** - 🔄 Replace in current column (interactive or all)
     - **R** - 🔄 Replace across all columns (interactive or all)
     - *(Supports case-insensitive & whole-word matching)*
 
-    ##
-    - **
-    - **
-    - **
-    - **{** - ⬆️ Go to previous selected row
-    - **}** - ⬇️ Go to next selected row
-    - **"** - 📍 Filter selected rows and remove others
+    ## 👁️ View & Filter
+    - **"** - 📍 Filter selected rows (removes others)
+    - **v** - 👁️ View rows that are selected or contain matching cells (hide others)
+    - **V** - 🔧 View rows by expression (hides others)
 
     ## 🔍 SQL Interface
     - **l** - 💬 Open simple SQL interface (select columns & where clause)
@@ -212,6 +223,8 @@ class DataFrameTable(DataTable):
         ("G", "jump_bottom", "Jump to bottom"),
         ("ctrl+f", "forward_page", "Page down"),
         ("ctrl+b", "backward_page", "Page up"),
+        ("pageup", "page_up", "Page up"),
+        ("pagedown", "page_down", "Page down"),
         # Undo/Redo/Reset
         ("u", "undo", "Undo"),
         ("U", "redo", "Redo"),
@@ -230,7 +243,9 @@ class DataFrameTable(DataTable):
         ("ctrl+r", "copy_row", "Copy row to clipboard"),
         # Save
        ("ctrl+s", "save_to_file", "Save to file"),
-        # Detail, Frequency, and Statistics
+        # Metadata, Detail, Frequency, and Statistics
+        ("m", "metadata_shape", "Show metadata for row count and column count"),
+        ("M", "metadata_column", "Show metadata for column"),
         ("enter", "view_row_detail", "View row details"),
         ("F", "show_frequency", "Show frequency"),
         ("s", "show_statistics", "Show statistics for column"),
@@ -242,25 +257,23 @@ class DataFrameTable(DataTable):
         ("v", "view_rows", "View rows"),
         ("V", "view_rows_expr", "View rows by expression"),
         ("quotation_mark", "filter_rows", "Filter selected"),  # `"`
-        #
-        ("backslash", "
-        ("vertical_line", "
+        # Row Selection
+        ("backslash", "select_row_cursor_value", "Select rows with cursor value in current column"),  # `\`
+        ("vertical_line", "select_row_expr", "Select rows with expression"),  # `|`
         ("right_curly_bracket", "next_selected_row", "Go to next selected row"),  # `}`
         ("left_curly_bracket", "previous_selected_row", "Go to previous selected row"),  # `{`
-        #
+        ("apostrophe", "toggle_row_selection", "Toggle row selection"),  # `'`
+        ("t", "toggle_selections", "Toggle all row selections"),
+        ("T", "clear_selections_and_matches", "Clear selections"),
+        # Find & Replace
         ("slash", "find_cursor_value", "Find in column with cursor value"),  # `/`
         ("question_mark", "find_expr", "Find in column with expression"),  # `?`
         ("semicolon", "find_cursor_value('global')", "Global find with cursor value"),  # `;`
         ("colon", "find_expr('global')", "Global find with expression"),  # `:`
         ("n", "next_match", "Go to next match"),  # `n`
         ("N", "previous_match", "Go to previous match"),  # `Shift+n`
-        # Replace
         ("r", "replace", "Replace in column"),  # `r`
         ("R", "replace_global", "Replace global"),  # `Shift+R`
-        # Selection
-        ("apostrophe", "toggle_row_selection", "Toggle row selection"),  # `'`
-        ("t", "toggle_selections", "Toggle all row selections"),
-        ("T", "clear_selections_and_matches", "Clear selections"),
         # Delete
         ("delete", "clear_cell", "Clear cell"),
         ("minus", "delete_column", "Delete column"),  # `-`
@@ -316,9 +329,9 @@ class DataFrameTable(DataTable):
         self.filename = filename or "untitled.csv"  # Current filename
         self.tabname = tabname or Path(filename).stem  # Tab name
         # Pagination & Loading
-        self.
-        self.BATCH_SIZE = self.INITIAL_BATCH_SIZE // 2
+        self.BATCH_SIZE = max((self.app.size.height // 100 + 1) * 100, 100)
         self.loaded_rows = 0  # Track how many rows are currently loaded
+        self.loaded_ranges: list[tuple[int, int]] = []  # List of (start, end) row indices that are loaded
 
         # State tracking (all 0-based indexing)
         self.sorted_columns: dict[str, bool] = {}  # col_name -> descending
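The new `BATCH_SIZE` line above replaces the old `INITIAL_BATCH_SIZE // 2` scheme with a batch derived from the terminal height, rounded up to the next multiple of 100 and never below 100. A quick restatement of that formula with illustrative heights:

```python
# Illustrative values for the BATCH_SIZE expression added above; the heights are made up.
def batch_size(app_height: int) -> int:
    return max((app_height // 100 + 1) * 100, 100)

assert batch_size(45) == 100    # a 45-line terminal loads rows in batches of 100
assert batch_size(120) == 200   # taller viewports get proportionally larger batches
```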
@@ -412,6 +425,15 @@ class DataFrameTable(DataTable):
         """
         return self.df.item(self.cursor_row_idx, self.cursor_col_idx)
 
+    @property
+    def has_hidden_rows(self) -> bool:
+        """Check if there are any hidden rows.
+
+        Returns:
+            bool: True if there are hidden rows, False otherwise.
+        """
+        return any(1 for v in self.visible_rows if v is False)
+
     @property
     def ordered_selected_rows(self) -> list[int]:
         """Get the list of selected row indices in order.
@@ -443,6 +465,22 @@ class DataFrameTable(DataTable):
         """
         return self.histories[-1] if self.histories else None
 
+    def _round_to_nearest_hundreds(self, num: int):
+        """Round a number to the nearest hundreds.
+
+        Args:
+            num: The number to round.
+        """
+        return round_to_nearest_hundreds(num, N=self.BATCH_SIZE)
+
+    def get_row_idx(self, row_key: RowKey) -> int:
+        """Get the row index for a given table row key.
+
+        Args:
+            row_key: Row key as string.
+        """
+        return super().get_row_index(row_key)
+
     def get_row_key(self, row_idx: int) -> RowKey:
         """Get the row key for a given table row index.
 
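The `_round_to_nearest_hundreds` wrapper added above delegates to `round_to_nearest_hundreds` from `common.py`, which is not part of the hunks shown here; later callers unpack its result as a `(start, stop)` pair and pass `N=self.BATCH_SIZE`. A minimal sketch of what such a helper could look like, assuming it simply snaps an index to its `BATCH_SIZE`-aligned window (the real implementation and rounding rule may differ):

```python
# Hypothetical sketch of common.round_to_nearest_hundreds(); not shown in this diff.
def round_to_nearest_hundreds(num: int, N: int = 100) -> tuple[int, int]:
    """Return the [start, stop) window of size N that contains `num`."""
    num = max(0, num)
    start = (num // N) * N   # floor to the nearest multiple of N
    stop = start + N         # exclusive upper bound, one window later
    return start, stop

# Example: with BATCH_SIZE = 300, row 725 would fall in the 600-900 window.
assert round_to_nearest_hundreds(725, N=300) == (600, 900)
```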
@@ -454,7 +492,18 @@ class DataFrameTable(DataTable):
         """
         return self._row_locations.get_key(row_idx)
 
-    def
+    def get_col_idx(self, col_key: ColumnKey) -> int:
+        """Get the column index for a given table column key.
+
+        Args:
+            col_key: Column key as string.
+
+        Returns:
+            Corresponding column index as int.
+        """
+        return super().get_column_index(col_key)
+
+    def get_col_key(self, col_idx: int) -> ColumnKey:
         """Get the column key for a given table column index.
 
         Args:
@@ -465,11 +514,11 @@ class DataFrameTable(DataTable):
         """
         return self._column_locations.get_key(col_idx)
 
-    def
+    def _should_highlight(self, cursor: Coordinate, target_cell: Coordinate, type_of_cursor: CursorType) -> bool:
         """Determine if the given cell should be highlighted because of the cursor.
 
-        In "cell" mode, also highlights the row and column headers.
-
+        In "cell" mode, also highlights the row and column headers. This overrides the default
+        behavior of DataTable which only highlights the exact cell under the cursor.
 
         Args:
             cursor: The current position of the cursor.
@@ -566,7 +615,7 @@ class DataFrameTable(DataTable):
         else:
             content_tab.remove_class("dirty")
 
-    def move_cursor_to(self, ridx: int, cidx: int) -> None:
+    def move_cursor_to(self, ridx: int | None = None, cidx: int | None = None) -> None:
         """Move cursor based on the dataframe indices.
 
         Args:
@@ -574,11 +623,11 @@
            cidx: Column index (0-based) in the dataframe.
         """
         # Ensure the target row is loaded
-
-
+        start, stop = self._round_to_nearest_hundreds(ridx)
+        self.load_rows_range(start, stop)
 
-        row_key = str(ridx)
-        col_key = self.df.columns[cidx]
+        row_key = self.cursor_row_key if ridx is None else str(ridx)
+        col_key = self.cursor_col_key if cidx is None else self.df.columns[cidx]
         row_idx, col_idx = self.get_cell_coordinate(row_key, col_key)
         self.move_cursor(row=row_idx, column=col_idx)
 
@@ -594,15 +643,15 @@ class DataFrameTable(DataTable):
     def on_key(self, event) -> None:
         """Handle key press events for pagination.
 
-        Currently handles "pagedown" and "down" keys to trigger lazy loading of additional rows
-        when scrolling near the end of the loaded data.
-
         Args:
             event: The key event object.
         """
-        if event.key
+        if event.key == "up":
             # Let the table handle the navigation first
-            self.
+            self.load_rows_up()
+        elif event.key == "down":
+            # Let the table handle the navigation first
+            self.load_rows_down()
 
     def on_click(self, event: Click) -> None:
         """Handle mouse click events on the table.
@@ -615,13 +664,13 @@ class DataFrameTable(DataTable):
         if self.cursor_type == "cell" and event.chain > 1:  # only on double-click or more
             try:
                 row_idx = event.style.meta["row"]
-
+                col_idx = event.style.meta["column"]
             except (KeyError, TypeError):
                 return  # Unable to get row/column info
 
             # header row
             if row_idx == -1:
-                self.do_rename_column()
+                self.do_rename_column(col_idx)
             else:
                 self.do_edit_cell()
 
@@ -632,16 +681,38 @@ class DataFrameTable(DataTable):
 
     def action_jump_bottom(self) -> None:
         """Jump to the bottom of the table."""
-        self.
+        stop = len(self.df)
+        start = max(0, ((stop - self.BATCH_SIZE) // self.BATCH_SIZE + 1) * self.BATCH_SIZE)
+        self.load_rows_range(start, stop)
+        self.move_cursor(row=self.row_count - 1)
+
+    def action_page_up(self) -> None:
+        """Move the cursor one page up."""
+        self._set_hover_cursor(False)
+        if self.show_cursor and self.cursor_type in ("cell", "row"):
+            height = self.scrollable_content_region.height - (self.header_height if self.show_header else 0)
+
+            col_idx = self.cursor_column
+            ridx = self.cursor_row_idx
+            next_ridx = max(0, ridx - height - BUFFER_SIZE)
+            start, stop = self._round_to_nearest_hundreds(next_ridx)
+            self.load_rows_range(start, stop)
 
-
-
+            self.move_cursor(row=self.get_row_idx(str(next_ridx)), column=col_idx)
+        else:
+            super().action_page_up()
+
+    def action_page_down(self) -> None:
         super().action_page_down()
-        self.
+        self.load_rows_down()
 
     def action_backward_page(self) -> None:
         """Scroll up one page."""
-
+        self.action_page_up()
+
+    def action_forward_page(self) -> None:
+        """Scroll down one page."""
+        self.action_page_down()
 
     def action_view_row_detail(self) -> None:
         """View details of the current row."""
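As a sanity check on the `action_jump_bottom()` arithmetic above, here is the window it computes for an illustrative 1,000-row frame and a 300-row batch (both numbers are made up for the example):

```python
# Worked example of the jump-to-bottom window math in the hunk above.
BATCH_SIZE = 300
stop = 1_000  # len(self.df)
start = max(0, ((stop - BATCH_SIZE) // BATCH_SIZE + 1) * BATCH_SIZE)
# (1000 - 300) // 300 + 1 == 3, so start == 900: only the final partial batch
# (rows 900-999) is materialised before the cursor jumps to the last row.
assert (start, stop) == (900, 1000)
```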
@@ -687,6 +758,14 @@ class DataFrameTable(DataTable):
         """
         self.do_show_statistics(scope)
 
+    def action_metadata_shape(self) -> None:
+        """Show metadata about the dataframe (row and column counts)."""
+        self.do_metadata_shape()
+
+    def action_metadata_column(self) -> None:
+        """Show metadata for the current column."""
+        self.do_metadata_column()
+
     def action_view_rows(self) -> None:
         """View rows by current cell value."""
         self.do_view_rows()
@@ -723,13 +802,13 @@ class DataFrameTable(DataTable):
         """Clear the current cell (set to None)."""
         self.do_clear_cell()
 
-    def
-        """
-        self.
+    def action_select_row_cursor_value(self) -> None:
+        """Select rows with cursor value in the current column."""
+        self.do_select_row_cursor_value()
 
-    def
-        """
-        self.
+    def action_select_row_expr(self) -> None:
+        """Select rows by expression."""
+        self.do_select_row_expr()
 
     def action_find_cursor_value(self, scope="column") -> None:
         """Find by cursor value.
@@ -846,7 +925,12 @@ class DataFrameTable(DataTable):
             cell_str = str(self.df.item(ridx, cidx))
             self.do_copy_to_clipboard(cell_str, f"Copied: [$success]{cell_str[:50]}[/]")
         except IndexError:
-            self.notify(
+            self.notify(
+                f"Error copying cell ([$error]{ridx}[/], [$accent]{cidx}[/])",
+                title="Clipboard",
+                severity="error",
+                timeout=10,
+            )
 
     def action_copy_column(self) -> None:
         """Copy the current column to clipboard (one value per line)."""
@@ -862,7 +946,7 @@ class DataFrameTable(DataTable):
                 f"Copied [$accent]{len(col_values)}[/] values from column [$success]{col_name}[/]",
             )
         except (FileNotFoundError, IndexError):
-            self.notify("Error copying column", title="Clipboard", severity="error")
+            self.notify(f"Error copying column [$error]{col_name}[/]", title="Clipboard", severity="error", timeout=10)
 
     def action_copy_row(self) -> None:
         """Copy the current row to clipboard (values separated by tabs)."""
@@ -878,7 +962,7 @@ class DataFrameTable(DataTable):
                 f"Copied row [$accent]{ridx + 1}[/] with [$success]{len(row_values)}[/] values",
             )
         except (FileNotFoundError, IndexError):
-            self.notify("Error copying row", title="Clipboard", severity="error")
+            self.notify(f"Error copying row [$error]{ridx}[/]", title="Clipboard", severity="error", timeout=10)
 
     def action_show_thousand_separator(self) -> None:
         """Toggle thousand separator for numeric display."""
@@ -911,9 +995,13 @@ class DataFrameTable(DataTable):
         """Open the advanced SQL interface screen."""
         self.do_advanced_sql()
 
+    def on_mouse_scroll_up(self, event) -> None:
+        """Load more rows when scrolling up with mouse."""
+        self.load_rows_up()
+
     def on_mouse_scroll_down(self, event) -> None:
         """Load more rows when scrolling down with mouse."""
-        self.
+        self.load_rows_down()
 
     # Setup & Loading
     def reset_df(self, new_df: pl.DataFrame, dirty: bool = True) -> None:
@@ -933,8 +1021,8 @@ class DataFrameTable(DataTable):
         self.fixed_rows = 0
         self.fixed_columns = 0
         self.matches = defaultdict(set)
-        self.histories.clear()
-        self.history = None
+        # self.histories.clear()
+        # self.history = None
         self.dirty = dirty  # Mark as dirty since data changed
 
     def setup_table(self, reset: bool = False) -> None:
@@ -950,27 +1038,27 @@ class DataFrameTable(DataTable):
         if reset:
             self.reset_df(self.dataframe, dirty=False)
 
-        # Lazy load up to
-        stop, visible_count = self.
+        # Lazy load up to BATCH_SIZE visible rows
+        stop, visible_count, row_idx = self.BATCH_SIZE, 0, 0
         for row_idx, visible in enumerate(self.visible_rows):
             if not visible:
                 continue
             visible_count += 1
-            if visible_count > self.
-                stop = row_idx
+            if visible_count > self.BATCH_SIZE:
+                stop = row_idx
                 break
         else:
-            stop = row_idx
+            stop = row_idx
 
-        #
-
-
+        # Round up to next hundreds
+        if stop % self.BATCH_SIZE != 0:
+            stop = (stop // self.BATCH_SIZE + 1) * self.BATCH_SIZE
 
         # Save current cursor position before clearing
         row_idx, col_idx = self.cursor_coordinate
 
         self.setup_columns()
-        self.
+        self.load_rows_range(0, stop)
 
         # Restore cursor position
         if row_idx < len(self.rows) and col_idx < len(self.columns):
@@ -993,7 +1081,7 @@ class DataFrameTable(DataTable):
         column_widths = {}
 
         # Get available width for the table (with some padding for borders/scrollbar)
-        available_width = self.
+        available_width = self.scrollable_content_region.width
 
         # Calculate how much width we need for string columns first
         string_cols = [col for col, dtype in zip(self.df.columns, self.df.dtypes) if dtype == pl.String]
@@ -1003,7 +1091,7 @@ class DataFrameTable(DataTable):
             return column_widths
 
         # Sample a reasonable number of rows to calculate widths (don't scan entire dataframe)
-        sample_size = min(self.
+        sample_size = min(self.BATCH_SIZE, len(self.df))
         sample_lf = self.df.lazy().slice(0, sample_size)
 
         # Determine widths for each column
@@ -1130,7 +1218,8 @@ class DataFrameTable(DataTable):
             # Load max BATCH_SIZE rows at a time
             chunk_size = min(self.BATCH_SIZE, stop - self.loaded_rows)
             next_stop = min(self.loaded_rows + chunk_size, stop)
-            self.
+            self.load_rows_range(self.loaded_rows, next_stop)
+            self.loaded_rows = next_stop
 
             # If there's more to load, yield to event loop with delay
             if next_stop < stop:
@@ -1142,114 +1231,382 @@ class DataFrameTable(DataTable):
         if move_to_end:
             self.call_after_refresh(lambda: self.move_cursor(row=self.row_count - 1))
 
-    def
-        """
+    def _calculate_load_range(self, start: int, stop: int) -> list[tuple[int, int]]:
+        """Calculate the actual ranges to load, accounting for already-loaded ranges.
 
-
-
+        Handles complex cases where a loaded range is fully contained within the requested
+        range (creating head and tail segments to load). All overlapping/adjacent loaded
+        ranges are merged first to minimize gaps.
 
         Args:
-
+            start: Requested start index (0-based).
+            stop: Requested stop index (0-based, exclusive).
+
+        Returns:
+            List of (actual_start, actual_stop) tuples to load. Empty list if the entire
+            requested range is already loaded.
+
+        Example:
+            If loaded ranges are [(150, 250)] and requesting (100, 300):
+            - Returns [(100, 150), (250, 300)] to load head and tail
+            If loaded ranges are [(0, 100), (100, 200)] and requesting (50, 150):
+            - After merging, loaded_ranges becomes [(0, 200)]
+            - Returns [] (already fully loaded)
         """
-
-        start
-
+        if not self.loaded_ranges:
+            return [(start, stop)]
+
+        # Sort loaded ranges by start index
+        sorted_ranges = sorted(self.loaded_ranges)
+
+        # Merge overlapping/adjacent ranges
+        merged = []
+        for range_start, range_stop in sorted_ranges:
+            if merged and range_start <= merged[-1][1]:
+                # Overlapping or adjacent: merge
+                merged[-1] = (merged[-1][0], max(merged[-1][1], range_stop))
+            else:
+                merged.append((range_start, range_stop))
+
+        self.loaded_ranges = merged
+
+        # Calculate ranges to load by finding gaps in the merged ranges
+        ranges_to_load = []
+        current_pos = start
+
+        for range_start, range_stop in merged:
+            # If there's a gap before this loaded range, add it to load list
+            if current_pos < range_start and current_pos < stop:
+                gap_end = min(range_start, stop)
+                ranges_to_load.append((current_pos, gap_end))
+                current_pos = range_stop
+            elif current_pos >= range_stop:
+                # Already moved past this loaded range
+                continue
+            else:
+                # Current position is inside this loaded range, skip past it
+                current_pos = max(current_pos, range_stop)
 
-
-
-
+        # If there's remaining range after all loaded ranges, add it
+        if current_pos < stop:
+            ranges_to_load.append((current_pos, stop))
 
-
-            match_cols = self.matches.get(ridx, set())
+        return ranges_to_load
 
-
-
-            if col in self.hidden_columns:
-                continue  # Skip hidden columns
+    def _merge_loaded_ranges(self) -> None:
+        """Merge adjacent and overlapping ranges in self.loaded_ranges.
 
-
-
+        Ranges like (0, 100) and (100, 200) are merged into (0, 200).
+        """
+        if len(self.loaded_ranges) <= 1:
+            return
 
-
-
+        # Sort by start index
+        sorted_ranges = sorted(self.loaded_ranges)
 
-
+        # Merge overlapping/adjacent ranges
+        merged = [sorted_ranges[0]]
+        for range_start, range_stop in sorted_ranges[1:]:
+            # Overlapping or adjacent: merge
+            if range_start <= merged[-1][1]:
+                merged[-1] = (merged[-1][0], max(merged[-1][1], range_stop))
+            else:
+                merged.append((range_start, range_stop))
 
-
-            self.add_row(*formatted_row, key=str(ridx), label=str(ridx + 1))
+        self.loaded_ranges = merged
 
-
-
+    def _find_insert_position_for_row(self, ridx: int) -> int:
+        """Find the correct table position to insert a row with the given dataframe index.
 
-
-
+        In the table display, rows are ordered by their dataframe index, regardless of
+        the internal row keys. This method finds where a row should be inserted based on
+        its dataframe index and the indices of already-loaded rows.
 
-
-
-            self.log(f"Error loading rows: {str(e)}")
+        Args:
+            ridx: The 0-based dataframe row index.
 
-
-
-
-
-
+        Returns:
+            The 0-based table position where the row should be inserted.
+        """
+        # Count how many already-loaded rows have lower dataframe indices
+        # Iterate through loaded rows instead of iterating 0..ridx for efficiency
+        insert_pos = 0
+        for row_key in self._row_locations:
+            loaded_ridx = int(row_key.value)
+            if loaded_ridx < ridx:
+                insert_pos += 1
 
-
-        bottom_visible_row = self.scroll_y + visible_row_count
+        return insert_pos
 
-
-
-            self.load_rows(self.loaded_rows + self.BATCH_SIZE)
+    def load_rows_segment(self, segment_start: int, segment_stop: int) -> int:
+        """Load a single contiguous segment of rows into the table.
 
-
-
-
+        This is the core loading logic that inserts rows at correct positions,
+        respecting visibility and selection states. Used by load_rows_range()
+        to handle each segment independently.
 
         Args:
-
+            segment_start: Start loading rows from this index (0-based).
+            segment_stop: Stop loading rows when this index is reached (0-based, exclusive).
         """
-        #
-
-        stop = max(stop, max(self.matches.keys(), default=0) + 1)
+        # Record this range before loading
+        self.loaded_ranges.append((segment_start, segment_stop))
 
-
-        self.
+        # Load the dataframe slice
+        df_slice = self.df.slice(segment_start, segment_stop - segment_start)
 
-
-
-
-
+        # Load each row at the correct position
+        for ridx, row in enumerate(df_slice.rows(), segment_start):
+            if not self.visible_rows[ridx]:
+                continue  # Skip hidden rows
 
-            # Update all rows based on selected state
-            for row in self.ordered_rows:
-                ridx = int(row.key.value)  # 0-based index
            is_selected = self.selected_rows[ridx]
            match_cols = self.matches.get(ridx, set())
 
-
-
+            vals, dtypes, styles = [], [], []
+            for cidx, (val, col, dtype) in enumerate(zip(row, self.df.columns, self.df.dtypes)):
+                if col in self.hidden_columns:
+                    continue  # Skip hidden columns
+
+                vals.append(val)
+                dtypes.append(dtype)
+
+                # Highlight entire row with selection or cells with matches
+                styles.append(HIGHLIGHT_COLOR if is_selected or cidx in match_cols else None)
+
+            formatted_row = format_row(vals, dtypes, styles=styles, thousand_separator=self.thousand_separator)
+
+            # Find correct insertion position and insert
+            insert_pos = self._find_insert_position_for_row(ridx)
+            self.insert_row(*formatted_row, key=str(ridx), label=str(ridx + 1), position=insert_pos)
+
+        # Number of rows loaded in this segment
+        segment_count = len(df_slice)
+
+        # Update loaded rows count
+        self.loaded_rows += segment_count
+
+        return segment_count
+
+    def load_rows_range(self, start: int, stop: int) -> int:
+        """Load a batch of rows into the table.
+
+        Row keys are 0-based indices as strings, which map directly to dataframe row indices.
+        Row labels are 1-based indices as strings.
+
+        Intelligently handles range loading:
+        1. Calculates which ranges actually need loading (avoiding reloading)
+        2. Handles complex cases where loaded ranges create "holes" (head and tail segments)
+        3. Inserts rows at correct positions in the table
+        4. Merges adjacent/overlapping ranges to optimize future loading
+
+        Args:
+            start: Start loading rows from this index (0-based).
+            stop: Stop loading rows when this index is reached (0-based, exclusive).
+        """
+        start = max(0, start)  # Clamp to non-negative
+        stop = min(stop, len(self.df))  # Clamp to dataframe length
+
+        try:
+            # Calculate actual ranges to load, accounting for already-loaded ranges
+            ranges_to_load = self._calculate_load_range(start, stop)
+
+            # If nothing needs loading, return early
+            if not ranges_to_load:
+                self.log(f"Range {start}-{stop} already loaded, skipping")
+                return 0
+
+            # Track the number of loaded rows in this range
+            range_count = 0
 
-            #
-            for
-
-                continue  # No highlight needed for this cell
+            # Load each segment
+            for segment_start, segment_stop in ranges_to_load:
+                range_count += self.load_rows_segment(segment_start, segment_stop)
 
-
-
+            # Merge adjacent/overlapping ranges to optimize storage
+            self._merge_loaded_ranges()
 
-
-
-
-
-
-
-
-                        cell_text.style = dc.style
-                        need_update = True
+            self.log(f"Loaded {range_count} rows for range {start}-{stop}/{len(self.df)}")
+            return range_count
+
+        except Exception as e:
+            self.notify("Error loading rows", title="Load", severity="error", timeout=10)
+            self.log(f"Error loading rows: {str(e)}")
+            return 0
 
-
-
-
+    def load_rows_up(self) -> None:
+        """Check if we need to load more rows and load them."""
+        # If we've loaded everything, no need to check
+        if self.loaded_rows >= len(self.df):
+            return
+
+        top_row_index = int(self.scroll_y) + BUFFER_SIZE
+        top_row_key = self.get_row_key(top_row_index)
+
+        if top_row_key:
+            top_ridx = int(top_row_key.value)
+        else:
+            top_ridx = 0
+            self.log(f"No top row key at index {top_row_index}, defaulting to 0")
+
+        # Load upward
+        start, stop = self._round_to_nearest_hundreds(top_ridx - BUFFER_SIZE * 2)
+        range_count = self.load_rows_range(start, stop)
+
+        # self.log(
+        #     "========",
+        #     f"{self.scrollable_content_region.height = },",
+        #     f"{self.header_height = },",
+        #     f"{self.scroll_y = },",
+        #     f"{top_row_index = },",
+        #     f"{top_ridx = },",
+        #     f"{start = },",
+        #     f"{stop = },",
+        #     f"{range_count = },",
+        #     f"{self.loaded_ranges = }",
+        # )
+
+        # Adjust scroll to maintain position if rows were loaded above
+        if range_count > 0:
+            self.move_cursor(row=top_row_index + range_count)
+            self.log(f"Loaded up: {range_count} rows in range {start}-{stop}/{len(self.df)}")
+
+    def load_rows_down(self) -> None:
+        """Check if we need to load more rows and load them."""
+        # If we've loaded everything, no need to check
+        if self.loaded_rows >= len(self.df):
+            return
+
+        visible_row_count = self.scrollable_content_region.height - self.header_height
+        bottom_row_index = self.scroll_y + visible_row_count - BUFFER_SIZE
+
+        bottom_row_key = self.get_row_key(bottom_row_index)
+        if bottom_row_key:
+            bottom_ridx = int(bottom_row_key.value)
+        else:
+            bottom_ridx = 0
+            self.log(f"No bottom row key at index {bottom_row_index}, defaulting to 0")
+
+        # Load downward
+        start, stop = self._round_to_nearest_hundreds(bottom_ridx + BUFFER_SIZE * 2)
+        range_count = self.load_rows_range(start, stop)
+
+        # self.log(
+        #     "========",
+        #     f"{self.scrollable_content_region.height = },",
+        #     f"{self.header_height = },",
+        #     f"{self.scroll_y = },",
+        #     f"{bottom_row_index = },",
+        #     f"{bottom_ridx = },",
+        #     f"{start = },",
+        #     f"{stop = },",
+        #     f"{range_count = },",
+        #     f"{self.loaded_ranges = }",
+        # )
+
+        if range_count > 0:
+            self.log(f"Loaded down: {range_count} rows in range {start}-{stop}/{len(self.df)}")
+
+    def insert_row(
+        self,
+        *cells: CellType,
+        height: int | None = 1,
+        key: str | None = None,
+        label: TextType | None = None,
+        position: int | None = None,
+    ) -> RowKey:
+        """Insert a row at a specific position in the DataTable.
+
+        When inserting, all rows at and after the insertion position are shifted down,
+        and their entries in self._row_locations are updated accordingly.
+
+        Args:
+            *cells: Positional arguments should contain cell data.
+            height: The height of a row (in lines). Use `None` to auto-detect the optimal
+                height.
+            key: A key which uniquely identifies this row. If None, it will be generated
+                for you and returned.
+            label: The label for the row. Will be displayed to the left if supplied.
+            position: The 0-based row index where the new row should be inserted.
+                If None, inserts at the end (same as add_row). If out of bounds,
+                inserts at the nearest valid position.
+
+        Returns:
+            Unique identifier for this row. Can be used to retrieve this row regardless
+            of its current location in the DataTable (it could have moved after
+            being added due to sorting or insertion/deletion of other rows).
+
+        Raises:
+            DuplicateKey: If a row with the given key already exists.
+            ValueError: If more cells are provided than there are columns.
+        """
+        # Default to appending if position not specified or >= row_count
+        row_count = self.row_count
+        if position is None or position >= row_count:
+            return self.add_row(*cells, height=height, key=key, label=label)
+
+        # Clamp position to valid range [0, row_count)
+        position = max(0, position)
+
+        row_key = RowKey(key)
+        if row_key in self._row_locations:
+            raise DuplicateKey(f"The row key {row_key!r} already exists.")
+
+        if len(cells) > len(self.ordered_columns):
+            raise ValueError("More values provided than there are columns.")
+
+        # TC: Rebuild self._row_locations to shift rows at and after position down by 1
+        # Create a mapping of old index -> new index
+        old_to_new = {}
+        for old_idx in range(row_count):
+            if old_idx < position:
+                old_to_new[old_idx] = old_idx  # No change
+            else:
+                old_to_new[old_idx] = old_idx + 1  # Shift down by 1
+
+        # Update _row_locations with the new indices
+        new_row_locations = TwoWayDict({})
+        for row_key_item in self._row_locations:
+            old_idx = self.get_row_idx(row_key_item)
+            new_idx = old_to_new.get(old_idx, old_idx)
+            new_row_locations[row_key_item] = new_idx
+
+        # Update the internal mapping
+        self._row_locations = new_row_locations
+        # TC
+
+        row_index = position
+        # Map the key of this row to its current index
+        self._row_locations[row_key] = row_index
+        self._data[row_key] = {column.key: cell for column, cell in zip_longest(self.ordered_columns, cells)}
+
+        label = Text.from_markup(label, end="") if isinstance(label, str) else label
+
+        # Rows with auto-height get a height of 0 because 1) we need an integer height
+        # to do some intermediate computations and 2) because 0 doesn't impact the data
+        # table while we don't figure out how tall this row is.
+        self.rows[row_key] = Row(
+            row_key,
+            height or 0,
+            label,
+            height is None,
+        )
+        self._new_rows.add(row_key)
+        self._require_update_dimensions = True
+        self.cursor_coordinate = self.cursor_coordinate
+
+        # If a position has opened for the cursor to appear, where it previously
+        # could not (e.g. when there's no data in the table), then a highlighted
+        # event is posted, since there's now a highlighted cell when there wasn't
+        # before.
+        cell_now_available = self.row_count == 1 and len(self.columns) > 0
+        visible_cursor = self.show_cursor and self.cursor_type != "none"
+        if cell_now_available and visible_cursor:
+            self._highlight_cursor()
+
+        self._update_count += 1
+        self.check_idle()
+        return row_key
 
     # History & Undo
     def create_history(self, description: str) -> None:
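The docstring of the new `_calculate_load_range()` gives two worked examples; the same gap-finding logic can be exercised outside the widget with a small standalone sketch (the free-function form and names are mine, but the algorithm mirrors the method above):

```python
# Standalone sketch of the gap-finding logic in _calculate_load_range() above.
def calculate_load_range(loaded: list[tuple[int, int]], start: int, stop: int) -> list[tuple[int, int]]:
    if not loaded:
        return [(start, stop)]
    # Merge overlapping/adjacent loaded ranges first.
    merged: list[tuple[int, int]] = []
    for lo, hi in sorted(loaded):
        if merged and lo <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], hi))
        else:
            merged.append((lo, hi))
    # Walk the merged ranges and collect the uncovered gaps inside [start, stop).
    gaps, pos = [], start
    for lo, hi in merged:
        if pos < lo and pos < stop:
            gaps.append((pos, min(lo, stop)))
            pos = hi
        elif pos >= hi:
            continue
        else:
            pos = max(pos, hi)
    if pos < stop:
        gaps.append((pos, stop))
    return gaps

# The docstring's examples hold: a hole in the middle yields head and tail segments,
# and a fully covered request yields nothing to load.
assert calculate_load_range([(150, 250)], 100, 300) == [(100, 150), (250, 300)]
assert calculate_load_range([(0, 100), (100, 200)], 50, 150) == []
```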
@@ -1321,7 +1678,7 @@ class DataFrameTable(DataTable):
         # Restore state
         self.apply_history(history)
 
-        self.notify(f"Reverted: {history.description}", title="Undo")
+        self.notify(f"Reverted: [$success]{history.description}[/]", title="Undo")
 
     def do_redo(self) -> None:
         """Redo the last undone action."""
@@ -1340,7 +1697,7 @@ class DataFrameTable(DataTable):
         # Clear redo state
         self.history = None
 
-        self.notify(f"Reapplied: {description}", title="Redo")
+        self.notify(f"Reapplied: [$success]{description}[/]", title="Redo")
 
     def do_reset(self) -> None:
         """Reset the table to the initial state."""
@@ -1390,6 +1747,14 @@ class DataFrameTable(DataTable):
         cidx = self.cursor_col_idx
         self.app.push_screen(StatisticsScreen(self, col_idx=cidx))
 
+    def do_metadata_shape(self) -> None:
+        """Show metadata about the dataframe (row and column counts)."""
+        self.app.push_screen(MetaShape(self))
+
+    def do_metadata_column(self) -> None:
+        """Show metadata for all columns in the dataframe."""
+        self.app.push_screen(MetaColumnScreen(self))
+
     def do_freeze_row_column(self) -> None:
         """Open the freeze screen to set fixed rows and columns."""
         self.app.push_screen(FreezeScreen(), callback=self.freeze_row_column)
@@ -1406,7 +1771,7 @@ class DataFrameTable(DataTable):
         fixed_rows, fixed_columns = result
 
         # Add to history
-        self.add_history(f"Pinned [$
+        self.add_history(f"Pinned [$success]{fixed_rows}[/] rows and [$accent]{fixed_columns}[/] columns")
 
         # Apply the pin settings to the table
         if fixed_rows >= 0:
@@ -1414,7 +1779,7 @@ class DataFrameTable(DataTable):
         if fixed_columns >= 0:
             self.fixed_columns = fixed_columns
 
-        # self.notify(f"Pinned [$
+        # self.notify(f"Pinned [$success]{fixed_rows}[/] rows and [$accent]{fixed_columns}[/] columns", title="Pin")
 
     def do_hide_column(self) -> None:
         """Hide the currently selected column from the table display."""
@@ -1435,7 +1800,7 @@ class DataFrameTable(DataTable):
         if col_idx >= len(self.columns):
             self.move_cursor(column=len(self.columns) - 1)
 
-        # self.notify(f"Hid column [$
+        # self.notify(f"Hid column [$success]{col_name}[/]. Press [$accent]H[/] to show hidden columns", title="Hide")
 
     def do_expand_column(self) -> None:
         """Expand the current column to show the widest cell in the loaded data."""
@@ -1471,7 +1836,9 @@ class DataFrameTable(DataTable):
 
            # self.notify(f"Expanded column [$success]{col_name}[/] to width [$accent]{max_width}[/]", title="Expand")
         except Exception as e:
-            self.notify(
+            self.notify(
+                f"Error expanding column [$error]{col_name}[/]", title="Expand Column", severity="error", timeout=10
+            )
             self.log(f"Error expanding column `{col_name}`: {str(e)}")
 
     def do_show_hidden_rows_columns(self) -> None:
@@ -1497,7 +1864,7 @@ class DataFrameTable(DataTable):
         self.setup_table()
 
         self.notify(
-            f"Showed [$
+            f"Showed [$success]{hidden_row_count}[/] hidden row(s) and/or [$accent]{hidden_col_count}[/] column(s)",
             title="Show",
         )
 
@@ -1520,6 +1887,7 @@ class DataFrameTable(DataTable):
 
         # Add to history
         self.add_history(f"Sorted on column [$success]{col_name}[/]", dirty=True)
+
         if old_desc is None:
             # Add new column to sort
             self.sorted_columns[col_name] = descending
@@ -1531,18 +1899,27 @@ class DataFrameTable(DataTable):
             del self.sorted_columns[col_name]
             self.sorted_columns[col_name] = descending
 
+        lf = self.df.lazy().with_row_index(RIDX)
+
         # Apply multi-column sort
         if sort_cols := list(self.sorted_columns.keys()):
             descending_flags = list(self.sorted_columns.values())
-
-
-
-            df_sorted = self.df.with_row_index(RIDX)
+            lf = lf.sort(sort_cols, descending=descending_flags, nulls_last=True)
+
+        df_sorted = lf.collect()
 
-        # Updated
+        # Updated visible rows, selected rows, and cell matches to match new order
         old_row_indices = df_sorted[RIDX].to_list()
-
-
+        if self.has_hidden_rows:
+            self.visible_rows = [self.visible_rows[old_ridx] for old_ridx in old_row_indices]
+        if any(self.selected_rows):
+            self.selected_rows = [self.selected_rows[old_ridx] for old_ridx in old_row_indices]
+        if any(self.matches):
+            self.matches = {
+                new_ridx: self.matches[old_ridx]
+                for new_ridx, old_ridx in enumerate(old_row_indices)
+                if old_ridx in self.matches
+            }
 
         # Update the dataframe
         self.df = df_sorted.drop(RIDX)
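The sort hunk above keeps `visible_rows`, `selected_rows`, and `matches` aligned with the data by sorting on a temporary row-index column and then permuting the bookkeeping lists. A small illustration of that pattern (toy data; the `RIDX` sentinel name is assumed here, the real constant comes from `.common`):

```python
# Minimal sketch of re-indexing per-row state after a sort, as done in the hunk above.
import polars as pl

RIDX = "__ridx__"  # assumed sentinel column name
df = pl.DataFrame({"a": [3, 1, 2]})
selected = [True, False, False]  # row 0 was selected before sorting

df_sorted = df.lazy().with_row_index(RIDX).sort("a").collect()
old_row_indices = df_sorted[RIDX].to_list()            # [1, 2, 0]: pre-sort index of each new row
selected = [selected[old] for old in old_row_indices]  # the selection follows its row

assert selected == [False, False, True]
df = df_sorted.drop(RIDX)
```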
@@ -1605,10 +1982,15 @@ class DataFrameTable(DataTable):
|
|
|
1605
1982
|
col_key = col_name
|
|
1606
1983
|
self.update_cell(row_key, col_key, formatted_value, update_width=True)
|
|
1607
1984
|
|
|
1608
|
-
# self.notify(f"Cell updated to [$success]{cell_value}[/]", title="Edit")
|
|
1985
|
+
# self.notify(f"Cell updated to [$success]{cell_value}[/]", title="Edit Cell")
|
|
1609
1986
|
except Exception as e:
|
|
1610
|
-
self.notify(
|
|
1611
|
-
|
|
1987
|
+
self.notify(
|
|
1988
|
+
f"Error updating cell ([$error]{ridx}[/], [$accent]{col_name}[/])",
|
|
1989
|
+
title="Edit Cell",
|
|
1990
|
+
severity="error",
|
|
1991
|
+
timeout=10,
|
|
1992
|
+
)
|
|
1993
|
+
self.log(f"Error updating cell ({ridx}, {col_name}): {str(e)}")
|
|
1612
1994
|
|
|
1613
1995
|
def do_edit_column(self) -> None:
|
|
1614
1996
|
"""Open modal to edit the entire column with an expression."""
|
|
@@ -1637,7 +2019,9 @@ class DataFrameTable(DataTable):
|
|
|
1637
2019
|
try:
|
|
1638
2020
|
expr = validate_expr(term, self.df.columns, cidx)
|
|
1639
2021
|
except Exception as e:
|
|
1640
|
-
self.notify(
|
|
2022
|
+
self.notify(
|
|
2023
|
+
f"Error validating expression [$error]{term}[/]", title="Edit Column", severity="error", timeout=10
|
|
2024
|
+
)
|
|
1641
2025
|
self.log(f"Error validating expression `{term}`: {str(e)}")
|
|
1642
2026
|
return
|
|
1643
2027
|
|
|
@@ -1649,14 +2033,14 @@ class DataFrameTable(DataTable):
|
|
|
1649
2033
|
expr = pl.lit(value)
|
|
1650
2034
|
except Exception:
|
|
1651
2035
|
self.notify(
|
|
1652
|
-
f"Error converting [$
|
|
2036
|
+
f"Error converting [$error]{term}[/] to [$accent]{dtype}[/]. Cast to string.",
|
|
1653
2037
|
title="Edit",
|
|
1654
2038
|
severity="error",
|
|
1655
2039
|
)
|
|
1656
2040
|
expr = pl.lit(str(term))
|
|
1657
2041
|
|
|
1658
2042
|
# Add to history
|
|
1659
|
-
self.add_history(f"Edited column [$
|
|
2043
|
+
self.add_history(f"Edited column [$success]{col_name}[/] with expression", dirty=True)
|
|
1660
2044
|
|
|
1661
2045
|
try:
|
|
1662
2046
|
# Apply the expression to the column
|
|
@@ -1666,6 +2050,7 @@ class DataFrameTable(DataTable):
|
|
|
1666
2050
|
f"Error applying expression: [$error]{term}[/] to column [$accent]{col_name}[/]",
|
|
1667
2051
|
title="Edit",
|
|
1668
2052
|
severity="error",
|
|
2053
|
+
timeout=10,
|
|
1669
2054
|
)
|
|
1670
2055
|
self.log(f"Error applying expression `{term}` to column `{col_name}`: {str(e)}")
|
|
1671
2056
|
return
|
|
@@ -1673,12 +2058,12 @@ class DataFrameTable(DataTable):
|
|
|
1673
2058
|
# Recreate table for display
|
|
1674
2059
|
self.setup_table()
|
|
1675
2060
|
|
|
1676
|
-
# self.notify(f"Column [$accent]{col_name}[/] updated with [$success]{expr}[/]", title="Edit")
|
|
2061
|
+
# self.notify(f"Column [$accent]{col_name}[/] updated with [$success]{expr}[/]", title="Edit Column")
|
|
1677
2062
|
|
|
1678
|
-
def do_rename_column(self) -> None:
|
|
2063
|
+
def do_rename_column(self, col_idx: int | None) -> None:
|
|
1679
2064
|
"""Open modal to rename the selected column."""
|
|
1680
|
-
|
|
1681
|
-
|
|
2065
|
+
col_idx = self.cursor_column if col_idx is None else col_idx
|
|
2066
|
+
col_name = self.get_col_key(col_idx).value
|
|
1682
2067
|
|
|
1683
2068
|
# Push the rename column modal screen
|
|
1684
2069
|
self.app.push_screen(
|
|
@@ -1700,7 +2085,7 @@ class DataFrameTable(DataTable):
|
|
|
1700
2085
|
return
|
|
1701
2086
|
|
|
1702
2087
|
# Add to history
|
|
1703
|
-
self.add_history(f"Renamed column [$
|
|
2088
|
+
self.add_history(f"Renamed column [$success]{col_name}[/] to [$accent]{new_name}[/]", dirty=True)
|
|
1704
         # Rename the column in the dataframe
         self.df = self.df.rename({col_name: new_name})
@@ -1748,10 +2133,15 @@ class DataFrameTable(DataTable):
 
             self.update_cell(row_key, col_key, formatted_value)
 
-            # self.notify(f"Cell cleared to [$success]{NULL_DISPLAY}[/]", title="Clear")
+            # self.notify(f"Cell cleared to [$success]{NULL_DISPLAY}[/]", title="Clear Cell")
        except Exception as e:
-            self.notify(
-
+            self.notify(
+                f"Error clearing cell ([$error]{ridx}[/], [$accent]{col_name}[/])",
+                title="Clear Cell",
+                severity="error",
+                timeout=10,
+            )
+            self.log(f"Error clearing cell ({ridx}, {col_name}): {str(e)}")
            raise e
 
    def do_add_column(self, col_name: str = None, col_value: pl.Expr = None) -> None:
@@ -1770,7 +2160,7 @@ class DataFrameTable(DataTable):
        new_name = col_name
 
        # Add to history
-        self.add_history(f"Added column [$success]{new_name}[/] after column {cidx + 1}", dirty=True)
+        self.add_history(f"Added column [$success]{new_name}[/] after column [$accent]{cidx + 1}[/]", dirty=True)
 
        try:
            # Create an empty column (all None values)
@@ -1796,8 +2186,8 @@ class DataFrameTable(DataTable):
 
            # self.notify(f"Added column [$success]{new_name}[/]", title="Add Column")
        except Exception as e:
-            self.notify("Error adding column", title="Add Column", severity="error")
-            self.log(f"Error adding column
+            self.notify(f"Error adding column [$error]{new_name}[/]", title="Add Column", severity="error", timeout=10)
+            self.log(f"Error adding column `{new_name}`: {str(e)}")
            raise e
 
    def do_add_column_expr(self) -> None:
@@ -1816,7 +2206,7 @@ class DataFrameTable(DataTable):
        cidx, new_col_name, expr = result
 
        # Add to history
-        self.add_history(f"Added column [$success]{new_col_name}[/] with expression {expr}.", dirty=True)
+        self.add_history(f"Added column [$success]{new_col_name}[/] with expression [$accent]{expr}[/].", dirty=True)
 
        try:
            # Create the column
@@ -1839,7 +2229,9 @@ class DataFrameTable(DataTable):
 
            # self.notify(f"Added column [$success]{col_name}[/]", title="Add Column")
        except Exception as e:
-            self.notify(
+            self.notify(
+                f"Error adding column [$error]{new_col_name}[/]", title="Add Column", severity="error", timeout=10
+            )
            self.log(f"Error adding column `{new_col_name}`: {str(e)}")
 
    def do_add_link_column(self) -> None:
@@ -1851,10 +2243,10 @@ class DataFrameTable(DataTable):
    def add_link_column(self, result: tuple[str, str] | None) -> None:
        """Handle result from AddLinkScreen.
 
-        Creates a new link column in the dataframe
-
+        Creates a new link column in the dataframe based on a user-provided template.
+        Supports multiple placeholder types:
        - `$_` - Current column (based on cursor position)
-        - `$1`, `$2`, etc. - Column by 1-based
+        - `$1`, `$2`, etc. - Column by index (1-based)
        - `$name` - Column by name (e.g., `$id`, `$product_name`)
 
        The template is evaluated for each row using Polars expressions with vectorized
@@ -1868,7 +2260,7 @@ class DataFrameTable(DataTable):
        cidx, new_col_name, link_template = result
 
        self.add_history(
-            f"Added link column [$
+            f"Added link column [$success]{new_col_name}[/] with template [$accent]{link_template}[/].", dirty=True
        )
 
        try:
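
Note: the placeholder expansion described in the `add_link_column` docstring above is easiest to see in isolation. The sketch below is not the package's own helper; the column names, the template string, and the `template_to_expr` function are illustrative assumptions showing how a `$name`-style template could be compiled into a single vectorized Polars expression (the `$_` and `$1` forms handled by `parse_placeholders` are omitted here).

    import re
    import polars as pl

    # Hypothetical template; only the $name placeholder form is handled in this sketch.
    TEMPLATE = "https://example.com/items/$id?q=$product_name"

    def template_to_expr(template: str, columns: list[str]) -> pl.Expr:
        """Turn literal text and $name placeholders into one concat_str expression."""
        parts: list[pl.Expr] = []
        for token in re.split(r"(\$\w+)", template):
            if token.startswith("$") and token[1:] in columns:
                parts.append(pl.col(token[1:]).cast(pl.String))
            elif token:
                parts.append(pl.lit(token))
        return pl.concat_str(parts)

    df = pl.DataFrame({"id": [1, 2], "product_name": ["mouse", "keyboard"]})
    links = df.with_columns(template_to_expr(TEMPLATE, df.columns).alias("link"))
    print(links["link"].to_list())

Building one expression and evaluating it column-wise is what makes the link column cheap to create even on large frames, compared with formatting each row in Python.
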
@@ -1904,7 +2296,9 @@ class DataFrameTable(DataTable):
            self.notify(f"Added link column [$success]{new_col_name}[/]. Use Ctrl/Cmd click to open.", title="Add Link")
 
        except Exception as e:
-            self.notify(
+            self.notify(
+                f"Error adding link column [$error]{new_col_name}[/]", title="Add Link", severity="error", timeout=10
+            )
            self.log(f"Error adding link column: {str(e)}")
 
    def do_delete_column(self, more: str = None) -> None:
@@ -1920,7 +2314,7 @@ class DataFrameTable(DataTable):
        # Remove all columns before the current column
        if more == "before":
            for i in range(col_idx + 1):
-                col_key = self.
+                col_key = self.get_col_key(i)
                col_names_to_remove.append(col_key.value)
                col_keys_to_remove.append(col_key)
 
@@ -1929,7 +2323,7 @@ class DataFrameTable(DataTable):
        # Remove all columns after the current column
        elif more == "after":
            for i in range(col_idx, len(self.columns)):
-                col_key = self.
+                col_key = self.get_col_key(i)
                col_names_to_remove.append(col_key.value)
                col_keys_to_remove.append(col_key)
 
@@ -2009,7 +2403,7 @@ class DataFrameTable(DataTable):
        # Move cursor to the new duplicated column
        self.move_cursor(column=col_idx + 1)
 
-        # self.notify(f"Duplicated column [$
+        # self.notify(f"Duplicated column [$success]{col_name}[/] as [$accent]{new_col_name}[/]", title="Duplicate")
 
    def do_delete_row(self, more: str = None) -> None:
        """Delete rows from the table and dataframe.
@@ -2056,7 +2450,7 @@ class DataFrameTable(DataTable):
        try:
            df = self.df.with_row_index(RIDX).filter(predicates)
        except Exception as e:
-            self.notify(f"Error deleting row(s): {e}", title="Delete", severity="error")
+            self.notify(f"Error deleting row(s): {e}", title="Delete", severity="error", timeout=10)
            self.histories.pop()  # Remove last history entry
            return
 
@@ -2075,7 +2469,7 @@ class DataFrameTable(DataTable):
 
        deleted_count = old_count - len(self.df)
        if deleted_count > 0:
-            self.notify(f"Deleted [$
+            self.notify(f"Deleted [$success]{deleted_count}[/] row(s)", title="Delete")
 
    def do_duplicate_row(self) -> None:
        """Duplicate the currently selected row, inserting it right after the current row."""
@@ -2147,7 +2541,8 @@ class DataFrameTable(DataTable):
 
        # Add to history
        self.add_history(
-            f"Moved column [$success]{col_name}[/] {direction} (swapped with [$success]{swap_name}[/])",
+            f"Moved column [$success]{col_name}[/] [$accent]{direction}[/] (swapped with [$success]{swap_name}[/])",
+            dirty=True,
        )
 
        # Swap columns in the table's internal column locations
@@ -2202,7 +2597,7 @@ class DataFrameTable(DataTable):
 
        # Add to history
        self.add_history(
-            f"Moved row [$success]{row_key.value}[/] {direction} (swapped with row [$success]{swap_key.value}[/])",
+            f"Moved row [$success]{row_key.value}[/] [$accent]{direction}[/] (swapped with row [$success]{swap_key.value}[/])",
            dirty=True,
        )
 
@@ -2213,8 +2608,8 @@ class DataFrameTable(DataTable):
            self._row_locations[row_key],
            self._row_locations[swap_key],
        ) = (
-            self.
-            self.
+            self.get_row_idx(swap_key),
+            self.get_row_idx(row_key),
        )
 
        self._update_count += 1
@@ -2254,12 +2649,12 @@ class DataFrameTable(DataTable):
        try:
            target_dtype = eval(dtype)
        except Exception:
-            self.notify(f"Invalid target data type: [$error]{dtype}[/]", title="Cast", severity="error")
+            self.notify(f"Invalid target data type: [$error]{dtype}[/]", title="Cast", severity="error", timeout=10)
            return
 
        if current_dtype == target_dtype:
            self.notify(
-                f"Column [$
+                f"Column [$warning]{col_name}[/] is already of type [$accent]{target_dtype}[/]",
                title="Cast",
                severity="warning",
            )
@@ -2267,7 +2662,7 @@ class DataFrameTable(DataTable):
 
        # Add to history
        self.add_history(
-            f"Cast column [$
+            f"Cast column [$success]{col_name}[/] from [$accent]{current_dtype}[/] to [$success]{target_dtype}[/]",
            dirty=True,
        )
 
@@ -2278,27 +2673,33 @@ class DataFrameTable(DataTable):
            # Recreate table for display
            self.setup_table()
 
-            self.notify(f"Cast column [$
+            self.notify(f"Cast column [$success]{col_name}[/] to [$accent]{target_dtype}[/]", title="Cast")
        except Exception as e:
            self.notify(
-                f"Error casting column [$
+                f"Error casting column [$error]{col_name}[/] to [$accent]{target_dtype}[/]",
                title="Cast",
                severity="error",
+                timeout=10,
            )
            self.log(f"Error casting column `{col_name}`: {str(e)}")
 
-    #
-    def
+    # Row selection
+    def do_select_row_cursor_value(self) -> None:
        """Search with cursor value in current column."""
        cidx = self.cursor_col_idx
+        col_name = self.cursor_col_name
 
        # Get the value of the currently selected cell
        term = NULL if self.cursor_value is None else str(self.cursor_value)
+        if self.cursor_value is None:
+            term = pl.col(col_name).is_null()
+        else:
+            term = pl.col(col_name) == self.cursor_value
 
-        self.
+        self.select_row((term, cidx, False, True))
 
-    def
-        """
+    def do_select_row_expr(self) -> None:
+        """Select rows by expression."""
        cidx = self.cursor_col_idx
 
        # Use current cell value as default search term
@@ -2307,26 +2708,33 @@ class DataFrameTable(DataTable):
        # Push the search modal screen
        self.app.push_screen(
            SearchScreen("Search", term, self.df, cidx),
-            callback=self.
+            callback=self.select_row,
        )
 
-    def
-        """
+    def select_row(self, result) -> None:
+        """Select rows by value or expression."""
        if result is None:
            return
 
        term, cidx, match_nocase, match_whole = result
        col_name = self.df.columns[cidx]
 
-
+        # Already a Polars expression
+        if isinstance(term, pl.Expr):
+            expr = term
+
+        # Null case
+        elif term == NULL:
            expr = pl.col(col_name).is_null()
 
-        #
+        # Expression in string form
        elif tentative_expr(term):
            try:
                expr = validate_expr(term, self.df.columns, cidx)
            except Exception as e:
-                self.notify(
+                self.notify(
+                    f"Error validating expression [$error]{term}[/]", title="Search", severity="error", timeout=10
+                )
                self.log(f"Error validating expression `{term}`: {str(e)}")
                return
 
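
Note: `do_select_row_cursor_value` and `select_row` above turn the cell under the cursor into a Polars predicate before filtering. A minimal standalone sketch of the null-versus-equality distinction, using made-up data and an assumed index column name (the package uses its own `RIDX` constant):

    import polars as pl

    df = pl.DataFrame({"city": ["Oslo", None, "Oslo", "Bergen"]})

    # Equality on a nullable column never matches null cells, so a null cursor
    # value has to become an explicit is_null() predicate, as in the diff above.
    value = None
    expr = pl.col("city").is_null() if value is None else pl.col("city") == value

    # Row indices of the matches, collected via a temporary row-index column.
    ridxs = (
        df.lazy()
        .with_row_index("ridx")  # assumed index name for this sketch
        .filter(expr)
        .select("ridx")
        .collect()
        .to_series()
        .to_list()
    )
    print(ridxs)  # -> [1]
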
@@ -2350,47 +2758,121 @@ class DataFrameTable(DataTable):
                term = f"(?i){term}"
                expr = pl.col(col_name).cast(pl.String).str.contains(term)
                self.notify(
-                    f"Error converting [$
+                    f"Error converting [$error]{term}[/] to [$accent]{dtype}[/]. Cast to string.",
                    title="Search",
                    severity="warning",
                )
 
        # Lazyframe for filtering
        lf = self.df.lazy().with_row_index(RIDX)
-        if
+        if self.has_hidden_rows:
            lf = lf.filter(self.visible_rows)
 
        # Apply filter to get matched row indices
        try:
            matches = set(lf.filter(expr).select(RIDX).collect().to_series().to_list())
        except Exception as e:
-            self.notify(
+            self.notify(
+                f"Error applying search filter `[$error]{term}[/]`", title="Search", severity="error", timeout=10
+            )
            self.log(f"Error applying search filter `{term}`: {str(e)}")
            return
 
        match_count = len(matches)
        if match_count == 0:
            self.notify(
-                f"No matches found for [$
+                f"No matches found for `[$warning]{term}[/]`. Try [$accent](?i)abc[/] for case-insensitive search.",
                title="Search",
                severity="warning",
            )
            return
 
+        message = f"Found [$success]{match_count}[/] matching row(s) for `[$accent]{term}[/]`"
+
        # Add to history
-        self.add_history(
+        self.add_history(message)
 
        # Update selected rows to include new matches
        for m in matches:
            self.selected_rows[m] = True
 
        # Show notification immediately, then start highlighting
-        self.notify(
+        self.notify(message, title="Select Row")
 
        # Recreate table for display
        self.setup_table()
 
-
+    def do_toggle_selections(self) -> None:
+        """Toggle selected rows highlighting on/off."""
+        # Add to history
+        self.add_history("Toggled row selection")
+
+        if self.has_hidden_rows:
+            # Some rows are hidden - invert only selected visible rows and clear selections for hidden rows
+            for i in range(len(self.selected_rows)):
+                if self.visible_rows[i]:
+                    self.selected_rows[i] = not self.selected_rows[i]
+                else:
+                    self.selected_rows[i] = False
+        else:
+            # Invert all selected rows
+            self.selected_rows = [not selected for selected in self.selected_rows]
+
+        # Check if we're highlighting or un-highlighting
+        if new_selected_count := self.selected_rows.count(True):
+            self.notify(f"Toggled selection for [$success]{new_selected_count}[/] rows", title="Toggle")
+
+        # Recreate table for display
+        self.setup_table()
+
+    def do_toggle_row_selection(self) -> None:
+        """Select/deselect current row."""
+        # Add to history
+        self.add_history("Toggled row selection")
+
+        ridx = self.cursor_row_idx
+        self.selected_rows[ridx] = not self.selected_rows[ridx]
+
+        row_key = str(ridx)
+        match_cols = self.matches.get(ridx, set())
+        for col_idx, col in enumerate(self.ordered_columns):
+            col_key = col.key
+            cell_text: Text = self.get_cell(row_key, col_key)
+
+            if self.selected_rows[ridx] or (col_idx in match_cols):
+                cell_text.style = HIGHLIGHT_COLOR
+            else:
+                # Reset to default style based on dtype
+                dtype = self.df.dtypes[col_idx]
+                dc = DtypeConfig(dtype)
+                cell_text.style = dc.style
+
+            self.update_cell(row_key, col_key, cell_text)
+
+    def do_clear_selections_and_matches(self) -> None:
+        """Clear all selected rows and matches without removing them from the dataframe."""
+        # Check if any selected rows or matches
+        if not any(self.selected_rows) and not self.matches:
+            self.notify("No selections to clear", title="Clear", severity="warning")
+            return
+
+        row_count = sum(
+            1 if (selected or idx in self.matches) else 0 for idx, selected in enumerate(self.selected_rows)
+        )
+
+        # Add to history
+        self.add_history("Cleared all selected rows")
+
+        # Clear all selections
+        self.selected_rows = [False] * len(self.df)
+        self.matches = defaultdict(set)
+
+        # Recreate table for display
+        self.setup_table()
+
+        self.notify(f"Cleared selections for [$success]{row_count}[/] rows", title="Clear")
+
+    # Find & Replace
    def find_matches(
        self, term: str, cidx: int | None = None, match_nocase: bool = False, match_whole: bool = False
    ) -> dict[int, set[int]]:
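
Note: the relocated `do_toggle_selections` above inverts the selection only for visible rows and drops it for hidden ones. A small pure-Python sketch of that rule with hand-rolled boolean masks (the names are illustrative, not the widget's attributes):

    # Parallel per-row masks: current selection state and current visibility.
    selected = [True, False, True, False]
    visible = [True, True, False, False]

    # Visible rows get inverted; hidden rows are always deselected.
    toggled = [(not sel) if vis else False for sel, vis in zip(selected, visible)]
    print(toggled)  # -> [False, True, False, False]
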
@@ -2399,6 +2881,8 @@ class DataFrameTable(DataTable):
        Args:
            term: The search term (can be NULL, expression, or plain text)
            cidx: Column index for column-specific search. If None, searches all columns.
+            match_nocase: Whether to perform case-insensitive matching (for string terms)
+            match_whole: Whether to match the whole cell content (for string terms)
 
        Returns:
            Dictionary mapping row indices to sets of column indices containing matches.
@@ -2412,7 +2896,7 @@ class DataFrameTable(DataTable):
 
        # Lazyframe for filtering
        lf = self.df.lazy().with_row_index(RIDX)
-        if
+        if self.has_hidden_rows:
            lf = lf.filter(self.visible_rows)
 
        # Determine which columns to search: single column or all columns
@@ -2430,7 +2914,9 @@ class DataFrameTable(DataTable):
                try:
                    expr = validate_expr(term, self.df.columns, col_idx)
                except Exception as e:
-                    self.notify(
+                    self.notify(
+                        f"Error validating expression [$error]{term}[/]", title="Find", severity="error", timeout=10
+                    )
                    self.log(f"Error validating expression `{term}`: {str(e)}")
                    return matches
            else:
@@ -2444,7 +2930,7 @@ class DataFrameTable(DataTable):
            try:
                matched_ridxs = lf.filter(expr).select(RIDX).collect().to_series().to_list()
            except Exception as e:
-                self.notify(f"Error applying filter: {expr}", title="Find", severity="error")
+                self.notify(f"Error applying filter: [$error]{expr}[/]", title="Find", severity="error", timeout=10)
                self.log(f"Error applying filter: {str(e)}")
                return matches
 
@@ -2495,27 +2981,27 @@ class DataFrameTable(DataTable):
        try:
            matches = self.find_matches(term, cidx, match_nocase, match_whole)
        except Exception as e:
-            self.notify(f"Error finding matches for [$error]{term}[/]", title="Find", severity="error")
+            self.notify(f"Error finding matches for `[$error]{term}[/]`", title="Find", severity="error", timeout=10)
            self.log(f"Error finding matches for `{term}`: {str(e)}")
            return
 
        if not matches:
            self.notify(
-                f"No matches found for [$
+                f"No matches found for `[$warning]{term}[/]` in current column. Try [$accent](?i)abc[/] for case-insensitive search.",
                title="Find",
                severity="warning",
            )
            return
 
        # Add to history
-        self.add_history(f"Found [$
+        self.add_history(f"Found `[$success]{term}[/]` in column [$accent]{col_name}[/]")
 
        # Add to matches and count total
        match_count = sum(len(col_idxs) for col_idxs in matches.values())
        for ridx, col_idxs in matches.items():
            self.matches[ridx].update(col_idxs)
 
-        self.notify(f"Found [$
+        self.notify(f"Found [$success]{match_count}[/] matches for `[$accent]{term}[/]`", title="Find")
 
        # Recreate table for display
        self.setup_table()
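
Note: when a plain-text term cannot be parsed into the column's dtype, the search path above falls back to casting the column to string and matching it as a regex; prefixing `(?i)` gives the case-insensitive behaviour the warning messages suggest. A hedged standalone illustration (data and the `ridx` column name are made up):

    import polars as pl

    df = pl.DataFrame({"name": ["Alice", "alina", "Bob"], "age": [30, 25, 40]})

    # String-cast plus regex containment; (?i) makes the pattern case-insensitive.
    term = "(?i)ali"
    matches = (
        df.lazy()
        .with_row_index("ridx")
        .filter(pl.col("name").cast(pl.String).str.contains(term))
        .select("ridx")
        .collect()
        .to_series()
        .to_list()
    )
    print(matches)  # -> [0, 1]
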
@@ -2529,20 +3015,20 @@ class DataFrameTable(DataTable):
        try:
            matches = self.find_matches(term, cidx=None, match_nocase=match_nocase, match_whole=match_whole)
        except Exception as e:
-            self.notify(f"Error finding matches for [$error]{term}[/]", title="Find", severity="error")
+            self.notify(f"Error finding matches for `[$error]{term}[/]`", title="Find", severity="error", timeout=10)
            self.log(f"Error finding matches for `{term}`: {str(e)}")
            return
 
        if not matches:
            self.notify(
-                f"No matches found for [$
+                f"No matches found for `[$warning]{term}[/]` in any column. Try [$accent](?i)abc[/] for case-insensitive search.",
                title="Global Find",
                severity="warning",
            )
            return
 
        # Add to history
-        self.add_history(f"Found [$success]{term}[/] across all columns")
+        self.add_history(f"Found `[$success]{term}[/]` across all columns")
 
        # Add to matches and count total
        match_count = sum(len(col_idxs) for col_idxs in matches.values())
@@ -2550,7 +3036,8 @@ class DataFrameTable(DataTable):
            self.matches[ridx].update(col_idxs)
 
        self.notify(
-            f"Found [$
+            f"Found [$success]{match_count}[/] matches for `[$accent]{term}[/]` across all columns",
+            title="Global Find",
        )
 
        # Recreate table for display
@@ -2650,7 +3137,6 @@ class DataFrameTable(DataTable):
            last_ridx = selected_row_indices[-1]
            self.move_cursor_to(last_ridx, self.cursor_col_idx)
 
-    # Replace
    def do_replace(self) -> None:
        """Open replace screen for current column."""
        # Push the replace modal screen
@@ -2700,7 +3186,7 @@ class DataFrameTable(DataTable):
 
        # Add to history
        self.add_history(
-            f"Replaced [$
+            f"Replaced [$success]{term_find}[/] with [$accent]{term_replace}[/] in column [$success]{col_name}[/]"
        )
 
        # Update matches
@@ -2738,9 +3224,10 @@ class DataFrameTable(DataTable):
 
        except Exception as e:
            self.notify(
-                f"Error replacing [$
+                f"Error replacing [$error]{term_find}[/] with [$accent]{term_replace}[/]",
                title="Replace",
                severity="error",
+                timeout=10,
            )
            self.log(f"Error replacing `{term_find}` with `{term_replace}`: {str(e)}")
 
@@ -2750,7 +3237,7 @@ class DataFrameTable(DataTable):
        self.app.push_screen(
            ConfirmScreen(
                "Replace All",
-                label=f"Replace [$success]{term_find}[/] with [$success]{term_replace
+                label=f"Replace `[$success]{term_find}[/]` with `[$success]{term_replace}[/]` for all [$accent]{state.total_occurrence}[/] occurrences?",
            ),
            callback=self.handle_replace_all_confirmation,
        )
@@ -2816,7 +3303,7 @@ class DataFrameTable(DataTable):
 
        col_name = "all columns" if state.cidx is None else self.df.columns[state.cidx]
        self.notify(
-            f"Replaced [$
+            f"Replaced [$success]{state.replaced_occurrence}[/] of [$accent]{state.total_occurrence}[/] in [$s]{col_name}[/]",
            title="Replace",
        )
 
@@ -2827,9 +3314,10 @@ class DataFrameTable(DataTable):
            self.show_next_replace_confirmation()
        except Exception as e:
            self.notify(
-                f"Error replacing [$
+                f"Error replacing [$error]{term_find}[/] with [$accent]{term_replace}[/]",
                title="Replace",
                severity="error",
+                timeout=10,
            )
            self.log(f"Error in interactive replace: {str(e)}")
 
@@ -2839,7 +3327,7 @@ class DataFrameTable(DataTable):
        if state.done:
            # All done - show final notification
            col_name = "all columns" if state.cidx is None else self.df.columns[state.cidx]
-            msg = f"Replaced [$
+            msg = f"Replaced [$success]{state.replaced_occurrence}[/] of [$accent]{state.total_occurrence}[/] in [$success]{col_name}[/]"
            if state.skipped_occurrence > 0:
                msg += f", [$warning]{state.skipped_occurrence}[/] skipped"
            self.notify(msg, title="Replace")
@@ -2857,7 +3345,7 @@ class DataFrameTable(DataTable):
        state.current_occurrence += 1
 
        # Show confirmation
-        label = f"Replace [$warning]{state.term_find}[/] with [$success]{state.term_replace}[/] ({state.current_occurrence} of {state.total_occurrence})?"
+        label = f"Replace `[$warning]{state.term_find}[/]` with `[$success]{state.term_replace}[/]` ({state.current_occurrence} of {state.total_occurrence})?"
 
        self.app.push_screen(
            ConfirmScreen("Replace", label=label, maybe="Skip"),
@@ -2932,105 +3420,7 @@ class DataFrameTable(DataTable):
        # Show next confirmation
        self.show_next_replace_confirmation()
 
-    #
-    def do_toggle_selections(self) -> None:
-        """Toggle selected rows highlighting on/off."""
-        # Add to history
-        self.add_history("Toggled row selection")
-
-        if False in self.visible_rows:
-            # Some rows are hidden - invert only selected visible rows and clear selections for hidden rows
-            for i in range(len(self.selected_rows)):
-                if self.visible_rows[i]:
-                    self.selected_rows[i] = not self.selected_rows[i]
-                else:
-                    self.selected_rows[i] = False
-        else:
-            # Invert all selected rows
-            self.selected_rows = [not selected for selected in self.selected_rows]
-
-        # Check if we're highlighting or un-highlighting
-        if new_selected_count := self.selected_rows.count(True):
-            self.notify(f"Toggled selection for [$accent]{new_selected_count}[/] rows", title="Toggle")
-
-        # Recreate table for display
-        self.setup_table()
-
-    def do_toggle_row_selection(self) -> None:
-        """Select/deselect current row."""
-        # Add to history
-        self.add_history("Toggled row selection")
-
-        ridx = self.cursor_row_idx
-        self.selected_rows[ridx] = not self.selected_rows[ridx]
-
-        row_key = str(ridx)
-        match_cols = self.matches.get(ridx, set())
-        for col_idx, col in enumerate(self.ordered_columns):
-            col_key = col.key
-            cell_text: Text = self.get_cell(row_key, col_key)
-
-            if self.selected_rows[ridx] or (col_idx in match_cols):
-                cell_text.style = HIGHLIGHT_COLOR
-            else:
-                # Reset to default style based on dtype
-                dtype = self.df.dtypes[col_idx]
-                dc = DtypeConfig(dtype)
-                cell_text.style = dc.style
-
-            self.update_cell(row_key, col_key, cell_text)
-
-    def do_clear_selections_and_matches(self) -> None:
-        """Clear all selected rows and matches without removing them from the dataframe."""
-        # Check if any selected rows or matches
-        if not any(self.selected_rows) and not self.matches:
-            self.notify("No selections to clear", title="Clear", severity="warning")
-            return
-
-        row_count = sum(
-            1 if (selected or idx in self.matches) else 0 for idx, selected in enumerate(self.selected_rows)
-        )
-
-        # Add to history
-        self.add_history("Cleared all selected rows")
-
-        # Clear all selections
-        self.selected_rows = [False] * len(self.df)
-        self.matches = defaultdict(set)
-
-        # Recreate table for display
-        self.setup_table()
-
-        self.notify(f"Cleared selections for [$accent]{row_count}[/] rows", title="Clear")
-
-    # Filter & View
-    def do_filter_rows(self) -> None:
-        """Keep only the rows with selections and matches, and remove others."""
-        if not any(self.selected_rows) and not self.matches:
-            self.notify("No rows to filter", title="Filter", severity="warning")
-            return
-
-        filter_expr = [
-            True if (selected or ridx in self.matches) else False for ridx, selected in enumerate(self.selected_rows)
-        ]
-
-        # Add to history
-        self.add_history("Filtered to selections and matches", dirty=True)
-
-        # Apply filter to dataframe with row indices
-        df_filtered = self.df.with_row_index(RIDX).filter(filter_expr)
-
-        # Update dataframe
-        self.reset_df(df_filtered.drop(RIDX))
-
-        # Recreate table for display
-        self.setup_table()
-
-        self.notify(
-            f"Filtered rows with selections or matches and removed others. Now showing [$accent]{len(self.df)}[/] rows",
-            title="Filter",
-        )
-
+    # View & Filter
    def do_view_rows(self) -> None:
        """View rows.
 
@@ -3039,6 +3429,7 @@ class DataFrameTable(DataTable):
        """
 
        cidx = self.cursor_col_idx
+        col_name = self.df.columns[cidx]
 
        # If there are rows with selections or matches, use those
        if any(self.selected_rows) or self.matches:
@@ -3049,7 +3440,7 @@ class DataFrameTable(DataTable):
        else:
            ridx = self.cursor_row_idx
            value = self.df.item(ridx, cidx)
-            term =
+            term = pl.col(col_name).is_null() if value is None else pl.col(col_name) == value
 
        self.view_rows((term, cidx, False, True))
 
@@ -3073,17 +3464,22 @@ class DataFrameTable(DataTable):
 
        col_name = self.df.columns[cidx]
 
-
-
+        # Support for polars expression
+        if isinstance(term, pl.Expr):
+            expr = term
+        # Support for list of booleans (selected rows)
        elif isinstance(term, (list, pl.Series)):
-            # Support for list of booleans (selected rows)
            expr = term
+        elif term == NULL:
+            expr = pl.col(col_name).is_null()
        elif tentative_expr(term):
-            # Support for polars
+            # Support for polars expression in string form
            try:
                expr = validate_expr(term, self.df.columns, cidx)
            except Exception as e:
-                self.notify(
+                self.notify(
+                    f"Error validating expression [$error]{term}[/]", title="Filter", severity="error", timeout=10
+                )
                self.log(f"Error validating expression `{term}`: {str(e)}")
                return
        else:
@@ -3112,7 +3508,7 @@ class DataFrameTable(DataTable):
        lf = self.df.lazy().with_row_index(RIDX)
 
        # Apply existing visibility filter first
-        if
+        if self.has_hidden_rows:
            lf = lf.filter(self.visible_rows)
 
        expr_str = "boolean list or series" if isinstance(expr, (list, pl.Series)) else str(expr)
@@ -3122,7 +3518,7 @@ class DataFrameTable(DataTable):
            df_filtered = lf.filter(expr).collect()
        except Exception as e:
            self.histories.pop()  # Remove last history entry
-            self.notify(f"Error applying filter [$error]{expr_str}[/]", title="Filter", severity="error")
+            self.notify(f"Error applying filter [$error]{expr_str}[/]", title="Filter", severity="error", timeout=10)
            self.log(f"Error applying filter `{expr_str}`: {str(e)}")
            return
 
@@ -3144,7 +3540,50 @@ class DataFrameTable(DataTable):
        # Recreate table for display
        self.setup_table()
 
-        self.notify(f"Filtered to [$
+        self.notify(f"Filtered to [$success]{matched_count}[/] matching rows", title="Filter")
+
+    def do_filter_rows(self) -> None:
+        """Keep only the rows with selections and cell matches, and remove others."""
+        if any(self.selected_rows) or self.matches:
+            message = "Filtered to rows with selection and cell matches (other rows removed)"
+            filter_expr = [
+                True if (selected or ridx in self.matches) else False
+                for ridx, selected in enumerate(self.selected_rows)
+            ]
+        else:  # Search cursor value in current column
+            message = "Filtered to rows matching cursor value (other rows removed)"
+            cidx = self.cursor_col_idx
+            col_name = self.df.columns[cidx]
+            value = self.cursor_value
+
+            if value is None:
+                filter_expr = pl.col(col_name).is_null()
+            else:
+                filter_expr = pl.col(col_name) == value
+
+        # Add to history
+        self.add_history(message, dirty=True)
+
+        # Apply filter to dataframe with row indices
+        df_filtered = self.df.with_row_index(RIDX).filter(filter_expr)
+
+        # Update selected rows
+        selected_rows = [self.selected_rows[df_filtered[RIDX][ridx]] for ridx in range(len(df_filtered))]
+
+        # Update matches
+        matches = {ridx: self.matches[df_filtered[RIDX][ridx]] for ridx in range(len(df_filtered))}
+
+        # Update dataframe
+        self.reset_df(df_filtered.drop(RIDX))
+
+        # Restore selected rows and matches
+        self.selected_rows = selected_rows
+        self.matches = matches
+
+        # Recreate table for display
+        self.setup_table()
+
+        self.notify(f"{message}. Now showing [$success]{len(self.df)}[/] rows", title="Filter")
 
    # Copy & Save
    def do_copy_to_clipboard(self, content: str, message: str) -> None:
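
Note: the rewritten `do_filter_rows` keeps per-row state consistent by carrying a temporary row-index column through the filter and re-indexing `selected_rows` and `matches` against it. A reduced sketch of that bookkeeping (the data and the `RIDX` name are placeholders; the real code uses a defaultdict for matches):

    import polars as pl

    RIDX = "ridx"
    df = pl.DataFrame({"x": [10, 20, 30, 40]})
    selected_rows = [True, False, True, False]
    matches = {1: {0}, 2: {0}}

    # Filter while remembering each surviving row's original position.
    df_filtered = df.with_row_index(RIDX).filter(pl.col("x") >= 20)

    # Re-index the old per-row state onto the shorter, filtered frame.
    selected_rows = [selected_rows[df_filtered[RIDX][i]] for i in range(len(df_filtered))]
    matches = {i: matches.get(df_filtered[RIDX][i], set()) for i in range(len(df_filtered))}

    print(df_filtered.drop(RIDX), selected_rows, matches)
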
@@ -3168,7 +3607,7 @@ class DataFrameTable(DataTable):
            )
            self.notify(message, title="Clipboard")
        except FileNotFoundError:
-            self.notify("Error copying to clipboard", title="Clipboard", severity="error")
+            self.notify("Error copying to clipboard", title="Clipboard", severity="error", timeout=10)
 
    def do_save_to_file(
        self, title: str = "Save to File", all_tabs: bool | None = None, task_after_save: str | None = None
@@ -3191,13 +3630,13 @@ class DataFrameTable(DataTable):
        """Handle result from SaveFileScreen."""
        if result is None:
            return
-        filename, all_tabs = result
+        filename, all_tabs, overwrite_prompt = result
 
        # Whether to save all tabs (for Excel files)
        self._all_tabs = all_tabs
 
        # Check if file exists
-        if Path(filename).exists():
+        if overwrite_prompt and Path(filename).exists():
            self._pending_filename = filename
            self.app.push_screen(
                ConfirmScreen("File already exists. Overwrite?"),
@@ -3221,24 +3660,39 @@ class DataFrameTable(DataTable):
        """Actually save the dataframe to a file."""
        filepath = Path(filename)
        ext = filepath.suffix.lower()
+        if ext.endswith(".gz"):
+            ext = Path(filename).with_suffix("").suffix.lower()
+
+        fmt = ext.removeprefix(".")
+        if fmt not in SUPPORTED_FORMATS:
+            self.notify(
+                f"Unsupported file format [$success]{fmt}[/]. Use [$accent]CSV[/] as fallback. Supported formats: {', '.join(SUPPORTED_FORMATS)}",
+                title="Save to File",
+                severity="warning",
+            )
+            fmt = "csv"
 
        # Add to history
        self.add_history(f"Saved dataframe to [$success]{filename}[/]")
 
        try:
-            if
-            self.
-            elif
+            if fmt == "csv":
+                self.df.write_csv(filename)
+            elif fmt in ("tsv", "tab"):
                self.df.write_csv(filename, separator="\t")
-            elif
+            elif fmt in ("xlsx", "xls"):
+                self.save_excel(filename)
+            elif fmt == "json":
                self.df.write_json(filename)
-            elif
+            elif fmt == "ndjson":
+                self.df.write_ndjson(filename)
+            elif fmt == "parquet":
                self.df.write_parquet(filename)
-            else:
+            else:  # Fallback to CSV
                self.df.write_csv(filename)
 
-
-            self.filename = filename
+            # Update current filename
+            self.filename = filename
 
            # Reset dirty flag after save
            if self._all_tabs:
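
Note: the save path now normalises the file extension (including a trailing `.gz`) to a bare format name and falls back to CSV when the format is not recognised. A standalone sketch of that detection step; `SUPPORTED_FORMATS` here is a stand-in for the constant imported from `.common`, whose exact contents may differ:

    from pathlib import Path

    # Assumed format list for this sketch only.
    SUPPORTED_FORMATS = ("csv", "tsv", "tab", "xlsx", "xls", "json", "ndjson", "parquet")

    def detect_format(filename: str) -> str:
        """Strip an optional .gz, normalise to a bare format name, fall back to csv."""
        ext = Path(filename).suffix.lower()
        if ext.endswith(".gz"):
            ext = Path(filename).with_suffix("").suffix.lower()
        fmt = ext.removeprefix(".")
        return fmt if fmt in SUPPORTED_FORMATS else "csv"

    print(detect_format("data.csv.gz"))   # -> csv
    print(detect_format("report.xlsx"))   # -> xlsx
    print(detect_format("notes.txt"))     # -> csv (fallback)
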
@@ -3248,10 +3702,11 @@ class DataFrameTable(DataTable):
            else:
                self.dirty = False
 
-            if self
-            self.
-
-            self.
+            if hasattr(self, "_task_after_save"):
+                if self._task_after_save == "close_tab":
+                    self.app.do_close_tab()
+                elif self._task_after_save == "quit_app":
+                    self.app.exit()
 
            # From ConfirmScreen callback, so notify accordingly
            if self._all_tabs:
@@ -3260,7 +3715,7 @@ class DataFrameTable(DataTable):
                self.notify(f"Saved current tab to [$success]{filename}[/]", title="Save to File")
 
        except Exception as e:
-            self.notify(f"Error saving [$error]{filename}[/]", title="Save to File", severity="error")
+            self.notify(f"Error saving [$error]{filename}[/]", title="Save to File", severity="error", timeout=10)
            self.log(f"Error saving file `{filename}`: {str(e)}")
 
    def save_excel(self, filename: str) -> None:
@@ -3329,7 +3784,7 @@ class DataFrameTable(DataTable):
        # Execute the SQL query
        try:
            lf = self.df.lazy().with_row_index(RIDX)
-            if
+            if self.has_hidden_rows:
                lf = lf.filter(self.visible_rows)
 
            df_filtered = lf.sql(sql).collect()
@@ -3341,7 +3796,7 @@ class DataFrameTable(DataTable):
            return
 
        # Add to history
-        self.add_history(f"SQL Query:\n[$
+        self.add_history(f"SQL Query:\n[$success]{sql}[/]", dirty=not view)
 
        if view:
            # Just view - do not modify the dataframe
@@ -3355,9 +3810,18 @@ class DataFrameTable(DataTable):
                col_name for col_name in self.df.columns if col_name not in filtered_col_names
            }
        else:  # filter - modify the dataframe
-
-
-
+            # Update selected rows
+            selected_rows = [self.selected_rows[df_filtered[RIDX][ridx]] for ridx in range(len(df_filtered))]
+
+            # Update matches
+            matches = {ridx: self.matches[df_filtered[RIDX][ridx]] for ridx in range(len(df_filtered))}
+
+            # Update dataframe
+            self.reset_df(df_filtered.drop(RIDX))
+
+            # Restore selected rows and matches
+            self.selected_rows = selected_rows
+            self.matches = matches
        except Exception as e:
            self.notify(f"Error executing SQL query [$error]{sql}[/]", title="SQL Query", severity="error", timeout=10)
            self.log(f"Error executing SQL query `{sql}`: {str(e)}")
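
Note: the SQL filter path runs the query against a lazy frame that carries a row-index column, so selections and matches can be re-mapped onto the filtered result exactly as in `do_filter_rows`. A minimal sketch of the query step, following the diff's use of `LazyFrame.sql`, where the calling frame is addressable as `self` in the query (data and names are illustrative):

    import polars as pl

    RIDX = "ridx"  # assumed index name; the package defines its own RIDX constant
    df = pl.DataFrame({"name": ["a", "b", "c"], "score": [1, 5, 9]})

    # Keep a row-index column so per-row state can be re-indexed afterwards.
    lf = df.lazy().with_row_index(RIDX)
    df_filtered = lf.sql("SELECT * FROM self WHERE score > 3").collect()

    print(df_filtered.drop(RIDX))
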