listpick 0.1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- list_picker/__init__.py +0 -0
- list_picker/list_picker_app.py +2076 -0
- list_picker/ui/__init__.py +0 -0
- list_picker/ui/help_screen.py +91 -0
- list_picker/ui/input_field.py +165 -0
- list_picker/ui/keys.py +228 -0
- list_picker/ui/list_picker_colours.py +258 -0
- list_picker/utils/__init__.py +0 -0
- list_picker/utils/clipboard_operations.py +33 -0
- list_picker/utils/dump.py +78 -0
- list_picker/utils/filtering.py +32 -0
- list_picker/utils/generate_data.py +74 -0
- list_picker/utils/options_selectors.py +67 -0
- list_picker/utils/search_and_filter_utils.py +79 -0
- list_picker/utils/searching.py +76 -0
- list_picker/utils/sorting.py +120 -0
- list_picker/utils/table_to_list_of_lists.py +188 -0
- list_picker/utils/utils.py +251 -0
- listpick-0.1.4.0.dist-info/METADATA +186 -0
- listpick-0.1.4.0.dist-info/RECORD +22 -0
- listpick-0.1.4.0.dist-info/WHEEL +5 -0
- listpick-0.1.4.0.dist-info/top_level.txt +1 -0
list_picker/utils/searching.py
@@ -0,0 +1,76 @@
#!/bin/python
import re
from typing import Tuple

from list_picker.utils.search_and_filter_utils import apply_filter, tokenise


def search(query: str, indexed_items: list[Tuple[int, list[str]]], highlights: list[dict]=[], cursor_pos:int=0, unselectable_indices:list=[], reverse:bool=False, continue_search:bool=False) -> Tuple[bool, int, int, int, list[dict]]:
    """
    Search the indexed items and determine which rows match the query.

    Accepts:
        regular expressions
        --# to specify the column to match
        --i to make the match case-sensitive (matching is case-insensitive by default)
        --v to invert the match

    Returns a tuple consisting of the following:
        return_val: True if a search match was found
        cursor_pos: the position of the next search match
        search_index: if there are x matches, tells us we are on the nth of x
        search_count: the number of matches
        highlights: the existing highlights list with the search highlights appended,
            i.e., for each matched token we append the following to be displayed in draw_screen:
                {
                    "match": token,
                    "field": "all",
                    "color": 10,
                    "type": "search",
                }
    """
    # Clear previous search highlights
    highlights = [highlight for highlight in highlights if "type" not in highlight or highlight["type"] != "search"]

    # Ensure we are searching from our current position forwards
    searchables = list(range(cursor_pos+1, len(indexed_items))) + list(range(cursor_pos+1))
    if reverse:
        searchables = (list(range(cursor_pos, len(indexed_items))) + list(range(cursor_pos)))[::-1]

    filters = tokenise(query)
    if not filters:
        return False, cursor_pos, 0, 0, highlights

    found = False
    search_count = 0
    search_list = []

    for i in searchables:
        if apply_filter(indexed_items[i][1], filters, add_highlights=True, highlights=highlights):
            new_pos = i
            if new_pos in unselectable_indices: continue
            search_count += 1
            search_list.append(i)

            # Move the cursor to the first match encountered
            if not found:
                cursor_pos = new_pos
                found = True

    if search_list:
        search_index = sorted(search_list).index(cursor_pos) + 1
    else:
        search_index = 0

    return bool(search_list), cursor_pos, search_index, search_count, highlights
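A minimal usage sketch for search(), not part of the package: it assumes the wheel is installed and that tokenise/apply_filter (defined in list_picker/utils/search_and_filter_utils.py, not shown in this diff) handle the query tokens; the sample rows and query are made up for illustration.

from list_picker.utils.searching import search

rows = [(0, ["alpha", "10KB"]), (1, ["beta", "2MB"]), (2, ["gamma", "512B"])]
found, cursor_pos, search_index, search_count, highlights = search("a", rows)
if found:
    print(f"match {search_index}/{search_count} at row {cursor_pos}")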
list_picker/utils/sorting.py
@@ -0,0 +1,120 @@
import re
from datetime import datetime
from typing import Tuple

def parse_numerical(value: str) -> float:
    """ Match the first number in the string and return it as a float. If there is no number then return INF. """
    try:
        match = re.search(r'(\d+(\.\d+)?)', value)
        if match:
            return float(match.group(1))
        return float('inf')  # Default for non-numerical values
    except ValueError:
        return float('inf')

def parse_size(value: str) -> float:
    """ Match a size in the string and return it in bytes as a float. If there is no match then return INF. """
    size_units = {
        'B': 1,
        'KB': 1024,
        'MB': 1024**2,
        'GB': 1024**3,
        'TB': 1024**4,
        'PB': 1024**5,
        'K': 1024,
        'M': 1024**2,
        'G': 1024**3,
        'T': 1024**4,
        'P': 1024**5
    }

    match = re.search(r'(\d+(\.\d+)?)(\s*([KMGTPEB][B]?)\s*)', value, re.IGNORECASE)

    if match:
        number = float(match.group(1))
        unit = match.group(4).upper() if match.group(4) else 'B'
        unit = re.sub(r'[^\w]', '', unit)  # Remove non-alphanumeric characters
        return number * size_units.get(unit, 1)
    return float('inf')  # Default for non-size values

def time_to_seconds(time_str: str) -> float:
    """ Convert a time string to total seconds. """
    if time_str.strip().upper() == "INF":
        return float('inf')  # Assign infinity for "INF"

    time_units = {
        'year': 365 * 24 * 3600,
        'years': 365 * 24 * 3600,
        'day': 24 * 3600,
        'days': 24 * 3600,
        'hour': 3600,
        'hours': 3600,
        'minute': 60,
        'minutes': 60,
        'sec': 1,
        'secs': 1,
        's': 1,
        'min': 60,
    }

    total_seconds = 0
    try:
        tokens = time_str.split()
        # Tokens come in "<number> <unit>" pairs
        for i in range(0, len(tokens), 2):
            if i + 1 < len(tokens):
                num = int(tokens[i])
                unit = tokens[i + 1]
                total_seconds += num * time_units.get(unit, 0)
    except:
        return float('inf')

    return total_seconds

def time_sort(time_str: str) -> datetime:
    """ If there is a date in the string then convert it with strptime. If no format matches then return 00:00 (as datetime). """
    formats = [
        "%Y-%m-%d %H:%M",        # "2021-03-16 15:30"
        "%Y-%m-%d",              # "2021-03-16"
        "%Y/%m/%d",              # "2021/03/16"
        "%d/%m/%Y",              # "05/03/2024"
        "%A %d %b %Y %H:%M:%S",  # "Saturday 01 Feb 2025 21:19:47"
        "%a %d %b %Y %H:%M:%S",  # "Sat 01 Feb 2025 21:19:47"
        "%d/%m/%Y",              # "10/12/2023"
        "%d/%m/%y",              # "1/1/23"
        "%H:%M",                 # "04:30"
        "%H:%M:%S",              # "04:30:23"
    ]

    for fmt in formats:
        try:
            return datetime.strptime(time_str, fmt)
        except ValueError:
            pass

    return datetime.strptime("00:00", "%H:%M")

def sort_items(indexed_items: list[Tuple[int,list[str]]], sort_method:int=0, sort_column:int=0, sort_reverse:bool=False):
    """ Sort indexed_items in place based on sort_method applied to sort_column. """
    SORT_METHODS = ['original', 'lexical', 'LEXICAL', 'alphanum', 'ALPHANUM', 'time', 'numerical', 'size']
    if sort_column is not None:
        try:
            if SORT_METHODS[sort_method] == 'numerical':
                indexed_items.sort(key=lambda x: parse_numerical(x[1][sort_column]), reverse=sort_reverse)
            elif SORT_METHODS[sort_method] == 'original':
                indexed_items.sort(key=lambda x: x[0], reverse=sort_reverse)
            elif SORT_METHODS[sort_method] == 'lexical':
                # The leading 0/1 flag groups empty cells separately from non-empty ones
                indexed_items.sort(key=lambda x: (1 if x[1][sort_column].strip() == "" else 0, x[1][sort_column].lower()), reverse=sort_reverse)
            elif SORT_METHODS[sort_method] == 'LEXICAL':
                indexed_items.sort(key=lambda x: (1 if x[1][sort_column].strip() == "" else 0, x[1][sort_column]), reverse=sort_reverse)
            elif SORT_METHODS[sort_method] == 'alphanum':
                # Non-alphanumeric characters are remapped past 'z' so they sort after letters
                indexed_items.sort(key=lambda x: (1 if x[1][sort_column].strip() == "" else 0, "".join([chr(ord('z')+ord(c)) if not c.isalnum() else c.lower() for c in x[1][sort_column]])))
            elif SORT_METHODS[sort_method] == 'time':
                indexed_items.sort(key=lambda x: time_sort(x[1][sort_column]))
            elif SORT_METHODS[sort_method] == 'ALPHANUM':
                indexed_items.sort(key=lambda x: (1 if x[1][sort_column].strip() == "" else 0, "".join([chr(ord('z')+ord(c)) if not c.isalnum() else c for c in x[1][sort_column]])))
            elif SORT_METHODS[sort_method] == 'size':
                indexed_items.sort(key=lambda x: parse_size(x[1][sort_column]), reverse=sort_reverse)
        except IndexError:
            pass  # Handle cases where sort_column is out of range
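A quick illustrative sketch of sort_items(), not part of the package; the sample rows are invented, and the index 7 corresponds to 'size' in SORT_METHODS above.

from list_picker.utils.sorting import sort_items

rows = [(0, ["a.log", "1.5MB"]), (1, ["b.log", "300KB"]), (2, ["c.log", "2GB"])]
sort_items(rows, sort_method=7, sort_column=1)      # sorts in place by parsed size
print([name for _, (name, _) in rows])              # ['b.log', 'a.log', 'c.log']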
list_picker/utils/table_to_list_of_lists.py
@@ -0,0 +1,188 @@
#!/bin/python
import sys
import csv
import json
import pandas as pd
from openpyxl import load_workbook
from io import StringIO
import argparse
from typing import Tuple, Iterable, Optional
import dill as pickle
import os

def read_file_content(file_path: str) -> str:
    """ Read the contents of a file and return them as a single string. """
    with open(file_path, 'r') as file:
        return file.read()

def strip_whitespace(item: Iterable) -> Iterable:
    """ Strip whitespace from a string or, recursively, from a list of strings. """
    if isinstance(item, list):
        return [strip_whitespace(sub_item) for sub_item in item]
    elif isinstance(item, str):
        return item.strip()
    else:
        return item


def table_to_list(input_arg: str, delimiter:str='\t', file_type:Optional[str]=None) -> Tuple[list[list[str]], list[str]]:
    """
    Convert tabular data to a list of rows. input_arg is either a file path or
    '--stdin'/'--stdin2' to read the data from standard input.
    Currently accepts: csv, tsv, json, xlsx, ods, pkl
    Returns (table_data, header).
    """
    table_data = []

    def parse_csv_like(data:str, delimiter:str) -> list[list[str]]:
        """ Convert value-separated data (e.g., CSV or TSV) to a list of lists. """
        try:
            reader = csv.reader(StringIO(data), delimiter=delimiter)
            return [row for row in reader]
        except Exception as e:
            print(f"Error reading CSV-like input: {e}")
            return []

    def csv_string_to_list(csv_string:str) -> list[list[str]]:
        """ Convert a CSV string to a list of lists using csv.reader. """
        f = StringIO(csv_string)
        reader = csv.reader(f, skipinitialspace=True)
        return [row for row in reader]

    if file_type == 'csv' or delimiter in [',']:
        try:
            if input_arg == '--stdin':
                input_data = sys.stdin.read()
            elif input_arg == '--stdin2':
                # The first stdin line gives the number of rows that follow
                input_count = int(sys.stdin.readline())
                input_data = "\n".join([sys.stdin.readline().strip() for _ in range(input_count)])
            else:
                input_data = read_file_content(input_arg)
            table_data = csv_string_to_list(input_data)
            table_data = strip_whitespace(table_data)
            return table_data, []
        except Exception as e:
            print(f"Error reading CSV/TSV input: {e}")
            return [], []

    elif file_type == 'tsv':
        try:
            if input_arg == '--stdin':
                input_data = sys.stdin.read()
            elif input_arg == '--stdin2':
                input_count = int(sys.stdin.readline())
                input_data = "\n".join([sys.stdin.readline().strip() for _ in range(input_count)])
            else:
                input_data = read_file_content(input_arg)

            # Adjust delimiter for TSV or CSV
            if file_type == 'tsv' or delimiter == '\t':
                delimiter = '\t'
            else:
                delimiter = ','

            table_data = parse_csv_like(input_data, delimiter)
            table_data = strip_whitespace(table_data)
            return table_data, []
        except Exception as e:
            print(f"Error reading CSV/TSV input: {e}")
            return [], []

    elif file_type == 'json':
        try:
            if input_arg == '--stdin':
                input_data = sys.stdin.read()
            elif input_arg == '--stdin2':
                input_count = int(sys.stdin.readline())
                input_data = "\n".join([sys.stdin.readline() for _ in range(input_count)])
            else:
                input_data = read_file_content(input_arg)

            table_data = json.loads(input_data)
            return table_data, []
        except json.JSONDecodeError as e:
            print(f"Error decoding JSON input: {e}")
            return [], []
        except FileNotFoundError as e:
            print(f"File not found: {e}")
            return [], []

    elif file_type == 'xlsx':
        try:
            # The workbook bytes are written to a temporary file so that openpyxl can load them
            if input_arg == '--stdin':
                input_data = sys.stdin.read()
                with open('temp.xlsx', 'wb') as f:
                    f.write(input_data.encode())
            elif input_arg == '--stdin2':
                input_count = int(sys.stdin.readline())
                input_data = "\n".join([sys.stdin.readline() for _ in range(input_count)])
                with open('temp.xlsx', 'wb') as f:
                    f.write(input_data.encode())
            else:
                input_data = read_file_content(input_arg)
                with open('temp.xlsx', 'wb') as f:
                    f.write(input_data.encode())

            wb = load_workbook(filename='temp.xlsx')
            sheet = wb.active
            for row in sheet.iter_rows(values_only=True):
                table_data.append(list(row))
            return table_data, []
        except Exception as e:
            print(f"Error loading Excel file: {e}")
            return [], []

    elif file_type == 'ods':
        try:
            df = pd.read_excel(input_arg, engine='odf')
            table_data = df.values.tolist()
            return table_data, []
        except Exception as e:
            print(f"Error loading ODS file: {e}")
            return [], []

    elif file_type == 'pkl':
        with open(os.path.expandvars(os.path.expanduser(input_arg)), 'rb') as f:
            loaded_data = pickle.load(f)
            items = loaded_data["items"] if "items" in loaded_data else []
            header = loaded_data["header"] if "header" in loaded_data else []
            return items, header

    # Fallback: treat the input as delimiter-separated values
    if input_arg == '--stdin':
        input_data = sys.stdin.read()
    elif input_arg == '--stdin2':
        input_count = int(sys.stdin.readline())
        input_data = "\n".join([sys.stdin.readline() for _ in range(input_count)])
    else:
        input_data = read_file_content(input_arg)

    table_data = parse_csv_like(input_data, delimiter)

    return table_data, []

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert table to list of lists.')
    parser.add_argument('-i', dest='file', help='File containing the table to be converted')
    parser.add_argument('--stdin', action='store_true', help='Table passed on stdin')
    parser.add_argument('--stdin2', action='store_true', help='Table passed on stdin, preceded by a line giving the row count')
    parser.add_argument('-d', dest='delimiter', default='\t', help='Delimiter for rows in the table (default: tab)')
    parser.add_argument('-t', dest='file_type', choices=['tsv', 'csv', 'json', 'xlsx', 'ods'], help='Type of file (tsv, csv, json, xlsx, ods)')

    args = parser.parse_args()

    if args.file:
        input_arg = args.file
    elif args.stdin:
        input_arg = '--stdin'
    elif args.stdin2:
        input_arg = '--stdin2'
    else:
        print("Error: Please provide input file or use --stdin option.")
        sys.exit(1)

    table_data, header = table_to_list(input_arg, args.delimiter, args.file_type)

    # Report any rows whose length differs from that of the first row
    for row in table_data:
        if len(row) != len(table_data[0]):
            print(len(row), row)
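A possible invocation of table_to_list(), not part of the package and assuming the wheel is installed; data.csv is a placeholder path used only for this sketch.

from list_picker.utils.table_to_list_of_lists import table_to_list

# data.csv is a hypothetical input file; the module can also be run directly, e.g.
#   python table_to_list_of_lists.py -i data.csv -t csv -d ','
rows, header = table_to_list("data.csv", delimiter=",", file_type="csv")
print(len(rows), "rows")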
list_picker/utils/utils.py
@@ -0,0 +1,251 @@
#!/bin/python
from wcwidth import wcwidth, wcswidth
from math import log10
import subprocess
import tempfile

def truncate_to_display_width(text: str, max_column_width: int, centre=False) -> str:
    """
    Truncate and/or pad text to max_column_width using wcwidth to ensure the visual width
    is correct with foreign character sets.

    Return: the truncated and/or padded string, which has a visual length of max_column_width.
        If centre=True then the string is centred, if applicable.
    """
    result = ''
    width = 0
    for char in text:
        w = wcwidth(char)
        if w < 0:
            continue
        if width + w > max_column_width:
            break
        result += char
        width += w
    # Pad if it's shorter
    padding = max_column_width - wcswidth(result)
    if centre:
        result = ' '*(padding//2) + result + ' '*(padding//2 + padding%2)
    else:
        result = result + ' ' * padding
    return result

def format_row_full(row: list[str], hidden_columns:list = []) -> str:
    """ Format a list of strings as a tab-separated single string, excluding hidden columns. """
    return '\t'.join(str(row[i]) for i in range(len(row)) if i not in hidden_columns)

def format_full_row(row: list[str]) -> str:
    """ Format a list of strings as a tab-separated single string, including hidden columns. """
    return '\t'.join(row)


def format_row(row: list[str], hidden_columns: list, column_widths: list[int], separator: str, centre:bool=False) -> str:
    """ Format a list of strings as a single string. Requires the separator string and the maximum width of the columns. """
    row_str = ""
    for i, cell in enumerate(row):
        if i in hidden_columns: continue
        val = truncate_to_display_width(str(cell), column_widths[i], centre)
        row_str += val + separator
    return row_str

def get_column_widths(items: list[list[str]], header: list[str]=[], max_column_width:int=70, number_columns:bool=True) -> list[int]:
    """ Calculate the maximum display width of each column, clipped to max_column_width. """
    if len(items) == 0: return [0]
    widths = [max(wcswidth(str(row[i])) for row in items) for i in range(len(items[0]))]
    if header:
        # Allow extra space for the column-number prefix when columns are numbered
        header_widths = [wcswidth(str(h))+int(log10(i+1))+3*int(number_columns) for i, h in enumerate(header)]
        return [min(max_column_width, max(widths[i], header_widths[i])) for i in range(len(header))]
    return [min(max_column_width, width) for width in widths]

def get_mode_widths(item_list: list[str]) -> list[int]:
    """ Calculate the display width of each mode string. """
    widths = [wcswidth(str(row)) for row in item_list]
    return widths

def intStringToExponentString(n: str) -> str:
    """ Return the superscript representation of an integer. E.g., 1234 -> ¹²³⁴ """
    n = str(n)
    digitdict = { "0" : "⁰", "1" : "¹", "2" : "²", "3" : "³", "4" : "⁴", "5" : "⁵", "6" : "⁶", "7" : "⁷", "8" : "⁸", "9" : "⁹"}
    return "".join([digitdict[char] for char in n])

def convert_seconds(seconds:int, long_format:bool=False) -> str:
    """ Convert seconds to a human-readable format. E.g., 60*60*24*3 + 62 = 259262 -> 3d1m2s """
    if isinstance(seconds, str):
        seconds = int(seconds)

    # Calculate years, days, hours, minutes, and seconds
    years = seconds // (365 * 24 * 3600)
    days = (seconds % (365 * 24 * 3600)) // (24 * 3600)
    hours = (seconds % (24 * 3600)) // 3600
    minutes = (seconds % 3600) // 60
    remaining_seconds = seconds % 60

    # Long format = years, days, hours, minutes, seconds
    if long_format:
        human_readable = []
        if years > 0:
            human_readable.append(f"{years} year{'s' if years > 1 else ''}")
        if days > 0:
            human_readable.append(f"{days} day{'s' if days > 1 else ''}")
        if hours > 0:
            human_readable.append(f"{hours} hour{'s' if hours > 1 else ''}")
        if minutes > 0:
            human_readable.append(f"{minutes} minute{'s' if minutes > 1 else ''}")
        if remaining_seconds > 0 or not human_readable:
            human_readable.append(f"{remaining_seconds} second{'s' if remaining_seconds != 1 else ''}")
        return ', '.join(human_readable)
    else:
        # Compact format = y, d, h, m, s
        compact_parts = []
        if years > 0:
            compact_parts.append(f"{years}y")
        if days > 0:
            compact_parts.append(f"{days}d")
        if hours > 0:
            compact_parts.append(f"{hours}h")
        if minutes > 0:
            compact_parts.append(f"{minutes}m")
        if remaining_seconds > 0 or not compact_parts:
            compact_parts.append(f"{remaining_seconds}s")
        return ''.join(compact_parts)

def convert_percentage_to_ascii_bar(p:int, chars:int=8) -> str:
    """ Convert a percentage to a progress bar of length chars. """
    done = "█"
    notdone = "▒"
    return done * int(p / 100 * chars) + (chars-(int(p / 100 * chars)))*notdone
    # Alternative plain-ASCII style:
    # return "[" + "=" * int(p / 100 * chars) + ">" + " " * (chars - int(p / 100 * chars) - 1) + "]"


def get_selected_indices(selections: dict[int, bool]) -> list[int]:
    """ Return a list of the indices which are True in the selections dictionary. """
    selected_indices = [i for i, selected in selections.items() if selected]
    return selected_indices

def get_selected_values(items: list[list[str]], selections: dict[int, bool]) -> list[list[str]]:
    """ Return a list of rows based on which are True in the selections dictionary. """
    selected_values = [items[i] for i, selected in selections.items() if selected]
    return selected_values

def format_size(n:int) -> str:
    """
    Convert bytes to a human-readable format. E.g., 8*1024*1024 -> 8.0MB

    Args:
        n (int): The number of bytes to convert.

    Returns:
        str: A string representing the bytes in a more human-readable form.
    """
    if n < 0:
        raise ValueError("Number must be non-negative")

    symbols = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    prefix = {}
    for i, symbol in enumerate(symbols):
        prefix[symbol] = 1 << (i * 10)

    if n == 0:
        return "0B"

    symbol, value = "B", 0
    for symbol in reversed(symbols):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            break

    return f"{value:.1f}{symbol}"


def openFiles(files: list[str]) -> str:
    """
    Open multiple files using their associated applications:
        get the mime type of each file,
        get the default application for each mime type,
        open all files; files with the same default application are opened in one instance.

    Args:
        files (list[str]): A list of file paths.

    Returns:
        str
    """
    def get_mime_types(files):
        types = {}

        for file in files:
            resp = subprocess.run(f"xdg-mime query filetype {file}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            ftype = resp.stdout.decode("utf-8").strip()
            if ftype in types:
                types[ftype] += [file]
            else:
                types[ftype] = [file]

        return types

    def get_applications(types):
        apps = {}

        for t in types:
            resp = subprocess.run(f"xdg-mime query default {t}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            app = resp.stdout.decode("utf-8").strip()
            if app in apps:
                apps[app] += [t]
            else:
                apps[app] = [t]

        return apps

    # Quote paths that contain spaces before passing them to the shell
    for i in range(len(files)):
        if ' ' in files[i] and files[i][0] not in ["'", '"']:
            files[i] = repr(files[i])

    types = get_mime_types(files)
    apps = get_applications(types.keys())

    # Group files by the application that opens them
    apps_files = {}
    for app, filetypes in apps.items():
        flist = []
        for filetype in filetypes:
            flist += types[filetype]
        apps_files[app] = flist

    for app, app_files in apps_files.items():
        files_str = ' '.join(app_files)
        result = subprocess.Popen(f"gio launch /usr/share/applications/{app} {files_str}", shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        # if result.stderr:
        #     return result.stderr.read().decode("utf-8").strip()
    return ""

def file_picker() -> str:
    """ Run the file picker (yazi by default) and return the path of the file picked. If no file is picked an empty string is returned. """

    with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
        # yazi writes the chosen path into the temporary file
        subprocess.run(f"yazi --chooser-file={tmpfile.name}", shell=True)

        lines = tmpfile.readlines()
        if lines:
            filename = lines[0].decode("utf-8").strip()
            return filename
        else:
            return ""


def dir_picker() -> str:
    """ Run the dir picker (yazi by default) and return the path of the directory one is in upon exit. """

    with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
        # yazi writes the final working directory into the temporary file
        subprocess.run(f"yazi --cwd-file={tmpfile.name}", shell=True)

        lines = tmpfile.readlines()
        if lines:
            filename = lines[0].decode("utf-8").strip()
            return filename
        else:
            return ""
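Several of these helpers are pure functions, so a short sanity-check sketch (not part of the package, assuming the wheel is installed) looks like this; the expected outputs follow directly from the definitions above.

from list_picker.utils.utils import convert_seconds, format_size, intStringToExponentString

print(convert_seconds(259262))                     # 3d1m2s
print(convert_seconds(259262, long_format=True))   # 3 days, 1 minute, 2 seconds
print(format_size(8 * 1024 * 1024))                # 8.0MB
print(intStringToExponentString(42))               # ⁴²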