visidata 3.0.2__py3-none-any.whl → 3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- visidata/__init__.py +12 -10
- visidata/_input.py +208 -202
- visidata/_open.py +4 -1
- visidata/_types.py +4 -3
- visidata/aggregators.py +88 -39
- visidata/apps/vdsql/_ibis.py +7 -11
- visidata/apps/vdsql/clickhouse.py +2 -2
- visidata/apps/vdsql/snowflake.py +1 -1
- visidata/apps/vgit/status.py +1 -1
- visidata/basesheet.py +11 -4
- visidata/canvas.py +54 -20
- visidata/clipboard.py +13 -6
- visidata/cliptext.py +7 -6
- visidata/cmdlog.py +40 -27
- visidata/column.py +14 -49
- visidata/ddw/regex.ddw +3 -2
- visidata/deprecated.py +14 -2
- visidata/desktop/visidata.desktop +2 -2
- visidata/editor.py +1 -0
- visidata/errors.py +1 -1
- visidata/experimental/sort_selected.py +54 -0
- visidata/expr.py +69 -18
- visidata/features/change_precision.py +1 -3
- visidata/features/cmdpalette.py +17 -2
- visidata/features/colorsheet.py +1 -1
- visidata/features/dedupe.py +3 -3
- visidata/features/go_col.py +71 -0
- visidata/features/graph_seaborn.py +1 -1
- visidata/features/join.py +20 -10
- visidata/features/layout.py +16 -3
- visidata/features/ping.py +16 -12
- visidata/features/regex.py +5 -5
- visidata/features/status_source.py +3 -1
- visidata/features/sysedit.py +1 -1
- visidata/features/transpose.py +2 -1
- visidata/features/type_ipaddr.py +2 -4
- visidata/features/unfurl.py +1 -0
- visidata/form.py +2 -2
- visidata/freqtbl.py +16 -11
- visidata/fuzzymatch.py +1 -0
- visidata/graph.py +163 -12
- visidata/guide.py +57 -24
- visidata/guides/ClipboardGuide.md +48 -0
- visidata/guides/ColumnsGuide.md +52 -0
- visidata/guides/CommandsSheet.md +28 -0
- visidata/guides/DirSheet.md +34 -0
- visidata/guides/ErrorsSheet.md +17 -0
- visidata/guides/FrequencyTable.md +42 -0
- visidata/guides/GrepSheet.md +28 -0
- visidata/guides/JsonSheet.md +38 -0
- visidata/guides/MacrosSheet.md +19 -0
- visidata/guides/MeltGuide.md +52 -0
- visidata/guides/MemorySheet.md +7 -0
- visidata/guides/MenuGuide.md +26 -0
- visidata/guides/ModifyGuide.md +38 -0
- visidata/guides/PivotGuide.md +71 -0
- visidata/guides/RegexGuide.md +107 -0
- visidata/guides/SelectionGuide.md +44 -0
- visidata/guides/SlideGuide.md +26 -0
- visidata/guides/SortGuide.md +0 -0
- visidata/guides/SplitpaneGuide.md +15 -0
- visidata/guides/TypesSheet.md +43 -0
- visidata/guides/XsvGuide.md +36 -0
- visidata/help.py +6 -6
- visidata/hint.py +2 -1
- visidata/indexsheet.py +2 -2
- visidata/interface.py +13 -14
- visidata/keys.py +4 -1
- visidata/loaders/api_airtable.py +1 -1
- visidata/loaders/archive.py +1 -1
- visidata/loaders/csv.py +9 -5
- visidata/loaders/eml.py +11 -6
- visidata/loaders/f5log.py +1 -0
- visidata/loaders/fec.py +18 -42
- visidata/loaders/fixed_width.py +19 -3
- visidata/loaders/grep.py +121 -0
- visidata/loaders/html.py +1 -0
- visidata/loaders/http.py +6 -1
- visidata/loaders/json.py +22 -4
- visidata/loaders/jsonla.py +8 -2
- visidata/loaders/mailbox.py +1 -0
- visidata/loaders/markdown.py +25 -6
- visidata/loaders/msgpack.py +19 -0
- visidata/loaders/npy.py +0 -1
- visidata/loaders/odf.py +18 -4
- visidata/loaders/orgmode.py +1 -1
- visidata/loaders/rec.py +6 -4
- visidata/loaders/sas.py +11 -4
- visidata/loaders/scrape.py +0 -1
- visidata/loaders/texttables.py +2 -0
- visidata/loaders/tsv.py +24 -7
- visidata/loaders/unzip_http.py +127 -3
- visidata/loaders/vds.py +4 -0
- visidata/loaders/vdx.py +1 -1
- visidata/loaders/xlsx.py +5 -0
- visidata/loaders/xml.py +2 -1
- visidata/macros.py +14 -31
- visidata/main.py +14 -13
- visidata/mainloop.py +14 -6
- visidata/man/vd.1 +72 -39
- visidata/man/vd.txt +72 -41
- visidata/memory.py +15 -4
- visidata/menu.py +14 -3
- visidata/metasheets.py +5 -6
- visidata/modify.py +4 -4
- visidata/mouse.py +2 -0
- visidata/movement.py +14 -28
- visidata/optionssheet.py +3 -5
- visidata/path.py +59 -37
- visidata/pivot.py +8 -5
- visidata/pyobj.py +63 -9
- visidata/save.py +16 -9
- visidata/search.py +4 -4
- visidata/selection.py +10 -56
- visidata/settings.py +37 -35
- visidata/sheets.py +186 -108
- visidata/shell.py +22 -12
- visidata/sidebar.py +71 -16
- visidata/sort.py +21 -6
- visidata/statusbar.py +42 -5
- visidata/stored_list.py +5 -2
- visidata/tests/conftest.py +1 -0
- visidata/tests/test_commands.py +9 -1
- visidata/tests/test_completer.py +18 -0
- visidata/tests/test_edittext.py +3 -2
- visidata/text_source.py +7 -4
- visidata/textsheet.py +20 -6
- visidata/themes/ascii8.py +9 -6
- visidata/themes/asciimono.py +14 -4
- visidata/threads.py +13 -3
- visidata/tuiwin.py +5 -1
- visidata/type_currency.py +1 -2
- visidata/type_date.py +6 -1
- visidata/undo.py +10 -5
- visidata/utils.py +9 -3
- visidata/vdobj.py +21 -1
- visidata/wrappers.py +9 -1
- {visidata-3.0.2.data → visidata-3.1.data}/data/share/applications/visidata.desktop +2 -2
- {visidata-3.0.2.data → visidata-3.1.data}/data/share/man/man1/vd.1 +72 -39
- {visidata-3.0.2.data → visidata-3.1.data}/data/share/man/man1/visidata.1 +72 -39
- {visidata-3.0.2.dist-info → visidata-3.1.dist-info}/METADATA +24 -6
- visidata-3.1.dist-info/RECORD +284 -0
- visidata-3.0.2.dist-info/RECORD +0 -258
- {visidata-3.0.2.data → visidata-3.1.data}/scripts/vd +0 -0
- {visidata-3.0.2.data → visidata-3.1.data}/scripts/vd2to3.vdx +0 -0
- {visidata-3.0.2.dist-info → visidata-3.1.dist-info}/LICENSE.gpl3 +0 -0
- {visidata-3.0.2.dist-info → visidata-3.1.dist-info}/WHEEL +0 -0
- {visidata-3.0.2.dist-info → visidata-3.1.dist-info}/entry_points.txt +0 -0
- {visidata-3.0.2.dist-info → visidata-3.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,19 @@
+from visidata import vd, VisiData, JsonSheet
+
+
+@VisiData.api
+def open_msgpack(vd, p):
+    return MsgpackSheet(p.name, source=p)
+
+
+VisiData.open_msgpackz = VisiData.open_msgpack
+
+
+class MsgpackSheet(JsonSheet):
+    def iterload(self):
+        msgpack = vd.importModule('msgpack')
+        data = self.source.read_bytes()
+        if self.options.filetype == 'msgpackz':
+            brotli = vd.importModule('brotli')
+            data = brotli.decompress(data)
+        yield from msgpack.unpackb(data, raw=False)
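For reference, the decode path the new loader relies on can be exercised outside VisiData. A minimal sketch, assuming the third-party msgpack and brotli packages are installed, with made-up sample rows:

    import msgpack, brotli

    rows = [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]
    blob = brotli.compress(msgpack.packb(rows))   # what a .msgpackz file would contain
    data = brotli.decompress(blob)                # MsgpackSheet only decompresses for filetype 'msgpackz'
    print(msgpack.unpackb(data, raw=False))       # same call the loader uses to yield rows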
visidata/loaders/npy.py
CHANGED
visidata/loaders/odf.py
CHANGED
@@ -45,9 +45,11 @@ class OdsSheet(SequenceSheet):
         text_s = S().qname
 
         cell_names = [odf.table.CoveredTableCell().qname, odf.table.TableCell().qname]
+        empty_rows = 0
         for odsrow in self.source.getElementsByType(odf.table.TableRow):
             row = []
 
+            empty_cells = 0
             for cell in odsrow.childNodes:
                 if cell.qname not in cell_names: continue
                 value = ''
@@ -66,8 +68,20 @@ class OdsSheet(SequenceSheet):
                 else:
                     value = str(cell)
 
-
-
+                column_repeat = int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
+                if value is None:
+                    empty_cells += column_repeat
+                else:
+                    row.extend([""] * empty_cells)
+                    empty_cells = 0
+                    row.extend([value]*column_repeat)
 
-
-
+            row_repeat = int(odsrow.attributes.get((TABLENS, "number-rows-repeated"), 1))
+            if len(row) == 0:
+                empty_rows += row_repeat
+            else:
+                for i in range(empty_rows):
+                    yield []
+                empty_rows = 0
+                for i in range(row_repeat):
+                    yield list(row)
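The new logic expands ODF "number-columns-repeated" / "number-rows-repeated" attributes while dropping trailing runs of empty cells and rows. A standalone sketch of the column half of that rule, using hypothetical (value, repeat) pairs:

    def expand(cells):                      # cells: list of (value_or_None, repeat_count)
        row, empty = [], 0
        for value, repeat in cells:
            if value is None:
                empty += repeat             # defer empties; only emit them if real data follows
            else:
                row.extend([''] * empty)
                empty = 0
                row.extend([value] * repeat)
        return row

    print(expand([('a', 1), (None, 3), ('b', 2)]))   # ['a', '', '', '', 'b', 'b']
    print(expand([('a', 1), (None, 3)]))             # ['a'] -- trailing empties dropped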
visidata/loaders/orgmode.py
CHANGED
visidata/loaders/rec.py
CHANGED
@@ -128,14 +128,16 @@ def save_rec(vd, p, *vsheets):
             comments = getattr(vs, 'comments', [])
             if comments:
                 fp.write('# ' + '\n# '.join(comments) + '\n')
-            fp.write('%rec:
-            fp.write('\n')
+            fp.write(f'%rec: {vs.name}\n')
             for col in vs.visibleCols:
                 if col.keycol:
-                    fp.write('%key:
+                    fp.write(f'%key: {col.name}\n')
             for row in Progress(vs.rows):
                 for col in vs.visibleCols:
-
+                    cell = col.getCell(row)
+                    if cell.value is not None:
+                        val = encode_multiline(cell.text)
+                        fp.write(f'{col.name}: {val}\n')
 
                 fp.write('\n')
             fp.write('\n')
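For illustration, the rewritten save_rec emits GNU recutils records roughly like the following (a hedged example: a hypothetical sheet named "people" with key column "name"; cells whose value is None are now skipped rather than written as empty fields):

    %rec: people
    %key: name
    name: Alice
    age: 30

    name: Bob
    age: 25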
visidata/loaders/sas.py
CHANGED
@@ -18,14 +18,21 @@ def open_sas7bdat(vd, p):
 class XptSheet(Sheet):
     def iterload(self):
         xport = vd.importExternal('xport')
+        xport.v56 = vd.importExternal('xport.v56', 'xport>=3')
         with open(self.source, 'rb') as fp:
-            self.
+            self.library = xport.v56.load(fp)
 
         self.columns = []
-
-            self.addColumn(ColumnItem(var.name, i, type=float if var.numeric else str))
+        dataset = self.library[list(self.library.keys())[0]]
 
-
+        varnames = dataset.contents.Variable.values
+        types = dataset.contents.Type.values
+
+        for i, (varname, typestr) in enumerate(zip(varnames, types)):
+            self.addColumn(ColumnItem(varname, i, type=float if typestr == 'Numeric' else str))
+
+        for row in dataset.values:
+            yield list(row)
 
 
 class SasSheet(Sheet):
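As a point of reference, the dataset/column access pattern the new XptSheet uses can be tried directly against the third-party xport library; this sketch simply mirrors the calls in the hunk above, with a hypothetical filename:

    import xport.v56                              # third-party 'xport' package (>=3), as the loader imports

    with open('example.xpt', 'rb') as fp:         # hypothetical file
        library = xport.v56.load(fp)
    dataset = library[list(library.keys())[0]]    # first member dataset, as in the loader
    print(dataset.contents.Variable.values)       # column names the loader turns into ColumnItems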
visidata/loaders/scrape.py
CHANGED
visidata/loaders/texttables.py
CHANGED
visidata/loaders/tsv.py
CHANGED
@@ -23,7 +23,7 @@ def adaptive_bufferer(fp, max_buffer_size=65536):
     """Loading e.g. tsv files goes faster with a large buffer. But when the input stream
     is slow (e.g. 1 byte/second) and the buffer size is large, it can take a long time until
     the buffer is filled. Only when the buffer is filled (or the input stream is finished)
-    you can see the data
+    you can see the data visualized in visidata. That's why we use an adaptive buffer.
     For fast input streams, the buffer becomes large, for slow input streams, the buffer stays
     small"""
     buffer_size = 8
@@ -42,10 +42,10 @@ def adaptive_bufferer(fp, max_buffer_size=65536):
         current_delta = current_time - previous_start_time
 
         if current_delta < 1:
-            # if it takes
+            # if it takes less than one second to fill the buffer, double the size of the buffer
            buffer_size = min(buffer_size * 2, max_buffer_size)
        else:
-            # if it takes
+            # if it takes longer than one second, decrease the buffer size so it takes about
             # 1 second to fill it
             previous_start_time = current_time
             buffer_size = math.ceil(min(processed_buffer_size / current_delta, max_buffer_size))
@@ -75,13 +75,24 @@ class TsvSheet(SequenceSheet):
     def iterload(self):
         delim = self.delimiter or self.options.delimiter
         rowdelim = self.row_delimiter or self.options.row_delimiter
+        if delim == '':
+            vd.warning("using '\\x00' as field delimiter")
+            delim = '\x00' #2272
+            self.options.regex_skip = ''
+        if rowdelim == '':
+            vd.warning("using '\\x00' as row delimiter")
+            rowdelim = '\x00'
+            self.options.regex_skip = ''
+        if delim == rowdelim:
+            vd.fail('field delimiter and row delimiter cannot be the same')
 
         with self.open_text_source() as fp:
+            regex_skip = getattr(fp, '_regex_skip', None)
             for line in splitter(adaptive_bufferer(fp), rowdelim):
-                if not line:
+                if not line or (regex_skip and regex_skip.match(line)):
                     continue
 
-                row =
+                row = line.split(delim)
 
                 if len(row) < self.nVisibleCols:
                     # extend rows that are missing entries
@@ -95,6 +106,14 @@ def save_tsv(vd, p, vs, delimiter='', row_delimiter=''):
     'Write sheet to file `fn` as TSV.'
     unitsep = delimiter or vs.options.delimiter
     rowsep = row_delimiter or vs.options.row_delimiter
+    if unitsep == '':
+        vd.warning("saving with '\\x00' as field delimiter")
+        unitsep = '\x00'
+    if rowsep == '':
+        vd.warning("saving with '\\x00' as row delimiter")
+        rowsep = '\x00'
+    if unitsep == rowsep:
+        vd.fail('field delimiter and row delimiter cannot be the same')
     trdict = vs.safe_trdict()
 
     with p.open(mode='w', encoding=vs.options.save_encoding) as fp:
@@ -136,8 +155,6 @@ def append_tsv_row(vs, row):
         fp.write(newrow)
 
 
-TsvSheet.options.regex_skip = '^#.*'
-
 vd.addGlobals({
     'TsvSheet': TsvSheet,
 })
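A standalone sketch of the new empty-delimiter guard (names mirror the hunk above; values are illustrative): an empty field delimiter now falls back to NUL so single-column text still loads, and identical field/row delimiters are rejected outright.

    delim, rowdelim = '', '\n'
    if delim == '':
        delim = '\x00'                   # fallback added in #2272
    if delim == rowdelim:
        raise ValueError('field delimiter and row delimiter cannot be the same')

    print('a\x00b\x00c'.split(delim))    # ['a', 'b', 'c']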
visidata/loaders/unzip_http.py
CHANGED
@@ -18,18 +18,44 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+"""
+usage: unzip_http [-h] [-l] [-f] [-o] url [files ...]
+
+Extract individual files from .zip files over http without downloading the
+entire archive. HTTP server must send `Accept-Ranges: bytes` and
+`Content-Length` in headers.
+
+positional arguments:
+  url                   URL of the remote zip file
+  files                 Files to extract. If no filenames given, displays .zip
+                        contents (filenames and sizes). Each filename can be a
+                        wildcard glob.
+
+options:
+  -h, --help            show this help message and exit
+  -l, --list            List files in the remote zip file
+  -f, --full-filepaths  Recreate folder structure from zip file when extracting
+                        (instead of extracting the files to the current
+                        directory)
+  -o, --stdout          Write files to stdout (if multiple files: concatenate
+                        them to stdout, in zipfile order)
+"""
+
 import sys
 import os
 import io
+import math
+import time
 import zlib
 import struct
 import fnmatch
+import argparse
 import pathlib
 import urllib.parse
 from visidata import vd
 
 
-__version__ = '0.
+__version__ = '0.6'
 
 
 def error(s):
@@ -130,7 +156,10 @@ class RemoteZipFile:
             warning(f"{hostname} Accept-Ranges header ('{r}') is not 'bytes'--trying anyway")
 
         self.zip_size = int(resp.headers['Content-Length'])
-        resp = self.get_range(
+        resp = self.get_range(
+                max(self.zip_size-65536, 0),
+                65536
+            )
 
         cdir_start = -1
         i = resp.data.rfind(self.magic_eocd64)
@@ -147,7 +176,10 @@ class RemoteZipFile:
         if cdir_start < 0 or cdir_start >= self.zip_size:
             error('cannot find central directory')
 
-
+        if self.zip_size <= 65536:
+            filehdr_index = cdir_start
+        else:
+            filehdr_index = 65536 - (self.zip_size - cdir_start)
 
         if filehdr_index < 0:
             resp = self.get_range(cdir_start, self.zip_size - cdir_start)
@@ -258,3 +290,95 @@ class RemoteZipStream(io.RawIOBase):
             self._buffer = self._buffer[n:]
 
         return ret
+
+
+### script start
+
+class StreamProgress:
+    def __init__(self, fp, name='', total=0):
+        self.name = name
+        self.fp = fp
+        self.total = total
+        self.start_time = time.time()
+        self.last_update = 0
+        self.amtread = 0
+
+    def read(self, n):
+        r = self.fp.read(n)
+        self.amtread += len(r)
+        now = time.time()
+        if now - self.last_update > 0.1:
+            self.last_update = now
+
+            elapsed_s = now - self.start_time
+            sys.stderr.write(f'\r{elapsed_s:.0f}s {self.amtread/10**6:.02f}/{self.total/10**6:.02f}MB ({self.amtread/10**6/elapsed_s:.02f} MB/s) {self.name}')
+
+        if not r:
+            sys.stderr.write('\n')
+
+        return r
+
+
+def list_files(rzf):
+    def safelog(x):
+        return 1 if x == 0 else math.ceil(math.log10(x))
+
+    digits_compr = max(safelog(f.compress_size) for f in rzf.infolist())
+    digits_plain = max(safelog(f.file_size   ) for f in rzf.infolist())
+    fmtstr = f'%{digits_compr}d -> %{digits_plain}d\t%s'
+    for f in rzf.infolist():
+        print(fmtstr % (f.compress_size, f.file_size, f.filename), file=sys.stderr)
+
+
+def extract_one(outfile, rzf, f, ofname):
+    print(f'Extracting {f.filename} to {ofname}...', file=sys.stderr)
+
+    fp = StreamProgress(rzf.open(f), name=f.filename, total=f.compress_size)
+    while r := fp.read(2**18):
+        outfile.write(r)
+
+
+def download_file(f, rzf, args):
+    if not any(fnmatch.fnmatch(f.filename, g) for g in args.files):
+        return
+
+    if args.stdout:
+        extract_one(sys.stdout.buffer, rzf, f, "stdout")
+    else:
+        path = pathlib.Path(f.filename)
+        if args.full_filepaths:
+            path.parent.mkdir(parents=True, exist_ok=True)
+        else:
+            path = path.name
+
+        with open(str(path), 'wb') as of:
+            extract_one(of, rzf, f, str(path))
+
+
+def main():
+    parser = argparse.ArgumentParser(prog='unzip-http', \
+        description="Extract individual files from .zip files over http without downloading the entire archive. HTTP server must send `Accept-Ranges: bytes` and `Content-Length` in headers.")
+
+    parser.add_argument('-l', '--list', action='store_true', default=False,
+                        help="List files in the remote zip file")
+    parser.add_argument('-f', '--full-filepaths', action='store_true', default=False,
+                        help="Recreate folder structure from zip file when extracting (instead of extracting the files to the current directory)")
+    parser.add_argument('-o', '--stdout', action='store_true', default=False,
+                        help="Write files to stdout (if multiple files: concatenate them to stdout, in zipfile order)")
+
+    parser.add_argument("url", nargs=1, help="URL of the remote zip file")
+    parser.add_argument("files", nargs='*', help="Files to extract. If no filenames given, displays .zip contents (filenames and sizes). Each filename can be a wildcard glob.")
+
+    args = parser.parse_args()
+
+    rzf = RemoteZipFile(args.url[0])
+    if args.list or len(args.files) == 0:
+        list_files(rzf)
+    else:
+        for f in rzf.infolist():
+            download_file(f, rzf, args)
+
+
+
+if __name__ == '__main__':
+    main()
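For reference, the RemoteZipFile class that this module bundles can also be used programmatically; a minimal sketch with a hypothetical URL, using only the calls that appear in the hunks above:

    from visidata.loaders.unzip_http import RemoteZipFile

    rzf = RemoteZipFile('https://example.com/archive.zip')   # hypothetical URL
    for f in rzf.infolist():            # only the central directory is fetched, via range requests
        print(f.filename, f.file_size)

    member = next(iter(rzf.infolist()))
    data = rzf.open(member).read(1024)  # range-requests just the bytes needed for this member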
visidata/loaders/vds.py
CHANGED
@@ -34,6 +34,10 @@ def save_vds(vd, p, *sheets):
                 d['col'] = type(col).__name__
                 fp.write('#'+json.dumps(d)+NL)
 
+            if not vs.rows:
+                fp.write(NL) #2342 blank line to separate sheets without rows
+                continue
+
             with Progress(gerund='saving'):
                 for row in vs.iterdispvals(*vs.columns, format=False):
                     d = {col.name:val for col, val in row.items()}
visidata/loaders/vdx.py
CHANGED
visidata/loaders/xlsx.py
CHANGED
@@ -10,13 +10,16 @@ from visidata.type_date import date
 
 
 vd.option('xlsx_meta_columns', False, 'include columns for cell objects, font colors, and fill colors', replay=True)
+vd.option('xlsx_color_cells', True, 'color cells based on xlsx source')
 
 @VisiData.api
 def open_xls(vd, p):
+    p.is_local() or vd.fail('xls loader does not support remote files')
     return XlsIndexSheet(p.base_stem, source=p)
 
 @VisiData.api
 def open_xlsx(vd, p):
+    p.is_local() or vd.fail('xlsx loader does not support remote files')
     return XlsxIndexSheet(p.base_stem, source=p)
 
 class XlsxIndexSheet(IndexSheet):
@@ -203,6 +206,8 @@ HLSMAX = 240
 
 @XlsxSheet.api
 def colorize_xlsx_cell(sheet, col, row):
+    if not hasattr(col, 'column_letter') or not sheet.options.xlsx_color_cells:
+        return ''
     fg = getattrdeep(row, col.column_letter+'.font.color', None)
     bg = getattrdeep(row, col.column_letter+'.fill.start_color', None)
     fg = sheet.xlsx_color_to_xterm256(fg)
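If the new cell coloring is unwanted, the option can be switched off like any other VisiData option, for example in ~/.visidatarc (a hedged sketch; the option name comes from the hunk above):

    # ~/.visidatarc
    options.xlsx_color_cells = False   # keep loading .xlsx but skip per-cell colors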
visidata/loaders/xml.py
CHANGED
@@ -85,8 +85,9 @@ class XmlSheet(Sheet):
 @VisiData.api
 def save_xml(vd, p, vs):
     isinstance(vs, XmlSheet) or vd.fail('must save xml from XmlSheet')
-    vs.root.write(str(p), encoding=options.
+    vs.root.write(str(p), encoding=vs.options.save_encoding, standalone=False, pretty_print=True)
 
+XmlSheet.options.save_encoding = 'utf-8' #2520
 
 XmlSheet.addCommand('za', 'addcol-xmlattr', 'attr=input("add attribute: "); addColumnAtCursor(AttribColumn(attr, attr))', 'add column for xml attribute')
 XmlSheet.addCommand('v', 'visibility', 'showColumnsBasedOnRow(cursorRow)', 'show only columns in current row attributes')
visidata/macros.py
CHANGED
@@ -3,9 +3,9 @@ from functools import wraps
 
 from visidata.cmdlog import CommandLog, CommandLogJsonl
 from visidata import vd, UNLOADED, asyncthread, vlen
-from visidata import IndexSheet, VisiData, Sheet, Path, VisiDataMetaSheet, Column, ItemColumn, AttrColumn, BaseSheet
+from visidata import IndexSheet, VisiData, Sheet, Path, VisiDataMetaSheet, Column, ItemColumn, AttrColumn, BaseSheet
 
-vd.macroMode = None
+vd.macroMode = None # CommandLog
 vd.macrobindings = {}
 
 
@@ -70,7 +70,10 @@ def loadMacro(vd, p:Path):
 
 @VisiData.api
 def runMacro(vd, binding:str):
+    mm = vd.macroMode
+    vd.macroMode = None
     vd.replay_sync(vd.macrobindings[binding])
+    vd.macroMode = mm
 
 
 @VisiData.api
@@ -100,10 +103,13 @@ def saveMacro(self, rows, ks):
 # needs to happen before, because the original afterexecsheet resets vd.activecommand to None
 @CommandLogJsonl.before
 def afterExecSheet(cmdlog, sheet, escaped, err):
-    if
-
-
-
+    if not vd.macroMode: return
+    if not vd.activeCommand: return
+    if vd.activeCommand.longname == 'macro-record': return
+
+    cmd = copy(vd.activeCommand)
+    cmd.sheet = ''
+    vd.macroMode.addRow(cmd)
 
 
 @CommandLogJsonl.api
@@ -146,33 +152,10 @@ def reloadMacros(vd):
         vd.setMacro(r.binding, vs)
 
 
-
-guide_text = '''# Macros
-Macros allow you to bind a command sequence to a keystroke or longname, to replay when that keystroke is pressed or the command is executed by longname.
-
-The basic usage is:
-1. {help.commands.macro_record}.
-2. Execute a series of commands.
-3. `m` again to complete the recording, and prompt for the keystroke or longname to bind it to.
-
-The macro will then be executed everytime the provided keystroke or longname are used. Note: the Alt+keys and the function keys are left unbound; overriding other keys may conflict with existing bindings, now or in the future.
-
-Executing a macro will the series of commands starting on the current row and column on the current sheet.
-
-# The Macros Sheet
-
-- {help.commands.macro_sheet}
-
-- `d` (`delete-row`) to mark macros for deletion
-- {help.commands.commit_sheet}
-- `Enter` (`open-row`) to open the macro in the current row, and view the series of commands composing it'''
-
-
-Sheet.addCommand('m', 'macro-record', 'vd.cmdlog.startMacro()', 'record macro')
+Sheet.addCommand('m', 'macro-record', 'vd.cmdlog.startMacro()', 'start/stop macro recording', replay=False)
 Sheet.addCommand('gm', 'macro-sheet', 'vd.push(vd.macrosheet)', 'open an index of existing macros')
 
 vd.addMenuItems('''
+    System > Record macro > macro-record
     System > Macros sheet > macro-sheet
 ''')
-
-vd.addGuide('MacrosSheet', MacrosGuide)
visidata/main.py
CHANGED
@@ -2,7 +2,7 @@
 # Usage: $0 [<options>] [<input> ...]
 #        $0 [<options>] --play <cmdlog> [--batch] [-w <waitsecs>] [-o <output>] [field=value ...]
 
-__version__ = '3.
+__version__ = '3.1'
 __version_info__ = 'saul.pw/VisiData v' + __version__
 
 from copy import copy
@@ -37,16 +37,20 @@ def eval_vd(logpath, *args, **kwargs):
     'Instantiate logpath with args/kwargs replaced and replay all commands.'
     log = logpath.read_text()
     if args or kwargs:
-        if logpath.ext in ['vdj', 'json', 'jsonl']:
+        if logpath.ext in ['vdj', 'json', 'jsonl'] or logpath is vd.stdinSource:
             from string import Template
             log = Template(log).safe_substitute(**kwargs)
         else:
             log = log.format(*args, **kwargs)
 
     src = Path(logpath.given, fptext=io.StringIO(log), filesize=len(log))
-
+    if logpath is vd.stdinSource:
+        # replay from stdin only supports .vdj
+        vs = vd.openSource(src, filetype='vdj')
+    else:
+        vs = vd.openSource(src, filetype=src.ext)
     vs.name += '_vd'
-    vs.reload()
+    vd.sync(vs.reload())
     vs.vd = vd
     return vs
 
@@ -220,6 +224,8 @@ def main_vd():
         elif arg in ['--']:
             optsdone = True
         elif arg == '-':
+            if not flPipedInput:
+                vd.fail('to use stdin as a data source, data must be piped into it')
             inputs.append((vd.stdinSource, copy(current_args)))
         elif arg in ['-g', '--global']:
             flGlobal = True
@@ -252,10 +258,8 @@ def main_vd():
             current_args[optname] = optval
             if flGlobal:
                 global_args[optname] = optval
-
         elif arg.startswith('+'): # position cursor at start
             after_config.append((vd.moveToPos, *vd.parsePos(arg[1:], inputs=inputs)))
-
         elif current_args.get('play', None) and '=' in arg:
             # parse 'key=value' pairs for formatting cmdlog template in replay mode
             k, v = arg.split('=', maxsplit=1)
@@ -282,9 +286,9 @@ def main_vd():
     vd.domotd()
 
     if args.batch:
-        options.
-
-
+        if not vd.options.interactive:
+            options.undo = False
+            options.quitguard = False
         vd.execAsync = vd.execSync # disable async
 
     for cmd in (args.preplay or '').split():
@@ -332,7 +336,6 @@ def main_vd():
     if args.batch:
         if sources:
             vd.push(sources[0])
-            sources[0].reload()
 
     for (f, *parms) in after_config:
         f(sources, *parms)
@@ -342,20 +345,18 @@ def main_vd():
     else:
         if args.play == '-':
             vdfile = vd.stdinSource
-            vdfile.name = 'stdin.vd'
         else:
            vdfile = Path(args.play)
 
         vs = eval_vd(vdfile, *fmtargs, **fmtkwargs)
-        vd.sync(vs.reload())
         if args.batch:
             if not args.debug:
                 vd.outputProgressThread = visidata.VisiData.execAsync(vd, vd.outputProgressEvery, vs, seconds=0.5, sheet=BaseSheet()) #1182
+            vd.reloadMacros()
             if vd.replay_sync(vs): # error
                 return 1
 
     if vd.options.interactive:
-        vd.editline = lambda *args, vd=vd, **kwargs: visidata.VisiData.editline(vd, *args, **kwargs)
         vd.execAsync = lambda *args, vd=vd, **kwargs: visidata.VisiData.execAsync(vd, *args, **kwargs)
         run()
     else:
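Taken together, the main.py changes let a .vdj cmdlog be replayed directly from stdin. A hedged example invocation (filenames are hypothetical; --play, --batch, and --output are existing vd flags):

    cat session.vdj | vd --play - --batch --output out.csv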
visidata/mainloop.py
CHANGED
@@ -165,8 +165,11 @@ def mainloop(vd, scr):
     numTimeouts = 0
     prefixWaiting = False
     vd.scrFull = scr
+    if vd.options.disp_expert >= 5:
+        vd.disp_help = -1
 
     vd.keystrokes = ''
+    vd.drawThread = threading.current_thread()
     while True:
         if not vd.stackedSheets and vd.currentReplay is None:
             return
@@ -177,11 +180,13 @@ def mainloop(vd, scr):
             continue # waiting for replay to push sheet
 
         threading.current_thread().sheet = sheet
-        vd.drawThread = threading.current_thread()
 
         vd.setWindows(vd.scrFull)
 
-
+        # a newly created sheet needs to be drawn once to set its _scr
+        if vd.activeSheet._scr is None or \
+           not vd.drainPendingKeys(scr) or \
+           time.time() - vd._lastDrawTime > vd.min_draw_ms/1000: #1459
             vd.draw_all()
             vd._lastDrawTime = time.time()
 
@@ -216,10 +221,10 @@ def mainloop(vd, scr):
                 pass
             elif keystroke == 'Ctrl+Q':
                 return vd.lastErrors and '\n'.join(vd.lastErrors[-1])
-            elif vd.bindkeys._get(vd.keystrokes):
+            elif vd.bindkeys._get(vd.keystrokes) is not None:
                 sheet.execCommand(vd.keystrokes, keystrokes=vd.keystrokes)
                 prefixWaiting = False
-            elif
+            elif vd.keystrokes in vd.allPrefixes:
                 prefixWaiting = True
             else:
                 vd.status('no command for "%s"' % (vd.keystrokes))
@@ -249,12 +254,15 @@ def mainloop(vd, scr):
         # no idle redraw unless background threads are running
         time.sleep(0) # yield to other threads which may not have started yet
         if vd._nextCommands:
-
+            if vd.options.replay_wait > 0:
+                vd.curses_timeout = int(vd.options.replay_wait*1000)
+            else:
+                vd.curses_timeout = nonidle_timeout
         elif vd.unfinishedThreads:
             vd.curses_timeout = nonidle_timeout
         else:
             numTimeouts += 1
-            if vd.timeouts_before_idle >= 0 and numTimeouts
+            if vd.timeouts_before_idle >= 0 and numTimeouts >= vd.timeouts_before_idle:
                 vd.curses_timeout = -1
             else:
                 vd.curses_timeout = nonidle_timeout