visidata 3.0.1__py3-none-any.whl → 3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. visidata/__init__.py +12 -10
  2. visidata/_input.py +208 -199
  3. visidata/_open.py +4 -1
  4. visidata/_types.py +4 -3
  5. visidata/aggregators.py +88 -39
  6. visidata/apps/vdsql/_ibis.py +9 -11
  7. visidata/apps/vdsql/clickhouse.py +2 -2
  8. visidata/apps/vdsql/snowflake.py +1 -1
  9. visidata/apps/vgit/status.py +1 -1
  10. visidata/basesheet.py +11 -4
  11. visidata/canvas.py +66 -24
  12. visidata/clipboard.py +13 -6
  13. visidata/cliptext.py +7 -6
  14. visidata/cmdlog.py +40 -27
  15. visidata/column.py +14 -49
  16. visidata/ddw/regex.ddw +3 -2
  17. visidata/deprecated.py +14 -2
  18. visidata/desktop/visidata.desktop +2 -2
  19. visidata/editor.py +1 -0
  20. visidata/errors.py +1 -1
  21. visidata/experimental/sort_selected.py +54 -0
  22. visidata/expr.py +69 -18
  23. visidata/features/change_precision.py +1 -3
  24. visidata/features/cmdpalette.py +23 -4
  25. visidata/features/colorsheet.py +1 -1
  26. visidata/features/dedupe.py +3 -3
  27. visidata/features/go_col.py +71 -0
  28. visidata/features/graph_seaborn.py +1 -1
  29. visidata/features/join.py +20 -10
  30. visidata/features/layout.py +18 -5
  31. visidata/features/ping.py +16 -12
  32. visidata/features/regex.py +5 -5
  33. visidata/features/slide.py +15 -17
  34. visidata/features/status_source.py +3 -1
  35. visidata/features/sysedit.py +1 -1
  36. visidata/features/transpose.py +2 -1
  37. visidata/features/type_ipaddr.py +2 -4
  38. visidata/features/unfurl.py +1 -0
  39. visidata/form.py +2 -2
  40. visidata/freqtbl.py +16 -11
  41. visidata/fuzzymatch.py +1 -0
  42. visidata/graph.py +173 -12
  43. visidata/guide.py +61 -25
  44. visidata/guides/ClipboardGuide.md +48 -0
  45. visidata/guides/ColumnsGuide.md +52 -0
  46. visidata/guides/CommandsSheet.md +28 -0
  47. visidata/guides/DirSheet.md +34 -0
  48. visidata/guides/ErrorsSheet.md +17 -0
  49. visidata/guides/FrequencyTable.md +42 -0
  50. visidata/guides/GrepSheet.md +28 -0
  51. visidata/guides/JsonSheet.md +38 -0
  52. visidata/guides/MacrosSheet.md +19 -0
  53. visidata/guides/MeltGuide.md +52 -0
  54. visidata/guides/MemorySheet.md +7 -0
  55. visidata/guides/MenuGuide.md +26 -0
  56. visidata/guides/ModifyGuide.md +38 -0
  57. visidata/guides/PivotGuide.md +71 -0
  58. visidata/guides/RegexGuide.md +107 -0
  59. visidata/guides/SelectionGuide.md +44 -0
  60. visidata/guides/SlideGuide.md +26 -0
  61. visidata/guides/SortGuide.md +0 -0
  62. visidata/guides/SplitpaneGuide.md +15 -0
  63. visidata/guides/TypesSheet.md +43 -0
  64. visidata/guides/XsvGuide.md +36 -0
  65. visidata/help.py +6 -6
  66. visidata/hint.py +2 -1
  67. visidata/indexsheet.py +2 -2
  68. visidata/interface.py +13 -14
  69. visidata/keys.py +4 -1
  70. visidata/loaders/api_airtable.py +1 -1
  71. visidata/loaders/archive.py +1 -1
  72. visidata/loaders/csv.py +9 -5
  73. visidata/loaders/eml.py +11 -6
  74. visidata/loaders/f5log.py +1 -0
  75. visidata/loaders/fec.py +18 -42
  76. visidata/loaders/fixed_width.py +19 -3
  77. visidata/loaders/grep.py +121 -0
  78. visidata/loaders/html.py +1 -0
  79. visidata/loaders/http.py +6 -1
  80. visidata/loaders/json.py +22 -4
  81. visidata/loaders/jsonla.py +8 -2
  82. visidata/loaders/mailbox.py +1 -0
  83. visidata/loaders/markdown.py +25 -6
  84. visidata/loaders/msgpack.py +19 -0
  85. visidata/loaders/npy.py +0 -1
  86. visidata/loaders/odf.py +18 -4
  87. visidata/loaders/orgmode.py +1 -1
  88. visidata/loaders/rec.py +6 -4
  89. visidata/loaders/sas.py +11 -4
  90. visidata/loaders/scrape.py +0 -1
  91. visidata/loaders/texttables.py +2 -0
  92. visidata/loaders/tsv.py +24 -7
  93. visidata/loaders/unzip_http.py +127 -3
  94. visidata/loaders/vds.py +4 -0
  95. visidata/loaders/vdx.py +1 -1
  96. visidata/loaders/xlsx.py +5 -0
  97. visidata/loaders/xml.py +2 -1
  98. visidata/macros.py +14 -31
  99. visidata/main.py +20 -15
  100. visidata/mainloop.py +17 -6
  101. visidata/man/vd.1 +74 -39
  102. visidata/man/vd.txt +73 -41
  103. visidata/memory.py +16 -5
  104. visidata/menu.py +14 -3
  105. visidata/metasheets.py +5 -6
  106. visidata/modify.py +4 -4
  107. visidata/mouse.py +2 -0
  108. visidata/movement.py +14 -28
  109. visidata/optionssheet.py +3 -5
  110. visidata/path.py +59 -37
  111. visidata/pivot.py +8 -5
  112. visidata/pyobj.py +63 -9
  113. visidata/rename_col.py +18 -1
  114. visidata/save.py +16 -9
  115. visidata/search.py +4 -4
  116. visidata/selection.py +10 -56
  117. visidata/settings.py +37 -35
  118. visidata/sheets.py +189 -118
  119. visidata/shell.py +23 -14
  120. visidata/sidebar.py +71 -16
  121. visidata/sort.py +21 -6
  122. visidata/statusbar.py +42 -5
  123. visidata/stored_list.py +5 -2
  124. visidata/tests/conftest.py +1 -0
  125. visidata/tests/test_commands.py +9 -1
  126. visidata/tests/test_completer.py +18 -0
  127. visidata/tests/test_edittext.py +3 -2
  128. visidata/text_source.py +7 -4
  129. visidata/textsheet.py +20 -6
  130. visidata/themes/ascii8.py +9 -6
  131. visidata/themes/asciimono.py +14 -4
  132. visidata/threads.py +13 -3
  133. visidata/tuiwin.py +5 -1
  134. visidata/type_currency.py +1 -2
  135. visidata/type_date.py +6 -1
  136. visidata/undo.py +10 -13
  137. visidata/utils.py +9 -3
  138. visidata/vdobj.py +21 -1
  139. visidata/wrappers.py +9 -1
  140. {visidata-3.0.1.data → visidata-3.1.data}/data/share/applications/visidata.desktop +2 -2
  141. {visidata-3.0.1.data → visidata-3.1.data}/data/share/man/man1/vd.1 +74 -39
  142. {visidata-3.0.1.data → visidata-3.1.data}/data/share/man/man1/visidata.1 +74 -39
  143. {visidata-3.0.1.dist-info → visidata-3.1.dist-info}/METADATA +33 -5
  144. visidata-3.1.dist-info/RECORD +284 -0
  145. visidata-3.0.1.dist-info/RECORD +0 -258
  146. {visidata-3.0.1.data → visidata-3.1.data}/scripts/vd +0 -0
  147. {visidata-3.0.1.data → visidata-3.1.data}/scripts/vd2to3.vdx +0 -0
  148. {visidata-3.0.1.dist-info → visidata-3.1.dist-info}/LICENSE.gpl3 +0 -0
  149. {visidata-3.0.1.dist-info → visidata-3.1.dist-info}/WHEEL +0 -0
  150. {visidata-3.0.1.dist-info → visidata-3.1.dist-info}/entry_points.txt +0 -0
  151. {visidata-3.0.1.dist-info → visidata-3.1.dist-info}/top_level.txt +0 -0
visidata/interface.py CHANGED
@@ -10,12 +10,12 @@ vd.theme_option('disp_error_val', '', 'displayed contents for computation except
 vd.theme_option('disp_ambig_width', 1, 'width to use for unicode chars marked ambiguous')
 
 vd.theme_option('disp_pending', '', 'string to display in pending cells')
-vd.theme_option('note_pending', '', 'note to display for pending cells')
-vd.theme_option('note_format_exc', '?', 'cell note for an exception during formatting')
-vd.theme_option('note_getter_exc', '!', 'cell note for an exception during computation')
-vd.theme_option('note_type_exc', '!', 'cell note for an exception during type conversion')
+vd.theme_option('disp_note_pending', ':', 'note to display for pending cells')
+vd.theme_option('disp_note_fmtexc', '?', 'cell note for an exception during formatting')
+vd.theme_option('disp_note_getexc', '!', 'cell note for an exception during computation')
+vd.theme_option('disp_note_typeexc', '!', 'cell note for an exception during type conversion')
 
-vd.theme_option('color_note_pending', 'bold magenta', 'color of note in pending cells')
+vd.theme_option('color_note_pending', 'bold green', 'color of note in pending cells')
 vd.theme_option('color_note_type', '226 yellow', 'color of cell note for non-str types in anytype columns')
 vd.theme_option('color_note_row', '220 yellow', 'color of row note on left edge')
 vd.option('scroll_incr', -3, 'amount to scroll with scrollwheel')
@@ -35,24 +35,23 @@ vd.theme_option('disp_selected_note', '•', '') #
 vd.theme_option('disp_sort_asc', '↑↟⇞⇡⇧⇑', 'characters for ascending sort') # ↑▲↟↥↾↿⇞⇡⇧⇈⤉⤒⥔⥘⥜⥠⍏˄ˆ
 vd.theme_option('disp_sort_desc', '↓↡⇟⇣⇩⇓', 'characters for descending sort') # ↓▼↡↧⇂⇃⇟⇣⇩⇊⤈⤓⥕⥙⥝⥡⍖˅ˇ
 vd.theme_option('color_default', 'white on black', 'the default fg and bg colors')
-vd.theme_option('color_default_hdr', 'bold', 'color of the column headers')
-vd.theme_option('color_bottom_hdr', 'underline', 'color of the bottom header row')
+vd.theme_option('color_default_hdr', 'bold white on black', 'color of the column headers')
+vd.theme_option('color_bottom_hdr', 'underline white on black', 'color of the bottom header row')
 vd.theme_option('color_current_row', 'reverse', 'color of the cursor row')
-vd.theme_option('color_current_col', 'bold', 'color of the cursor column')
+vd.theme_option('color_current_col', 'bold on 232', 'color of the cursor column')
 vd.theme_option('color_current_cell', '', 'color of current cell, if different from color_current_row+color_current_col')
 vd.theme_option('color_current_hdr', 'bold reverse', 'color of the header for the cursor column')
-vd.theme_option('color_column_sep', '246 blue', 'color of column separators')
+vd.theme_option('color_column_sep', 'white on black', 'color of column separators')
 vd.theme_option('color_key_col', '81 cyan', 'color of key columns')
 vd.theme_option('color_hidden_col', '8', 'color of hidden columns on metasheets')
 vd.theme_option('color_selected_row', '215 yellow', 'color of selected rows')
-vd.theme_option('color_clickable', 'underline', 'color of internally clickable item')
+vd.theme_option('color_clickable', 'bold', 'color of internally clickable item')
 vd.theme_option('color_code', 'bold white on 237', 'color of code sample')
-vd.theme_option('color_heading', 'bold 200', 'color of header')
+vd.theme_option('color_heading', 'bold black on yellow', 'color of header')
 vd.theme_option('color_guide_unwritten', '243 on black', 'color of unwritten guides in GuideGuide')
 
 vd.theme_option('force_256_colors', False, 'use 256 colors even if curses reports fewer')
 
 vd.option('quitguard', False, 'confirm before quitting modified sheet')
-vd.option('default_width', 20, 'default column width', replay=True, max_help=1) # TODO: make not replay and remove from markdown saver
-vd.option('default_height', 4, 'default column height', max_help=-1)
-vd.option('textwrap_cells', True, 'wordwrap text for multiline rows', max_help=1)
+vd.option('default_width', 20, 'default column width', replay=True) # TODO: make not replay and remove from markdown saver
+vd.option('default_height', 4, 'default column height')
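The note_* cell-note options are renamed to disp_note_*, and several default colors change (pending notes go from magenta to green, the heading color from 'bold 200' to 'bold black on yellow'). For anyone who prefers the old look, a hypothetical ~/.visidatarc override might look like the following; the option names come from the hunk above and the values from the removed lines:

    # hypothetical ~/.visidatarc fragment restoring a few 3.0.1 defaults
    options.color_note_pending = 'bold magenta'
    options.color_heading = 'bold 200'
    options.color_clickable = 'underline'
    options.color_column_sep = '246 blue'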
visidata/keys.py CHANGED
@@ -47,6 +47,9 @@ visidata.vd.prettykeys_trdict = {
     'KEY_SPREVIOUS': 'Shift+PgUp',
     'KEY_SNEXT': 'Shift+PgDn',
 
+    'kxIN': 'FocusIn',
+    'kxOUT': 'FocusOut',
+
     'KEY_BACKSPACE': 'Bksp',
     'BUTTON1_RELEASED': 'LeftBtnUp',
     'BUTTON2_RELEASED': 'MiddleBtnUp',
@@ -72,7 +75,7 @@ for i in range(1, 13):
 
 @visidata.VisiData.api
 def prettykeys(vd, key):
-    if not key:
+    if not key or '+' in key[:-1]:
         return key
 
     for k, v in vd.prettykeys_trdict.items():
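The prettykeys() change makes already-prettified chords (anything with a '+' before the final character) pass through untouched instead of being re-translated. An illustrative sketch of the intended behavior, not taken from the test suite:

    from visidata import vd

    # illustrative only, based on the prettykeys_trdict entries shown above
    vd.prettykeys('KEY_SPREVIOUS')   # -> 'Shift+PgUp' (translated via the table)
    vd.prettykeys('Shift+PgUp')      # -> 'Shift+PgUp' (unchanged: '+' occurs before its last character)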
visidata/loaders/api_airtable.py CHANGED
@@ -1,7 +1,7 @@
 import re
 import os
 
-from visidata import vd, date, asyncthread, VisiData, Progress, Sheet, Column, ItemColumn, deduceType, TypedWrapper, setitem
+from visidata import vd, date, asyncthread, VisiData, Progress, Sheet, Column, ItemColumn, deduceType, TypedWrapper, setitem, AttrDict
 
 
 vd.option('airtable_auth_token', '', 'Airtable API key from https://airtable.com/account')
visidata/loaders/archive.py CHANGED
@@ -15,7 +15,7 @@ def guess_zip(vd, p):
 
 @VisiData.api
 def guess_tar(vd, p):
-    if tarfile.is_tarfile(p.fp):
+    if tarfile.is_tarfile(p.open_bytes()):
         return dict(filetype='tar')
 
 @VisiData.api
visidata/loaders/csv.py CHANGED
@@ -3,7 +3,9 @@ from visidata import TypedExceptionWrapper, Progress
 
 vd.option('csv_dialect', 'excel', 'dialect passed to csv.reader', replay=True)
 vd.option('csv_delimiter', ',', 'delimiter passed to csv.reader', replay=True)
+vd.option('csv_doublequote', True, 'quote-doubling setting passed to csv.reader', replay=True)
 vd.option('csv_quotechar', '"', 'quotechar passed to csv.reader', replay=True)
+vd.option('csv_quoting', 0, 'quoting style passed to csv.reader and csv.writer', replay=True)
 vd.option('csv_skipinitialspace', True, 'skipinitialspace passed to csv.reader', replay=True)
 vd.option('csv_escapechar', None, 'escapechar passed to csv.reader', replay=True)
 vd.option('csv_lineterminator', '\r\n', 'lineterminator passed to csv.writer', replay=True)
@@ -14,7 +16,10 @@ vd.option('safety_first', False, 'sanitize input/output to handle edge cases, wi
 def guess_csv(vd, p):
     import csv
     csv.field_size_limit(2**31-1) #288 Windows has max 32-bit
-    line = next(p.open())
+    try:
+        line = next(p.open())
+    except StopIteration:
+        return
     if ',' in line:
         dialect = csv.Sniffer().sniff(line)
         r = dict(filetype='csv', _likelihood=0)
@@ -41,7 +46,7 @@ class CsvSheet(SequenceSheet):
         import csv
         csv.field_size_limit(2**31-1) #288 Windows has max 32-bit
 
-        with self.open_text_source() as fp:
+        with self.open_text_source(newline='') as fp:
             if options.safety_first:
                 rdr = csv.reader(removeNulls(fp), **options.getall('csv_'))
             else:
@@ -69,11 +74,10 @@ def save_csv(vd, p, sheet):
     if ''.join(colnames):
         cw.writerow(colnames)
 
-    with Progress(gerund='saving'):
+    with Progress(gerund='saving', total=sheet.nRows) as prog:
         for dispvals in sheet.iterdispvals(format=True):
             cw.writerow(dispvals.values())
-
-CsvSheet.options.regex_skip = '^#.*'
+            prog.addProgress(1)
 
 vd.addGlobals({
     'CsvSheet': CsvSheet
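The new csv_doublequote and csv_quoting options are forwarded to csv.reader along with the other csv_* options via options.getall('csv_'). A rough sketch of the reader call implied by the defaults above; the file name is invented and the prefix-stripping behavior of getall() is assumed:

    import csv

    with open('example.csv', newline='') as fp:    # newline='' matches open_text_source(newline='')
        rdr = csv.reader(fp, dialect='excel', delimiter=',', quotechar='"',
                         doublequote=True,                 # csv_doublequote=True
                         quoting=csv.QUOTE_MINIMAL,        # csv_quoting=0
                         skipinitialspace=True, escapechar=None)
        rows = list(rdr)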
visidata/loaders/eml.py CHANGED
@@ -14,15 +14,19 @@ class EmailSheet(TableSheet):
         Column('payload', type=vlen, getter=lambda c,r: r.get_payload(decode=False)),
     ]
     def iterload(self):
-        import email
+        import email.parser
         parser = email.parser.Parser()
         with self.source.open(encoding='utf-8') as fp:
             yield from parser.parse(fp).walk()
 
 @EmailSheet.api
 def extract_part(sheet, givenpath, part):
-    with givenpath.open_bytes(mode='w') as fp:
-        fp.write(part.get_payload(decode=True))
+    payload = part.get_payload(decode=True)
+    if payload is None:
+        vd.warning('empty payload')
+    else:
+        with givenpath.open_bytes(mode='w') as fp:
+            fp.write(payload)
 
 @EmailSheet.api
 def extract_parts(sheet, givenpath, *parts):
@@ -37,10 +41,11 @@ def extract_parts(sheet, givenpath, *parts):
         try:
             os.makedirs(givenpath, exist_ok=True)
         except FileExistsError:
-            pass
+            vd.debug(f'{givenpath} already exists')
 
-        for part in parts:
-            vd.execAsync(sheet.extract_part, givenpath / part.get_filename(), part)
+        for i, part in enumerate(parts):
+            fn = part.get_filename() or f'part{i}'
+            vd.execAsync(sheet.extract_part, givenpath / fn, part)
     elif len(parts) == 1:
         vd.execAsync(sheet.extract_part, givenpath, parts[0])
     else:
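The new None check in extract_part matters because Message.get_payload(decode=True) returns None for multipart containers (and anything else without a decodable body). A small standalone illustration, not taken from the loader:

    import email

    raw = (
        'Content-Type: multipart/mixed; boundary="b"\n'
        '\n'
        '--b\n'
        'Content-Type: text/plain\n'
        '\n'
        'hello\n'
        '--b--\n'
    )
    msg = email.message_from_string(raw)
    print(msg.is_multipart())            # True
    print(msg.get_payload(decode=True))  # None -- the case extract_part() now warns about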
visidata/loaders/f5log.py CHANGED
@@ -1092,6 +1092,7 @@ class F5LogSheet(Sheet):
         object_regex = None
 
         try:
+            import zoneinfo
             self._log_tz = zoneinfo.ZoneInfo(
                 vd.options.get("f5log_log_timzeone", "UTC")
             )
visidata/loaders/fec.py CHANGED
@@ -37,16 +37,14 @@ Thanks to all who have contributed to those projects.
 from copy import copy
 from visidata import (
     vd,
+    VisiData,
     Path,
     Sheet,
     TextSheet,
-    Column,
     ColumnAttr,
     ColumnItem,
-    ENTER,
     asyncthread,
     Progress,
-    addGlobals,
 )
 
 class DiveSheet(Sheet):
@@ -111,10 +109,10 @@ class DiveSheet(Sheet):
             vd.warning("Can't dive on lists with heterogenous item types.")
             return False
 
-    def dive(self):
+    def openRow(self, row):
         if self.is_keyvalue:
-            cell = self.cursorRow["value"]
-            name = vd.joinSheetnames(self.name, self.cursorRow["key"])
+            cell = row["value"]
+            name = vd.joinSheetnames(self.name, row["key"])
 
             if isinstance(cell, (list, dict)):
                 vs = self.__class__(name, source = cell)
@@ -123,19 +121,13 @@ class DiveSheet(Sheet):
                 return
         else:
             name = vd.joinSheetnames(self.name, "row")
-            vs = self.__class__(name, source = self.cursorRow)
+            vs = self.__class__(name, source = self.row)
 
         success = vs.reload()
         if success == False:
-            return
-
-        vd.push(vs)
+            vd.fail('could not reload new sheet')
+        return vs
 
-DiveSheet.addCommand(
-    ENTER,
-    'dive-row',
-    'vd.sheet.dive()'
-)
 
 class FECItemizationSheet(Sheet):
@@ -159,19 +151,9 @@ class FECItemizationSheet(Sheet):
             self.columns.clear()
             for i, name in enumerate(row.keys()):
                 self.addColumn(ColumnItem(name))
-    def dive(self):
-        vs = DiveSheet(
-            vd.joinSheetnames(self.name, "detail"),
-            source = self.cursorRow
-        )
-        vs.reload()
-        vd.push(vs)
-
-FECItemizationSheet.addCommand(
-    ENTER,
-    'dive-row',
-    'vd.sheet.dive()'
-)
+
+    def openRow(self, row):
+        return row
 
 class FECScheduleSheet(Sheet):
@@ -199,11 +181,8 @@ class FECScheduleSheet(Sheet):
             )
             self.addRow(vs)
 
-FECScheduleSheet.addCommand(
-    ENTER,
-    'dive-row',
-    'vd.push(cursorRow)'
-)
+    def openRow(self, row):
+        return row
 
 COMPONENT_SHEET_CLASSES = {
@@ -230,7 +209,7 @@ class FECFiling(Sheet):
     @asyncthread
     def reload(self):
        from fecfile import fecparser
-       self.rows = []
+       self.rows = [] # rowdef: Sheet, of a type from COMPONENT_SHEET_CLASSES.values()
 
        row_dict = { }
        itemization_subsheets = {}
@@ -310,16 +289,13 @@
            sheet_row.source[form_type].append(item.data)
            sheet_row.size += 1
 
-FECFiling.addCommand(
-    ENTER,
-    'dive-row',
-    'vd.push(cursorRow)'
-)
+    def openRow(self, row):
+        return row
 
-def open_fec(p):
+@VisiData.api
+def open_fec(vd, p):
     return FECFiling(p.base_stem, source=p)
 
-addGlobals({
-    "open_fec": open_fec,
+vd.addGlobals({
     "DiveSheet": DiveSheet
 })
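The fec loader drops its per-sheet ENTER/'dive-row' addCommand boilerplate in favor of the openRow() hook, which the built-in row-opening command (Enter) already calls. A schematic sketch of that convention, not the actual visidata internals:

    from visidata import Sheet, PyobjSheet

    # sketch: with openRow() defined, the stock Enter command pushes whatever it returns
    class ExampleDive(Sheet):
        def openRow(self, row):
            # in FECFiling/FECScheduleSheet the row is itself a Sheet, so it is returned directly;
            # other sheets can wrap the row instead, e.g. PyobjSheet(self.name + '_row', source=row)
            return row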
visidata/loaders/fixed_width.py CHANGED
@@ -1,5 +1,5 @@
 
-from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet
+from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet, dispwidth
 
 
 vd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')
@@ -9,6 +9,22 @@ vd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is
 def open_fixed(vd, p):
     return FixedWidthColumnsSheet(p.base_stem, source=p, headerlines=[])
 
+@Column.api
+def getMaxDataWidth(col, rows): #2255 need real max width for fixed width saver
+    '''Return the maximum length of any cell in column or its header,
+    even if wider than window. (Slow for large cells!)'''
+
+    w = 0
+    nlen = dispwidth(col.name)
+    if len(rows) > 0:
+        w_max = 0
+        for r in rows:
+            row_w = dispwidth(col.getDisplayValue(r))
+            if w_max < row_w:
+                w_max = row_w
+        w = w_max
+    return max(w, nlen)
+
 class FixedWidthColumn(Column):
     def __init__(self, name, i, j, **kwargs):
         super().__init__(name, **kwargs)
@@ -38,7 +54,7 @@ def columnize(rows):
     # collapse fields
     for i in allNonspaces:
         if i > prev+1:
-            yield colstart, i
+            yield colstart, prev+1 #2255
             colstart = i
         prev = i
 
@@ -84,7 +100,7 @@ def save_fixed(vd, p, *vsheets):
        widths = {} # Column -> width:int
        # headers
        for col in Progress(sheet.visibleCols, gerund='sizing'):
-           widths[col] = col.getMaxWidth(sheet.rows) #1849
+           widths[col] = col.getMaxDataWidth(sheet.rows) #1849 #2255
            fp.write(('{0:%s} ' % widths[col]).format(col.name))
        fp.write('\n')
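Unlike getMaxWidth(), the new getMaxDataWidth() ignores the window width and measures the widest formatted cell plus the header, which is what the fixed-width saver needs. A condensed equivalent of the loop above, for reference only:

    from visidata import Column, dispwidth

    # condensed equivalent of Column.getMaxDataWidth() above (reference only)
    def max_data_width(col: Column, rows) -> int:
        return max([dispwidth(col.getDisplayValue(r)) for r in rows] + [dispwidth(col.name)])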
 
visidata/loaders/grep.py ADDED
@@ -0,0 +1,121 @@
+#!/usr/bin/python3
+
+from visidata import vd, VisiData, JsonSheet, ColumnAttr, Path, ENTER, AttrDict, ExpectedException, stacktrace, TypedExceptionWrapper
+import json
+import os
+from os import linesep
+
+@VisiData.api
+def open_grep(vd, p):
+    return GrepSheet(p.base_stem, source=p)
+
+@VisiData.api
+def save_grep(vd, p, *vsheets):
+    vd.save_jsonl(p, *vsheets)
+
+def format_row(rowdict):
+    # handle rows that are output of 'rg --json'
+    if 'type' in rowdict and rowdict['type'] == 'match':
+        match_data = rowdict['data']
+        d = {
+            'file': match_data['path']['text'],
+            'line_no': match_data['line_number'],
+            'text': match_data['lines']['text'].rstrip(linesep)
+        }
+        return AttrDict(d)
+    # handle a .grep file that was saved by visidata, or
+    # ripgrep rows that were preprocessed by jq: 'rg --json |jq [...]'
+    if 'line_no' in rowdict:
+        rowdict['text'] = rowdict['text'].rstrip(linesep)
+        return AttrDict(rowdict)
+    return None
+
+class GrepSheet(JsonSheet):
+    # The input file is in JSON Lines format, where each line describes a JSON object.
+    # The JSON objects are either in the ripgrep grep_printer format:
+    # https://docs.rs/grep-printer/0.1.0/grep_printer/struct.JSON.html
+    # or contain the keys 'file', 'line_no', and 'text'.
+    _rowtype = 'lines' # rowdef: AttrDict
+
+    columns = [
+        ColumnAttr('file', type=str),
+        ColumnAttr('line_no', type=int),
+        ColumnAttr('text', type=str)
+    ]
+    nKeys = 2
+    def iterload(self):
+        with self.open_text_source() as fp:
+            for L in fp:
+                try:
+                    if not L: # skip blank lines
+                        continue
+                    json_obj = json.loads(L)
+                    if not isinstance(json_obj, dict):
+                        vd.fail(f'line does not hold a JSON object: {L}')
+                    row = format_row(json_obj)
+                    if not row: #skip lines that do not contain match data
+                        continue
+                    yield row
+                except ValueError as e:
+                    if self.rows: # if any rows have been added already
+                        e.stacktrace = stacktrace()
+                        yield TypedExceptionWrapper(json.loads, L, exception=e) # an error on one line
+                    else:
+                        # If input is not JSON, parse it as output of 'grep -n': file:line_no:text
+                        # If that does not parse, parse it as output of typical 'grep': file:text
+                        with self.open_text_source() as fp:
+                            try:
+                                extract_line_no = True
+                                for L in fp:
+                                    L = L.rstrip(linesep)
+                                    sep1 = L.index(':')
+                                    if extract_line_no:
+                                        sep2 = L.find(':', sep1+1)
+                                        try:
+                                            if sep2 == -1: raise ValueError
+                                            line_no = int(L[sep1+1:sep2]) # may raise ValueError
+                                            if line_no < 1: raise ValueError
+                                            text = L[sep2+1:]
+                                        except ValueError: # if we can't find a line_no that is > 0, with a separator after it
+                                            extract_line_no = False
+                                            line_no = None
+                                            text = L[sep1+1:]
+                                    else:
+                                        text = L[sep1+1:]
+                                    yield AttrDict({'file': L[:sep1],
+                                                    'line_no': line_no,
+                                                    'text': text})
+                            except ValueError:
+                                vd.fail('file is not grep output')
+                                break
+
+    def afterLoad(self):
+        if self.nRows == 0:
+            vd.status('no grep results found in input data')
+
+@GrepSheet.api
+def sysopen_row(sheet, row):
+    '''Open the file in an editor at the specific line.'''
+    if sheet.nRows == 0: return
+    try:
+        given = row.file
+        if vd.options.grep_base_dir and not os.path.isabs(given):
+            given = vd.options.grep_base_dir + os.sep + row.file
+        p = Path(given)
+    except TypeError:
+        vd.fail(f'cannot open row: {given}')
+    if p.exists():
+        # works for vim and emacsclient
+        if row.line_no is not None:
+            vd.launchEditor(p.given, f'+{row.line_no:d}')
+        else:
+            vd.launchEditor(p.given)
+    else:
+        vd.fail(f'cannot find file: {p.given}')
+
+GrepSheet.addCommand(ENTER, 'sysopen-row', 'sysopen_row(cursorRow)', 'open current file in external $EDITOR, at the line')
+
+vd.addGlobals({
+    'GrepSheet': GrepSheet,
+})
+vd.option('grep_base_dir', None, 'base directory for relative paths opened with sysopen-row')
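The new grep loader accepts three input shapes, normalized by format_row() and the grep -n fallback above. Illustrative records of each kind (file names and contents invented for the example):

    import json

    # typical use (assumption): rg --json PATTERN | vd -f grep

    # 1. ripgrep JSON ('rg --json'): only type=match records are kept
    rg_line = '{"type": "match", "data": {"path": {"text": "foo.py"}, "line_number": 3, "lines": {"text": "import os\\n"}}}'
    # format_row(json.loads(rg_line)) -> AttrDict({'file': 'foo.py', 'line_no': 3, 'text': 'import os'})

    # 2. pre-extracted objects (a .grep file saved by VisiData, or rg --json piped through jq)
    jsonl_line = '{"file": "foo.py", "line_no": 3, "text": "import os"}'

    # 3. plain grep output, with or without -n
    grep_line = 'foo.py:3:import os'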
visidata/loaders/html.py CHANGED
@@ -2,6 +2,7 @@ import html
 import urllib.parse
 import copy
 import itertools
+import re
 
 from visidata import VisiData, vd, Sheet, options, Column, Progress, IndexSheet, ItemColumn
 
visidata/loaders/http.py CHANGED
@@ -49,7 +49,12 @@ def openurl_http(vd, path, filetype=None):
         ctx.verify_mode = ssl.CERT_NONE
 
     req = urllib.request.Request(path.given, **vd.options.getall('http_req_'))
-    response = urllib.request.urlopen(req, context=ctx)
+    try:
+        response = urllib.request.urlopen(req, context=ctx)
+    except urllib.error.HTTPError as e:
+        vd.fail(f'cannot open URL: HTTP Error {e.code}: {e.reason}')
+    except urllib.error.URLError as e:
+        vd.fail(f'cannot open URL: {e.reason}')
 
     filetype = filetype or vd.guessFiletype(path, response, funcprefix='guessurl_').get('filetype') # try guessing by url
     filetype = filetype or vd.guessFiletype(path, funcprefix='guess_').get('filetype') # try guessing by contents
visidata/loaders/json.py CHANGED
@@ -1,4 +1,5 @@
 import json
+from collections import Counter
 
 from visidata import vd, date, anytype, VisiData, PyobjSheet, AttrDict, stacktrace, TypedExceptionWrapper, AlwaysDict, ItemColumn, wrapply, TypedWrapper, Progress, Sheet
 
@@ -10,7 +11,10 @@ vd.option('default_colname', '', 'column name to use for non-dict rows')
 @VisiData.api
 def guess_json(vd, p):
     with p.open(encoding=vd.options.encoding) as fp:
-        line = next(fp)
+        try:
+            line = next(fp)
+        except StopIteration:
+            return
 
     line = line.strip()
 
@@ -57,7 +61,7 @@ class JsonSheet(Sheet):
                        yield TypedExceptionWrapper(json.loads, L, exception=e) # an error on one line
                    else:
                        with self.open_text_source() as fp:
-                           ret = json.load(fp)
+                           ret = json.load(fp, object_hook=AttrDict)
                        if isinstance(ret, list):
                            yield from ret
                        else:
@@ -81,7 +85,7 @@ class JsonSheet(Sheet):
 
         for k in row:
             if k not in self._knownKeys:
-                c = ItemColumn(k, type=float if isinstance(row[k], (float, int)) else anytype)
+                c = ItemColumn(k)
                 self.addColumn(c)
 
         return ret
@@ -90,7 +94,7 @@ class JsonSheet(Sheet):
         return AttrDict(fields)
 
     def openRow(self, row):
-        return PyobjSheet("%s[%s]" % (self.name, self.keystr(row)), source=row)
+        return PyobjSheet("%s[%s]" % (self.name, self.rowname(row)), source=row)
 
 JsonSheet.init('_knownKeys', set, copy=True) # set of row keys already seen
 
@@ -139,6 +143,13 @@ def save_json(vd, p, *vsheets):
 
     jsonenc = _vjsonEncoder(indent=indent, sort_keys=vs.options.json_sort_keys, ensure_ascii=vs.options.json_ensure_ascii)
 
+    dupnames = find_duplicates([vs.name for vs in vsheets])
+    for name in dupnames:
+        vd.warning('json cannot save sheet with duplicated name: ' + name)
+    for vs in vsheets:
+        dupnames = find_duplicates([c.name for c in vs.visibleCols])
+        for name in dupnames:
+            vd.warning('json cannot save column with duplicated name: ' + name)
     if len(vsheets) == 1:
         fp.write('[\n')
         vs = vsheets[0]
@@ -161,6 +172,9 @@ def save_json(vd, p, *vsheets):
 def write_jsonl(vs, fp):
     vcols = vs.visibleCols
     jsonenc = _vjsonEncoder()
+    dupnames = find_duplicates([c.name for c in vcols])
+    for name in dupnames:
+        vd.warning('json cannot save column with duplicated name: ' + name)
     with Progress(gerund='saving'):
         for i, row in enumerate(vs.iterrows()):
             rowdict = _rowdict(vcols, row, keep_nulls=(i==0))
@@ -177,6 +191,8 @@ def write_jsonl(vs, fp):
 @VisiData.api
 def save_jsonl(vd, p, *vsheets):
     with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:
+        if len(vsheets) > 1:
+            vd.warning('jsonl cannot separate sheets yet. Concatenating all rows.')
         for vs in vsheets:
             vs.write_jsonl(fp)
 
@@ -186,6 +202,8 @@ def JSON(vd, s:str):
     'Parse `s` as JSON.'
     return json.loads(s)
 
+def find_duplicates(names):
+    return list(colname for colname,count in Counter(names).items() if count > 1)
 
 JsonSheet.options.encoding = 'utf-8'
 JsonSheet.options.regex_skip = r'^(//|#).*'
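Loading a whole JSON document now passes object_hook=AttrDict, so nested objects get the same attribute-style access that JSON Lines rows already had. A standalone illustration, assuming only that AttrDict is a dict subclass with attribute access:

    import json
    from visidata import AttrDict

    ret = json.loads('{"user": {"name": "x", "id": 7}}', object_hook=AttrDict)
    ret.user.name       # 'x' -- every nested object is an AttrDict
    ret['user']['id']   # plain dict access still works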
visidata/loaders/jsonla.py CHANGED
@@ -18,10 +18,16 @@ def guess_jsonla(vd, p):
     '''
 
     with p.open(encoding=vd.options.encoding) as fp:
-        first_line = next(fp)
+        try:
+            first_line = next(fp)
+        except StopIteration:
+            return
 
     if first_line.strip().startswith('['):
-        ret = json.loads(first_line)
+        try:
+            ret = json.loads(first_line)
+        except json.decoder.JSONDecodeError:
+            return
         if isinstance(ret, list) and all(isinstance(v, str) for v in ret):
             return dict(filetype='jsonla')
 
visidata/loaders/mailbox.py CHANGED
@@ -4,6 +4,7 @@ from visidata import VisiData, Sheet, ItemColumn, date, Column
 @VisiData.api
 def open_mbox(vd, p):
     return MboxSheet(p.base_stem, source=p, format='mbox')
+VisiData.open_mailbox = VisiData.open_mbox
 
 @VisiData.api
 def open_maildir(vd, p):
visidata/loaders/markdown.py CHANGED
@@ -1,5 +1,11 @@
 from visidata import VisiData, vd, options, Progress
 
+def markdown_link(s, href):
+    if not href:
+        return s
+
+    return f'[{s}]({href})'
+
 def markdown_escape(s, style='orgmode'):
     if style == 'jira':
         return s
@@ -31,17 +37,30 @@ def write_md(p, *vsheets, md_style='orgmode'):
         if len(vsheets) > 1:
             fp.write('# %s\n\n' % vs.name)
 
-        fp.write(delim + delim.join('%-*s' % (col.width or options.default_width, markdown_escape(col.name, md_style)) for col in vs.visibleCols) + '|\n')
+        hdrs = []
+        for col in vs.visibleCols:
+            if col.name.endswith('_href'):
+                continue
+            hdrs.append('%-*s' % (col.width or options.default_width, markdown_escape(col.name, md_style)))
+
+        fp.write(delim + delim.join(hdrs) + delim + '\n')
+
         if md_style == 'orgmode':
-            fp.write('|' + '|'.join(markdown_colhdr(col) for col in vs.visibleCols) + '|\n')
+            fp.write('|' + '|'.join(markdown_colhdr(col) for col in vs.visibleCols if not col.name.endswith('_href')) + '|\n')
 
         with Progress(gerund='saving'):
             for dispvals in vs.iterdispvals(format=True):
-                s = '|'
+                vals = []
                 for col, val in dispvals.items():
-                    s += '%-*s|' % (col.width or options.default_width, markdown_escape(val, md_style))
-                s += '\n'
-                fp.write(s)
+                    if col.name.endswith('_href'):
+                        continue
+                    val = markdown_escape(val, md_style)
+                    linkcol = vs.colsByName.get(col.name + '_href')
+                    if linkcol:
+                        val = markdown_link(val, dispvals.get(linkcol))
+                    vals.append('%-*s' % (col.width or options.default_width, val))
+                fp.write('|' + '|'.join(vals) + '|\n')
 
         fp.write('\n')
 
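The markdown saver now treats a column named X_href as the link target for column X: the _href column itself is dropped from the output and X's cell is wrapped with markdown_link(). A small illustration of the helper and the resulting cell (column names invented for the example):

    # markdown_link() from the hunk above
    markdown_link('VisiData', 'https://www.visidata.org')   # -> '[VisiData](https://www.visidata.org)'
    markdown_link('VisiData', None)                         # -> 'VisiData' (no link value, plain text)

    # so a sheet with columns 'title' and 'title_href' saves the 'title' cell as
    # [VisiData](https://www.visidata.org) and omits the 'title_href' column entirely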