visidata 2.11.1__py3-none-any.whl → 3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- visidata/__init__.py +72 -91
- visidata/_input.py +259 -42
- visidata/_open.py +84 -29
- visidata/_types.py +21 -3
- visidata/_urlcache.py +17 -4
- visidata/aggregators.py +65 -25
- visidata/apps/__init__.py +0 -0
- visidata/apps/vdsql/__about__.py +8 -0
- visidata/apps/vdsql/__init__.py +5 -0
- visidata/apps/vdsql/__main__.py +27 -0
- visidata/apps/vdsql/_ibis.py +748 -0
- visidata/apps/vdsql/bigquery.py +61 -0
- visidata/apps/vdsql/clickhouse.py +53 -0
- visidata/apps/vdsql/setup.py +40 -0
- visidata/apps/vdsql/snowflake.py +67 -0
- visidata/apps/vgit/__init__.py +13 -0
- {vgit → visidata/apps/vgit}/blame.py +5 -2
- {vgit → visidata/apps/vgit}/branch.py +31 -16
- {vgit → visidata/apps/vgit}/config.py +3 -3
- visidata/apps/vgit/diff.py +169 -0
- visidata/apps/vgit/gitsheet.py +161 -0
- {vgit → visidata/apps/vgit}/grep.py +6 -5
- visidata/apps/vgit/log.py +81 -0
- {vgit → visidata/apps/vgit}/main.py +18 -5
- {vgit → visidata/apps/vgit}/remote.py +8 -4
- visidata/apps/vgit/repos.py +71 -0
- {vgit → visidata/apps/vgit}/setup.py +6 -4
- visidata/apps/vgit/stash.py +69 -0
- visidata/apps/vgit/status.py +204 -0
- {vgit → visidata/apps/vgit}/statusbar.py +2 -0
- visidata/basesheet.py +59 -50
- visidata/canvas.py +208 -93
- visidata/choose.py +6 -6
- visidata/clean_names.py +29 -0
- visidata/clipboard.py +73 -17
- visidata/cliptext.py +220 -46
- visidata/cmdlog.py +88 -114
- visidata/color.py +142 -56
- visidata/column.py +121 -129
- visidata/ddw/input.ddw +74 -79
- visidata/ddw/regex.ddw +57 -0
- visidata/ddwplay.py +33 -14
- visidata/deprecated.py +77 -3
- visidata/desktop/visidata.desktop +7 -0
- visidata/editor.py +12 -6
- visidata/errors.py +5 -1
- visidata/experimental/__init__.py +0 -0
- visidata/experimental/diff_sheet.py +29 -0
- visidata/experimental/digit_autoedit.py +6 -0
- visidata/experimental/gdrive.py +89 -0
- visidata/experimental/google.py +37 -0
- visidata/experimental/gsheets.py +79 -0
- visidata/experimental/live_search.py +37 -0
- visidata/experimental/liveupdate.py +45 -0
- visidata/experimental/mark.py +133 -0
- visidata/experimental/noahs_tapestry/__init__.py +1 -0
- visidata/experimental/noahs_tapestry/tapestry.py +147 -0
- visidata/experimental/rownum.py +73 -0
- visidata/experimental/slide_cells.py +26 -0
- visidata/expr.py +8 -4
- visidata/extensible.py +30 -5
- visidata/features/__init__.py +0 -0
- visidata/features/addcol_audiometadata.py +42 -0
- visidata/features/addcol_histogram.py +34 -0
- visidata/features/canvas_save_svg.py +69 -0
- visidata/features/change_precision.py +46 -0
- visidata/features/cmdpalette.py +163 -0
- visidata/features/colorbrewer.py +363 -0
- visidata/{colorsheet.py → features/colorsheet.py} +17 -16
- visidata/features/command_server.py +105 -0
- visidata/features/currency_to_usd.py +70 -0
- visidata/{customdate.py → features/customdate.py} +2 -0
- visidata/features/dedupe.py +132 -0
- visidata/{describe.py → features/describe.py} +17 -15
- visidata/features/errors_guide.py +26 -0
- visidata/features/expand_cols.py +202 -0
- visidata/{fill.py → features/fill.py} +3 -1
- visidata/{freeze.py → features/freeze.py} +11 -6
- visidata/features/graph_seaborn.py +79 -0
- visidata/features/helloworld.py +10 -0
- visidata/features/hint_types.py +17 -0
- visidata/{incr.py → features/incr.py} +5 -0
- visidata/{join.py → features/join.py} +107 -53
- visidata/features/known_cols.py +21 -0
- visidata/features/layout.py +62 -0
- visidata/{melt.py → features/melt.py} +32 -21
- visidata/features/normcol.py +118 -0
- visidata/features/open_config.py +7 -0
- visidata/features/open_syspaste.py +18 -0
- visidata/features/ping.py +157 -0
- visidata/features/procmgr.py +208 -0
- visidata/features/random_sample.py +6 -0
- visidata/{regex.py → features/regex.py} +47 -31
- visidata/features/reload_every.py +55 -0
- visidata/features/rename_col_cascade.py +30 -0
- visidata/features/scroll_context.py +60 -0
- visidata/features/select_equal_selected.py +11 -0
- visidata/features/setcol_fake.py +65 -0
- visidata/{slide.py → features/slide.py} +75 -21
- visidata/features/sparkline.py +48 -0
- visidata/features/status_source.py +20 -0
- visidata/{sysedit.py → features/sysedit.py} +2 -1
- visidata/features/sysopen_mailcap.py +46 -0
- visidata/features/term_extras.py +13 -0
- visidata/{transpose.py → features/transpose.py} +5 -4
- visidata/features/type_ipaddr.py +73 -0
- visidata/features/type_url.py +11 -0
- visidata/{unfurl.py → features/unfurl.py} +9 -9
- visidata/{window.py → features/window.py} +2 -2
- visidata/form.py +50 -21
- visidata/freqtbl.py +81 -33
- visidata/fuzzymatch.py +414 -0
- visidata/graph.py +105 -33
- visidata/guide.py +180 -0
- visidata/help.py +75 -44
- visidata/hint.py +39 -0
- visidata/indexsheet.py +109 -0
- visidata/input_history.py +55 -0
- visidata/interface.py +58 -0
- visidata/keys.py +17 -16
- visidata/loaders/__init__.py +9 -0
- visidata/loaders/_pandas.py +61 -21
- visidata/loaders/api_airtable.py +70 -0
- visidata/loaders/api_bitio.py +102 -0
- visidata/loaders/api_matrix.py +148 -0
- visidata/loaders/api_reddit.py +306 -0
- visidata/loaders/api_zulip.py +249 -0
- visidata/loaders/archive.py +41 -7
- visidata/loaders/arrow.py +7 -7
- visidata/loaders/conll.py +49 -0
- visidata/loaders/csv.py +25 -7
- visidata/loaders/eml.py +3 -4
- visidata/loaders/f5log.py +1204 -0
- visidata/loaders/fec.py +325 -0
- visidata/loaders/fixed_width.py +2 -4
- visidata/loaders/frictionless.py +3 -3
- visidata/loaders/geojson.py +8 -5
- visidata/loaders/google.py +48 -0
- visidata/loaders/graphviz.py +4 -4
- visidata/loaders/hdf5.py +4 -4
- visidata/loaders/html.py +48 -10
- visidata/loaders/http.py +84 -30
- visidata/loaders/imap.py +20 -10
- visidata/loaders/jrnl.py +52 -0
- visidata/loaders/json.py +83 -29
- visidata/loaders/jsonla.py +74 -0
- visidata/loaders/lsv.py +15 -11
- visidata/loaders/mailbox.py +40 -0
- visidata/loaders/markdown.py +1 -3
- visidata/loaders/mbtiles.py +4 -5
- visidata/loaders/mysql.py +11 -13
- visidata/loaders/npy.py +7 -7
- visidata/loaders/odf.py +4 -1
- visidata/loaders/orgmode.py +428 -0
- visidata/loaders/pandas_freqtbl.py +14 -20
- visidata/loaders/parquet.py +62 -6
- visidata/loaders/pcap.py +3 -3
- visidata/loaders/pdf.py +4 -3
- visidata/loaders/png.py +19 -13
- visidata/loaders/postgres.py +9 -8
- visidata/loaders/rec.py +7 -3
- visidata/loaders/s3.py +342 -0
- visidata/loaders/sas.py +5 -5
- visidata/loaders/scrape.py +186 -0
- visidata/loaders/shp.py +6 -5
- visidata/loaders/spss.py +5 -6
- visidata/loaders/sqlite.py +68 -28
- visidata/loaders/texttables.py +1 -1
- visidata/loaders/toml.py +60 -0
- visidata/loaders/tsv.py +61 -19
- visidata/loaders/ttf.py +19 -7
- visidata/loaders/unzip_http.py +6 -5
- visidata/loaders/usv.py +1 -1
- visidata/loaders/vcf.py +16 -16
- visidata/loaders/vds.py +10 -7
- visidata/loaders/vdx.py +30 -5
- visidata/loaders/xlsb.py +8 -1
- visidata/loaders/xlsx.py +145 -25
- visidata/loaders/xml.py +6 -3
- visidata/loaders/xword.py +4 -4
- visidata/loaders/yaml.py +15 -5
- visidata/macros.py +129 -42
- visidata/main.py +119 -94
- visidata/mainloop.py +101 -155
- visidata/man/parse_options.py +2 -2
- visidata/man/vd.1 +301 -148
- visidata/man/vd.txt +290 -153
- visidata/memory.py +3 -3
- visidata/menu.py +104 -423
- visidata/metasheets.py +59 -141
- visidata/modify.py +78 -23
- visidata/motd.py +3 -3
- visidata/mouse.py +137 -0
- visidata/movement.py +43 -35
- visidata/optionssheet.py +99 -0
- visidata/path.py +113 -32
- visidata/pivot.py +73 -47
- visidata/plugins.py +65 -192
- visidata/pyobj.py +50 -201
- visidata/rename_col.py +20 -0
- visidata/save.py +37 -20
- visidata/search.py +54 -10
- visidata/selection.py +84 -5
- visidata/settings.py +162 -25
- visidata/sheets.py +229 -257
- visidata/shell.py +51 -21
- visidata/sidebar.py +162 -0
- visidata/sort.py +11 -4
- visidata/statusbar.py +113 -104
- visidata/stored_list.py +43 -0
- visidata/stored_prop.py +38 -0
- visidata/tests/conftest.py +3 -3
- visidata/tests/test_cliptext.py +39 -0
- visidata/tests/test_commands.py +62 -7
- visidata/tests/test_edittext.py +2 -2
- visidata/tests/test_features.py +17 -0
- visidata/tests/test_menu.py +14 -0
- visidata/tests/test_path.py +13 -4
- visidata/text_source.py +53 -0
- visidata/textsheet.py +10 -3
- visidata/theme.py +44 -0
- visidata/themes/__init__.py +0 -0
- visidata/themes/ascii8.py +84 -0
- visidata/themes/asciimono.py +84 -0
- visidata/themes/light.py +17 -0
- visidata/threads.py +87 -39
- visidata/tuiwin.py +22 -0
- visidata/type_currency.py +22 -3
- visidata/type_date.py +31 -9
- visidata/type_floatsi.py +5 -1
- visidata/undo.py +17 -5
- visidata/utils.py +106 -23
- visidata/vdobj.py +28 -17
- visidata/windows.py +10 -0
- visidata/wrappers.py +9 -3
- visidata-3.0.data/data/share/applications/visidata.desktop +7 -0
- {visidata-2.11.1.data → visidata-3.0.data}/data/share/man/man1/vd.1 +301 -148
- {visidata-2.11.1.data → visidata-3.0.data}/data/share/man/man1/visidata.1 +301 -148
- visidata-3.0.data/scripts/vd2to3.vdx +9 -0
- {visidata-2.11.1.dist-info → visidata-3.0.dist-info}/METADATA +12 -8
- visidata-3.0.dist-info/RECORD +257 -0
- {visidata-2.11.1.dist-info → visidata-3.0.dist-info}/WHEEL +1 -1
- vgit/__init__.py +0 -1
- vgit/gitsheet.py +0 -164
- visidata/layout.py +0 -44
- visidata/misc.py +0 -5
- visidata-2.11.1.data/scripts/vgit +0 -9
- visidata-2.11.1.dist-info/RECORD +0 -155
- {vgit → visidata/apps/vgit}/__main__.py +0 -0
- {vgit → visidata/apps/vgit}/abort.py +0 -0
- /visidata/{repeat.py → features/repeat.py} +0 -0
- {visidata-2.11.1.data → visidata-3.0.data}/scripts/vd +0 -0
- {visidata-2.11.1.dist-info → visidata-3.0.dist-info}/LICENSE.gpl3 +0 -0
- {visidata-2.11.1.dist-info → visidata-3.0.dist-info}/entry_points.txt +0 -0
- {visidata-2.11.1.dist-info → visidata-3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1204 @@
|
|
1
|
+
'''
This plugin supports the default log format for:

/var/log/ltm*
/var/log/gtm*
/var/log/apm*
/var/log/audit*

It extracts common log entries, particularly around monitoring, iRules and configuration change audits. It tries to extract data into common fields to assist rapid filtering.

f5log_object_regex provides a simple way to perform a regex on an object name extracted by a splitter and get extra columns out of it. This is very useful when object names have a structure. Simply use named groups in your regex to get named columns out.

Regex: (?:/Common/)(?P<site>[^-]+)-(?P<vstype>[^-]+)-(?P<application>[^-]+)

/Common/newyork-www-banking1

... | site | vstype | application | ...
... | newyork | www | banking1 | ...

Adding to .visidatarc

echo 'visidata.vd.options.set("f5log_object_regex", r"(?:/Common/)(?P<site>[^-]+)-(?P<vstype>[^-]+)-(?P<application>[^-]+)", obj="global")' >> ~/.visidatarc
'''

__author__ = "James Deucker <me@bitwisecook.org>"
__version__ = "1.0.10"

from datetime import datetime, timedelta
from ipaddress import ip_address

import re
import traceback
from typing import Any, Dict, Optional

from visidata import Path, VisiData, Sheet, date, AttrColumn, vd, Column, CellColorizer, RowColorizer
|
36
|
+
|
37
|
+
|
38
|
+
class hexint(int):
    """An int parsed from hexadecimal text (e.g. an F5 log id), shown back as hex."""

    def __new__(cls, value, *args, **kwargs):
        # Incoming values are always base-16 strings.
        return int.__new__(cls, value, base=16)

    def __str__(self):
        return hex(self)
|
44
|
+
|
45
|
+
|
46
|
+
class delta_t(int):
    """A duration in whole seconds, parsed from decimal text."""

    def __new__(cls, value, *args, **kwargs):
        # Force base-10 parsing of the incoming string.
        return int.__new__(cls, value, base=10)
|
49
|
+
|
50
|
+
|
51
|
+
# Register display types for the two custom ints: hexint renders via its own
# __str__ (hex), delta_t renders as a human-readable timedelta.
vd.addType(hexint, icon="ⓧ", formatter=lambda fmt, num: str(num))
vd.addType(
    delta_t,
    icon="⇥",
    formatter=lambda fmt, delta: str(timedelta(seconds=delta)),
)
# Theme colors consumed by the monitor-status and log-id colorizers below.
vd.theme_option("color_f5log_mon_up", "green", "color of f5log monitor status up")
vd.theme_option("color_f5log_mon_down", "red", "color of f5log monitor status down")
vd.theme_option("color_f5log_mon_unknown", "blue", "color of f5log monitor status unknown")
vd.theme_option("color_f5log_mon_checking", "magenta", "color of monitor status checking")
vd.theme_option("color_f5log_mon_disabled", "black", "color of monitor status disabled")
vd.theme_option("color_f5log_logid_alarm", "red", "color of alarms")
vd.theme_option("color_f5log_logid_warn", "yellow", "color of warnings")
vd.theme_option("color_f5log_logid_notice", "cyan", "color of notice")
vd.theme_option("color_f5log_logid_info", "green", "color of info")
# User-settable options for this loader (see module docstring for examples).
vd.option(
    "f5log_object_regex",
    None,
    "A regex to perform on the object name, useful where object names have a structure to extract. Use the (?P<foo>...) named groups form to get column names.",
    help='regex'
)
vd.option(
    "f5log_log_year",
    None,
    "Override the default year used for log parsing. Use all four digits of the year (e.g., 2022). By default (None) use the year from the ctime of the file, or failing that the current year.",
)
vd.option(
    "f5log_log_timezone",
    "UTC",
    "The timezone the source file is in, by default UTC.",
)
|
82
|
+
|
83
|
+
|
84
|
+
class F5LogSheet(Sheet):
|
85
|
+
class F5LogRow:
    """One parsed log line; unknown attribute lookups fall back to the kv dict."""

    def __init__(
        self,
        msg: str = None,
        timestamp: datetime = None,
        host: str = None,
        level: str = None,
        process: str = None,
        proc_pid: int = None,
        logid1: hexint = None,
        logid2: hexint = None,
        message: str = None,
        kv: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Hold everything in a single dict so __getattr__ can do one lookup
        # with a fall-through to the splitter-provided kv mapping.
        data = dict(
            msg=msg,
            timestamp=timestamp,
            host=host,
            level=level,
            process=process,
            proc_pid=proc_pid,
            logid1=logid1,
            logid2=logid2,
            message=message,
            kv=kv,
        )
        data.update(kwargs)
        self._data = data

    def __getattr__(self, item):
        # Known fields come straight from _data; anything else resolves from
        # the kv dict when present, else None (never raises AttributeError).
        data = self._data
        if item in data:
            return data[item]
        extra = data["kv"]
        return extra.get(item) if extra else None
|
118
|
+
|
119
|
+
# strptime is slow so we need to parse manually
# (month-abbreviation -> month-number table for the manual date parsing)
_months = {
    "Jan": 1,
    "Feb": 2,
    "Mar": 3,
    "Apr": 4,
    "May": 5,
    "Jun": 6,
    "Jul": 7,
    "Aug": 8,
    "Sep": 9,
    "Oct": 10,
    "Nov": 11,
    "Dec": 12,
}

# IP protocol number -> protocol name for the protocols F5 logs mention.
_proto = {
    6: "tcp",
    17: "udp",
    132: "sctp",
}

rowtype = "logs"  # noun for rows, shown in the status bar

# Core columns; splitters put any extra extracted fields into the row's
# kv dict, reachable through F5LogRow.__getattr__.
columns = [
    AttrColumn("rawmsg", type=str, width=0),  # original line, hidden by default
    AttrColumn("timestamp", type=date),
    AttrColumn("host", type=str),
    AttrColumn("level", type=str),
    AttrColumn("process", type=str, width=10),
    AttrColumn("proc_pid", type=int, width=7),
    AttrColumn("logid1", type=hexint),
    AttrColumn("logid2", type=hexint),
    AttrColumn("message", type=str, width=90),
    AttrColumn("object", type=str, width=50),
]

# Syslog preamble: optional facility word, one of three timestamp formats
# (date1/date2/date3), host, level, process[pid], logid1:logid2, message.
re_f5log = re.compile(
    r"^(?:(?:audit|gtm|ltm|security|tmm|user|\<\d+\>)\s+)?(?:(?P<date1>[A-Z][a-z]{2}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})|(?P<date2>\d+-\d+-\d+T\d+:\d+:\d+[+-]\d+:\d+)|(?P<date3>\d+-\d+\s\d{2}:\d{2}:\d{2}))\s+(?P<host>\S+)\s+(?:(?P<level>\S+)\s+(?:(?P<process>[a-z0-9_()-]+\s?)\[(?P<pid>\d+)\]:\s+)?(?:(?P<logid1>[0-9a-f]{8}):(?P<logid2>[0-9a-f]):\s+)?)?(?P<message>.*)$"
)
# iRule lines: 'Rule/Pending rule/TCL error <irule> <EVENT> ...', including
# 'aborted for src -> dst' connection aborts.
re_ltm_irule = re.compile(
    r"(?:(?P<irule_msg>TCL\serror|Rule|Pending\srule):?\s(?P<irule>\S+)\s\<(?P<event>[A-Z_0-9]+)\>(?:\s-\s|:\s|\s)?)(?P<message>aborted\sfor\s(?P<srchost>\S+)\s->\s(?P<dsthost>\S+)|.*)"
)
# 'Pool X member Y monitor status Z. [ monitor: status; last error ... ]
# [ was W for Hhrs:Mmins:Ssec ]' status-change messages.
re_ltm_pool_mon_status_msg = re.compile(
    r"^(Pool|Node)\s(?P<poolobj>\S+)\s(member|address)\s(?P<poolmemberobj>\S+)\smonitor\sstatus\s(?P<newstatus>.+)\.\s\[\s(?:(?:(?:(?P<monitorobj>\S+):\s(?P<monitorstatus>\w+)(?:;\slast\serror:\s\S*\s?(?P<lasterr>[^\]]*))?)?(?:,\s)?)+)?\s]\s*(?:\[\swas\s(?P<prevstatus>.+)\sfor\s(?P<durationhr>-?\d+)hrs?:(?P<durationmin>-?\d+)mins?:(?P<durationsec>-?\d+)sec\s\])?$"
)
# Generic 'ip1[:port1] [-> ip2[:port2]]' extraction plus optional
# monitor-status transition with duration.
re_ltm_ip_msg = re.compile(
    r"(?:.*?)(?P<ip1>\d+\.\d+\.\d+\.\d+)(?:[:.](?P<port1>\d+))?(?:(?:\s->\s|:)(?P<ip2>\d+\.\d+\.\d+\.\d+)(?:[:.](?P<port2>\d+))?)?(?:\smonitor\sstatus\s(?P<mon_status>\w+)\.\s\[[^]]+\]\s+\[\swas\s(?P<prev_status>\w+)\sfor\s((?P<durationhr>-?\d+)hrs?:(?P<durationmin>-?\d+)mins?:(?P<durationsec>-?\d+)secs?)\s\]|\.?(?:.*))"
)
# 'Connection error: func:loc: error (errno) message'
re_ltm_conn_error = re.compile(
    r"^(?:\(null\sconnflow\):\s)?Connection\serror:\s(?P<func>[^:]+):(?P<funcloc>[^:]+):\s(?P<error>.*)\s?\((?P<errno>\d+)\)(?:\s(?P<errormsg>.*))$"
)
# SSL certificate expiry warnings: CN, file and expiry date.
re_ltm_cert_expiry = re.compile(
    r"Certificate\s'(?P<cert_cn>.*)'\sin\sfile\s(?P<file>\S+)\s(?P<message>will\sexpire|expired)\son\s(?P<date1>[A-Z][a-z]{2}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2}\s\d+\s\S+)"
)
# GTM object state changes (VS/Pool/Monitor/Wide IP/Server/...): old --> new.
re_gtm_monitor = re.compile(
    r"^(?:SNMP_TRAP:\s)?(?P<objtype>VS|Pool|Monitor|Wide\sIP|Server|Data\scenter|Prober\sPool|Box)\s\(?(?P<object>\S+?)\)?\s(?:member\s\(?(?P<pool_member>\S+?)\)?\s)?(?:\(ip(?::port)?=(?P<ipport>[^\)]+)\)\s)?(?:\(Server\s(?P<server>[^\)]+)\)\s)?(?:state\schange\s)?(?:(?P<prev_status>\w+)\s-->\s)?(?P<new_status>\w+)(?:(?:\s\(\s?)(?P<msg>(?:(?P<type>\w+)\s(?P<monitor_object>\S+)\s:\s)?state:\s(?P<state>\S+)|.*)\))?$"
)
# 'Monitor instance X ip prev --> new from gtm (state: ...)'
re_gtm_monitor_instance = re.compile(
    r"^Monitor\sinstance\s(?P<object>\S+)\s(?P<monip>\S+)\s(?P<prevstatus>\S+)\s-->\s(?P<newstatus>\S+)\sfrom\s(?P<srcgtm>\S+)\s\((?:state:?\s)?(?P<state>.*)\)$"
)
# Pool/Node/Monitor members being enabled/disabled or session-status changes.
re_ltm_poolnode_abled = re.compile(
    r"^(?P<objtype>Pool|Node|Monitor)\s(?P<object>\S+)\s(?:address|member|instance)\s(?P<member>\S+)\s(session\sstatus|has\sbeen)\s(?P<status>.+)\.$"
)
# 'No shared ciphers between SSL peers src.port:dst.port.'
re_ltm_no_shared_ciphers = re.compile(
    r"^(?P<msg>No\sshared\sciphers\sbetween\sSSL\speers)\s(?P<srchost>\d+\.\d+\.\d+\.\d+|[0-9a-f:]+)\.(?P<srcport>\d+)\:(?P<dsthost>\d+\.\d+\.\d+\.\d+|[0-9a-f:]+)\.(?P<dstport>\d+)\.$"
)
# http_process_state_* invalid-action errors with both connection 4-tuples.
re_ltm_http_process_state = re.compile(
    r"^http_process_state_(?P<httpstate>\S+)\s-\sInvalid\saction:0x(?P<actionid>[a-f0-9]+)\s(?P<msg>.*?)\s*(?P<sidea>(?:server|client)side)\s\((?P<src>\S+)\s->\s(?P<vsdst>\S+)\)\s+(?:(?P<sideb>(?:server|client)side)\s)?(?:\((?P<poolsrc>\S+)\s->\s(?P<dst>\S+)\)|\(\(null\sconnflow\)\))\s\((?P<sideaa>\S+)\sside:\svip=(?P<vs>\S+)\sprofile=(?P<profile>\S+)\s(?:pool=(?P<pool>\S+)\s)?(?P<sideaaa>\S+)_ip=(?P<sideasrc>\S+)\)$"
)
# HTTP header count/size limit violations.
re_ltm_http_header_exceeded = re.compile(
    r"^(?P<msg>HTTP\sheader\s(?:count|\((?P<size>\d+)\))\sexceeded\smaximum\sallowed\s(?P<type>count|size)\sof\s(?P<limit>\d+))\s\((?P<side>\S+)\sside:\svip=(?P<object>\S+)\sprofile=(?P<profile>\S+)\spool=(?P<pool>\S+)\s(?P<sideip>client|server)_ip=(?P<sidehost>.*)\)$"
)

# (column name, cell value) -> theme option name; used by colorizeMonitors.
f5log_mon_colors = {
    ("monitor_status", "down"): "color_f5log_mon_down",
    ("monitor_status", "up"): "color_f5log_mon_up",
    ("monitor_status", "enabled"): "color_f5log_mon_up",
    ("monitor_status", "forced disabled"): "color_f5log_mon_disabled",
    ("monitor_status", "node disabled"): "color_f5log_mon_disabled",
    ("monitor_status", "checking"): "color_f5log_mon_checking",
    ("new_status", "available"): "color_f5log_mon_up",
    ("new_status", "unavailable"): "color_f5log_mon_down",
    ("new_status", "up"): "color_f5log_mon_up",
    ("new_status", "down"): "color_f5log_mon_down",
    ("new_status", "green"): "color_f5log_mon_up",
    ("new_status", "red"): "color_f5log_mon_down",
    ("new_status", "now has available members"): "color_f5log_mon_up",
    ("new_status", "no members available"): "color_f5log_mon_down",
    ("new_status", "blue"): "color_f5log_mon_unknown",
    ("new_status", "checking"): "color_f5log_mon_checking",
    ("new_status", "unchecked"): "color_f5log_mon_unknown",
    ("new_status", "node down"): "color_f5log_mon_disabled",
    ("new_status", "forced down"): "color_f5log_mon_disabled",
    ("new_status", "disabled"): "color_f5log_mon_disabled",
    ("prev_status", "available"): "color_f5log_mon_up",
    ("prev_status", "unavailable"): "color_f5log_mon_down",
    ("prev_status", "up"): "color_f5log_mon_up",
    ("prev_status", "down"): "color_f5log_mon_down",
    ("prev_status", "green"): "color_f5log_mon_up",
    ("prev_status", "red"): "color_f5log_mon_down",
    ("prev_status", "now has available members"): "color_f5log_mon_up",
    ("prev_status", "no members available"): "color_f5log_mon_down",
    ("prev_status", "blue"): "color_f5log_mon_unknown",
    ("prev_status", "checking"): "color_f5log_mon_checking",
    ("prev_status", "unchecked"): "color_f5log_mon_unknown",
    ("prev_status", "node down"): "color_f5log_mon_disabled",
    ("prev_status", "forced down"): "color_f5log_mon_disabled",
    ("prev_status", "disabled"): "color_f5log_mon_disabled",
}
|
229
|
+
|
230
|
+
def colorizeMonitors(sheet, col: Column, row: F5LogRow, value):
    """Cell colorizer: map (column name, displayed value) through f5log_mon_colors."""
    if col is None or row is None:
        return None
    return sheet.f5log_mon_colors.get((col.name, value.value))
|
234
|
+
|
235
|
+
# logid1 -> severity color for whole-row colorizing (see colorizeRows).
f5log_warn_logid = {
    "01010013": "color_f5log_logid_notice",
    "01010029": "color_f5log_logid_warn",
    "01010038": "color_f5log_logid_warn",
    "01010201": "color_f5log_logid_warn",
    "01010281": "color_f5log_logid_warn",
    "01070333": "color_f5log_logid_warn",
    "01070596": "color_f5log_logid_alarm",
    "010c0018": "color_f5log_logid_warn",
    "010c0019": "color_f5log_logid_info",
    "010c003e": "color_f5log_logid_alarm",
    "010c003f": "color_f5log_logid_alarm",
    "010c0044": "color_f5log_logid_warn",
    "010c0052": "color_f5log_logid_warn",
    "010c0053": "color_f5log_logid_info",
    "010c0054": "color_f5log_logid_alarm",
    "010c0055": "color_f5log_logid_alarm",
    "010c0057": "color_f5log_logid_info",
    "010e0001": "color_f5log_logid_alarm",
    "010e0004": "color_f5log_logid_alarm",
    "01340011": "color_f5log_logid_warn",
    "01390002": "color_f5log_logid_notice",
    "01140029": "color_f5log_logid_alarm",
    "01140030": "color_f5log_logid_warn",
    "01140045": "color_f5log_logid_alarm",
    "01190004": "color_f5log_logid_alarm",
    "011ae0f3": "color_f5log_logid_alarm",
    "011e0002": "color_f5log_logid_alarm",
    "011e0003": "color_f5log_logid_alarm",
    "011f0005": "color_f5log_logid_warn",
    "014f0004": "color_f5log_logid_warn",
}
|
267
|
+
|
268
|
+
def colorizeRows(sheet, col: Column, row: F5LogRow, value):
    """Row colorizer: highlight boot markers and noteworthy log ids."""
    if col is None or row is None:
        return None
    message = row.message
    # Boot markers have no log id but deserve attention.
    if row.logid1 is None and message is not None and message.startswith("boot_marker"):
        return "color_f5log_logid_notice"
    return sheet.f5log_warn_logid.get(row.logid1)
|
278
|
+
|
279
|
+
@staticmethod
|
280
|
+
def split_audit_bigip_tmsh_audit(msg):
|
281
|
+
# skip 'AUDIT - ' at the start of the line
|
282
|
+
e = msg[8:].split("=", maxsplit=6)
|
283
|
+
|
284
|
+
for ee, ne in zip(e, e[1:]):
|
285
|
+
yield {ee[ee.rfind(" ") + 1 :]: ne[: ne.rfind(" ")]}
|
286
|
+
|
287
|
+
@staticmethod
|
288
|
+
def split_audit_scriptd_run_script(msg):
|
289
|
+
# skip 'AUDIT - ' at the start of the line
|
290
|
+
e = msg[8:].split("=")
|
291
|
+
|
292
|
+
for ee, ne in zip(e, e[1:]):
|
293
|
+
yield {ee[ee.rfind(" ") + 1 :]: ne[: ne.rfind(" ")].strip('"')}
|
294
|
+
|
295
|
+
@staticmethod
def split_audit_mcpd_mcp_error(msg):
    """Yield key/value dicts parsed from an mcpd AUDIT message: trailing
    [Status=...], the command and its brace-delimited data, then the
    remaining ' - '-separated key/value fields."""
    # skip 'AUDIT - ' at the start of the line
    # skip the status at the end of the line
    msg = msg[8:]
    status = None
    status_loc = msg.rfind("[Status=")
    if status_loc >= 0:
        # Pull '[Status=...]' off the end and yield just its value.
        status = msg[status_loc + 1 : -1]
        msg = msg[: status_loc - 1]
        yield {
            "status": status.split("=", maxsplit=1)[1],
        }
    # we need to get one word back from the first opening curly
    # find the curly
    cmd_data_loc = msg.find(" { ")
    if cmd_data_loc >= 0:
        # get the cmd_data
        cmd_data = msg[cmd_data_loc + 1 :]
        # split the message and the command
        msg, cmd = msg[:cmd_data_loc].rsplit(" ", maxsplit=1)
        # strip off the trailing " -" from the message
        msg = msg[:-2]
        # The first double-quoted token inside the braces is the object name.
        object = cmd_data.split('"', maxsplit=2)
        if len(object) == 3:
            yield {"object": object[1]}
        yield {
            "command": cmd,
            "cmd_data": cmd_data,
        }

    e = msg.split(" - ")

    for ee in e[0].split(","):
        ee = ee.strip().split(" ")
        # yield the kvs in the first bit split on ,
        if ee[0].startswith("tmsh-pid-"):
            # of course tmsh-pid- is different: key and value are joined by '-'
            yield {ee[0][: ee[0].rfind("-")]: int(ee[0][ee[0].rfind("-") + 1 :])}
        elif len(ee) == 1:
            yield {ee[0]: None}
        else:
            yield {ee[0]: ee[1]}

    for ee in e[1:]:
        ee = ee.strip().split(" ", maxsplit=1)
        if ee[0] == "transaction":
            # 'transaction #<id>-<step>' carries two values
            yield {"transaction": int(ee[1][1:].split("-")[0])}
            yield {"transaction_step": int(ee[1][1:].split("-")[1])}
        elif ee[0] == "object":
            yield {"object_id": ee[1]}
        else:
            # yield the rest of the kvs
            try:
                yield {ee[0]: ee[1]}
            except IndexError:
                yield {ee[0]: None}
|
352
|
+
|
353
|
+
@staticmethod
def split_ltm_pool_mon_status(msg):
    """Parse 'Pool/Node X member/address Y monitor status Z ...' messages,
    extracting the member's host/port and how long the previous status held."""
    m = F5LogSheet.re_ltm_pool_mon_status_msg.match(msg)
    if m is None:
        return
    m = m.groupdict()
    # '[ was <status> for Hhrs:Mmins:Ssec ]' -> total seconds, when present.
    if m.get("durationhr") and m.get("durationmin") and m.get("durationsec"):
        duration = timedelta(
            hours=int(m.get("durationhr")),
            minutes=int(m.get("durationmin")),
            seconds=int(m.get("durationsec")),
        ).total_seconds()
    else:
        duration = None
    # Split the pool-member object ('/partition/addr:port') into host/port.
    dst = m.get("poolmemberobj")
    if dst:
        dst = dst.split("/")[-1]
        if "." in dst and len(dst.split(":")) == 2:
            # ipv4
            dsthost, dstport = dst.split(":")
        elif "." in dst and len(dst.split(":")) == 1:
            # ipv4
            dsthost, dstport = dst, None
        else:
            # ipv6
            dsthost, dstport = dst.rsplit(":", maxsplit=1)
        try:
            # see if it's an IP and if so parse it
            dsthost = ip_address(dsthost)
        except ValueError:
            dsthost = None
        try:
            # see if it's a port number and if so parse it
            dstport = int(dstport)
        except (ValueError, TypeError):
            dstport = None
    else:
        dsthost, dstport = None, None
    yield {
        "object": m.get("poolobj"),
        "objtype": "pool",
        "pool_member": m.get("poolmemberobj"),
        "monitor": m.get("monitorobj"),
        "dsthost": dsthost,
        "dstport": dstport,
        "monitor_status": m.get("monitorstatus"),
        "new_status": m.get("newstatus"),
        "prev_status": m.get("prevstatus"),
        "last_error": m.get("lasterr"),
        "duration_s": duration,
    }
|
404
|
+
|
405
|
+
@staticmethod
def split_ltm_poolnode_mon_abled(msg):
    """Parse Pool/Node/Monitor member enable/disable and session-status messages."""
    match = F5LogSheet.re_ltm_poolnode_abled.match(msg)
    if not match:
        return
    groups = match.groupdict()
    yield {
        "object": groups.get("object"),
        "objtype": groups.get("objtype").lower(),
        "pool_member": groups.get("member"),
        "monitor_status": groups.get("status"),
    }
|
417
|
+
|
418
|
+
@staticmethod
|
419
|
+
def split_ltm_pool_has_no_avail_mem(msg):
|
420
|
+
yield {
|
421
|
+
"object": msg.split(" ")[-1],
|
422
|
+
"objtype": "pool",
|
423
|
+
"new_status": "no members available",
|
424
|
+
"prev_status": None,
|
425
|
+
}
|
426
|
+
|
427
|
+
@staticmethod
|
428
|
+
def split_ltm_pool_has_avail_mem(msg):
|
429
|
+
yield {
|
430
|
+
"object": msg.split(" ")[1],
|
431
|
+
"objtype": "pool",
|
432
|
+
"new_status": "now has available members",
|
433
|
+
"prev_status": None,
|
434
|
+
}
|
435
|
+
|
436
|
+
@staticmethod
def split_ltm_rule(msg):
    """Split an iRule log/error message; for 'aborted for src -> dst'
    messages also parse out the connection endpoints."""
    m = F5LogSheet.re_ltm_irule.match(msg)
    if m is None:
        return
    m = m.groupdict()
    yield {
        "object": m.get("irule"),
        "objtype": "rule",
        "irule_event": m.get("event"),
        "irule_msg": m.get("irule_msg"),
        "msg": m.get("message"),
    }
    if m.get("message", "").startswith("aborted for"):
        # NOTE(review): assumes exactly one ':' (ipv4) or one '.' (ipv6)
        # separates host and port in each endpoint — confirm against logs.
        src = m.get("srchost")
        if src and len(src.split(":")) == 2:
            # ipv4
            srchost, srcport = src.split(":")
        else:
            # ipv6
            srchost, srcport = src.split(".")
        dst = m.get("dsthost")
        if dst and len(dst.split(":")) == 2:
            # ipv4
            dsthost, dstport = dst.split(":")
        else:
            # ipv6
            dsthost, dstport = dst.split(".")
        yield {
            "srchost": ip_address(srchost),
            "srcport": int(srcport),
            "dsthost": ip_address(dsthost),
            "dstport": int(dstport),
        }
|
470
|
+
|
471
|
+
@staticmethod
|
472
|
+
def split_ltm_rule_missing_datagroup(msg):
|
473
|
+
if "error: Unable to find value_list" in msg:
|
474
|
+
m = msg.split(" ", maxsplit=12)
|
475
|
+
yield {
|
476
|
+
"object": m[1].strip("[").strip("]"),
|
477
|
+
"objtype": "rule",
|
478
|
+
"msg": "error: Unable to find value_list",
|
479
|
+
"missing_dg": m[7].strip("("),
|
480
|
+
"funcloc": int(m[11].strip(":")),
|
481
|
+
"error": m[12],
|
482
|
+
}
|
483
|
+
else:
|
484
|
+
m = msg.split(" ", maxsplit=5)
|
485
|
+
yield {
|
486
|
+
"object": m[1].strip("[").strip("]"),
|
487
|
+
"objtype": "rule",
|
488
|
+
"msg": m[5].split("]", maxsplit=1)[0].strip("[]"),
|
489
|
+
"error": m[5].split("]", maxsplit=1)[1],
|
490
|
+
"funcloc": int(m[3].split(":")[1]),
|
491
|
+
}
|
492
|
+
|
493
|
+
@staticmethod
def split_ltm_cert_expiry(msg):
    """Parse SSL certificate expiry warnings into CN, file and expiry date."""
    m = F5LogSheet.re_ltm_cert_expiry.match(msg)
    if m is None:
        return
    m = m.groupdict()
    yield {
        "cert_cn": m.get("cert_cn"),
        "object": m.get("file"),
        "objtype": "ssl-cert",
        # Collapse the double space of space-padded day numbers
        # (e.g. 'Oct  7') before strptime.
        "date": datetime.strptime(
            m.get("date1").replace("  ", " "),
            "%b %d %H:%M:%S %Y %Z",
        )
        if m.get("date1") is not None
        else None,
        "msg": m.get("message"),
    }
|
511
|
+
|
512
|
+
@staticmethod
def split_ltm_connection_error(msg):
    """Break a 'Connection error: func:loc: error (errno) msg' line into fields."""
    match = F5LogSheet.re_ltm_conn_error.match(msg)
    if not match:
        return
    fields = match.groupdict()
    yield {
        "func": fields.get("func"),
        "funcloc": fields.get("funcloc"),
        "error": fields.get("error"),
        "errno": fields.get("errno"),
        "errmsg": fields.get("errormsg"),
    }
|
525
|
+
|
526
|
+
@staticmethod
def split_ltm_virtual_status(msg):
    """Split a virtual-server status line; SNMP_TRAP lines carry the object one token later."""
    tokens = msg.split(" ")
    obj_idx = 2 if tokens[0] == "SNMP_TRAP:" else 1
    yield {
        "object": tokens[obj_idx],
        "objtype": "vs",
        "new_status": tokens[-1],
        "prev_status": None,
    }
|
543
|
+
|
544
|
+
@staticmethod
def split_ltm_virtual_address_status_or_irule_profile_err(msg):
    """Two unrelated messages share this logid on BIG-IP: iRule profile
    errors ("event in rule ...") and virtual-address status changes."""
    tokens = msg.split(" ")
    if "event in rule" in msg:
        yield {
            "object": tokens[4].strip("()"),
            "objtype": "rule",
            "irule_event": tokens[0],
            "msg": "event in rule " + " ".join(tokens[5:-2]),
            "target_obj": tokens[-1].strip("()."),
            "target_objtype": tokens[-2].replace("virtual-server", "vs"),
        }
    else:
        yield {
            "object": tokens[2],
            "objtype": "virtual address",
            "new_status": tokens[9].lower().strip("."),
            "prev_status": tokens[7].lower(),
        }
|
564
|
+
|
565
|
+
@staticmethod
def split_ltm_ssl_handshake_fail(msg):
    """Split an SSL handshake failure line (0x01260013) into src/dst address fields."""
    tokens = msg.split(" ")

    def addr(pair):
        # ipv4 is "host:port"; ipv6 separates the port with a dot
        if len(pair.split(":")) == 2:
            return pair.split(":")
        return pair.split(".")

    srchost, srcport = addr(tokens[5])
    dsthost, dstport = addr(tokens[7])
    yield {
        "srchost": ip_address(srchost),
        "srcport": int(srcport),
        "dsthost": ip_address(dsthost),
        "dstport": int(dstport),
    }
|
586
|
+
|
587
|
+
@staticmethod
def split_ltm_shared_ciphers(msg):
    """Split a no-shared-ciphers line (0x01260026) into src/dst address fields."""
    match = F5LogSheet.re_ltm_no_shared_ciphers.match(msg)
    if match is None:
        return
    g = match.groupdict()
    yield {
        "srchost": ip_address(g.get("srchost")),
        "srcport": int(g.get("srcport")),
        "dsthost": ip_address(g.get("dsthost")),
        "dstport": int(g.get("dstport")),
    }
|
599
|
+
|
600
|
+
@staticmethod
def split_ltm_rst_reason(msg):
    """Split a TCP RST-reason line (0x01230140) into addresses and the two reason codes."""
    parts = msg.split(" ", maxsplit=7)
    src = parts[3].strip(",")
    dst = parts[5].strip(",")
    reasonc1, reasonc2 = parts[6].split(":")

    def hostport(pair):
        # ipv4 is "host:port"; ipv6 keeps colons in the host, so split on the last one
        if len(pair.split(":")) == 2:
            return pair.split(":")
        return pair.rsplit(":", maxsplit=1)

    srchost, srcport = hostport(src)
    dsthost, dstport = hostport(dst)
    yield {
        "srchost": ip_address(srchost),
        "srcport": int(srcport) if srcport else None,
        "dsthost": ip_address(dsthost),
        "dstport": int(dstport) if dstport else None,
        "rst_reason_code1": hexint(reasonc1[3:]),
        "rst_reason_code2": hexint(reasonc2[:-1]),
        "rst_reason": parts[7],
    }
|
626
|
+
|
627
|
+
@staticmethod
def split_ltm_inet_port_exhaust(msg):
    """Split a port-exhaustion line (0x01010201/0x01010281) into src, dst and protocol."""
    tokens = msg.split(" ")
    srchost = tokens[-5]
    dst = tokens[-3]
    if len(dst.split(":")) == 2:
        dsthost, dstport = dst.split(":")
    else:
        # ipv6 keeps colons in the host part; the port follows the last one
        dsthost, dstport = dst.rsplit(":", maxsplit=1)
    # the longer (11-token) variant carries a 5-word leading message
    prefix = tokens[:5] if len(tokens) == 11 else tokens[:3]
    yield {
        "msg": " ".join(prefix),
        "srchost": ip_address(srchost),
        "dsthost": ip_address(dsthost),
        "dstport": int(dstport),
        "proto": F5LogSheet._proto[int(tokens[-1].strip(")"))],
    }
|
643
|
+
|
644
|
+
@staticmethod
def split_ltm_conn_limit_reached(msg):
    """Split a connection-limit-reached line (0x01200012/14).

    Token layout after split(" ", maxsplit=11): m[4]=src addr, m[6]=dst addr,
    m[8]=protocol, m[9]=object type, m[10]=object, m[11]=trailing message.
    """
    m = msg.split(" ", maxsplit=11)
    src, dst = m[4], m[6].strip(",")
    if len(src.split(":")) == 2:
        srchost, srcport = src.split(":")
    else:
        # ipv6: the port is separated from the host by the final dot
        srchost, srcport = src.rsplit(".", maxsplit=1)
    if len(dst.split(":")) == 2:
        dsthost, dstport = dst.split(":")
    else:
        # ipv6
        dsthost, dstport = dst.rsplit(".", maxsplit=1)
    yield {
        "msg": m[-1],
        "object": m[10].strip(":"),
        "objtype": m[9].lower(),
        "srchost": ip_address(srchost),
        # BUGFIX: previously int(dstport) — reported the destination port as
        # the source port and left the parsed srcport unused
        "srcport": int(srcport),
        "dsthost": ip_address(dsthost),
        "dstport": int(dstport),
        "proto": m[8].strip(",").lower(),
    }
|
668
|
+
|
669
|
+
@staticmethod
def split_ltm_syncookie_threshold(msg):
    """Split a syncookie-threshold line, e.g.
    'Syncookie threshold 1993 exceeded, virtual = 203.24.253.132:443'."""
    tokens = msg.split(" ")
    dst = tokens[-1]
    if len(dst.split(":")) == 2:
        dsthost, dstport = dst.split(":")
    else:
        # ipv6: the port follows the last dot
        dsthost, dstport = dst.rsplit(".", maxsplit=1)
    yield {
        "msg": "Syncookie threshold exceeded",
        "threshold": int(tokens[2]),
        "objtype": tokens[-3].lower(),
        "dsthost": ip_address(dsthost),
        "dstport": int(dstport),
    }
|
686
|
+
|
687
|
+
@staticmethod
def split_ltm_sweeper_active2(msg):
    """Split the 0x011E0002 sweeper-activation line into policy, mode and target."""
    t = msg.split(" ")
    yield {
        "policy": t[3],
        "mode": t[4],
        "object": t[7].strip(")."),
        "objtype": t[6].strip("("),
        "msg": " ".join(t[8:]).strip("()"),
    }
|
697
|
+
|
698
|
+
@staticmethod
def split_ltm_sweeper_active3(msg):
    """Split the 0x011E0003 sweeper-activation line (no mode field) into policy and target."""
    t = msg.split(" ")
    yield {
        "policy": t[3],
        "object": t[6].strip(")."),
        "objtype": t[5].strip("("),
        "msg": " ".join(t[7:]).strip("()"),
    }
|
707
|
+
|
708
|
+
@staticmethod
def split_ltm_dns_failed_xfr_rcode(msg):
    """Split a failed-zone-transfer-rcode line (0x0153100E): zone is the 4th token."""
    t = msg.split(" ")
    yield {
        "msg": " ".join([t[0]] + t[4:]),
        "zone": t[3],
    }
|
715
|
+
|
716
|
+
@staticmethod
def split_ltm_dns_failed_rr(msg):
    """Split a failed-resource-record line (0x01531015): zone is the final token."""
    parts = msg.rsplit(" ", maxsplit=3)
    yield {
        "msg": parts[0],
        "zone": parts[-1].strip("."),
    }
|
723
|
+
|
724
|
+
@staticmethod
def split_ltm_dns_failed_xfr(msg):
    """Split a failed-zone-transfer line (0x01531018) into zone and source address."""
    t = msg.split(" ")
    yield {
        "msg": " ".join(t[:3]) + ", " + " ".join(t[-2:]),
        "srchost": ip_address(t[6].strip(",")),
        "zone": t[4],
    }
|
733
|
+
|
734
|
+
@staticmethod
def split_ltm_dns_handling_notify(msg):
    """Split a DNS NOTIFY-handling line (0x0153101C): zone is the 5th token."""
    t = msg.split(" ")
    yield {
        "msg": " ".join(t[:2]),
        "zone": t[4].rstrip("."),
    }
|
741
|
+
|
742
|
+
@staticmethod
def split_ltm_dns_axfr_succeeded_1f(msg):
    """Split an AXFR-succeeded line (0x0153101F) into zone and master address."""
    t = msg.split(" ")
    yield {
        "msg": " ".join(t[:3] + [t[-1]]),
        "srchost": ip_address(t[6]),
        "zone": t[4],
    }
|
750
|
+
|
751
|
+
@staticmethod
def split_ltm_dns_axfr_succeeded_2c(msg):
    """Split an AXFR-succeeded line (0x0153102C) into zone, serial and source host."""
    t = msg.split(" ")
    yield {
        "msg": " ".join(t[:1] + [t[-1]]),
        "srchost": t[10],
        "zone": t[4],
        "serial": t[8],
    }
|
760
|
+
|
761
|
+
@staticmethod
def split_ltm_dns_ignoring_tfer(msg):
    """Split an ignoring-transfer line (0x01531022), e.g.
    'Ignoring transfer for zone qaautomation-dns.com from 10.17.205.164; transfer not enabled.'"""
    t = msg.split(" ")
    yield {
        "msg": " ".join(t[:2] + t[-3:]),
        "srchost": t[6].strip(";"),
        "zone": t[4],
    }
|
770
|
+
|
771
|
+
@staticmethod
def split_ltm_http_process_state(msg):
    """Split an HTTP process-state line (0x011F0007/0x011F0016) into its address fields.

    All four optional addresses in the message share one format — ipv4 as
    "host:port", ipv6 as "host.port" (port after the final dot) — so the
    previously fourfold-duplicated parsing is factored into one helper.
    """
    m = F5LogSheet.re_ltm_http_process_state.match(msg)
    if not m:
        return
    m = m.groupdict()

    def _hostport(addr):
        # Split an optional "host:port" (ipv4) / "host.port" (ipv6) pair.
        if not addr:
            return None, None
        if len(addr.split(":")) == 2:
            return addr.split(":")
        # ipv6
        return addr.rsplit(".", maxsplit=1)

    srchost, srcport = _hostport(m.get("src"))
    dsthost, dstport = _hostport(m.get("dst"))
    vsdsthost, vsdstport = _hostport(m.get("vsdst"))
    backendsrchost, backendsrcport = _hostport(m.get("poolsrc"))
    yield {
        "object": m.get("vs"),
        "msg": m.get("msg"),
        "srchost": ip_address(srchost) if srchost else None,
        "srcport": int(srcport) if srcport else None,
        "dsthost": ip_address(dsthost) if dsthost else None,
        "dstport": int(dstport) if dstport else None,
        "pool": m.get("pool"),
        "profile": m.get("profile"),
        "httpstate": m.get("httpstate"),
        "actionid": hexint(m.get("actionid")),
        "sidea": m.get("sidea"),
        "vsdsthost": ip_address(vsdsthost) if vsdsthost else None,
        "vsdstport": int(vsdstport) if vsdstport else None,
        "backendsrchost": ip_address(backendsrchost) if backendsrchost else None,
        "backendsrcport": int(backendsrcport) if backendsrcport else None,
        "sideb": m.get("sideb"),
        "sideasrc": ip_address(m.get("sideasrc")) if m.get("sideasrc") else None,
    }
|
834
|
+
|
835
|
+
@staticmethod
def split_ltm_http_header_exceeded(msg):
    """Split an HTTP header-size-exceeded line (0x011F0005/0x011F0011).

    The single host in the message lands in srchost or dsthost depending on
    whether it is the client or the server side.
    """
    match = F5LogSheet.re_ltm_http_header_exceeded.match(msg)
    if not match:
        return
    g = match.groupdict()
    host = g.get("sidehost")
    side = g.get("sideip")
    size = g.get("size")
    limit = g.get("limit")
    yield {
        "object": g.get("object"),
        "msg": g.get("msg"),
        "srchost": ip_address(host) if host and side == "client" else None,
        "dsthost": ip_address(host) if host and side == "server" else None,
        "pool": g.get("pool"),
        "profile": g.get("profile"),
        "size": int(size) if size else None,
        "limit": int(limit) if limit else None,
    }
|
856
|
+
|
857
|
+
@staticmethod
def split_gtm_monitor(msg):
    """Split a GTM monitor status-change line into object, member, destination and statuses."""
    m = F5LogSheet.re_gtm_monitor.match(msg)
    if m is None:
        return
    m = m.groupdict()
    dst = m.get("ipport")
    if dst:
        if len(dst.split(".")) == 4:
            # ipv4 — the port, when present, follows a colon
            if ":" in dst:
                dsthost, dstport = dst.split(":")
            else:
                dsthost, dstport = dst, None
        else:
            # ipv6 — the port, when present, follows the final dot
            if "." in dst:
                dsthost, dstport = dst.rsplit(".", maxsplit=1)
            else:
                dsthost, dstport = dst, None
    else:
        dsthost, dstport = None, None
    yield {
        "objtype": m.get("objtype").lower() if m.get("objtype") else None,
        "object": m.get("object"),
        "pool_member": m.get("pool_member"),
        "monitor_object": m.get("monitor_object"),
        "dsthost": ip_address(dsthost) if dsthost else None,
        "dstport": int(dstport) if dstport else None,
        "server": m.get("server"),
        "new_status": m.get("new_status").lower() if m.get("new_status") else None,
        "prev_status": m.get("prev_status").lower()
        if m.get("prev_status")
        else None,
        "msg": m.get("msg"),
        # BUGFIX: was `if m.get("type") in m`, which tested the group's *value*
        # against the dict's keys and therefore yielded None even when the
        # type group matched; use the same truthiness test as the other fields
        "type": m.get("type").lower() if m.get("type") else None,
        "state": m.get("state"),
    }
|
895
|
+
|
896
|
+
@staticmethod
def split_gtm_monitor_instance(msg):
    """Split a GTM monitor-instance status line (0x011AE0F2) into destination and statuses."""
    m = F5LogSheet.re_gtm_monitor_instance.match(msg)
    if m is None:
        return
    m = m.groupdict()
    monip = m.get("monip")
    if monip:
        if len(monip.split(":")) == 2:
            # ipv4
            dsthost, dstport = monip.split(":")
        else:
            # ipv6 — the port follows the final colon
            dsthost, dstport = monip.rsplit(":", maxsplit=1)
    else:
        dsthost, dstport = None, None
    yield {
        "object": m.get("object"),
        "objtype": "monitor",
        "dsthost": ip_address(dsthost) if dsthost else None,
        "dstport": int(dstport) if dstport else None,
        # ROBUSTNESS: groupdict() stores None (not a missing key) for unmatched
        # groups, so `m.get("newstatus", "")` never used its default and
        # .lower() could raise AttributeError — which the caller's
        # `except (IndexError, ValueError)` does not catch; guard with `or ""`
        "new_status": (m.get("newstatus") or "").lower(),
        "prev_status": (m.get("prevstatus") or "").lower(),
        "src_gtm": m.get("srcgtm"),
        "state": m.get("state").lower() if m.get("state") else None,
    }
|
920
|
+
|
921
|
+
@staticmethod
def split_gtm_syncgroup_change(msg):
    """Split a GTM sync-group join/leave line (0x011A5008/9) into server, address and group."""
    t = msg.split(" ")
    yield {
        "object": t[3],
        "srchost": ip_address(t[4].strip("()")),
        "syncgroup": t[-1],
        "msg": "BIG-IP GTM " + t[5] + " sync group",
    }
|
930
|
+
|
931
|
+
@staticmethod
def split_gtm_changed_state(msg):
    """Split a GTM changed-state line (0x010719EA): '<obj> changed state from X to Y.'"""
    t = msg.split(" ")
    yield {
        "msg": t[0] + " changed state",
        "new_status": t[6].lower().strip("."),
        "prev_status": t[4].lower(),
    }
|
939
|
+
|
940
|
+
@staticmethod
def split_tmm_address_conflict(msg):
    """Split a tmm address-conflict line (0x01190004) into address, MAC and interface."""
    t = msg.split(" ")
    yield {
        "object": " ".join(t[7:]),
        "objtype": "address",
        "dsthost": ip_address(t[4]),
        "dstmac": t[5].strip("()"),
    }
|
950
|
+
|
951
|
+
# Dispatch table mapping a message's logid1 to its splitter. At class-body
# time the names above are staticmethod wrappers, so `.__func__` unwraps them
# into plain callables for direct invocation from iterload().
splitters = {
    0x01010028: split_ltm_pool_has_no_avail_mem.__func__,
    0x01010038: split_ltm_syncookie_threshold.__func__,
    0x01010201: split_ltm_inet_port_exhaust.__func__,
    0x01010281: split_ltm_inet_port_exhaust.__func__,
    0x01010221: split_ltm_pool_has_avail_mem.__func__,
    0x01070151: split_ltm_rule_missing_datagroup.__func__,
    0x01070417: split_audit_mcpd_mcp_error.__func__,
    0x01070639: split_ltm_poolnode_mon_abled.__func__,
    0x01070641: split_ltm_poolnode_mon_abled.__func__,
    0x01070807: split_ltm_poolnode_mon_abled.__func__,
    0x01070808: split_ltm_poolnode_mon_abled.__func__,
    0x01070727: split_ltm_pool_mon_status.__func__,
    0x01070728: split_ltm_pool_mon_status.__func__,
    0x01070638: split_ltm_pool_mon_status.__func__,
    0x01070640: split_ltm_pool_mon_status.__func__,
    0x01071681: split_ltm_virtual_status.__func__,
    0x01071682: split_ltm_virtual_status.__func__,
    0x01071912: split_ltm_virtual_address_status_or_irule_profile_err.__func__,
    0x01071913: split_ltm_virtual_address_status_or_irule_profile_err.__func__,
    0x010719E7: split_ltm_virtual_address_status_or_irule_profile_err.__func__,
    0x010719E8: split_ltm_virtual_address_status_or_irule_profile_err.__func__,
    0x010719EA: split_gtm_changed_state.__func__,
    0x01071BA9: split_ltm_virtual_status.__func__,
    0x01190004: split_tmm_address_conflict.__func__,
    0x011A1004: split_gtm_monitor.__func__,
    0x011A1005: split_gtm_monitor.__func__,
    0x011A1101: split_gtm_monitor.__func__,
    0x011A1102: split_gtm_monitor.__func__,
    0x011A3003: split_gtm_monitor.__func__,
    0x011A3004: split_gtm_monitor.__func__,
    0x011A4002: split_gtm_monitor.__func__,
    0x011A4003: split_gtm_monitor.__func__,
    0x011A4004: split_gtm_monitor.__func__,
    0x011A4101: split_gtm_monitor.__func__,
    0x011A4102: split_gtm_monitor.__func__,
    0x011A5003: split_gtm_monitor.__func__,
    0x011A5004: split_gtm_monitor.__func__,
    0x011A5008: split_gtm_syncgroup_change.__func__,
    0x011A5009: split_gtm_syncgroup_change.__func__,
    0x011A500B: split_gtm_monitor.__func__,
    0x011A500C: split_gtm_monitor.__func__,
    0x011A6005: split_gtm_monitor.__func__,
    0x011A6006: split_gtm_monitor.__func__,
    0x011AB003: split_gtm_monitor.__func__,
    0x011AB004: split_gtm_monitor.__func__,
    0x011AE0F2: split_gtm_monitor_instance.__func__,
    # 0x01220000: split_ltm_rule.__func__,
    0x011E0002: split_ltm_sweeper_active2.__func__,
    0x011E0003: split_ltm_sweeper_active3.__func__,
    0x011F0005: split_ltm_http_header_exceeded.__func__,
    0x011F0011: split_ltm_http_header_exceeded.__func__,
    0x011F0007: split_ltm_http_process_state.__func__,
    0x011F0016: split_ltm_http_process_state.__func__,
    0x01220001: split_ltm_rule.__func__,
    0x01220002: split_ltm_rule.__func__,
    # 0x01220003: split_ltm_rule.__func__,
    # 0x01220004: split_ltm_rule.__func__,
    # 0x01220005: split_ltm_rule.__func__,
    0x01220007: split_ltm_rule.__func__,
    0x01220008: split_ltm_rule.__func__,
    0x01220009: split_ltm_rule.__func__,
    0x01220010: split_ltm_rule.__func__,
    0x01220011: split_ltm_rule.__func__,
    0x01200012: split_ltm_conn_limit_reached.__func__,
    0x01200014: split_ltm_conn_limit_reached.__func__,
    0x01230140: split_ltm_rst_reason.__func__,
    0x01260013: split_ltm_ssl_handshake_fail.__func__,
    0x01260026: split_ltm_shared_ciphers.__func__,
    0x01260008: split_ltm_connection_error.__func__,
    0x01260009: split_ltm_connection_error.__func__,
    0x01420002: split_audit_bigip_tmsh_audit.__func__,
    0x01420007: split_ltm_cert_expiry.__func__,
    0x01420008: split_ltm_cert_expiry.__func__,
    0x014F0005: split_audit_scriptd_run_script.__func__,
    0x0153100E: split_ltm_dns_failed_xfr_rcode.__func__,
    0x01531015: split_ltm_dns_failed_rr.__func__,
    0x01531018: split_ltm_dns_failed_xfr.__func__,
    0x0153101C: split_ltm_dns_handling_notify.__func__,
    0x0153101F: split_ltm_dns_axfr_succeeded_1f.__func__,
    0x01531022: split_ltm_dns_ignoring_tfer.__func__,
    0x0153102C: split_ltm_dns_axfr_succeeded_2c.__func__,
}
|
1034
|
+
|
1035
|
+
# these logs can have IDs we care about splitting but would be errors;
# matched with str.startswith() in iterload() because some of these messages
# carry extra dynamic text after the fixed prefix
no_split_logs = (
    "Per-invocation log rate exceeded; throttling",
    "Resuming log processing at this invocation",
    "Re-enabling general logging;",
    "Cumulative log rate exceeded! Throttling all non-debug logs.",
)
|
1043
|
+
|
1044
|
+
# Columns the sheet already defines; iterload() adds an AttrColumn for any
# splitter-produced key NOT in this set, then records the key here so each
# dynamic column is only created once.
extra_cols = {
    "rawmsg",
    "timestamp",
    "host",
    "level",
    "process",
    "proc_pid",
    "logid1",
    "logid2",
    "message",
    "object",
}
|
1056
|
+
|
1057
|
+
# precedence, coloropt, func — cell-level monitor coloring takes effect
# before the row-level colorizer
colorizers = [
    CellColorizer(100, None, colorizeMonitors),
    RowColorizer(101, None, colorizeRows),
]
|
1062
|
+
|
1063
|
+
def __init__(self, *args, **kwargs):
    """Set up timezone and guess the log year (F5 logs omit it) from file ctime."""
    super().__init__(*args, **kwargs)
    # the default F5 logs don't have the year so we have to guess from the file ctime
    # TODO: make this overridable
    try:
        import zoneinfo
    except ImportError:
        # py<3.9 fallback
        from backports import zoneinfo
    self._log_tz = zoneinfo.ZoneInfo("UTC")
    try:
        # stat() stays inside the try: sources without a stat() raise
        # AttributeError and fall back to the current year
        default_year = datetime.utcfromtimestamp(self.source.stat().st_ctime).year
        self._year = int(vd.options.get("f5log_log_year", default_year))
    except (AttributeError, ValueError, TypeError):
        self._year = datetime.now().year
|
1081
|
+
|
1082
|
+
def iterload(self):
    """Parse each line of the F5 log, yielding one F5LogRow per line.

    Unparseable lines (and splitter failures) are yielded as rows carrying a
    PARSE_ERROR field instead of being dropped.
    """
    self.rows = []  # rowdef: [F5LogRow]

    # optional user regex for decomposing the "object" field into extra columns
    if vd.options.get("f5log_object_regex"):
        try:
            object_regex = re.compile(vd.options.get("f5log_object_regex"))
        except re.error as exc:
            # TODO: make this error into the errors sheet
            object_regex = None
    else:
        object_regex = None

    try:
        # NOTE(review): option key "f5log_log_timzeone" looks misspelled
        # ("timzeone") — confirm it matches the option's registration
        self._log_tz = zoneinfo.ZoneInfo(
            vd.options.get("f5log_log_timzeone", "UTC")
        )
    except zoneinfo.ZoneInfoNotFoundError as exc:
        # TODO: make this error go into the errors sheet
        self._log_tz = zoneinfo.ZoneInfo("UTC")

    for line in self.source:
        m = F5LogSheet.re_f5log.match(line)
        if m:
            m = m.groupdict()
        else:
            # TODO: somehow make this use an error sheet
            yield F5LogSheet.F5LogRow(
                rawmsg=line, kv={"PARSE_ERROR": "unable to parse line"}
            )
            continue
        # kv collects the base message plus whatever the splitter extracts
        kv = {
            "message": m.get("message"),
        }
        if m.get("date1"):
            # syslog-style date with no year, e.g. "Jan 01 00:00:00"
            _t = m.get("date1")
            # strptime is quite slow so we need to manually extract the time on the hot path
            try:
                timestamp = datetime(
                    year=self._year,
                    month=self._months[_t[:3]],
                    day=int(_t[4:6]),
                    hour=int(_t[7:9]),
                    minute=int(_t[10:12]),
                    second=int(_t[13:15]),
                    tzinfo=self._log_tz,
                )
            except ValueError as exc:
                # NOTE(review): no `continue` here, and `timestamp` is not
                # reset — the final yield below may reuse the previous
                # line's timestamp (or NameError on the first line); confirm
                # NOTE(review): the etype= keyword of
                # traceback.format_exception was removed in Python 3.10
                yield F5LogSheet.F5LogRow(
                    rawmsg=line,
                    PARSE_ERROR="\n".join(
                        traceback.format_exception(
                            etype=type(exc), value=exc, tb=exc.__traceback__
                        ),
                    ),
                )
        elif m.get("date2"):
            # ISO-8601 with offset; year and tz come from the log itself
            timestamp = datetime.strptime(m.get("date2"), "%Y-%m-%dT%H:%M:%S%z")
        elif m.get("date3"):
            # whoever designed tmsh show sys log needs to have a good hard think about themselves
            timestamp = datetime.strptime(
                f'{self._year}-{m.get("date3")}', "%Y-%m-%d %H:%M:%S"
            )
            # re-build to attach the configured timezone (strptime gave naive)
            timestamp = datetime(
                year=timestamp.year,
                month=timestamp.month,
                day=timestamp.day,
                hour=timestamp.hour,
                minute=timestamp.minute,
                second=timestamp.second,
                tzinfo=self._log_tz,
            )
            # because this is madness: date3 logs swap the host/level fields
            m["level"], m["host"] = m.get("host"), m.get("level")
        else:
            timestamp = None

        logid1 = int(m.get("logid1"), base=16) if m.get("logid1") else None
        # dispatch to the per-logid splitter unless the message is one of the
        # throttling notices that share those ids
        if logid1 in self.splitters and not any(
            m.get("message", "").startswith(_) for _ in F5LogSheet.no_split_logs
        ):
            try:
                for entry in F5LogSheet.splitters[logid1](m.get("message")):
                    kv.update(entry)
            except (IndexError, ValueError) as exc:
                # splitters raise IndexError/ValueError on unexpected shapes
                # TODO: somehow make this use an error sheet
                yield F5LogSheet.F5LogRow(
                    rawmsg=line,
                    PARSE_ERROR="\n".join(
                        traceback.format_exception(
                            etype=type(exc), value=exc, tb=exc.__traceback__
                        )
                    ),
                )
            # optionally explode the object field via the user-supplied regex
            if "object" in kv and object_regex:
                om = object_regex.match(kv.get("object", ""))
                if om:
                    kv.update(om.groupdict())
            # add a column for any new key the splitter produced
            for k, v in kv.items():
                if k not in self.extra_cols:
                    F5LogSheet.addColumn(self, AttrColumn(k))
                    self.extra_cols.add(k)
        elif logid1 is None and m.get("message").startswith("Rule "):
            # un-id'd iRule log lines still get the rule splitter
            for entry in self.split_ltm_rule(m.get("message")):
                kv.update(entry)
        yield F5LogSheet.F5LogRow(
            # rawmsg=line,
            timestamp=timestamp,
            host=m.get("host"),
            level=m.get("level"),
            process=m.get("process"),
            proc_pid=int(m.get("pid")) if m.get("pid") is not None else None,
            logid1=m.get("logid1") if m.get("logid1") is not None else None,
            logid2=m.get("logid2") if m.get("logid2") is not None else None,
            **kv,
        )
|
1198
|
+
|
1199
|
+
|
1200
|
+
@VisiData.api
def open_f5log(vd: VisiData, p: Path) -> Sheet:
    """Open an F5 log file as an F5LogSheet, with a second-resolution date format."""
    sh = F5LogSheet(p.base_stem, source=p)
    sh.options["disp_date_fmt"] = "%Y-%m-%d %H:%M:%S"
    return sh
|