loglead-1.0.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. loglead-1.0.0/LICENSE +21 -0
  2. loglead-1.0.0/LogLead.egg-info/PKG-INFO +134 -0
  3. loglead-1.0.0/LogLead.egg-info/SOURCES.txt +57 -0
  4. loglead-1.0.0/LogLead.egg-info/dependency_links.txt +1 -0
  5. loglead-1.0.0/LogLead.egg-info/requires.txt +20 -0
  6. loglead-1.0.0/LogLead.egg-info/top_level.txt +1 -0
  7. loglead-1.0.0/MANIFEST.in +1 -0
  8. loglead-1.0.0/PKG-INFO +134 -0
  9. loglead-1.0.0/PYPI_README.md +70 -0
  10. loglead-1.0.0/README.md +75 -0
  11. loglead-1.0.0/loglead/OOV_detector.py +55 -0
  12. loglead-1.0.0/loglead/RarityModel.py +70 -0
  13. loglead-1.0.0/loglead/__init__.py +6 -0
  14. loglead-1.0.0/loglead/anomaly_detection.py +497 -0
  15. loglead-1.0.0/loglead/enhancers/__init__.py +4 -0
  16. loglead-1.0.0/loglead/enhancers/eventlog.py +425 -0
  17. loglead-1.0.0/loglead/enhancers/sequence.py +132 -0
  18. loglead-1.0.0/loglead/explainer.py +370 -0
  19. loglead-1.0.0/loglead/loaders/__init__.py +14 -0
  20. loglead-1.0.0/loglead/loaders/adfa.py +68 -0
  21. loglead-1.0.0/loglead/loaders/awsctd.py +74 -0
  22. loglead-1.0.0/loglead/loaders/base.py +128 -0
  23. loglead-1.0.0/loglead/loaders/bgl.py +20 -0
  24. loglead-1.0.0/loglead/loaders/gelf.py +28 -0
  25. loglead-1.0.0/loglead/loaders/hadoop.py +145 -0
  26. loglead-1.0.0/loglead/loaders/hdfs.py +39 -0
  27. loglead-1.0.0/loglead/loaders/nezha.py +545 -0
  28. loglead-1.0.0/loglead/loaders/pro.py +48 -0
  29. loglead-1.0.0/loglead/loaders/raw.py +52 -0
  30. loglead-1.0.0/loglead/loaders/supercomputers.py +45 -0
  31. loglead-1.0.0/loglead/next_event_prediction.py +146 -0
  32. loglead-1.0.0/loglead/parsers/AEL/AEL.py +234 -0
  33. loglead-1.0.0/loglead/parsers/AEL/LICENSE +202 -0
  34. loglead-1.0.0/loglead/parsers/AEL/README.md +70 -0
  35. loglead-1.0.0/loglead/parsers/Brain/Brain.py +409 -0
  36. loglead-1.0.0/loglead/parsers/Brain/LICENSE +203 -0
  37. loglead-1.0.0/loglead/parsers/Brain/README.md +77 -0
  38. loglead-1.0.0/loglead/parsers/README.md +22 -0
  39. loglead-1.0.0/loglead/parsers/__init__.py +15 -0
  40. loglead-1.0.0/loglead/parsers/bert/README.md +15 -0
  41. loglead-1.0.0/loglead/parsers/bert/bertembedding.py +82 -0
  42. loglead-1.0.0/loglead/parsers/drain3/LICENSE +22 -0
  43. loglead-1.0.0/loglead/parsers/drain3/README.md +3 -0
  44. loglead-1.0.0/loglead/parsers/drain3/drain.py +18 -0
  45. loglead-1.0.0/loglead/parsers/drain3/drain3.ini +30 -0
  46. loglead-1.0.0/loglead/parsers/drain3/drain3_no_masking.ini +30 -0
  47. loglead-1.0.0/loglead/parsers/iplom/IPLoM.py +690 -0
  48. loglead-1.0.0/loglead/parsers/iplom/LICENSE +202 -0
  49. loglead-1.0.0/loglead/parsers/iplom/README.md +69 -0
  50. loglead-1.0.0/loglead/parsers/lenma/LICENSE +25 -0
  51. loglead-1.0.0/loglead/parsers/lenma/README.md +14 -0
  52. loglead-1.0.0/loglead/parsers/lenma/lenma.py +319 -0
  53. loglead-1.0.0/loglead/parsers/pl_iplom/README.md +39 -0
  54. loglead-1.0.0/loglead/parsers/pl_iplom/pl_iplom.py +508 -0
  55. loglead-1.0.0/loglead/parsers/pyspell/LICENSE +29 -0
  56. loglead-1.0.0/loglead/parsers/pyspell/README.md +16 -0
  57. loglead-1.0.0/loglead/parsers/pyspell/spell.py +224 -0
  58. loglead-1.0.0/pyproject.toml +65 -0
  59. loglead-1.0.0/setup.cfg +4 -0
loglead-1.0.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 mmantyla
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
loglead-1.0.0/LogLead.egg-info/PKG-INFO ADDED
@@ -0,0 +1,134 @@
+ Metadata-Version: 2.1
+ Name: LogLead
+ Version: 1.0.0
+ Summary: LogLead stands for Log Loader, Enhancer, and Anomaly Detector
+ Author-email: Mika Mäntylä <mika.mantyla@helsinki.fi>, Jesse Nyyssölä <jesse.nyyssola@helsinki.fi>, Yuqing Wang <yuqing.wang@helsinki.fi>, Alexander Bakhtin <alexander.bakhtin@protonmail.com>
+ Maintainer-email: Alexander Bakhtin <alexander.bakhtin@protonmail.com>
+ License: MIT License
+
+ Copyright (c) 2023 mmantyla
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ Project-URL: Homepage, https://github.com/EvoTestOps/LogLead
+ Keywords: logs,anomaly detection,log parsing
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Education
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Intended Audience :: Information Technology
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: System :: Logging
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: polars[numpy,pandas,pyarrow]>=1.5
+ Requires-Dist: scipy>=1.10.1
+ Requires-Dist: regex>=2023.10.3
+ Requires-Dist: drain3>=0.9.11
+ Requires-Dist: tipping>=0.1.3
+ Requires-Dist: scikit-learn>=1.2.2
+ Requires-Dist: xgboost>=1.7.3
+ Requires-Dist: python-dotenv>=1.0.1
+ Requires-Dist: pyyaml
+ Requires-Dist: psutil
+ Requires-Dist: jinja2
+ Requires-Dist: matplotlib
+ Requires-Dist: requests
+ Requires-Dist: tqdm
+ Requires-Dist: GitPython
+ Requires-Dist: py7zr
+ Requires-Dist: shap>=0.42.1
+ Requires-Dist: umap-learn>=0.5.6
+ Requires-Dist: plotly>=5.19.0
+ Requires-Dist: nbformat>=4.2.0
+
+ # LogLead
+ LogLead is designed to efficiently benchmark log anomaly detection algorithms and log representations.
+
+ Currently, it features nearly 1,000 unique anomaly detection combinations, encompassing 8 public datasets, 11 log representations (enhancers), and 11 classifiers. These resources enable you to benchmark your own data, log representation, or classifier against a diverse range of scenarios. LogLead is an actively evolving project, and we are continually adding new datasets, representations, and classifiers. If there's something you believe should be included, please submit a request for a dataset, enhancer, or classifier in the [issue tracker](https://github.com/EvoTestOps/LogLead/issues).
+
+ A key strength of LogLead is its custom loader system, which efficiently isolates the unique aspects of logs from different systems. This design reduces redundant code, as the same enhancement and anomaly detection code can be applied universally once the logs are loaded.
+
+ ## Installing LogLead
+
+ Simply install with `pip`:
+
+ ```
+ python -m pip install loglead
+ ```
+
+ NOTE: the pip version does not include the `tensorflow` dependencies required for `BertEmbeddings`.
+ Install them manually (preferably in a conda environment).
+
+ ### Known issues
+
+ - If the `scikit-learn` wheel fails to compile, check that you have `gcc` and `g++` installed.
+
+ ## Demos
+ In the following demonstrations, you'll notice a significant aspect of LogLead's design efficiency: code reusability. Both demos, while analyzing different datasets, share a substantial amount of their underlying code. This showcases not only LogLead's versatility in handling various log formats but also its ability to streamline the analysis process through reusable code components.
+
+ ### Thunderbird Supercomputer Log Demo
+ - **Script**: [TB_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/TB_samples.py)
+ - **Description**: This demo presents a Thunderbird supercomputer log, labeled at the line (event) level. A “-” in the first column indicates normal behavior, while any other marking represents an anomaly.
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/Thunderbird/Thunderbird_2k.log_structured.csv).
+ - **Dataset**: The demo includes a parquet file containing a subset of 263,408 log events, with 21,955 anomalies.
+ - **Screencast**: For an overview of the demo, watch our [5-minute screencast on YouTube](https://www.youtube.com/watch?v=8stdbtTfJVo).
+
+ ### Hadoop Distributed File System (HDFS) Log Demo
+ - **Script**: [HDFS_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/HDFS_samples.py)
+ - **Description**: This demo showcases logs from the Hadoop Distributed File System (HDFS), labeled at the sequence level (a sequence is a collection of multiple log events).
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/HDFS/HDFS_2k.log_structured.csv).
+ - **Anomaly Labels**: Provided in a separate file.
+ - **Dataset**: The demo includes a parquet file containing a subset of 222,579 log events, forming 11,501 sequences with 350 anomalies.
+
+ ## Example of Anomaly Detection Results
+ Below are anomaly detection results (F1-Binary) for models trained on a 0.5% subset of the HDFS data.
+ We use 5 different log message enhancement strategies: [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/logpai/logparser/tree/main/logparser/Spell), and [BERT](https://github.com/google-research/bert).
+
+ The enhancement strategies are tested with 5 different machine learning algorithms: DT (Decision Tree), SVM (Support Vector Machine), LR (Logistic Regression), RF (Random Forest), and XGB (eXtreme Gradient Boosting).
+
+ |         | Words  | Drain  | Lenma  | Spell  | Bert   | Average |
+ |---------|--------|--------|--------|--------|--------|---------|
+ | DT      | 0.9719 | 0.9816 | 0.9803 | 0.9828 | 0.9301 | 0.9693  |
+ | SVM     | 0.9568 | 0.9591 | 0.9605 | 0.9559 | 0.8569 | 0.9378  |
+ | LR      | 0.9476 | 0.8879 | 0.8900 | 0.9233 | 0.5841 | 0.8466  |
+ | RF      | 0.9717 | 0.9749 | 0.9668 | 0.9809 | 0.9382 | 0.9665  |
+ | XGB     | 0.9721 | 0.9482 | 0.9492 | 0.9535 | 0.9408 | 0.9528  |
+ | Average | 0.9640 | 0.9503 | 0.9494 | 0.9593 | 0.8500 |         |
+
+ ## Functional overview
+ LogLead is composed of distinct modules: the Loader, Enhancer, and Anomaly Detector. We use [Polars](https://www.pola.rs/) dataframes, as Polars is notably faster than Pandas.
+
+ **Loader:** This module reads in the log files and deals with the specific features of each log file. It produces a dataframe with certain semi-mandatory fields; these fields enable actions in the subsequent stages. LogLead has loaders for the following public datasets from 10 different systems:
+ * 3: [HDFS_v1](https://github.com/logpai/loghub/tree/master/HDFS#hdfs_v1), [Hadoop](https://github.com/logpai/loghub/tree/master/Hadoop), and [BGL](https://github.com/logpai/loghub/tree/master/BGL), thanks to the amazing [LogHub team](https://github.com/logpai/loghub). For the full data, see [Zenodo](https://zenodo.org/records/3227177).
+ * 3: [Spirit, Thunderbird, and Liberty](https://www.usenix.org/cfdr-data#hpc4) can be found on the Usenix site.
+ * 2: [Nezha](https://github.com/IntelligentDDS/Nezha) has data from two systems, [TrainTicket](https://github.com/FudanSELab/train-ticket) and the [Google Cloud Webshop demo](https://github.com/GoogleCloudPlatform/microservices-demo). It is the first dataset of microservice-based systems: like traditional log datasets it contains log data, but it additionally provides traces and metrics.
+ * 2: [ADFA](https://github.com/verazuo/a-labelled-version-of-the-ADFA-LD-dataset) and [AWSCTD](https://github.com/DjPasco/AWSCTD) are two datasets designed for intrusion detection.
+
+ **Enhancer:** This module extracts additional data from logs. The enhancement takes place directly within the dataframes, where new columns are added as a result of the enhancement process. For example, log parsing, the creation of tokens from log messages, and measuring log sequence lengths are all considered forms of log enhancement. Enhancement can happen at the event level or be aggregated to the sequence level. Some of the available enhancers: Event Length (characters, words, lines), Sequence Length, Sequence [Duration](https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.Duration.html), and the following "NLP" enhancers: [Regex](https://crates.io/crates/regex), [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), and [Character n-grams](https://en.wikipedia.org/wiki/N-gram). Log parsers: [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/bave/pyspell), [IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/iplom), [AEL](https://github.com/EvoTestOps/LogLead/tree/main/parsers/AEL), [Brain](https://github.com/EvoTestOps/LogLead/tree/main/parsers/Brain), [Fast-IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/fast_iplom), [Tipping](https://pypi.org/project/tipping/), and [BERT](https://github.com/google-research/bert). There is also [NextEventPrediction](https://arxiv.org/abs/2202.09214), including its probabilities and perplexity; next event prediction can be computed on top of any parser's output.
+
+ **Anomaly Detector:** This module uses the enhanced log data to perform anomaly detection. It mainly uses scikit-learn at the moment, but there are a few custom algorithms as well. LogLead has been integrated and tested with the following models:
+ * Supervised (5): [Decision Tree](https://en.wikipedia.org/wiki/Decision_tree), [Support Vector Machine](https://en.wikipedia.org/wiki/Support_vector_machine), [Logistic Regression](https://en.wikipedia.org/wiki/Logistic_regression), [Random Forest](https://en.wikipedia.org/wiki/Random_forest), [eXtreme Gradient Boosting](https://en.wikipedia.org/wiki/XGBoost)
+ * Unsupervised (4): [One-class SVM](https://en.wikipedia.org/wiki/Support_vector_machine#One-class_SVM), [Local Outlier Factor](https://en.wikipedia.org/wiki/Local_outlier_factor), [Isolation Forest](https://en.wikipedia.org/wiki/Isolation_forest), [K-Means](https://en.wikipedia.org/wiki/K-means_clustering)
+ * Custom Unsupervised (2): The [Out-of-Vocabulary Detector](https://github.com/EvoTestOps/LogLead/blob/main/loglead/OOV_detector.py) counts the number of words or character n-grams in the test set that were never seen in training. The [Rarity Model](https://github.com/EvoTestOps/LogLead/blob/main/loglead/RarityModel.py) scores seen words or character n-grams based on their rarity in the training set. See our public [preprint](https://arxiv.org/abs/2312.01934) for more details.
loglead-1.0.0/LogLead.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,57 @@
+ LICENSE
+ MANIFEST.in
+ PYPI_README.md
+ README.md
+ pyproject.toml
+ LogLead.egg-info/PKG-INFO
+ LogLead.egg-info/SOURCES.txt
+ LogLead.egg-info/dependency_links.txt
+ LogLead.egg-info/requires.txt
+ LogLead.egg-info/top_level.txt
+ loglead/OOV_detector.py
+ loglead/RarityModel.py
+ loglead/__init__.py
+ loglead/anomaly_detection.py
+ loglead/explainer.py
+ loglead/next_event_prediction.py
+ loglead/enhancers/__init__.py
+ loglead/enhancers/eventlog.py
+ loglead/enhancers/sequence.py
+ loglead/loaders/__init__.py
+ loglead/loaders/adfa.py
+ loglead/loaders/awsctd.py
+ loglead/loaders/base.py
+ loglead/loaders/bgl.py
+ loglead/loaders/gelf.py
+ loglead/loaders/hadoop.py
+ loglead/loaders/hdfs.py
+ loglead/loaders/nezha.py
+ loglead/loaders/pro.py
+ loglead/loaders/raw.py
+ loglead/loaders/supercomputers.py
+ loglead/parsers/README.md
+ loglead/parsers/__init__.py
+ loglead/parsers/AEL/AEL.py
+ loglead/parsers/AEL/LICENSE
+ loglead/parsers/AEL/README.md
+ loglead/parsers/Brain/Brain.py
+ loglead/parsers/Brain/LICENSE
+ loglead/parsers/Brain/README.md
+ loglead/parsers/bert/README.md
+ loglead/parsers/bert/bertembedding.py
+ loglead/parsers/drain3/LICENSE
+ loglead/parsers/drain3/README.md
+ loglead/parsers/drain3/drain.py
+ loglead/parsers/drain3/drain3.ini
+ loglead/parsers/drain3/drain3_no_masking.ini
+ loglead/parsers/iplom/IPLoM.py
+ loglead/parsers/iplom/LICENSE
+ loglead/parsers/iplom/README.md
+ loglead/parsers/lenma/LICENSE
+ loglead/parsers/lenma/README.md
+ loglead/parsers/lenma/lenma.py
+ loglead/parsers/pl_iplom/README.md
+ loglead/parsers/pl_iplom/pl_iplom.py
+ loglead/parsers/pyspell/LICENSE
+ loglead/parsers/pyspell/README.md
+ loglead/parsers/pyspell/spell.py
loglead-1.0.0/LogLead.egg-info/requires.txt ADDED
@@ -0,0 +1,20 @@
+ polars[numpy,pandas,pyarrow]>=1.5
+ scipy>=1.10.1
+ regex>=2023.10.3
+ drain3>=0.9.11
+ tipping>=0.1.3
+ scikit-learn>=1.2.2
+ xgboost>=1.7.3
+ python-dotenv>=1.0.1
+ pyyaml
+ psutil
+ jinja2
+ matplotlib
+ requests
+ tqdm
+ GitPython
+ py7zr
+ shap>=0.42.1
+ umap-learn>=0.5.6
+ plotly>=5.19.0
+ nbformat>=4.2.0
loglead-1.0.0/LogLead.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ loglead
loglead-1.0.0/MANIFEST.in ADDED
@@ -0,0 +1 @@
+ recursive-include loglead *.ini *.md LICENSE
loglead-1.0.0/PKG-INFO ADDED
@@ -0,0 +1,134 @@
+ Metadata-Version: 2.1
+ Name: LogLead
+ Version: 1.0.0
+ Summary: LogLead stands for Log Loader, Enhancer, and Anomaly Detector
+ Author-email: Mika Mäntylä <mika.mantyla@helsinki.fi>, Jesse Nyyssölä <jesse.nyyssola@helsinki.fi>, Yuqing Wang <yuqing.wang@helsinki.fi>, Alexander Bakhtin <alexander.bakhtin@protonmail.com>
+ Maintainer-email: Alexander Bakhtin <alexander.bakhtin@protonmail.com>
+ License: MIT License
+
+ Copyright (c) 2023 mmantyla
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ Project-URL: Homepage, https://github.com/EvoTestOps/LogLead
+ Keywords: logs,anomaly detection,log parsing
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Education
+ Classifier: Intended Audience :: Science/Research
+ Classifier: Intended Audience :: Information Technology
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: System :: Logging
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: polars[numpy,pandas,pyarrow]>=1.5
+ Requires-Dist: scipy>=1.10.1
+ Requires-Dist: regex>=2023.10.3
+ Requires-Dist: drain3>=0.9.11
+ Requires-Dist: tipping>=0.1.3
+ Requires-Dist: scikit-learn>=1.2.2
+ Requires-Dist: xgboost>=1.7.3
+ Requires-Dist: python-dotenv>=1.0.1
+ Requires-Dist: pyyaml
+ Requires-Dist: psutil
+ Requires-Dist: jinja2
+ Requires-Dist: matplotlib
+ Requires-Dist: requests
+ Requires-Dist: tqdm
+ Requires-Dist: GitPython
+ Requires-Dist: py7zr
+ Requires-Dist: shap>=0.42.1
+ Requires-Dist: umap-learn>=0.5.6
+ Requires-Dist: plotly>=5.19.0
+ Requires-Dist: nbformat>=4.2.0
+
+ # LogLead
+ LogLead is designed to efficiently benchmark log anomaly detection algorithms and log representations.
+
+ Currently, it features nearly 1,000 unique anomaly detection combinations, encompassing 8 public datasets, 11 log representations (enhancers), and 11 classifiers. These resources enable you to benchmark your own data, log representation, or classifier against a diverse range of scenarios. LogLead is an actively evolving project, and we are continually adding new datasets, representations, and classifiers. If there's something you believe should be included, please submit a request for a dataset, enhancer, or classifier in the [issue tracker](https://github.com/EvoTestOps/LogLead/issues).
+
+ A key strength of LogLead is its custom loader system, which efficiently isolates the unique aspects of logs from different systems. This design reduces redundant code, as the same enhancement and anomaly detection code can be applied universally once the logs are loaded.
+
+ ## Installing LogLead
+
+ Simply install with `pip`:
+
+ ```
+ python -m pip install loglead
+ ```
+
+ NOTE: the pip version does not include the `tensorflow` dependencies required for `BertEmbeddings`.
+ Install them manually (preferably in a conda environment).
+
+ ### Known issues
+
+ - If the `scikit-learn` wheel fails to compile, check that you have `gcc` and `g++` installed.
+
+ ## Demos
+ In the following demonstrations, you'll notice a significant aspect of LogLead's design efficiency: code reusability. Both demos, while analyzing different datasets, share a substantial amount of their underlying code. This showcases not only LogLead's versatility in handling various log formats but also its ability to streamline the analysis process through reusable code components.
+
+ ### Thunderbird Supercomputer Log Demo
+ - **Script**: [TB_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/TB_samples.py)
+ - **Description**: This demo presents a Thunderbird supercomputer log, labeled at the line (event) level. A “-” in the first column indicates normal behavior, while any other marking represents an anomaly.
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/Thunderbird/Thunderbird_2k.log_structured.csv).
+ - **Dataset**: The demo includes a parquet file containing a subset of 263,408 log events, with 21,955 anomalies.
+ - **Screencast**: For an overview of the demo, watch our [5-minute screencast on YouTube](https://www.youtube.com/watch?v=8stdbtTfJVo).
+
+ ### Hadoop Distributed File System (HDFS) Log Demo
+ - **Script**: [HDFS_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/HDFS_samples.py)
+ - **Description**: This demo showcases logs from the Hadoop Distributed File System (HDFS), labeled at the sequence level (a sequence is a collection of multiple log events).
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/HDFS/HDFS_2k.log_structured.csv).
+ - **Anomaly Labels**: Provided in a separate file.
+ - **Dataset**: The demo includes a parquet file containing a subset of 222,579 log events, forming 11,501 sequences with 350 anomalies.
+
+ ## Example of Anomaly Detection Results
+ Below are anomaly detection results (F1-Binary) for models trained on a 0.5% subset of the HDFS data.
+ We use 5 different log message enhancement strategies: [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/logpai/logparser/tree/main/logparser/Spell), and [BERT](https://github.com/google-research/bert).
+
+ The enhancement strategies are tested with 5 different machine learning algorithms: DT (Decision Tree), SVM (Support Vector Machine), LR (Logistic Regression), RF (Random Forest), and XGB (eXtreme Gradient Boosting).
+
+ |         | Words  | Drain  | Lenma  | Spell  | Bert   | Average |
+ |---------|--------|--------|--------|--------|--------|---------|
+ | DT      | 0.9719 | 0.9816 | 0.9803 | 0.9828 | 0.9301 | 0.9693  |
+ | SVM     | 0.9568 | 0.9591 | 0.9605 | 0.9559 | 0.8569 | 0.9378  |
+ | LR      | 0.9476 | 0.8879 | 0.8900 | 0.9233 | 0.5841 | 0.8466  |
+ | RF      | 0.9717 | 0.9749 | 0.9668 | 0.9809 | 0.9382 | 0.9665  |
+ | XGB     | 0.9721 | 0.9482 | 0.9492 | 0.9535 | 0.9408 | 0.9528  |
+ | Average | 0.9640 | 0.9503 | 0.9494 | 0.9593 | 0.8500 |         |
+
+ ## Functional overview
+ LogLead is composed of distinct modules: the Loader, Enhancer, and Anomaly Detector. We use [Polars](https://www.pola.rs/) dataframes, as Polars is notably faster than Pandas.
+
+ **Loader:** This module reads in the log files and deals with the specific features of each log file. It produces a dataframe with certain semi-mandatory fields; these fields enable actions in the subsequent stages. LogLead has loaders for the following public datasets from 10 different systems:
+ * 3: [HDFS_v1](https://github.com/logpai/loghub/tree/master/HDFS#hdfs_v1), [Hadoop](https://github.com/logpai/loghub/tree/master/Hadoop), and [BGL](https://github.com/logpai/loghub/tree/master/BGL), thanks to the amazing [LogHub team](https://github.com/logpai/loghub). For the full data, see [Zenodo](https://zenodo.org/records/3227177).
+ * 3: [Spirit, Thunderbird, and Liberty](https://www.usenix.org/cfdr-data#hpc4) can be found on the Usenix site.
+ * 2: [Nezha](https://github.com/IntelligentDDS/Nezha) has data from two systems, [TrainTicket](https://github.com/FudanSELab/train-ticket) and the [Google Cloud Webshop demo](https://github.com/GoogleCloudPlatform/microservices-demo). It is the first dataset of microservice-based systems: like traditional log datasets it contains log data, but it additionally provides traces and metrics.
+ * 2: [ADFA](https://github.com/verazuo/a-labelled-version-of-the-ADFA-LD-dataset) and [AWSCTD](https://github.com/DjPasco/AWSCTD) are two datasets designed for intrusion detection.
+
+ **Enhancer:** This module extracts additional data from logs. The enhancement takes place directly within the dataframes, where new columns are added as a result of the enhancement process. For example, log parsing, the creation of tokens from log messages, and measuring log sequence lengths are all considered forms of log enhancement. Enhancement can happen at the event level or be aggregated to the sequence level. Some of the available enhancers: Event Length (characters, words, lines), Sequence Length, Sequence [Duration](https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.Duration.html), and the following "NLP" enhancers: [Regex](https://crates.io/crates/regex), [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), and [Character n-grams](https://en.wikipedia.org/wiki/N-gram). Log parsers: [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/bave/pyspell), [IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/iplom), [AEL](https://github.com/EvoTestOps/LogLead/tree/main/parsers/AEL), [Brain](https://github.com/EvoTestOps/LogLead/tree/main/parsers/Brain), [Fast-IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/fast_iplom), [Tipping](https://pypi.org/project/tipping/), and [BERT](https://github.com/google-research/bert). There is also [NextEventPrediction](https://arxiv.org/abs/2202.09214), including its probabilities and perplexity; next event prediction can be computed on top of any parser's output.
+
+ **Anomaly Detector:** This module uses the enhanced log data to perform anomaly detection. It mainly uses scikit-learn at the moment, but there are a few custom algorithms as well. LogLead has been integrated and tested with the following models:
+ * Supervised (5): [Decision Tree](https://en.wikipedia.org/wiki/Decision_tree), [Support Vector Machine](https://en.wikipedia.org/wiki/Support_vector_machine), [Logistic Regression](https://en.wikipedia.org/wiki/Logistic_regression), [Random Forest](https://en.wikipedia.org/wiki/Random_forest), [eXtreme Gradient Boosting](https://en.wikipedia.org/wiki/XGBoost)
+ * Unsupervised (4): [One-class SVM](https://en.wikipedia.org/wiki/Support_vector_machine#One-class_SVM), [Local Outlier Factor](https://en.wikipedia.org/wiki/Local_outlier_factor), [Isolation Forest](https://en.wikipedia.org/wiki/Isolation_forest), [K-Means](https://en.wikipedia.org/wiki/K-means_clustering)
+ * Custom Unsupervised (2): The [Out-of-Vocabulary Detector](https://github.com/EvoTestOps/LogLead/blob/main/loglead/OOV_detector.py) counts the number of words or character n-grams in the test set that were never seen in training. The [Rarity Model](https://github.com/EvoTestOps/LogLead/blob/main/loglead/RarityModel.py) scores seen words or character n-grams based on their rarity in the training set. See our public [preprint](https://arxiv.org/abs/2312.01934) for more details.
loglead-1.0.0/PYPI_README.md ADDED
@@ -0,0 +1,70 @@
+ # LogLead
+ LogLead is designed to efficiently benchmark log anomaly detection algorithms and log representations.
+
+ Currently, it features nearly 1,000 unique anomaly detection combinations, encompassing 8 public datasets, 11 log representations (enhancers), and 11 classifiers. These resources enable you to benchmark your own data, log representation, or classifier against a diverse range of scenarios. LogLead is an actively evolving project, and we are continually adding new datasets, representations, and classifiers. If there's something you believe should be included, please submit a request for a dataset, enhancer, or classifier in the [issue tracker](https://github.com/EvoTestOps/LogLead/issues).
+
+ A key strength of LogLead is its custom loader system, which efficiently isolates the unique aspects of logs from different systems. This design reduces redundant code, as the same enhancement and anomaly detection code can be applied universally once the logs are loaded.
+
+ ## Installing LogLead
+
+ Simply install with `pip`:
+
+ ```
+ python -m pip install loglead
+ ```
+
+ NOTE: the pip version does not include the `tensorflow` dependencies required for `BertEmbeddings`.
+ Install them manually (preferably in a conda environment).
+
+ ### Known issues
+
+ - If the `scikit-learn` wheel fails to compile, check that you have `gcc` and `g++` installed.
+
+ ## Demos
+ In the following demonstrations, you'll notice a significant aspect of LogLead's design efficiency: code reusability. Both demos, while analyzing different datasets, share a substantial amount of their underlying code. This showcases not only LogLead's versatility in handling various log formats but also its ability to streamline the analysis process through reusable code components.
+
+ ### Thunderbird Supercomputer Log Demo
+ - **Script**: [TB_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/TB_samples.py)
+ - **Description**: This demo presents a Thunderbird supercomputer log, labeled at the line (event) level. A “-” in the first column indicates normal behavior, while any other marking represents an anomaly.
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/Thunderbird/Thunderbird_2k.log_structured.csv).
+ - **Dataset**: The demo includes a parquet file containing a subset of 263,408 log events, with 21,955 anomalies.
+ - **Screencast**: For an overview of the demo, watch our [5-minute screencast on YouTube](https://www.youtube.com/watch?v=8stdbtTfJVo).
+
+ ### Hadoop Distributed File System (HDFS) Log Demo
+ - **Script**: [HDFS_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/HDFS_samples.py)
+ - **Description**: This demo showcases logs from the Hadoop Distributed File System (HDFS), labeled at the sequence level (a sequence is a collection of multiple log events).
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/HDFS/HDFS_2k.log_structured.csv).
+ - **Anomaly Labels**: Provided in a separate file.
+ - **Dataset**: The demo includes a parquet file containing a subset of 222,579 log events, forming 11,501 sequences with 350 anomalies.
+
+ ## Example of Anomaly Detection Results
+ Below are anomaly detection results (F1-Binary) for models trained on a 0.5% subset of the HDFS data.
+ We use 5 different log message enhancement strategies: [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/logpai/logparser/tree/main/logparser/Spell), and [BERT](https://github.com/google-research/bert).
+
+ The enhancement strategies are tested with 5 different machine learning algorithms: DT (Decision Tree), SVM (Support Vector Machine), LR (Logistic Regression), RF (Random Forest), and XGB (eXtreme Gradient Boosting).
+
+ |         | Words  | Drain  | Lenma  | Spell  | Bert   | Average |
+ |---------|--------|--------|--------|--------|--------|---------|
+ | DT      | 0.9719 | 0.9816 | 0.9803 | 0.9828 | 0.9301 | 0.9693  |
+ | SVM     | 0.9568 | 0.9591 | 0.9605 | 0.9559 | 0.8569 | 0.9378  |
+ | LR      | 0.9476 | 0.8879 | 0.8900 | 0.9233 | 0.5841 | 0.8466  |
+ | RF      | 0.9717 | 0.9749 | 0.9668 | 0.9809 | 0.9382 | 0.9665  |
+ | XGB     | 0.9721 | 0.9482 | 0.9492 | 0.9535 | 0.9408 | 0.9528  |
+ | Average | 0.9640 | 0.9503 | 0.9494 | 0.9593 | 0.8500 |         |
+
+ ## Functional overview
+ LogLead is composed of distinct modules: the Loader, Enhancer, and Anomaly Detector. We use [Polars](https://www.pola.rs/) dataframes, as Polars is notably faster than Pandas.
+
+ **Loader:** This module reads in the log files and deals with the specific features of each log file. It produces a dataframe with certain semi-mandatory fields; these fields enable actions in the subsequent stages. LogLead has loaders for the following public datasets from 10 different systems:
+ * 3: [HDFS_v1](https://github.com/logpai/loghub/tree/master/HDFS#hdfs_v1), [Hadoop](https://github.com/logpai/loghub/tree/master/Hadoop), and [BGL](https://github.com/logpai/loghub/tree/master/BGL), thanks to the amazing [LogHub team](https://github.com/logpai/loghub). For the full data, see [Zenodo](https://zenodo.org/records/3227177).
+ * 3: [Spirit, Thunderbird, and Liberty](https://www.usenix.org/cfdr-data#hpc4) can be found on the Usenix site.
+ * 2: [Nezha](https://github.com/IntelligentDDS/Nezha) has data from two systems, [TrainTicket](https://github.com/FudanSELab/train-ticket) and the [Google Cloud Webshop demo](https://github.com/GoogleCloudPlatform/microservices-demo). It is the first dataset of microservice-based systems: like traditional log datasets it contains log data, but it additionally provides traces and metrics.
+ * 2: [ADFA](https://github.com/verazuo/a-labelled-version-of-the-ADFA-LD-dataset) and [AWSCTD](https://github.com/DjPasco/AWSCTD) are two datasets designed for intrusion detection.
+
+ **Enhancer:** This module extracts additional data from logs. The enhancement takes place directly within the dataframes, where new columns are added as a result of the enhancement process. For example, log parsing, the creation of tokens from log messages, and measuring log sequence lengths are all considered forms of log enhancement. Enhancement can happen at the event level or be aggregated to the sequence level. Some of the available enhancers: Event Length (characters, words, lines), Sequence Length, Sequence [Duration](https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.Duration.html), and the following "NLP" enhancers: [Regex](https://crates.io/crates/regex), [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), and [Character n-grams](https://en.wikipedia.org/wiki/N-gram). Log parsers: [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/bave/pyspell), [IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/iplom), [AEL](https://github.com/EvoTestOps/LogLead/tree/main/parsers/AEL), [Brain](https://github.com/EvoTestOps/LogLead/tree/main/parsers/Brain), [Fast-IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/fast_iplom), [Tipping](https://pypi.org/project/tipping/), and [BERT](https://github.com/google-research/bert). There is also [NextEventPrediction](https://arxiv.org/abs/2202.09214), including its probabilities and perplexity; next event prediction can be computed on top of any parser's output.
+
+ **Anomaly Detector:** This module uses the enhanced log data to perform anomaly detection. It mainly uses scikit-learn at the moment, but there are a few custom algorithms as well. LogLead has been integrated and tested with the following models:
+ * Supervised (5): [Decision Tree](https://en.wikipedia.org/wiki/Decision_tree), [Support Vector Machine](https://en.wikipedia.org/wiki/Support_vector_machine), [Logistic Regression](https://en.wikipedia.org/wiki/Logistic_regression), [Random Forest](https://en.wikipedia.org/wiki/Random_forest), [eXtreme Gradient Boosting](https://en.wikipedia.org/wiki/XGBoost)
+ * Unsupervised (4): [One-class SVM](https://en.wikipedia.org/wiki/Support_vector_machine#One-class_SVM), [Local Outlier Factor](https://en.wikipedia.org/wiki/Local_outlier_factor), [Isolation Forest](https://en.wikipedia.org/wiki/Isolation_forest), [K-Means](https://en.wikipedia.org/wiki/K-means_clustering)
+ * Custom Unsupervised (2): The [Out-of-Vocabulary Detector](https://github.com/EvoTestOps/LogLead/blob/main/loglead/OOV_detector.py) counts the number of words or character n-grams in the test set that were never seen in training. The [Rarity Model](https://github.com/EvoTestOps/LogLead/blob/main/loglead/RarityModel.py) scores seen words or character n-grams based on their rarity in the training set. See our public [preprint](https://arxiv.org/abs/2312.01934) for more details.
loglead-1.0.0/README.md ADDED
@@ -0,0 +1,75 @@
+ # LogLead
+ LogLead is designed to efficiently benchmark log anomaly detection algorithms and log representations.
+
+ <img src="images/Log%20processing.svg">
+
+ Currently, it features nearly 1,000 unique anomaly detection combinations, encompassing 8 public datasets, 11 log representations (enhancers), and 11 classifiers. These resources enable you to benchmark your own data, log representation, or classifier against a diverse range of scenarios. LogLead is an actively evolving project, and we are continually adding new datasets, representations, and classifiers. If there's something you believe should be included, please submit a request for a dataset, enhancer, or classifier in the [issue tracker](https://github.com/EvoTestOps/LogLead/issues).
+
+ A key strength of LogLead is its custom loader system, which efficiently isolates the unique aspects of logs from different systems. This design reduces redundant code, as the same enhancement and anomaly detection code can be applied universally once the logs are loaded.
+ <!--
+ ## Installing LogLead
+
+ Simply install with `pip`:
+
+ ```
+ python -m pip install loglead
+ ```
+
+ NOTE: the pip version does not include the `tensorflow` dependencies required for `BertEmbeddings`.
+ Install them manually (preferably in a conda environment).
+
+ ### Known issues
+
+ - If the `scikit-learn` wheel fails to compile, check that you have `gcc` and `g++` installed.
+ -->
+
+ ## Demos
+ In the following demonstrations, you'll notice a significant aspect of LogLead's design efficiency: code reusability. Both demos, while analyzing different datasets, share a substantial amount of their underlying code. This showcases not only LogLead's versatility in handling various log formats but also its ability to streamline the analysis process through reusable code components.
+
+ ### Thunderbird Supercomputer Log Demo
+ - **Script**: [TB_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/TB_samples.py)
+ - **Description**: This demo presents a Thunderbird supercomputer log, labeled at the line (event) level. A “-” in the first column indicates normal behavior, while any other marking represents an anomaly.
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/Thunderbird/Thunderbird_2k.log_structured.csv).
+ - **Dataset**: The demo includes a parquet file containing a subset of 263,408 log events, with 21,955 anomalies.
+ - **Screencast**: For an overview of the demo, watch our [5-minute screencast on YouTube](https://www.youtube.com/watch?v=8stdbtTfJVo).
+
+ ### Hadoop Distributed File System (HDFS) Log Demo
+ - **Script**: [HDFS_samples.py](https://github.com/EvoTestOps/LogLead/blob/main/demo/HDFS_samples.py)
+ - **Description**: This demo showcases logs from the Hadoop Distributed File System (HDFS), labeled at the sequence level (a sequence is a collection of multiple log events).
+ - **Log Snapshot**: View the log [here](https://github.com/logpai/loghub/blob/master/HDFS/HDFS_2k.log_structured.csv).
+ - **Anomaly Labels**: Provided in a separate file.
+ - **Dataset**: The demo includes a parquet file containing a subset of 222,579 log events, forming 11,501 sequences with 350 anomalies.
+
+ ## Example of Anomaly Detection Results
+ Below are anomaly detection results (F1-Binary) for models trained on a 0.5% subset of the HDFS data.
+ We use 5 different log message enhancement strategies: [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/logpai/logparser/tree/main/logparser/Spell), and [BERT](https://github.com/google-research/bert).
+
+ The enhancement strategies are tested with 5 different machine learning algorithms: DT (Decision Tree), SVM (Support Vector Machine), LR (Logistic Regression), RF (Random Forest), and XGB (eXtreme Gradient Boosting).
+
+ |         | Words  | Drain  | Lenma  | Spell  | Bert   | Average |
+ |---------|--------|--------|--------|--------|--------|---------|
+ | DT      | 0.9719 | 0.9816 | 0.9803 | 0.9828 | 0.9301 | 0.9693  |
+ | SVM     | 0.9568 | 0.9591 | 0.9605 | 0.9559 | 0.8569 | 0.9378  |
+ | LR      | 0.9476 | 0.8879 | 0.8900 | 0.9233 | 0.5841 | 0.8466  |
+ | RF      | 0.9717 | 0.9749 | 0.9668 | 0.9809 | 0.9382 | 0.9665  |
+ | XGB     | 0.9721 | 0.9482 | 0.9492 | 0.9535 | 0.9408 | 0.9528  |
+ | Average | 0.9640 | 0.9503 | 0.9494 | 0.9593 | 0.8500 |         |
+
+ ## Functional overview
+ LogLead is composed of distinct modules: the Loader, Enhancer, and Anomaly Detector. We use [Polars](https://www.pola.rs/) dataframes, as Polars is notably faster than Pandas.
+
+ <img src="images/LogLead_Dataflow_Diagram.png" width="40%">
+
+ **Loader:** This module reads in the log files and deals with the specific features of each log file. It produces a dataframe with certain semi-mandatory fields; these fields enable actions in the subsequent stages. LogLead has loaders for the following public datasets from 10 different systems:
+ * 3: [HDFS_v1](https://github.com/logpai/loghub/tree/master/HDFS#hdfs_v1), [Hadoop](https://github.com/logpai/loghub/tree/master/Hadoop), and [BGL](https://github.com/logpai/loghub/tree/master/BGL), thanks to the amazing [LogHub team](https://github.com/logpai/loghub). For the full data, see [Zenodo](https://zenodo.org/records/3227177).
+ * 3: [Spirit, Thunderbird, and Liberty](https://www.usenix.org/cfdr-data#hpc4) can be found on the Usenix site.
+ * 2: [Nezha](https://github.com/IntelligentDDS/Nezha) has data from two systems, [TrainTicket](https://github.com/FudanSELab/train-ticket) and the [Google Cloud Webshop demo](https://github.com/GoogleCloudPlatform/microservices-demo). It is the first dataset of microservice-based systems: like traditional log datasets it contains log data, but it additionally provides traces and metrics.
+ * 2: [ADFA](https://github.com/verazuo/a-labelled-version-of-the-ADFA-LD-dataset) and [AWSCTD](https://github.com/DjPasco/AWSCTD) are two datasets designed for intrusion detection.
+
+ **Enhancer:** This module extracts additional data from logs. The enhancement takes place directly within the dataframes, where new columns are added as a result of the enhancement process. For example, log parsing, the creation of tokens from log messages, and measuring log sequence lengths are all considered forms of log enhancement. Enhancement can happen at the event level or be aggregated to the sequence level. Some of the available enhancers: Event Length (characters, words, lines), Sequence Length, Sequence [Duration](https://pola-rs.github.io/polars/py-polars/html/reference/api/polars.Duration.html), and the following "NLP" enhancers: [Regex](https://crates.io/crates/regex), [Words](https://en.wikipedia.org/wiki/Bag-of-words_model), and [Character n-grams](https://en.wikipedia.org/wiki/N-gram). Log parsers: [Drain](https://github.com/logpai/Drain3), [LenMa](https://github.com/keiichishima/templateminer), [Spell](https://github.com/bave/pyspell), [IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/iplom), [AEL](https://github.com/EvoTestOps/LogLead/tree/main/parsers/AEL), [Brain](https://github.com/EvoTestOps/LogLead/tree/main/parsers/Brain), [Fast-IPLoM](https://github.com/EvoTestOps/LogLead/tree/main/parsers/fast_iplom), [Tipping](https://pypi.org/project/tipping/), and [BERT](https://github.com/google-research/bert). There is also [NextEventPrediction](https://arxiv.org/abs/2202.09214), including its probabilities and perplexity; next event prediction can be computed on top of any parser's output.
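For reference, the perplexity mentioned above follows the standard definition (assumed here; the linked paper spells out the exact variant it uses): for a sequence of $N$ events with predicted probabilities $p(e_i \mid e_1, \dots, e_{i-1})$,

```math
\mathrm{PPL} = \exp\!\left(-\frac{1}{N} \sum_{i=1}^{N} \ln p\left(e_i \mid e_1, \dots, e_{i-1}\right)\right)
```

Poorly predicted events drive the perplexity up, which is what makes it usable as an anomaly signal.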
+
+ **Anomaly Detector:** This module uses the enhanced log data to perform anomaly detection. It mainly uses scikit-learn at the moment, but there are a few custom algorithms as well. LogLead has been integrated and tested with the following models:
+ * Supervised (5): [Decision Tree](https://en.wikipedia.org/wiki/Decision_tree), [Support Vector Machine](https://en.wikipedia.org/wiki/Support_vector_machine), [Logistic Regression](https://en.wikipedia.org/wiki/Logistic_regression), [Random Forest](https://en.wikipedia.org/wiki/Random_forest), [eXtreme Gradient Boosting](https://en.wikipedia.org/wiki/XGBoost)
+ * Unsupervised (4): [One-class SVM](https://en.wikipedia.org/wiki/Support_vector_machine#One-class_SVM), [Local Outlier Factor](https://en.wikipedia.org/wiki/Local_outlier_factor), [Isolation Forest](https://en.wikipedia.org/wiki/Isolation_forest), [K-Means](https://en.wikipedia.org/wiki/K-means_clustering)
+ * Custom Unsupervised (2): The [Out-of-Vocabulary Detector](https://github.com/EvoTestOps/LogLead/blob/main/loglead/OOV_detector.py) counts the number of words or character n-grams in the test set that were never seen in training. The [Rarity Model](https://github.com/EvoTestOps/LogLead/blob/main/loglead/RarityModel.py) scores seen words or character n-grams based on their rarity in the training set. See our public [preprint](https://arxiv.org/abs/2312.01934) for more details.
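To make the Loader → Enhancer → Anomaly Detector flow concrete, here is a minimal pipeline sketch. This is a sketch only: the class names (`HDFSLoader`, `EventLogEnhancer`, `AnomalyDetection`), their methods, the `e_words` column name, and the file paths are all assumptions inferred from the module layout in this package (loglead/loaders/hdfs.py, loglead/enhancers/eventlog.py, loglead/anomaly_detection.py); the demo scripts linked above show the exact API.

```python
# Hypothetical end-to-end sketch; all names below are assumptions, not the verified API.
from loglead.loaders import HDFSLoader                  # loglead/loaders/hdfs.py
from loglead.enhancers import EventLogEnhancer          # loglead/enhancers/eventlog.py
from loglead.anomaly_detection import AnomalyDetection  # loglead/anomaly_detection.py

# Load: the loader encapsulates HDFS-specific parsing and returns a Polars
# dataframe with the semi-mandatory fields described above.
loader = HDFSLoader(filename="HDFS.log",                # placeholder paths
                    labels_file_name="anomaly_label.csv")
df = loader.execute()

# Enhance: each call returns the dataframe with a new column added.
enhancer = EventLogEnhancer(df)
df = enhancer.words()        # bag-of-words tokens per event
df = enhancer.parse_drain()  # Drain template id per event

# Detect: train one of the supervised models on an enhanced column.
sad = AnomalyDetection()
sad.item_list_col = "e_words"           # assumed column name produced by words()
sad.test_train_split(df, test_frac=0.9)
sad.train_LR()                          # logistic regression
print(sad.predict())
```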
loglead-1.0.0/loglead/OOV_detector.py ADDED
@@ -0,0 +1,55 @@
+ import polars as pl
+ import numpy as np
+
+ __all__ = ['OOV_detector']
+
+
+ class OOV_detector:
+     def __init__(self, len_col, test_df, threshold=1):
+         self.len_col = len_col
+         self.test_df = test_df
+         self.scores = 0
+         self.threshold = threshold
+
+     def fit(self, X_train=None, labels=None):
+         # The "training set" to compare against comes inherently from the test data's sparse matrix
+         return
+
+     def predict(self, X_test):
+         # Give an array of 0s if the needed length column is lacking in the df
+         if self.len_col not in self.test_df.columns:
+             print("Column not found for OOVD")
+             return np.zeros(self.test_df.select(pl.len()).item())
+         else:
+             msglen = self.test_df[self.len_col]
+             # Row sums of the sparse matrix = in-vocabulary token count per message
+             test_word_count_np = np.array(X_test.tocsr().sum(axis=1)).squeeze()
+             test_word_count_series = pl.Series(test_word_count_np)
+             # Score = total tokens minus in-vocabulary tokens, i.e. the OOV token count
+             self.scores = np.array(msglen - test_word_count_series)
+             self.is_ano = (self.scores > self.threshold).astype(int)
+             return self.is_ano
+
+     def custom_plot(self, labels, x_axis_scale=1.0):
+         # Double the font size
+         # mpl.rcParams.update({'font.size': mpl.rcParams['font.size']*1.5})
+         try:
+             import matplotlib.pyplot as plt
+         except Exception as e:
+             raise ImportError("Error importing matplotlib") from e
+
+         labels_bool = np.array(labels).astype(bool)
+         scores_norm = self.scores[~labels_bool]
+         scores_ano = self.scores[labels_bool]
+
+         plt.figure(figsize=(8, 6))  # 4:3 aspect ratio
+         plt.hist(scores_norm, bins=50, color='blue', alpha=0.5, label='Normal')
+         plt.hist(scores_ano, bins=50, color='red', alpha=0.5, label='Anomaly')
+         plt.xlabel('Value')
+         plt.ylabel('Frequency')
+         plt.legend(loc='upper right')
+
+         # Adjust x-axis limit based on the parameter
+         max_score = max(np.max(scores_norm), np.max(scores_ano))
+         plt.xlim([0, max_score * x_axis_scale])
+
+         plt.tight_layout()
+         plt.show()
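As a usage illustration for the detector above, here is a small sketch. Only `OOV_detector`'s constructor and methods come from the file itself; the `CountVectorizer` front end and the `e_words_len` column name are assumptions about how the sparse matrix and length column would be produced in practice.

```python
# Hypothetical usage of OOV_detector; the vectorizer setup and column name are
# assumptions -- only the OOV_detector API comes from the file above.
import polars as pl
from sklearn.feature_extraction.text import CountVectorizer

from loglead.OOV_detector import OOV_detector

train_msgs = ["disk check ok", "disk check ok", "net link up"]
test_msgs = ["disk check ok", "kernel panic now"]

# Fit the vocabulary on training messages only: unseen test words get no
# column, so they silently drop out of the per-row sums used in predict().
vec = CountVectorizer(token_pattern=r"\S+").fit(train_msgs)
X_test = vec.transform(test_msgs)

# The length column must hold each test message's total token count.
test_df = pl.DataFrame({"e_words_len": [len(m.split()) for m in test_msgs]})

oovd = OOV_detector(len_col="e_words_len", test_df=test_df, threshold=1)
oovd.fit()                   # no-op by design, see above
print(oovd.predict(X_test))  # [0 1]: only the second message has >1 OOV tokens
```

The detector never looks at the raw text itself: everything it needs is the per-message token total and the sparse matrix's row sums, which is what keeps it cheap on large logs.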