javacore-analyser 2.0rc1-py3-none-any.whl → 2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- javacore_analyser/__main__.py +43 -0
- javacore_analyser/abstract_snapshot_collection.py +2 -2
- javacore_analyser/constants.py +2 -1
- javacore_analyser/data/expand.js +2 -0
- javacore_analyser/data/html/processing_data.html +17 -0
- javacore_analyser/data/jquery/search.js +22 -0
- javacore_analyser/data/jquery/wait2scripts.js +31 -7
- javacore_analyser/data/xml/javacores/javacore.xsl +126 -117
- javacore_analyser/data/xml/report.xsl +36 -20
- javacore_analyser/data/xml/threads/thread.xsl +162 -153
- javacore_analyser/javacore.py +5 -3
- javacore_analyser/javacore_analyser_batch.py +45 -22
- javacore_analyser/javacore_analyser_web.py +62 -26
- javacore_analyser/javacore_set.py +82 -43
- javacore_analyser/snapshot_collection_collection.py +5 -3
- javacore_analyser/stack_trace.py +2 -2
- javacore_analyser/stack_trace_element.py +1 -1
- javacore_analyser/templates/index.html +9 -4
- javacore_analyser/thread_snapshot.py +3 -2
- javacore_analyser/tips.py +5 -3
- javacore_analyser/verbose_gc.py +4 -2
- {javacore_analyser-2.0rc1.dist-info → javacore_analyser-2.1.dist-info}/METADATA +46 -20
- javacore_analyser-2.1.dist-info/RECORD +46 -0
- {javacore_analyser-2.0rc1.dist-info → javacore_analyser-2.1.dist-info}/WHEEL +1 -1
- javacore_analyser-2.0rc1.dist-info/RECORD +0 -44
- {javacore_analyser-2.0rc1.dist-info → javacore_analyser-2.1.dist-info}/entry_points.txt +0 -0
- {javacore_analyser-2.0rc1.dist-info → javacore_analyser-2.1.dist-info}/licenses/LICENSE +0 -0

javacore_analyser/javacore_analyser_web.py
CHANGED
@@ -2,6 +2,7 @@
 # Copyright IBM Corp. 2024 - 2024
 # SPDX-License-Identifier: Apache-2.0
 #
+import argparse
 import locale
 import logging
 import os
@@ -9,6 +10,7 @@ import re
 import shutil
 import sys
 import tempfile
+import threading
 import time
 from pathlib import Path
 
@@ -16,29 +18,30 @@ from flask import Flask, render_template, request, send_from_directory, redirect
 from waitress import serve
 
 import javacore_analyser.javacore_analyser_batch
-from javacore_analyser.constants import DEFAULT_REPORTS_DIR, DEFAULT_PORT
+from javacore_analyser.constants import DEFAULT_REPORTS_DIR, DEFAULT_PORT, TEMP_DIR
 from javacore_analyser.logging_utils import create_console_logging, create_file_logging
 
 """
 To run the application from cmd type:
-
+
 flask --app javacore_analyser_web run
 """
 app = Flask(__name__)
-
-
-
-
-
-
-
-
+reports_dir = DEFAULT_REPORTS_DIR
+
+
+# Assisted by watsonx Code Assistant
+def create_temp_data_in_reports_dir(directory):
+    tmp_reports_dir = os.path.join(directory, TEMP_DIR)
+    if os.path.isdir(tmp_reports_dir):
+        shutil.rmtree(tmp_reports_dir, ignore_errors=True)
+    os.mkdir(tmp_reports_dir)
 
 
 @app.route('/')
 def index():
     reports = [{"name": Path(f).name, "date": time.ctime(os.path.getctime(f)), "timestamp": os.path.getctime(f)}
-               for f in os.scandir(reports_dir) if f.is_dir()]
+               for f in os.scandir(reports_dir) if f.is_dir() and Path(f).name is not TEMP_DIR]
     reports.sort(key=lambda item: item["timestamp"], reverse=True)
     return render_template('index.html', reports=reports)
 
@@ -50,8 +53,8 @@ def dir_listing(path):
 
 @app.route('/zip/<path:path>')
 def compress(path):
+    temp_zip_dir = tempfile.TemporaryDirectory()
     try:
-        temp_zip_dir = tempfile.TemporaryDirectory()
         temp_zip_dir_name = temp_zip_dir.name
         zip_filename = path + ".zip"
         report_location = os.path.join(reports_dir, path)
@@ -68,7 +71,7 @@ def compress(path):
 def delete(path):
     # Checking if the report exists. This is to prevent attempt to delete any data by deleting any file outside
     # report dir if you prepare path variable.
-    reports_list = os.listdir(reports_dir)
+    # reports_list = os.listdir(reports_dir)
     report_location = os.path.normpath(os.path.join(reports_dir, path))
     if not report_location.startswith(reports_dir):
         logging.error("Deleted report in report list. Not deleting")
@@ -82,10 +85,19 @@ def delete(path):
 # Latest GenAI contribution: ibm/granite-20b-code-instruct-v2
 @app.route('/upload', methods=['POST'])
 def upload_file():
+
+    report_name = request.values.get("report_name")
+    report_name = re.sub(r'[^a-zA-Z0-9]', '_', report_name)
+
+    # Create a temporary directory to store uploaded files
+    # Note We have to use permanent files and then delete them.
+    # tempfile.Temporary_directory function does not work when you want to access files from another threads.
+    javacores_temp_dir_name = os.path.normpath(os.path.join(reports_dir, TEMP_DIR, report_name))
+    if not javacores_temp_dir_name.startswith(reports_dir):
+        raise Exception("Security exception: Uncontrolled data used in path expression")
+
     try:
-
-        javacores_temp_dir = tempfile.TemporaryDirectory()
-        javacores_temp_dir_name = javacores_temp_dir.name
+        os.mkdir(javacores_temp_dir_name)
 
         # Get the list of files from webpage
         files = request.files.getlist("files")
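
The rewritten `upload_file` handler derives its working directory from the user-supplied `report_name`, so it normalizes the path and verifies the prefix before creating anything. A minimal sketch of that guard pattern, using a hypothetical `safe_join` helper that is not part of the package:

```python
import os

def safe_join(base_dir, *untrusted_parts):
    """Join untrusted path segments onto base_dir, rejecting traversal attempts."""
    base = os.path.abspath(base_dir)
    candidate = os.path.abspath(os.path.join(base, *untrusted_parts))
    # abspath collapses "..", so anything escaping base no longer shares its prefix
    if candidate != base and not candidate.startswith(base + os.sep):
        raise ValueError("Uncontrolled data used in path expression: " + candidate)
    return candidate

# safe_join("/reports", "tmp", "my_report") -> "/reports/tmp/my_report"
# safe_join("/reports", "../etc/passwd")    -> raises ValueError
```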
@@ -97,26 +109,50 @@ def upload_file():
             file.save(file_name)
             input_files.append(file_name)
 
-        report_name = request.values.get("report_name")
-        report_name = re.sub(r'[^a-zA-Z0-9]', '_', report_name)
-
         # Process the uploaded file
-        report_output_dir = reports_dir
-
-
-
+        report_output_dir = os.path.join(reports_dir, report_name)
+        processing_thread = threading.Thread(
+            target=javacore_analyser.javacore_analyser_batch.process_javacores_and_generate_report_data,
+            name="Processing javacore data", args=(input_files, report_output_dir)
+        )
+        processing_thread.start()
+
+        time.sleep(1)  # Give 1 second to generate index.html in processing_thread before redirecting
         return redirect("/reports/" + report_name + "/index.html")
     finally:
-
+        shutil.rmtree(javacores_temp_dir_name, ignore_errors=True)
+
 
 def main():
-
-
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--debug", help="Debug mode. Use only for development", default=False)
+    parser.add_argument("--port", help="Port to run application", default=DEFAULT_PORT)
+    parser.add_argument("--reports-dir", help="Directory where app reports are stored",
+                        default=DEFAULT_REPORTS_DIR)
+    args = parser.parse_args()
+    debug = args.debug
+    port = args.port
+    reports_directory = args.reports_dir
+
+    run_web(debug, port, reports_directory)
+
+
+def run_web(debug=False, port=5000, reports_directory=DEFAULT_REPORTS_DIR):
+    global reports_dir
+    reports_dir = reports_directory
+    create_console_logging()
+    logging.info("Javacore analyser")
+    logging.info("Python version: " + sys.version)
+    logging.info("Preferred encoding: " + locale.getpreferredencoding())
+    logging.info("Reports directory: " + reports_dir)
+    create_file_logging(reports_dir)
+    create_temp_data_in_reports_dir(reports_dir)
     if debug:
         app.run(debug=True, port=port)  # Run Flask for development
     else:
         serve(app, port=port)  # Run Waitress in production
 
+
 if __name__ == '__main__':
     """
     The application passes the following environmental variables:
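
Uploads are no longer processed synchronously: the handler starts a `threading.Thread` that runs the batch analysis, then redirects to the report URL, which serves placeholder pages until the real ones are written (see the placeholder changes in `javacore_set.py` below). A minimal sketch of that hand-off, with a hypothetical `generate_report` standing in for the batch call:

```python
import threading
import time
from flask import Flask, redirect

app = Flask(__name__)

def generate_report(input_files, output_dir):
    time.sleep(30)  # stand-in for the long-running javacore analysis

@app.route("/upload", methods=["POST"])
def upload():
    output_dir = "reports/demo"
    worker = threading.Thread(target=generate_report, args=([], output_dir),
                              name="Processing javacore data", daemon=True)
    worker.start()
    time.sleep(1)  # let the worker create a placeholder index.html first
    return redirect("/reports/demo/index.html")
```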

javacore_analyser/javacore_set.py
CHANGED
@@ -17,6 +17,7 @@ from xml.dom.minidom import parseString
 import importlib_resources
 from lxml import etree
 from lxml.etree import XMLSyntaxError
+from tqdm import tqdm
 
 from javacore_analyser import tips
 from javacore_analyser.code_snapshot_collection import CodeSnapshotCollection
@@ -32,7 +33,7 @@ def _create_xml_xsl_for_collection(tmp_dir, templates_dir, xml_xsl_filename, col
     logging.info("Creating xmls and xsls in " + tmp_dir)
     os.mkdir(tmp_dir)
     extensions = [".xsl", ".xml"]
-    for extension in extensions:
+    for extension in tqdm(extensions, desc="Creating xml/xsl files", unit=" file"):
         file_full_path = os.path.normpath(os.path.join(templates_dir, xml_xsl_filename + extension))
         if not file_full_path.startswith(templates_dir):
             raise Exception("Security exception: Uncontrolled data used in path expression")
@@ -89,6 +90,7 @@ class JavacoreSet:
 
     # Assisted by WCA@IBM
     # Latest GenAI contribution: ibm/granite-8b-code-instruct
+    @staticmethod
     def process_javacores(input_path):
         """
         Processes Java core data and generates tips based on the analysis.
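
Several long-running loops in this release are wrapped in `tqdm`, which adds a console progress bar without changing the loop body; the `desc` and `unit` strings only affect the bar's label. A small illustrative sketch:

```python
from tqdm import tqdm

files = ["javacore.001.txt", "javacore.002.txt"]  # example inputs
for name in tqdm(files, desc="Parsing javacore files", unit=" file"):
    pass  # parse one file; tqdm only decorates the iteration
```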
@@ -124,49 +126,53 @@ class JavacoreSet:
         temp_dir = tempfile.TemporaryDirectory()
         temp_dir_name = temp_dir.name
         logging.info("Created temp dir: " + temp_dir_name)
-        self.__create_output_files_structure(output_dir)
         self.__create_report_xml(temp_dir_name + "/report.xml")
+        placeholder_filename = os.path.join(output_dir, "data", "html", "processing_data.html")
+        self.__generate_placeholder_htmls(placeholder_filename,
+                                          os.path.join(output_dir, "threads"),
+                                          self.threads, "thread")
+        self.__generate_placeholder_htmls(placeholder_filename,
+                                          os.path.join(output_dir, "javacores"),
+                                          self.javacores, "")
+        self.__create_index_html(temp_dir_name, output_dir)
         self.__generate_htmls_for_threads(output_dir, temp_dir_name)
         self.__generate_htmls_for_javacores(output_dir, temp_dir_name)
-        self.__create_index_html(temp_dir_name, output_dir)
 
-
-
-
-
-
-            raise Exception("Security exception: Uncontrolled data used in path expression")
-        if os.path.isdir(data_output_dir):
-            shutil.rmtree(data_output_dir, ignore_errors=True)
-        logging.info("Data dir: " + data_output_dir)
+    @staticmethod
+    def __generate_placeholder_htmls(placeholder_file, directory, collection, file_prefix):
+        if os.path.exists(directory):
+            shutil.rmtree(directory)
+        os.mkdir(directory)
 
-
-
-
-
+        for element in tqdm(collection, desc="Generating placeholder htmls", unit=" file"):
+            filename = file_prefix + "_" + element.get_id() + ".html"
+            if filename.startswith("_"):
+                filename = filename[1:]
+            file_path = os.path.join(directory, filename)
+            shutil.copy2(placeholder_file, file_path)
 
     def __generate_htmls_for_threads(self, output_dir, temp_dir_name):
-        _create_xml_xsl_for_collection(temp_dir_name
-                                       output_dir
+        _create_xml_xsl_for_collection(os.path.join(temp_dir_name, "threads"),
+                                       os.path.join(output_dir, "data", "xml", "threads"), "thread",
                                        self.threads,
                                        "thread")
         self.generate_htmls_from_xmls_xsls(self.report_xml_file,
-                                           temp_dir_name
-                                           output_dir
+                                           os.path.join(temp_dir_name, "threads"),
+                                           os.path.join(output_dir, "threads"))
 
     def __generate_htmls_for_javacores(self, output_dir, temp_dir_name):
-        _create_xml_xsl_for_collection(temp_dir_name
-                                       output_dir
+        _create_xml_xsl_for_collection(os.path.join(temp_dir_name, "javacores"),
+                                       os.path.join(output_dir, "data", "xml", "javacores"), "javacore",
                                        self.javacores,
                                        "")
         self.generate_htmls_from_xmls_xsls(self.report_xml_file,
-                                           temp_dir_name
-                                           output_dir
+                                           os.path.join(temp_dir_name, "javacores"),
+                                           os.path.join(output_dir, "javacores"))
 
     def populate_snapshot_collections(self):
         for javacore in self.javacores:
             javacore.print_javacore()
-            for s in javacore.snapshots:
+            for s in tqdm(javacore.snapshots, desc="Populating snapshot collection", unit=" javacore"):
                 self.threads.add_snapshot(s)
                 self.stacks.add_snapshot(s)
 
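
Thread and javacore pages are now pre-created as copies of a static `processing_data.html`, so links in the index work while the real pages are still being generated and are later overwritten in place. A minimal sketch of that copy step, with hypothetical ids:

```python
import os
import shutil

placeholder = "data/html/processing_data.html"
target_dir = "threads"
os.makedirs(target_dir, exist_ok=True)
for element_id in ["1", "2", "3"]:  # hypothetical thread ids
    shutil.copy2(placeholder, os.path.join(target_dir, f"thread_{element_id}.html"))
```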
@@ -234,6 +240,7 @@ class JavacoreSet:
         filename = os.path.join(self.path, filename)
         curr_line = ""
         i = 0
+        file = None
         try:
             file = open(filename, 'r')
             for line in file:
@@ -253,17 +260,18 @@ class JavacoreSet:
             elif line.startswith(JAVA_VERSION):
                 self.java_version = line[len(JAVA_VERSION) + 1:].strip()
                 continue
-        except Exception as
-
-
-
-
+        except Exception as ex:
+            logging.exception(ex)
+            logging.error(f'Error during processing file: {file.name} \n'
+                          f'line number: {i} \n'
+                          f'line: {curr_line}\n'
+                          f'Check the exception below what happened')
         finally:
             file.close()
 
     def parse_javacores(self):
         """ creates a Javacore object for each javacore...txt file in the given path """
-        for filename in self.files:
+        for filename in tqdm(self.files, "Parsing javacore files", unit=" file"):
             filename = os.path.join(self.path, filename)
             javacore = Javacore()
             javacore.create(filename, self)
@@ -283,7 +291,7 @@ class JavacoreSet:
         # return None
 
     def sort_snapshots(self):
-        for thread in self.threads:
+        for thread in tqdm(self.threads, "Sorting snapshot data", unit=" snapshot"):
             thread.sort_snapshots()
             # thread.compare_call_stacks()
 
@@ -314,7 +322,7 @@ class JavacoreSet:
     def print_thread_states(self):
         for thread in self.threads:
             logging.debug("max running states:" + str(thread.get_continuous_running_states()))
-            logging.debug(thread.name + "(id: " + str(thread.id) + "; hash: " + thread.get_hash() + ") " +
+            logging.debug(thread.name + "(id: " + str(thread.id) + "; hash: " + thread.get_hash() + ") " +
                           "states: " + thread.get_snapshot_states())
 
     # Assisted by WCA@IBM
@@ -377,6 +385,8 @@ class JavacoreSet:
 
         verbose_gc_list_node = self.doc.createElement("verbose_gc_list")
         report_info_node.appendChild(verbose_gc_list_node)
+
+        total_collects_in_time_limits = 0
         for vgc in self.gc_parser.get_files():
             verbose_gc_node = self.doc.createElement("verbose_gc")
             verbose_gc_list_node.appendChild(verbose_gc_node)
@@ -386,9 +396,11 @@ class JavacoreSet:
             verbose_gc_collects_node = self.doc.createElement("verbose_gc_collects")
             verbose_gc_node.appendChild(verbose_gc_collects_node)
             verbose_gc_collects_node.appendChild(self.doc.createTextNode(str(vgc.get_number_of_collects())))
+            total_collects_in_time_limits += vgc.get_number_of_collects()
             verbose_gc_total_collects_node = self.doc.createElement("verbose_gc_total_collects")
             verbose_gc_node.appendChild(verbose_gc_total_collects_node)
             verbose_gc_total_collects_node.appendChild(self.doc.createTextNode(str(vgc.get_total_number_of_collects())))
+        verbose_gc_list_node.setAttribute("total_collects_in_time_limits", str(total_collects_in_time_limits))
 
         system_info_node = self.doc.createElement("system_info")
         doc_node.appendChild(system_info_node)
@@ -474,6 +486,7 @@ class JavacoreSet:
         Returns:
             str: The JavaCore set in the XML format.
         """
+        file = None
         try:
             file = open(self.report_xml_file, "r")
             content = file.read()
@@ -492,11 +505,12 @@ class JavacoreSet:
     def __create_index_html(input_dir, output_dir):
 
         # Copy index.xml and report.xsl to temp - for index.html we don't need to generate anything. Copying is enough.
-        #index_xml = validate_uncontrolled_data_used_in_path([output_dir, "data", "xml", "index.xml"])
-        index_xml = os.path.normpath(importlib_resources.files("javacore_analyser") / "data" / "xml" / "index.xml")
+        # index_xml = validate_uncontrolled_data_used_in_path([output_dir, "data", "xml", "index.xml"])
+        index_xml = os.path.normpath(str(importlib_resources.files("javacore_analyser") / "data" / "xml" / "index.xml"))
         shutil.copy2(index_xml, input_dir)
 
-        report_xsl = os.path.normpath(
+        report_xsl = os.path.normpath(
+            str(importlib_resources.files("javacore_analyser") / "data" / "xml" / "report.xsl"))
         shutil.copy2(report_xsl, input_dir)
 
         xslt_doc = etree.parse(input_dir + "/report.xsl")
|
|
519
533
|
os.mkdir(output_dir)
|
520
534
|
shutil.copy2(report_xml_file, data_input_dir)
|
521
535
|
|
522
|
-
# Generating list of tuples. This is required attribute for p.map function executed few lines below.
|
523
|
-
generate_html_from_xml_xsl_files_params = []
|
524
|
-
for file in os.listdir(data_input_dir):
|
525
|
-
generate_html_from_xml_xsl_files_params.append((file, data_input_dir, output_dir))
|
526
|
-
|
527
536
|
# https://docs.python.org/3.8/library/multiprocessing.html
|
528
537
|
threads_no = JavacoreSet.get_number_of_parallel_threads()
|
529
538
|
logging.info(f"Using {threads_no} threads to generate html files")
|
539
|
+
|
540
|
+
list_files = os.listdir(data_input_dir)
|
541
|
+
progress_bar = tqdm(desc="Generating html files", unit=' files')
|
542
|
+
|
543
|
+
# Generating list of tuples. This is required attribute for p.map function executed few lines below.
|
544
|
+
generate_html_from_xml_xsl_files_params = []
|
545
|
+
for file in list_files:
|
546
|
+
generate_html_from_xml_xsl_files_params.append((file, data_input_dir, output_dir, progress_bar))
|
547
|
+
|
530
548
|
with Pool(threads_no) as p:
|
531
549
|
p.map(JavacoreSet.generate_html_from_xml_xsl_files, generate_html_from_xml_xsl_files_params)
|
532
550
|
|
551
|
+
progress_bar.close()
|
533
552
|
logging.info(f"Generated html files in {output_dir}")
|
534
553
|
|
535
554
|
# Run with the same number of threads as you have processes but leave one thread for something else.
|
@@ -540,7 +559,7 @@ class JavacoreSet:
|
|
540
559
|
@staticmethod
|
541
560
|
def generate_html_from_xml_xsl_files(args):
|
542
561
|
|
543
|
-
collection_file, collection_input_dir, output_dir = args
|
562
|
+
collection_file, collection_input_dir, output_dir, progress_bar = args
|
544
563
|
|
545
564
|
if not collection_file.endswith(".xsl"): return
|
546
565
|
|
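
HTML generation still fans out over a `multiprocessing.Pool`, now with a `tqdm` bar passed along in the worker arguments. A common way to drive such a bar is to keep it in the parent process and advance it as results come back; a minimal sketch under that assumption, with a hypothetical `render` worker:

```python
from multiprocessing import Pool
from tqdm import tqdm

def render(task):
    return task  # stand-in for transforming one xml/xsl pair into html

if __name__ == "__main__":
    tasks = list(range(100))
    with Pool(4) as pool, tqdm(total=len(tasks), desc="Generating html files", unit=" files") as bar:
        for _ in pool.imap_unordered(render, tasks):
            bar.update(1)  # the parent owns the bar, so updates display correctly
```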
@@ -567,10 +586,30 @@ class JavacoreSet:
         logging.debug("Generating file " + html_file)
         output_doc.write(html_file, pretty_print=True)
 
+        progress_bar.update(1)
+
+    @staticmethod
+    def create_xml_xsl_for_collection(tmp_dir, xml_xsls_prefix_path, collection, output_file_prefix):
+        logging.info("Creating xmls and xsls in " + tmp_dir)
+        os.mkdir(tmp_dir)
+        extensions = [".xsl", ".xml"]
+        for extension in extensions:
+            file_content = Path(xml_xsls_prefix_path + extension).read_text()
+            for element in tqdm(collection, desc="Creating xml/xsl files", unit=" files"):
+                element_id = element.get_id()
+                filename = output_file_prefix + "_" + str(element_id) + extension
+                if filename.startswith("_"):
+                    filename = filename[1:]
+                file = os.path.join(tmp_dir, filename)
+                logging.debug("Writing file " + file)
+                f = open(file, "w")
+                f.write(file_content.format(id=element_id))
+                f.close()
+
     @staticmethod
     def parse_mem_arg(line):
         line = line.split()[-1]  # avoid matching the '2' in tag name 2CIUSERARG
-        tokens = re.findall("
+        tokens = re.findall("\d+[KkMmGg]?$", line)
         if len(tokens) != 1: return UNKNOWN
         return tokens[0]
 
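
The restored `parse_mem_arg` regular expression, `\d+[KkMmGg]?$`, keeps only the trailing size token of a JVM memory argument such as `-Xmx4G`. A small usage sketch (the example arguments and the "unknown" fallback are illustrative):

```python
import re

def parse_mem_arg(line):
    line = line.split()[-1]  # "2CIUSERARG    -Xmx4G" -> "-Xmx4G"
    tokens = re.findall(r"\d+[KkMmGg]?$", line)
    return tokens[0] if len(tokens) == 1 else "unknown"

print(parse_mem_arg("2CIUSERARG    -Xmx4G"))    # 4G
print(parse_mem_arg("2CIUSERARG    -Xms512m"))  # 512m
```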

javacore_analyser/snapshot_collection_collection.py
CHANGED
@@ -2,6 +2,8 @@
 # Copyright IBM Corp. 2024 - 2024
 # SPDX-License-Identifier: Apache-2.0
 #
+from tqdm import tqdm
+
 
 class SnapshotCollectionCollection:
 
@@ -23,15 +25,15 @@ class SnapshotCollectionCollection:
     def __iter__(self):
         return self.snapshot_collections.__iter__()
 
-    def __next__(self):
-        return self.snapshot_collections.__next__()
+    # def __next__(self):
+    #     return self.snapshot_collections.__next__()
 
     def get_xml(self, doc):
         info_node = doc.createElement(self.snapshot_collection_type.__name__)
 
         all_threads_node = doc.createElement('all_snapshot_collection')
         info_node.appendChild(all_threads_node)
-        for collection in self.snapshot_collections:
+        for collection in tqdm(self.snapshot_collections, desc=" Generating threads data", unit=" thread"):
             all_threads_node.appendChild(collection.get_xml(doc))
 
         return info_node

javacore_analyser/stack_trace.py
CHANGED
@@ -26,8 +26,8 @@ class StackTrace:
     def __iter__(self):
         return self.stack_trace_elements.__iter__()
 
-    def __next__(self):
-        return self.stack_trace_elements.__next__()
+    # def __next__(self):
+    #     return self.stack_trace_elements.__next__()
 
     def equals(self, stack_trace):
         self_stack_trace_size = len(self.stack_trace_elements)

javacore_analyser/templates/index.html
CHANGED
@@ -17,12 +17,17 @@
 
 <h2>Generate report:</h2>
 <form action="/upload" method="post" enctype="multipart/form-data">
-    <
-    <
+    <ol>
+        <li>Choose archive (zip, 7z, tgz, bz2) file or multiple javacore files:
+            <input type="file" name="files" multiple required>
+        </li>
+        <li>Give the name for your report: <input type="text" id="report_name" name="report_name" required></li>
+        <li>Click button to process: <input type="submit" value="Run"></li>
+    </ol>
     <strong>
-        NOTE:
+        NOTE: Report generation is an expensive operation. It may take a few minutes. Please be patient.
     </strong>
-
+
 </form>
 <br></br>
 

javacore_analyser/thread_snapshot.py
CHANGED
@@ -2,7 +2,7 @@
 # Copyright IBM Corp. 2024 - 2024
 # SPDX-License-Identifier: Apache-2.0
 #
-
+import logging
 import os
 import re
 from datetime import datetime
@@ -123,7 +123,8 @@ class ThreadSnapshot:
         try:
             token = m.group(1)
             self.cpu_usage = float(token)
-        except:
+        except Exception as ex:
+            logging.warning(ex)
             self.cpu_usage = 0
         # tokens = re.findall("[0-9]+\.[0-9]+", line)
         # if len(tokens) == 0:

javacore_analyser/tips.py
CHANGED
@@ -2,6 +2,7 @@
 # Copyright IBM Corp. 2024 - 2024
 # SPDX-License-Identifier: Apache-2.0
 #
+import logging
 
 # This is a module containing list of the tips.
 # Each tip has to implement dynamic method generate(javacore_set)
@@ -16,6 +17,7 @@ class TestTip:
 
     @staticmethod
     def generate(javacore_set):
+        logging.info(javacore_set)
         return ["this is a test tip. Ignore it."]
 
 
@@ -157,7 +159,7 @@ class BlockingThreadsTip:
             result.append(BlockingThreadsTip.BLOCKING_THREADS_TEXT.format(blocker_name,
                                                                           blocked_size / javacores_no))
             if len(result) >= BlockingThreadsTip.MAX_BLOCKING_THREADS_NO:
-                break
+                break
         return result
 
 
@@ -165,9 +167,9 @@ class HighCpuUsageTip:
     # Generates the tip if the thread is using above x percent of CPU. Also informs, if this is verbose gc thread.
 
     # Report as high cpu usage for the application using the cpu usage above this value
-    CRITICAL_CPU_USAGE = 50
+    CRITICAL_CPU_USAGE = 50
 
-    CRITICAL_USAGE_FOR_GC = 5
+    CRITICAL_USAGE_FOR_GC = 5
 
     MAX_NUMBER_OF_HIGH_CPU_USAGE_THREADS = 5
 

javacore_analyser/verbose_gc.py
CHANGED
@@ -8,6 +8,8 @@ import ntpath
 from datetime import datetime
 from xml.dom.minidom import Element, parseString
 
+from tqdm import tqdm
+
 ROOT_CLOSING_TAG = "</verbosegc>"
 GC_START = "gc-start"
 GC_END = "gc-end"
@@ -43,7 +45,7 @@ class VerboseGcParser:
 
     def parse_files(self, start_time, stop_time):
         logging.info("Started parsing GC files")
-        for file_path in self.__file_paths:
+        for file_path in tqdm(self.__file_paths, desc="Parsing verbose gc", unit=" file"):
             try:
                 collects_from_time_range = 0
                 file = VerboseGcFile(file_path)
@@ -109,6 +111,7 @@ class VerboseGcFile:
 
     def __parse(self):
         # read in the file as collection of lines
+        file = None
         try:
             xml_text = ""
             root_closing_tag_available = False
@@ -152,7 +155,6 @@ class VerboseGcFile:
     gets the total number of gc collections in this VerboseGcFile
     regardless of the time when tey occurred with regards to the javacores
     '''
-
     def get_total_number_of_collects(self):
         if self.__total_number_of_collects < 0:
             self.get_collects()