atomicshop-2.2.9-py3-none-any.whl → atomicshop-2.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of atomicshop has been flagged as potentially problematic.

@@ -0,0 +1,465 @@
+import datetime
+
+from .. import filesystem, domains, datetimes, urls
+from ..basics import dicts
+from ..file_io import tomls, xlsxs
+from ..wrappers.loggingw import reading
+from ..print_api import print_api
+
+
+def get_the_last_day_number(statistics_content: list, stop_after_lines: int = None) -> int:
+    """
+    This function gets the last day number from the statistics content.
+
+    :param statistics_content: list of lines in the statistics content.
+    :param stop_after_lines: integer, if specified, the function will stop after the specified number of lines.
+    :return: integer, the last day number.
+    """
+
+    last_day_number = None
+    start_time_temp = None
+    for line_index, line in enumerate(statistics_content):
+        try:
+            request_time = datetime.datetime.strptime(line['request_time_sent'], '%Y-%m-%d %H:%M:%S.%f')
+        except ValueError:
+            continue
+
+        if not start_time_temp:
+            start_time_temp = request_time
+
+        if stop_after_lines:
+            if line_index == stop_after_lines:
+                break
+
+        last_day_number = datetimes.get_difference_between_dates_in_days(start_time_temp, request_time)
+    return last_day_number
+
+
+def create_empty_features_dict() -> dict:
+    """
+    This function creates an empty dictionary for the daily stats. This should be initiated for each 'host_type' of:
+    'domain', 'subdomain', 'url_no_parameters'.
+    :return: dict
+    """
+
+    return {
+        'total_count': {}, 'normal_count': {}, 'error_count': {},
+        'request_0_byte_count': {}, 'response_0_byte_count': {},
+        'request_sizes_list': {}, 'response_sizes_list': {},
+        'request_sizes_no_0_bytes_list': {}, 'response_sizes_no_0_bytes_list': {},
+        'average_request_size': {}, 'average_response_size': {},
+        'average_request_size_no_0_bytes': {}, 'average_response_size_no_0_bytes': {}}
+
+
+def add_to_count_to_daily_stats(
+        daily_stats: dict, current_day: int, last_day: int, host_type: str, feature: str, host_name: str) -> None:
+    """
+    This function adds 1 to the given count 'feature' of the current day in the daily stats.
+
+    :param daily_stats: dict, the daily statistics dict.
+    :param current_day: integer, the current day number.
+    :param last_day: integer, the last day number.
+    :param host_type: string, the type of the host. Can be: 'domain', 'subdomain', 'url_no_parameters'.
+    :param feature: string, the feature to add the count to. Can be: 'total_count', 'normal_count', 'error_count',
+        'request_0_byte_count', 'response_0_byte_count'.
+    :param host_name: string, the name of the host.
+
+    :return: None.
+    """
+
+    # Aggregate daily domain hits.
+    if host_name not in daily_stats[host_type][feature].keys():
+        daily_stats[host_type][feature][host_name] = {}
+        # Iterate from first day to the last day.
+        for day in range(0, last_day + 1):
+            daily_stats[host_type][feature][host_name][day] = 0
+
+    # Add count to current day.
+    daily_stats[host_type][feature][host_name][current_day] += 1
+
+
+def add_to_list_to_daily_stats(
+        daily_stats: dict, current_day: int, last_day: int, host_type: str, feature: str, host_name: str,
+        size: float) -> None:
+    """
+    This function adds the 'size' to the 'feature' list of the current day in the daily stats.
+
+    :param daily_stats: dict, the daily statistics dict.
+    :param current_day: integer, the current day number.
+    :param last_day: integer, the last day number.
+    :param host_type: string, the type of the host. Can be: 'domain', 'subdomain', 'url_no_parameters'.
+    :param feature: string, the feature to add the size to. Can be: 'request_sizes_list', 'response_sizes_list',
+        'request_sizes_no_0_bytes_list', 'response_sizes_no_0_bytes_list'.
+    :param host_name: string, the name of the host.
+    :param size: float, the size in bytes to add to the list.
+
+    :return: None.
+    """
+
+    # Aggregate daily domain hits.
+    if host_name not in daily_stats[host_type][feature].keys():
+        daily_stats[host_type][feature][host_name] = {}
+        # Iterate from first day to the last day.
+        for day in range(0, last_day + 1):
+            daily_stats[host_type][feature][host_name][day] = []
+
+    # Add size to current day.
+    daily_stats[host_type][feature][host_name][current_day].append(size)
+
+
+def add_to_average_to_daily_stats(
+        daily_stats: dict, current_day: int, last_day: int, host_type: str, feature: str, host_name: str,
+        list_of_sizes: list) -> None:
+    """
+    This function adds the average size in bytes calculated from the 'list_of_sizes' to the 'feature' of the current
+    day in the daily stats.
+
+    :param daily_stats: dict, the daily statistics dict.
+    :param current_day: integer, the current day number.
+    :param last_day: integer, the last day number.
+    :param host_type: string, the type of the host. Can be: 'domain', 'subdomain', 'url_no_parameters'.
+    :param feature: string, the feature to add the average to. Can be: 'average_request_size', 'average_response_size',
+        'average_request_size_no_0_bytes', 'average_response_size_no_0_bytes'.
+    :param host_name: string, the name of the host.
+    :param list_of_sizes: list, the list of sizes to calculate the average from.
+
+    :return: None.
+    """
+
+    # Aggregate daily domain hits.
+    if host_name not in daily_stats[host_type][feature].keys():
+        daily_stats[host_type][feature][host_name] = {}
+        # Iterate from first day to the last day.
+        for day in range(0, last_day + 1):
+            daily_stats[host_type][feature][host_name][day] = 0
+
+    # If the list of sizes is empty, set the average to 0, since we cannot divide by 0.
+    if len(list_of_sizes) == 0:
+        daily_stats[host_type][feature][host_name][current_day] = 0
+    else:
+        daily_stats[host_type][feature][host_name][current_day] = sum(list_of_sizes) / len(list_of_sizes)
+
+
+def analyze(main_file_path: str):
+    """
+    This function is the main function for the statistic analyzer.
+    :param main_file_path: Path to the main file that is calling this function (__file__).
+    :return:
+    """
+
+    # Get the config and set variables.
+    script_directory: str = filesystem.get_file_directory(main_file_path)
+    config_path: str = filesystem.add_object_to_path(script_directory, 'config_stats.toml')
+    config: dict = tomls.read_toml_file(config_path)
+    summary_path: str = filesystem.check_absolute_path___add_full(config['report_file_path'], script_directory)
+
+    # Get the content from statistics files.
+    statistics_content: list = reading.get_logs(
+        config['statistic_files_path'], pattern='statistics*.csv', log_type='csv',
+    )
+
+    # Initialize loop.
+    line_total_count: int = len(statistics_content)
+    start_time = None
+    last_day_number = None
+    overall_stats: dict = {
+        'domain': {'total_count': {}, 'normal_count': {}, 'error_count': {}},
+        'subdomain': {'total_count': {}, 'normal_count': {}, 'error_count': {}}
+    }
+    daily_stats: dict = {
+        'domain': create_empty_features_dict(),
+        'subdomain': create_empty_features_dict(),
+        'url_no_parameters': create_empty_features_dict()
+    }
+
+    # Start the main loop.
+    for line_index, line in enumerate(statistics_content):
+        # Converting time string to object.
+        # If the time string is not of the specific format, continue to the next line.
+        try:
+            request_time = datetime.datetime.strptime(line['request_time_sent'], '%Y-%m-%d %H:%M:%S.%f')
+        except ValueError:
+            continue
+
+        if not start_time:
+            start_time = request_time
+
+        # For testing, you can set the 'break_after_lines' to an integer, which symbolizes the number of the line
+        # of the 'statistics_content' to stop the loop after.
+        break_after_lines = None
+
+        # Find the last day number. If 'break_after_lines' is specified, the loop will stop after the specified line.
+        if not last_day_number:
+            last_day_number = get_the_last_day_number(statistics_content, break_after_lines)
+
+        if break_after_lines:
+            if line_index == break_after_lines:
+                break
+
+        if config['strings_to_include_in_subdomain'] and config['strings_to_include_in_subdomain'] != ['']:
+            # Checking that 'strings_to_include_in_subdomain' are in the subdomain, if not, continue to the next line.
+            if not any(string in line['host'] for string in config['strings_to_include_in_subdomain']):
+                continue
+
+        if config['strings_to_exclude_from_subdomain'] and config['strings_to_exclude_from_subdomain'] != ['']:
+            # Checking that 'strings_to_exclude_from_subdomain' are not in the subdomain, if they are, continue.
+            if any(string in line['host'] for string in config['strings_to_exclude_from_subdomain']):
+                continue
+
+        # Get the subdomain with the main domain from the 'host' column of current line.
+        subdomain = line['host']
+        # Get the main domain from the subdomain.
+        # Check if suffix of the 'host' is '.com'.
+        if line['host'].endswith('.com'):
+            # Get only the main domain.
+            main_domain = line['host'].split('.')[-2] + '.com'
+        # If the suffix is not '.com', use the 'domains' library to get the main domain.
+        else:
+            # This is the slowest part of the whole loop.
+            main_domain = domains.get_registered_domain(line['host'])
+
+        # If the domain is empty, continue to the next line.
+        if not main_domain:
+            continue
+
+        # If the domain is already in the dict, add 1 to the counter, else add the key to the dict.
+        if main_domain in overall_stats['domain']['total_count'].keys():
+            overall_stats['domain']['total_count'][main_domain] = (
+                overall_stats['domain']['total_count'][main_domain] + 1)
+        else:
+            overall_stats['domain']['total_count'][main_domain] = 1
+
+        # If the subdomain is already in the dict, add 1 to the counter, else add the key to the dict.
+        if subdomain in overall_stats['subdomain']['total_count'].keys():
+            overall_stats['subdomain']['total_count'][subdomain] = (
+                overall_stats['subdomain']['total_count'][subdomain] + 1)
+        else:
+            # overall_stats['subdomain']['total_count'] = {}
+            overall_stats['subdomain']['total_count'][subdomain] = 1
+
+        # Check if there is an error in the line and count the domain under 'error_count' key.
+        if line['error'] != '':
+            # If the domain is already in the dict, add 1 to the counter, else add the key to the dict.
+            if main_domain in overall_stats['domain']['error_count'].keys():
+                overall_stats['domain']['error_count'][main_domain] = (
+                    overall_stats['domain']['error_count'][main_domain] + 1)
+            else:
+                # overall_stats['domain']['total_count'] = {}
+                overall_stats['domain']['error_count'][main_domain] = 1
+
+            # If the subdomain is already in the dict, add 1 to the counter, else add the key to the dict.
+            if subdomain in overall_stats['subdomain']['error_count'].keys():
+                overall_stats['subdomain']['error_count'][subdomain] = (
+                    overall_stats['subdomain']['error_count'][subdomain] + 1)
+            else:
+                # overall_stats['subdomain']['total_count'] = {}
+                overall_stats['subdomain']['error_count'][subdomain] = 1
+        else:
+            # If the domain is already in the dict, add 1 to the counter, else add the key to the dict.
+            if main_domain in overall_stats['domain']['normal_count'].keys():
+                overall_stats['domain']['normal_count'][main_domain] = (
+                    overall_stats['domain']['normal_count'][main_domain] + 1)
+            else:
+                # overall_stats['domain']['total_count'] = {}
+                overall_stats['domain']['normal_count'][main_domain] = 1
+
+            # If the subdomain is already in the dict, add 1 to the counter, else add the key to the dict.
+            if subdomain in overall_stats['subdomain']['normal_count'].keys():
+                overall_stats['subdomain']['normal_count'][subdomain] = (
+                    overall_stats['subdomain']['normal_count'][subdomain] + 1)
+            else:
+                # overall_stats['subdomain']['total_count'] = {}
+                overall_stats['subdomain']['normal_count'][subdomain] = 1
+
+        # Get the URL without parameters.
+        url = line['host'] + line['path']
+        url_no_parameters = urls.url_parser(url)['path']
+
+        # Get the request and response sizes.
+        # If the size is not numeric and cannot be converted to an integer, set it to None,
+        # since there was probably an SSL 'error' in the line.
+        try:
+            request_size = int(line['request_size_bytes'])
+            response_size = int(line['response_size_bytes'])
+        except ValueError:
+            request_size = None
+            response_size = None
+
+        # Start Day aggregation ========================================================================================
+        # Daily stats.
+        day_number = datetimes.get_difference_between_dates_in_days(start_time, request_time)
+
+        # Add 1 to the total count of the current day.
+        add_to_count_to_daily_stats(
+            daily_stats, day_number, last_day_number, 'domain', 'total_count', main_domain)
+        add_to_count_to_daily_stats(
+            daily_stats, day_number, last_day_number, 'subdomain', 'total_count', subdomain)
+        add_to_count_to_daily_stats(
+            daily_stats, day_number, last_day_number, 'url_no_parameters', 'total_count', url_no_parameters)
+
+        # Handle line if it has error.
+        if line['error'] != '':
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'error_count', main_domain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'error_count', subdomain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'error_count', url_no_parameters)
+        else:
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'normal_count', main_domain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'normal_count', subdomain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'normal_count', url_no_parameters)
+
+        if request_size == 0:
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'request_0_byte_count',
+                main_domain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'request_0_byte_count',
+                subdomain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'request_0_byte_count',
+                url_no_parameters)
+
+        if response_size == 0:
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'response_0_byte_count',
+                main_domain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'response_0_byte_count',
+                subdomain)
+            add_to_count_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'response_0_byte_count',
+                url_no_parameters)
+
+        if request_size is not None and response_size is not None:
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'request_sizes_list', main_domain, request_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'request_sizes_list', subdomain, request_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'request_sizes_list', url_no_parameters,
+                request_size)
+
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'response_sizes_list', main_domain, response_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'response_sizes_list', subdomain, response_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'response_sizes_list', url_no_parameters,
+                response_size)
+
+        if request_size != 0 and request_size is not None:
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'request_sizes_no_0_bytes_list',
+                main_domain, request_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'request_sizes_no_0_bytes_list',
+                subdomain, request_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'request_sizes_no_0_bytes_list',
+                url_no_parameters, request_size)
+
+        if response_size != 0 and response_size is not None:
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'domain', 'response_sizes_no_0_bytes_list',
+                main_domain, response_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'subdomain', 'response_sizes_no_0_bytes_list',
+                subdomain, response_size)
+            add_to_list_to_daily_stats(
+                daily_stats, day_number, last_day_number, 'url_no_parameters', 'response_sizes_no_0_bytes_list',
+                url_no_parameters, response_size)
+
+        print_api(f'Processing line: {line_index+1}/{line_total_count}', print_end='\r')
+
+    # Calculate daily average request and response sizes.
+    for host_type, features in daily_stats.items():
+        for feature, hosts in features.items():
+            if feature == 'request_sizes_list':
+                feature_name = 'average_request_size'
+            elif feature == 'response_sizes_list':
+                feature_name = 'average_response_size'
+            elif feature == 'request_sizes_no_0_bytes_list':
+                feature_name = 'average_request_size_no_0_bytes'
+            elif feature == 'response_sizes_no_0_bytes_list':
+                feature_name = 'average_response_size_no_0_bytes'
+            else:
+                continue
+
+            for host_name, days in hosts.items():
+                for day, sizes in days.items():
+                    add_to_average_to_daily_stats(
+                        daily_stats, day, last_day_number, host_type, feature_name, host_name, sizes)
+
+    # Sorting overall stats.
+    sorted_overall_stats: dict = {
+        'domain': {'total_count': {}, 'normal_count': {}, 'error_count': {}},
+        'subdomain': {'total_count': {}, 'normal_count': {}, 'error_count': {}}
+    }
+    for feature_dict, feature_dict_value in overall_stats.items():
+        for feature, feature_value in feature_dict_value.items():
+            sorted_overall_stats[feature_dict][feature] = (
+                dicts.sort_by_values(feature_value, reverse=True))
+
+    # Create combined dictionary of the sorted statistics to export to XLSX file.
+    combined_sorted_stats = {}
+    # Add overall stats.
+    for feature_dict, feature_dict_value in sorted_overall_stats.items():
+        for feature, feature_value in feature_dict_value.items():
+            for feature_index, (host_name, counter) in enumerate(feature_value.items()):
+                if feature_index == 0:
+                    try:
+                        combined_sorted_stats[f'overall_stats']['host_name'].append('')
+                        combined_sorted_stats[f'overall_stats']['counter'].append('')
+                        combined_sorted_stats[f'overall_stats']['host_name'].append(f'{feature_dict}_{feature}')
+                        combined_sorted_stats[f'overall_stats']['counter'].append('counter')
+                    except KeyError:
+                        combined_sorted_stats[f'overall_stats'] = \
+                            {f'host_name': [f'{feature_dict}_{feature}'], 'counter': ['counter']}
+
+                combined_sorted_stats[f'overall_stats']['host_name'].append(host_name)
+                combined_sorted_stats[f'overall_stats']['counter'].append(counter)
+
+    feature_name = ''
+    # Add daily stats to combined dict. Each day will be a column.
+    for host_type, features in daily_stats.items():
+        for feature, hosts in features.items():
+            if 'count' in feature:
+                feature_name = 'counts'
+            elif 'list' in feature:
+                feature_name = 'lists'
+            elif 'average' in feature:
+                feature_name = 'averages'
+
+            for feature_index, (host_name, days) in enumerate(hosts.items()):
+                if feature_index == 0:
+                    try:
+                        combined_sorted_stats[f'daily_{feature_name}']['host_name'].append('')
+                        for day in days.keys():
+                            combined_sorted_stats[f'daily_{feature_name}']['Day' + str(day)].append('')
+                        combined_sorted_stats[f'daily_{feature_name}']['host_name'].append(f'{host_type}_{feature}')
+                        for day in days.keys():
+                            (combined_sorted_stats[f'daily_{feature_name}']['Day' + str(day)].
+                             append('Day' + str(day)))
+                    except KeyError:
+                        combined_sorted_stats[f'daily_{feature_name}'] = {f'host_name': [f'{host_type}_{feature}']}
+                        for day in days.keys():
+                            combined_sorted_stats[f'daily_{feature_name}']['Day' + str(day)] = ['Day' + str(day)]
+
+                combined_sorted_stats[f'daily_{feature_name}']['host_name'].append(host_name)
+                for day_number, counter in days.items():
+                    combined_sorted_stats[f'daily_{feature_name}']['Day' + str(day_number)].append(counter)
+
+    try:
+        xlsxs.write_xlsx(combined_sorted_stats, file_path=summary_path)
+    except FileNotFoundError:
+        directory_path = filesystem.get_file_directory(summary_path)
+        print_api(f'Directory does not exist, creating it: {directory_path}')
+        filesystem.create_directory(directory_path)
+        xlsxs.write_xlsx(combined_sorted_stats, file_path=summary_path)
+
+    return
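Reviewer note: the core aggregation pattern used by add_to_count_to_daily_stats and its siblings is "first time a host is seen, create one bucket per day from 0 to last_day, then update only the current day's bucket". A minimal standalone sketch of that pattern, with illustrative names and values that are not part of the package:

# Standalone sketch; mirrors the day-bucket aggregation above, not the package's actual API.
daily_stats = {'domain': {'total_count': {}}}

def add_count(daily_stats, current_day, last_day, host_type, feature, host_name):
    # First time this host is seen: create one zeroed bucket per day (0..last_day).
    if host_name not in daily_stats[host_type][feature]:
        daily_stats[host_type][feature][host_name] = {day: 0 for day in range(last_day + 1)}
    # Increment only the bucket of the day this request falls on.
    daily_stats[host_type][feature][host_name][current_day] += 1

add_count(daily_stats, current_day=2, last_day=4, host_type='domain', feature='total_count', host_name='example.com')
add_count(daily_stats, current_day=2, last_day=4, host_type='domain', feature='total_count', host_name='example.com')
print(daily_stats['domain']['total_count']['example.com'])
# {0: 0, 1: 0, 2: 2, 3: 0, 4: 0}

The same shape is reused for the size lists and averages, which is why the later averaging pass can iterate host_type -> feature -> host -> day without special cases.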
@@ -17,6 +17,11 @@ for connection in psutil.net_connections():
 command_line = psutil.Process(connection.pid).cmdline()
 # Command line object is returned as list of parameters. We need 'shlex.join' to join the iterables
 # to regular, readable string.
-print(shlex.join(command_line))
+result = shlex.join(command_line)
+# If the result is still a PID, we'll try to get process name.
+if result.isnumeric():
+    # Get the process name from the connection PID.
+    result = psutil.Process(connection.pid).name()
+print(result)
 # Break the loop, when first match is found.
 break
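The change above adds a fallback: if the joined command line still looks like a bare numeric PID, the process name is printed instead. A hedged standalone sketch of the same behavior, assuming psutil is installed and the caller has permission to inspect other processes (connection filtering from the surrounding script is omitted here):

# Illustrative sketch only; mirrors the fallback added in the hunk above.
import shlex
import psutil

for connection in psutil.net_connections():
    if connection.pid is None:
        # Connections without an owning PID cannot be resolved; skip them.
        continue
    command_line = psutil.Process(connection.pid).cmdline()
    result = shlex.join(command_line)
    # Some processes expose an empty or numeric command line; fall back to the process name.
    if result.isnumeric():
        result = psutil.Process(connection.pid).name()
    print(result)
    break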
@@ -0,0 +1,2 @@
+FACT_ADDRESS: str = 'http://localhost:5000'
+FIRMWARE_ENDPOINT: str = '/rest/firmware'
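These two constants are consumed by the uploader added later in this diff. Since FIRMWARE_ENDPOINT already begins with '/', joining the two with another '/' (as the uploader's f-string appears to do) yields a double slash in the URL. A small hypothetical helper, not part of the package, showing a join that avoids that:

# Hypothetical helper for illustration only.
FACT_ADDRESS: str = 'http://localhost:5000'
FIRMWARE_ENDPOINT: str = '/rest/firmware'

def build_firmware_url(address: str = FACT_ADDRESS, endpoint: str = FIRMWARE_ENDPOINT) -> str:
    # Strip the duplicated slash at the seam between address and endpoint.
    return address.rstrip('/') + '/' + endpoint.lstrip('/')

print(build_firmware_url())  # http://localhost:5000/rest/firmware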
@@ -0,0 +1,80 @@
+import requests
+import base64
+
+from . import fact_config
+from ... print_api import print_api
+from ... file_io import file_io
+
+
+def upload_firmware(firmware_file_path: str, params: dict, use_all_analysis_systems: bool = False):
+    """
+    Upload firmware binary file to the server.
+
+    :param firmware_file_path: Path to firmware file.
+    :param use_all_analysis_systems: Use all analysis systems.
+    :param params: Parameters:
+        {
+            "device_name": <string>,
+            "device_part": <string>,          # new in FACT 2.5
+            "device_class": <string>,
+            "file_name": <string>,
+            "version": <string>,              # supersedes firmware_version field
+            "vendor": <string>,
+            "release_date": <string>,
+            "tags": <string>,
+            "requested_analysis_systems": <list>,
+            "binary": <string(base64)>
+        }
+
+        'device_name' and 'tags' aren't required.
+        'binary' and 'file_name' are filled by this function from the firmware file.
+        'requested_analysis_systems' is filled by this function if 'use_all_analysis_systems' is True.
+
+    Example from https://github.com/fkie-cad/FACT_core/wiki/Rest-API#restfirmwareuid:
+        {
+            "device_name": "rest_test",
+            "device_part": <string>,
+            "device_class": "Router",
+            "file_name": "firmware.bin",
+            "version": "1.1",
+            "vendor": "AVM",
+            "release_date": "2011-01-01",
+            "tags": "tag1,tag2",
+            "requested_analysis_systems": ["file_type", "file_hashes"],
+            "binary": "dGVzdDEyMzQgdBzb21lIHRlc3QgZQ=="
+        }
+
+    :return: None.
+    """
+
+    url: str = f'{fact_config.FACT_ADDRESS}/{fact_config.FIRMWARE_ENDPOINT}'
+
+    # Add all analysis systems to the list.
+    if use_all_analysis_systems:
+        params['requested_analysis_systems'] = [
+            'binwalk', 'cpu_architecture', 'crypto_hints', 'crypto_material', 'cve_lookup', 'cwe_checker',
+            'device_tree', 'elf_analysis', 'exploit_mitigations', 'file_hashes', 'file_system_metadata',
+            'file_type', 'hardware_analysis', 'hashlookup', 'information_leaks', 'init_systems', 'input_vectors',
+            'interesting_uris', 'ip_and_uri_finder', 'ipc_analyzer', 'kernel_config', 'known_vulnerabilities',
+            'printable_strings', 'qemu_exec', 'software_components', 'source_code_analysis', 'string_evaluator',
+            'tlsh', 'unpacker', 'users_and_passwords'
+        ]
+
+    # Open firmware file.
+    firmware_binary_content = file_io.read_file(firmware_file_path, file_mode='rb')
+    # Encode firmware file to base64.
+    params['binary'] = base64.b64encode(firmware_binary_content)
+
+    # Send firmware file to the server.
+    response = requests.put(
+        url,
+        params=params,
+    )
+
+    # Check response status code.
+    if response.status_code == 200:
+        # Print response.
+        print_api(response.json())
+    else:
+        # Print error.
+        print_api('Error: ' + str(response.status_code), error_type=True, logger_method='critical')
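Based on the docstring above, a usage sketch might look like the following. The module path and all parameter values are assumptions for illustration; only upload_firmware() and its signature come from the diff itself.

# Hypothetical usage sketch; adjust the import path to wherever this module lands in the package.
from atomicshop.wrappers.factw import fact_rest_api  # assumed module path

firmware_params: dict = {
    'device_name': 'rest_test',
    'device_class': 'Router',
    'version': '1.1',
    'vendor': 'AVM',
    'release_date': '2011-01-01',
    'tags': 'tag1,tag2',
}

fact_rest_api.upload_firmware(
    firmware_file_path='/path/to/firmware.bin',
    params=firmware_params,
    use_all_analysis_systems=True,
)

Two details worth checking while reviewing: requests.put(url, params=params) encodes the dictionary into the query string rather than sending the JSON body shown in the linked wiki example, and the f-string URL combined with the leading slash in FIRMWARE_ENDPOINT produces a double slash. Neither is necessarily wrong, but both are worth verifying against what the FACT server actually accepts.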
@@ -2,6 +2,7 @@ import os
 from typing import Literal

 from ... import filesystem, datetimes
+from ...basics import list_of_dicts
 from ...file_io import csvs


@@ -26,6 +27,7 @@ def get_logs(
         'all' - Each CSV file has a header. Get the header from each file.
     :param remove_logs: Boolean, if True, the logs will be removed after getting them.
     :param move_to_path: Path to move the logs to.
+
     :param print_kwargs: Keyword arguments dict for 'print_api' function.
     """

@@ -35,13 +37,9 @@ def get_logs(
     if remove_logs and move_to_path:
         raise ValueError('Both "remove_logs" and "move_to_path" cannot be True/specified at the same time.')

-    logs_files: list = filesystem.get_files_and_folders(
-        path, string_contains=pattern)
-
-    # If there's more than 1 file, it means that the latest file is 'statistics.csv' and it is the first in
-    # The found list, so we need to move it to the last place.
-    if len(logs_files) > 1:
-        logs_files = list(logs_files[1:] + [logs_files[0]])
+    logs_files: list = filesystem.get_file_paths_and_relative_directories(
+        path, file_name_check_pattern=pattern,
+        add_last_modified_time=True, sort_by_last_modified_time=True)

     # Read all the logs.
     logs_content: list = list()
@@ -49,12 +47,12 @@ def get_logs(
     for single_file in logs_files:
         if log_type == 'csv':
             if header_type_of_files == 'all':
-                csv_content, _ = csvs.read_csv_to_list(single_file, **print_kwargs)
+                csv_content, _ = csvs.read_csv_to_list(single_file['path'], **print_kwargs)
                 logs_content.extend(csv_content)
             elif header_type_of_files == 'first':
                 # The function gets empty header to read it from the CSV file, the returns the header that it read.
                 # Then each time the header is fed once again to the function.
-                csv_content, header = csvs.read_csv_to_list(single_file, header=header, **print_kwargs)
+                csv_content, header = csvs.read_csv_to_list(single_file['path'], header=header, **print_kwargs)
                 # Any way the first file will be read with header.
                 logs_content.extend(csv_content)

@@ -65,7 +63,7 @@ def get_logs(
     if remove_logs:
         # Remove the statistics files.
         for single_file in logs_files:
-            filesystem.remove_file(single_file)
+            filesystem.remove_file(single_file['path'])

     if move_to_path:
         # Get formatted time stamp for file name.
@@ -78,8 +76,8 @@ def get_logs(
         filesystem.create_directory(move_to_path_with_timestamp)
         # Move the statistics files.
         for single_file in logs_files:
-            single_file_name = filesystem.get_file_name(single_file)
+            single_file_name = filesystem.get_file_name(single_file['path'])
             move_to_path_with_file = f'{move_to_path_with_timestamp}{os.sep}{single_file_name}'
-            filesystem.move_file(single_file, move_to_path_with_file)
+            filesystem.move_file(single_file['path'], move_to_path_with_file)

     return logs_content
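The get_logs() refactor above replaces the manual "move the newest statistics.csv to the end of the list" workaround with file entries that are dicts (indexed by 'path' throughout) and that are sorted by last-modified time. The likely motivation is the header_type_of_files='first' mode: only the oldest file carries the CSV header, so files need to be read oldest-to-newest for the header to be reused correctly. A minimal sketch under those assumptions (the 'last_modified_time' field name is illustrative; the diff only shows add_last_modified_time=True being requested):

# Illustrative sketch of the new entry shape and ordering; not the package's actual return value.
logs_files = [
    {'path': '/logs/statistics.csv', 'last_modified_time': 1704153600.0},              # current file
    {'path': '/logs/statistics_2023-12-31.csv', 'last_modified_time': 1704067200.0},   # rotated file
]

# Oldest first, so the file that contains the header is processed before the rotated continuations.
logs_files.sort(key=lambda entry: entry['last_modified_time'])

for single_file in logs_files:
    print(single_file['path'])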
@@ -57,14 +57,14 @@ def execute_test(config_static):
     loggingw.get_logger_with_stream_handler("network")

     # Get all the files in requests folder recursively.
-    request_file_list, _ = get_file_paths_and_relative_directories(config['requests_directory'])
+    request_file_list = get_file_paths_and_relative_directories(config['requests_directory'])
     print(f"Found request files: {len(request_file_list)}")

     # Get contents of all request files to list of contents.
     requests_bytes_list: list = list()
     for request_file_path in request_file_list:
         if config['request_type'] == 'json':
-            request_file_content = jsons.read_json_file(request_file_path)
+            request_file_content = jsons.read_json_file(request_file_path['path'])

             # If imported json is regular and not combined json.
             if isinstance(request_file_content, dict):
@@ -79,13 +79,13 @@ def execute_test(config_static):
                 requests_bytes_list.extend(
                     get_key_values_from_json(json_dict, config['request_json_hex_key_list']))
         elif config['request_type'] == 'string':
-            request_file_content = file_io.read_file(request_file_path)
+            request_file_content = file_io.read_file(request_file_path['path'])
             # Convert string content to bytes and append to list.
             requests_bytes_list.append(request_file_content.encode())
             print(f"Extracted 1 request.")
         elif config['request_type'] == 'binary':
             # The content is already in bytes, so just appending.
-            requests_bytes_list.append(file_io.read_file(request_file_path, 'rb'))
+            requests_bytes_list.append(file_io.read_file(request_file_path['path'], 'rb'))
             print(f"Extracted 1 request.")

     print(f"Finished parsing. Parsed requests: {len(requests_bytes_list)}")