rs-mrt-dau-utilities 0.1.0 (rs_mrt_dau_utilities-0.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rs_mrt_dau_utilities/__init__.py +2 -0
- rs_mrt_dau_utilities/delay_meas/__init__.py +3 -0
- rs_mrt_dau_utilities/delay_meas/delay_meas.py +55 -0
- rs_mrt_dau_utilities/delay_meas/dev.py +184 -0
- rs_mrt_dau_utilities/ip_analysis/__init__.py +15 -0
- rs_mrt_dau_utilities/ip_analysis/ip_analysis.py +201 -0
- rs_mrt_dau_utilities/py.typed +0 -0
- rs_mrt_dau_utilities-0.1.0.dist-info/METADATA +106 -0
- rs_mrt_dau_utilities-0.1.0.dist-info/RECORD +10 -0
- rs_mrt_dau_utilities-0.1.0.dist-info/WHEEL +4 -0
@@ -0,0 +1,55 @@
+import altair as alt
+import polars as pl
+
+from .dev import delay_get_segment, delay_get_start_stop_segment, delay_parse_log
+
+
+def extract_delay_from_log(log_file: str) -> dict[str, pl.DataFrame]:
+    """
+    Extract delay information from the centralservice.log file.
+    This file is located in the following directory:
+    /DATAUNIT/rohde-schwarz/log/DAU/centralservice.log
+    The function parses the log file, extracts the relevant data, and returns a dictionary of Polars DataFrames.
+    Each key combines the segment found (start-stop pair) and the measurement found within that segment:
+    - "1_1": first segment (start-stop) and first meas_id
+    - "1_2": first segment (start-stop) and second meas_id
+    - "2_1": second segment (start-stop) and first meas_id
+    - ...
+    """
+    # Parse the log file to extract delay information
+    parsed_data = delay_parse_log(log_file)
+
+    # Get start and stop segments from the command DataFrame
+    results_per_segment = delay_get_start_stop_segment(
+        parsed_data["command"], parsed_data["hash"]
+    )
+
+    # Get segments of data based on the extracted start and stop times
+    result_per_hash = delay_get_segment(results_per_segment)
+
+    return result_per_hash
+
+
+def plot_all(results_one_segment: pl.DataFrame) -> alt.RepeatChart:
+    """
+    Plot all the delays found for this measurement.
+    """
+    items = results_one_segment.columns
+    filtered_items = [item for item in items if item.startswith("delay")]
+
+    chart = (
+        alt.Chart(results_one_segment)
+        .mark_point()
+        .encode(
+            # x='min_time:T',
+            # alt.X(items[1], title='time'),
+            alt.X(alt.repeat("column"), type="temporal", title="time"),
+            alt.Y(alt.repeat("row"), type="quantitative"),
+            # y=r'ip\.throughput_interval_bps_dst_src:Q',
+            # color='flow_id',
+        )
+        .properties(width=1100, height=300)
+        .repeat(row=filtered_items, column=[items[1]])
+        .interactive()
+    )
+    return chart
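A minimal usage sketch for the two functions above; the log path and the "1_1" segment key are illustrative assumptions, not values the package guarantees:

```python
# Hypothetical usage of delay_meas.delay_meas; the log path and the "1_1" key
# are assumptions for illustration.
from rs_mrt_dau_utilities.delay_meas.delay_meas import extract_delay_from_log, plot_all

segments = extract_delay_from_log("centralservice.log")
print(list(segments.keys()))  # e.g. ["1_1", "1_2", "2_1", ...]

if "1_1" in segments:
    chart = plot_all(segments["1_1"])  # one row per delay_* column, repeated over time
    chart.save("delays_1_1.html")
```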
@@ -0,0 +1,184 @@
+import base64
+import datetime
+import gzip
+import json
+import re
+
+import polars as pl
+
+
+def delay_parse_log(log_file: str) -> dict[str, pl.DataFrame]:
+    """
+    Parse the centralservice.log file and return a dictionary containing 2 dataframes:
+    - hash: DataFrame containing the hash data
+    - command: DataFrame containing the command data
+    """
+    with open(log_file, "r") as f:
+        lines = f.readlines()
+
+    fl: dict[str, list] = {"hash": [], "command": []}
+    for line in lines:
+        # Extract the relevant information from the log line
+        match_hash = re.search(
+            r"(.*) INFO centralservice::delay_meas_core: mime=.*, data=(.*)", line
+        )
+        if match_hash:
+            timestamp = match_hash.group(1)
+            encoded = match_hash.group(2)
+            decoded = base64.b64decode(encoded)
+            decompress = gzip.decompress(decoded)
+            # Use a dedicated name so the outer `line` is not shadowed
+            for data_line in decompress.decode("utf-8").splitlines():
+                data = json.loads(data_line)
+                for i in data["meas"]:
+                    # Combine the secs/nanos fields into epoch nanoseconds
+                    i["timestamp"] = (
+                        i["timestamp"]["secs"] * 1000000000 + i["timestamp"]["nanos"]
+                    )
+                    i["hash"] = data["hash"]
+                    fl["hash"].append(i)
+        match_cmd = re.search(
+            r"(.*) INFO centralservice::delay_meas_core: (.*) msg from FSW received",
+            line,
+        )
+        if match_cmd:
+            timestamp = match_cmd.group(1)
+            cmd = match_cmd.group(2)
+            # config = match_cmd.group(3)
+            json_dict = {
+                "timestamp": datetime.datetime.fromisoformat(timestamp),
+                "command": cmd,
+            }
+
+            fl["command"].append(json_dict)
+    # logging.d(fl["command"])
+
+    # print("hash:", fl["hash"])
+    # Create a DataFrame for the hash data and cast the 'hash' column to UInt64
+    df = pl.DataFrame(fl["hash"], infer_schema_length=None).cast({"hash": pl.UInt64})
+
+    # convert the timestamp to datetime with the correct timezone
+    df = df.with_columns(
+        timestamp=pl.from_epoch("timestamp", time_unit="ns").dt.replace_time_zone("UTC")
+    )
+
+    return {"hash": df, "command": pl.DataFrame(fl["command"])}
+
+
+def delay_get_start_stop_segment(
+    command_df: pl.DataFrame, hash_df: pl.DataFrame
+) -> list[pl.DataFrame]:
+    """Pair each Start command with the next Stop and return the hash rows of every segment."""
+    # search for the segments start - stop
+    start_time = None
+    result = []
+
+    # Iterate through the DataFrame rows
+    for row in command_df.iter_rows(named=True):
+        if row["command"] == "Start":
+            start_time = row["timestamp"]
+        elif row["command"] == "Stop" and start_time is not None:
+            # Append the pair of start and stop times to the result list
+            result.append({"Start": start_time, "Stop": row["timestamp"]})
+            # Reset start_time to None after pairing
+            start_time = None
+
+    # Create a new DataFrame from the result list
+    paired_df = pl.DataFrame(result)
+    # print("paired_df:", paired_df)
+    results_per_segment = []
+    # Iterate through the DataFrame rows
+    for row in paired_df.iter_rows(named=True):
+        start_time = row["Start"]
+        stop_time = row["Stop"]
+        # Filter the hash DataFrame for the current segment
+        filtered_hash_df = hash_df.filter(
+            pl.col("timestamp").is_between(start_time, stop_time)
+        )
+        # Append the filtered DataFrame to the result list
+        results_per_segment.append(filtered_hash_df)
+
+    return results_per_segment
+
+
+def delay_get_segment(
+    result_per_segment: list[pl.DataFrame], all_paths: bool = False
+) -> dict[str, pl.DataFrame]:
+    """Pivot each segment per meas_id and derive the delay columns; keys are "<segment>_<meas_id>"."""
+    result_per_hash = {}
+    segment = 1  # first segment (start-stop) begins with 1
+    # group the hash together
+    for m in result_per_segment:
+        # Group by 'hash'
+        # only keep the groups that have 'Upc' in the 'origin' column
+        # filtered_groups=m.sort("timestamp").group_by('hash').all().filter(pl.col('origin').list.contains('Upc')) #.head(10)
+
+        # print(filtered_groups)
+        # getting the number of unique meas_id
+        list_of_meas = pl.Series(
+            m.select(pl.col("meas_id").drop_nulls().unique())
+        ).to_list()
+        # print("list of meas: ", list_of_meas)
+
+        for meas in list_of_meas:
+            aaa = (
+                m.lazy()
+                .filter(pl.col("meas_id").is_in([meas]) | pl.col("meas_id").is_null())
+                .sort("timestamp")
+                .group_by("hash")
+                .all()
+                .filter(pl.col("origin").list.contains("Upc"))
+                .explode(
+                    "timestamp", "origin", "meas_id"
+                )  # .group_by('hash','origin').all()
+                .sort("timestamp", "hash", "origin")
+                .with_columns(idx=pl.col("hash").rank("ordinal").over("hash", "origin"))
+                .collect()
+            )
+            eee3 = aaa.pivot(index="hash", on=["origin", "idx"], values="timestamp")
+            iii = (
+                eee3.select(pl.all().exclude("hash"))
+                .rename(lambda cn: cn[2:-1].replace('",', "_"))
+                .insert_column(0, eee3["hash"])
+            )
+            # # remove the groups that are not meas or n/a
+            # fil_only_one_meas = m.filter(pl.col("meas_id").is_in([meas]) | pl.col("meas_id").is_null())
+            # #print("fil_only_one_meas:", fil_only_one_meas)
+            # # add a number at the end of every origin, sorted by timestamp
+            # ddd = fil_only_one_meas.sort("timestamp").group_by('hash').all().filter(pl.col('origin').list.contains('Upc'))
+            # eee = ddd.explode("timestamp","origin","meas_id") #.group_by('hash','origin').all()
+            # #print(eee)
+            # eee2 = eee.sort('timestamp','hash','origin').with_columns(
+            #     idx = pl.col('hash').rank("ordinal").over('hash', 'origin')
+            #     #pl.int_range(pl.len()).over(pl.col('origin'))
+            # )
+            # #print(eee2)
+            # eee3 = eee2.pivot(index="hash", on=["origin", "idx"], values="timestamp")
+            # # rename the columns
+            # iii = eee3.select(pl.all().exclude("hash")).rename(
+            #     lambda cn: cn[2:-1].replace("\",", "_")
+            # ).insert_column(0, eee3["hash"])
+            # print(iii)
+            column_present = iii.columns
+            column_present = [x for x in column_present if x != "hash"]
+            # print(column_present)
+            iii = iii.with_columns(
+                delay_global_us=(
+                    pl.col(column_present[-1]) - pl.col(column_present[0])
+                ).dt.total_microseconds()
+            )
+
+            if all_paths:
+                last_column = ""
+                for i in column_present:
+                    if last_column == "":
+                        last_column = i
+                    else:
+                        iii = iii.with_columns(
+                            (pl.col(i) - pl.col(last_column))
+                            .alias("delay-" + last_column + "->" + i + "_us")
+                            .dt.total_microseconds()
+                        )
+                        last_column = i
+            result_per_hash[str(segment) + "_" + str(meas)] = iii
+        segment += 1
+    return result_per_hash
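The secs/nanos flattening and the pl.from_epoch conversion in delay_parse_log can be exercised in isolation; a self-contained sketch with synthetic values:

```python
# Stand-alone sketch of the secs/nanos -> UTC datetime conversion used in
# delay_parse_log; the sample rows are synthetic.
import polars as pl

raw = [
    {"timestamp": {"secs": 1_700_000_000, "nanos": 250_000}, "origin": "Upc"},
    {"timestamp": {"secs": 1_700_000_001, "nanos": 500_000}, "origin": "Upc"},
]

# Flatten the two fields into one epoch-nanosecond integer, as the parser does.
for row in raw:
    row["timestamp"] = row["timestamp"]["secs"] * 1_000_000_000 + row["timestamp"]["nanos"]

df = pl.DataFrame(raw).with_columns(
    timestamp=pl.from_epoch("timestamp", time_unit="ns").dt.replace_time_zone("UTC")
)
print(df)  # the timestamp column is now Datetime("ns", "UTC")
```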
@@ -0,0 +1,15 @@
+from .ip_analysis import (
+    ipanalysis_init_dataframes,
+    ipanalysis_parse_json_result,
+    ipanalysis_parse_scpi_result,
+    ipanalysis_parse_scpi_schema_result,
+    ipanalysis_update_dataframes,
+)
+
+__all__ = [
+    "ipanalysis_init_dataframes",
+    "ipanalysis_update_dataframes",
+    "ipanalysis_parse_scpi_schema_result",
+    "ipanalysis_parse_json_result",
+    "ipanalysis_parse_scpi_result",
+]
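Both delay_meas.dev above and the ip_analysis module below unpack the same transport envelope: base64-encoded gzip of newline-delimited JSON. A self-contained round-trip sketch of that decode path, with a synthetic payload:

```python
# Round-trip sketch of the base64 -> gzip -> NDJSON envelope; the payload is synthetic.
import base64
import gzip
import json

payload = "\n".join(json.dumps({"seq": n}) for n in range(3)).encode("utf-8")
encoded = base64.b64encode(gzip.compress(payload)).decode("ascii")

# The decode path, as in delay_parse_log and ipanalysis_parse_json_result:
decoded = gzip.decompress(base64.b64decode(encoded)).decode("utf-8")
messages = [json.loads(line) for line in decoded.splitlines()]
print(messages)  # [{'seq': 0}, {'seq': 1}, {'seq': 2}]
```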
@@ -0,0 +1,201 @@
+import base64
+import gzip
+import json
+import logging
+import re
+
+import fast_json_normalize
+import polars as pl
+
+# parse a SCPI result obtained with FETCh:DATA:MEASurement:IPANalysis:RESult?
+# return a list of dicts: {'time': ..., 'json_messages': [...]}
+# example of use: print the parsed sequences
+# parsed_sequences = ipanalysis_parse_scpi_result(scpi_result)
+# for sequence in parsed_sequences:
+#     print(f"Time: {sequence['time']}")
+#     for message in sequence['json_messages']:
+#         print(f"message: {message}")
+#         # print(json.dumps(message, indent=2))
+#     print()
+
+
+def ipanalysis_parse_scpi_result(scpi_result: str) -> list[dict]:
+    """
+    Processes a given SCPI result string by splitting it into sequences based on a time pattern and SCPI block.
+
+    Args:
+        scpi_result (str): A string containing the SCPI result data.
+
+    Returns:
+        list: A list of dictionaries, each containing a time and a list of parsed JSON messages.
+    """
+    # Split the input into sequences based on the pattern: time, SCPI block
+    sequences = re.split(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})",', scpi_result)[1:]
+
+    # Initialize a list to store the parsed sequences
+    parsed_sequences = []
+
+    # print(f"split len: {len(sequences)}")
+
+    # Iterate over the sequences in pairs (time, SCPI block)
+    for i in range(0, len(sequences), 2):
+        time = sequences[i]
+        scpi_block_with_len = sequences[i + 1]
+
+        # remove the length of the block data
+        scpi_block = re.split(r"#(\d+)", scpi_block_with_len)[2]
+
+        # print(f"time_json_messages: {time_json_messages}")
+        time_json_messages = ipanalysis_parse_json_result(time, scpi_block)
+
+        # Store the time and parsed JSON messages in the result list
+        parsed_sequences.append(time_json_messages)
+
+    return parsed_sequences
+
+
+def ipanalysis_parse_json_result(time: str, encoded_json_block: str) -> dict:
+    """
+    Processes a base64 block:
+    - obtain the binary gzip string
+    - decompress the gzip string
+    - process the JSON message string by splitting it into individual JSON messages and parsing each message.
+
+    Args:
+        time (str): A string representing the time associated with the JSON messages.
+        encoded_json_block (str): A base64 block that decodes to a gzip stream containing the JSON messages, separated by newline characters.
+
+    Returns:
+        dict: A dictionary containing the time and a list of parsed JSON messages.
+    """
+    decoded_scpi_block = base64.b64decode(encoded_json_block)
+    decompressed_data = gzip.decompress(decoded_scpi_block)
+    # Split the SCPI block into individual JSON messages
+    json_block = decompressed_data.decode("utf-8")
+
+    json_messages = json_block.strip().split("\n")
+    # Parse each JSON message
+    parsed_json_messages = []
+    for message in json_messages:
+        try:
+            parsed_json_messages.append(json.loads(message))
+        except json.JSONDecodeError:
+            logging.warning(f"json.JSONDecodeError: {message}")
+            continue
+
+    # Store the time and parsed JSON messages in the result
+    return {"time": time, "json_messages": parsed_json_messages}
+
+
+def ipanalysis_parse_scpi_schema_result(schema_result: str) -> dict | None:
+    """
+    Parses the SCPI schema result string and extracts the JSON schema.
+
+    Args:
+        schema_result (str): A string containing the SCPI schema result.
+
+    Returns:
+        dict: A dictionary representing the parsed JSON schema, or None if the schema is not found or if there is an error in parsing.
+    """
+    try:
+        # Find the index of '{"$schema"'
+        start_index = schema_result.find('{"$schema"')
+        if start_index != -1:
+            # Create a new string starting from '{"$schema"'
+            json_schema_str = schema_result[start_index:].strip()
+            json_schema = json.loads(json_schema_str)
+            return json_schema
+        else:
+            logging.warning(
+                'The keyword {"$schema"} was not found in the input string.'
+            )
+            return None
+    except json.JSONDecodeError as e:
+        logging.error(f"Error decoding JSON schema: {e}")
+        return None
+    except Exception as e:
+        logging.error(f"An unexpected error occurred: {e}")
+        return None
+
+
+def ipanalysis_init_dataframes() -> dict[str, pl.DataFrame]:
+    """
+    Initializes and returns a dictionary of empty Polars DataFrames for IP analysis.
+
+    Returns:
+        dict: A dictionary containing empty DataFrames for various categories.
+    """
+    return {
+        "flow_started": pl.DataFrame(),
+        "report": pl.DataFrame(),
+        "upd_classification": pl.DataFrame(),
+        "upd_network": pl.DataFrame(),
+        "upd_fqdn": pl.DataFrame(),
+        "flow_closed": pl.DataFrame(),
+    }
+
+
+def ipanalysis_update_dataframes(
+    list_of_dfs: dict[str, pl.DataFrame], message: dict
+) -> dict[str, pl.DataFrame]:
+    """
+    Updates the dictionary of Polars DataFrames based on the contents of a given message.
+
+    Args:
+        list_of_dfs (dict): A dictionary containing Polars DataFrames for various categories (ipanalysis_init_dataframes may be used to get the initial values).
+        message (dict): A dictionary containing the message data to be processed.
+
+    Returns:
+        dict: The updated dictionary of Polars DataFrames.
+    """
+    data = message
+    msgs = []
+    # default
+    key = "report"
+    # test for a REPORT
+    if "REPORT" in data:
+        for i in data["REPORT"]["flows_stat"]:
+            i["time"] = (
+                data["REPORT"]["time"]["secs"] * 1000000000
+                + data["REPORT"]["time"]["nanos"]
+            )
+            msgs.append(i)
+    # test for a FLOW_STARTED
+    elif "FLOW_STARTED" in data:
+        msgs = [data["FLOW_STARTED"]]
+        key = "flow_started"
+    # test for a CLASSIFICATION
+    elif "UPDATE_CLASSIFICATION" in data:
+        msgs = [data["UPDATE_CLASSIFICATION"]]
+        key = "upd_classification"
+    # test for a NETWORK
+    elif "UPDATE_NETWORK" in data:
+        msgs = [data["UPDATE_NETWORK"]]
+        key = "upd_network"
+    # test for a FQDN
+    elif "UPDATE_FQDN" in data:
+        msgs = [data["UPDATE_FQDN"]]
+        key = "upd_fqdn"
+    # test for a FLOW_CLOSED
+    elif "FLOW_CLOSED" in data:
+        msgs = [data["FLOW_CLOSED"]]
+        key = "flow_closed"
+
+    # normalize the data
+    for i in msgs:
+        # test if 'time' key has not been replaced
+        if isinstance(i["time"], dict):
+            i["time"] = i["time"]["secs"] * 1000000000 + i["time"]["nanos"]
+        msg_df = fast_json_normalize.fast_json_normalize(
+            i,
+            separator="_",
+            to_pandas=False,
+            order_to_pandas=False,
+        )
+        # convert the time to datetime with the correct timezone
+        msg_df = pl.DataFrame(msg_df).with_columns(
+            time=pl.from_epoch("time", time_unit="ns").dt.replace_time_zone("UTC")
+        )
+        list_of_dfs[key] = pl.concat([list_of_dfs[key], msg_df], how="diagonal")
+
+    return list_of_dfs
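An end-to-end sketch tying the exported ip_analysis entry points together; reading the captured instrument response from a file is an assumption, while the SCPI query string and the function calls come from the package:

```python
# Hypothetical end-to-end use of ip_analysis; the capture file is an assumption.
from rs_mrt_dau_utilities.ip_analysis import (
    ipanalysis_init_dataframes,
    ipanalysis_parse_scpi_result,
    ipanalysis_update_dataframes,
)

# Captured response of FETCh:DATA:MEASurement:IPANalysis:RESult? (assumed file)
with open("ipanalysis_result.txt") as f:
    scpi_result = f.read()

dfs = ipanalysis_init_dataframes()
for sequence in ipanalysis_parse_scpi_result(scpi_result):
    for message in sequence["json_messages"]:
        dfs = ipanalysis_update_dataframes(dfs, message)

print(dfs["report"].head())
```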
File without changes
@@ -0,0 +1,106 @@
+Metadata-Version: 2.4
+Name: rs-mrt-dau-utilities
+Version: 0.1.0
+Summary: Add your description here
+Author: Didier Chagniot
+Author-email: Didier Chagniot <didier.chagniot@rohde-schwarz.com>
+License-Expression: MIT
+Requires-Dist: altair>=5.5.0
+Requires-Dist: fast-json-normalize>=0.0.9
+Requires-Dist: polars==1.35.2
+Requires-Python: >=3.11
+Description-Content-Type: text/markdown
+
+# rs-mrt-dau-utilities
+
+## Getting started
+
+To make it easy for you to get started with GitLab, here's a list of recommended next steps.
+
+Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
+
+## Add your files
+
+- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
+- [ ] [Add files using the command line](https://docs.gitlab.com/topics/git/add_files/#add-files-to-a-git-repository) or push an existing Git repository with the following command:
+
+```
+cd existing_repo
+git remote add origin https://code.rsint.net/MRT-DAU/packages/python/rs-mrt-dau-utilities.git
+git branch -M main
+git push -uf origin main
+```
+
+## Integrate with your tools
+
+- [ ] [Set up project integrations](https://code.rsint.net/MRT-DAU/packages/python/rs-mrt-dau-utilities/-/settings/integrations)
+
+## Collaborate with your team
+
+- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
+- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
+- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
+- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
+- [ ] [Set auto-merge](https://docs.gitlab.com/user/project/merge_requests/auto_merge/)
+
+## Test and Deploy
+
+Use the built-in continuous integration in GitLab.
+
+- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)
+- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
+- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
+- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
+- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
+
+***
+
+# Editing this README
+
+When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template.
+
+## Suggestions for a good README
+
+Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
+
+## Name
+Choose a self-explaining name for your project.
+
+## Description
+Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
+
+## Badges
+On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
+
+## Visuals
+Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
+
+## Installation
+Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
+
+## Usage
+Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
+
+## Support
+Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
+
+## Roadmap
+If you have ideas for releases in the future, it is a good idea to list them in the README.
+
+## Contributing
+State if you are open to contributions and what your requirements are for accepting them.
+
+For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
+
+You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
+
+## Authors and acknowledgment
+Show your appreciation to those who have contributed to the project.
+
+## License
+For open source projects, say how it is licensed.
+
+## Project status
+If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
@@ -0,0 +1,10 @@
+rs_mrt_dau_utilities/__init__.py,sha256=gHiOVHy2TW3g29joN2dkqwbVqwQxzJoFv2I0d3BbERA,70
+rs_mrt_dau_utilities/delay_meas/__init__.py,sha256=8G_Dv6jVT_6NVDKGlmCGuAjScyf-CbB1RF4fCupNIBQ,107
+rs_mrt_dau_utilities/delay_meas/delay_meas.py,sha256=w__9hUlJSg-tbSCvW6xh_SdsgDLgOZrCRO3h4o55UQA,2032
+rs_mrt_dau_utilities/delay_meas/dev.py,sha256=6KUTSE3Em6QsEWHpbEYmG06jKfTKsMZeDkFI3UwZnHI,7372
+rs_mrt_dau_utilities/ip_analysis/__init__.py,sha256=LQUaB1PjfXH-I2y1j47UL1tLBCwtwFAuYDkhzHP2JB0,404
+rs_mrt_dau_utilities/ip_analysis/ip_analysis.py,sha256=Z0dEkiG_tJ5yw_Ix00zW28qHlXCAQ-1O2_OrJJ_tb3U,7025
+rs_mrt_dau_utilities/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+rs_mrt_dau_utilities-0.1.0.dist-info/WHEEL,sha256=3id4o64OvRm9dUknh3mMJNcfoTRK08ua5cU6DFyVy-4,79
+rs_mrt_dau_utilities-0.1.0.dist-info/METADATA,sha256=B5nyC0eeZbJxC7kS9MFmESS2_eus0CGQFt2b6xNzOKc,6571
+rs_mrt_dau_utilities-0.1.0.dist-info/RECORD,,