pyxcp 0.25.2__cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyxcp/__init__.py +20 -0
- pyxcp/aml/EtasCANMonitoring.a2l +82 -0
- pyxcp/aml/EtasCANMonitoring.aml +67 -0
- pyxcp/aml/XCP_Common.aml +408 -0
- pyxcp/aml/XCPonCAN.aml +78 -0
- pyxcp/aml/XCPonEth.aml +33 -0
- pyxcp/aml/XCPonFlx.aml +113 -0
- pyxcp/aml/XCPonSxI.aml +66 -0
- pyxcp/aml/XCPonUSB.aml +106 -0
- pyxcp/aml/ifdata_CAN.a2l +20 -0
- pyxcp/aml/ifdata_Eth.a2l +11 -0
- pyxcp/aml/ifdata_Flx.a2l +94 -0
- pyxcp/aml/ifdata_SxI.a2l +13 -0
- pyxcp/aml/ifdata_USB.a2l +81 -0
- pyxcp/asam/__init__.py +0 -0
- pyxcp/asam/types.py +131 -0
- pyxcp/asamkeydll +0 -0
- pyxcp/asamkeydll.c +116 -0
- pyxcp/asamkeydll.sh +2 -0
- pyxcp/checksum.py +732 -0
- pyxcp/cmdline.py +83 -0
- pyxcp/config/__init__.py +1257 -0
- pyxcp/config/legacy.py +120 -0
- pyxcp/constants.py +47 -0
- pyxcp/cpp_ext/__init__.py +0 -0
- pyxcp/cpp_ext/aligned_buffer.hpp +168 -0
- pyxcp/cpp_ext/bin.hpp +105 -0
- pyxcp/cpp_ext/blockmem.hpp +58 -0
- pyxcp/cpp_ext/cpp_ext.cpython-310-x86_64-linux-gnu.so +0 -0
- pyxcp/cpp_ext/cpp_ext.cpython-311-x86_64-linux-gnu.so +0 -0
- pyxcp/cpp_ext/cpp_ext.cpython-312-x86_64-linux-gnu.so +0 -0
- pyxcp/cpp_ext/cpp_ext.cpython-313-x86_64-linux-gnu.so +0 -0
- pyxcp/cpp_ext/daqlist.hpp +374 -0
- pyxcp/cpp_ext/event.hpp +67 -0
- pyxcp/cpp_ext/extension_wrapper.cpp +131 -0
- pyxcp/cpp_ext/framing.hpp +360 -0
- pyxcp/cpp_ext/helper.hpp +280 -0
- pyxcp/cpp_ext/mcobject.hpp +248 -0
- pyxcp/cpp_ext/sxi_framing.hpp +332 -0
- pyxcp/cpp_ext/tsqueue.hpp +46 -0
- pyxcp/daq_stim/__init__.py +306 -0
- pyxcp/daq_stim/optimize/__init__.py +67 -0
- pyxcp/daq_stim/optimize/binpacking.py +41 -0
- pyxcp/daq_stim/scheduler.cpp +62 -0
- pyxcp/daq_stim/scheduler.hpp +75 -0
- pyxcp/daq_stim/stim.cpp +13 -0
- pyxcp/daq_stim/stim.cpython-310-x86_64-linux-gnu.so +0 -0
- pyxcp/daq_stim/stim.cpython-311-x86_64-linux-gnu.so +0 -0
- pyxcp/daq_stim/stim.cpython-312-x86_64-linux-gnu.so +0 -0
- pyxcp/daq_stim/stim.cpython-313-x86_64-linux-gnu.so +0 -0
- pyxcp/daq_stim/stim.hpp +604 -0
- pyxcp/daq_stim/stim_wrapper.cpp +50 -0
- pyxcp/dllif.py +100 -0
- pyxcp/errormatrix.py +878 -0
- pyxcp/examples/conf_can.toml +19 -0
- pyxcp/examples/conf_can_user.toml +16 -0
- pyxcp/examples/conf_can_vector.json +11 -0
- pyxcp/examples/conf_can_vector.toml +11 -0
- pyxcp/examples/conf_eth.toml +9 -0
- pyxcp/examples/conf_nixnet.json +20 -0
- pyxcp/examples/conf_socket_can.toml +12 -0
- pyxcp/examples/run_daq.py +165 -0
- pyxcp/examples/xcp_policy.py +60 -0
- pyxcp/examples/xcp_read_benchmark.py +38 -0
- pyxcp/examples/xcp_skel.py +48 -0
- pyxcp/examples/xcp_unlock.py +38 -0
- pyxcp/examples/xcp_user_supplied_driver.py +43 -0
- pyxcp/examples/xcphello.py +79 -0
- pyxcp/examples/xcphello_recorder.py +107 -0
- pyxcp/master/__init__.py +10 -0
- pyxcp/master/errorhandler.py +677 -0
- pyxcp/master/master.py +2645 -0
- pyxcp/py.typed +0 -0
- pyxcp/recorder/.idea/.gitignore +8 -0
- pyxcp/recorder/.idea/misc.xml +4 -0
- pyxcp/recorder/.idea/modules.xml +8 -0
- pyxcp/recorder/.idea/recorder.iml +6 -0
- pyxcp/recorder/.idea/sonarlint/issuestore/3/8/3808afc69ac1edb9d760000a2f137335b1b99728 +7 -0
- pyxcp/recorder/.idea/sonarlint/issuestore/9/a/9a2aa4db38d3115ed60da621e012c0efc0172aae +0 -0
- pyxcp/recorder/.idea/sonarlint/issuestore/b/4/b49006702b459496a8e8c94ebe60947108361b91 +0 -0
- pyxcp/recorder/.idea/sonarlint/issuestore/index.pb +7 -0
- pyxcp/recorder/.idea/sonarlint/securityhotspotstore/3/8/3808afc69ac1edb9d760000a2f137335b1b99728 +0 -0
- pyxcp/recorder/.idea/sonarlint/securityhotspotstore/9/a/9a2aa4db38d3115ed60da621e012c0efc0172aae +0 -0
- pyxcp/recorder/.idea/sonarlint/securityhotspotstore/b/4/b49006702b459496a8e8c94ebe60947108361b91 +0 -0
- pyxcp/recorder/.idea/sonarlint/securityhotspotstore/index.pb +7 -0
- pyxcp/recorder/.idea/vcs.xml +10 -0
- pyxcp/recorder/__init__.py +96 -0
- pyxcp/recorder/build_clang.cmd +1 -0
- pyxcp/recorder/build_clang.sh +2 -0
- pyxcp/recorder/build_gcc.cmd +1 -0
- pyxcp/recorder/build_gcc.sh +2 -0
- pyxcp/recorder/build_gcc_arm.sh +2 -0
- pyxcp/recorder/converter/__init__.py +445 -0
- pyxcp/recorder/lz4.c +2829 -0
- pyxcp/recorder/lz4.h +879 -0
- pyxcp/recorder/lz4hc.c +2041 -0
- pyxcp/recorder/lz4hc.h +413 -0
- pyxcp/recorder/mio.hpp +1714 -0
- pyxcp/recorder/reader.hpp +138 -0
- pyxcp/recorder/reco.py +278 -0
- pyxcp/recorder/recorder.rst +0 -0
- pyxcp/recorder/rekorder.cpp +59 -0
- pyxcp/recorder/rekorder.cpython-310-x86_64-linux-gnu.so +0 -0
- pyxcp/recorder/rekorder.cpython-311-x86_64-linux-gnu.so +0 -0
- pyxcp/recorder/rekorder.cpython-312-x86_64-linux-gnu.so +0 -0
- pyxcp/recorder/rekorder.cpython-313-x86_64-linux-gnu.so +0 -0
- pyxcp/recorder/rekorder.hpp +274 -0
- pyxcp/recorder/setup.py +41 -0
- pyxcp/recorder/test_reko.py +34 -0
- pyxcp/recorder/unfolder.hpp +1354 -0
- pyxcp/recorder/wrap.cpp +184 -0
- pyxcp/recorder/writer.hpp +302 -0
- pyxcp/scripts/__init__.py +0 -0
- pyxcp/scripts/pyxcp_probe_can_drivers.py +20 -0
- pyxcp/scripts/xcp_examples.py +64 -0
- pyxcp/scripts/xcp_fetch_a2l.py +40 -0
- pyxcp/scripts/xcp_id_scanner.py +18 -0
- pyxcp/scripts/xcp_info.py +144 -0
- pyxcp/scripts/xcp_profile.py +26 -0
- pyxcp/scripts/xmraw_converter.py +31 -0
- pyxcp/stim/__init__.py +0 -0
- pyxcp/tests/test_asam_types.py +24 -0
- pyxcp/tests/test_binpacking.py +186 -0
- pyxcp/tests/test_can.py +1324 -0
- pyxcp/tests/test_checksum.py +95 -0
- pyxcp/tests/test_daq.py +193 -0
- pyxcp/tests/test_daq_opt.py +426 -0
- pyxcp/tests/test_frame_padding.py +156 -0
- pyxcp/tests/test_framing.py +262 -0
- pyxcp/tests/test_master.py +2116 -0
- pyxcp/tests/test_transport.py +177 -0
- pyxcp/tests/test_utils.py +30 -0
- pyxcp/timing.py +60 -0
- pyxcp/transport/__init__.py +13 -0
- pyxcp/transport/base.py +484 -0
- pyxcp/transport/base_transport.hpp +0 -0
- pyxcp/transport/can.py +660 -0
- pyxcp/transport/eth.py +254 -0
- pyxcp/transport/sxi.py +209 -0
- pyxcp/transport/transport_ext.hpp +214 -0
- pyxcp/transport/transport_wrapper.cpp +249 -0
- pyxcp/transport/usb_transport.py +229 -0
- pyxcp/types.py +987 -0
- pyxcp/utils.py +127 -0
- pyxcp/vector/__init__.py +0 -0
- pyxcp/vector/map.py +82 -0
- pyxcp-0.25.2.dist-info/METADATA +341 -0
- pyxcp-0.25.2.dist-info/RECORD +151 -0
- pyxcp-0.25.2.dist-info/WHEEL +6 -0
- pyxcp-0.25.2.dist-info/entry_points.txt +9 -0
- pyxcp-0.25.2.dist-info/licenses/LICENSE +165 -0
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
|
|
2
|
+
#ifndef RECORDER_READER_HPP
|
|
3
|
+
#define RECORDER_READER_HPP
|
|
4
|
+
|
|
5
|
+
#include <algorithm>
#include <cstring>
#include <iostream>
#include <iterator>
#include <memory>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>
|
|
6
|
+
|
|
7
|
+
class XcpLogFileReader {
|
|
8
|
+
public:
|
|
9
|
+
|
|
10
|
+
explicit XcpLogFileReader(const std::string &file_name) {
|
|
11
|
+
if (!file_name.ends_with(detail::FILE_EXTENSION)) {
|
|
12
|
+
m_file_name = file_name + detail::FILE_EXTENSION;
|
|
13
|
+
} else {
|
|
14
|
+
m_file_name = file_name;
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
m_mmap = new mio::mmap_source(m_file_name);
|
|
18
|
+
blob_t magic[detail::MAGIC_SIZE + 1];
|
|
19
|
+
|
|
20
|
+
read_bytes(0UL, detail::MAGIC_SIZE, magic);
|
|
21
|
+
if (memcmp(detail::MAGIC.c_str(), magic, detail::MAGIC_SIZE) != 0) {
|
|
22
|
+
throw std::runtime_error("Invalid file magic.");
|
|
23
|
+
}
|
|
24
|
+
m_offset = detail::MAGIC_SIZE;
|
|
25
|
+
|
|
26
|
+
read_bytes(m_offset, detail::FILE_HEADER_SIZE, reinterpret_cast<blob_t *>(&m_header));
|
|
27
|
+
// printf("Sizes: %u %u %.3f\n", m_header.size_uncompressed,
|
|
28
|
+
// m_header.size_compressed,
|
|
29
|
+
// float(m_header.size_uncompressed) / float(m_header.size_compressed));
|
|
30
|
+
if (m_header.hdr_size != detail::FILE_HEADER_SIZE + detail::MAGIC_SIZE) {
|
|
31
|
+
throw std::runtime_error("File header size does not match.");
|
|
32
|
+
}
|
|
33
|
+
if (detail::VERSION != m_header.version) {
|
|
34
|
+
throw std::runtime_error("File version mismatch.");
|
|
35
|
+
}
|
|
36
|
+
#if 0
|
|
37
|
+
if (m_header.num_containers < 1) {
|
|
38
|
+
throw std::runtime_error("At least one container required.");
|
|
39
|
+
}
|
|
40
|
+
#endif
|
|
41
|
+
m_offset += detail::FILE_HEADER_SIZE;
|
|
42
|
+
|
|
43
|
+
if ((m_header.options & XMRAW_HAS_METADATA) == XMRAW_HAS_METADATA) {
|
|
44
|
+
std::size_t metadata_length = 0;
|
|
45
|
+
std::size_t data_start = m_offset + sizeof(std::size_t);
|
|
46
|
+
|
|
47
|
+
read_bytes(m_offset, sizeof(std::size_t), reinterpret_cast<blob_t *>(&metadata_length));
|
|
48
|
+
|
|
49
|
+
std::copy(ptr(data_start), ptr(data_start + metadata_length), std::back_inserter(m_metadata));
|
|
50
|
+
m_offset += (metadata_length + sizeof(std::size_t));
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
[[nodiscard]] FileHeaderType get_header() const noexcept {
|
|
55
|
+
return m_header;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
[[nodiscard]] auto get_header_as_tuple() const noexcept -> HeaderTuple {
|
|
59
|
+
auto hdr = get_header();
|
|
60
|
+
|
|
61
|
+
return std::make_tuple(
|
|
62
|
+
hdr.version, hdr.options, hdr.num_containers, hdr.record_count, hdr.size_uncompressed, hdr.size_compressed,
|
|
63
|
+
(double)((std::uint64_t)(((double)hdr.size_uncompressed / (double)hdr.size_compressed * 100.0) + 0.5)) / 100.0
|
|
64
|
+
);
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
[[nodiscard]] auto get_metadata() const noexcept {
|
|
68
|
+
return m_metadata;
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
void reset() noexcept {
|
|
72
|
+
m_current_container = 0;
|
|
73
|
+
m_offset = file_header_size();
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
std::optional<FrameVector> next_block() {
|
|
77
|
+
auto container = ContainerHeaderType{};
|
|
78
|
+
auto frame = frame_header_t{};
|
|
79
|
+
std::uint64_t boffs = 0;
|
|
80
|
+
auto result = FrameVector{};
|
|
81
|
+
payload_t payload;
|
|
82
|
+
|
|
83
|
+
if (m_current_container >= m_header.num_containers) {
|
|
84
|
+
return std::nullopt;
|
|
85
|
+
}
|
|
86
|
+
read_bytes(m_offset, detail::CONTAINER_SIZE, reinterpret_cast<blob_t *>(&container));
|
|
87
|
+
__ALIGN auto buffer = new blob_t[container.size_uncompressed];
|
|
88
|
+
m_offset += detail::CONTAINER_SIZE;
|
|
89
|
+
result.reserve(container.record_count);
|
|
90
|
+
const int uc_size = ::LZ4_decompress_safe(
|
|
91
|
+
reinterpret_cast<char const *>(ptr(m_offset)), reinterpret_cast<char *>(buffer), container.size_compressed,
|
|
92
|
+
container.size_uncompressed
|
|
93
|
+
);
|
|
94
|
+
if (uc_size < 0) {
|
|
95
|
+
throw std::runtime_error("LZ4 decompression failed.");
|
|
96
|
+
}
|
|
97
|
+
boffs = 0;
|
|
98
|
+
for (std::uint64_t idx = 0; idx < container.record_count; ++idx) {
|
|
99
|
+
_fcopy(reinterpret_cast<char *>(&frame), reinterpret_cast<char const *>(&(buffer[boffs])), sizeof(frame_header_t));
|
|
100
|
+
boffs += sizeof(frame_header_t);
|
|
101
|
+
result.emplace_back(
|
|
102
|
+
frame.category, frame.counter, frame.timestamp, frame.length, create_payload(frame.length, &buffer[boffs])
|
|
103
|
+
);
|
|
104
|
+
boffs += frame.length;
|
|
105
|
+
}
|
|
106
|
+
m_offset += container.size_compressed;
|
|
107
|
+
m_current_container += 1;
|
|
108
|
+
delete[] buffer;
|
|
109
|
+
|
|
110
|
+
return std::optional<FrameVector>{ result };
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
~XcpLogFileReader() noexcept {
|
|
114
|
+
delete m_mmap;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
protected:
|
|
118
|
+
|
|
119
|
+
[[nodiscard]] blob_t const *ptr(std::uint64_t pos = 0) const {
|
|
120
|
+
return reinterpret_cast<blob_t const *>(m_mmap->data() + pos);
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
void read_bytes(std::uint64_t pos, std::uint64_t count, blob_t *buf) const {
|
|
124
|
+
auto addr = reinterpret_cast<char const *>(ptr(pos));
|
|
125
|
+
_fcopy(reinterpret_cast<char *>(buf), addr, count);
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
private:
|
|
129
|
+
|
|
130
|
+
std::string m_file_name;
|
|
131
|
+
std::uint64_t m_offset{ 0 };
|
|
132
|
+
std::uint64_t m_current_container{ 0 };
|
|
133
|
+
mio::mmap_source *m_mmap{ nullptr };
|
|
134
|
+
FileHeaderType m_header;
|
|
135
|
+
std::string m_metadata;
|
|
136
|
+
};
|
|
137
|
+
|
|
138
|
+
#endif // RECORDER_READER_HPP
|
pyxcp/recorder/reco.py
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
"""Raw XCP traffic recorder.
|
|
3
|
+
|
|
4
|
+
Data is stored in LZ4 compressed containers.
|
|
5
|
+
|
|
6
|
+
Examples
|
|
7
|
+
--------
|
|
8
|
+
|
|
9
|
+
See
|
|
10
|
+
|
|
11
|
+
- ``_ for recording / writing
|
|
12
|
+
|
|
13
|
+
- ``_ for reading.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import enum
|
|
17
|
+
import mmap
|
|
18
|
+
import struct
|
|
19
|
+
from collections import namedtuple
|
|
20
|
+
|
|
21
|
+
import lz4.block as lz4block
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
FILE_EXTENSION = ".xmraw" # XCP Measurement / raw data.
|
|
25
|
+
|
|
26
|
+
MAGIC = b"ASAMINT::XCP_RAW"
|
|
27
|
+
|
|
28
|
+
FILE_HEADER_STRUCT = struct.Struct(f"<{len(MAGIC):d}sHHHLLLL")
|
|
29
|
+
FileHeader = namedtuple(
|
|
30
|
+
"FileHeader",
|
|
31
|
+
"magic hdr_size version options num_containers record_count size_compressed size_uncompressed",
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
CONTAINER_HEADER_STRUCT = struct.Struct("<LLL")
|
|
35
|
+
ContainerHeader = namedtuple("ContainerHeader", "record_count size_compressed size_uncompressed")
|
|
36
|
+
|
|
37
|
+
DAQ_RECORD_STRUCT = struct.Struct("<BHdL")
|
|
38
|
+
DAQRecord = namedtuple("DAQRecord", "category counter timestamp payload")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class XcpLogCategory(enum.IntEnum):
    """Category tag stored in the first byte of each log record header
    (s. ``DAQ_RECORD_STRUCT``)."""

    DAQ = 1  # Raw DAQ measurement frame.
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class XcpLogFileParseError(Exception):
    """Log file is damaged in some way (e.g. the file magic does not match)."""

    pass
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class XcpLogFileCapacityExceededError(Exception):
    """A write would exceed the pre-allocated file size (s. ``prealloc``)."""

    pass
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class XcpLogFileWriter:
    """Write raw XCP frames into an ``.xmraw`` file as LZ4 compressed containers.

    Frames are buffered in memory and flushed as one compressed container
    whenever roughly ``chunk_size`` KB of raw data have accumulated.

    Parameters
    ----------
    file_name: str
        Don't specify extension.

    prealloc: int
        Pre-allocate a sparse file (size in MB).

    chunk_size: int
        Number of kilobytes to collect before compressing.

    compression_level: int
        s. LZ4 documentation.
    """

    def __init__(
        self,
        file_name: str,
        prealloc: int = 10,
        chunk_size: int = 1024,
        compression_level: int = 9,
    ):
        # Mark closed first so __del__ is safe even if open() raises.
        self._is_closed = True
        # NOTE: the original wrapped this open() in a try/except that only
        # re-raised -- a no-op, removed.
        self._of = open(f"{file_name}{FILE_EXTENSION}", "w+b")
        self._of.truncate(1024 * 1024 * prealloc)  # Create sparse file (hopefully).
        self._mapping = mmap.mmap(self._of.fileno(), 0)
        # The first container header slot sits directly after the file
        # header; compressed record data follows that slot.  Each header is
        # written retroactively, once its container's sizes are known.
        self.container_header_offset = FILE_HEADER_STRUCT.size
        self.current_offset = self.container_header_offset + CONTAINER_HEADER_STRUCT.size
        self.total_size_uncompressed = self.total_size_compressed = 0
        self.container_size_uncompressed = self.container_size_compressed = 0
        self.total_record_count = 0
        self.chunk_size = chunk_size * 1024  # KB -> bytes.
        self.num_containers = 0
        self.intermediate_storage = []  # Packed records awaiting compression.
        self.compression_level = compression_level
        self.prealloc = prealloc
        self._is_closed = False

    def add_xcp_frames(self, xcp_frames: list):
        """Append ``(counter, timestamp, raw_data)`` tuples to the log,
        flushing a compressed container whenever the chunk size is exceeded.
        """
        for counter, timestamp, raw_data in xcp_frames:
            length = len(raw_data)
            # Category is hard-wired to 1 (DAQ), s. XcpLogCategory.
            item = DAQ_RECORD_STRUCT.pack(1, counter, timestamp, length) + raw_data
            self.intermediate_storage.append(item)
            self.container_size_uncompressed += len(item)
            if self.container_size_uncompressed > self.chunk_size:
                self._compress_framez()

    def _compress_framez(self):
        """Compress the buffered records into one container and write it,
        back-filling the container header reserved for it."""
        compressed_data = lz4block.compress(b"".join(self.intermediate_storage), compression=self.compression_level)
        record_count = len(self.intermediate_storage)
        hdr = CONTAINER_HEADER_STRUCT.pack(record_count, len(compressed_data), self.container_size_uncompressed)
        self.set(self.current_offset, compressed_data)
        self.set(self.container_header_offset, hdr)
        # Reserve the next container header slot right after this payload.
        self.container_header_offset = self.current_offset + len(compressed_data)
        self.current_offset = self.container_header_offset + CONTAINER_HEADER_STRUCT.size
        self.intermediate_storage = []
        self.total_record_count += record_count
        self.num_containers += 1
        self.total_size_uncompressed += self.container_size_uncompressed
        self.total_size_compressed += len(compressed_data)
        self.container_size_uncompressed = 0
        self.container_size_compressed = 0

    def __del__(self):
        # Best-effort cleanup; close() is idempotent via _is_closed.
        if not self._is_closed:
            self.close()

    def close(self):
        """Flush pending records, write the file header, and release the
        mapping and the underlying file.  Safe to call more than once."""
        if not self._is_closed:
            if hasattr(self, "_mapping"):
                if self.intermediate_storage:
                    self._compress_framez()
                self._write_header(
                    version=0x0100,
                    options=0x0000,
                    num_containers=self.num_containers,
                    record_count=self.total_record_count,
                    size_compressed=self.total_size_compressed,
                    size_uncompressed=self.total_size_uncompressed,
                )
                self._mapping.flush()
                self._mapping.close()
                # Shrink the pre-allocated sparse file to the bytes used.
                self._of.truncate(self.current_offset)
                self._of.close()
            self._is_closed = True

    def set(self, address: int, data: bytes):
        """Write to memory mapped file.

        Parameters
        ----------
        address: int

        data: bytes-like

        Raises
        ------
        XcpLogFileCapacityExceededError
            If the write would run past the pre-allocated file size.
        """
        length = len(data)
        try:
            self._mapping[address : address + length] = data
        except IndexError:
            raise XcpLogFileCapacityExceededError(f"Maximum file size of {self.prealloc} MBytes exceeded.") from None

    def _write_header(
        self,
        version,
        options,
        num_containers,
        record_count,
        size_compressed,
        size_uncompressed,
    ):
        # File header lives at offset 0, starting with the magic bytes.
        hdr = FILE_HEADER_STRUCT.pack(
            MAGIC,
            FILE_HEADER_STRUCT.size,
            version,
            options,
            num_containers,
            record_count,
            size_compressed,
            size_uncompressed,
        )
        self.set(0x00000000, hdr)

    @property
    def compression_ratio(self):
        # Returns None until at least one container has been written.
        if self.total_size_compressed:
            return self.total_size_uncompressed / self.total_size_compressed
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
class XcpLogFileReader:
    """Read back an ``.xmraw`` file written by :class:`XcpLogFileWriter`.

    Parameters
    ----------
    file_name: str
        Don't specify extension.

    Raises
    ------
    XcpLogFileParseError
        If the file magic does not match.
    """

    def __init__(self, file_name):
        # Mark closed first so __del__ is safe even if open() raises.
        self._is_closed = True
        # NOTE: the original wrapped this open() in a try/except that only
        # re-raised -- a no-op, removed.
        self._log_file = open(f"{file_name}{FILE_EXTENSION}", "r+b")
        self._mapping = mmap.mmap(self._log_file.fileno(), 0)
        self._is_closed = False
        (
            magic,
            _,  # hdr_size
            _,  # version
            _,  # options
            self.num_containers,
            self.total_record_count,
            self.total_size_compressed,
            self.total_size_uncompressed,
        ) = FILE_HEADER_STRUCT.unpack(self.get(0, FILE_HEADER_STRUCT.size))
        if magic != MAGIC:
            raise XcpLogFileParseError(f"Invalid file magic: {magic!r}.")

    def __del__(self):
        if not self._is_closed:
            self.close()

    @property
    def frames(self):
        """Iterate over all frames in file.

        Yields
        ------
        DAQRecord
        """
        offset = FILE_HEADER_STRUCT.size
        for _ in range(self.num_containers):
            (
                record_count,
                size_compressed,
                size_uncompressed,
            ) = CONTAINER_HEADER_STRUCT.unpack(self.get(offset, CONTAINER_HEADER_STRUCT.size))
            offset += CONTAINER_HEADER_STRUCT.size
            # memoryview: payload slices below are zero-copy views.
            uncompressed_data = memoryview(lz4block.decompress(self.get(offset, size_compressed)))
            frame_offset = 0
            for _ in range(record_count):
                category, counter, timestamp, frame_length = DAQ_RECORD_STRUCT.unpack(
                    uncompressed_data[frame_offset : frame_offset + DAQ_RECORD_STRUCT.size]
                )
                frame_offset += DAQ_RECORD_STRUCT.size
                frame_data = uncompressed_data[frame_offset : frame_offset + frame_length]  # .tobytes()
                frame_offset += len(frame_data)
                frame = DAQRecord(category, counter, timestamp, frame_data)
                yield frame
            offset += size_compressed

    def get(self, address: int, length: int):
        """Read from memory mapped file.

        Parameters
        ----------
        address: int

        length: int

        Returns
        -------
        bytes
        """
        return self._mapping[address : address + length]

    def close(self):
        """Release the mapping and the underlying file."""
        # BUGFIX: the original tested hasattr(self, "self._mapping") -- a
        # literal string that can never name an attribute -- so the mapping
        # and the log file were never actually closed.
        if hasattr(self, "_mapping"):
            self._mapping.close()
            self._log_file.close()
        self._is_closed = True

    @property
    def compression_ratio(self):
        # Returns None for a degenerate file with zero compressed bytes.
        if self.total_size_compressed:
            return self.total_size_uncompressed / self.total_size_compressed
|
|
File without changes
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
|
|
2
|
+
#define STANDALONE_REKORDER (1)
|
|
3
|
+
|
|
4
|
+
#include "rekorder.hpp"
|
|
5
|
+
|
|
6
|
+
/// Feed `writer` a large batch of synthetic DAQ frames with pseudo-random
/// lengths (10..249 bytes) and a cycling fill byte (0..15).  Uses rand(),
/// so call srand() first for reproducible output.
void some_records(XcpLogFileWriter& writer) {
    constexpr int NUM_FRAMES = 1024 * 100 * 5;
    char          scratch[1024];
    unsigned      fill_byte = 0x00;

    for (int frame_no = 0; frame_no < NUM_FRAMES; ++frame_no) {
        frame_header_t hdr{};
        hdr.category  = 1;
        hdr.counter   = frame_no;
        hdr.timestamp = std::clock();
        hdr.length    = 10 + (rand() % 240);
        fill_byte     = (fill_byte + 1) % 16;
        memset(scratch, fill_byte, hdr.length);
        writer.add_frame(hdr.category, hdr.counter, hdr.timestamp, hdr.length, std::bit_cast<char const *>(&scratch));
    }
}
|
|
22
|
+
|
|
23
|
+
// Round-trip smoke test: write synthetic frames, then read every container
// back and print the resulting file statistics.
int main() {
    srand(42);  // Fixed seed -- deterministic frame lengths in some_records().

    printf("\nWRITER\n");
    printf("======\n");

    // NOTE(review): the two numeric args are presumably prealloc-MB and
    // chunk-size -- confirm against the XcpLogFileWriter ctor in rekorder.hpp.
    auto writer = XcpLogFileWriter("test_logger", 250, 1);
    some_records(writer);
    writer.finalize();

    printf("\nREADER\n");
    printf("======\n");

    auto reader = XcpLogFileReader("test_logger");
    auto header = reader.get_header();

    printf("size: %u\n", header.hdr_size);
    printf("version: %u\n", header.version);
    printf("options: %u\n", header.options);
    printf("containers: %u\n", header.num_containers);
    printf("records: %u\n", header.record_count);
    printf("size/compressed: %u\n", header.size_compressed);
    printf("size/uncompressed: %u\n", header.size_uncompressed);
    printf("compression ratio: %.2f\n", static_cast<float>(header.size_uncompressed) / static_cast<float>(header.size_compressed));

    // Drain the file container by container; next_block() yields
    // std::nullopt once every container has been consumed.
    while (true) {
        const auto& frames = reader.next_block();
        if (!frames) {
            break;
        }
        for (const auto& frame : frames.value()) {
            // Destructure only to exercise the tuple shape; values unused.
            auto const& [category, counter, timestamp, length, payload] = frame;
        }
    }
    printf("---\n");
    printf("Finished.\n");
}
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|