pyxcp 0.25.2__cp314-cp314-win_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154)
  1. pyxcp/__init__.py +20 -0
  2. pyxcp/aml/EtasCANMonitoring.a2l +82 -0
  3. pyxcp/aml/EtasCANMonitoring.aml +67 -0
  4. pyxcp/aml/XCP_Common.aml +408 -0
  5. pyxcp/aml/XCPonCAN.aml +78 -0
  6. pyxcp/aml/XCPonEth.aml +33 -0
  7. pyxcp/aml/XCPonFlx.aml +113 -0
  8. pyxcp/aml/XCPonSxI.aml +66 -0
  9. pyxcp/aml/XCPonUSB.aml +106 -0
  10. pyxcp/aml/ifdata_CAN.a2l +20 -0
  11. pyxcp/aml/ifdata_Eth.a2l +11 -0
  12. pyxcp/aml/ifdata_Flx.a2l +94 -0
  13. pyxcp/aml/ifdata_SxI.a2l +13 -0
  14. pyxcp/aml/ifdata_USB.a2l +81 -0
  15. pyxcp/asam/__init__.py +0 -0
  16. pyxcp/asam/types.py +131 -0
  17. pyxcp/asamkeydll.c +116 -0
  18. pyxcp/asamkeydll.exe +0 -0
  19. pyxcp/asamkeydll.sh +2 -0
  20. pyxcp/checksum.py +732 -0
  21. pyxcp/cmdline.py +83 -0
  22. pyxcp/config/__init__.py +1257 -0
  23. pyxcp/config/legacy.py +120 -0
  24. pyxcp/constants.py +47 -0
  25. pyxcp/cpp_ext/__init__.py +0 -0
  26. pyxcp/cpp_ext/aligned_buffer.hpp +168 -0
  27. pyxcp/cpp_ext/bin.hpp +105 -0
  28. pyxcp/cpp_ext/blockmem.hpp +58 -0
  29. pyxcp/cpp_ext/cpp_ext.cp310-win_arm64.pyd +0 -0
  30. pyxcp/cpp_ext/cpp_ext.cp311-win_arm64.pyd +0 -0
  31. pyxcp/cpp_ext/cpp_ext.cp312-win_arm64.pyd +0 -0
  32. pyxcp/cpp_ext/cpp_ext.cp313-win_arm64.pyd +0 -0
  33. pyxcp/cpp_ext/cpp_ext.cp314-win_arm64.pyd +0 -0
  34. pyxcp/cpp_ext/daqlist.hpp +374 -0
  35. pyxcp/cpp_ext/event.hpp +67 -0
  36. pyxcp/cpp_ext/extension_wrapper.cpp +131 -0
  37. pyxcp/cpp_ext/framing.hpp +360 -0
  38. pyxcp/cpp_ext/helper.hpp +280 -0
  39. pyxcp/cpp_ext/mcobject.hpp +248 -0
  40. pyxcp/cpp_ext/sxi_framing.hpp +332 -0
  41. pyxcp/cpp_ext/tsqueue.hpp +46 -0
  42. pyxcp/daq_stim/__init__.py +306 -0
  43. pyxcp/daq_stim/optimize/__init__.py +67 -0
  44. pyxcp/daq_stim/optimize/binpacking.py +41 -0
  45. pyxcp/daq_stim/scheduler.cpp +62 -0
  46. pyxcp/daq_stim/scheduler.hpp +75 -0
  47. pyxcp/daq_stim/stim.cp310-win_arm64.pyd +0 -0
  48. pyxcp/daq_stim/stim.cp311-win_arm64.pyd +0 -0
  49. pyxcp/daq_stim/stim.cp312-win_arm64.pyd +0 -0
  50. pyxcp/daq_stim/stim.cp313-win_arm64.pyd +0 -0
  51. pyxcp/daq_stim/stim.cp314-win_arm64.pyd +0 -0
  52. pyxcp/daq_stim/stim.cpp +13 -0
  53. pyxcp/daq_stim/stim.hpp +604 -0
  54. pyxcp/daq_stim/stim_wrapper.cpp +50 -0
  55. pyxcp/dllif.py +100 -0
  56. pyxcp/errormatrix.py +878 -0
  57. pyxcp/examples/conf_can.toml +19 -0
  58. pyxcp/examples/conf_can_user.toml +16 -0
  59. pyxcp/examples/conf_can_vector.json +11 -0
  60. pyxcp/examples/conf_can_vector.toml +11 -0
  61. pyxcp/examples/conf_eth.toml +9 -0
  62. pyxcp/examples/conf_nixnet.json +20 -0
  63. pyxcp/examples/conf_socket_can.toml +12 -0
  64. pyxcp/examples/run_daq.py +165 -0
  65. pyxcp/examples/xcp_policy.py +60 -0
  66. pyxcp/examples/xcp_read_benchmark.py +38 -0
  67. pyxcp/examples/xcp_skel.py +48 -0
  68. pyxcp/examples/xcp_unlock.py +38 -0
  69. pyxcp/examples/xcp_user_supplied_driver.py +43 -0
  70. pyxcp/examples/xcphello.py +79 -0
  71. pyxcp/examples/xcphello_recorder.py +107 -0
  72. pyxcp/master/__init__.py +10 -0
  73. pyxcp/master/errorhandler.py +677 -0
  74. pyxcp/master/master.py +2645 -0
  75. pyxcp/py.typed +0 -0
  76. pyxcp/recorder/.idea/.gitignore +8 -0
  77. pyxcp/recorder/.idea/misc.xml +4 -0
  78. pyxcp/recorder/.idea/modules.xml +8 -0
  79. pyxcp/recorder/.idea/recorder.iml +6 -0
  80. pyxcp/recorder/.idea/sonarlint/issuestore/3/8/3808afc69ac1edb9d760000a2f137335b1b99728 +7 -0
  81. pyxcp/recorder/.idea/sonarlint/issuestore/9/a/9a2aa4db38d3115ed60da621e012c0efc0172aae +0 -0
  82. pyxcp/recorder/.idea/sonarlint/issuestore/b/4/b49006702b459496a8e8c94ebe60947108361b91 +0 -0
  83. pyxcp/recorder/.idea/sonarlint/issuestore/index.pb +7 -0
  84. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/3/8/3808afc69ac1edb9d760000a2f137335b1b99728 +0 -0
  85. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/9/a/9a2aa4db38d3115ed60da621e012c0efc0172aae +0 -0
  86. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/b/4/b49006702b459496a8e8c94ebe60947108361b91 +0 -0
  87. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/index.pb +7 -0
  88. pyxcp/recorder/.idea/vcs.xml +10 -0
  89. pyxcp/recorder/__init__.py +96 -0
  90. pyxcp/recorder/build_clang.cmd +1 -0
  91. pyxcp/recorder/build_clang.sh +2 -0
  92. pyxcp/recorder/build_gcc.cmd +1 -0
  93. pyxcp/recorder/build_gcc.sh +2 -0
  94. pyxcp/recorder/build_gcc_arm.sh +2 -0
  95. pyxcp/recorder/converter/__init__.py +445 -0
  96. pyxcp/recorder/lz4.c +2829 -0
  97. pyxcp/recorder/lz4.h +879 -0
  98. pyxcp/recorder/lz4hc.c +2041 -0
  99. pyxcp/recorder/lz4hc.h +413 -0
  100. pyxcp/recorder/mio.hpp +1714 -0
  101. pyxcp/recorder/reader.hpp +138 -0
  102. pyxcp/recorder/reco.py +278 -0
  103. pyxcp/recorder/recorder.rst +0 -0
  104. pyxcp/recorder/rekorder.cp310-win_arm64.pyd +0 -0
  105. pyxcp/recorder/rekorder.cp311-win_arm64.pyd +0 -0
  106. pyxcp/recorder/rekorder.cp312-win_arm64.pyd +0 -0
  107. pyxcp/recorder/rekorder.cp313-win_arm64.pyd +0 -0
  108. pyxcp/recorder/rekorder.cp314-win_arm64.pyd +0 -0
  109. pyxcp/recorder/rekorder.cpp +59 -0
  110. pyxcp/recorder/rekorder.hpp +274 -0
  111. pyxcp/recorder/setup.py +41 -0
  112. pyxcp/recorder/test_reko.py +34 -0
  113. pyxcp/recorder/unfolder.hpp +1354 -0
  114. pyxcp/recorder/wrap.cpp +184 -0
  115. pyxcp/recorder/writer.hpp +302 -0
  116. pyxcp/scripts/__init__.py +0 -0
  117. pyxcp/scripts/pyxcp_probe_can_drivers.py +20 -0
  118. pyxcp/scripts/xcp_examples.py +64 -0
  119. pyxcp/scripts/xcp_fetch_a2l.py +40 -0
  120. pyxcp/scripts/xcp_id_scanner.py +18 -0
  121. pyxcp/scripts/xcp_info.py +144 -0
  122. pyxcp/scripts/xcp_profile.py +26 -0
  123. pyxcp/scripts/xmraw_converter.py +31 -0
  124. pyxcp/stim/__init__.py +0 -0
  125. pyxcp/tests/test_asam_types.py +24 -0
  126. pyxcp/tests/test_binpacking.py +186 -0
  127. pyxcp/tests/test_can.py +1324 -0
  128. pyxcp/tests/test_checksum.py +95 -0
  129. pyxcp/tests/test_daq.py +193 -0
  130. pyxcp/tests/test_daq_opt.py +426 -0
  131. pyxcp/tests/test_frame_padding.py +156 -0
  132. pyxcp/tests/test_framing.py +262 -0
  133. pyxcp/tests/test_master.py +2116 -0
  134. pyxcp/tests/test_transport.py +177 -0
  135. pyxcp/tests/test_utils.py +30 -0
  136. pyxcp/timing.py +60 -0
  137. pyxcp/transport/__init__.py +13 -0
  138. pyxcp/transport/base.py +484 -0
  139. pyxcp/transport/base_transport.hpp +0 -0
  140. pyxcp/transport/can.py +660 -0
  141. pyxcp/transport/eth.py +254 -0
  142. pyxcp/transport/sxi.py +209 -0
  143. pyxcp/transport/transport_ext.hpp +214 -0
  144. pyxcp/transport/transport_wrapper.cpp +249 -0
  145. pyxcp/transport/usb_transport.py +229 -0
  146. pyxcp/types.py +987 -0
  147. pyxcp/utils.py +127 -0
  148. pyxcp/vector/__init__.py +0 -0
  149. pyxcp/vector/map.py +82 -0
  150. pyxcp-0.25.2.dist-info/METADATA +341 -0
  151. pyxcp-0.25.2.dist-info/RECORD +154 -0
  152. pyxcp-0.25.2.dist-info/WHEEL +4 -0
  153. pyxcp-0.25.2.dist-info/entry_points.txt +9 -0
  154. pyxcp-0.25.2.dist-info/licenses/LICENSE +165 -0
pyxcp/daq_stim/__init__.py
@@ -0,0 +1,306 @@
+ #!/usr/bin/env python
+
+ from time import time_ns
+ from typing import Dict, List, Optional, TextIO, Union
+
+ from pyxcp import types
+ from pyxcp.config import get_application
+ from pyxcp.cpp_ext.cpp_ext import DaqList, PredefinedDaqList
+ from pyxcp.daq_stim.optimize import make_continuous_blocks
+ from pyxcp.daq_stim.optimize.binpacking import first_fit_decreasing
+ from pyxcp.recorder import DaqOnlinePolicy as _DaqOnlinePolicy
+ from pyxcp.recorder import DaqRecorderPolicy as _DaqRecorderPolicy
+ from pyxcp.recorder import MeasurementParameters
+ from pyxcp.utils import CurrentDatetime
+
+ DAQ_ID_FIELD_SIZE = {
+     "IDF_ABS_ODT_NUMBER": 1,
+     "IDF_REL_ODT_NUMBER_ABS_DAQ_LIST_NUMBER_BYTE": 2,
+     "IDF_REL_ODT_NUMBER_ABS_DAQ_LIST_NUMBER_WORD": 3,
+     "IDF_REL_ODT_NUMBER_ABS_DAQ_LIST_NUMBER_WORD_ALIGNED": 4,
+ }
+
+ DAQ_TIMESTAMP_SIZE = {
+     "S1": 1,
+     "S2": 2,
+     "S4": 4,
+ }
+
+
+ class DaqProcessor:
+     def __init__(self, daq_lists: List[Union[DaqList, PredefinedDaqList]]):
+         self.daq_lists = daq_lists
+         self.is_predefined = [isinstance(d, PredefinedDaqList) for d in daq_lists]
+         self.log = get_application().log
+         # Flag indicating a fatal OS-level error occurred during DAQ (e.g., disk full, out-of-memory)
+         self._fatal_os_error: bool = False
+
+     def setup(self, start_datetime: Optional[CurrentDatetime] = None, write_multiple: bool = True):
+         if not self.xcp_master.slaveProperties.supportsDaq:
+             raise RuntimeError("DAQ functionality is not supported.")
+         self.daq_info = self.xcp_master.getDaqInfo(include_event_lists=False)
+         if start_datetime is None:
+             start_datetime = CurrentDatetime(time_ns())
+         self.start_datetime = start_datetime
+         try:
+             processor = self.daq_info.get("processor")
+             properties = processor.get("properties")
+             resolution = self.daq_info.get("resolution")
+             if properties["configType"] == "STATIC" and not all(self.is_predefined):
+                 raise TypeError(
+                     "DAQ configuration is static, but your configuration contains dynamic DAQ lists -- cannot proceed."
+                 )
+             self.supports_timestampes = properties["timestampSupported"]
+             self.supports_prescaler = properties["prescalerSupported"]
+             self.supports_pid_off = properties["pidOffSupported"]
+             if self.supports_timestampes:
+                 mode = resolution.get("timestampMode")
+                 self.ts_fixed = mode.get("fixed")
+                 self.ts_size = DAQ_TIMESTAMP_SIZE[mode.get("size")]
+                 ts_factor = types.DAQ_TIMESTAMP_UNIT_TO_NS[mode.get("unit")]
+                 ts_ticks = resolution.get("timestampTicks")
+                 self.ts_scale_factor = ts_factor * ts_ticks
+             else:
+                 self.ts_size = 0
+                 self.ts_fixed = False
+                 self.ts_scale_factor = 0.0
+             key_byte = processor.get("keyByte")
+             header_len = DAQ_ID_FIELD_SIZE[key_byte["identificationField"]]
+             max_dto = self.xcp_master.slaveProperties.maxDto
+             self.min_daq = processor.get("minDaq")
+             max_odt_entry_size = resolution.get("maxOdtEntrySizeDaq")
+             max_payload_size = min(max_odt_entry_size, max_dto - header_len)
+             # First ODT may contain timestamp.
+             self.selectable_timestamps = False
+             max_payload_size_first = max_payload_size
+             if not self.supports_timestampes:
+                 # print("NO TIMESTAMP SUPPORT")
+                 pass
+             else:
+                 if self.ts_fixed:
+                     # print("Fixed timestamp")
+                     max_payload_size_first = max_payload_size - self.ts_size
+                 else:
+                     # print("timestamp variable.")
+                     self.selectable_timestamps = True
+         except Exception as e:
+             raise TypeError(f"DAQ_INFO corrupted: {e}") from e
+
+         # DAQ optimization.
+         # For dynamic DaqList instances, compute physical layout; skip for PredefinedDaqList.
+         for idx, daq_list in enumerate(self.daq_lists):
+             if isinstance(daq_list, PredefinedDaqList):
+                 continue
+             if self.selectable_timestamps:
+                 if daq_list.enable_timestamps:
+                     max_payload_size_first = max_payload_size - self.ts_size
+                 else:
+                     max_payload_size_first = max_payload_size
+             ttt = make_continuous_blocks(daq_list.measurements, max_payload_size, max_payload_size_first)
+             daq_list.measurements_opt = first_fit_decreasing(ttt, max_payload_size, max_payload_size_first)
+         byte_order = 0 if self.xcp_master.slaveProperties.byteOrder == "INTEL" else 1
+         self._first_pids = []
+         daq_count = len(self.daq_lists)
+
+         # Decide whether DAQ allocation must be performed.
+         config_static = self.daq_info.get("processor", {}).get("properties", {}).get("configType") == "STATIC"
+
+         if not config_static:
+             # For dynamic configuration, program only dynamic (non-predefined) DAQ lists.
+             self.xcp_master.freeDaq()
+             # Allocate the number of DAQ lists required.
+             self.xcp_master.allocDaq(daq_count)
+             measurement_list = []
+             for i, daq_list in enumerate(self.daq_lists, self.min_daq):
+                 if isinstance(daq_list, PredefinedDaqList):
+                     # Skip allocation for predefined DAQ lists.
+                     continue
+                 measurements = daq_list.measurements_opt
+                 measurement_list.append((i, measurements))
+                 odt_count = len(measurements)
+                 self.xcp_master.allocOdt(i, odt_count)
+             # Iterate again over ODT entries -- we need to respect sequencing requirements.
+             for i, measurements in measurement_list:
+                 for j, measurement in enumerate(measurements):
+                     entry_count = len(measurement.entries)
+                     self.xcp_master.allocOdtEntry(i, j, entry_count)
+             # Write DAQs (only for dynamic lists)
+             for i, daq_list in enumerate(self.daq_lists, self.min_daq):
+                 if isinstance(daq_list, PredefinedDaqList):
+                     continue
+                 measurements = daq_list.measurements_opt
+                 for j, measurement in enumerate(measurements):
+                     if len(measurement.entries) == 0:
+                         continue # CAN special case: No room for data in first ODT.
+                     self.xcp_master.setDaqPtr(i, j, 0)
+                     for entry in measurement.entries:
+                         self.xcp_master.writeDaq(0xFF, entry.length, entry.ext, entry.address)
+         else:
+             # STATIC configuration on the slave: skip allocation and programming; lists/ODTs are predefined.
+             pass
+
+         # arm DAQ lists -- this is technically a function on its own.
+         first_daq_list = 0 if config_static else self.min_daq
+         for i, daq_list in enumerate(self.daq_lists, first_daq_list):
+             mode = 0x00
+             if self.supports_timestampes and (self.ts_fixed or (self.selectable_timestamps and daq_list.enable_timestamps)):
+                 mode = 0x10
+             if daq_list.stim:
+                 mode |= 0x02
+             ###
+             ## mode |= 0x20
+             ###
+             self.xcp_master.setDaqListMode(
+                 daq_list_number=i,
+                 mode=mode,
+                 event_channel_number=daq_list.event_num,
+                 prescaler=daq_list.prescaler,
+                 priority=daq_list.priority,
+             )
+             res = self.xcp_master.startStopDaqList(0x02, i)
+             self._first_pids.append(res.firstPid)
+         if start_datetime:
+             pass
+         self.measurement_params = MeasurementParameters(
+             byte_order,
+             header_len,
+             self.supports_timestampes,
+             self.ts_fixed,
+             self.supports_prescaler,
+             self.selectable_timestamps,
+             self.ts_scale_factor,
+             self.ts_size,
+             self.min_daq,
+             self.start_datetime,
+             self.daq_lists,
+             self._first_pids,
+         )
+         self.set_parameters(self.measurement_params)
+
+     def start(self):
+         self.xcp_master.startStopSynch(0x01)
+
+     def stop(self):
+         # If a fatal OS error occurred during acquisition, skip sending stop to the slave to avoid
+         # cascading timeouts/unrecoverable errors and shut down transport gracefully instead.
+         if getattr(self, "_fatal_os_error", False):
+             try:
+                 self.log.error(
+                     "DAQ stop skipped due to previous fatal OS error (e.g., disk full or out-of-memory). Closing transport."
+                 )
+             except Exception:
+                 pass # nosec
+             try:
+                 # Best-effort: stop listener and close transport so threads finish cleanly.
+                 if hasattr(self.xcp_master, "transport") and self.xcp_master.transport is not None:
+                     # Signal listeners to stop
+                     try:
+                         if hasattr(self.xcp_master.transport, "closeEvent"):
+                             self.xcp_master.transport.closeEvent.set()
+                     except Exception:
+                         pass # nosec
+                     # Close transport connection
+                     try:
+                         self.xcp_master.transport.close()
+                     except Exception:
+                         pass # nosec
+             finally:
+                 return
+         self.xcp_master.startStopSynch(0x00)
+
+     def first_pids(self):
+         return self._first_pids
+
+
+ class DaqRecorder(DaqProcessor, _DaqRecorderPolicy):
+     def __init__(self, daq_lists: List[DaqList], file_name: str, prealloc: int = 200, chunk_size: int = 1):
+         DaqProcessor.__init__(self, daq_lists)
+         _DaqRecorderPolicy.__init__(self)
+         self.file_name = file_name
+         self.prealloc = prealloc
+         self.chunk_size = chunk_size
+
+     def initialize(self):
+         metadata = self.measurement_params.dumps()
+         _DaqRecorderPolicy.create_writer(self, self.file_name, self.prealloc, self.chunk_size, metadata)
+         _DaqRecorderPolicy.initialize(self)
+
+     def finalize(self):
+         _DaqRecorderPolicy.finalize(self)
+
+     def start(self):
+         DaqProcessor.start(self)
+
+
+ class DaqOnlinePolicy(DaqProcessor, _DaqOnlinePolicy):
+     """Base class for on-line measurements.
+     Handles multiple inheritance.
+     """
+
+     def __init__(self, daq_lists: List[DaqList]):
+         DaqProcessor.__init__(self, daq_lists)
+         _DaqOnlinePolicy.__init__(self)
+
+     def start(self):
+         DaqProcessor.start(self)
+
+
+ class DaqToCsv(DaqOnlinePolicy):
+     """Save a measurement as CSV files (one per DAQ-list)."""
+
+     def initialize(self):
+         self.log.debug("DaqCsv::Initialize()")
+         self.files: Dict[int, TextIO] = {}
+         for num, daq_list in enumerate(self.daq_lists):
+             if daq_list.stim:
+                 continue
+             out_file = open(f"{daq_list.name}.csv", "w")
+             self.files[num] = out_file
+             hdr = ",".join(["timestamp0", "timestamp1"] + [h[0] for h in daq_list.headers])
+             out_file.write(f"{hdr}\n")
+
+     def on_daq_list(self, daq_list: int, timestamp0: int, timestamp1: int, payload: list):
+         # Guard against hard OS errors (e.g., disk full) during file writes.
+         if getattr(self, "_fatal_os_error", False):
+             return
+         try:
+             self.files[daq_list].write(f"{timestamp0},{timestamp1},{', '.join([str(x) for x in payload])}\n")
+         except (OSError, MemoryError) as ex:
+             # Mark fatal condition to alter shutdown path and avoid further writes/commands.
+             self._fatal_os_error = True
+             try:
+                 self.log.critical(f"DAQ file write failed: {ex.__class__.__name__}: {ex}. Initiating graceful shutdown.")
+             except Exception:
+                 pass # nosec
+             # Stop listener to prevent more DAQ traffic and avoid thread crashes.
+             try:
+                 if hasattr(self.xcp_master, "transport") and self.xcp_master.transport is not None:
+                     if hasattr(self.xcp_master.transport, "closeEvent"):
+                         self.xcp_master.transport.closeEvent.set()
+             except Exception:
+                 pass # nosec
+             # Best-effort: close any opened files to flush buffers and release resources.
+             try:
+                 for f in getattr(self, "files", {}).values():
+                     try:
+                         f.flush()
+                     except Exception:
+                         pass # nosec
+                     try:
+                         f.close()
+                     except Exception:
+                         pass # nosec
+             except Exception:
+                 pass # nosec
+             # Do not re-raise; allow the system to continue to a controlled shutdown.
+             return
+
+     def finalize(self):
+         self.log.debug("DaqCsv::finalize()")
+         ##
+         ## NOTE: `finalize` is guaranteed to be called, but `initialize` may fail for various reasons.
+         ## So if you allocate resources in `initialize`, check that the allocation actually happened.
+         ##
+         if hasattr(self, "files"):
+             for f in self.files.values():
+                 f.close()
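
Editorial note: the sketch below (not part of the package diff) shows one plausible way to wire the classes above together, following the pattern of pyxcp/examples/run_daq.py. The DaqList keyword arguments are inferred from the attributes read in setup() (event_num, stim, enable_timestamps, measurements, priority, prescaler); the measurement tuples and addresses are illustrative placeholders, and the ArgumentParser.run(policy=...) interface is assumed from the packaged examples rather than guaranteed by this diff.

    from pyxcp.cmdline import ArgumentParser
    from pyxcp.daq_stim import DaqList, DaqToCsv

    # One DAQ list bound to event channel 2; names, addresses and types are placeholders.
    daq_lists = [
        DaqList(
            name="signals",
            event_num=2,
            stim=False,
            enable_timestamps=True,
            measurements=[
                ("channel1", 0x1BD004, 0, "F32"),
                ("channel2", 0x1BD008, 0, "U16"),
            ],
            priority=0,
            prescaler=1,
        ),
    ]

    daq = DaqToCsv(daq_lists)  # writes one <daq_list.name>.csv per DAQ list
    ap = ArgumentParser(description="DAQ-to-CSV sketch")
    with ap.run(policy=daq) as x:
        x.connect()
        daq.setup()   # GET_DAQ_INFO, FREE_DAQ/ALLOC_*, WRITE_DAQ, SET_DAQ_LIST_MODE, arm lists
        daq.start()   # START_STOP_SYNCH(0x01)
        input("Measurement running -- press Enter to stop.")
        daq.stop()    # START_STOP_SYNCH(0x00), or a graceful transport close after a fatal OS error
        x.disconnect()
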
pyxcp/daq_stim/optimize/__init__.py
@@ -0,0 +1,67 @@
+ #!/usr/bin/env python
+ """Optimize data-structures like memory sections."""
+
+ from itertools import groupby
+ from operator import attrgetter
+ from typing import List
+
+ from pyxcp.cpp_ext.cpp_ext import McObject
+
+
+ def make_continuous_blocks(chunks: List[McObject], upper_bound=None, upper_bound_initial=None) -> List[McObject]:
+     """Try to make continuous blocks from a list of small, unordered `chunks`.
+
+     Parameters
+     ----------
+     chunks: list of `McObject`
+
+     Returns
+     -------
+     sorted list of `McObject`
+     """
+
+     def key_func(x):
+         return (x.ext, x.address)
+
+     values = []
+     # 1. Group by address.
+     for _key, value in groupby(sorted(chunks, key=key_func), key=key_func):
+         # 2. Pick the largest one.
+         values.append(max(value, key=attrgetter("length")))
+     result_sections = []
+     last_section = None
+     last_ext = None
+     first_section = True
+     if upper_bound_initial is None:
+         upper_bound_initial = upper_bound
+     while values:
+         section = values.pop(0)
+         if (last_section and section.address <= last_section.address + last_section.length) and not (section.ext != last_ext):
+             last_end = last_section.address + last_section.length - 1
+             current_end = section.address + section.length - 1
+             if last_end > section.address:
+                 pass
+             else:
+                 offset = current_end - last_end
+                 if upper_bound:
+                     if first_section:
+                         upper_bound = upper_bound_initial
+                         first_section = False
+                     if last_section.length + offset <= upper_bound:
+                         last_section.length += offset
+                         last_section.add_component(section)
+                     else:
+                         result_sections.append(
+                             McObject(name="", address=section.address, ext=section.ext, length=section.length, components=[section])
+                         )
+                 else:
+                     last_section.length += offset
+                     last_section.add_component(section)
+         else:
+             # Create a new section.
+             result_sections.append(
+                 McObject(name="", address=section.address, ext=section.ext, length=section.length, components=[section])
+             )
+         last_section = result_sections[-1]
+         last_ext = last_section.ext
+     return result_sections
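
Editorial note: a small worked example (not part of the package diff) of the merging behaviour above, assuming the McObject keyword arguments visible in the constructor call (name, address, ext, length) are also accepted for plain chunks. Chunks that touch end-to-end within the same address extension are merged up to upper_bound bytes; a gap starts a new block.

    from pyxcp.cpp_ext.cpp_ext import McObject
    from pyxcp.daq_stim.optimize import make_continuous_blocks

    chunks = [
        McObject(name="b", address=0x1002, ext=0, length=4),
        McObject(name="a", address=0x1000, ext=0, length=2),
        McObject(name="c", address=0x2000, ext=0, length=2),
    ]
    # upper_bound ~ max. ODT payload size; upper_bound_initial ~ first ODT (may be smaller).
    blocks = make_continuous_blocks(chunks, upper_bound=7, upper_bound_initial=7)
    for blk in blocks:
        print(hex(blk.address), blk.length)
    # Expected: 0x1000 with length 6 (a and b merged), then 0x2000 with length 2.
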
pyxcp/daq_stim/optimize/binpacking.py
@@ -0,0 +1,41 @@
+ #!/usr/bin/env python
+ """Bin-packing algorithms."""
+
+ from typing import List, Optional
+
+ from pyxcp.cpp_ext.cpp_ext import Bin
+
+
+ def first_fit_decreasing(items, bin_size: int, initial_bin_size: Optional[int] = None) -> List[Bin]:
+     """Bin-packing with the first-fit-decreasing algorithm.
+
+     Parameters
+     ----------
+     items: list
+         items that need to be stored/allocated.
+
+     bin_size: int
+
+     Returns
+     -------
+     list
+         Resulting bins
+     """
+     if initial_bin_size is None:
+         initial_bin_size = bin_size
+     # bin_size = max(bin_size, initial_bin_size)
+     bins = [Bin(size=initial_bin_size)] # Initial bin
+     for item in sorted(items, key=lambda x: x.length, reverse=True):
+         if item.length > bin_size:
+             raise ValueError(f"Item {item!r} is too large to fit in a {bin_size} byte sized bin.")
+         for bin in bins:
+             if bin.residual_capacity >= item.length:
+                 bin.append(item)
+                 bin.residual_capacity -= item.length
+                 break
+         else:
+             new_bin = Bin(size=bin_size)
+             bins.append(new_bin)
+             new_bin.append(item)
+             new_bin.residual_capacity -= item.length
+     return bins
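
Editorial note: continuing the sketch above (not part of the package diff), the merged blocks are then packed into ODT-sized bins. The Bin attributes used here (size, residual_capacity, append) are exactly the ones the function relies on; the concrete sizes are illustrative only.

    from pyxcp.cpp_ext.cpp_ext import McObject
    from pyxcp.daq_stim.optimize.binpacking import first_fit_decreasing

    blocks = [
        McObject(name="x", address=0x1000, ext=0, length=6),
        McObject(name="y", address=0x2000, ext=0, length=2),
        McObject(name="z", address=0x3000, ext=0, length=4),
    ]
    # 7-byte ODT payloads, but only 6 usable bytes in the first ODT (e.g. one byte taken by a timestamp).
    bins = first_fit_decreasing(blocks, bin_size=7, initial_bin_size=6)
    for b in bins:
        print(b.residual_capacity)
    # Expected: the 6-byte block fills the first bin (residual 0); the 4- and 2-byte blocks
    # share a second, regular 7-byte bin (residual 1).
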
pyxcp/daq_stim/scheduler.cpp
@@ -0,0 +1,62 @@
+ #include "scheduler.hpp"
+
+ #if defined(_WIN32)
+
+     #include <cstdio>
+     #include <cstdint>
+
+ VOID CALLBACK TimerRoutine(PVOID lpParam, BOOLEAN TimerOrWaitFired) {
+     if (lpParam == nullptr) {
+         std::printf("TimerRoutine lpParam is NULL\n");
+         return;
+     }
+
+     const auto* param = static_cast<const int*>(lpParam);
+     std::printf("Timer routine called. Parameter is %d.\n", *param);
+
+     if (TimerOrWaitFired) {
+         std::printf("The wait timed out.\n");
+     } else {
+         std::printf("The wait event was signaled.\n");
+     }
+ }
+
+ #endif // _WIN32
+
+ // Vectorized multiply implementation with bounds checking
+ namespace {
+     constexpr size_t VECTOR_SIZE = 4;
+ }
+
+ #if defined(_M_X64) || defined(_M_IX86) || defined(__SSE__)
+     #include <xmmintrin.h>
+
+ void mul4_vectorized(float* ptr) {
+     if (ptr == nullptr) return;
+
+     __m128 f = _mm_loadu_ps(ptr);
+     f = _mm_mul_ps(f, f);
+     _mm_storeu_ps(ptr, f);
+ }
+
+ #elif defined(_M_ARM64) || defined(__ARM_NEON)
+     #include <arm_neon.h>
+
+ void mul4_vectorized(float* ptr) {
+     if (ptr == nullptr) return;
+
+     float32x4_t f = vld1q_f32(ptr);
+     f = vmulq_f32(f, f);
+     vst1q_f32(ptr, f);
+ }
+
+ #else
+ // Scalar fallback
+ void mul4_vectorized(float* ptr) {
+     if (ptr == nullptr) return;
+
+     for (size_t i = 0; i < VECTOR_SIZE; ++i) {
+         ptr[i] *= ptr[i];
+     }
+ }
+ #endif
pyxcp/daq_stim/scheduler.hpp
@@ -0,0 +1,75 @@
+
+
+ #ifndef STIM_SCHEDULER_HPP
+ #define STIM_SCHEDULER_HPP
+
+ #if !defined(_CRT_SECURE_NO_WARNINGS)
+     #define _CRT_SECURE_NO_WARNINGS (1)
+ #endif
+
+ #include <stdio.h>
+
+ #if defined(_WIN32)
+     #include <windows.h>
+
+     #include <thread>
+
+ VOID CALLBACK TimerRoutine(PVOID lpParam, BOOLEAN TimerOrWaitFired);
+
+ struct Scheduler {
+     Scheduler() = default;
+     ~Scheduler() = default;
+
+     bool start_thread() noexcept {
+         if (timer_thread.joinable()) {
+             return false;
+         }
+
+         m_TimerQueue = CreateTimerQueue();
+         if (NULL == m_TimerQueue) {
+             printf("CreateTimerQueue failed (%d)\n", GetLastError());
+             return false;
+         }
+
+         // Set a timer that first fires after 1 ms and then every 500 ms.
+         if (!CreateTimerQueueTimer(&m_timer, m_TimerQueue, (WAITORTIMERCALLBACK)TimerRoutine, nullptr, 1, 500, 0)) {
+             printf("CreateTimerQueueTimer failed (%d)\n", GetLastError());
+             return false;
+         }
+
+         stop_timer_thread_flag = false;
+         timer_thread = std::jthread([this]() {
+             while (!stop_timer_thread_flag) {
+                 printf("ENTER SLEEP loop!!!\n");
+                 SleepEx(INFINITE, TRUE);
+                 stop_timer_thread_flag = TRUE;
+             }
+         });
+         return true;
+     }
+
+     bool stop_thread() noexcept {
+         if (!timer_thread.joinable()) {
+             return false;
+         }
+         stop_timer_thread_flag = true;
+         // my_queue.put(std::nullopt);
+         timer_thread.join();
+         return true;
+     }
+
+     std::jthread timer_thread{};
+     bool stop_timer_thread_flag{};
+     HANDLE m_timer{};
+     HANDLE m_TimerQueue;
+ };
+ #else
+
+ struct Scheduler {
+     Scheduler() = default;
+     ~Scheduler() = default;
+ };
+
+ #endif
+
+ #endif // STIM_SCHEDULER_HPP
Binary file: pyxcp/daq_stim/stim.cp310-win_arm64.pyd
Binary file: pyxcp/daq_stim/stim.cp311-win_arm64.pyd
Binary file: pyxcp/daq_stim/stim.cp312-win_arm64.pyd
Binary file: pyxcp/daq_stim/stim.cp313-win_arm64.pyd
Binary file: pyxcp/daq_stim/stim.cp314-win_arm64.pyd
pyxcp/daq_stim/stim.cpp
@@ -0,0 +1,13 @@
+
+ #if defined(_MSC_VER)
+     #pragma comment(lib, "Winmm.lib")
+     #pragma comment(lib, "Avrt.lib")
+ #endif
+
+ #include "stim.hpp"
+
+ void make_dto() {
+ }
+
+ void init() {
+ }