pyxcp 0.25.5__cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. pyxcp/__init__.py +20 -0
  2. pyxcp/aml/EtasCANMonitoring.a2l +82 -0
  3. pyxcp/aml/EtasCANMonitoring.aml +67 -0
  4. pyxcp/aml/XCP_Common.aml +408 -0
  5. pyxcp/aml/XCPonCAN.aml +78 -0
  6. pyxcp/aml/XCPonEth.aml +33 -0
  7. pyxcp/aml/XCPonFlx.aml +113 -0
  8. pyxcp/aml/XCPonSxI.aml +66 -0
  9. pyxcp/aml/XCPonUSB.aml +106 -0
  10. pyxcp/aml/ifdata_CAN.a2l +20 -0
  11. pyxcp/aml/ifdata_Eth.a2l +11 -0
  12. pyxcp/aml/ifdata_Flx.a2l +94 -0
  13. pyxcp/aml/ifdata_SxI.a2l +13 -0
  14. pyxcp/aml/ifdata_USB.a2l +81 -0
  15. pyxcp/asam/__init__.py +0 -0
  16. pyxcp/asam/types.py +131 -0
  17. pyxcp/asamkeydll +0 -0
  18. pyxcp/asamkeydll.c +116 -0
  19. pyxcp/asamkeydll.sh +2 -0
  20. pyxcp/checksum.py +732 -0
  21. pyxcp/cmdline.py +71 -0
  22. pyxcp/config/__init__.py +1257 -0
  23. pyxcp/config/legacy.py +120 -0
  24. pyxcp/constants.py +47 -0
  25. pyxcp/cpp_ext/__init__.py +0 -0
  26. pyxcp/cpp_ext/aligned_buffer.hpp +168 -0
  27. pyxcp/cpp_ext/bin.hpp +105 -0
  28. pyxcp/cpp_ext/blockmem.hpp +58 -0
  29. pyxcp/cpp_ext/cpp_ext.cpython-310-darwin.so +0 -0
  30. pyxcp/cpp_ext/cpp_ext.cpython-311-darwin.so +0 -0
  31. pyxcp/cpp_ext/cpp_ext.cpython-312-darwin.so +0 -0
  32. pyxcp/cpp_ext/daqlist.hpp +374 -0
  33. pyxcp/cpp_ext/event.hpp +67 -0
  34. pyxcp/cpp_ext/extension_wrapper.cpp +208 -0
  35. pyxcp/cpp_ext/framing.hpp +360 -0
  36. pyxcp/cpp_ext/helper.hpp +280 -0
  37. pyxcp/cpp_ext/mcobject.hpp +248 -0
  38. pyxcp/cpp_ext/sxi_framing.hpp +332 -0
  39. pyxcp/cpp_ext/tsqueue.hpp +46 -0
  40. pyxcp/daq_stim/__init__.py +291 -0
  41. pyxcp/daq_stim/optimize/__init__.py +67 -0
  42. pyxcp/daq_stim/optimize/binpacking.py +41 -0
  43. pyxcp/daq_stim/scheduler.cpp +62 -0
  44. pyxcp/daq_stim/scheduler.hpp +75 -0
  45. pyxcp/daq_stim/stim.cpp +13 -0
  46. pyxcp/daq_stim/stim.cpython-310-darwin.so +0 -0
  47. pyxcp/daq_stim/stim.cpython-311-darwin.so +0 -0
  48. pyxcp/daq_stim/stim.cpython-312-darwin.so +0 -0
  49. pyxcp/daq_stim/stim.hpp +604 -0
  50. pyxcp/daq_stim/stim_wrapper.cpp +50 -0
  51. pyxcp/dllif.py +100 -0
  52. pyxcp/errormatrix.py +878 -0
  53. pyxcp/examples/conf_can.toml +19 -0
  54. pyxcp/examples/conf_can_user.toml +16 -0
  55. pyxcp/examples/conf_can_vector.json +11 -0
  56. pyxcp/examples/conf_can_vector.toml +11 -0
  57. pyxcp/examples/conf_eth.toml +9 -0
  58. pyxcp/examples/conf_nixnet.json +20 -0
  59. pyxcp/examples/conf_socket_can.toml +12 -0
  60. pyxcp/examples/run_daq.py +165 -0
  61. pyxcp/examples/xcp_policy.py +60 -0
  62. pyxcp/examples/xcp_read_benchmark.py +38 -0
  63. pyxcp/examples/xcp_skel.py +48 -0
  64. pyxcp/examples/xcp_unlock.py +36 -0
  65. pyxcp/examples/xcp_user_supplied_driver.py +43 -0
  66. pyxcp/examples/xcphello.py +65 -0
  67. pyxcp/examples/xcphello_recorder.py +107 -0
  68. pyxcp/master/__init__.py +10 -0
  69. pyxcp/master/errorhandler.py +677 -0
  70. pyxcp/master/master.py +2641 -0
  71. pyxcp/py.typed +0 -0
  72. pyxcp/recorder/.idea/.gitignore +8 -0
  73. pyxcp/recorder/.idea/misc.xml +4 -0
  74. pyxcp/recorder/.idea/modules.xml +8 -0
  75. pyxcp/recorder/.idea/recorder.iml +6 -0
  76. pyxcp/recorder/.idea/sonarlint/issuestore/3/8/3808afc69ac1edb9d760000a2f137335b1b99728 +7 -0
  77. pyxcp/recorder/.idea/sonarlint/issuestore/9/a/9a2aa4db38d3115ed60da621e012c0efc0172aae +0 -0
  78. pyxcp/recorder/.idea/sonarlint/issuestore/b/4/b49006702b459496a8e8c94ebe60947108361b91 +0 -0
  79. pyxcp/recorder/.idea/sonarlint/issuestore/index.pb +7 -0
  80. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/3/8/3808afc69ac1edb9d760000a2f137335b1b99728 +0 -0
  81. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/9/a/9a2aa4db38d3115ed60da621e012c0efc0172aae +0 -0
  82. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/b/4/b49006702b459496a8e8c94ebe60947108361b91 +0 -0
  83. pyxcp/recorder/.idea/sonarlint/securityhotspotstore/index.pb +7 -0
  84. pyxcp/recorder/.idea/vcs.xml +10 -0
  85. pyxcp/recorder/__init__.py +96 -0
  86. pyxcp/recorder/build_clang.cmd +1 -0
  87. pyxcp/recorder/build_clang.sh +2 -0
  88. pyxcp/recorder/build_gcc.cmd +1 -0
  89. pyxcp/recorder/build_gcc.sh +2 -0
  90. pyxcp/recorder/build_gcc_arm.sh +2 -0
  91. pyxcp/recorder/converter/__init__.py +444 -0
  92. pyxcp/recorder/lz4.c +2829 -0
  93. pyxcp/recorder/lz4.h +879 -0
  94. pyxcp/recorder/lz4hc.c +2041 -0
  95. pyxcp/recorder/lz4hc.h +413 -0
  96. pyxcp/recorder/mio.hpp +1714 -0
  97. pyxcp/recorder/reader.hpp +138 -0
  98. pyxcp/recorder/reco.py +278 -0
  99. pyxcp/recorder/recorder.rst +0 -0
  100. pyxcp/recorder/rekorder.cpp +59 -0
  101. pyxcp/recorder/rekorder.cpython-310-darwin.so +0 -0
  102. pyxcp/recorder/rekorder.cpython-311-darwin.so +0 -0
  103. pyxcp/recorder/rekorder.cpython-312-darwin.so +0 -0
  104. pyxcp/recorder/rekorder.hpp +274 -0
  105. pyxcp/recorder/setup.py +41 -0
  106. pyxcp/recorder/test_reko.py +34 -0
  107. pyxcp/recorder/unfolder.hpp +1354 -0
  108. pyxcp/recorder/wrap.cpp +184 -0
  109. pyxcp/recorder/writer.hpp +302 -0
  110. pyxcp/scripts/__init__.py +0 -0
  111. pyxcp/scripts/pyxcp_probe_can_drivers.py +20 -0
  112. pyxcp/scripts/xcp_examples.py +64 -0
  113. pyxcp/scripts/xcp_fetch_a2l.py +40 -0
  114. pyxcp/scripts/xcp_id_scanner.py +18 -0
  115. pyxcp/scripts/xcp_info.py +159 -0
  116. pyxcp/scripts/xcp_profile.py +26 -0
  117. pyxcp/scripts/xmraw_converter.py +31 -0
  118. pyxcp/stim/__init__.py +0 -0
  119. pyxcp/tests/test_asam_types.py +24 -0
  120. pyxcp/tests/test_binpacking.py +186 -0
  121. pyxcp/tests/test_can.py +1324 -0
  122. pyxcp/tests/test_checksum.py +95 -0
  123. pyxcp/tests/test_daq.py +193 -0
  124. pyxcp/tests/test_daq_opt.py +426 -0
  125. pyxcp/tests/test_frame_padding.py +156 -0
  126. pyxcp/tests/test_framing.py +262 -0
  127. pyxcp/tests/test_master.py +2116 -0
  128. pyxcp/tests/test_transport.py +177 -0
  129. pyxcp/tests/test_utils.py +30 -0
  130. pyxcp/timing.py +60 -0
  131. pyxcp/transport/__init__.py +13 -0
  132. pyxcp/transport/base.py +484 -0
  133. pyxcp/transport/base_transport.hpp +0 -0
  134. pyxcp/transport/can.py +660 -0
  135. pyxcp/transport/eth.py +254 -0
  136. pyxcp/transport/hdf5_policy.py +167 -0
  137. pyxcp/transport/sxi.py +209 -0
  138. pyxcp/transport/transport_ext.cpython-310-darwin.so +0 -0
  139. pyxcp/transport/transport_ext.cpython-311-darwin.so +0 -0
  140. pyxcp/transport/transport_ext.cpython-312-darwin.so +0 -0
  141. pyxcp/transport/transport_ext.hpp +214 -0
  142. pyxcp/transport/transport_wrapper.cpp +249 -0
  143. pyxcp/transport/usb_transport.py +229 -0
  144. pyxcp/types.py +987 -0
  145. pyxcp/utils/__init__.py +127 -0
  146. pyxcp/utils/cli.py +78 -0
  147. pyxcp/vector/__init__.py +0 -0
  148. pyxcp/vector/map.py +82 -0
  149. pyxcp-0.25.5.dist-info/METADATA +341 -0
  150. pyxcp-0.25.5.dist-info/RECORD +153 -0
  151. pyxcp-0.25.5.dist-info/WHEEL +6 -0
  152. pyxcp-0.25.5.dist-info/entry_points.txt +9 -0
  153. pyxcp-0.25.5.dist-info/licenses/LICENSE +165 -0
@@ -0,0 +1,291 @@
1
+ #!/usr/bin/env python
2
+
3
+ from contextlib import suppress
4
+ from time import time_ns
5
+ from typing import Dict, List, Optional, TextIO, Union
6
+
7
+ from pyxcp.cpp_ext.cpp_ext import DaqList, PredefinedDaqList
8
+
9
+ from pyxcp import types
10
+ from pyxcp.config import get_application
11
+ from pyxcp.daq_stim.optimize import make_continuous_blocks
12
+ from pyxcp.daq_stim.optimize.binpacking import first_fit_decreasing
13
+ from pyxcp.recorder import DaqOnlinePolicy as _DaqOnlinePolicy
14
+ from pyxcp.recorder import DaqRecorderPolicy as _DaqRecorderPolicy
15
+ from pyxcp.recorder import MeasurementParameters
16
+ from pyxcp.utils import CurrentDatetime
17
+
18
# Size in bytes of the DAQ packet identification field, keyed by the
# identification-field type name taken from the slave's DAQ key byte.
DAQ_ID_FIELD_SIZE = {
    "IDF_ABS_ODT_NUMBER": 1,
    "IDF_REL_ODT_NUMBER_ABS_DAQ_LIST_NUMBER_BYTE": 2,
    "IDF_REL_ODT_NUMBER_ABS_DAQ_LIST_NUMBER_WORD": 3,
    "IDF_REL_ODT_NUMBER_ABS_DAQ_LIST_NUMBER_WORD_ALIGNED": 4,
}

# Size in bytes of the DAQ timestamp field, keyed by the slave's
# reported timestamp size code.
DAQ_TIMESTAMP_SIZE = {
    "S1": 1,
    "S2": 2,
    "S4": 4,
}
30
+
31
+
32
class DaqProcessor:
    """Configure, arm, start and stop DAQ measurement on an XCP slave.

    Takes a list of DAQ lists (dynamic :class:`DaqList` or
    :class:`PredefinedDaqList`) and drives the XCP master through the
    standard DAQ setup sequence (free/alloc, ODT programming, list arming).

    NOTE(review): this class reads ``self.xcp_master`` but never assigns it;
    presumably a subclass or the surrounding framework injects it -- confirm
    against callers before instantiating directly.
    """

    def __init__(self, daq_lists: List[Union[DaqList, PredefinedDaqList]]):
        self.daq_lists = daq_lists
        # Parallel list of flags: True where the corresponding entry is a
        # PredefinedDaqList (i.e. must not be allocated/programmed by us).
        self.is_predefined = [isinstance(d, PredefinedDaqList) for d in daq_lists]
        self.log = get_application().log
        # Flag indicating a fatal OS-level error occurred during DAQ (e.g., disk full, out-of-memory)
        self._fatal_os_error: bool = False

    def setup(self, start_datetime: Optional[CurrentDatetime] = None, write_multiple: bool = True):
        """Interrogate the slave, lay out ODTs, program and arm all DAQ lists.

        Parameters
        ----------
        start_datetime: CurrentDatetime, optional
            Timestamp origin of the measurement; defaults to *now*.
        write_multiple: bool
            NOTE(review): accepted but never used in this body -- confirm
            whether WRITE_DAQ_MULTIPLE support was meant to be wired up.

        Raises
        ------
        RuntimeError
            If the slave does not support DAQ at all.
        TypeError
            If the slave's DAQ_INFO is inconsistent/corrupted, or a STATIC
            configuration is combined with dynamic DAQ lists.
        """
        if not self.xcp_master.slaveProperties.supportsDaq:
            raise RuntimeError("DAQ functionality is not supported.")
        self.daq_info = self.xcp_master.getDaqInfo(include_event_lists=False)
        if start_datetime is None:
            start_datetime = CurrentDatetime(time_ns())
        self.start_datetime = start_datetime
        try:
            processor = self.daq_info.get("processor")
            properties = processor.get("properties")
            resolution = self.daq_info.get("resolution")
            # STATIC slaves cannot allocate lists at runtime, so every
            # configured list must be predefined.
            if properties["configType"] == "STATIC" and not all(self.is_predefined):
                raise TypeError(
                    "DAQ configuration is static, but in your configuration are only dynamic DAQ lists -- cannot proceed."
                )
            self.supports_timestampes = properties["timestampSupported"]
            self.supports_prescaler = properties["prescalerSupported"]
            self.supports_pid_off = properties["pidOffSupported"]
            if self.supports_timestampes:
                mode = resolution.get("timestampMode")
                self.ts_fixed = mode.get("fixed")
                self.ts_size = DAQ_TIMESTAMP_SIZE[mode.get("size")]
                # Scale factor converts raw slave ticks to nanoseconds.
                ts_factor = types.DAQ_TIMESTAMP_UNIT_TO_NS[mode.get("unit")]
                ts_ticks = resolution.get("timestampTicks")
                self.ts_scale_factor = ts_factor * ts_ticks
            else:
                self.ts_size = 0
                self.ts_fixed = False
                self.ts_scale_factor = 0.0
            key_byte = processor.get("keyByte")
            header_len = DAQ_ID_FIELD_SIZE[key_byte["identificationField"]]
            max_dto = self.xcp_master.slaveProperties.maxDto
            self.min_daq = processor.get("minDaq")
            max_odt_entry_size = resolution.get("maxOdtEntrySizeDaq")
            # Usable payload per ODT is bounded by both the slave's ODT entry
            # limit and the DTO size minus the identification header.
            max_payload_size = min(max_odt_entry_size, max_dto - header_len)
            # First ODT may contain timestamp.
            self.selectable_timestamps = False
            max_payload_size_first = max_payload_size
            if not self.supports_timestampes:
                self.log.info("No timestamp support")
            else:
                if self.ts_fixed:
                    self.log.debug("Fixed timestamps")
                    max_payload_size_first = max_payload_size - self.ts_size
                else:
                    self.log.debug("Variable timestamps.")
                    self.selectable_timestamps = True
        except Exception as e:
            raise TypeError(f"DAQ_INFO corrupted: {e}") from e

        # DAQ optimization.
        # For dynamic DaqList instances, compute physical layout; skip for PredefinedDaqList.
        for idx, daq_list in enumerate(self.daq_lists):
            if isinstance(daq_list, PredefinedDaqList):
                continue
            if self.selectable_timestamps:
                # Per-list timestamp choice shrinks the first ODT's payload.
                if daq_list.enable_timestamps:
                    max_payload_size_first = max_payload_size - self.ts_size
                else:
                    max_payload_size_first = max_payload_size
            ttt = make_continuous_blocks(daq_list.measurements, max_payload_size, max_payload_size_first)
            daq_list.measurements_opt = first_fit_decreasing(ttt, max_payload_size, max_payload_size_first)
        byte_order = 0 if self.xcp_master.slaveProperties.byteOrder == "INTEL" else 1
        self._first_pids = []
        daq_count = len(self.daq_lists)

        # Decide whether DAQ allocation must be performed.
        config_static = self.daq_info.get("processor", {}).get("properties", {}).get("configType") == "STATIC"

        if not config_static:
            # For dynamic configuration, program only dynamic (non-predefined) DAQ lists.
            self.xcp_master.freeDaq()
            # Allocate the number of DAQ lists required.
            self.xcp_master.allocDaq(daq_count)
            measurement_list = []
            # DAQ list numbering starts after the slave's MIN_DAQ predefined lists.
            for i, daq_list in enumerate(self.daq_lists, self.min_daq):
                if isinstance(daq_list, PredefinedDaqList):
                    # Skip allocation for predefined DAQ lists.
                    continue
                measurements = daq_list.measurements_opt
                measurement_list.append((i, measurements))
                odt_count = len(measurements)
                self.xcp_master.allocOdt(i, odt_count)
            # Iterate again over ODT entries -- we need to respect sequencing requirements.
            for i, measurements in measurement_list:
                for j, measurement in enumerate(measurements):
                    entry_count = len(measurement.entries)
                    self.xcp_master.allocOdtEntry(i, j, entry_count)
            # Write DAQs (only for dynamic lists)
            for i, daq_list in enumerate(self.daq_lists, self.min_daq):
                if isinstance(daq_list, PredefinedDaqList):
                    continue
                measurements = daq_list.measurements_opt
                for j, measurement in enumerate(measurements):
                    if len(measurement.entries) == 0:
                        continue  # CAN special case: No room for data in first ODT.
                    self.xcp_master.setDaqPtr(i, j, 0)
                    for entry in measurement.entries:
                        # 0xFF: full element bit mask (write whole entry).
                        self.xcp_master.writeDaq(0xFF, entry.length, entry.ext, entry.address)
        else:
            # STATIC configuration on the slave: skip allocation and programming; lists/ODTs are predefined.
            pass

        # arm DAQ lists -- this is technically a function on its own.
        first_daq_list = 0 if config_static else self.min_daq
        for i, daq_list in enumerate(self.daq_lists, first_daq_list):
            mode = 0x00
            # 0x10: DAQ list mode bit enabling timestamped transfer.
            if self.supports_timestampes and (self.ts_fixed or (self.selectable_timestamps and daq_list.enable_timestamps)):
                mode = 0x10
            # 0x02: STIM direction bit.
            if daq_list.stim:
                mode |= 0x02
            ###
            ## mode |= 0x20
            ###
            self.xcp_master.setDaqListMode(
                daq_list_number=i,
                mode=mode,
                event_channel_number=daq_list.event_num,
                prescaler=daq_list.prescaler,
                priority=daq_list.priority,
            )
            # 0x02: SELECT -- arm the list; the response carries its first PID.
            res = self.xcp_master.startStopDaqList(0x02, i)
            self._first_pids.append(res.firstPid)
        self.measurement_params = MeasurementParameters(
            byte_order,
            header_len,
            self.supports_timestampes,
            self.ts_fixed,
            self.supports_prescaler,
            self.selectable_timestamps,
            self.ts_scale_factor,
            self.ts_size,
            self.min_daq,
            self.start_datetime,
            self.daq_lists,
            self._first_pids,
        )
        self.set_parameters(self.measurement_params)

    def start(self):
        """Start all selected DAQ lists synchronously (START_STOP_SYNCH 0x01)."""
        self.xcp_master.startStopSynch(0x01)

    def stop(self):
        """Stop the measurement, or shut down the transport if a fatal OS error occurred."""
        # If a fatal OS error occurred during acquisition, skip sending stop to the slave to avoid
        # cascading timeouts/unrecoverable errors and shut down transport gracefully instead.
        if getattr(self, "_fatal_os_error", False):
            with suppress(Exception):
                self.log.error(
                    "DAQ stop skipped due to previous fatal OS error (e.g., disk full or out-of-memory). Closing transport."
                )
            try:
                # Best-effort: stop listener and close transport so threads finish cleanly.
                if hasattr(self.xcp_master, "transport") and self.xcp_master.transport is not None:
                    # Signal listeners to stop
                    with suppress(Exception):
                        if hasattr(self.xcp_master.transport, "closeEvent"):
                            self.xcp_master.transport.closeEvent.set()

                    # Close transport connection
                    with suppress(Exception):
                        self.xcp_master.transport.close()
            finally:
                # NOTE(review): a bare ``return`` inside ``finally`` also swallows
                # any exception raised in the ``try`` body -- appears to be a
                # deliberate best-effort shutdown, but worth confirming.
                return
        self.xcp_master.startStopSynch(0x00)

    def first_pids(self):
        """Return the list of first PIDs collected while arming the DAQ lists."""
        return self._first_pids
207
+
208
+
209
class DaqRecorder(DaqProcessor, _DaqRecorderPolicy):
    """Record DAQ measurements to a file via the C++ recorder policy.

    Combines :class:`DaqProcessor` (DAQ configuration/arming) with the
    recorder write-policy mixin.
    """

    def __init__(self, daq_lists: List[DaqList], file_name: str, prealloc: int = 200, chunk_size: int = 1):
        # Initialize both bases explicitly; cooperative super() is not used here.
        DaqProcessor.__init__(self, daq_lists)
        _DaqRecorderPolicy.__init__(self)
        self.chunk_size = chunk_size
        self.prealloc = prealloc
        self.file_name = file_name

    def initialize(self):
        """Create the on-disk writer and initialize the recorder policy."""
        serialized_params = self.measurement_params.dumps()
        _DaqRecorderPolicy.create_writer(self, self.file_name, self.prealloc, self.chunk_size, serialized_params)
        _DaqRecorderPolicy.initialize(self)

    def finalize(self):
        """Flush and close the recorder."""
        _DaqRecorderPolicy.finalize(self)

    def start(self):
        """Begin the synchronized measurement."""
        DaqProcessor.start(self)
227
+
228
+
229
class DaqOnlinePolicy(DaqProcessor, _DaqOnlinePolicy):
    """Base class for on-line measurements.

    Resolves the multiple-inheritance setup between the DAQ processor and
    the on-line policy mixin by initializing each base explicitly.
    """

    def __init__(self, daq_lists: List[DaqList]):
        # Explicit base-class initialization (no cooperative super() chain).
        DaqProcessor.__init__(self, daq_lists)
        _DaqOnlinePolicy.__init__(self)

    def start(self):
        """Begin the synchronized measurement."""
        DaqProcessor.start(self)
240
+
241
+
242
class DaqToCsv(DaqOnlinePolicy):
    """Save a measurement as CSV files (one per DAQ-list)."""

    def initialize(self):
        """Open one CSV file per non-STIM DAQ-list and write its header row."""
        self.log.debug("DaqCsv::Initialize()")
        self.files: Dict[int, TextIO] = {}
        for num, daq_list in enumerate(self.daq_lists):
            if daq_list.stim:
                continue  # STIM lists produce no measurement data to record.
            # Explicit encoding so the output is UTF-8 regardless of platform default.
            out_file = open(f"{daq_list.name}.csv", "w", encoding="utf-8")
            self.files[num] = out_file
            hdr = ",".join(["timestamp0", "timestamp1"] + [h[0] for h in daq_list.headers])
            out_file.write(f"{hdr}\n")

    def on_daq_list(self, daq_list: int, timestamp0: int, timestamp1: int, payload: list):
        """Append one decoded DAQ sample as a CSV row.

        Parameters
        ----------
        daq_list: int
            Index of the DAQ-list the sample belongs to (key into ``self.files``).
        timestamp0, timestamp1: int
            Sample timestamps.
        payload: list
            Decoded measurement values for this sample.
        """
        # Guard against hard OS errors (e.g., disk full) during file writes.
        if getattr(self, "_fatal_os_error", False):
            return
        try:
            # BUGFIX: the header row is joined with "," but data rows were joined
            # with ", " -- the stray space produced ragged CSV fields that do not
            # line up with the header. Use a consistent "," separator.
            row = ",".join(str(x) for x in payload)
            self.files[daq_list].write(f"{timestamp0},{timestamp1},{row}\n")
        except (OSError, MemoryError) as ex:
            # Mark fatal condition to alter shutdown path and avoid further writes/commands.
            self._fatal_os_error = True
            with suppress(Exception):
                self.log.critical(f"DAQ file write failed: {ex.__class__.__name__}: {ex}. Initiating graceful shutdown.")

            # Stop listener to prevent more DAQ traffic and avoid thread crashes.
            with suppress(Exception):
                if hasattr(self.xcp_master, "transport") and self.xcp_master.transport is not None:
                    if hasattr(self.xcp_master.transport, "closeEvent"):
                        self.xcp_master.transport.closeEvent.set()
            # Best-effort: close any opened files to flush buffers and release resources.
            with suppress(Exception):
                for f in getattr(self, "files", {}).values():
                    with suppress(Exception):
                        f.flush()
                    with suppress(Exception):
                        f.close()
            # Do not re-raise; allow the system to continue to a controlled shutdown.
            return

    def finalize(self):
        """Close all CSV files opened by ``initialize``."""
        self.log.debug("DaqCsv::finalize()")
        ##
        ## NOTE: `finalize` is guaranteed to be called, but `Initialize` may fail for reasons.
        ## So if you allocate resources in `Initialize` check if this really happened.
        ##
        if hasattr(self, "files"):
            for f in self.files.values():
                f.close()
@@ -0,0 +1,67 @@
1
+ #!/usr/bin/env python
2
+ """Optimize data-structures like memory sections."""
3
+
4
+ from itertools import groupby
5
+ from operator import attrgetter
6
+ from typing import List
7
+
8
+ from pyxcp.cpp_ext.cpp_ext import McObject
9
+
10
+
11
def make_continuous_blocks(chunks: List[McObject], upper_bound=None, upper_bound_initial=None) -> List[McObject]:
    """Try to make continuous blocks from a list of small, unordered `chunks`.

    Adjacent/overlapping chunks (same address extension) are merged into
    larger sections, optionally capped at `upper_bound` bytes per section
    (`upper_bound_initial` caps the first section, e.g. when the first ODT
    also carries a timestamp).

    Parameters
    ----------
    chunks: list of `McObject`
    upper_bound: int, optional
        Maximum length a merged section may grow to.
    upper_bound_initial: int, optional
        Maximum length for the first section; defaults to `upper_bound`.

    Returns
    -------
    sorted list of `McObject`
    """

    def key_func(x):
        # Sort/group key: (address extension, address).
        return (x.ext, x.address)

    values = []
    # 1. Group by (ext, address).
    for _key, value in groupby(sorted(chunks, key=key_func), key=key_func):
        # 2. Pick the largest one.
        values.append(max(value, key=attrgetter("length")))
    result_sections = []
    last_section = None
    last_ext = None
    first_section = True
    if upper_bound_initial is None:
        upper_bound_initial = upper_bound
    while values:
        section = values.pop(0)
        # Merge candidate: starts at or before the end of the previous
        # section AND shares its address extension.
        if (last_section and section.address <= last_section.address + last_section.length) and not (section.ext != last_ext):
            last_end = last_section.address + last_section.length - 1
            current_end = section.address + section.length - 1
            if last_end > section.address:
                # Chunk lies entirely inside the previous section -- nothing to add.
                # NOTE(review): partial overlaps where current_end > last_end are
                # also dropped here; confirm that is intended.
                pass
            else:
                offset = current_end - last_end
                if upper_bound:
                    if first_section:
                        # NOTE(review): this permanently replaces `upper_bound`
                        # with `upper_bound_initial` for all later sections as
                        # well -- verify the bound is meant to stay swapped.
                        upper_bound = upper_bound_initial
                        first_section = False
                    if last_section.length + offset <= upper_bound:
                        last_section.length += offset
                        last_section.add_component(section)
                    else:
                        # Would exceed the cap: start a fresh section instead.
                        result_sections.append(
                            McObject(name="", address=section.address, ext=section.ext, length=section.length, components=[section])
                        )
                else:
                    # No cap: always extend the previous section.
                    last_section.length += offset
                    last_section.add_component(section)
        else:
            # Create a new section.
            result_sections.append(
                McObject(name="", address=section.address, ext=section.ext, length=section.length, components=[section])
            )
        last_section = result_sections[-1]
        last_ext = last_section.ext
    return result_sections
@@ -0,0 +1,41 @@
1
+ #!/usr/bin/env python
2
+ """Bin-packing algorithms."""
3
+
4
+ from typing import List, Optional
5
+
6
+ from pyxcp.cpp_ext.cpp_ext import Bin
7
+
8
+
9
def first_fit_decreasing(items, bin_size: int, initial_bin_size: Optional[int] = None) -> List[Bin]:
    """Bin-packing with the first-fit-decreasing algorithm.

    Items are placed largest-first into the first bin with enough residual
    capacity; a new bin of `bin_size` is opened when none fits.

    Parameters
    ----------
    items: list
        Items that need to be stored/allocated; each must expose ``length``.

    bin_size: int
        Capacity of every bin after the first.

    initial_bin_size: int, optional
        Capacity of the very first bin; defaults to `bin_size`.

    Returns
    -------
    list
        Resulting bins.

    Raises
    ------
    ValueError
        If an item is larger than `bin_size`.
    """
    if initial_bin_size is None:
        initial_bin_size = bin_size
    # The first bin may have a different (e.g. timestamp-reduced) capacity.
    result = [Bin(size=initial_bin_size)]
    ordered = sorted(items, key=lambda entry: entry.length, reverse=True)
    for item in ordered:
        if item.length > bin_size:
            raise ValueError(f"Item {item!r} is too large to fit in a {bin_size} byte sized bin.")
        # First-fit scan: earliest bin with room wins.
        target = next((candidate for candidate in result if candidate.residual_capacity >= item.length), None)
        if target is None:
            target = Bin(size=bin_size)
            result.append(target)
        target.append(item)
        target.residual_capacity -= item.length
    return result
@@ -0,0 +1,62 @@
1
+ #include "scheduler.hpp"
2
+
3
#if defined(_WIN32)

    #include <cstdio>
    #include <cstdint>

// Timer-queue callback (see CreateTimerQueueTimer in scheduler.hpp).
// `lpParam` is the user context pointer registered with the timer; here it
// is interpreted as a pointer to int and merely printed (diagnostic stub).
// `TimerOrWaitFired` is TRUE when the wait timed out (the normal case for a
// periodic timer) and FALSE when the wait event was signaled.
VOID CALLBACK TimerRoutine(PVOID lpParam, BOOLEAN TimerOrWaitFired) {
    if (lpParam == nullptr) {
        std::printf("TimerRoutine lpParam is NULL\n");
        return;
    }

    const auto* param = static_cast<const int*>(lpParam);
    std::printf("Timer routine called. Parameter is %d.\n", *param);

    if (TimerOrWaitFired) {
        std::printf("The wait timed out.\n");
    } else {
        std::printf("The wait event was signaled.\n");
    }
}

#endif  // _WIN32
25
+
26
// Vectorized multiply implementation with bounds checking.
// Squares four consecutive floats in place: ptr[i] *= ptr[i] for i in [0, 4).
// Three build-time variants: SSE (x86), NEON (ARM), and a scalar fallback.
// All variants tolerate a null pointer by returning early.
// NOTE(review): the caller must guarantee `ptr` addresses at least
// VECTOR_SIZE (4) floats; no length check is possible here.
namespace {
    constexpr size_t VECTOR_SIZE = 4;
}

#if defined(_M_X64) || defined(_M_IX86) || defined(__SSE__)
    #include <xmmintrin.h>

// SSE variant: unaligned 128-bit load, square, unaligned store.
void mul4_vectorized(float* ptr) {
    if (ptr == nullptr) return;

    __m128 f = _mm_loadu_ps(ptr);
    f = _mm_mul_ps(f, f);
    _mm_storeu_ps(ptr, f);
}

#elif defined(_M_ARM64) || defined(__ARM_NEON)
    #include <arm_neon.h>

// NEON variant: 128-bit load, square, store.
void mul4_vectorized(float* ptr) {
    if (ptr == nullptr) return;

    float32x4_t f = vld1q_f32(ptr);
    f = vmulq_f32(f, f);
    vst1q_f32(ptr, f);
}

#else
// Scalar fallback: plain loop over the four elements.
void mul4_vectorized(float* ptr) {
    if (ptr == nullptr) return;

    for (size_t i = 0; i < VECTOR_SIZE; ++i) {
        ptr[i] *= ptr[i];
    }
}
#endif
@@ -0,0 +1,75 @@
1
+
2
+
3
+ #ifndef STIM_SCHEDULER_HPP
4
+ #define STIM_SCHEDULER_HPP
5
+
6
+ #if !defined(_CRT_SECURE_NO_WARNINGS)
7
+ #define _CRT_SECURE_NO_WARNINGS (1)
8
+ #endif
9
+
10
+ #include <stdio.h>
11
+
12
#if defined(_WIN32)
    #include <windows.h>

    #include <thread>

// Timer-queue callback implemented in scheduler.cpp.
VOID CALLBACK TimerRoutine(PVOID lpParam, BOOLEAN TimerOrWaitFired);

// Windows implementation: wraps a timer-queue timer plus a helper thread
// that sleeps alertably while the timer fires (1 ms due time, 500 ms period).
struct Scheduler {
    Scheduler() = default;
    // NOTE(review): the destructor does not call stop_thread() and the timer
    // queue is never released with DeleteTimerQueue -- confirm whether the
    // caller is responsible for cleanup.
    ~Scheduler() = default;

    // Create the timer queue and timer, then launch the sleeper thread.
    // Returns false if a thread is already running or a Win32 call fails.
    bool start_thread() noexcept {
        if (timer_thread.joinable()) {
            return false;
        }

        m_TimerQueue = CreateTimerQueue();
        if (NULL == m_TimerQueue) {
            // NOTE(review): GetLastError() returns a DWORD; "%d" should
            // strictly be "%lu".
            printf("CreateTimerQueue failed (%d)\n", GetLastError());
            return false;
        }

        // Set a timer to call the timer routine in 10 seconds.
        // NOTE(review): comment and code disagree -- the timer is created
        // with a 1 ms due time and 500 ms period, not 10 seconds.
        if (!CreateTimerQueueTimer(&m_timer, m_TimerQueue, (WAITORTIMERCALLBACK)TimerRoutine, nullptr, 1, 500, 0)) {
            printf("CreateTimerQueueTimer failed (%d)\n", GetLastError());
            return false;
        }

        stop_timer_thread_flag = false;
        timer_thread = std::jthread([this]() {
            while (!stop_timer_thread_flag) {
                printf("ENTER SLEEP loop!!!\n");
                // Alertable sleep: wakes only on an APC delivered to this
                // thread. NOTE(review): timer-queue callbacks run on pool
                // threads, not as APCs here, so this may never wake and
                // stop_thread()'s join could hang -- verify.
                SleepEx(INFINITE, TRUE);
                stop_timer_thread_flag = TRUE;
            }
        });
        return true;
    }

    // Request the sleeper thread to stop and join it.
    // Returns false if no thread is running.
    bool stop_thread() noexcept {
        if (!timer_thread.joinable()) {
            return false;
        }
        stop_timer_thread_flag = true;
        // my_queue.put(std::nullopt);
        timer_thread.join();
        return true;
    }

    std::jthread timer_thread{};
    // Plain bool shared between threads without synchronization --
    // NOTE(review): consider std::atomic<bool>.
    bool stop_timer_thread_flag{};
    HANDLE m_timer{};
    HANDLE m_TimerQueue;
};
#else

// Non-Windows builds: no-op placeholder with the same type name.
struct Scheduler {
    Scheduler() = default;
    ~Scheduler() = default;
};

#endif
74
+
75
+ #endif // STIM_SCHEDULER_HPP
@@ -0,0 +1,13 @@
1
+
2
+ #if defined(_MSC_VER)
3
+ #pragma comment(lib, "Winmm.lib")
4
+ #pragma comment(lib, "Avrt.lib")
5
+ #endif
6
+
7
+ #include "stim.hpp"
8
+
9
// Placeholder: DTO (data transfer object) construction for STIM is not yet
// implemented.
void make_dto() {
}

// Placeholder: STIM module initialization is not yet implemented.
void init() {
}