turbine-lib 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
turbine_lib/datfile.py ADDED
@@ -0,0 +1,338 @@
+ import logging
+
+ from binarydata import BinaryData
+ from database import Database
+ from datexportapi import DatExportApi
+ from ddssubfile import DDSSubfile
+ from fontsubfile import SubfileFONT
+ from subfile import FILE_TYPE, file_type_from_file_contents, string_from_file_type
+ from subfiledata import SubfileData
+ from textsubfile import TextSubfile
+
+
+ class SubfileInfo:
+
+     def __init__(self):
+         self.file_id = -1
+         self.size = -1
+         self.iteration = -1
+
+     def __lt__(self, other):
+         return self.file_id < other.file_id
+
+
+ class DatFile:
+     # Class variable - shared by all instances
+     api_ = DatExportApi()
+
+     def __init__(self, file_handle):
+         self.file_handle_ = file_handle
+         self.initialized_ = False
+         self.files_info_ = {}  # Dictionary mapping file_id to SubfileInfo
+         self.filename_ = ""
+         self.export_data_buf_ = BinaryData(bytearray(64 * 1024 * 1024))  # 64 MB - max file size
+
+     def __del__(self):
+         self.deinit()
+
+     def init(self, filename):
+         if self.initialized_:
+             self.deinit()
+
+         if self.api_.open_dat_file(self.file_handle_, filename, 130) == self.file_handle_:
+             self.initialized_ = True
+             self.filename_ = filename
+             self.load_all_files_info()
+             return True
+
+         return False
+
+     def load_all_files_info(self):
+         subfiles_num = self.api_.get_num_subfiles(self.file_handle_)
+         file_ids = [0] * subfiles_num
+         sizes = [0] * subfiles_num
+         iterations = [0] * subfiles_num
+
+         # Initialize lists with dummy values
+         for i in range(subfiles_num):
+             file_ids[i] = 0
+             sizes[i] = 0
+             iterations[i] = 0
+
+         if subfiles_num > 0:
+             self.api_.get_subfile_sizes(self.file_handle_, file_ids, sizes, iterations, 0, subfiles_num)
+
+         for i in range(subfiles_num):
+             file_info = SubfileInfo()
+             file_info.file_id = file_ids[i]
+             file_info.size = sizes[i]
+             file_info.iteration = iterations[i]
+             self.files_info_[file_info.file_id] = file_info
+
+     def deinit(self):
+         if self.initialized_:
+             self.api_.close_dat_file(self.file_handle_)
+             self.files_info_.clear()
+             self.initialized_ = False
+
+     def initialized(self):
+         return self.initialized_
+
+     def get_filename(self):
+         return self.filename_
+
+     def get_subfile_info(self, file_id):
+         if file_id in self.files_info_:
+             return self.files_info_[file_id]
+         else:
+             return SubfileInfo()
+
+     def get_files_num_in_dat_file(self):
+         return self.api_.get_num_subfiles(self.file_handle_)
+
+     def patch_all_files_from_database(self, db):
+         patched_files_num = 0
+
+         file = db.get_next_file()
+         i = 0
+         total_files = db.count_rows()
+         logging.info("Patching all files from database...")
+
+         while not file.empty():
+             if i > 0 and total_files > 0 and i * 100 // total_files != (i - 1) * 100 // total_files:
+                 logging.info(f"Completed {i * 100 // total_files}%")
+                 print(f"Completed {i * 100 // total_files}%")
+                 # gl.progress_wnd.UpdateStageText(f"Installing translation {i} into the dat file...")
+                 # gl.progress_wnd.UpdatePercent(int(i * 100 / total_files))
+             i += 1
+
+             if not file.options or "fid" not in file.options:
+                 logging.info("Incorrect db entry - no file_id specified")
+                 file = db.get_next_file()
+                 continue
+
+             self.patch_file(file)
+             patched_files_num += 1
+             file = db.get_next_file()
+
+         return patched_files_num
+
+     def patch_file(self, file_data=None, file_id=None, file_type=None, path_to_file=None, version=-1, iteration=-1):
+         if file_data is not None:
+             # Patch with SubfileData
+             if not file_data.options or "fid" not in file_data.options:
+                 print("Trying to patch file, but file id is not specified, skipping!")
+                 return
+
+             file_id_inner = int(file_data.options["fid"])
+
+             if file_id_inner not in self.files_info_:
+                 print(f"Trying to patch file, not existing in files_info. File id = {file_id_inner}")
+                 return
+
+             file_info = self.files_info_[file_id_inner]
+
+             existing_file_version = 0  # will be filled in by api_.get_subfile_data
+             size, existing_file_version = self.api_.get_subfile_data(
+                 self.file_handle_, file_id_inner, self.export_data_buf_.data(), existing_file_version)
+
+             if size <= 0:
+                 print(f"Trying to patch file, not existing in .dat file. File id = {file_id_inner}")
+                 return
+
+             # Check the size is reasonable before slicing the buffer
+             if size > 64 * 1024 * 1024:  # Exceeds buffer size
+                 print(f"File size too large for buffer. File id = {file_id_inner}, Size = {size}")
+                 return
+
+             old_data = self.export_data_buf_.cut_data(0, size)
+
+             # Build file for import
+             try:
+                 file_binary = self.build_for_import(old_data, file_data)
+             except Exception as e:
+                 print(f"Exception in build_for_import: {e} file_id {file_id_inner}")
+                 raise
+
+             if version == -1:
+                 version = existing_file_version
+
+             if iteration == -1:
+                 iteration = file_info.iteration
+
+             # Convert BinaryData to bytearray for ctypes
+             file_bytes = file_binary.data()
+             self.api_.put_subfile_data(
+                 self.file_handle_, file_id_inner, file_bytes, 0, len(file_bytes), version, iteration, False)
+
+         elif file_id is not None and file_type is not None and path_to_file is not None:
+             # Patch with file path
+             new_data = BinaryData(bytearray(64 * 1024 * 1024))
+
+             try:
+                 with open(path_to_file, 'rb') as f:
+                     file_content = f.read()
+                     data_size = len(file_content)
+                     new_data.data_[:data_size] = file_content
+             except Exception as e:
+                 print(f"Error reading file {path_to_file}: {e}")
+                 return
+
+             imported_subfile = SubfileData()
+             imported_subfile.binary_data = new_data.cut_data(0, data_size)
+             imported_subfile.options = {"ext": string_from_file_type(file_type), "fid": file_id}
+
+             self.patch_file(imported_subfile, version=version, iteration=iteration)
+
+     def get_existing_file_type(self, file_id):
+         version = 0
+         size, version = self.api_.get_subfile_data(
+             self.file_handle_, file_id, self.export_data_buf_.data(), version)
+         return file_type_from_file_contents(file_id, self.export_data_buf_)
+
+     def perform_operation_on_all_subfiles(self, operation):
+         if not self.files_info_:
+             self.load_all_files_info()
+
+         print("Performing operation on all files...")
+         i = 0
+         for file_id, info in self.files_info_.items():
+             if i > 0 and len(self.files_info_) > 0 and i * 100 // len(self.files_info_) != (i - 1) * 100 // len(
+                     self.files_info_):
+                 print(f"Completed {i * 100 // len(self.files_info_)}%")
+             operation(info)
+             i += 1
+
+     def export_files_by_type(self, file_type, db_or_path):
+         num_files = 0
+
+         if isinstance(db_or_path, Database):
+             # Export to database
+             def operation(info):
+                 nonlocal num_files
+                 file_type_existing = self.get_existing_file_type(info.file_id)
+                 if file_type_existing == file_type:
+                     self.export_file_by_id(info.file_id, db_or_path)
+                     num_files += 1
+
+             self.perform_operation_on_all_subfiles(operation)
+             return num_files
+         else:
+             # Export to directory
+             path_to_directory = db_or_path
+
+             def operation(info):
+                 nonlocal num_files
+                 file_type_existing = self.get_existing_file_type(info.file_id)
+                 if file_type_existing == file_type:
+                     target_path = f"{path_to_directory}/{info.file_id}"
+                     self.export_file_by_id(info.file_id, target_path)
+                     num_files += 1
+
+             self.perform_operation_on_all_subfiles(operation)
+             return num_files
+
+     def export_file_by_id(self, file_id, target):
+         if file_id <= 0:
+             print("Invalid file ID!")
+             return
+         version = 0
+         size, version = self.api_.get_subfile_data(
+             self.file_handle_, file_id, self.export_data_buf_.data(), version)
+
+         data = self.export_data_buf_.cut_data(0, size)
+         file_result = self.build_for_export(file_id, data)
+
+         if isinstance(target, Database):
+             # Export to database
+             target.push_file(file_result)
+         else:
+             # Export to file path
+             ext = string_from_file_type(self.get_existing_file_type(file_id))
+             target_file_path = f'{target}{ext}'
+             # target_file_path = f'{target}.dds'
+             try:
+                 with open(target_file_path, 'wb') as f:
+                     if ext == ".txt":
+                         f.write(file_result.text_data.encode("utf-8"))
+                     else:
+                         f.write(file_result.binary_data.data())
+             except Exception as e:
+                 print(f"Error writing file {target_file_path}: {e}")
+
+     def get_file_version(self, file_id):
+         return self.api_.get_subfile_version(self.file_handle_, file_id)
+
+     def get_file(self, file_id):
+         version = 0
+         size, version = self.api_.get_subfile_data(
+             self.file_handle_, file_id, self.export_data_buf_.data(), version)
+         data = self.export_data_buf_.cut_data(0, size)
+         return self.build_for_export(file_id, data)
+
+     def build_for_import(self, old_data, outer_data):
+         if not outer_data.options or "ext" not in outer_data.options:
+             print(f"No extension established for file with id {(outer_data.options or {}).get('fid', 'unknown')}")
+             return BinaryData()
+
+         # In Python, we'll directly call the appropriate subfile class
+         ext = outer_data.options["ext"]
+         if ext == ".txt":
+             return TextSubfile.build_for_import(old_data, outer_data)
+         elif ext == ".dds":
+             for_import = DDSSubfile.build_for_import(old_data, outer_data)
+             return for_import
+         elif ext == ".fontbin":
+             for_import = SubfileFONT.build_for_import(old_data, outer_data)
+             return for_import
+         # Add other file types as needed
+         else:
+             # Default implementation for unknown types
+             return old_data  # Just return the original data
+
+     def compress(self, buffer):
+         # Handle compression (reverse of decompression)
+         decompressed_length = len(buffer)
+
+         # Buffer for the compressed output, with slack space in case the data is incompressible
+         compressed_buffer = bytearray(decompressed_length + 1000)
+         compressed_length = decompressed_length + 1000  # capacity passed to compress2; the call returns the actual size
+
+         # Compress the data
+         compressed_length = self.api_.compress2(compressed_buffer, compressed_length, buffer.data(),
+                                                 decompressed_length, 9)
+
+         # Create final buffer with a 4-byte length header
+         final_buffer = bytearray(4 + compressed_length)
+
+         # Write decompressed length as header (little endian)
+         final_buffer[0] = decompressed_length & 0xFF
+         final_buffer[1] = (decompressed_length >> 8) & 0xFF
+         final_buffer[2] = (decompressed_length >> 16) & 0xFF
+         final_buffer[3] = (decompressed_length >> 24) & 0xFF
+
+         # Copy compressed data after the header
+         for i in range(compressed_length):
+             final_buffer[4 + i] = compressed_buffer[i]
+
+         # Return header + compressed payload as a new buffer
+         return BinaryData(final_buffer)
+
+     def build_for_export(self, file_id, inner_data):
+         file_type = file_type_from_file_contents(file_id, inner_data)
+         result = SubfileData()
+
+         if file_type == FILE_TYPE.TEXT:
+             result = TextSubfile.build_for_export(inner_data)
+         elif file_type == FILE_TYPE.DDS:
+             result = DDSSubfile.build_for_export(inner_data)
+         elif file_type == FILE_TYPE.FONT:
+             result = SubfileFONT.build_for_export(inner_data)
+         # Add other file types as needed
+         else:
+             # Default implementation for unknown types
+             result.binary_data = inner_data
+
+         result.options["fid"] = file_id
+         return result
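
Note on the block layout used by compress above: the method emits a 4-byte little-endian decompressed-length header followed by the compressed payload, and the compress2(dest, dest_len, src, src_len, 9) call mirrors zlib's classic compress2 signature at level 9. Assuming the payload is ordinary zlib data (the native binding itself is not shown in this diff), a minimal standalone sketch of the same framing looks like this; the compress_block/decompress_block names are illustrative, not part of the package:

import struct
import zlib

def compress_block(raw: bytes) -> bytes:
    # 4-byte little-endian decompressed length, then the level-9 zlib payload
    return struct.pack("<I", len(raw)) + zlib.compress(raw, 9)

def decompress_block(block: bytes) -> bytes:
    # Read the header, inflate the rest, and verify the recorded length
    (expected_len,) = struct.unpack_from("<I", block, 0)
    raw = zlib.decompress(block[4:])
    if len(raw) != expected_len:
        raise ValueError("length header does not match decompressed size")
    return raw

Storing the decompressed size up front lets the reader allocate the output buffer once before inflating, which matches the fixed 64 MB export buffer used elsewhere in DatFile.
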
turbine_lib/ddssubfile.py ADDED
@@ -0,0 +1,305 @@
+ from subfile import Subfile, FILE_TYPE
+ from binarydata import BinaryData
+ from subfiledata import SubfileData
+
+
+ class DDSSubfile(Subfile):
+     """Implementation of Subfile for DDS type files"""
+
+     @staticmethod
+     def build_for_export(file_data):
+         if file_data.empty() or len(file_data) < 256:
+             return SubfileData()
+
+         dds_data = BinaryData(bytearray(len(file_data) - 24 + 128))
+         for i in range(128):
+             dds_data[i] = 0
+
+         # Copy data from offset 24 in file_data to offset 128 in dds_data
+         for i in range(len(file_data) - 24):
+             dds_data[128 + i] = file_data[24 + i]
+
+         if len(file_data) >= 20 and file_data[16] == 0x44 and file_data[17] == 0x58 and file_data[18] == 0x54:  # "DXT"
+
+             if file_data[19] == 0x31:  # DXT1
+                 dds_data[0] = 0x44  # 'D'
+                 dds_data[1] = 0x44  # 'D'
+                 dds_data[2] = 0x53  # 'S'
+                 dds_data[3] = 0x20  # ' '
+
+                 dds_data[4] = 0x7C
+
+                 dds_data[8] = 0x7
+                 dds_data[9] = 0x10
+
+                 # width, height
+                 dds_data[12] = file_data[12]
+                 dds_data[13] = file_data[13]
+                 dds_data[14] = file_data[14]
+                 dds_data[15] = file_data[15]
+
+                 dds_data[16] = file_data[8]
+                 dds_data[17] = file_data[9]
+                 dds_data[18] = file_data[10]
+                 dds_data[19] = file_data[11]
+
+                 dds_data[76] = 0x20
+                 dds_data[80] = 0x4
+
+                 dds_data[84] = 0x44  # 'D'
+                 dds_data[85] = 0x58  # 'X'
+                 dds_data[86] = 0x54  # 'T'
+                 dds_data[87] = 0x31  # '1'
+             elif file_data[19] == 0x33:  # DXT3
+                 dds_data[0] = 0x44  # 'D'
+                 dds_data[1] = 0x44  # 'D'
+                 dds_data[2] = 0x53  # 'S'
+                 dds_data[3] = 0x20  # ' '
+
+                 dds_data[4] = 0x7C
+
+                 dds_data[8] = 0x7
+                 dds_data[9] = 0x10
+
+                 # width, height
+                 dds_data[12] = file_data[12]
+                 dds_data[13] = file_data[13]
+                 dds_data[14] = file_data[14]
+                 dds_data[15] = file_data[15]
+
+                 dds_data[16] = file_data[8]
+                 dds_data[17] = file_data[9]
+                 dds_data[18] = file_data[10]
+                 dds_data[19] = file_data[11]
+
+                 dds_data[22] = 0x1
+
+                 dds_data[76] = 0x20
+                 dds_data[80] = 0x4
+
+                 dds_data[84] = 0x44  # 'D'
+                 dds_data[85] = 0x58  # 'X'
+                 dds_data[86] = 0x54  # 'T'
+                 dds_data[87] = 0x33  # '3'
+
+                 dds_data[108] = 0x8
+                 dds_data[109] = 0x10
+                 dds_data[110] = 0x40
+
+             elif file_data[19] == 0x35:  # DXT5
+                 dds_data[0] = 0x44  # 'D'
+                 dds_data[1] = 0x44  # 'D'
+                 dds_data[2] = 0x53  # 'S'
+                 dds_data[3] = 0x20  # ' '
+
+                 dds_data[4] = 0x7C
+
+                 dds_data[8] = 0x7
+                 dds_data[9] = 0x10
+                 dds_data[10] = 0x8
+
+                 # width, height
+                 dds_data[12] = file_data[12]
+                 dds_data[13] = file_data[13]
+                 dds_data[14] = file_data[14]
+                 dds_data[15] = file_data[15]
+
+                 dds_data[16] = file_data[8]
+                 dds_data[17] = file_data[9]
+                 dds_data[18] = file_data[10]
+                 dds_data[19] = file_data[11]
+
+                 dds_data[22] = 0x1
+                 dds_data[28] = 0x1
+
+                 dds_data[76] = 0x20
+                 dds_data[80] = 0x4
+
+                 dds_data[84] = 0x44  # 'D'
+                 dds_data[85] = 0x58  # 'X'
+                 dds_data[86] = 0x54  # 'T'
+                 dds_data[87] = 0x35  # '5'
+
+                 # dds_data[88] = 0x20
+                 dds_data[88] = 0x20
+                 dds_data[94] = 0xFF
+                 dds_data[97] = 0xFF
+                 dds_data[100] = 0xFF
+                 dds_data[107] = 0xFF
+                 dds_data[113] = 0x10
+             else:
+                 dds_data[0] = 0x44  # 'D'
+                 dds_data[1] = 0x44  # 'D'
+                 dds_data[2] = 0x53  # 'S'
+                 dds_data[3] = 0x20  # ' '
+
+                 dds_data[4] = 0x7C
+
+                 dds_data[8] = 0x7
+                 dds_data[9] = 0x10
+
+                 # width, height
+                 dds_data[12] = file_data[12]
+                 dds_data[13] = file_data[13]
+                 dds_data[14] = file_data[14]
+                 dds_data[15] = file_data[15]
+                 dds_data[16] = file_data[8]
+                 dds_data[17] = file_data[9]
+                 dds_data[18] = file_data[10]
+                 dds_data[19] = file_data[11]
+
+                 dds_data[76] = 0x20
+                 dds_data[80] = 0x40
+                 # dds_data[88] = 0x18
+                 dds_data[88] = 0x08
+                 dds_data[94] = 0xFF
+                 dds_data[97] = 0xFF
+                 dds_data[100] = 0xFF
+
+         # compression = file_data.to_number(4, 0x10)
+         #
+         # if compression == 20:  # 14 00 00 00 - 888 (R8G8B8)
+         #     dds_data[0x4C] = 0x20  # ?
+         #     dds_data[0x50] = 0x40  # compressed or not
+         #
+         #     dds_data[0x58] = 0x18  # bytes per pixel
+         #     dds_data[0x5E] = 0xFF
+         #     dds_data[0x61] = 0xFF
+         #     dds_data[0x64] = 0xFF
+         # elif compression == 21:  # 15 00 00 00 - 8888 (R8G8B8A8)
+         #     dds_data[0x4C] = 0x20  # ?
+         #     dds_data[0x50] = 0x40  # compressed or not
+         #
+         #     dds_data[0x58] = 0x20  # bytes per pixel
+         #     dds_data[0x5E] = 0xFF
+         #     dds_data[0x61] = 0xFF
+         #     dds_data[0x64] = 0xFF
+         #     dds_data[0x6B] = 0xFF
+         # elif compression == 28:  # 1C 00 00 00 - 332 (?)
+         #     dds_data[0x4C] = 0x20  # ?
+         #     dds_data[0x50] = 0x40  # compressed or not
+         #
+         #     dds_data[0x58] = 0x08  # bytes per pixel
+         #     dds_data[0x5E] = 0xFF
+         #     dds_data[0x61] = 0xFF
+         #     dds_data[0x64] = 0xFF
+         # elif compression == 827611204:  # 44 58 54 31 - DXT1
+         #     dds_data[76] = 32
+         #     dds_data[80] = 4
+         #
+         #     dds_data[84] = 68
+         #     dds_data[85] = 88
+         #     dds_data[86] = 84
+         #     dds_data[87] = 49
+         # elif compression == 861165636:  # 44 58 54 33 - DXT3
+         #     dds_data[22] = 1
+         #     dds_data[76] = 32
+         #     dds_data[80] = 4
+         #
+         #     dds_data[84] = 68
+         #     dds_data[85] = 88
+         #     dds_data[86] = 84
+         #     dds_data[87] = 51
+         #
+         #     dds_data[108] = 8
+         #     dds_data[109] = 16
+         #     dds_data[110] = 64
+         # elif compression == 894720068:  # 44 58 54 35 - DXT5
+         #     dds_data[10] = 8
+         #     dds_data[22] = 1
+         #     dds_data[28] = 1
+         #     dds_data[76] = 32
+         #     dds_data[80] = 4
+         #
+         #     dds_data[84] = 68
+         #     dds_data[85] = 88
+         #     dds_data[86] = 84
+         #     dds_data[87] = 53
+         #
+         #     dds_data[88] = 32
+         #     dds_data[94] = 255
+         #     dds_data[97] = 255
+         #     dds_data[100] = 255
+         #     dds_data[107] = 255
+         #     dds_data[109] = 16
+         # else:
+         #     print("Unknown header format.")
+         #     return SubfileData()
+
+         result = SubfileData()
+         result.binary_data = dds_data
+         result.options = {"ext": ".dds"}
+         return result
+
+     @staticmethod
+     def build_for_import(old_data, data):
+         file_size = BinaryData.from_number(4, len(data.binary_data) - 128)
+         import_header = BinaryData(bytearray(20))
+         for i in range(20):
+             import_header[i] = 0
+         # file_id
+         # import_header[0] = 0x0C
+         # import_header[1] = 0x3B
+         # import_header[2] = 0x00
+         # import_header[3] = 0x41
+
+         old_data_arr = old_data.data()
+         import_header[0] = old_data_arr[0]
+         import_header[1] = old_data_arr[1]
+         import_header[2] = old_data_arr[2]
+         import_header[3] = old_data_arr[3]
+
+         # import_header[4] = 0x0F
+         import_header[4] = old_data_arr[4]
+         import_header[5] = old_data_arr[5]
+         import_header[6] = old_data_arr[6]
+         import_header[7] = old_data_arr[7]
+
+         # width height
+         # import_header[8] = 0x00
+         # import_header[9] = 0x01
+         # import_header[10] = 0x00
+         # import_header[11] = 0x00
+         # import_header[12] = 0x44
+         # import_header[13] = 0x01
+         # import_header[14] = 0x00
+         # import_header[15] = 0x00
+
+         import_header[8] = data.binary_data[16]
+         import_header[9] = data.binary_data[17]
+         import_header[10] = data.binary_data[18]
+         import_header[11] = data.binary_data[19]
+         import_header[12] = data.binary_data[12]
+         import_header[13] = data.binary_data[13]
+         import_header[14] = data.binary_data[14]
+         import_header[15] = data.binary_data[15]
+
+         # import_header[16] = 0x1C
+         import_header[16] = old_data_arr[16]
+         import_header[17] = old_data_arr[17]
+         import_header[18] = old_data_arr[18]
+         import_header[19] = old_data_arr[19]
+
+         # print("new_header:")
+         # DDSSubfile.print_hex_bytes(import_header.data())
+
+         # print("old_header:")
+         # DDSSubfile.print_hex_bytes(old_data.data())
+
+         # return old_data.cut_data(0, 20) + file_size + data.binary_data.cut_data(128)
+         return import_header.cut_data(0, 20) + file_size + data.binary_data.cut_data(128)
+
+     @staticmethod
+     def print_hex_bytes(data, count=20):
+         """
+         Print the hex representation of the first `count` bytes of a bytearray.
+
+         Args:
+             data: a bytearray or bytes object
+             count: number of bytes to print (default 20)
+         """
+         # Take the first `count` bytes
+         bytes_to_print = data[:count]
+
+         # Format with an f-string (Python 3.6+)
+         print("Hex (f-string):", ' '.join(f'{b:02X}' for b in bytes_to_print))