dcmjs 0.49.3 → 0.50.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +50 -0
- package/build/dcmjs.es.js +1071 -112
- package/build/dcmjs.es.js.map +1 -1
- package/build/dcmjs.js +1071 -112
- package/build/dcmjs.js.map +1 -1
- package/build/dcmjs.min.js +2 -2
- package/build/dcmjs.min.js.map +1 -1
- package/generate/dictionary.mjs +56029 -0
- package/package.json +18 -2
- package/.babelrc +0 -9
- package/.github/workflows/lint-and-format.yml +0 -27
- package/.github/workflows/publish-package.yml +0 -45
- package/.github/workflows/tests.yml +0 -24
- package/.prettierrc +0 -5
- package/.vscode/extensions.json +0 -7
- package/.vscode/settings.json +0 -8
- package/changelog.md +0 -31
- package/docs/ArrayBufferExpanderListener.md +0 -303
- package/docs/AsyncDicomReader-skill.md +0 -730
- package/eslint.config.mjs +0 -30
- package/generate-dictionary.js +0 -145
- package/jest.setup.js +0 -39
- package/netlify.toml +0 -22
- package/rollup.config.mjs +0 -57
- package/test/ArrayBufferExpanderListener.test.js +0 -365
- package/test/DICOMWEB.test.js +0 -1
- package/test/DicomMetaDictionary.test.js +0 -73
- package/test/SequenceOfItems.test.js +0 -86
- package/test/adapters.test.js +0 -43
- package/test/anonymizer.test.js +0 -176
- package/test/arrayItem.json +0 -351
- package/test/async-data.test.js +0 -575
- package/test/data-encoding.test.js +0 -59
- package/test/data-options.test.js +0 -199
- package/test/data.test.js +0 -1776
- package/test/derivations.test.js +0 -1
- package/test/helper/DicomDataReadBufferStreamBuilder.js +0 -89
- package/test/information-filter.test.js +0 -165
- package/test/integration/DicomMessage.readFile.test.js +0 -50
- package/test/lossless-read-write.test.js +0 -1407
- package/test/mocks/minimal_fields_dataset.json +0 -17
- package/test/mocks/null_number_vrs_dataset.json +0 -102
- package/test/normalizers.test.js +0 -38
- package/test/odd-frame-bit-data.js +0 -138
- package/test/rawTags.js +0 -170
- package/test/readBufferStream.test.js +0 -158
- package/test/sample-dicom.json +0 -904
- package/test/sample-op.lei +0 -0
- package/test/sample-sr.json +0 -997
- package/test/sr-tid.test.js +0 -251
- package/test/testUtils.js +0 -85
- package/test/utilities/deepEqual.test.js +0 -87
- package/test/utilities.test.js +0 -205
- package/test/video-test-dict.js +0 -40
- package/test/writeBufferStream.test.js +0 -149
package/test/async-data.test.js
DELETED
@@ -1,575 +0,0 @@
-import fs from "fs";
-import dcmjs from "../src/index.js";
-import {
-    TagHex,
-    IMPLICIT_LITTLE_ENDIAN,
-    UNDEFINED_LENGTH
-} from "../src/constants/dicom";
-import { getTestDataset } from "./testUtils.js";
-import { videoTestMeta, videoTestDict } from "./video-test-dict.js";
-import { oddFrameBitData } from "./odd-frame-bit-data.js";
-
-const { DicomDict, DicomMessage } = dcmjs.data;
-const { AsyncDicomReader } = dcmjs.async;
-const { DicomMetadataListener } = dcmjs.utilities;
-
-// Ensure DicomMessage is set on DicomDict
-DicomDict.setDicomMessageClass(DicomMessage);
-
-describe("AsyncDicomReader", () => {
-    test("DICOM part 10 complete listener uncompressed", async () => {
-        const buffer = fs.readFileSync("test/sample-dicom.dcm");
-        const reader = new AsyncDicomReader();
-        const listener = new DicomMetadataListener();
-
-        reader.stream.addBuffer(buffer);
-        reader.stream.setComplete();
-
-        const { meta, dict } = await reader.readFile({ listener });
-        expect(meta[TagHex.TransferSyntaxUID].Value[0]).toBe(
-            "1.2.840.10008.1.2"
-        );
-        expect(dict[TagHex.Rows].Value[0]).toBe(512);
-    });
-
-    test("async reader listen test uncompressed", async () => {
-        // Don't use such a small chunk size in production, but doing it
-        // here stresses the buffer stream read, and so does using an odd
-        // prime
-        const stream = fs.createReadStream("test/sample-dicom.dcm", {
-            highWaterMark: 37
-        });
-        const reader = new AsyncDicomReader();
-        const listener = new DicomMetadataListener();
-
-        const readPromise = reader.stream.fromAsyncStream(stream);
-        let isRead = false;
-        readPromise.then(() => {
-            isRead = true;
-        });
-
-        const { meta, dict } = await reader.readFile({ listener });
-        expect(isRead).toBe(true);
-        expect(meta[TagHex.TransferSyntaxUID].Value[0]).toBe(
-            "1.2.840.10008.1.2"
-        );
-        expect(dict[TagHex.Rows].Value[0]).toBe(512);
-        // Uncompressed PixelData now matches compressed: an array of frames, each frame an array of chunks
-        const frames = dict[TagHex.PixelData].Value;
-        expect(Array.isArray(frames)).toBe(true);
-        expect(frames.length).toBe(1);
-        expect(Array.isArray(frames[0])).toBe(true);
-        expect(frames[0].length).toBe(1);
-        expect(frames[0][0].byteLength).toBe(512 * 512 * 2);
-    });
-
-    test("async reader listen test compressed", async () => {
-        const reader = new AsyncDicomReader();
-
-        const stream = fs.createReadStream("test/sample-op.dcm", {
-            highWaterMark: 256
-        });
-        reader.stream.fromAsyncStream(stream);
-
-        const { meta, dict } = await reader.readFile();
-        expect(meta[TagHex.TransferSyntaxUID].Value[0]).toBe(
-            "1.2.840.10008.1.2.4.70"
-        );
-        expect(dict[TagHex.Rows].Value[0]).toBe(1536);
-        const frames = dict[TagHex.PixelData].Value;
-        expect(Array.isArray(frames)).toBe(true);
-        expect(frames.length).toBe(1);
-        // Frames are always arrays, even for single fragments
-        expect(Array.isArray(frames[0])).toBe(true);
-        const frame0 = frames[0];
-        const chunk0 = frame0[0];
-        expect(chunk0).toBeInstanceOf(ArrayBuffer);
-        expect(chunk0.byteLength).toBe(101304);
-    });
-
-    test("compressed multiframe data test", async () => {
-        const url =
-            "https://github.com/dcmjs-org/data/releases/download/binary-parsing-stressors/multiframe-ultrasound.dcm";
-        const dcmPath = await getTestDataset(url, "multiframe-ultrasound.dcm");
-        const reader = new AsyncDicomReader();
-
-        const stream = fs.createReadStream(dcmPath, {
-            highWaterMark: 4001
-        });
-        reader.stream.fromAsyncStream(stream);
-
-        const { meta, dict } = await reader.readFile();
-        expect(meta[TagHex.TransferSyntaxUID].Value[0]).toBe(
-            "1.2.840.10008.1.2.4.50"
-        );
-        const numFrames = dict[TagHex.NumberOfFrames].Value[0];
-        expect(numFrames).toBe(29);
-        const frames = dict[TagHex.PixelData].Value;
-        expect(frames.length).toBe(numFrames);
-    });
-
-    test("compressed fragmented multiframe data test", async () => {
-        const url =
-            "https://github.com/dcmjs-org/data/releases/download/encapsulation/encapsulation-fragment-multiframe.dcm";
-        const dcmPath = await getTestDataset(
-            url,
-            "encapsulation-fragment-multiframe-b.dcm"
-        );
-        const reader = new AsyncDicomReader();
-
-        const stream = fs.createReadStream(dcmPath, {
-            highWaterMark: 4001
-        });
-        reader.stream.fromAsyncStream(stream);
-
-        const { meta, dict } = await reader.readFile();
-        expect(meta[TagHex.TransferSyntaxUID].Value[0]).toBe(
-            "1.2.840.10008.1.2.4.90"
-        );
-        const numFrames = dict[TagHex.NumberOfFrames].Value[0];
-        expect(numFrames).toBe(2);
-        const frames = dict[TagHex.PixelData].Value;
-        expect(frames.length).toBe(numFrames);
-        expect(frames[0].length).toBe(2);
-        expect(frames[1].length).toBe(2);
-    });
-
-    test("raw LEI encoded file test", async () => {
-        const buffer = fs.readFileSync("test/sample-op.lei");
-        const reader = new AsyncDicomReader();
-        const listener = new DicomMetadataListener();
-
-        reader.stream.addBuffer(buffer);
-        reader.stream.setComplete();
-
-        const { meta, dict } = await reader.readFile({ listener });
-
-        // Raw LEI files have no meta header
-        expect(meta).toEqual({});
-
-        // Verify transfer syntax was detected as LEI
-        expect(listener.information.transferSyntaxUid).toBe(
-            "1.2.840.10008.1.2"
-        );
-
-        // Verify we can read some basic tags from the dataset
-        expect(dict).toBeDefined();
-        expect(Object.keys(dict).length).toBeGreaterThan(0);
-
-        // Verify we can read Rows if present
-        if (dict[TagHex.Rows]) {
-            expect(dict[TagHex.Rows].Value[0]).toBeDefined();
-        }
-    });
-
-    describe("LEI object data tests", () => {
-        let leiBuffer;
-        let parsedDict;
-
-        beforeAll(async () => {
-            // Create an LEI object containing a sequence with a code value
-            const dicomDict = new DicomDict({
-                [TagHex.TransferSyntaxUID]: {
-                    vr: "UI",
-                    Value: [IMPLICIT_LITTLE_ENDIAN]
-                }
-            });
-
-            // Add a sequence (Concept Code Sequence - 0040A043) with a single item
-            // containing a Code Value (00080100)
-            dicomDict.dict["0040A043"] = {
-                vr: "SQ",
-                Value: [
-                    {
-                        "00080100": {
-                            vr: "SH",
-                            Value: ["TEST123"]
-                        }
-                    }
-                ]
-            };
-
-            // Add Per-frame Functional Groups Sequence (52009229) with two frames
-            // Each frame contains a Functional Group Sequence (52009230) with one functional group
-            dicomDict.dict["52009229"] = {
-                vr: "SQ",
-                Value: [
-                    {
-                        // Frame 1: Contains a Functional Group Sequence with one functional group
-                        52009230: {
-                            vr: "SQ",
-                            Value: [
-                                {
-                                    // Functional group containing a code value
-                                    "00080100": {
-                                        vr: "SH",
-                                        Value: ["FRAME1_CODE"]
-                                    }
-                                }
-                            ]
-                        }
-                    },
-                    {
-                        // Frame 2: Contains a Functional Group Sequence with one functional group
-                        52009230: {
-                            vr: "SQ",
-                            Value: [
-                                {
-                                    // Functional group containing a code value
-                                    "00080100": {
-                                        vr: "SH",
-                                        Value: ["FRAME2_CODE"]
-                                    }
-                                }
-                            ]
-                        }
-                    }
-                ]
-            };
-
-            // Write to buffer (this creates a Part 10 file)
-            leiBuffer = dicomDict.write();
-
-            // Parse with AsyncDicomReader
-            const reader = new AsyncDicomReader();
-            const listener = new DicomMetadataListener();
-
-            reader.stream.addBuffer(leiBuffer);
-            reader.stream.setComplete();
-
-            const result = await reader.readFile({ listener });
-            parsedDict = result.dict;
-        });
-
-        test("sequence has a single object containing the code value", () => {
-            // Check that the sequence exists
-            expect(parsedDict["0040A043"]).toBeDefined();
-            expect(parsedDict["0040A043"].vr).toBe("SQ");
-            expect(parsedDict["0040A043"].Value).toBeDefined();
-            expect(Array.isArray(parsedDict["0040A043"].Value)).toBe(true);
-
-            // Check that the sequence has a single object
-            expect(parsedDict["0040A043"].Value.length).toBe(1);
-
-            // Check that the single object contains the code value
-            const sequenceItem = parsedDict["0040A043"].Value[0];
-            expect(sequenceItem).toBeDefined();
-            expect(sequenceItem["00080100"]).toBeDefined();
-            expect(sequenceItem["00080100"].Value).toBeDefined();
-            expect(sequenceItem["00080100"].Value[0]).toBe("TEST123");
-        });
-
-        test("per-frame functional groups sequence has two frames with functional groups", () => {
-            // Check that the Per-frame Functional Groups Sequence exists
-            expect(parsedDict["52009229"]).toBeDefined();
-            expect(parsedDict["52009229"].vr).toBe("SQ");
-            expect(parsedDict["52009229"].Value).toBeDefined();
-            expect(Array.isArray(parsedDict["52009229"].Value)).toBe(true);
-
-            // Check that the sequence has two frames
-            expect(parsedDict["52009229"].Value.length).toBe(2);
-
-            // Check Frame 1
-            const frame1 = parsedDict["52009229"].Value[0];
-            expect(frame1).toBeDefined();
-            expect(frame1["52009230"]).toBeDefined(); // Functional Group Sequence
-            expect(frame1["52009230"].vr).toBe("SQ");
-            expect(Array.isArray(frame1["52009230"].Value)).toBe(true);
-            expect(frame1["52009230"].Value.length).toBe(1); // Single functional group
-
-            // Check functional group in Frame 1
-            const functionalGroup1 = frame1["52009230"].Value[0];
-            expect(functionalGroup1).toBeDefined();
-            expect(functionalGroup1["00080100"]).toBeDefined();
-            expect(functionalGroup1["00080100"].Value).toBeDefined();
-            expect(functionalGroup1["00080100"].Value[0]).toBe("FRAME1_CODE");
-
-            // Check Frame 2
-            const frame2 = parsedDict["52009229"].Value[1];
-            expect(frame2).toBeDefined();
-            expect(frame2["52009230"]).toBeDefined(); // Functional Group Sequence
-            expect(frame2["52009230"].vr).toBe("SQ");
-            expect(Array.isArray(frame2["52009230"].Value)).toBe(true);
-            expect(frame2["52009230"].Value.length).toBe(1); // Single functional group
-
-            // Check functional group in Frame 2
-            const functionalGroup2 = frame2["52009230"].Value[0];
-            expect(functionalGroup2).toBeDefined();
-            expect(functionalGroup2["00080100"]).toBeDefined();
-            expect(functionalGroup2["00080100"].Value).toBeDefined();
-            expect(functionalGroup2["00080100"].Value[0]).toBe("FRAME2_CODE");
-        });
-    });
-
-    test("video transfer syntax with multiple fragments and maxFragmentSize", async () => {
-        // Create a DICOM file with video transfer syntax (H.264)
-        const videoTransferSyntax = "1.2.840.10008.1.2.4.102"; // MPEG-4 AVC/H.264 High Profile / Level 4.1
-        const maxFragmentSize = 1024; // 1KB for testing
-
-        // Create fragments of different sizes:
-        // - Fragment 1: 512 bytes (smaller than maxFragmentSize)
-        // - Fragment 2: 1024 bytes (exactly maxFragmentSize)
-        // - Fragment 3: 2048 bytes (larger than maxFragmentSize, should be split)
-        const fragment1Size = 512;
-        const fragment2Size = 1024;
-        const fragment3Size = 2048;
-
-        const fragment1 = new Uint8Array(fragment1Size);
-        const fragment2 = new Uint8Array(fragment2Size);
-        const fragment3 = new Uint8Array(fragment3Size);
-
-        // Fill with test data
-        for (let i = 0; i < fragment1Size; i++) {
-            fragment1[i] = 0x01;
-        }
-        for (let i = 0; i < fragment2Size; i++) {
-            fragment2[i] = 0x02;
-        }
-        for (let i = 0; i < fragment3Size; i++) {
-            fragment3[i] = 0x03;
-        }
-
-        // Create DICOM dict and write base file
-        const dicomDict = new DicomDict(videoTestMeta);
-        dicomDict.dict = videoTestDict;
-        const baseBuffer = dicomDict.write();
-
-        // Append compressed pixel data with fragments
-        const WriteBufferStream = dcmjs.data.WriteBufferStream;
-        const writeStream = new WriteBufferStream(null, true);
-
-        // Write the base buffer first
-        const baseArray = new Uint8Array(baseBuffer);
-        for (let i = 0; i < baseArray.length; i++) {
-            writeStream.writeUint8(baseArray[i]);
-        }
-
-        // Pixel Data (7FE0,0010) with undefined length
-        writeStream.writeUint16(0x7fe0);
-        writeStream.writeUint16(0x0010);
-        writeStream.writeAsciiString("OB");
-        writeStream.writeUint16(0x0000); // Reserved
-        writeStream.writeUint32(UNDEFINED_LENGTH); // Undefined length
-
-        // Basic Offset Table (BOT) - empty for video (no offsets)
-        writeStream.writeUint16(0xfffe); // Item tag
-        writeStream.writeUint16(0xe000);
-        writeStream.writeUint32(0x00000000); // Length 0 (no offsets)
-
-        // Fragment 1
-        writeStream.writeUint16(0xfffe); // Item tag
-        writeStream.writeUint16(0xe000);
-        writeStream.writeUint32(fragment1Size);
-        for (let i = 0; i < fragment1Size; i++) {
-            writeStream.writeUint8(fragment1[i]);
-        }
-
-        // Fragment 2
-        writeStream.writeUint16(0xfffe); // Item tag
-        writeStream.writeUint16(0xe000);
-        writeStream.writeUint32(fragment2Size);
-        for (let i = 0; i < fragment2Size; i++) {
-            writeStream.writeUint8(fragment2[i]);
-        }
-
-        // Fragment 3
-        writeStream.writeUint16(0xfffe); // Item tag
-        writeStream.writeUint16(0xe000);
-        writeStream.writeUint32(fragment3Size);
-        for (let i = 0; i < fragment3Size; i++) {
-            writeStream.writeUint8(fragment3[i]);
-        }
-
-        // Sequence Delimitation Item
-        writeStream.writeUint16(0xfffe);
-        writeStream.writeUint16(0xe0dd);
-        writeStream.writeUint32(0x00000000);
-
-        const buffer = writeStream.getBuffer();
-
-        // Read with AsyncDicomReader
-        const reader = new AsyncDicomReader({ maxFragmentSize });
-        const listener = new DicomMetadataListener();
-
-        reader.stream.addBuffer(buffer);
-        reader.stream.setComplete();
-
-        const { meta, dict } = await reader.readFile({ listener });
-
-        // Verify transfer syntax
-        expect(meta[TagHex.TransferSyntaxUID].Value[0]).toBe(
-            videoTransferSyntax
-        );
-
-        // Verify pixel data
-        expect(dict[TagHex.PixelData]).toBeDefined();
-        const frames = dict[TagHex.PixelData].Value;
-
-        // Frames are always arrays, so for video we have a single frame containing all fragments
-        expect(Array.isArray(frames)).toBe(true);
-        expect(frames.length).toBe(1); // Single frame for video
-
-        // Get the fragments array from the first frame (unwrap 1 level if needed)
-        const pixelData = Array.isArray(frames[0][0])
-            ? frames[0][0]
-            : frames[0];
-
-        // For video transfer syntax, all fragments should be combined into a single array
-        // Fragment 3 (2048 bytes) should be split into 2 fragments of 1024 bytes each
-        // So we should have: fragment1 (512), fragment2 (1024), fragment3_part1 (1024), fragment3_part2 (1024)
-        expect(Array.isArray(pixelData)).toBe(true);
-        // Expect 4 after splitting 2048 -> 2x1024; allow an extra nesting level and flatten if needed
-        const flatPixelData = pixelData.flat ? pixelData.flat() : pixelData;
-        expect(flatPixelData.length).toBe(4); // All fragments combined, with fragment3 split
-
-        // Verify fragment 1 (512 bytes, unchanged)
-        expect(flatPixelData[0]).toBeInstanceOf(ArrayBuffer);
-        expect(flatPixelData[0].byteLength).toBe(512);
-        const frag1Data = new Uint8Array(flatPixelData[0]);
-        expect(frag1Data[0]).toBe(0x01);
-        expect(frag1Data[511]).toBe(0x01);
-
-        // Verify fragment 2 (1024 bytes, unchanged)
-        expect(flatPixelData[1]).toBeInstanceOf(ArrayBuffer);
-        expect(flatPixelData[1].byteLength).toBe(1024);
-        const frag2Data = new Uint8Array(flatPixelData[1]);
-        expect(frag2Data[0]).toBe(0x02);
-        expect(frag2Data[1023]).toBe(0x02);
-
-        // Verify fragment 3 part 1 (1024 bytes, split from 2048)
-        expect(flatPixelData[2]).toBeInstanceOf(ArrayBuffer);
-        expect(flatPixelData[2].byteLength).toBe(1024);
-        const frag3Part1Data = new Uint8Array(flatPixelData[2]);
-        expect(frag3Part1Data[0]).toBe(0x03);
-        expect(frag3Part1Data[1023]).toBe(0x03);
-
-        // Verify fragment 3 part 2 (1024 bytes, split from 2048)
-        expect(flatPixelData[3]).toBeInstanceOf(ArrayBuffer);
-        expect(flatPixelData[3].byteLength).toBe(1024);
-        const frag3Part2Data = new Uint8Array(flatPixelData[3]);
-        expect(frag3Part2Data[0]).toBe(0x03);
-        expect(frag3Part2Data[1023]).toBe(0x03);
-    });
-
-    test("readUncompressedBitFrame with 3 frames having odd total bit length", async () => {
-        // Test data: 3 frames, each with 7 bits (odd)
-        // Total: 21 bits (odd, not even byte-aligned, requires 3 bytes)
-
-        // Create pixel data buffer with 3 frames (3 bytes total for 21 bits)
-        const packedData = oddFrameBitData.getPackedData();
-        const pixelDataBuffer = new Uint8Array(packedData);
-
-        // Create AsyncDicomReader and set up the stream
-        const reader = new AsyncDicomReader();
-        const listener = new DicomMetadataListener();
-
-        // Set up listener information
-        listener.information = {
-            rows: oddFrameBitData.rows,
-            columns: oddFrameBitData.columns,
-            samplesPerPixel: oddFrameBitData.samplesPerPixel,
-            bitsAllocated: oddFrameBitData.bitsAllocated,
-            numberOfFrames: oddFrameBitData.numberOfFrames.toString()
-        };
-
-        // Set the listener on the reader (required for readUncompressed to access listener.information)
-        reader.listener = listener;
-
-        // Add pixel data to stream
-        reader.stream.addBuffer(pixelDataBuffer.buffer);
-        reader.stream.setComplete();
-
-        // Create tag info for pixel data
-        const tagInfo = {
-            tag: TagHex.PixelData,
-            length: oddFrameBitData.totalBytes,
-            vr: "OW"
-        };
-
-        // Call readUncompressed, which should detect odd-length frames and call readUncompressedBitFrame
-        // The method expects frames to be stored in an array structure
-        const framesArray = [];
-        listener.startObject(framesArray);
-        await reader.readUncompressed(tagInfo);
-        const frames = listener.pop();
-
-        // Verify pixel data information
-        expect(listener.information.rows).toBe(oddFrameBitData.rows);
-        expect(listener.information.columns).toBe(oddFrameBitData.columns);
-        expect(listener.information.samplesPerPixel).toBe(
-            oddFrameBitData.samplesPerPixel
-        );
-        expect(listener.information.bitsAllocated).toBe(
-            oddFrameBitData.bitsAllocated
-        );
-        expect(listener.information.numberOfFrames).toBe(
-            oddFrameBitData.numberOfFrames.toString()
-        );
-
-        // Verify frames structure
-        expect(Array.isArray(frames)).toBe(true);
-        expect(frames.length).toBe(3);
-
-        // Verify each frame
-        const bytesPerFrame = Math.ceil(oddFrameBitData.bitsPerFrame / 8);
-        expect(bytesPerFrame).toBe(1); // 7 bits = 1 byte
-
-        // Get expected unpacked frames
-        const expectedFrames = oddFrameBitData.getExpectedFrames();
-
-        for (let i = 0; i < frames.length; i++) {
-            expect(Array.isArray(frames[i])).toBe(true);
-            // Each frame should be an array containing the frame data
-            const frameChunks = frames[i];
-            expect(frameChunks.length).toBe(1); // Single chunk per frame (1 byte each)
-
-            // Verify the chunk is an ArrayBuffer
-            expect(frameChunks[0]).toBeInstanceOf(ArrayBuffer);
-            expect(frameChunks[0].byteLength).toBe(bytesPerFrame);
-
-            // Verify the unpacked frame data (each frame starts at byte 0)
-            const frameData = new Uint8Array(frameChunks[0]);
-            const expectedData = expectedFrames[i];
-            expect(frameData.length).toBe(expectedData.length);
-            // Compare the first byte (only 7 bits are valid, but we compare the whole byte)
-            expect(frameData[0]).toBe(expectedData[0]);
-        }
-
-        // Verify total bit length is odd (not even byte-aligned)
-        const totalBits = oddFrameBitData.totalBits;
-        expect(totalBits).toBe(21);
-        expect(totalBits % 2).toBe(1); // Odd number
-        expect(totalBits % 8).not.toBe(0); // Not even byte-aligned (21 % 8 = 5)
-
-        // Verify total bytes read matches expected
-        let totalBytesRead = 0;
-        for (const frame of frames) {
-            for (const chunk of frame) {
-                totalBytesRead += chunk.byteLength;
-            }
-        }
-        expect(totalBytesRead).toBe(oddFrameBitData.totalBytes);
-        expect(totalBytesRead).toBe(3); // 3 bytes for 21 bits
-    });
-
-    test("private tags are read correctly", async () => {
-        const url =
-            "https://github.com/dcmjs-org/data/releases/download/binary-parsing-stressors/large-private-tags.dcm";
-        const dcmPath = await getTestDataset(url, "large-private-tags.dcm");
-
-        // First, read the file with DicomMessage to identify which private tags exist
-        // and determine their order relative to PixelData
-        const syncDict = DicomMessage.readFile(fs.readFileSync(dcmPath).buffer);
-
-        // Get all tags in order (approximate - dict keys may not preserve exact order)
-        const { dict } = syncDict;
-
-        const privateCreator = dict["7FE10010"];
-        expect(privateCreator.Value[0]).toBe("GEMS_Ultrasound_MovieGroup_001");
-        expect(privateCreator.vr).toBe("LO");
-        const privateSq = dict["7FE11001"];
-        const [sq0] = privateSq.Value;
-        const obj1002 = sq0["7FE11002"];
-        expect(obj1002.Value[0]).toBe("2D+Trace");
-    });
-});
package/test/data-encoding.test.js
DELETED
@@ -1,59 +0,0 @@
-import { getZippedTestDataset } from "./testUtils.js";
-import dcmjs from "../src/index.js";
-import fs from "fs";
-import fsPromises from "fs/promises";
-import path from "path";
-
-const { DicomMetaDictionary, DicomMessage } = dcmjs.data;
-
-const expectedPatientNames = {
-    SCSARAB: "قباني^لنزار",
-    SCSFREN: "Buc^Jérôme",
-    SCSGERM: "Äneas^Rüdiger",
-    SCSGREEK: "Διονυσιος",
-    SCSHBRW: "שרון^דבורה",
-    SCSRUSS: "Люкceмбypг",
-    SCSX1: "Wang^XiaoDong=王^小東", // Trailing "=" gets stripped, as is permitted by the spec
-    SCSX2: "Wang^XiaoDong=王^小东" // idem
-    // These are not yet supported, because they use multiple encodings, which would require a more complex parser...
-    //"SCSH31": "X",
-    //"SCSH32": "X",
-    //"SCSI2": "X",
-};
-
-it("test_encodings", async () => {
-    const url =
-        "https://github.com/dcmjs-org/data/releases/download/dclunie-charsets/dclunie-charsets.zip";
-    const unzipPath = await getZippedTestDataset(
-        url,
-        "dclunie-charsets.zip",
-        "dclunie-charsets"
-    );
-    const filesPath = unzipPath + "/charsettests";
-    const fileNames = await fsPromises.readdir(filesPath);
-
-    fileNames.forEach(fileName => {
-        if (fileName in expectedPatientNames) {
-            const arrayBuffer = fs.readFileSync(
-                path.join(filesPath, fileName)
-            ).buffer;
-            const dicomDict = DicomMessage.readFile(arrayBuffer);
-            const dataset = DicomMetaDictionary.naturalizeDataset(
-                dicomDict.dict
-            );
-            expect(String(dataset.PatientName)).toEqual(
-                expectedPatientNames[fileName]
-            );
-
-            // write to memory and expect correctly loaded utf-8 DICOM
-            const newDicomDict = DicomMessage.readFile(dicomDict.write());
-            const newDataset = DicomMetaDictionary.naturalizeDataset(
-                newDicomDict.dict
-            );
-            expect(String(newDataset.PatientName)).toEqual(
-                expectedPatientNames[fileName]
-            );
-            expect(newDataset.SpecificCharacterSet).toEqual("ISO_IR 192");
-        }
-    });
-});