@opencloning/utils 1.2.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/package.json +2 -2
- package/src/config/urlWhitelist.js +4 -0
- package/src/utils/fileParsers.js +17 -23
- package/src/utils/fileParsers.test.js +241 -0
- package/src/utils/getHttpClient.js +5 -1
- package/src/utils/readNwrite.js +12 -0
- package/src/utils/readNwrite.test.js +146 -0
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,21 @@
 # @opencloning/utils
 
+## 1.3.0
+
+### Minor Changes
+
+- [#597](https://github.com/manulera/OpenCloning_frontend/pull/597) [`d5a456d`](https://github.com/manulera/OpenCloning_frontend/commit/d5a456d70ccfe949b21aae260d2c99507ff6a88e) Thanks [@manulera](https://github.com/manulera)! - Changes associated with new "Syntax Builder" application for creating and managing modular cloning syntaxes, along with significant refactoring of assembler components to support both the new app and the existing OpenCloning application.
+
+  - Added a new standalone app (`apps/syntax-builder`) for building and editing cloning syntaxes with visual previews
+  - Refactored assembler components to be more modular and reusable across applications
+  - Enhanced file parsing utilities to support bidirectional conversion between JSON and delimited formats
+  - Added graph-based validation and visualization for syntax parts using the graphology library
+
+### Patch Changes
+
+- Updated dependencies [[`d5a456d`](https://github.com/manulera/OpenCloning_frontend/commit/d5a456d70ccfe949b21aae260d2c99507ff6a88e)]:
+  - @opencloning/store@1.3.0
+
 ## 1.2.0
 
 ### Patch Changes
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@opencloning/utils",
-  "version": "1.2.0",
+  "version": "1.3.0",
   "type": "module",
   "main": "./src/utils/index.js",
   "exports": {
@@ -27,7 +27,7 @@
     "directory": "packages/utils"
   },
   "dependencies": {
-    "@opencloning/store": "1.2.0",
+    "@opencloning/store": "1.3.0",
     "@teselagen/bio-parsers": "^0.4.32",
     "@teselagen/sequence-utils": "^0.3.35",
     "@zip.js/zip.js": "^2.7.62",
package/src/config/urlWhitelist.js
CHANGED
@@ -1,3 +1,5 @@
+/* istanbul ignore file */
+
 export default [
   // GitHub repository for OpenCloning templates
   'https://assets.opencloning.org/OpenCloning-submission',
@@ -9,6 +11,8 @@ export default [
   'https://assets.opencloning.org/SnapGene_crawler',
   // GitHub repository for Open DNA Collections index
   'https://assets.opencloning.org/open-dna-collections',
+  // GitHub repository for syntaxes
+  'https://assets.opencloning.org/syntaxes/syntaxes',
   // NCBI entrez API
   'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi',
   'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi',
package/src/utils/fileParsers.js
CHANGED
@@ -1,7 +1,8 @@
 import { stringIsNotDNA } from '@opencloning/store/cloning_utils';
 import { readSubmittedTextFile } from './readNwrite';
 
-export const primersFromTextFile = async (fileUploaded, existingNames) => {
+export async function delimitedFileToJson(fileUploaded, requiredHeaders = []) {
+
   const fileContent = await readSubmittedTextFile(fileUploaded);
   const allLines = fileContent.split(/\r\n|\r|\n/);
 
@@ -27,43 +28,36 @@ export const primersFromTextFile = async (fileUploaded, existingNames) => {
 
   const headers = lines[0].split(delimiter);
 
-  const requiredHeaders = ['name', 'sequence'];
   const missingHeaders = requiredHeaders.filter(
     (header) => !headers.includes(header),
   );
 
-
-  if (headers.length < 2) {
-    throw new Error('Headers should have at least 2 columns');
+  if (missingHeaders.length > 0) {
+    throw new Error(`Headers missing: ${missingHeaders.join(', ')}`);
   }
 
   // All lines should have the same number of tabs
   if (lines.some((line) => line.split(delimiter).length !== headers.length)) {
     throw new Error('All lines should have the same number of columns');
   }
-
-  // Required headers should be present
-  if (missingHeaders.length > 0) {
-    throw new Error(`Headers missing: ${missingHeaders.join(', ')}`);
-  }
-
-  const primersToAdd = lines.slice(1).map((line) => {
+  return lines.slice(1).map((line) => {
     const values = line.split(delimiter);
-    const obj = {
+    const obj = {};
     headers.forEach((header, i) => {
       obj[header] = values[i];
     });
-
-    if (existingNames.includes(obj.name)) {
-      obj.error = 'existing';
-    } else if (stringIsNotDNA(obj.sequence)) {
-      obj.error = 'invalid';
-      // TODO: Improvement: check for already existing sequences
-      // While this is not a problem, it removes data redundancy
-    }
-
     return obj;
   });
+};
 
-
+export const primersFromTextFile = async (fileUploaded, existingNames) => {
+  const primers = await delimitedFileToJson(fileUploaded, ['name', 'sequence']);
+  return primers.map((primer) => {
+    if (existingNames.includes(primer.name)) {
+      return { ...primer, error: 'existing' };
+    } else if (stringIsNotDNA(primer.sequence)) {
+      return { ...primer, error: 'invalid' };
+    }
+    return { ...primer, error: '' };
+  });
 };
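The refactor above splits the old `primersFromTextFile` into a generic `delimitedFileToJson` parser (delimiter detection, header validation, row-to-object mapping) plus a thin primer-specific wrapper that only adds the `error` field. A minimal usage sketch, assuming a browser-like environment where `File` exists; the file contents and the `existingNames` list are made up for illustration, and the import path mirrors the one used by the tests:

```js
import { delimitedFileToJson, primersFromTextFile } from './fileParsers';

// Illustrative uploads; any .csv/.tsv file object works the same way.
const partsFile = new File(['name,part\np1,promoter'], 'parts.csv', { type: 'text/plain' });
const primersFile = new File(['name,sequence\nfwd1,ACGT\nrev1,TTGCA'], 'primers.csv', { type: 'text/plain' });

// Generic parsing: one object per row, keyed by the header row;
// throws if any of the listed required headers is absent.
const rows = await delimitedFileToJson(partsFile, ['name', 'part']);
// => [{ name: 'p1', part: 'promoter' }]

// Primer-specific wrapper: same parsing plus an `error` field per row
// ('existing', 'invalid' or '').
const primers = await primersFromTextFile(primersFile, ['fwd1']);
// => [{ name: 'fwd1', sequence: 'ACGT', error: 'existing' },
//     { name: 'rev1', sequence: 'TTGCA', error: '' }]
```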
package/src/utils/fileParsers.test.js
ADDED
@@ -0,0 +1,241 @@
+import { describe, it, expect } from 'vitest';
+import { delimitedFileToJson, primersFromTextFile } from './fileParsers';
+
+// Helper function to create a File object for testing
+// File is a browser global available in jsdom test environment
+function createFile(content, filename) {
+  // eslint-disable-next-line no-undef
+  return new File([content], filename, { type: 'text/plain' });
+}
+
+describe('delimitedFileToJson', () => {
+  it('parses CSV file with comma delimiter', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC\nprimer2,CGTG', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('parses CSV file with semicolon delimiter', async () => {
+    const file = createFile('name;sequence\nprimer1;ATGC\nprimer2;CGTG', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('parses TSV file with tab delimiter', async () => {
+    const file = createFile('name\tsequence\nprimer1\tATGC\nprimer2\tCGTG', 'test.tsv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('throws error for non-CSV/TSV file', async () => {
+    const file = createFile('some content', 'test.txt');
+
+    await expect(delimitedFileToJson(file)).rejects.toThrow('File must be a .csv or .tsv file');
+  });
+
+  it('throws error when CSV contains both comma and semicolon', async () => {
+    const file = createFile('name,sequence;other\nprimer1,ATGC;extra', 'test.csv');
+
+    await expect(delimitedFileToJson(file)).rejects.toThrow('File must contain only one delimiter, either comma or semicolon');
+  });
+
+  it('throws error for empty file', async () => {
+    const file = createFile('', 'test.csv');
+
+    await expect(delimitedFileToJson(file)).rejects.toThrow('File is empty');
+  });
+
+  it('throws error for file with only empty lines', async () => {
+    const file = createFile('\n\n \n\r\n', 'test.csv');
+
+    await expect(delimitedFileToJson(file)).rejects.toThrow('File is empty');
+  });
+
+  it('removes empty lines', async () => {
+    const file = createFile('name,sequence\n\nprimer1,ATGC\n \nprimer2,CGTG', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('validates required headers', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC', 'test.csv');
+
+    await expect(delimitedFileToJson(file, ['name', 'sequence', 'required'])).rejects.toThrow('Headers missing: required');
+
+    const file2 = createFile('name,sequence\nprimer1,ATGC', 'test.csv');
+
+    const result = await delimitedFileToJson(file2, ['name', 'sequence']);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' }
+    ]);
+  });
+
+  it('validates multiple missing headers', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC', 'test.csv');
+
+    await expect(delimitedFileToJson(file, ['name', 'required1', 'required2'])).rejects.toThrow('Headers missing: required1, required2');
+  });
+
+  it('throws error when rows have inconsistent column count', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC,extra\nprimer2,CGTG', 'test.csv');
+
+    await expect(delimitedFileToJson(file)).rejects.toThrow('All lines should have the same number of columns');
+  });
+
+  it('handles Windows line endings (\\r\\n)', async () => {
+    const file = createFile('name,sequence\r\nprimer1,ATGC\r\nprimer2,CGTG', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('handles Mac line endings (\\r)', async () => {
+    const file = createFile('name,sequence\rprimer1,ATGC\rprimer2,CGTG', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('handles Unix line endings (\\n)', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC\nprimer2,CGTG', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ]);
+  });
+
+  it('handles empty required headers array', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC', 'test.csv');
+
+    const result = await delimitedFileToJson(file, []);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' }
+    ]);
+  });
+
+  it('handles single row data', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC', 'test.csv');
+
+    const result = await delimitedFileToJson(file);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC' }
+    ]);
+  });
+});
+
+describe('primersFromTextFile', () => {
+  it('parses valid primers without errors', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC\nprimer2,CGTG', 'test.csv');
+
+    const result = await primersFromTextFile(file, []);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC', error: '' },
+      { name: 'primer2', sequence: 'CGTG', error: '' }
+    ]);
+  });
+
+  it('marks primers with existing names', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC\nprimer2,CGTG', 'test.csv');
+
+    const result = await primersFromTextFile(file, ['primer1']);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC', error: 'existing' },
+      { name: 'primer2', sequence: 'CGTG', error: '' }
+    ]);
+  });
+
+  it('marks primers with invalid DNA sequences', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC\nprimer2,XYZ', 'test.csv');
+
+    const result = await primersFromTextFile(file, []);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC', error: '' },
+      { name: 'primer2', sequence: 'XYZ', error: 'invalid' }
+    ]);
+  });
+
+  it('prioritizes existing name error over invalid sequence error', async () => {
+    const file = createFile('name,sequence\nprimer1,XYZ', 'test.csv');
+
+    const result = await primersFromTextFile(file, ['primer1']);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'XYZ', error: 'existing' }
+    ]);
+  });
+
+  it('handles multiple primers with mixed errors', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC\nprimer2,XYZ\nprimer3,CGTG', 'test.csv');
+
+    const result = await primersFromTextFile(file, ['primer1']);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC', error: 'existing' },
+      { name: 'primer2', sequence: 'XYZ', error: 'invalid' },
+      { name: 'primer3', sequence: 'CGTG', error: '' }
+    ]);
+  });
+
+  it('requires name and sequence headers', async () => {
+    const file = createFile('name,other\nprimer1,ATGC', 'test.csv');
+
+    await expect(primersFromTextFile(file, [])).rejects.toThrow('Headers missing: sequence');
+  });
+
+  it('handles empty existing names array', async () => {
+    const file = createFile('name,sequence\nprimer1,ATGC', 'test.csv');
+
+    const result = await primersFromTextFile(file, []);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC', error: '' }
+    ]);
+  });
+
+  it('handles TSV format', async () => {
+    const file = createFile('name\tsequence\nprimer1\tATGC', 'test.tsv');
+
+    const result = await primersFromTextFile(file, []);
+
+    expect(result).toEqual([
+      { name: 'primer1', sequence: 'ATGC', error: '' }
+    ]);
+  });
+});
package/src/utils/getHttpClient.js
CHANGED
@@ -2,7 +2,11 @@ import axios from 'axios';
 import urlWhitelist from '../config/urlWhitelist';
 
 export default function getHttpClient(extraUrls = []) {
-  const
+  const baseURL = import.meta.env.BASE_URL || null;
+  const whitelist = [...urlWhitelist, window.location.origin, ...extraUrls];
+  if (baseURL) {
+    whitelist.push(baseURL);
+  }
 
   const client = axios.create();
 
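The change above adds the app's base URL (Vite's `import.meta.env.BASE_URL`, when set) to the request whitelist, alongside the static whitelist, `window.location.origin`, and any caller-supplied URLs. A hedged sketch of a caller, with a made-up extra URL; enforcement of the whitelist happens inside `getHttpClient` and is not shown in this diff:

```js
import getHttpClient from './getHttpClient';

// Allow one extra backend on top of the built-in whitelist,
// the current origin and (when defined) import.meta.env.BASE_URL.
const httpClient = getHttpClient(['https://example.org/api']);

// Whitelisted requests go through like any axios call.
const { data } = await httpClient.get('https://example.org/api/sequences');
```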
package/src/utils/readNwrite.js
CHANGED
@@ -293,3 +293,15 @@ export function updateVerificationFileNames(verificationFiles, originalFiles, va
     return file;
   });
 }
+
+export function jsonToDelimitedFile(json, delimiter = '\t') {
+  const headers = Object.keys(json[0]);
+  const rows = json.map((obj) =>
+    headers
+      .map((header) =>
+        String(obj[header]).replaceAll(/[\r\n]+/g, ' ').replaceAll(delimiter, ' ')
+      )
+      .join(delimiter)
+  );
+  return [headers.join(delimiter), ...rows].join('\n');
+}
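`jsonToDelimitedFile` is the writing half of the "bidirectional conversion between JSON and delimited formats" mentioned in the changelog: it uses the keys of the first object as the header row and flattens newlines and embedded delimiters to spaces. A small round-trip sketch, assuming the companion `delimitedFileToJson` from `fileParsers.js` and a browser-like `File` constructor; the sample data is made up:

```js
import { jsonToDelimitedFile } from './readNwrite';
import { delimitedFileToJson } from './fileParsers';

const parts = [
  { name: 'p1', sequence: 'ATGC', notes: 'first\nline' },
  { name: 'p2', sequence: 'CGTG', notes: 'plain' },
];

// JSON -> TSV string; the newline inside `notes` is collapsed to a space.
const tsv = jsonToDelimitedFile(parts, '\t');
// 'name\tsequence\tnotes\np1\tATGC\tfirst line\np2\tCGTG\tplain'

// TSV string -> JSON again (all values come back as strings).
const roundTripped = await delimitedFileToJson(
  new File([tsv], 'parts.tsv', { type: 'text/plain' }),
  ['name', 'sequence'],
);
// => [{ name: 'p1', sequence: 'ATGC', notes: 'first line' },
//     { name: 'p2', sequence: 'CGTG', notes: 'plain' }]
```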
package/src/utils/readNwrite.test.js
ADDED
@@ -0,0 +1,146 @@
+import { describe, it, expect } from 'vitest';
+import { jsonToDelimitedFile } from './readNwrite';
+
+describe('jsonToDelimitedFile', () => {
+  it('converts JSON array to delimited file with default tab delimiter', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', length: 4 },
+      { name: 'primer2', sequence: 'CGTG', length: 4 }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tlength\nprimer1\tATGC\t4\nprimer2\tCGTG\t4');
+  });
+
+  it('converts JSON array to CSV with comma delimiter', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ];
+
+    const result = jsonToDelimitedFile(json, ',');
+
+    expect(result).toBe('name,sequence\nprimer1,ATGC\nprimer2,CGTG');
+  });
+
+  it('handles single row', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC' }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\nprimer1\tATGC');
+  });
+
+  it('handles empty string values', () => {
+    const json = [
+      { name: 'primer1', sequence: '', length: 4 },
+      { name: '', sequence: 'ATGC', length: 4 }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tlength\nprimer1\t\t4\n\tATGC\t4');
+  });
+
+  it('handles null and undefined values', () => {
+    const json = [
+      { name: 'primer1', sequence: null, length: undefined },
+      { name: null, sequence: 'ATGC', length: 4 }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tlength\nprimer1\tnull\tundefined\nnull\tATGC\t4');
+  });
+
+  it('handles numeric values', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', length: 4, temperature: 65.5 },
+      { name: 'primer2', sequence: 'CGTG', length: 4, temperature: 67.0 }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tlength\ttemperature\nprimer1\tATGC\t4\t65.5\nprimer2\tCGTG\t4\t67');
+  });
+
+  it('handles boolean values', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', active: true },
+      { name: 'primer2', sequence: 'CGTG', active: false }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tactive\nprimer1\tATGC\ttrue\nprimer2\tCGTG\tfalse');
+  });
+
+  it('handles values containing delimiter characters', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', description: 'test\twith\ttabs' },
+      { name: 'primer2', sequence: 'CGTG', description: 'test,with,commas' }
+    ];
+
+    const result = jsonToDelimitedFile(json, '\t');
+
+    expect(result).toBe('name\tsequence\tdescription\nprimer1\tATGC\ttest with tabs\nprimer2\tCGTG\ttest,with,commas');
+  });
+
+  it('handles values containing newlines', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', notes: 'line1\nline2' },
+      { name: 'primer2', sequence: 'CGTG', notes: 'single line' }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tnotes\nprimer1\tATGC\tline1 line2\nprimer2\tCGTG\tsingle line');
+  });
+
+  it('preserves order of headers based on first object', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', length: 4 },
+      { length: 4, sequence: 'CGTG', name: 'primer2' }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tlength\nprimer1\tATGC\t4\nprimer2\tCGTG\t4');
+  });
+
+  it('handles objects with different keys by using first object keys', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG', extra: 'field' }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\nprimer1\tATGC\nprimer2\tCGTG');
+  });
+
+  it('handles missing values in subsequent objects', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC', length: 4 },
+      { name: 'primer2', sequence: 'CGTG' }
+    ];
+
+    const result = jsonToDelimitedFile(json);
+
+    expect(result).toBe('name\tsequence\tlength\nprimer1\tATGC\t4\nprimer2\tCGTG\tundefined');
+  });
+
+  it('handles custom delimiter with multiple characters', () => {
+    const json = [
+      { name: 'primer1', sequence: 'ATGC' },
+      { name: 'primer2', sequence: 'CGTG' }
+    ];
+
+    const result = jsonToDelimitedFile(json, ' | ');
+
+    expect(result).toBe('name | sequence\nprimer1 | ATGC\nprimer2 | CGTG');
+  });
+});