@opencloning/utils 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,17 @@
1
+ # @opencloning/utils
2
+
3
+ ## 1.0.0
4
+
5
+ ### Major Changes
6
+
7
+ - babe2f9: Switch to monorepo structure and use changesets
8
+
9
+ ### Minor Changes
10
+
11
+ - 8cd33bb: Rearrange dependencies
12
+
13
+ ### Patch Changes
14
+
15
+ - Updated dependencies [8cd33bb]
16
+ - Updated dependencies [babe2f9]
17
+ - @opencloning/store@1.0.0
package/package.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "name": "@opencloning/utils",
3
+ "version": "1.0.0",
4
+ "type": "module",
5
+ "repository": {
6
+ "type": "git",
7
+ "url": "https://github.com/manulera/OpenCloning_frontend.git",
8
+ "directory": "packages/utils"
9
+ },
10
+ "dependencies": {
11
+ "@opencloning/store": "workspace:*",
12
+ "@teselagen/bio-parsers": "^0.4.32",
13
+ "@teselagen/sequence-utils": "^0.3.35",
14
+ "@zip.js/zip.js": "^2.7.62",
15
+ "dom-to-svg": "^0.12.2",
16
+ "lodash-es": "^4.17.21"
17
+ },
18
+ "peerDependencies": {
19
+ "react": "^18.3.1",
20
+ "react-redux": "^8.1.3"
21
+ }
22
+ }
@@ -0,0 +1,19 @@
1
// Allow-listed remote URL prefixes for outgoing HTTP requests
// (consumed by getHttpClient's request interceptor).
const urlWhitelist = [
  // GitHub repository for OpenCloning templates
  'https://assets.opencloning.org/OpenCloning-submission',
  // GitHub repository for annotated iGEM distribution
  'https://assets.opencloning.org/annotated-igem-distribution',
  // GitHub repository for SEVA plasmids index
  'https://assets.opencloning.org/seva_plasmids_index',
  // GitHub repository for SnapGene index
  'https://assets.opencloning.org/SnapGene_crawler',
  // GitHub repository for Open DNA Collections index
  'https://assets.opencloning.org/open-dna-collections',
  // NCBI entrez API
  'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi',
  'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi',
  // Datasets api
  'https://api.ncbi.nlm.nih.gov/datasets/v2alpha/taxonomy/taxon_suggest',
  'https://api.ncbi.nlm.nih.gov/datasets/v2alpha/genome/taxon',
  'https://api.ncbi.nlm.nih.gov/datasets/v2alpha/genome/accession',
];

export default urlWhitelist;
@@ -0,0 +1,14 @@
1
+ {
2
+ "M": "AC",
3
+ "R": "AG",
4
+ "W": "AT",
5
+ "S": "CG",
6
+ "Y": "CT",
7
+ "K": "GT",
8
+ "V": "ACG",
9
+ "H": "ACT",
10
+ "D": "AGT",
11
+ "B": "CGT",
12
+ "X": "GATC",
13
+ "N": "GATC"
14
+ }
@@ -0,0 +1,20 @@
1
+ import { aliasedEnzymesByName, getReverseComplementSequenceString as reverseComplement } from '@teselagen/sequence-utils';
2
+ import ambiguousDnaBases from './ambiguous_dna_bases.json';
3
+
4
+ const enzymeArray = Object.values(aliasedEnzymesByName);
5
+
6
/**
 * Look up the recognition site of a restriction enzyme by name/alias and
 * expand IUPAC ambiguity codes (e.g. N -> GATC) into their concrete bases.
 *
 * @param {string} enzyme - enzyme name or alias.
 * @returns {string} '' for a falsy name, '????' when the enzyme is unknown,
 *   otherwise the recognition sequence with ambiguity codes expanded.
 */
export function getEnzymeRecognitionSequence(enzyme) {
  if (!enzyme) {
    return '';
  }
  const entry = enzymeArray.find((candidate) => candidate.aliases.includes(enzyme));
  const site = entry?.site;
  if (!site) {
    return '????';
  }
  // Expand each ambiguous base into its set of concrete bases
  let expanded = '';
  for (const base of site) {
    expanded += base in ambiguousDnaBases ? ambiguousDnaBases[base] : base;
  }
  return expanded;
}
16
+
17
/**
 * True when the enzyme's expanded recognition sequence equals its own
 * reverse complement. (Historical spelling kept: exported name is public API.)
 */
export function isEnzymePalyndromic(enzyme) {
  const seq = getEnzymeRecognitionSequence(enzyme);
  return seq === reverseComplement(seq);
}
@@ -0,0 +1,18 @@
1
/**
 * Convert an axios error into a human-readable message.
 *
 * @param {object} error - an axios error (may or may not carry a `response`).
 * @returns {string} a message suitable for display to the user; '' when no
 *   usable detail is present.
 */
export default function error2String(error) {
  if (error.code === 'ERR_NETWORK') {
    return 'Cannot connect to backend server';
  }
  // Non-axios errors (no code) just expose their message
  if (!error.code) {
    return error.message;
  }
  // Some coded errors (e.g. timeouts) have no response object at all;
  // the previous version crashed here on `error.response.data`.
  if (!error.response) {
    return error.message || '';
  }
  if (error.response.status === 500) {
    return 'Internal server error';
  }
  const detail = error.response.data?.detail;
  if (typeof detail === 'string') {
    return detail;
  }
  // FastAPI-style validation errors: an array of objects. Strip the bulky
  // `input` field before serializing. (The previous `typeof === 'object'`
  // check let null / plain objects through and crashed on `.map`.)
  if (Array.isArray(detail)) {
    const compact = detail.map(({ input, ...rest }) => rest);
    return `Server error message:\n${JSON.stringify(compact, null, 2)}`;
  }
  return '';
}
@@ -0,0 +1,69 @@
1
+ import { stringIsNotDNA } from '@opencloning/store/cloning_utils';
2
+ import { readSubmittedTextFile } from './readNwrite';
3
+
4
/**
 * Parse primers from a submitted .csv or .tsv file.
 *
 * The header row must contain at least `name` and `sequence` columns; every
 * data row becomes an object keyed by the headers, with an `error` field set
 * to 'existing' (duplicate name) or 'invalid' (non-DNA sequence) when needed.
 *
 * @param {File} fileUploaded - the submitted file (extension decides delimiter).
 * @param {string[]} existingNames - primer names already present in the session.
 * @returns {Promise<object[]>} parsed primer objects.
 * @throws {Error} on unsupported extension, mixed delimiters, empty file,
 *   malformed columns, or missing required headers.
 */
export const primersFromTextFile = async (fileUploaded, existingNames) => {
  const fileContent = await readSubmittedTextFile(fileUploaded);
  const allLines = fileContent.split(/\r\n|\r|\n/);

  // Pick the delimiter from the file extension
  let delimiter = null;
  if (fileUploaded.name.endsWith('.csv')) {
    delimiter = /[,;]/;
  } else if (fileUploaded.name.endsWith('.tsv')) {
    delimiter = /\t/;
  } else {
    throw new Error('File must be a .csv or .tsv file');
  }
  // Drop blank lines before validating
  const lines = allLines.filter((line) => line.trim() !== '');

  // A csv file must use a single delimiter, not a mix of comma and semicolon
  if (fileUploaded.name.endsWith('.csv') && fileContent.includes(',') && fileContent.includes(';')) {
    throw new Error('File must contain only one delimiter, either comma or semicolon');
  }

  if (lines.length === 0) {
    throw new Error('File is empty');
  }

  const headers = lines[0].split(delimiter);

  const requiredHeaders = ['name', 'sequence'];
  const missingHeaders = requiredHeaders.filter((header) => !headers.includes(header));

  // At least two columns are expected (name + sequence)
  if (headers.length < 2) {
    throw new Error('Headers should have at least 2 columns');
  }

  // Every data row must have the same column count as the header row
  if (lines.some((line) => line.split(delimiter).length !== headers.length)) {
    throw new Error('All lines should have the same number of columns');
  }

  if (missingHeaders.length > 0) {
    throw new Error(`Headers missing: ${missingHeaders.join(', ')}`);
  }

  return lines.slice(1).map((line) => {
    const values = line.split(delimiter);
    const primer = { error: '' };
    headers.forEach((header, i) => {
      primer[header] = values[i];
    });

    if (existingNames.includes(primer.name)) {
      primer.error = 'existing';
    } else if (stringIsNotDNA(primer.sequence)) {
      primer.error = 'invalid';
      // TODO: Improvement: check for already existing sequences
      // While this is not a problem, it removes data redundancy
    }

    return primer;
  });
};
@@ -0,0 +1,20 @@
1
+ import axios from 'axios';
2
+ import urlWhitelist from '../config/urlWhitelist';
3
+
4
/**
 * Create an axios client whose requests are restricted to an allow-list of
 * URL prefixes: the static whitelist, the app's base URL and origin, plus any
 * caller-supplied extras.
 *
 * @param {string[]} extraUrls - additional allowed URL prefixes.
 * @returns an axios instance whose request interceptor rejects any URL not
 *   starting with a whitelisted prefix.
 */
export default function getHttpClient(extraUrls = []) {
  const whitelist = [...urlWhitelist, import.meta.env.BASE_URL, window.location.origin, ...extraUrls];

  const client = axios.create();

  client.interceptors.request.use((config) => {
    // Resolve relative URLs against the configured base (or the page origin)
    const url = new URL(config.url, config.baseURL || window.location.origin);

    // Literal prefix comparison. The previous RegExp-based check interpolated
    // raw URLs into a pattern, so '.' acted as a wildcard and look-alike
    // hosts (e.g. assetsXopencloning.org) could slip through the whitelist.
    if (!whitelist.some((whitelistedUrl) => url.href.startsWith(whitelistedUrl))) {
      return Promise.reject(new Error(`Request blocked: URL not in whitelist ${url.href}`));
    }

    return config;
  }, (error) => Promise.reject(error));

  return client;
}
@@ -0,0 +1,109 @@
1
+ import { clone } from 'lodash-es';
2
+ import { getRangeLength, getSequenceWithinRange } from '@teselagen/range-utils';
3
+ import { getComplementSequenceString, bioData } from '@teselagen/sequence-utils';
4
+
5
+ const { ambiguous_dna_values } = bioData;
6
/**
 * Build a per-base annotated representation of a primer's bases relative to
 * the annotation range it covers on a template sequence.
 *
 * - Primers shorter than the annotation span are padded with '&' placeholder
 *   characters on the non-binding end.
 * - Primers longer than the span have the overhang split off into `inserts`
 *   (on the 5' or 3' side depending on `primerBindsOn`).
 * - Each remaining base is compared against the template (complemented for
 *   reverse primers) and flagged as an exact or IUPAC-ambiguous match.
 *
 * @param {object} annotationRange - absolute { start, end } of the annotation.
 * @param {boolean} forward - primer strand direction.
 * @param {string} bases - the primer's bases.
 * @param {number} start - absolute start of the primer's range.
 * @param {number} end - absolute end of the primer's range.
 * @param {string} fullSequence - template sequence to compare against.
 * @param {'5prime'|'3prime'} primerBindsOn - which primer end binds.
 * @param {number} sequenceLength - length of the full sequence.
 * @returns {object} r with: aRange (range relative to `start`),
 *   basesNoInserts, inserts ([{ bases, index }]), basesNoInsertsWithMetaData
 *   ([{ b, isMatch, isAmbiguousMatch }]), and allBasesWithMetaData (same,
 *   with insert bases spliced back in as isMatch: false).
 */
export function getStructuredBases({
  annotationRange,
  forward,
  bases = '',
  start,
  end,
  fullSequence,
  primerBindsOn,
  sequenceLength,
}) {
  // Length of the annotation span (getRangeLength handles wrapped ranges)
  const annLen = getRangeLength({ start, end }, sequenceLength);
  let basesToUse = bases;
  // Pad short primers with '&' on the side away from the binding end
  if (bases.length < annLen) {
    if (forward && primerBindsOn === '3prime') {
      const toAddLen = annLen - bases.length;
      for (let index = 0; index < toAddLen; index++) {
        basesToUse = `&${basesToUse}`;
      }
    } else if (!forward && primerBindsOn === '5prime') {
      const toAddLen = annLen - bases.length;
      for (let index = 0; index < toAddLen; index++) {
        basesToUse += '&';
      }
    }
  }
  const aRange = {
    // tnr: this probably needs to be changed in case annotation wraps origin
    start: annotationRange.start - start,
    end: annotationRange.end - start,
  };
  const r = {
    aRange,
    basesNoInserts: basesToUse,
    inserts: [],
  };

  const baseLen = basesToUse.length;
  const diffLen = baseLen - annLen;
  // Primer longer than the annotation: carve the overhang out into an insert
  if (diffLen > 0) {
    r.basesNoInserts = basesToUse.slice(
      primerBindsOn === '5prime' ? 0 : diffLen,
      primerBindsOn === '5prime' ? annLen : baseLen,
    );
    const insertBases = basesToUse.slice(
      primerBindsOn === '5prime' ? annLen : 0,
      primerBindsOn === '5prime' ? baseLen : diffLen,
    );

    r.inserts = [
      {
        bases: insertBases,
        index: primerBindsOn === '5prime' ? annLen : 0,
      },
    ];
  }
  // Restrict to the annotation range; reverse primers are flipped first so
  // the range indices line up with the template orientation
  const basesForRange = getSequenceWithinRange(
    aRange,
    forward ? r.basesNoInserts : r.basesNoInserts.split('').reverse().join(''),
  );
  // Tag each base with match info against the template, tolerating IUPAC codes
  r.basesNoInsertsWithMetaData = basesForRange.split('').map((b, i) => {
    const indexOfBase = i + annotationRange.start;
    let seqForBase = (fullSequence && fullSequence[indexOfBase]) || '';
    if (!forward) {
      seqForBase = getComplementSequenceString(seqForBase);
    }
    const isMatch = seqForBase.toLowerCase() === b.toLowerCase();
    // Ambiguous match: b is an IUPAC code whose expansion contains the template base
    const isAmbiguousMatch = !isMatch
      && ambiguous_dna_values[b.toUpperCase()].length > 1
      && ambiguous_dna_values[b.toUpperCase()].includes(seqForBase.toUpperCase());
    return {
      b,
      isMatch,
      isAmbiguousMatch,
    };
  });
  r.allBasesWithMetaData = clone(r.basesNoInsertsWithMetaData);
  if (!forward) {
    r.allBasesWithMetaData = r.allBasesWithMetaData.reverse();
  }
  // Splice the overhang bases back in; insert bases never match the template
  r.inserts
    .sort((a, b) => a.index - b.index)
    .forEach(({ bases, index }) => {
      r.allBasesWithMetaData.splice(
        index,
        0,
        ...bases.split('').map((b) => ({ b, isMatch: false })),
      );
    });

  return r;
}
97
+
98
+ // const basesToUse = 'aaaaaaatcggtctcaa';
99
+ // const { allBasesWithMetaData } = getStructuredBases({
100
+ // annotationRange: { start: 0, end: 10 },
101
+ // forward: true,
102
+ // bases: basesToUse,
103
+ // start: 0 - 1,
104
+ // end: 10 - 1,
105
+ // fullSequence: 'tcggtctcaaacgcagttcga',
106
+ // primerBindsOn: '3prime',
107
+ // sequenceLength: 21,
108
+ // });
109
+ // console.log(allBasesWithMetaData);
@@ -0,0 +1,102 @@
1
+ import getHttpClient from './getHttpClient';
2
+
3
+ const httpClient = getHttpClient();
4
+
5
+
6
/**
 * Query the NCBI Datasets taxonomy suggestion endpoint and keep only
 * species-level results that have genome data.
 *
 * @param {string} userInput - partial taxon name typed by the user.
 * @returns {Promise<object[]>} matching species entries ([] when none).
 */
export async function taxonSuggest(userInput) {
  const url = `https://api.ncbi.nlm.nih.gov/datasets/v2alpha/taxonomy/taxon_suggest/${userInput}`;
  const params = {
    taxon_resource_filter: 'TAXON_RESOURCE_FILTER_GENOME',
    tax_rank_filter: 'higher_taxon',
  };
  const resp = await httpClient.get(url, { params });
  const taxons = resp.data.sci_name_and_ids;

  // This might change if the API endpoint changes
  if (taxons === undefined) {
    return [];
  }
  return taxons.filter((taxon) => taxon.rank === 'SPECIES');
}
18
+
19
/**
 * Get the accession of the reference assembly for a taxon.
 *
 * @param {string|number} taxonId - NCBI taxonomy id.
 * @returns {Promise<string|null>} the reference assembly accession, or null
 *   when NCBI reports none.
 */
export async function getReferenceAssemblyId(taxonId) {
  const url = `https://api.ncbi.nlm.nih.gov/datasets/v2alpha/genome/taxon/${taxonId}/dataset_report?filters.reference_only=true`;
  const resp = await httpClient.get(url);
  const { reports } = resp.data;
  // Guard against reports being missing OR an empty array: the previous
  // version crashed on `reports[0].accession` when the array was empty.
  if (!Array.isArray(reports) || reports.length === 0) {
    return null;
  }
  return reports[0].accession;
}
25
+
26
/**
 * Search an assembly's annotation report for genes matching the user input.
 *
 * @param {string} assemblyId - assembly accession.
 * @param {string} userInput - gene search text.
 * @returns {Promise<object[]>} matching annotation reports ([] when none).
 */
export async function geneSuggest(assemblyId, userInput) {
  const url = `https://api.ncbi.nlm.nih.gov/datasets/v2alpha/genome/accession/${assemblyId}/annotation_report?search_text=${userInput}`;
  // TODO: add support for api key
  // const resp = await httpClient.get(url, { params: { api_key: 'blah' } });
  const resp = await httpClient.get(url);
  const { reports } = resp.data;
  return reports === undefined ? [] : reports;
}
36
+
37
/**
 * Fetch assembly metadata for an assembly accession.
 *
 * @param {string} assemblyId - assembly accession, with or without a version
 *   suffix (e.g. GCA_000007565 or GCA_000007565.2).
 * @returns {Promise<object|null>} null when NCBI has no report; otherwise
 *   { species, hasAnnotation, newerAssembly, exactMatch, pairedAccession }:
 *   - exactMatch: whether the accession matched a specific assembly version.
 *   - newerAssembly: the current accession when the requested one is outdated,
 *     else null.
 */
export async function getInfoFromAssemblyId(assemblyId) {
  const url = `https://api.ncbi.nlm.nih.gov/datasets/v2alpha/genome/accession/${assemblyId}/dataset_report?filters.assembly_version=all_assemblies`;
  const resp = await httpClient.get(url);

  // Single guard; the previous version re-checked `reports === undefined`
  // immediately after this, which was dead code.
  const { reports } = resp.data;
  if (resp.status === 404 || reports === undefined) {
    return null;
  }

  const species = reports[0].organism;
  const exactAssemblyMatch = reports.find((report) => report.accession === assemblyId);
  let exactMatch;
  let newerAssembly;
  let pairedAccession;
  if (!exactAssemblyMatch) {
    // The assembly ID is valid, but it's not an exact match to a particular version.
    // For example, passing GCA_000007565 will return entries, but the actual assembly versions
    // are GCA_000007565.1, GCA_000007565.2
    exactMatch = false;
    newerAssembly = reports[0].current_accession;
    pairedAccession = reports[0].paired_accession;
  } else {
    exactMatch = true;
    newerAssembly = exactAssemblyMatch.assembly_info.assembly_status !== 'current' ? exactAssemblyMatch.current_accession : null;
    pairedAccession = exactAssemblyMatch.paired_accession;
  }

  // Annotation presence is inferred from annotation_info; the dedicated
  // download_summary endpoint no longer works
  // (see https://github.com/ncbi/datasets/issues/380)
  return { species, hasAnnotation: reports[0].annotation_info !== undefined, newerAssembly, exactMatch, pairedAccession };
}
81
+
82
/**
 * Resolve a nucleotide sequence accession to its standard accession.version
 * and the species it belongs to.
 *
 * @param {string} sequenceAccession - e.g. 'CP046095.1'.
 * @returns {Promise<object|null>} null when the accession is unknown;
 *   otherwise { species, sequenceAccessionStandard } where species is null
 *   when no taxid is associated with the record.
 */
export async function getInfoFromSequenceAccession(sequenceAccession) {
  // For example: https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?id=CP046095.1&db=nuccore&retmode=json
  const url = `https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?id=${sequenceAccession}&db=nuccore&retmode=json`;

  const resp = await httpClient.get(url);
  // I don't think this ever happens, but just in case
  if (resp.status === 404 || resp.data.result.uids.length === 0) {
    return null;
  }

  const summary = resp.data.result[resp.data.result.uids[0]];
  const taxId = summary.taxid;
  const sequenceAccessionStandard = summary.accessionversion;

  if (!taxId) {
    return { species: null, sequenceAccessionStandard };
  }

  // Second round-trip: turn the taxid into a scientific name
  const taxonomyUrl = `https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?id=${taxId}&db=taxonomy&retmode=json`;
  const taxResp = await httpClient.get(taxonomyUrl);
  const organismName = taxResp.data.result[taxResp.data.result.uids[0]].scientificname;
  return { species: { tax_id: taxId, organism_name: organismName }, sequenceAccessionStandard };
}
@@ -0,0 +1,184 @@
1
+ import { cloneDeep } from 'lodash-es';
2
+ import { getUsedPrimerIds, mergePrimersInSource, shiftStateIds, getSourcesTakingSequenceAsInput } from '@opencloning/store/cloning_utils';
3
+
4
/**
 * Recursively build the ancestry tree of a node: each parent sequence becomes
 * { source, sequence, parentNodes }, with parentNodes computed the same way.
 *
 * NOTE(review): this reads node.source.input as an array of sequence ids,
 * while other helpers in this file (getImmediateParentSources,
 * getGraftSequenceId) read input as [{ sequence }] objects — confirm which
 * shape callers pass here.
 */
export function getParentNodes(node, sequences, sources) {
  const parentSequences = sequences.filter((sequence) => node.source.input.includes(sequence.id));
  return parentSequences.map((sequence) => {
    const source = sources.find((candidate) => candidate.id === sequence.id);
    const current = { source, sequence };
    return { ...current, parentNodes: getParentNodes(current, sequences, sources) };
  });
}
13
+
14
/**
 * Whether the substate rooted at source `id` (the source itself or any of
 * its ancestors) has associated files in the cloning state.
 */
export function substateHasFiles(cloningState, id) {
  const source = cloningState.sources.find((s) => s.id === id);
  const relevantIds = getAllParentSources(source, cloningState.sources).map((s) => s.id);
  relevantIds.push(id);
  if (!cloningState.files) {
    return false;
  }
  return cloningState.files.some((f) => relevantIds.includes(f.sequence_id));
}
20
+
21
// Collect the source ids of every ancestor of `node`, breadth-first level by
// level: direct parents first, then each parent's own ancestry.
function getAllSourceIdsInParentNodes(node) {
  const directIds = node.parentNodes.map(({ source }) => source.id);
  const deeperIds = node.parentNodes.flatMap((parent) => getAllSourceIdsInParentNodes(parent));
  return directIds.concat(deeperIds);
}
25
+
26
// Comparator for parent nodes: order by the smallest source id found in each
// node's ancestry, including the node's own source id.
function parentNodeSorter(a, b) {
  const minAncestorId = (node) => Math.min(...getAllSourceIdsInParentNodes(node).concat(node.source.id));
  return minAncestorId(a) - minAncestorId(b);
}
31
+
32
/**
 * Return the sources whose ids appear as inputs of `source`
 * (source.input is an array of { sequence } references).
 */
export function getImmediateParentSources(sources, source) {
  const inputIds = source.input.map((entry) => entry.sequence);
  return sources.filter(({ id }) => inputIds.includes(id));
}
36
+
37
/**
 * Collect every ancestor source of `source` (depth-first), optionally
 * stopping the walk at sources that already carry a database_id.
 * The `parentSources` accumulator is mutated in place and returned.
 */
export function getAllParentSources(source, sources, stopAtDatabaseId = false, parentSources = []) {
  const immediateParents = getImmediateParentSources(sources, source);
  parentSources.push(...immediateParents);
  for (const parent of immediateParents) {
    // Do not recurse past database-backed sources when asked to stop there
    if (stopAtDatabaseId && parent.database_id) {
      continue;
    }
    getAllParentSources(parent, sources, stopAtDatabaseId, parentSources);
  }
  return parentSources;
}
48
+
49
/**
 * Sort `sources2sort` by the smallest source id present in each source's
 * ancestry (the source's own id included) and return the sorted ids.
 */
export function getSortedSourceIds(sources2sort, sources) {
  // We also include the source itself for sorting, in case of grafting state
  const minAncestorId = (source) => {
    const ids = getAllParentSources(source, sources).map((s) => s.id);
    ids.push(source.id);
    return Math.min(...ids);
  };
  return [...sources2sort]
    .sort((a, b) => minAncestorId(a) - minAncestorId(b))
    .map((source) => source.id);
}
63
+
64
/**
 * Gather all ancestor sources of `source` together with the sequences that
 * share those sources' ids.
 */
export const collectParentSequencesAndSources = (source, sources, sequences, stopAtDatabaseId = false) => {
  const parentSources = getAllParentSources(source, sources, stopAtDatabaseId);
  const ancestorIds = parentSources.map((s) => s.id);
  const parentSequences = sequences.filter((seq) => ancestorIds.includes(seq.id));
  return { parentSources, parentSequences };
};
70
+
71
/**
 * Extract the self-contained substate rooted at sequence/source `id`: the
 * sequence, its full ancestry, and the primers and files they reference.
 *
 * @throws {Error} when no sequence or source with that id exists.
 */
export const getSubState = (state, id, stopAtDatabaseId = false) => {
  const { sequences, sources, primers, appInfo, files } = state.cloning;
  const sequencesToExport = sequences.filter((seq) => seq.id === id);
  const sourcesToExport = sources.filter((s) => s.id === id);
  if (sequencesToExport.length === 0) {
    throw new Error(`Sequence with id ${id} not found`);
  }
  if (sourcesToExport.length === 0) {
    throw new Error(`Source with id ${id} not found`);
  }
  // Pull in the whole ancestry of the root source
  const { parentSources, parentSequences } = collectParentSequencesAndSources(sourcesToExport[0], sources, sequences, stopAtDatabaseId);
  sequencesToExport.push(...parentSequences);
  sourcesToExport.push(...parentSources);

  // Keep only the primers and files actually referenced by the exported sources
  const primerIdsToExport = getUsedPrimerIds(sourcesToExport);
  const primersToExport = primers.filter((p) => primerIdsToExport.includes(p.id));
  const exportedSourceIds = sourcesToExport.map((s) => s.id);
  const filesToExport = files ? files.filter((f) => exportedSourceIds.includes(f.sequence_id)) : [];
  return { sequences: sequencesToExport, sources: sourcesToExport, primers: primersToExport, appInfo, files: filesToExport };
};
91
+
92
/**
 * Validate a loaded state and renumber its ids so they do not collide with
 * `oldState`'s.
 *
 * @throws {Error} when the required keys are missing, or when primers are
 *   present while skipPrimers is requested.
 */
export const shiftState = (newState, oldState, skipPrimers = false) => {
  const requiredKeys = ['primers', 'sequences', 'sources'];
  if (requiredKeys.some((key) => newState[key] === undefined)) {
    throw new Error('JSON file should contain at least keys: primers, sequences and sources');
  }
  if (skipPrimers && newState.primers.length > 0) {
    throw new Error('Primers cannot be loaded when skipping primers');
  }
  return shiftStateIds(newState, oldState, skipPrimers);
};
102
+
103
/**
 * Find the single "output" sequence of a state: the one sequence that is not
 * used as input by any source, provided every source has an output sequence.
 * Returns null when the state has no unique output.
 */
export function getGraftSequenceId({ sources, sequences }) {
  const inputIds = new Set(
    sources.flatMap((source) => source.input.map(({ sequence }) => sequence)),
  );
  const allSequenceIds = sequences.map((seq) => seq.id);
  const terminalIds = allSequenceIds.filter((id) => !inputIds.has(id));
  const everySourceHasOutput = sources.every((source) => allSequenceIds.includes(source.id));
  return everySourceHasOutput && terminalIds.length === 1 ? terminalIds[0] : null;
}
113
+
114
/**
 * Deduplicate primers that share a name across a merged state.
 *
 * Primers with the same name are merged when their sequence and database_id
 * agree; sources are rewritten to reference the kept primer. Returns a new
 * state; the input is not mutated.
 *
 * @throws {Error} when two primers share a name but differ in sequence or
 *   database_id.
 */
export function mergePrimersInState(mergedState) {
  const newState = cloneDeep(mergedState);
  const removedPrimerIds = [];
  for (let i = 0; i < newState.primers.length - 1; i++) {
    const keep = newState.primers[i];
    for (let j = i + 1; j < newState.primers.length; j++) {
      const candidate = newState.primers[j];
      if (keep.name !== candidate.name) {
        continue;
      }
      // database_ids "match" when both are absent or strictly equal
      const sameDatabaseId = (!keep.database_id && !candidate.database_id) || keep.database_id === candidate.database_id;
      if (keep.sequence === candidate.sequence && sameDatabaseId) {
        newState.sources = newState.sources.map((s) => mergePrimersInSource(s, keep.id, candidate.id));
        removedPrimerIds.push(candidate.id);
      } else {
        throw new Error(`Primer name ${keep.name} exists in current session but has different sequence or database_id`);
      }
    }
  }
  newState.primers = newState.primers.filter((p) => !removedPrimerIds.includes(p.id));
  return newState;
}
135
+
136
/**
 * Graft a parent cloning state into a child state.
 *
 * The parent's ids are shifted past the child's, its unique terminal
 * ("output") sequence is located, and the source that produced it takes over
 * the id of the child's source `graftSourceId` — so the parent's history
 * becomes the ancestry of that child source. If the child's sequence at the
 * graft point is a TemplateSequence placeholder, it is replaced by the
 * parent's real sequence. Duplicate primers are merged afterwards.
 *
 * @param {object} parentState - state providing the ancestry; must have a
 *   single output sequence (see getGraftSequenceId).
 * @param {object} childState - state receiving the graft.
 * @param {number} graftSourceId - id of the child source to be replaced.
 * @returns {{ mergedState: object, idShift: number }} the combined state and
 *   the id offset that was applied to the parent's ids.
 * @throws {Error} 'Invalid parent state' when the parent has no unique output.
 */
export function graftState(parentState, childState, graftSourceId) {
  const { shiftedState: shiftedParentState, idShift } = shiftState(parentState, childState);

  const graftSequenceId = getGraftSequenceId(shiftedParentState);
  if (graftSequenceId === null) {
    throw new Error('Invalid parent state');
  }
  const graftSequenceInParent = shiftedParentState.sequences.find((seq) => seq.id === graftSequenceId);

  // The parent source that produced the output, and the child source it replaces
  const parentGraftSource = shiftedParentState.sources.find((source) => source.id === graftSequenceId);
  const childGraftSource = childState.sources.find((source) => source.id === graftSourceId);
  const graftSequenceInChild = childState.sequences.find((seq) => seq.id === childGraftSource.id);
  // Keep the parent's source data but give it the child's id
  const mergedSource = { ...parentGraftSource, id: childGraftSource.id };

  // Everything else from both states, minus the two entities being fused
  const parentSources = shiftedParentState.sources.filter((source) => source.id !== parentGraftSource.id);
  const parentSequences = shiftedParentState.sequences.filter((seq) => seq.id !== graftSequenceId);
  const childSources = childState.sources.filter((source) => source.id !== childGraftSource.id);

  let childSequences = [...childState.sequences];
  // Swap a TemplateSequence placeholder in the child for the parent's real sequence
  if (graftSequenceInChild && graftSequenceInChild.type === 'TemplateSequence') {
    const updatedSequence = { ...graftSequenceInParent, id: graftSequenceInChild.id };
    childSequences = childSequences.filter((seq) => seq.id !== graftSequenceInChild.id);
    childSequences.push(updatedSequence);
  }

  let mergedState = {
    sources: [...parentSources, ...childSources, mergedSource],
    sequences: [...parentSequences, ...childSequences],
    primers: [...shiftedParentState.primers, ...childState.primers],
    // NOTE(review): assumes both states define `files`; other helpers in this
    // file guard with `files ? ... : []` — confirm callers always provide it.
    files: [...shiftedParentState.files, ...childState.files],
  };
  mergedState = mergePrimersInState(mergedState);
  return { mergedState, idShift };
}
170
+
171
/**
 * Append `newState` (with its ids shifted past `oldState`'s) to `oldState`,
 * then deduplicate primers unless skipPrimers is set.
 *
 * @returns {{ mergedState: object, idShift: number }}
 */
export const mergeStates = (newState, oldState, skipPrimers = false) => {
  const { shiftedState, idShift } = shiftState(newState, oldState, skipPrimers);
  const combined = {
    sources: [...oldState.sources, ...shiftedState.sources],
    sequences: [...oldState.sequences, ...shiftedState.sequences],
    primers: [...oldState.primers, ...shiftedState.primers],
    files: [...oldState.files, ...shiftedState.files],
    sourcesWithHiddenAncestors: oldState.sourcesWithHiddenAncestors,
  };
  const mergedState = skipPrimers ? combined : mergePrimersInState(combined);
  return { mergedState, idShift };
};