varvamp 1.1.3__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- varvamp/__init__.py +1 -1
- varvamp/command.py +60 -33
- varvamp/scripts/blast.py +36 -66
- varvamp/scripts/default_config.py +1 -2
- varvamp/scripts/logging.py +12 -10
- varvamp/scripts/primers.py +2 -2
- varvamp/scripts/qpcr.py +38 -36
- varvamp/scripts/reporting.py +194 -156
- varvamp/scripts/scheme.py +115 -107
- {varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/METADATA +4 -6
- varvamp-1.2.1.dist-info/RECORD +21 -0
- {varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/WHEEL +1 -1
- varvamp-1.1.3.dist-info/RECORD +0 -21
- {varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/entry_points.txt +0 -0
- {varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/top_level.txt +0 -0
varvamp/scripts/scheme.py
CHANGED
@@ -21,12 +21,10 @@ def construct_graph(nodes, init_graph):
         graph[node] = {}
 
     graph.update(init_graph)
-
     for node, neighbors in graph.items():
         for neighbor in neighbors.keys():
             if graph[neighbor].get(node, False) is False:
-                graph[neighbor][node] = float("infinity")
-
+                graph[neighbor][node] = (float("infinity"), 0)
     return graph
 
 
@@ -61,13 +59,12 @@ class Graph(object):
         """
         return self.graph[node1][node2]
 
-
 def find_amplicons(all_primers, opt_len, max_len):
     """
     finds all possible amplicons, creates a dictionary
     """
     amplicon_number = 0
-
+    amplicons = []
 
     for left_name in all_primers["+"]:
         left_primer = all_primers["+"][left_name]
@@ -82,17 +79,17 @@ def find_amplicons(all_primers, opt_len, max_len):
             # penalty multiplied by the e^(fold length of the optimal length).
             amplicon_costs = (right_primer[3] + left_primer[3])*math.exp(amplicon_length/opt_len)
             amplicon_name = "AMPLICON_"+str(amplicon_number)
-
-
-
-
-
-
-
-
+            amplicons.append(
+                {
+                    "id": amplicon_name,
+                    "penalty": amplicon_costs,
+                    "length": amplicon_length,
+                    "LEFT": left_primer + [left_name],
+                    "RIGHT": right_primer + [right_name],
+                }
+            )
             amplicon_number += 1
-
-    return amplicon_dict
+    return amplicons
 
 
 def create_amplicon_graph(amplicons, min_overlap):
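Note on this hunk: `find_amplicons` now returns a flat list of per-amplicon dictionaries instead of a nested dictionary, and each primer list carries its original name appended as the last element. A sketch of the resulting shape, with hypothetical values and the `[sequence, start, stop, penalty]` primer layout suggested by the indices used elsewhere in this file:

```python
# hypothetical amplicon entry mirroring the keys set in the hunk above
amplicon = {
    "id": "AMPLICON_0",
    "penalty": 4.2,
    "length": 1050,
    "LEFT": ["ACGTACGTACGTACGTACGTACGT", 100, 124, 2.0, "LEFT_12"],
    "RIGHT": ["TTGCAATTGCAATTGCAATTGCAA", 1126, 1150, 2.2, "RIGHT_34"],
}
# amplicon length derived from the primer coordinates, as in create_amplicon_graph()
print(amplicon["RIGHT"][2] - amplicon["LEFT"][1])  # 1050
```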
@@ -107,24 +104,30 @@ def create_amplicon_graph(amplicons, min_overlap):
     # before the min overlap
     min_overlap = min_overlap + config.PRIMER_SIZES[2]
 
-    for
+    for current_amplicon in amplicons:
         # remember all vertices
-
-
-        start = current_amplicon[
-        stop = current_amplicon[
-        for
-            next_amplicon = amplicons[next_amp]
+        amplicon_id = current_amplicon["id"]
+        nodes.append(amplicon_id)
+        start = current_amplicon["LEFT"][1] + current_amplicon["length"]/2
+        stop = current_amplicon["RIGHT"][2] - min_overlap
+        for next_amplicon in amplicons:
             # check if the next amplicon lies within the start/stop range of
             # the current amplicon and if its non-overlapping part is large
             # enough to ensure space for a primer and the min overlap of the
             # following amplicon.
-            if
-
-
-
-
-
+            if start <= next_amplicon["LEFT"][1] <= stop and next_amplicon["RIGHT"][2] > current_amplicon["RIGHT"][2] + next_amplicon["length"]/2:
+                if amplicon_id not in amplicon_graph:
+                    amplicon_graph[amplicon_id] = {
+                        next_amplicon["id"]: (
+                            next_amplicon.get("off_targets", False),
+                            next_amplicon["penalty"]
+                        )
+                    }
+                else:
+                    amplicon_graph[amplicon_id][next_amplicon["id"]] = (
+                        next_amplicon.get("off_targets", False),
+                        next_amplicon["penalty"]
+                    )
 
     # return a graph object
     return Graph(nodes, amplicon_graph)
@@ -136,17 +139,21 @@ def dijkstra_algorithm(graph, start_node):
     """
 
     previous_nodes = {}
-    shortest_path = {node: float('infinity') for node in graph.get_nodes()}
-    shortest_path[start_node] = 0
+    shortest_path = {node: (float('infinity'), 0) for node in graph.get_nodes()}
+    shortest_path[start_node] = (0, 0)
 
-    nodes_to_test = [(0, start_node)]
+    nodes_to_test = [((0, 0), start_node)]
 
     while nodes_to_test:
         current_distance, current_node = heapq.heappop(nodes_to_test)
         if current_distance > shortest_path[current_node]:
             continue
         for neighbor in graph.get_neighbors(current_node):
-
+            off_targets, base_penalty = graph.value(current_node, neighbor)
+            distance = (
+                current_distance[0] + off_targets,
+                current_distance[1] + base_penalty
+            )
             # Only consider this new path if it's a better path
             if not distance < shortest_path[neighbor]:
                 continue
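Note on this hunk: path costs are now `(off_targets, base_penalty)` tuples rather than plain floats, so `heapq` and the `<` comparison order candidate paths lexicographically, with fewer predicted off-target amplicons always winning before the summed primer penalty is compared. A minimal sketch of that ordering (the values and path names are hypothetical):

```python
import heapq

# hypothetical (off_targets, penalty) costs for three candidate paths
candidates = [((1, 2.5), "path_a"), ((0, 9.0), "path_b"), ((0, 3.1), "path_c")]

heap = []
for cost, name in candidates:
    heapq.heappush(heap, (cost, name))

# tuples compare element-wise: 0 off-targets always beats 1,
# and the penalty only breaks ties on the off-target count
order = [heapq.heappop(heap)[1] for _ in range(len(candidates))]
print(order)  # ['path_c', 'path_b', 'path_a']
```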
@@ -167,11 +174,12 @@ def get_end_node(previous_nodes, shortest_path, amplicons):
     for node in previous_nodes.keys():
         # check if a node has a larger stop -> empty dict and set new
         # best stop nucleotide
-
+        amplicon_stop = amplicons[node]["RIGHT"][2]
+        if amplicon_stop > stop_nucleotide:
             possible_end_nodes = {node: shortest_path[node]}
-            stop_nucleotide =
+            stop_nucleotide = amplicon_stop
         # if nodes have the same stop nucleotide, add to dictionary
-        elif
+        elif amplicon_stop == stop_nucleotide:
             possible_end_nodes[node] = shortest_path[node]
 
     # return the end node with the lowest penalty costs
@@ -196,49 +204,44 @@ def get_min_path(previous_nodes, start_node, target_node):
     return path[::-1]
 
 
-def
+def create_scheme(amplicon_path, amplicons_by_id):
     """
-    creates the final scheme
+    creates the final tiled-amplicon scheme
     """
-
-    scheme_dictionary = {
-        0: {},
-        1: {}
-    }
+    amplicon_scheme = []
 
     for pool in (0, 1):
-        for
-
-
-            scheme_dictionary[pool][amp][primer_pair[0]] = all_primers["+"][primer_pair[0]]
-            scheme_dictionary[pool][amp][primer_pair[1]] = all_primers["-"][primer_pair[1]]
+        for amp_id in amplicon_path[pool::2]:
+            amplicons_by_id[amp_id]["pool"] = pool
+            amplicon_scheme.append(amplicons_by_id[amp_id])
 
-    return
+    return amplicon_scheme
 
 
-def find_best_covering_scheme(amplicons, amplicon_graph
+def find_best_covering_scheme(amplicons, amplicon_graph):
     """
     this brute forces the amplicon scheme search until the largest
     coverage with the minimal costs is achieved.
     """
     # ini
     best_coverage = 0
-    max_stop = max(amplicons
-    lowest_costs = float('infinity')
-
-    for
+    max_stop = max(amplicons, key=lambda x: x["RIGHT"][2])["RIGHT"][2]
+    lowest_costs = (float('infinity'),)
+    # a dict for fast access to amplicons by their ID
+    amps_by_id = {amp["id"]: amp for amp in amplicons}
+    for amplicon in amplicons:
         # if the currently best coverage + start nucleotide of the currently tested amplicon
         # is smaller than the maximal stop nucleotide there might be a better amplicon
         # scheme that covers more of the genome
-        if
-            previous_nodes, shortest_path = dijkstra_algorithm(amplicon_graph,
+        if amplicon["LEFT"][1] + best_coverage <= max_stop:
+            previous_nodes, shortest_path = dijkstra_algorithm(amplicon_graph, amplicon["id"])
             # only continue if there are previous_nodes
             if previous_nodes:
-                target_node, costs = get_end_node(previous_nodes, shortest_path,
-                coverage =
+                target_node, costs = get_end_node(previous_nodes, shortest_path, amps_by_id)
+                coverage = amps_by_id[target_node]["RIGHT"][2] - amplicon["LEFT"][1]
                 # if the new coverage is larger, go for the larger coverage
                 if coverage > best_coverage:
-                    best_start_node =
+                    best_start_node = amplicon["id"]
                     best_target_node = target_node
                     best_previous_nodes = previous_nodes
                     lowest_costs = costs
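Note on the `create_scheme` rewrite above: pools are assigned by slicing the ordered amplicon path with a step of two, so amplicons at even positions land in pool 0 and those at odd positions in pool 1, keeping neighbouring, overlapping amplicons in different pools. A small sketch with hypothetical amplicon IDs:

```python
# hypothetical ordered path as returned by get_min_path()
amplicon_path = ["AMPLICON_3", "AMPLICON_7", "AMPLICON_12", "AMPLICON_15"]

for pool in (0, 1):
    # step-2 slicing alternates consecutive amplicons between the two pools
    print(pool, amplicon_path[pool::2])
# 0 ['AMPLICON_3', 'AMPLICON_12']
# 1 ['AMPLICON_7', 'AMPLICON_15']
```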
@@ -246,18 +249,19 @@ def find_best_covering_scheme(amplicons, amplicon_graph, all_primers):
                 # if the coverages are identical, go for the lowest costs
                 elif coverage == best_coverage:
                     if costs < lowest_costs:
-                        best_start_node =
+                        best_start_node = amplicon["id"]
                         best_target_node = target_node
                         best_previous_nodes = previous_nodes
                         lowest_costs = costs
                         best_coverage = coverage
         else:
             # check if the single amplicon has the largest coverage so far
-            coverage =
+            coverage = amplicon["length"]
             if coverage > best_coverage:
-                best_start_node =
+                best_start_node = amplicon["id"]
                 best_previous_nodes = previous_nodes
-                lowest_costs =
+                lowest_costs = (
+                    amplicon.get("off_targets", False), amplicon["penalty"])
                 best_coverage = coverage
     # no need to check more, the best covering amplicon scheme was found and
     # has the minimal costs compared to the schemes with the same coverage
@@ -265,13 +269,12 @@ def find_best_covering_scheme(amplicons, amplicon_graph, all_primers):
             break
 
     if best_previous_nodes:
-
+        amplicon_path = get_min_path(best_previous_nodes, best_start_node, best_target_node)
     else:
         # if no previous nodes are found but the single amplicon results in the largest
         # coverage - return as the best scheme
-
-
-        return best_coverage, create_scheme_dic(amplicon_scheme, amplicons, all_primers)
+        amplicon_path = [best_start_node]
+    return best_coverage, create_scheme(amplicon_path, amps_by_id)
 
 
 def test_scheme_for_dimers(amplicon_scheme):
@@ -280,17 +283,20 @@ def test_scheme_for_dimers(amplicon_scheme):
     """
 
     primer_dimers = []
-
-    for pool in
+    pools = {amp["pool"] for amp in amplicon_scheme}
+    for pool in pools:
         # test the primer dimers only within the respective pools
         tested_primers = []
-        for amp in amplicon_scheme
-
+        for amp_index, amp in enumerate(amplicon_scheme):
+            if amp["pool"] != pool:
+                continue
+            for primer in ["LEFT", "RIGHT"]:
                 # remember where the currrent primer was in the scheme
-
-
+                # store the amplicon's index in the scheme, the current primer's original name and its details
+                current_primer = (amp_index, amp[primer][-1], amp[primer][:-1])
+                current_seq = current_primer[2][0]
                 for tested in tested_primers:
-                    tested_seq = tested[
+                    tested_seq = tested[2][0]
                     if primers.calc_dimer(current_seq, tested_seq).tm <= config.PRIMER_MAX_DIMER_TMP:
                         continue
                     primer_dimers.append((current_primer, tested))
@@ -309,22 +315,22 @@ def get_overlapping_primers(dimer, left_primer_candidates, right_primer_candidat
 
     overlapping_primers = []
     # test each primer in dimer
-    for primer in dimer:
+    for amp_index, primer_name, primer in dimer:
         overlapping_primers_temp = []
-        thirds_len = int((primer[
+        thirds_len = int((primer[2] - primer[1]) / 3)
         # get the middle third of the primer (here are the previously excluded primers)
-        overlap_set = set(range(primer[
+        overlap_set = set(range(primer[1] + thirds_len, primer[2] - thirds_len))
         # check in which list to look for them
-        if "RIGHT" in
+        if "RIGHT" in primer_name:
             primers_to_test = right_primer_candidates
         else:
             primers_to_test = left_primer_candidates
         # and check this list for all primers that overlap
         for potential_new in primers_to_test:
-            primer_positions = list(range(potential_new[1], potential_new[2]
+            primer_positions = list(range(potential_new[1], potential_new[2]))
             if not any(x in primer_positions for x in overlap_set):
                 continue
-            overlapping_primers_temp.append((
+            overlapping_primers_temp.append((amp_index, primer_name, potential_new))
 
         overlapping_primers.append(overlapping_primers_temp)
 
@@ -339,7 +345,7 @@ def test_overlaps_for_dimers(overlapping_primers):
         for second_overlap in overlapping_primers[1]:
             # return the first match. primers are sorted by penalty.
             # first pair that makes it has the lowest penalty
-            if primers.calc_dimer(first_overlap[
+            if primers.calc_dimer(first_overlap[2][0], second_overlap[2][0]).tm <= config.PRIMER_MAX_DIMER_TMP:
                 return [first_overlap, second_overlap]
 
 
@@ -360,53 +366,55 @@ def check_and_solve_heterodimers(amplicon_scheme, left_primer_candidates, right_
         return []
 
     for dimer in primer_dimers:
-        # get overlapping primers that have not been
+        # get overlapping primers that have not been considered
        overlapping_primers = get_overlapping_primers(dimer, left_primer_candidates, right_primer_candidates)
         # test all possible primers against each other for dimers
         new_primers = test_overlaps_for_dimers(overlapping_primers)
         # now change these primers in the scheme
         if new_primers:
-            for
+            for amp_index, primer_name, primer in new_primers:
                 # overwrite in final scheme
-
-                # and
-                if "LEFT" in
+                # ATTENTION: doesn't update the amplicon penalty currently
+                # This is ok only because that value isn't used after and doesn't get reported anywhere.
+                if "LEFT" in primer_name:
                     strand = "+"
+                    amplicon_scheme[amp_index]["LEFT"] = primer + [primer_name]
                 else:
                     strand = "-"
-
-
-
+                    amplicon_scheme[amp_index]["RIGHT"] = primer + [primer_name]
+                amplicon_scheme[amp_index]["length"] = amplicon_scheme[amp_index]["RIGHT"][2] - amplicon_scheme[amp_index]["LEFT"][1]
+                # and in all primers
+                all_primers[strand][primer_name] = primer
+    # get remaining dimers in the revised scheme and add pool identifier for reporting
+    primer_dimers = [
+        (amplicon_scheme[primer1[0]]["pool"], primer1, primer2)
+        for primer1, primer2 in test_scheme_for_dimers(amplicon_scheme)
+    ]
 
     return primer_dimers
 
 
-def find_single_amplicons(amplicons,
+def find_single_amplicons(amplicons, n):
     """
     find non-overlapping amplicons with low penalties
     from all found amplicons. only for the SINGLE mode.
     """
     # sort amplicons
-    sorted_amplicons = sorted(amplicons
-    to_retain = [
-
-    amplicon_set = set(amplicon_range)
+    sorted_amplicons = sorted(amplicons, key=lambda x: (x.get("off_targets", False), x["penalty"]))
+    to_retain = []
+    retained_ranges = []
     # find lowest non-overlapping
     for amp in sorted_amplicons:
-
-
-
+        overlaps_retained = False
+        amp_range = range(amp["LEFT"][1], amp["RIGHT"][2])
+        for r in retained_ranges:
+            if amp_range.start < r.stop and r.start < amp_range.stop:
+                overlaps_retained = True
+                break
+        if not overlaps_retained:
+            retained_ranges.append(amp_range)
             to_retain.append(amp)
-
-    scheme_dictionary = {0: {}}
-    counter = 1
-    for amp in to_retain:
-        scheme_dictionary[0][amp[0]] = {}
-        scheme_dictionary[0][amp[0]][amp[1][2]] = all_primers["+"][amp[1][2]]
-        scheme_dictionary[0][amp[0]][amp[1][3]] = all_primers["-"][amp[1][3]]
-    if n != float("inf"):
-        if counter == n:
+            if len(to_retain) == n:
                 break
-            counter += 1
 
-    return
+    return to_retain
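Note on the `find_single_amplicons` rewrite above: an amplicon is kept only if the range spanned by its primers does not overlap any already retained range; the condition `amp_range.start < r.stop and r.start < amp_range.stop` is the usual half-open interval overlap test. A minimal sketch with hypothetical coordinates:

```python
def overlaps(a: range, b: range) -> bool:
    # two half-open ranges overlap iff each one starts before the other ends
    return a.start < b.stop and b.start < a.stop

retained = [range(100, 1150)]                    # already accepted amplicon
print(overlaps(range(1100, 2100), retained[0]))  # True  -> would be skipped
print(overlaps(range(1200, 2200), retained[0]))  # False -> would be retained
```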
{varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: varvamp
-Version: 1.1.3
+Version: 1.2.1
 Summary: Variable VirusAMPlicons (varVAMP) is a tool to design primers for highly diverse viruses
 Home-page: https://github.com/jonas-fuchs/varVAMP
 Author: Dr. Jonas Fuchs
@@ -61,15 +61,13 @@ For a lot of virus genera it is difficult to design pan-specific primers. varVAM
 
 We, in collaboration with specialists for the respective viruses, have already designed and wet-lab evaluated primer schemes for various viral pathogens. All the input data and varVAMP outputs are freely available [here](https://github.com/jonas-fuchs/ViralPrimerSchemes).
 
-
+Moreover, varVAMP primers are now available at [primerschemes](https://labs.primalscheme.com/). varVAMP now reports primer bed files in ARTICv3 format. Feel free to contribute newly designed schemes via this [Github repository of the QuickLab](https://github.com/quick-lab/primerschemes). Use [primal-page](https://github.com/ChrisgKent/primal-page) developed by [Chris Kent](https://github.com/ChrisgKent) to generate data for compatible pull-requests.
 
-# Citing varVAMP
-
-Please cite with the respective DOI of the version you used:
+# Citing varVAMP
 
 **varVAMP: automated pan-specific primer design for tiled full genome sequencing and qPCR of highly diverse viral pathogens.**
 
-(
+[biorxiv preprint](https://doi.org/10.1101/2024.05.08.593102)
 
 ---
 
varvamp-1.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,21 @@
+varvamp/__init__.py,sha256=dcU1t4AmPwg3LoG01RN2Q4VvzPKoZ00gRfg-qoofv28,107
+varvamp/__main__.py,sha256=9R3mbX2_Q5LByPx8WLoTbvZ-G2dbRSMpDlgze0Wm5Fc,98
+varvamp/command.py,sha256=Oh6WdYgbPKRChrxCoOOEFJHjINF3BIXahmSlYILDfPU,19723
+varvamp/scripts/__init__.py,sha256=DtRsgfnA60BvccKMuD-Ueo1UpxaeANUfIWxGi1xPcV0,46
+varvamp/scripts/alignment.py,sha256=w2I7BnaUt_rh1EaDQaU9Q_dmL_q019NtnaC8YMccOgM,7317
+varvamp/scripts/blast.py,sha256=yF1-FjL7-P8sxSO6x4Q15gzj_I3LbE8ZfP37w6K3Akk,8434
+varvamp/scripts/consensus.py,sha256=eU5eKU9iXyWeln2BW_LMPI8JiEq0_NXBI0jy4kMOvQg,3375
+varvamp/scripts/default_config.py,sha256=8yjZ1ugZ5pus1daWRXeQMo-lVIikJXV4_q7FPEkTS64,3766
+varvamp/scripts/get_config.py,sha256=Kvg3YhttUbDeRxjKhfxLjI5JSKpPoiLSEYqcE6rT4Xk,2940
+varvamp/scripts/logging.py,sha256=bk7SA7DtNtKtAJXRGLAQyA0stpLqBmRUydBG0dpoZQ0,20820
+varvamp/scripts/param_estimation.py,sha256=jpfmbjCFp23V07af3y1uO2dQZJiWLlyOBa11aMphkko,3694
+varvamp/scripts/primers.py,sha256=zLNs3lWiHS0dBmRIMpi8v0XxWi26JrS43YJW7xhEneM,13422
+varvamp/scripts/qpcr.py,sha256=qO6JTl36WMUZGoz6NHIjRrtMOF87yQedmj-sE85Yay8,14453
+varvamp/scripts/regions.py,sha256=VgYAlZmkC3rT946yhAdxATi-YT4oGcEwAPa0BkkfWIg,3239
+varvamp/scripts/reporting.py,sha256=_o-A-X1OPEB6ym0w-q3Tb0nehVJwuCbsoyspZjzmvs4,22921
+varvamp/scripts/scheme.py,sha256=lB-HT-rFSjN0o1KemzojyMT3jBw-LSrDGakshBXCAR4,16198
+varvamp-1.2.1.dist-info/METADATA,sha256=tqQlI16IR0-DMDNYV1e6Ka9SzbzKbtqF1zV4gwBiVRM,5382
+varvamp-1.2.1.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+varvamp-1.2.1.dist-info/entry_points.txt,sha256=puzW-basyBexZT4JVRUfUEqobvFmEyfqRQaqFjp7rB0,49
+varvamp-1.2.1.dist-info/top_level.txt,sha256=11oVwE3SBUB9aTmvpvEDru95Tc5GZqQikzzFjw2eVGc,8
+varvamp-1.2.1.dist-info/RECORD,,
varvamp-1.1.3.dist-info/RECORD
DELETED
@@ -1,21 +0,0 @@
-varvamp/__init__.py,sha256=060aYAsVPugZY2O5wRQHxoHkmtzcWkS2hjeksYxJxbU,107
-varvamp/__main__.py,sha256=9R3mbX2_Q5LByPx8WLoTbvZ-G2dbRSMpDlgze0Wm5Fc,98
-varvamp/command.py,sha256=rCP0k7wb2vbnlSKwTRkla4_pGQBl972EGmyWRBjfKXA,18516
-varvamp/scripts/__init__.py,sha256=DtRsgfnA60BvccKMuD-Ueo1UpxaeANUfIWxGi1xPcV0,46
-varvamp/scripts/alignment.py,sha256=w2I7BnaUt_rh1EaDQaU9Q_dmL_q019NtnaC8YMccOgM,7317
-varvamp/scripts/blast.py,sha256=9gDijzLI0jcsk3b4oJ6a0r-bjIWkNcn_NAQ2HlYO86s,9699
-varvamp/scripts/consensus.py,sha256=eU5eKU9iXyWeln2BW_LMPI8JiEq0_NXBI0jy4kMOvQg,3375
-varvamp/scripts/default_config.py,sha256=FkZUiIy8McyFXRzd8UTTTVRPbWNS8Oxmye3i-WsyDWw,3876
-varvamp/scripts/get_config.py,sha256=Kvg3YhttUbDeRxjKhfxLjI5JSKpPoiLSEYqcE6rT4Xk,2940
-varvamp/scripts/logging.py,sha256=ftOrySTkh5y0dEXQSDMJHMjA7Q9myJv9EhRcb6SmERc,20624
-varvamp/scripts/param_estimation.py,sha256=jpfmbjCFp23V07af3y1uO2dQZJiWLlyOBa11aMphkko,3694
-varvamp/scripts/primers.py,sha256=IEOoccW5OKK8d2a0ZQ1L1CICZIzOqKfeZp5e8iE717I,13428
-varvamp/scripts/qpcr.py,sha256=Oh1HUJ6QF3dvghS5LceUgZqC5UHNB1arOsxQVNa-WnY,14483
-varvamp/scripts/regions.py,sha256=VgYAlZmkC3rT946yhAdxATi-YT4oGcEwAPa0BkkfWIg,3239
-varvamp/scripts/reporting.py,sha256=CT_ZD9-73Di56zBHmaQ8Kr4VkWjqf8zgb0ohk8_8Puo,21338
-varvamp/scripts/scheme.py,sha256=rHeigj-tYyLJgB4w5F5tXZA-kkXkpDZ-kKpdE1Elonk,15229
-varvamp-1.1.3.dist-info/METADATA,sha256=lmMlO4FDPWNxhbxON1i_VBUYf6y_EwxKoOKBNZjk3mo,5149
-varvamp-1.1.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-varvamp-1.1.3.dist-info/entry_points.txt,sha256=puzW-basyBexZT4JVRUfUEqobvFmEyfqRQaqFjp7rB0,49
-varvamp-1.1.3.dist-info/top_level.txt,sha256=11oVwE3SBUB9aTmvpvEDru95Tc5GZqQikzzFjw2eVGc,8
-varvamp-1.1.3.dist-info/RECORD,,
{varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/entry_points.txt
File without changes
{varvamp-1.1.3.dist-info → varvamp-1.2.1.dist-info}/top_level.txt
File without changes