opencloning 0.3.2__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opencloning/app_settings.py +8 -0
- opencloning/batch_cloning/pombe/pombe_get_primers.py +3 -3
- opencloning/bug_fixing/backend_v0_3.py +1 -2
- opencloning/dna_functions.py +12 -12
- opencloning/endpoints/external_import.py +7 -7
- opencloning/http_client.py +53 -0
- opencloning/ncbi_requests.py +5 -6
- {opencloning-0.3.2.dist-info → opencloning-0.3.4.dist-info}/METADATA +2 -3
- {opencloning-0.3.2.dist-info → opencloning-0.3.4.dist-info}/RECORD +11 -10
- {opencloning-0.3.2.dist-info → opencloning-0.3.4.dist-info}/LICENSE +0 -0
- {opencloning-0.3.2.dist-info → opencloning-0.3.4.dist-info}/WHEEL +0 -0
opencloning/app_settings.py
CHANGED

@@ -28,6 +28,9 @@ PLANNOTATE_TIMEOUT = int(os.environ['PLANNOTATE_TIMEOUT']) if 'PLANNOTATE_TIMEOU
 if PLANNOTATE_URL is not None and not PLANNOTATE_URL.endswith('/'):
     PLANNOTATE_URL += '/'
 
+PROXY_URL = os.environ.get('PROXY_URL')
+PROXY_CERT_FILE = os.environ.get('PROXY_CERT_FILE')
+
 
 class Settings(BaseModel):
     SERVE_FRONTEND: bool
@@ -37,6 +40,9 @@ class Settings(BaseModel):
     ALLOWED_ORIGINS: list[str]
     PLANNOTATE_URL: str | None
     PLANNOTATE_TIMEOUT: int
+    PROXY_URL: str | None
+    # Must be a full path to the proxy certificate file
+    PROXY_CERT_FILE: str | None
 
 
 settings = Settings(
@@ -47,4 +53,6 @@ settings = Settings(
     ALLOWED_ORIGINS=ALLOWED_ORIGINS,
     PLANNOTATE_URL=PLANNOTATE_URL,
     PLANNOTATE_TIMEOUT=PLANNOTATE_TIMEOUT,
+    PROXY_URL=PROXY_URL,
+    PROXY_CERT_FILE=PROXY_CERT_FILE,
 )
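The two new settings are read from the environment when the module is imported. A minimal sketch of how a deployment might opt into the proxy; the proxy URL and certificate path below are hypothetical and do not ship with the package:

    import os

    # Hypothetical values; they must be set before opencloning.app_settings is imported
    os.environ['PROXY_URL'] = 'http://proxy.example.internal:3128'      # route outbound requests through this proxy
    os.environ['PROXY_CERT_FILE'] = '/etc/ssl/certs/corp-proxy-ca.pem'  # full path to the proxy CA certificate

    from opencloning.app_settings import settings
    print(settings.PROXY_URL, settings.PROXY_CERT_FILE)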

opencloning/batch_cloning/pombe/pombe_get_primers.py
CHANGED

@@ -1,12 +1,12 @@
 from bs4 import BeautifulSoup
 import asyncio
-from httpx import AsyncClient, Response
 import re
 import os
 import argparse
 from Bio import SeqIO
 from Bio.Seq import Seq
 from Bio.SeqRecord import SeqRecord
+from ...http_client import get_http_client, Response
 
 default_settings_primer_design = {
     'length': 80,
@@ -25,8 +25,8 @@ default_settings_primer_design = {
 
 
 async def async_post(url, headers, data, params=None) -> Response:
-    async with
-        return await client.post(url, headers=headers, data=data, params=params)
+    async with get_http_client() as client:
+        return await client.post(url, headers=headers, data=data, params=params, timeout=20.0)
 
 
 async def get_primers(gene):
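With the shared client in place, async_post keeps its signature but now enforces a 20-second timeout on the POST. A minimal sketch of how it could be driven; the URL, headers and payload are placeholders, not the service the script actually targets:

    import asyncio

    async def example():
        resp = await async_post(
            'https://example.org/primer-design',                             # placeholder endpoint; a real call must match a whitelisted URL in opencloning/http_client.py
            headers={'Content-Type': 'application/x-www-form-urlencoded'},
            data={'gene': 'ase1'},                                           # placeholder payload
        )
        return resp.status_code

    # asyncio.run(example())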

opencloning/bug_fixing/backend_v0_3.py
CHANGED

@@ -12,7 +12,6 @@ from ..pydantic_models import (
 from .._version import __version__
 import json
 import os
-from packaging import version
 import copy
 
 
@@ -96,7 +95,7 @@ def main(file_path: str):
     cs = fix_backend_v0_3(data)
 
     if cs is not None:
-        cs.backend_version = __version__
+        cs.backend_version = __version__
         with open(new_file_path, 'w') as f:
             f.write(cs.model_dump_json(indent=2, exclude_none=True))
 
opencloning/dna_functions.py
CHANGED

@@ -7,7 +7,6 @@ from pydna.dseq import Dseq
 from .pydantic_models import TextFileSequence, AddgeneIdSource, SequenceFileFormat, WekWikGeneIdSource, SEVASource
 from opencloning_linkml.datamodel import PlannotateAnnotationReport
 from pydna.parsers import parse as pydna_parse
-import requests
 from bs4 import BeautifulSoup
 import regex
 from Bio.SeqFeature import SimpleLocation, Location
@@ -18,7 +17,7 @@ import io
 import warnings
 from Bio.SeqIO.InsdcIO import GenBankIterator, GenBankScanner
 import re
-import
+from .http_client import get_http_client, ConnectError, TimeoutException
 from .ncbi_requests import get_genbank_sequence
 
 
@@ -72,7 +71,7 @@ async def get_sequences_from_file_url(
     url: str, format: SequenceFileFormat = SequenceFileFormat('genbank')
 ) -> list[Dseqrecord]:
     # TODO once pydna parse is fixed it should handle urls that point to non-gb files
-    async with
+    async with get_http_client() as client:
         resp = await client.get(url)
 
     if resp.status_code != 200:
@@ -83,8 +82,9 @@ async def get_sequences_from_file_url(
     return custom_file_parser(io.StringIO(resp.text), format)
 
 
-def
-
+async def get_sequence_from_snapgene_url(url: str) -> Dseqrecord:
+    async with get_http_client() as client:
+        resp = await client.get(url)
     # Check that resp.content is not empty
     if len(resp.content) == 0:
         raise HTTPError(url, 404, 'invalid snapgene id', 'invalid snapgene id', None)
@@ -96,7 +96,7 @@ def get_sequence_from_snagene_url(url: str) -> Dseqrecord:
 async def request_from_addgene(source: AddgeneIdSource) -> tuple[Dseqrecord, AddgeneIdSource]:
 
     url = f'https://www.addgene.org/{source.repository_id}/sequences/'
-    async with
+    async with get_http_client() as client:
         resp = await client.get(url)
     if resp.status_code == 404:
         raise HTTPError(url, 404, 'wrong addgene id', 'wrong addgene id', None)
@@ -150,7 +150,7 @@ async def request_from_addgene(source: AddgeneIdSource) -> tuple[Dseqrecord, Add
 
 async def request_from_wekwikgene(source: WekWikGeneIdSource) -> tuple[Dseqrecord, WekWikGeneIdSource]:
     url = f'https://wekwikgene.wllsb.edu.cn/plasmids/{source.repository_id}'
-    async with
+    async with get_http_client() as client:
         resp = await client.get(url)
     if resp.status_code == 404:
         raise HTTPError(url, 404, 'invalid wekwikgene id', 'invalid wekwikgene id', None)
@@ -333,10 +333,10 @@ def custom_file_parser(
 
 async def get_sequence_from_euroscarf_url(plasmid_id: str) -> Dseqrecord:
     url = f'http://www.euroscarf.de/plasmid_details.php?accno={plasmid_id}'
-    async with
+    async with get_http_client() as client:
         try:
             resp = await client.get(url)
-        except
+        except ConnectError as e:
             raise HTTPError(url, 504, 'could not connect to euroscarf', 'could not connect to euroscarf', None) from e
     # I don't think this ever happens
     if resp.status_code != 200:
@@ -365,7 +365,7 @@ async def get_sequence_from_euroscarf_url(plasmid_id: str) -> Dseqrecord:
 async def annotate_with_plannotate(
     file_content: str, file_name: str, url: str, timeout: int = 20
 ) -> tuple[Dseqrecord, PlannotateAnnotationReport, str]:
-    async with
+    async with get_http_client() as client:
         try:
             response = await client.post(
                 url,
@@ -379,9 +379,9 @@ async def annotate_with_plannotate(
             dseqr = custom_file_parser(io.StringIO(data['gb_file']), 'genbank')[0]
             report = [PlannotateAnnotationReport.model_validate(r) for r in data['report']]
             return dseqr, report, data['version']
-        except
+        except TimeoutException as e:
             raise HTTPError(url, 504, 'plannotate server timeout', 'plannotate server timeout', None) from e
-        except
+        except ConnectError as e:
             raise HTTPError(
                 url, 500, 'cannot connect to plannotate server', 'cannot connect to plannotate server', None
             ) from e
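The pattern is the same in every helper: open the shared client, then translate httpx's ConnectError / TimeoutException into the HTTPError values the endpoints already handle. A minimal sketch of that pattern, assuming HTTPError is urllib.error.HTTPError (which matches the five-argument calls in the diff); the helper name is illustrative:

    from urllib.error import HTTPError
    from opencloning.http_client import get_http_client, ConnectError, TimeoutException

    async def fetch_text(url: str) -> str:
        async with get_http_client() as client:
            try:
                resp = await client.get(url)
            except ConnectError as e:
                raise HTTPError(url, 504, 'could not connect', 'could not connect', None) from e
            except TimeoutException as e:
                raise HTTPError(url, 504, 'server timeout', 'server timeout', None) from e
        if resp.status_code != 200:
            raise HTTPError(url, resp.status_code, 'request failed', 'request failed', None)
        return resp.text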

opencloning/endpoints/external_import.py
CHANGED

@@ -3,7 +3,6 @@ from pydantic import create_model
 import io
 import warnings
 import asyncio
-import httpx
 from starlette.responses import RedirectResponse
 from Bio import BiopythonParserWarning
 from typing import Annotated
@@ -31,13 +30,14 @@ from ..dna_functions import (
     request_from_addgene,
     request_from_wekwikgene,
     get_sequences_from_file_url,
-
+    get_sequence_from_snapgene_url,
     custom_file_parser,
     get_sequence_from_euroscarf_url,
     get_seva_plasmid,
 )
 from .. import request_examples
 from .. import ncbi_requests
+from ..http_client import ConnectError
 
 
 router = get_router()
@@ -262,7 +262,7 @@ async def get_from_repository_id_genbank(source: RepositoryIdSource):
         if seq_length > 100000:
             raise HTTPException(400, 'sequence is too long (max 100000 bp)')
         seq = await ncbi_requests.get_genbank_sequence(source.repository_id)
-    except
+    except ConnectError as exception:
         raise HTTPException(504, f'Unable to connect to NCBI: {exception}')
 
     return {'sequences': [format_sequence_genbank(seq, source.output_name)], 'sources': [source.model_copy()]}
@@ -279,7 +279,7 @@ async def get_from_repository_id_addgene(source: AddgeneIdSource):
         dseq, out_source = await request_from_addgene(source)
     except HTTPError as exception:
         repository_id_http_error_handler(exception, source)
-    except
+    except ConnectError:
         raise HTTPException(504, 'unable to connect to Addgene')
 
     return {'sequences': [format_sequence_genbank(dseq, source.output_name)], 'sources': [out_source]}
@@ -296,7 +296,7 @@ async def get_from_repository_id_wekwikgene(source: WekWikGeneIdSource):
         dseq, out_source = await request_from_wekwikgene(source)
     except HTTPError as exception:
         repository_id_http_error_handler(exception, source)
-    except
+    except ConnectError:
         raise HTTPException(504, 'unable to connect to WekWikGene')
     return {'sequences': [format_sequence_genbank(dseq, source.output_name)], 'sources': [out_source]}
 
@@ -332,7 +332,7 @@ async def get_from_repository_id_snapgene(
     try:
         plasmid_set, plasmid_name = source.repository_id.split('/')
         url = f'https://www.snapgene.com/local/fetch.php?set={plasmid_set}&plasmid={plasmid_name}'
-        dseq =
+        dseq = await get_sequence_from_snapgene_url(url)
         # Unless a name is provided, we use the plasmid name from snapgene
         if source.output_name is None:
             source.output_name = plasmid_name
@@ -467,7 +467,7 @@ async def get_from_repository_id_seva(source: SEVASource):
         return {'sequences': [format_sequence_genbank(dseq, source.output_name)], 'sources': [source]}
     except HTTPError as exception:
         repository_id_http_error_handler(exception, source)
-    except
+    except ConnectError:
         raise HTTPException(504, 'unable to connect to SEVA')
     except Exception as exception:
         raise HTTPException(400, f'Error parsing file: {exception}')

opencloning/http_client.py
ADDED

@@ -0,0 +1,53 @@
+# Response is imported into other modules from here
+from httpx import (  # noqa: F401
+    Response,
+    AsyncClient,
+    ConnectError,
+    TimeoutException,
+    AsyncHTTPTransport,
+    Request,
+    RequestError,
+)
+import ssl
+import certifi
+from .app_settings import settings
+import re
+
+white_listed_urls = {
+    r'^https://www.addgene.org/',
+    r'^https://media.addgene.org/',
+    r'^https://wekwikgene.wllsb.edu.cn',
+    r'^https://seva-plasmids.com/',
+    r'^https://api.ncbi.nlm.nih.gov/datasets/v2alpha/',
+    r'^https://eutils.ncbi.nlm.nih.gov/entrez/eutils/',
+    r'^https://www.snapgene.com/local/fetch.php',
+    r'^https://benchling.com/',
+    r'^https://raw.githubusercontent.com/manulera/annotated-igem-distribution',
+    r'^http://www.euroscarf.de/',
+}
+
+if settings.PLANNOTATE_URL:
+    white_listed_urls.add(settings.PLANNOTATE_URL)
+
+
+class WhiteListTransport(AsyncHTTPTransport):
+    async def handle_async_request(self, request: Request) -> Response:
+        if any(re.match(url, str(request.url)) for url in white_listed_urls):
+            return await super().handle_async_request(request)
+
+        raise RequestError(f'Request to {request.url} is not whitelisted')
+
+
+proxy = None
+if settings.PROXY_URL:
+    proxy = settings.PROXY_URL
+
+
+def get_http_client():
+    transport = WhiteListTransport()
+    client_ctx = None
+    if proxy is not None:
+        client_ctx = ssl.create_default_context(cafile=certifi.where())
+        if settings.PROXY_CERT_FILE:
+            client_ctx.load_verify_locations(cafile=settings.PROXY_CERT_FILE)
+    return AsyncClient(proxy=proxy, verify=client_ctx, transport=transport)
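The new module funnels every outbound request through one place: a client whose transport only lets whitelisted URL prefixes through, optionally routed via PROXY_URL with PROXY_CERT_FILE added to the trust store. A minimal usage sketch; the Addgene URL is only an example of a whitelisted prefix, and example.com is not on the list:

    import asyncio
    from opencloning.http_client import get_http_client, RequestError

    async def demo():
        async with get_http_client() as client:
            # Allowed: matches the ^https://www.addgene.org/ pattern
            resp = await client.get('https://www.addgene.org/browse/')
            print(resp.status_code)
            try:
                # Blocked: no whitelist pattern matches, so WhiteListTransport raises RequestError
                await client.get('https://example.com/')
            except RequestError as exc:
                print(exc)

    # asyncio.run(demo())
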
opencloning/ncbi_requests.py
CHANGED

@@ -1,24 +1,23 @@
-import requests
 from fastapi import HTTPException
 from pydna.parsers import parse as pydna_parse
-from httpx import AsyncClient, Response
 from pydna.dseqrecord import Dseqrecord
 from .app_settings import settings
+from .http_client import get_http_client, Response
 
 headers = None if settings.NCBI_API_KEY is None else {'api_key': settings.NCBI_API_KEY}
 
 
 async def async_get(url, headers, params=None) -> Response:
-    async with
-        return await client.get(url, headers=headers, params=params)
+    async with get_http_client() as client:
+        return await client.get(url, headers=headers, params=params, timeout=20.0)
 
 
 # TODO: this does not return old assembly accessions, see https://github.com/ncbi/datasets/issues/380#issuecomment-2231142816
-def get_assembly_accession_from_sequence_accession(sequence_accession: str) -> list[str]:
+async def get_assembly_accession_from_sequence_accession(sequence_accession: str) -> list[str]:
     """Get the assembly accession from a sequence accession"""
 
     url = f'https://api.ncbi.nlm.nih.gov/datasets/v2alpha/genome/sequence_accession/{sequence_accession}/sequence_assemblies'
-    resp =
+    resp = await async_get(url, headers=headers)
     data = resp.json()
     if 'accessions' in data:
         return data['accessions']
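Besides routing through the shared client, get_assembly_accession_from_sequence_accession is now a coroutine, so existing callers have to await it. A minimal sketch, using an example accession:

    import asyncio
    from opencloning import ncbi_requests

    async def demo():
        # Returns the assembly accessions NCBI Datasets reports for this sequence accession
        return await ncbi_requests.get_assembly_accession_from_sequence_accession('NC_003424.3')

    # print(asyncio.run(demo()))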

{opencloning-0.3.2.dist-info → opencloning-0.3.4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: opencloning
-Version: 0.3.2
+Version: 0.3.4
 Summary: Backend of OpenCloning, a web application to generate molecular cloning strategies in json format, and share them with others.
 License: MIT
 Author: Manuel Lera-Ramirez
@@ -14,7 +14,7 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: beautifulsoup4 (>=4.11.1,<5.0.0)
 Requires-Dist: biopython (==1.84)
 Requires-Dist: fastapi
-Requires-Dist: httpx (>=0.
+Requires-Dist: httpx (>=0.28.1,<0.29.0)
 Requires-Dist: opencloning-linkml (==0.3.0a0)
 Requires-Dist: openpyxl (>=3.1.5,<4.0.0)
 Requires-Dist: packaging (>=25.0,<26.0)
@@ -25,7 +25,6 @@ Requires-Dist: pydna (==5.5.0)
 Requires-Dist: python-multipart
 Requires-Dist: pyyaml (>=6.0.2,<7.0.0)
 Requires-Dist: regex (>=2023.10.3,<2024.0.0)
-Requires-Dist: requests (>=2.31.0,<3.0.0)
 Requires-Dist: uvicorn
 Project-URL: Repository, https://github.com/manulera/OpenCloning_backend
 Description-Content-Type: text/markdown

{opencloning-0.3.2.dist-info → opencloning-0.3.4.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
 opencloning/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 opencloning/_version.py,sha256=6QbWXLSZypjtWL_CwJFHH4dzMRK3AUH4B0YudzvGz9s,200
 opencloning/api_config_utils.py,sha256=inAXPGYNDz-DuEoSqitImj0Vv5TpQSbMZH9D3dQb5P0,4319
-opencloning/app_settings.py,sha256=
+opencloning/app_settings.py,sha256=MScy0le1Dn00rxqDkpeoBLWwjWQTyzHBQ-MqSDdmoe4,2054
 opencloning/assembly2.py,sha256=_UdQCnjc2wmWoeVT9NKwXcYUAEBrBT0OSBNMbGYAIL8,57041
 opencloning/batch_cloning/EBIC/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 opencloning/batch_cloning/EBIC/barcode.gb,sha256=G6kP6MuY23S-n3xg16LQaTasFtYFqik5eEgcooZ9ATM,815
@@ -15,36 +15,37 @@ opencloning/batch_cloning/pombe/index.html,sha256=3YchoKGpcKDfvTOW1Rdih4PkbZIkMj
 opencloning/batch_cloning/pombe/pombe_all.sh,sha256=0yvDdBaIdt2RsIrvnjgn5L3KtYBToq3Rl8-X8RFHibE,364
 opencloning/batch_cloning/pombe/pombe_clone.py,sha256=OY6yOlBK-9OAmHu3HUhP50mIXvyR0HJg1_2OjBFifV8,8123
 opencloning/batch_cloning/pombe/pombe_gather.py,sha256=qI-biMZGLxVDV-vOf3sT6bS1BawtTihZNcSYNlnnEi8,2432
-opencloning/batch_cloning/pombe/pombe_get_primers.py,sha256=
+opencloning/batch_cloning/pombe/pombe_get_primers.py,sha256=nrn6V4_l8FWWfqS4WU3qPt95KwzvCXxETh8E-zWbRhM,3968
 opencloning/batch_cloning/pombe/pombe_summary.py,sha256=W9DLpnCuwK7w2DhHLu60N7L6jquuYubD3ZRFwdhNPVw,4033
 opencloning/batch_cloning/ziqiang_et_al2024/__init__.py,sha256=zZUbj3uMzd9rKMXi5s9LQ1yUg7sccdS0f_4kpw7SQlk,7584
 opencloning/batch_cloning/ziqiang_et_al2024/index.html,sha256=EDncANDhhQkhi5FjnnAP6liHkG5srf4_Y46IrnMUG5g,4607
 opencloning/batch_cloning/ziqiang_et_al2024/ziqiang_et_al2024.json,sha256=d-7oXbxoKhMKLz4FJ2OGDdedWTisoaRqLAh1NZnScBg,157189
 opencloning/bug_fixing/README.md,sha256=fGf_4HPTZ2TjGfIOZzw8IAD78-N-RnBrX8mz14wjXEQ,4237
 opencloning/bug_fixing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-opencloning/bug_fixing/backend_v0_3.py,sha256=
+opencloning/bug_fixing/backend_v0_3.py,sha256=BTnqCXYIcOShut0WfwH6tDjJ1fecAHFHVSDTOKJgUZw,4119
 opencloning/cre_lox.py,sha256=x_OVYzfaLJH5eVyp05_I9YNycT606UL683AswhQ-gjU,4294
-opencloning/dna_functions.py,sha256=
+opencloning/dna_functions.py,sha256=WvEiKuZEzy3mXjIkOMv4Ymej6xQ3gm4mymM8JCgvE_o,16611
 opencloning/dna_utils.py,sha256=uv97aO04dbk3NnqbN6GlnwOu0MOpK88rl2np2QcEQ4Y,6301
 opencloning/ebic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 opencloning/ebic/primer_design.py,sha256=gPZTF9w5SV7WGgnefp_HBM831y0z73M1Kb0QUPnbfIM,2270
 opencloning/ebic/primer_design_settings.py,sha256=OnFsuh0QCvplUEPXLZouzRo9R7rm4nLbcd2LkDCiIDM,1896
 opencloning/endpoints/annotation.py,sha256=3rlIXeNQzoqPD9lJUEBGLGxvlhUCTcfkqno814A8P0U,2283
 opencloning/endpoints/assembly.py,sha256=MMwvlyM2NHnT04Pp7LdKQRvahq0itdqTtZx3OQhOswc,20980
-opencloning/endpoints/external_import.py,sha256=
+opencloning/endpoints/external_import.py,sha256=i2hAKSYi2IIdglmDhDhWmBzHbhn6A9A8ybbj4nmhROM,18188
 opencloning/endpoints/no_assembly.py,sha256=NY6rhEDCNoZVn6Xk81cen2n-FkMr7ierfxM8G0npbQs,4722
 opencloning/endpoints/no_input.py,sha256=DuqKD3Ph3a44ZxPMEzZv1nwD5xlxYsN7YyxXcfjSUFc,3844
 opencloning/endpoints/other.py,sha256=7YBXU5UrVCjEjOjdYWw-0sASXn3MhWVZYwDYSZD4C9E,3452
 opencloning/endpoints/primer_design.py,sha256=3eiQ7MwgeLoAuXFUMNF-DzjzwH_eJGCjd4s32CjxIic,12717
 opencloning/gateway.py,sha256=pFB3gsCQL715kOFOP1NQOOsQqrkWuQe5qXk4IunF5SA,8486
 opencloning/get_router.py,sha256=l2DXaTbeL2tDqlnVMlcewutzt1sjaHlxku1X9HVUwJk,252
+opencloning/http_client.py,sha256=nepAAJXA9Wtehjeex3PuQRbUlB0kbhxSOPqzcg7gMtY,1624
 opencloning/main.py,sha256=l9PrPBMtGMEWxAPiPWR15Qv2oDNnRoNd8H8E3bZW6Do,3750
-opencloning/ncbi_requests.py,sha256=
+opencloning/ncbi_requests.py,sha256=b8ow9TDpXbyYk_0HeK-7RXWwwZGrhH-MylSNc3_tH0I,5557
 opencloning/primer_design.py,sha256=nqCmYIZ7UvU4CQwVGJwX7T5LTHwt3-51_ZcTZZAgT_Y,9175
 opencloning/pydantic_models.py,sha256=lMO78M4MwDgzTEGz9qzsaADwAFXagWK4qGsF1K1hLZw,18865
 opencloning/request_examples.py,sha256=QAsJxVaq5tHwlPB404IiJ9WC6SA7iNY7XnJm63BWT_E,2944
 opencloning/utils.py,sha256=0Lvw1h1AsUJTK2b9mNzYVi_DBeWmWCFA5dIPl_gERcI,1479
-opencloning-0.3.
-opencloning-0.3.
-opencloning-0.3.
-opencloning-0.3.
+opencloning-0.3.4.dist-info/LICENSE,sha256=VSdVE1f8axjIh6gvo9ZZygJdTVkRFMcwCW_hvjOHC_w,1058
+opencloning-0.3.4.dist-info/METADATA,sha256=beszv-cKQj-br6Zcn9kW_v-PiaHe-MIOJ6PUZ7atBPs,9041
+opencloning-0.3.4.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+opencloning-0.3.4.dist-info/RECORD,,
|
|
File without changes
|