pybiolib 1.1.1711__py3-none-any.whl → 1.1.1730__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- biolib/_internal/http_client.py +12 -8
- biolib/api/client.py +12 -6
- biolib/biolib_api_client/biolib_job_api.py +11 -40
- biolib/compute_node/cloud_utils/cloud_utils.py +35 -63
- biolib/compute_node/job_worker/job_storage.py +9 -13
- biolib/compute_node/job_worker/job_worker.py +2 -2
- biolib/jobs/job.py +1 -3
- biolib/lfs/utils.py +9 -17
- biolib/utils/__init__.py +12 -39
- biolib/utils/multipart_uploader.py +27 -60
- {pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/METADATA +1 -1
- {pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/RECORD +15 -15
- {pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/LICENSE +0 -0
- {pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/WHEEL +0 -0
- {pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/entry_points.txt +0 -0
biolib/_internal/http_client.py
CHANGED
```diff
@@ -11,7 +11,8 @@ import urllib.parse
 from biolib.biolib_logging import logger_no_user_data
 from biolib.typing_utils import Dict, Optional, Union, Literal, cast
 
-
+
+def _create_ssl_context():
     context = ssl.create_default_context()
     try:
         if platform.system() == 'Darwin':
@@ -42,9 +43,10 @@ class HttpError(urllib.error.HTTPError):
 
 class HttpResponse:
     def __init__(self, response):
-        self.
-        self.
-        self.
+        self.headers: Dict[str, str] = dict(response.headers)
+        self.status_code: int = int(response.status)
+        self.content: bytes = response.read()
+        self.url: str = response.geturl()
 
     @property
     def text(self) -> str:
@@ -60,13 +62,14 @@ class HttpClient:
     @staticmethod
    def request(
            url: str,
-            method: Optional[Literal['GET', 'POST', 'PATCH']] = None,
+            method: Optional[Literal['GET', 'POST', 'PATCH', 'PUT']] = None,
            data: Optional[Union[Dict, bytes]] = None,
            headers: Optional[Dict[str, str]] = None,
            retries: int = 5,
+            timeout_in_seconds: Optional[int] = None,
    ) -> HttpResponse:
        if not HttpClient.ssl_context:
-            HttpClient.ssl_context =
+            HttpClient.ssl_context = _create_ssl_context()
        headers_to_send = headers or {}
        if isinstance(data, dict):
            headers_to_send['Accept'] = 'application/json'
@@ -78,7 +81,8 @@ class HttpClient:
            headers=headers_to_send,
            method=method or 'GET',
        )
-
+        if timeout_in_seconds is None:
+            timeout_in_seconds = 60 if isinstance(data, dict) else 180  # TODO: Calculate timeout based on data size
 
        last_error: Optional[urllib.error.URLError] = None
        for retry_count in range(retries + 1):
@@ -89,7 +93,7 @@ class HttpClient:
                with urllib.request.urlopen(
                    request,
                    context=HttpClient.ssl_context,
-                    timeout=timeout_in_seconds
+                    timeout=timeout_in_seconds,
                ) as response:
                    return HttpResponse(response)
 
```
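The `request()` surface above now accepts `'PUT'` and an explicit `timeout_in_seconds`, which the uploader and LFS code later in this diff rely on. A minimal sketch of a call that exercises both additions, based only on the signature shown here (the presigned URL is a placeholder):

```python
from biolib._internal.http_client import HttpClient

# Placeholder presigned URL used only for illustration.
presigned_upload_url = 'https://example-bucket.s3.amazonaws.com/part-1?signature=abc'

# PUT a chunk of bytes with an explicit 5-minute timeout; retries keeps its default of 5.
response = HttpClient.request(
    url=presigned_upload_url,
    method='PUT',
    data=b'chunk-of-bytes',
    timeout_in_seconds=300,
)
print(response.status_code, response.headers.get('ETag'))
```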
biolib/api/client.py
CHANGED
```diff
@@ -4,7 +4,10 @@ from biolib.typing_utils import Dict, Optional, Union
 from biolib.biolib_api_client import BiolibApiClient as DeprecatedApiClient
 from biolib._internal.http_client import HttpResponse, HttpClient
 
-OptionalHeaders =
+OptionalHeaders = Union[
+    Optional[Dict[str, str]],
+    Optional[Dict[str, Union[str, None]]],
+]
 
 
 class ApiClient(HttpClient):
@@ -14,11 +17,12 @@ class ApiClient(HttpClient):
            params: Optional[Dict[str, Union[str, int]]] = None,
            headers: OptionalHeaders = None,
            authenticate: bool = True,
+            retries: int = 10,
    ) -> HttpResponse:
        return self.request(
            headers=self._get_headers(opt_headers=headers, authenticate=authenticate),
            method='GET',
-            retries=
+            retries=retries,
            url=self._get_absolute_url(path=path, query_params=params),
        )
 
@@ -27,21 +31,23 @@ class ApiClient(HttpClient):
            path: str,
            data: Optional[Union[Dict, bytes]] = None,
            headers: OptionalHeaders = None,
+            authenticate: bool = True,
+            retries: int = 5,
    ) -> HttpResponse:
        return self.request(
            data=data,
-            headers=self._get_headers(opt_headers=headers, authenticate=
+            headers=self._get_headers(opt_headers=headers, authenticate=authenticate),
            method='POST',
-            retries=
+            retries=retries,
            url=self._get_absolute_url(path=path, query_params=None),
        )
 
-    def patch(self, path: str, data: Dict, headers: OptionalHeaders = None) -> HttpResponse:
+    def patch(self, path: str, data: Dict, headers: OptionalHeaders = None, retries: int = 5) -> HttpResponse:
        return self.request(
            data=data,
            headers=self._get_headers(opt_headers=headers, authenticate=True),
            method='PATCH',
-            retries=
+            retries=retries,
            url=self._get_absolute_url(path=path, query_params=None),
        )
 
```
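With `retries` and `authenticate` now exposed on `get()` and `post()`, callers can tune the retry budget per request instead of relying on the old hard-coded values. A short sketch against the shared client in `biolib.api`, mirroring the call sites later in this diff; the job id and payload values are placeholders:

```python
import biolib.api

job_uuid = 'some-job-uuid'  # placeholder

# GET with the new default of 10 retries spelled out explicitly.
response = biolib.api.client.get(
    path=f'/jobs/{job_uuid}/storage/results/download/',
    retries=10,
)
print(response.status_code)

# Unauthenticated POST with a custom retry budget, as the compute-node code below does.
biolib.api.client.post(
    path='/jobs/report_available/',
    data={'public_id': 'node-id-placeholder', 'auth_token': 'token-placeholder'},
    authenticate=False,
    retries=5,
)
```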
biolib/biolib_api_client/biolib_job_api.py
CHANGED
```diff
@@ -1,16 +1,12 @@
 import os
-import time
 from urllib.parse import urlparse
 
-import requests
-
 import biolib.api
 
 from biolib import utils
-from biolib.
-from biolib.biolib_api_client import
-from biolib.biolib_errors import
-    JobResultError, JobResultNotFound
+from biolib._internal.http_client import HttpError
+from biolib.biolib_api_client import CloudJob, JobState
+from biolib.biolib_errors import JobResultPermissionError, JobResultError, JobResultNotFound, StorageDownloadFailed
 from biolib.biolib_logging import logger
 from biolib.utils import BIOLIB_PACKAGE_VERSION
 from biolib.typing_utils import TypedDict, Optional, Literal, Dict
@@ -96,35 +92,11 @@ class BiolibJobApi:
 
     @staticmethod
     def create_cloud_job(job_id: str, result_name_prefix: Optional[str]) -> CloudJob:
-        response = None
        data = {'job_id': job_id}
        if result_name_prefix:
            data['result_name_prefix'] = result_name_prefix
 
-
-        try:
-            response = requests.post(
-                f'{BiolibApiClient.get().base_url}/api/jobs/cloud/',
-                json=data,
-                auth=BearerAuth(BiolibApiClient.get().access_token)
-            )
-
-            if response.status_code == 503:
-                raise RetryLimitException(response.content)
-            # Handle possible validation errors from backend
-            elif not response.ok:
-                raise BioLibError(response.text)
-
-            break
-
-        except RetryLimitException as retry_exception:  # pylint: disable=broad-except
-            if retry > 3:
-                raise BioLibError('Reached retry limit for cloud job creation') from retry_exception
-            time.sleep(1)
-
-        if not response:
-            raise BioLibError('Could not create new cloud job')
-
+        response = biolib.api.client.post(path='/jobs/cloud/', data=data)
        cloud_job: CloudJob = response.json()
        return cloud_job
 
@@ -136,9 +108,9 @@ class BiolibJobApi:
    ) -> str:
        try:
            response = biolib.api.client.get(
-                path=f'
+                path=f'/jobs/{job_uuid}/storage/{storage_type}/download/',
                authenticate=True,
-                headers={'Job-Auth-Token': job_auth_token}
+                headers={'Job-Auth-Token': job_auth_token},
            )
            presigned_s3_download_link_response: PresignedS3DownloadLinkResponse = response.json()
            presigned_download_url = presigned_s3_download_link_response['presigned_download_url']
@@ -151,21 +123,20 @@ class BiolibJobApi:
 
            return presigned_download_url
 
-        except
-            status_code = error.response.status_code
+        except HttpError as error:
            if storage_type == 'results':
-                if
+                if error.code == 401:
                    raise JobResultPermissionError('You must be signed in to get result of the job') from None
-                elif
+                elif error.code == 403:
                    raise JobResultPermissionError(
                        'Cannot get result of job. Maybe the job was created without being signed in?'
                    ) from None
-                elif
+                elif error.code == 404:
                    raise JobResultNotFound('Job result not found') from None
                else:
                    raise JobResultError('Failed to get result of job') from error
            else:
-                raise StorageDownloadFailed(error
+                raise StorageDownloadFailed(f'Failed to download result of job got error: {error}') from error
 
        except Exception as error:  # pylint: disable=broad-except
            if storage_type == 'results':
```
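The rewritten error handling above keys off `HttpError.code` instead of requests' `response.status_code`. A hedged sketch of the same pattern in caller code, assuming only what this diff shows (that `HttpError` carries the HTTP status in `.code`); the job id is a placeholder and the raised exception types are illustrative:

```python
import biolib.api
from biolib._internal.http_client import HttpError

job_uuid = '00000000-0000-0000-0000-000000000000'  # placeholder job id

try:
    response = biolib.api.client.get(path=f'/jobs/{job_uuid}/storage/results/download/')
    presigned_download_url = response.json()['presigned_download_url']
except HttpError as error:
    if error.code in (401, 403):
        # Caller is not signed in or lacks access to this job result.
        raise PermissionError('Not allowed to read this job result') from error
    if error.code == 404:
        raise FileNotFoundError('Job result not found') from error
    raise
```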
biolib/compute_node/cloud_utils/cloud_utils.py
CHANGED
```diff
@@ -6,12 +6,10 @@ import subprocess
 import time
 from datetime import datetime
 from socket import gethostbyname, gethostname
-import requests
 
-from biolib import utils
-from biolib.biolib_errors import BioLibError
+from biolib import utils, api
 from biolib.biolib_logging import logger_no_user_data
-from biolib.typing_utils import Optional, List
+from biolib.typing_utils import Optional, List, Dict, cast
 from biolib.biolib_api_client import BiolibApiClient
 from biolib.compute_node.webserver.webserver_types import WebserverConfig, ComputeNodeInfo, ShutdownTimes
 
@@ -74,18 +72,15 @@ class CloudUtils:
        if utils.IS_RUNNING_IN_CLOUD:
            config = CloudUtils.get_webserver_config()
            try:
-
-
-                '
-
-
-
-
-
-
-                )
-
-            except Exception as error:  # pylint: disable=broad-except
+                api.client.post(
+                    authenticate=False,
+                    path='/jobs/deregister/',
+                    data={
+                        'auth_token': config['compute_node_info']['auth_token'],
+                        'public_id': config['compute_node_info']['public_id'],
+                    },
+                )
+            except BaseException as error:
                logger_no_user_data.error(f'Failed to deregister got error: {error}')
        else:
            logger_no_user_data.error("Not deregistering as environment is not cloud")
@@ -120,34 +115,26 @@ class CloudUtils:
            )
            return
 
-
-
-
-
-                response = requests.post(
-                    url=f'{config["base_url"]}/api/jobs/cloud/finish/',
-                    json={
-                        'auth_token': config["compute_node_info"]["auth_token"],
-                        'cloud_job_id': cloud_job_id,
-                        'system_exception_code': system_exception_code,
-                        'exit_code': exit_code
-                    },
-                    timeout=10,
-                )
-
-                response.raise_for_status()
-
-                opt_error_string = f' with error code {system_exception_code}' if system_exception_code else ''
-                logger_no_user_data.debug(
-                    f'Cloud Job "{cloud_job_id}" was reported as finished' + opt_error_string
-                )
-                return
+        logger_no_user_data.debug(
+            f'Reporting CloudJob "{cloud_job_id}" as finished with exit code: {exit_code} '
+            f'and system exception code: {system_exception_code}'
+        )
 
-
-
-
-
-
+        config = CloudUtils.get_webserver_config()
+        try:
+            api.client.post(
+                authenticate=False,
+                path='/jobs/cloud/finish/',
+                retries=100,
+                data={
+                    'auth_token': config['compute_node_info']['auth_token'],
+                    'cloud_job_id': cloud_job_id,
+                    'system_exception_code': system_exception_code,
+                    'exit_code': exit_code
+                },
+            )
+        except BaseException as error:
+            logger_no_user_data.debug(f'Failed to finish CloudJob "{cloud_job_id}" due to: {error}')
 
    @staticmethod
    def _report_availability() -> None:
@@ -158,26 +145,11 @@ class CloudUtils:
            logger_no_user_data.debug(
                f'Registering with {compute_node_info} to host {api_client.base_url} at {datetime.now()}'
            )
-
-
-
-
-
-                    response = requests.post(
-                        url=f'{api_client.base_url}/api/jobs/report_available/',
-                        json=compute_node_info,
-                    )
-                    break
-                except Exception as error:  # pylint: disable=broad-except
-                    logger_no_user_data.error(f'Self-registering failed with error: {error}')
-                    if retry_count < max_retries - 1:
-                        seconds_to_sleep = 1
-                        logger_no_user_data.info(f'Retrying self-registering in {seconds_to_sleep} seconds')
-                        time.sleep(seconds_to_sleep)
-
-            if not response:
-                raise BioLibError('Failed to register. Max retry limit reached')
-
+            response = api.client.post(
+                authenticate=False,
+                path='/jobs/report_available/',
+                data=cast(Dict[str, str], compute_node_info),
+            )
            if response.status_code != 201:
                raise Exception("Non 201 error code")
            else:
```
biolib/compute_node/job_worker/job_storage.py
CHANGED
```diff
@@ -1,9 +1,8 @@
 import os
 
-import requests
-
 from biolib import utils
-from biolib.
+from biolib._internal.http_client import HttpClient
+from biolib.biolib_api_client import CreatedJobDict
 from biolib.biolib_api_client.biolib_job_api import BiolibJobApi
 from biolib.compute_node.cloud_utils import CloudUtils
 from biolib.biolib_logging import logger_no_user_data
@@ -15,24 +14,23 @@ class JobStorage:
 
    @staticmethod
    def upload_module_input(job: CreatedJobDict, module_input_serialized: bytes) -> None:
-        base_url = BiolibApiClient.get().base_url
        job_uuid = job['public_id']
        headers = {'Job-Auth-Token': job['auth_token']}
 
        multipart_uploader = utils.MultiPartUploader(
            start_multipart_upload_request=dict(
                requires_biolib_auth=False,
-
+                path=f'/jobs/{job_uuid}/storage/input/start_upload/',
                headers=headers
            ),
            get_presigned_upload_url_request=dict(
                requires_biolib_auth=False,
-
+                path=f'/jobs/{job_uuid}/storage/input/presigned_upload_url/',
                headers=headers
            ),
            complete_upload_request=dict(
                requires_biolib_auth=False,
-
+                path=f'/jobs/{job_uuid}/storage/input/complete_upload/',
                headers=headers
            ),
        )
@@ -60,7 +58,6 @@ class JobStorage:
 
    @staticmethod
    def _get_module_output_uploader(job_uuid: str) -> utils.MultiPartUploader:
-        base_url = BiolibApiClient.get().base_url
        config = CloudUtils.get_webserver_config()
        compute_node_auth_token = config['compute_node_info']['auth_token']  # pylint: disable=unsubscriptable-object
        headers = {'Compute-Node-Auth-Token': compute_node_auth_token}
@@ -68,17 +65,17 @@ class JobStorage:
        return utils.MultiPartUploader(
            start_multipart_upload_request=dict(
                requires_biolib_auth=False,
-
+                path=f'/jobs/{job_uuid}/storage/results/start_upload/',
                headers=headers,
            ),
            get_presigned_upload_url_request=dict(
                requires_biolib_auth=False,
-
+                path=f'/jobs/{job_uuid}/storage/results/presigned_upload_url/',
                headers=headers,
            ),
            complete_upload_request=dict(
                requires_biolib_auth=False,
-
+                path=f'/jobs/{job_uuid}/storage/results/complete_upload/',
                headers=headers,
            ),
        )
@@ -92,8 +89,7 @@ class JobStorage:
            job_auth_token=job['auth_token'],
            storage_type='input',
        )
-        response =
-        response.raise_for_status()
+        response = HttpClient.request(url=presigned_download_url)
        data: bytes = response.content
        logger_no_user_data.debug(f'Job "{job_uuid}" module input downloaded')
        return data
```
biolib/compute_node/job_worker/job_worker.py
CHANGED
```diff
@@ -12,9 +12,9 @@ import os
 import signal
 from types import FrameType
 
-import requests
 from docker.models.networks import Network  # type: ignore
 
+from biolib._internal.http_client import HttpClient
 from biolib.biolib_binary_format.stdout_and_stderr import StdoutAndStderr
 from biolib.compute_node.job_worker.job_legacy_input_wait_timeout_thread import JobLegacyInputWaitTimeout
 from biolib.compute_node.job_worker.job_storage import JobStorage
@@ -526,7 +526,7 @@ class JobWorker:
        start_time = time()
        logger_no_user_data.debug(f'Job "{root_job_id}" downloading runtime zip...')
        try:
-            runtime_zip_bytes =
+            runtime_zip_bytes = HttpClient.request(url=runtime_zip_url).content
        except Exception as exception:
            raise ComputeProcessException(
                exception,
```
biolib/jobs/job.py
CHANGED
```diff
@@ -6,7 +6,6 @@ from pathlib import Path
 from collections import OrderedDict
 from urllib.parse import urlparse
 
-import requests
 from biolib import api, utils
 from biolib._internal.http_client import HttpClient
 from biolib.biolib_api_client.biolib_job_api import BiolibJobApi
@@ -127,8 +126,7 @@ class Job:
            job_auth_token=self._job_dict['auth_token'],
            storage_type='input',
        )
-        response =
-        response.raise_for_status()
+        response = HttpClient.request(url=presigned_download_url)
        module_input_serialized: bytes = response.content
        return ModuleInput(module_input_serialized).deserialize()
 
```
biolib/lfs/utils.py
CHANGED
```diff
@@ -6,9 +6,8 @@ from collections import namedtuple
 from pathlib import Path
 from struct import Struct
 
-import requests
-
 from biolib import utils
+from biolib._internal.http_client import HttpClient
 from biolib.app import BioLibApp
 from biolib.biolib_api_client.biolib_account_api import BiolibAccountApi
 from biolib.biolib_api_client.biolib_large_file_system_api import BiolibLargeFileSystemApi
@@ -143,18 +142,17 @@ def push_large_file_system(lfs_uri: str, input_dir: str, chunk_size_in_mb: Optio
 
    iterable_zip_stream = get_iterable_zip_stream(files=files_to_zip, chunk_size=chunk_size_in_bytes)
 
-    base_url = BiolibApiClient.get().base_url
    multipart_uploader = utils.MultiPartUploader(
        use_process_pool=True,
        get_presigned_upload_url_request=dict(
            headers=None,
            requires_biolib_auth=True,
-
+            path=f'/lfs/versions/{lfs_resource_version_uuid}/presigned_upload_url/',
        ),
        complete_upload_request=dict(
            headers=None,
            requires_biolib_auth=True,
-
+            path=f'/lfs/versions/{lfs_resource_version_uuid}/complete_upload/',
        ),
    )
 
@@ -221,25 +219,19 @@ def get_file_data_from_large_file_system(lfs_uri: str, file_path: str) -> bytes:
    local_file_header_start = file_info['header_offset'] + len(local_file_header_signature_bytes)
    local_file_header_end = local_file_header_start + local_file_header_struct.size
 
-    local_file_header_response =
+    local_file_header_response = HttpClient.request(
        url=lfs_url,
-        stream=True,
        headers={'range': f'bytes={local_file_header_start}-{local_file_header_end - 1}'},
-
+        timeout_in_seconds=300,
    )
-    local_file_header_response.
-    local_file_header_bytes: bytes = local_file_header_response.raw.data
-    local_file_header = LocalFileHeader._make(local_file_header_struct.unpack(local_file_header_bytes))
+    local_file_header = LocalFileHeader._make(local_file_header_struct.unpack(local_file_header_response.content))
 
    file_start = local_file_header_end + local_file_header.file_name_len + local_file_header.extra_field_len
    file_end = file_start + file_info['file_size']
 
-    response =
+    response = HttpClient.request(
        url=lfs_url,
-        stream=True,
        headers={'range': f'bytes={file_start}-{file_end - 1}'},
-
+        timeout_in_seconds=300,  # timeout after 5 min
    )
-    response.
-    data: bytes = response.raw.data
-    return data
+    return response.content
```
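The two ranged reads above now go through `HttpClient.request` with a `range` header and read the body from `response.content` instead of `response.raw.data`. A small sketch of the same ranged-GET pattern, with a placeholder URL and byte range:

```python
from biolib._internal.http_client import HttpClient

lfs_url = 'https://example.com/data.zip'  # placeholder presigned URL
file_start, file_end = 1024, 2047         # inclusive byte range to fetch

response = HttpClient.request(
    url=lfs_url,
    headers={'range': f'bytes={file_start}-{file_end}'},
    timeout_in_seconds=300,  # 5 minutes, matching the values used in this diff
)
data: bytes = response.content  # the requested 1024 bytes
```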
biolib/utils/__init__.py
CHANGED
```diff
@@ -1,22 +1,19 @@
 import collections.abc
 import multiprocessing
 import os
-import time
 import socket
 import sys
 
 from typing import Optional
-import requests
 from importlib_metadata import version, PackageNotFoundError
 
 from biolib.utils.seq_util import SeqUtil, SeqUtilRecord
-
-# try fetching version, if it fails (usually when in dev), add default
-from biolib.biolib_errors import BioLibError
+from biolib._internal.http_client import HttpClient
 from biolib.biolib_logging import logger_no_user_data, logger
 from biolib.typing_utils import Tuple, Iterator
 from .multipart_uploader import MultiPartUploader, get_chunk_iterator_from_bytes
 
+# try fetching version, if it fails (usually when in dev), add default
 try:
     BIOLIB_PACKAGE_VERSION = version('pybiolib')
 except PackageNotFoundError:
@@ -87,38 +84,17 @@ DownloadChunkInputTuple = Tuple[ByteRangeTuple, str]
 
 
 def _download_chunk(input_tuple: DownloadChunkInputTuple) -> bytes:
-    max_download_retries = 10
-
    byte_range, presigned_url = input_tuple
    start, end = byte_range
 
-
-
-
-
-
-
-
-
-            timeout=300,  # timeout after 5 min
-        )
-            if response.ok:
-                return_value: bytes = response.raw.data
-                logger_no_user_data.debug(f'Returning raw data for part {start}')
-                return return_value
-            else:
-                logger_no_user_data.warning(
-                    f'Got not ok response when downloading part {start}:{end}. '
-                    f'Got response status {response.status_code} and content: {response.content.decode()} '
-                    f'Retrying...'
-                )
-        except Exception:  # pylint: disable=broad-except
-            logger_no_user_data.warning(f'Encountered error when downloading part {start}:{end}. Retrying...')
-
-        time.sleep(5)
-
-    logger_no_user_data.debug(f'Max retries hit, when downloading part {start}:{end}. Exiting...')
-    raise BioLibError(f'Max retries hit, when downloading part {start}:{end}. Exiting...')
+    response = HttpClient.request(
+        url=presigned_url,
+        headers={'range': f'bytes={start}-{end}'},
+        timeout_in_seconds=300,  # timeout after 5 min
+        retries=10,
+    )
+    logger_no_user_data.debug(f'Returning raw data for part {start}')
+    return response.content
 
 
 class ChunkIterator(collections.abc.Iterator):
@@ -153,11 +129,8 @@ class ChunkIterator(collections.abc.Iterator):
 def download_presigned_s3_url(presigned_url: str, output_file_path: str) -> None:
    chunk_size = 50_000_000
 
-
-
-        raise Exception(f'Got response status code {response.status_code} and content {response.content.decode()}')
-
-    file_size = int(response.headers['Content-Range'].split('/')[1])
+    response = HttpClient.request(url=presigned_url, headers={'range': 'bytes=0-1'})
+    file_size = int(response.headers['Content-Range'].split('/')[1])
 
    chunk_iterator = ChunkIterator(file_size, chunk_size, presigned_url)
 
```
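`download_presigned_s3_url` now sizes the file with a tiny ranged request: asking for bytes 0-1 makes the storage backend answer with a `Content-Range` header whose trailing number is the total object size, e.g. `Content-Range: bytes 0-1/1048576` gives a file size of 1048576. A hedged sketch of that probe, using a placeholder URL (the chunk-count arithmetic is illustrative, not taken from ChunkIterator):

```python
from biolib._internal.http_client import HttpClient

presigned_url = 'https://example.com/result.tar'  # placeholder

# Request only the first two bytes; the response still advertises the full size.
response = HttpClient.request(url=presigned_url, headers={'range': 'bytes=0-1'})
# e.g. 'bytes 0-1/1048576' -> 1048576
file_size = int(response.headers['Content-Range'].split('/')[1])

chunk_size = 50_000_000
number_of_chunks = (file_size + chunk_size - 1) // chunk_size  # ceiling division
```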
biolib/utils/multipart_uploader.py
CHANGED
```diff
@@ -5,10 +5,9 @@ import os
 import time
 from urllib.parse import urlparse
 
-import
-
+import biolib.api
+from biolib._internal.http_client import HttpClient
 from biolib.biolib_api_client import BiolibApiClient
-from biolib.biolib_api_client.auth import BearerAuth
 from biolib.biolib_errors import BioLibError
 from biolib.biolib_logging import logger, logger_no_user_data
 from biolib.typing_utils import TypedDict, List, Iterator, Tuple, Optional, Dict
@@ -33,7 +32,7 @@ def get_chunk_iterator_from_file_object(file_object, chunk_size_in_bytes: int =
 class RequestOptions(TypedDict):
     headers: Optional[Dict[str, str]]
     requires_biolib_auth: bool
-
+    path: str
 
 
 class _PartMetadata(TypedDict):
@@ -67,20 +66,15 @@ class MultiPartUploader:
        logger_no_user_data.debug(f'Starting multipart upload of payload with size {payload_size_in_bytes} bytes')
 
        if self._start_multipart_upload_request:
-
-
-
-
-
-                url=self._start_multipart_upload_request['url'],
-            )
-            if start_multipart_upload.ok:
-                logger_no_user_data.debug('Multipart upload started')
-            else:
-                logger_no_user_data.debug(
-                    f'Failed to start multipart upload got response status: {start_multipart_upload.status_code}'
+            try:
+                biolib.api.client.post(
+                    authenticate=self._start_multipart_upload_request['requires_biolib_auth'],
+                    headers=self._start_multipart_upload_request['headers'],
+                    path=self._start_multipart_upload_request['path'],
                )
-
+            except BaseException as error:
+                logger_no_user_data.debug(f'Failed to start multipart upload got error: {error}')
+                raise error
 
        # if multiprocessing start method is spawn or we are running in a daemon process,
        # multiprocessing.Pool may fail when called from script
@@ -116,30 +110,12 @@ class MultiPartUploader:
            BiolibApiClient.refresh_auth_token()
 
        logger_no_user_data.debug(f'Uploaded {len(parts)} parts, now calling complete upload...')
-
-
-
-
-
-
-                    timeout=30,
-                    url=self._complete_upload_request['url'],
-                )
-                if complete_upload_response.ok:
-                    logger_no_user_data.debug('Multipart upload completed returning')
-                    return
-
-                logger_no_user_data.warning(
-                    f'Failed to complete multipart upload got response status {complete_upload_response.status_code}. '
-                    f'Retrying...'
-                )
-
-            except Exception as error:  # pylint: disable=broad-except
-                logger_no_user_data.warning('Encountered error when completing multipart upload. Retrying...')
-                logger.debug(f'Multipart complete error: {error}')
-                time.sleep(index * index + 2)
-
-        raise BioLibError('Max retries hit, when completing multipart upload')
+        biolib.api.client.post(
+            authenticate=requires_biolib_auth,
+            headers=self._complete_upload_request['headers'],
+            data={'parts': parts, 'size_bytes': self._bytes_uploaded},
+            path=self._complete_upload_request['path'],
+        )
 
    def _upload_chunk(self, _input: _UploadChunkInputType) -> _UploadChunkReturnType:
        part_number, chunk = _input
@@ -152,18 +128,12 @@ class MultiPartUploader:
        logger_no_user_data.debug(f'Uploading part number {part_number} with size {len(chunk)} bytes...')
        try:
            logger_no_user_data.debug(f'Getting upload URL for chunk {part_number}...')
-                get_url_response =
-
+                get_url_response = biolib.api.client.get(
+                    authenticate=requires_biolib_auth,
                    headers=self._get_presigned_upload_url_request['headers'],
                    params={'part_number': part_number},
-
-                    url=self._get_presigned_upload_url_request['url'],
+                    path=self._get_presigned_upload_url_request['path'],
                )
-                if not get_url_response.ok:
-                    raise Exception(
-                        f'Failed to get upload URL for part {part_number} got response status code '
-                        f'{get_url_response.status_code}'
-                    )
 
                presigned_upload_url = get_url_response.json()['presigned_upload_url']
 
@@ -174,16 +144,13 @@ class MultiPartUploader:
                    presigned_upload_url = \
                        f'{app_caller_proxy_job_storage_base_url}{parsed_url.path}?{parsed_url.query}'
 
-                put_chunk_response =
-
-
-
-
-
-
-                    'Retrying...'
-                )
-                logger.debug(f'Response content: {put_chunk_response.content.decode()}')
+                put_chunk_response = HttpClient.request(
+                    url=presigned_upload_url,
+                    data=chunk,
+                    method='PUT',
+                    timeout_in_seconds=300,
+                )
+                return _PartMetadata(PartNumber=part_number, ETag=put_chunk_response.headers['ETag']), len(chunk)
 
            except Exception as error:  # pylint: disable=broad-except
                logger_no_user_data.warning(f'Encountered error when uploading part {part_number}. Retrying...')
```
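`RequestOptions` now carries a relative `path` instead of an absolute `url`, so every request the uploader makes goes through `biolib.api.client` and inherits its base URL, headers, and retry handling. A sketch of wiring a `MultiPartUploader` for a hypothetical job input upload, mirroring the call sites in `job_storage.py` and `lfs/utils.py` shown above (the job id and auth token are placeholders):

```python
from biolib import utils

job_uuid = 'some-job-uuid'                         # placeholder
headers = {'Job-Auth-Token': 'job-token-placeholder'}

uploader = utils.MultiPartUploader(
    start_multipart_upload_request=dict(
        requires_biolib_auth=False,
        path=f'/jobs/{job_uuid}/storage/input/start_upload/',
        headers=headers,
    ),
    get_presigned_upload_url_request=dict(
        requires_biolib_auth=False,
        path=f'/jobs/{job_uuid}/storage/input/presigned_upload_url/',
        headers=headers,
    ),
    complete_upload_request=dict(
        requires_biolib_auth=False,
        path=f'/jobs/{job_uuid}/storage/input/complete_upload/',
        headers=headers,
    ),
)
```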
{pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/RECORD
CHANGED
```diff
@@ -2,10 +2,10 @@ LICENSE,sha256=F2h7gf8i0agDIeWoBPXDMYScvQOz02pAWkKhTGOHaaw,1067
 README.md,sha256=_IH7pxFiqy2bIAmaVeA-iVTyUwWRjMIlfgtUbYTtmls,368
 biolib/__init__.py,sha256=ImZ0G02v6kpvzfyz8lPrLp2TdTKobhbaHZXvf8LV8Bg,3819
 biolib/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-biolib/_internal/http_client.py,sha256=
+biolib/_internal/http_client.py,sha256=HgKv559gVjeR-pgxhP0mqhd9a7Seuy9AJl57TxNwjJ8,4098
 biolib/_internal/push_application.py,sha256=NXQNLPucqkq7eKbNCJQA8OYx4CXYHRJ1CG1ViPAJJH4,10680
 biolib/api/__init__.py,sha256=iIO8ZRdn7YDhY5cR47-Wo1YsNOK8H6RN6jn8yor5WJI,137
-biolib/api/client.py,sha256=
+biolib/api/client.py,sha256=MtDkH2Amr2Fko-bCR5DdookJu0yZ1q-6K_PPg4KK_Ek,2941
 biolib/app/__init__.py,sha256=cdPtcfb_U-bxb9iSL4fCEq2rpD9OjkyY4W-Zw60B0LI,37
 biolib/app/app.py,sha256=O8bpuHTWF-sFgQJX9NWoVjwaFpydJqqsFa-p7VPO7fE,10004
 biolib/app/search_apps.py,sha256=CIanvDebNpMtWck0w4IoR_tRZ6LpSWC8DPxXW2e6Ww4,1495
@@ -15,7 +15,7 @@ biolib/biolib_api_client/app_types.py,sha256=vEjkpMwaMfz8MxBBZQfWCkxqT7NXxWocB_O
 biolib/biolib_api_client/auth.py,sha256=BAXtic6DdaA2QjoDVglnO3PFPoBETQbSraTpIwsZbFc,1267
 biolib/biolib_api_client/biolib_account_api.py,sha256=sHng5jDvSktv6tOLKU8wJRieidY2kLxRU8hI_6ZauXE,210
 biolib/biolib_api_client/biolib_app_api.py,sha256=pr52ARrDeB5ioEA7NAjMdmBYb9V2FPy0_URCwdCRZ0A,4397
-biolib/biolib_api_client/biolib_job_api.py,sha256=
+biolib/biolib_api_client/biolib_job_api.py,sha256=IpFahcRzm7GNy8DJ-XHYe-x7r4Voba8o22IXw5puHn8,6782
 biolib/biolib_api_client/biolib_large_file_system_api.py,sha256=p8QhvQ0aI0NJgyRm7duqDVtPx0zrVaSLKS22ocOafFQ,1038
 biolib/biolib_api_client/common_types.py,sha256=RH-1KNHqUF-EkTpfPOSTt5Mq1GPdfju_cqXDesscO1I,123
 biolib/biolib_api_client/job_types.py,sha256=XlDIxijxymLoJcClXhl91h1E4b2fT3pszO9wjlssD4A,1284
@@ -48,7 +48,7 @@ biolib/cli/start.py,sha256=TpZq1VHVkLTW1_PyjIGkJg44E3PzxSaGrN2D_rNJL0c,1736
 biolib/compute_node/.gitignore,sha256=GZdZ4g7HftqfOfasFpBC5zV1YQAbht1a7EzcXD6f3zg,45
 biolib/compute_node/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 biolib/compute_node/cloud_utils/__init__.py,sha256=VZSScLqaz5tg_gpMvWgwkAu9Qf-vgW_QHRoDOaAmU44,67
-biolib/compute_node/cloud_utils/cloud_utils.py,sha256=
+biolib/compute_node/cloud_utils/cloud_utils.py,sha256=kxlmQP2VUAt0GB94REJeaJ7ZeHfVoIZqb_z92CLbXS0,7540
 biolib/compute_node/job_worker/__init__.py,sha256=ipdPWaABKYrltxny15e2kK8PWdEE7VzXbkKK6wM_zDk,71
 biolib/compute_node/job_worker/cache_state.py,sha256=N2AxvvgfizKx9P_ysM0GDCmueMUlgcveA4xSdz_Z2IQ,4822
 biolib/compute_node/job_worker/cache_types.py,sha256=ajpLy8i09QeQS9dEqTn3T6NVNMY_YsHQkSD5nvIHccQ,818
@@ -60,8 +60,8 @@ biolib/compute_node/job_worker/executors/tars/__init__.py,sha256=47DEQpj8HBSa-_T
 biolib/compute_node/job_worker/executors/types.py,sha256=yP5gG39hr-DLnw9bOE--VHi-1arDbIYiGuV1rlTbbHI,1466
 biolib/compute_node/job_worker/job_legacy_input_wait_timeout_thread.py,sha256=_cvEiZbOwfkv6fYmfrvdi_FVviIEYr_dSClQcOQaUWM,1198
 biolib/compute_node/job_worker/job_max_runtime_timer_thread.py,sha256=K_xgz7IhiIjpLlXRk8sqaMyLoApcidJkgu29sJX0gb8,1174
-biolib/compute_node/job_worker/job_storage.py,sha256=
-biolib/compute_node/job_worker/job_worker.py,sha256=
+biolib/compute_node/job_worker/job_storage.py,sha256=Ol43f43W6aD2EUkA6G2i9-WxdREr5JPSjo1xFylddOQ,4030
+biolib/compute_node/job_worker/job_worker.py,sha256=98-3ksp-mfTKge9yoUw7_ab08buBnC40dVe5rT_3Ncw,27949
 biolib/compute_node/job_worker/large_file_system.py,sha256=XXqRlVtYhs-Ji9zQGIk5KQPXFO_Q5jJH0nnlw4GkeMY,10461
 biolib/compute_node/job_worker/mappings.py,sha256=Z48Kg4nbcOvsT2-9o3RRikBkqflgO4XeaWxTGz-CNvI,2499
 biolib/compute_node/job_worker/utilization_reporter_thread.py,sha256=7tm5Yk9coqJ9VbEdnO86tSXI0iM0omwIyKENxdxiVXk,8575
@@ -80,12 +80,12 @@ biolib/experiments/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 biolib/experiments/experiment.py,sha256=xUd3LkV5ztFIjTx_ehBu8e-flvfuQnYimFtPj1upzb0,5942
 biolib/experiments/types.py,sha256=n9GxdFA7cLMfHvLLqLmZzX31ELeSSkMXFoEEdFsdWGY,171
 biolib/jobs/__init__.py,sha256=aIb2H2DHjQbM2Bs-dysFijhwFcL58Blp0Co0gimED3w,32
-biolib/jobs/job.py,sha256=
+biolib/jobs/job.py,sha256=mrRQd0aI8VElYgwd7BtXE-mjB0c4xbyRh8Ngpd2gZ7Q,15181
 biolib/jobs/job_result.py,sha256=8GasUmUXD8SjUYrE2N-HrDx7-AI6TEkFONH8H91t01Q,4913
 biolib/jobs/types.py,sha256=4OAvlhOKANzFMrZDd-mhXpEd8RaKcx8sPneZUoWhJ2U,970
 biolib/lfs/__init__.py,sha256=wspftSqe_x7xPMZbeW8mcIBuxbHFCiFI3V4TuZU4nZ4,193
 biolib/lfs/cache.py,sha256=pQS2np21rdJ6I3DpoOutnzPHpLOZgUIS8TMltUJk_k4,2226
-biolib/lfs/utils.py,sha256=
+biolib/lfs/utils.py,sha256=dVDNoXXfXFt9Sbf68tJ6BPv_cNAHMcqbrY2wLj-lLUA,9773
 biolib/runtime/__init__.py,sha256=WyuNnkzj1r-Sx4J-xWzp3US2oo9m5aIuCOYRmjlZ3ug,44
 biolib/runtime/results.py,sha256=7Fzrddb319dLMgTS1AVvz-RaTxAWOr7Qt75ftfSci9E,778
 biolib/sdk/__init__.py,sha256=vqqE2Sf3gLySkaJv8VgWtPRkYMi9hOf66y9Ay2waYVE,1339
@@ -95,14 +95,14 @@ biolib/templates/example_app.py,sha256=EB3E3RT4SeO_ii5nVQqJpi5KDGNE_huF1ub-e5ZFv
 biolib/typing_utils.py,sha256=krMhxB3SedVQA3HXIrC7DBXWpHKWN5JNmXGcSrrysOc,263
 biolib/user/__init__.py,sha256=Db5wtxLfFz3ID9TULSSTo77csw9tO6RtxMRvV5cqKEE,39
 biolib/user/sign_in.py,sha256=CvHBgoYjg5d5CL9etVopns_g1pImgW8mQH3zAjpe1Ik,2061
-biolib/utils/__init__.py,sha256=
+biolib/utils/__init__.py,sha256=AuBhxgZV8-3LWPnewhhYw4isQhPmVs-k55umk8t-r7U,5535
 biolib/utils/app_uri.py,sha256=hOFsTQfA7QbyQyg9ItGdD8VDWBJw0vYMqzLdSiJXmqQ,1857
 biolib/utils/cache_state.py,sha256=BFrZlV4XZIueIFzAFiPidX4hmwADKY5Y5ZuqlerF5l0,3060
-biolib/utils/multipart_uploader.py,sha256=
+biolib/utils/multipart_uploader.py,sha256=eJj4G20XpxNovAmTjBuWqVIPvEvQURKlvtqexB7yXNw,8031
 biolib/utils/seq_util.py,sha256=gLnqCr_mcLcxakO44vGBqUn76VI7kLHgXKqyManjd24,4292
 biolib/utils/zip/remote_zip.py,sha256=NCdUnVbGCv7SfXCI-yVU-is_OnyWmLAnVpIdSvo-W4k,23500
-pybiolib-1.1.
-pybiolib-1.1.
-pybiolib-1.1.
-pybiolib-1.1.
-pybiolib-1.1.
+pybiolib-1.1.1730.dist-info/LICENSE,sha256=F2h7gf8i0agDIeWoBPXDMYScvQOz02pAWkKhTGOHaaw,1067
+pybiolib-1.1.1730.dist-info/METADATA,sha256=mdgphE7hYhgXyT21dCjOJ69UQA0WRGIcVZuxWf4H9LQ,1543
+pybiolib-1.1.1730.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+pybiolib-1.1.1730.dist-info/entry_points.txt,sha256=p6DyaP_2kctxegTX23WBznnrDi4mz6gx04O5uKtRDXg,42
+pybiolib-1.1.1730.dist-info/RECORD,,
```
{pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/LICENSE
File without changes
{pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/WHEEL
File without changes
{pybiolib-1.1.1711.dist-info → pybiolib-1.1.1730.dist-info}/entry_points.txt
File without changes