pybiolib 1.1.1711__tar.gz → 1.1.1730__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/PKG-INFO +1 -1
  2. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/_internal/http_client.py +12 -8
  3. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/api/client.py +12 -6
  4. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/biolib_job_api.py +11 -40
  5. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/cloud_utils/cloud_utils.py +35 -63
  6. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/job_storage.py +9 -13
  7. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/job_worker.py +2 -2
  8. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/jobs/job.py +1 -3
  9. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/lfs/utils.py +9 -17
  10. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/utils/__init__.py +12 -39
  11. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/utils/multipart_uploader.py +27 -60
  12. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/pyproject.toml +1 -1
  13. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/LICENSE +0 -0
  14. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/README.md +0 -0
  15. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/__init__.py +0 -0
  16. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/_internal/__init__.py +0 -0
  17. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/_internal/push_application.py +0 -0
  18. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/api/__init__.py +0 -0
  19. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/app/__init__.py +0 -0
  20. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/app/app.py +0 -0
  21. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/app/search_apps.py +0 -0
  22. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/__init__.py +0 -0
  23. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/api_client.py +0 -0
  24. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/app_types.py +0 -0
  25. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/auth.py +0 -0
  26. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/biolib_account_api.py +0 -0
  27. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/biolib_app_api.py +0 -0
  28. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/biolib_large_file_system_api.py +0 -0
  29. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/common_types.py +0 -0
  30. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/job_types.py +0 -0
  31. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/lfs_types.py +0 -0
  32. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_api_client/user_state.py +0 -0
  33. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/__init__.py +0 -0
  34. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/base_bbf_package.py +0 -0
  35. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/file_in_container.py +0 -0
  36. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/module_input.py +0 -0
  37. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/module_output_v2.py +0 -0
  38. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/remote_endpoints.py +0 -0
  39. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/remote_stream_seeker.py +0 -0
  40. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/saved_job.py +0 -0
  41. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/stdout_and_stderr.py +0 -0
  42. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/system_exception.py +0 -0
  43. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/system_status_update.py +0 -0
  44. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_binary_format/utils.py +0 -0
  45. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_docker_client/__init__.py +0 -0
  46. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_download_container.py +0 -0
  47. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_errors.py +0 -0
  48. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/biolib_logging.py +0 -0
  49. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/__init__.py +0 -0
  50. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/download_container.py +0 -0
  51. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/init.py +0 -0
  52. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/lfs.py +0 -0
  53. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/push.py +0 -0
  54. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/run.py +0 -0
  55. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/runtime.py +0 -0
  56. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/cli/start.py +0 -0
  57. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/.gitignore +0 -0
  58. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/__init__.py +0 -0
  59. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/cloud_utils/__init__.py +0 -0
  60. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/__init__.py +0 -0
  61. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/cache_state.py +0 -0
  62. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/cache_types.py +0 -0
  63. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/docker_image_cache.py +0 -0
  64. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/executors/__init__.py +0 -0
  65. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/executors/docker_executor.py +0 -0
  66. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/executors/docker_types.py +0 -0
  67. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/executors/tars/__init__.py +0 -0
  68. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/executors/types.py +0 -0
  69. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/job_legacy_input_wait_timeout_thread.py +0 -0
  70. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/job_max_runtime_timer_thread.py +0 -0
  71. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/large_file_system.py +0 -0
  72. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/mappings.py +0 -0
  73. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/utilization_reporter_thread.py +0 -0
  74. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/job_worker/utils.py +0 -0
  75. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/remote_host_proxy.py +0 -0
  76. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/socker_listener_thread.py +0 -0
  77. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/socket_sender_thread.py +0 -0
  78. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/utils.py +0 -0
  79. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/webserver/__init__.py +0 -0
  80. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/webserver/gunicorn_flask_application.py +0 -0
  81. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/webserver/webserver.py +0 -0
  82. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/webserver/webserver_types.py +0 -0
  83. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/webserver/webserver_utils.py +0 -0
  84. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/compute_node/webserver/worker_thread.py +0 -0
  85. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/experiments/__init__.py +0 -0
  86. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/experiments/experiment.py +0 -0
  87. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/experiments/types.py +0 -0
  88. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/jobs/__init__.py +0 -0
  89. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/jobs/job_result.py +0 -0
  90. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/jobs/types.py +0 -0
  91. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/lfs/__init__.py +0 -0
  92. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/lfs/cache.py +0 -0
  93. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/runtime/__init__.py +0 -0
  94. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/runtime/results.py +0 -0
  95. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/sdk/__init__.py +0 -0
  96. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/tables.py +0 -0
  97. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/templates/__init__.py +0 -0
  98. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/templates/example_app.py +0 -0
  99. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/typing_utils.py +0 -0
  100. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/user/__init__.py +0 -0
  101. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/user/sign_in.py +0 -0
  102. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/utils/app_uri.py +0 -0
  103. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/utils/cache_state.py +0 -0
  104. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/utils/seq_util.py +0 -0
  105. {pybiolib-1.1.1711 → pybiolib-1.1.1730}/biolib/utils/zip/remote_zip.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pybiolib
-Version: 1.1.1711
+Version: 1.1.1730
 Summary: BioLib Python Client
 Home-page: https://github.com/biolib
 License: MIT

biolib/_internal/http_client.py
@@ -11,7 +11,8 @@ import urllib.parse
 from biolib.biolib_logging import logger_no_user_data
 from biolib.typing_utils import Dict, Optional, Union, Literal, cast
 
-def create_ssl_context():
+
+def _create_ssl_context():
     context = ssl.create_default_context()
     try:
         if platform.system() == 'Darwin':
@@ -42,9 +43,10 @@ class HttpError(urllib.error.HTTPError):
 
 class HttpResponse:
     def __init__(self, response):
-        self.status_code = response.status
-        self.content = response.read()
-        self.url = response.geturl()
+        self.headers: Dict[str, str] = dict(response.headers)
+        self.status_code: int = int(response.status)
+        self.content: bytes = response.read()
+        self.url: str = response.geturl()
 
     @property
     def text(self) -> str:
@@ -60,13 +62,14 @@ class HttpClient:
     @staticmethod
     def request(
         url: str,
-        method: Optional[Literal['GET', 'POST', 'PATCH']] = None,
+        method: Optional[Literal['GET', 'POST', 'PATCH', 'PUT']] = None,
         data: Optional[Union[Dict, bytes]] = None,
         headers: Optional[Dict[str, str]] = None,
         retries: int = 5,
+        timeout_in_seconds: Optional[int] = None,
     ) -> HttpResponse:
         if not HttpClient.ssl_context:
-            HttpClient.ssl_context = create_ssl_context()
+            HttpClient.ssl_context = _create_ssl_context()
         headers_to_send = headers or {}
         if isinstance(data, dict):
             headers_to_send['Accept'] = 'application/json'
@@ -78,7 +81,8 @@ class HttpClient:
             headers=headers_to_send,
             method=method or 'GET',
         )
-        timeout_in_seconds = 60 if isinstance(data, dict) else 180  # TODO: Calculate timeout based on data size
+        if timeout_in_seconds is None:
+            timeout_in_seconds = 60 if isinstance(data, dict) else 180  # TODO: Calculate timeout based on data size
 
         last_error: Optional[urllib.error.URLError] = None
         for retry_count in range(retries + 1):
@@ -89,7 +93,7 @@ class HttpClient:
                 with urllib.request.urlopen(
                     request,
                     context=HttpClient.ssl_context,
-                    timeout=timeout_in_seconds
+                    timeout=timeout_in_seconds,
                 ) as response:
                     return HttpResponse(response)
 
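
Net effect of the http_client.py changes: HttpResponse now exposes the response headers, the client accepts PUT, and callers can override the previously hard-coded timeout while retrying stays built in. A minimal sketch of the new call shape; the URL and payload are placeholders, not from this diff:

    from biolib._internal.http_client import HttpClient

    # 'https://example.com/upload' is a hypothetical URL for illustration.
    response = HttpClient.request(
        url='https://example.com/upload',
        method='PUT',                # newly permitted verb
        data=b'some-bytes',          # raw bytes are sent as the request body
        timeout_in_seconds=300,      # overrides the 60s/180s defaults above
        retries=10,                  # retried internally on URL errors
    )
    print(response.status_code, response.headers.get('ETag'))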

biolib/api/client.py
@@ -4,7 +4,10 @@ from biolib.typing_utils import Dict, Optional, Union
 from biolib.biolib_api_client import BiolibApiClient as DeprecatedApiClient
 from biolib._internal.http_client import HttpResponse, HttpClient
 
-OptionalHeaders = Optional[Dict[str, Union[str, None]]]
+OptionalHeaders = Union[
+    Optional[Dict[str, str]],
+    Optional[Dict[str, Union[str, None]]],
+]
 
 
 class ApiClient(HttpClient):
@@ -14,11 +17,12 @@ class ApiClient(HttpClient):
         params: Optional[Dict[str, Union[str, int]]] = None,
         headers: OptionalHeaders = None,
         authenticate: bool = True,
+        retries: int = 10,
     ) -> HttpResponse:
         return self.request(
             headers=self._get_headers(opt_headers=headers, authenticate=authenticate),
             method='GET',
-            retries=10,
+            retries=retries,
             url=self._get_absolute_url(path=path, query_params=params),
         )
 
@@ -27,21 +31,23 @@ class ApiClient(HttpClient):
         path: str,
         data: Optional[Union[Dict, bytes]] = None,
         headers: OptionalHeaders = None,
+        authenticate: bool = True,
+        retries: int = 5,
     ) -> HttpResponse:
         return self.request(
             data=data,
-            headers=self._get_headers(opt_headers=headers, authenticate=True),
+            headers=self._get_headers(opt_headers=headers, authenticate=authenticate),
             method='POST',
-            retries=5,
+            retries=retries,
             url=self._get_absolute_url(path=path, query_params=None),
         )
 
-    def patch(self, path: str, data: Dict, headers: OptionalHeaders = None) -> HttpResponse:
+    def patch(self, path: str, data: Dict, headers: OptionalHeaders = None, retries: int = 5) -> HttpResponse:
         return self.request(
             data=data,
             headers=self._get_headers(opt_headers=headers, authenticate=True),
             method='PATCH',
-            retries=5,
+            retries=retries,
             url=self._get_absolute_url(path=path, query_params=None),
         )
 
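
These changes thread authenticate and retries through get, post, and patch, so call sites can tune retry counts per request instead of inheriting the old hard-coded 10 and 5. A hedged sketch of the resulting usage; the GET path is a placeholder:

    import biolib.api

    # Unauthenticated POST, mirroring the deregister call site in this diff.
    biolib.api.client.post(
        authenticate=False,
        path='/jobs/deregister/',
        data={'auth_token': '<token>', 'public_id': '<id>'},  # placeholders
    )

    # Authenticated GET keeping the default of 10 retries ('/apps/' is hypothetical).
    response = biolib.api.client.get(path='/apps/', params={'page': 1})
    print(response.json())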

biolib/biolib_api_client/biolib_job_api.py
@@ -1,16 +1,12 @@
 import os
-import time
 from urllib.parse import urlparse
 
-import requests
-
 import biolib.api
 
 from biolib import utils
-from biolib.biolib_api_client.auth import BearerAuth
-from biolib.biolib_api_client import BiolibApiClient, CloudJob, JobState
-from biolib.biolib_errors import BioLibError, RetryLimitException, StorageDownloadFailed, JobResultPermissionError, \
-    JobResultError, JobResultNotFound
+from biolib._internal.http_client import HttpError
+from biolib.biolib_api_client import CloudJob, JobState
+from biolib.biolib_errors import JobResultPermissionError, JobResultError, JobResultNotFound, StorageDownloadFailed
 from biolib.biolib_logging import logger
 from biolib.utils import BIOLIB_PACKAGE_VERSION
 from biolib.typing_utils import TypedDict, Optional, Literal, Dict
@@ -96,35 +92,11 @@ class BiolibJobApi:
 
     @staticmethod
     def create_cloud_job(job_id: str, result_name_prefix: Optional[str]) -> CloudJob:
-        response = None
         data = {'job_id': job_id}
         if result_name_prefix:
             data['result_name_prefix'] = result_name_prefix
 
-        for retry in range(4):
-            try:
-                response = requests.post(
-                    f'{BiolibApiClient.get().base_url}/api/jobs/cloud/',
-                    json=data,
-                    auth=BearerAuth(BiolibApiClient.get().access_token)
-                )
-
-                if response.status_code == 503:
-                    raise RetryLimitException(response.content)
-                # Handle possible validation errors from backend
-                elif not response.ok:
-                    raise BioLibError(response.text)
-
-                break
-
-            except RetryLimitException as retry_exception:  # pylint: disable=broad-except
-                if retry > 3:
-                    raise BioLibError('Reached retry limit for cloud job creation') from retry_exception
-                time.sleep(1)
-
-        if not response:
-            raise BioLibError('Could not create new cloud job')
-
+        response = biolib.api.client.post(path='/jobs/cloud/', data=data)
         cloud_job: CloudJob = response.json()
         return cloud_job
 
@@ -136,9 +108,9 @@
     ) -> str:
         try:
             response = biolib.api.client.get(
-                path=f'{BiolibApiClient.get().base_url}/api/jobs/{job_uuid}/storage/{storage_type}/download/',
+                path=f'/jobs/{job_uuid}/storage/{storage_type}/download/',
                 authenticate=True,
-                headers={'Job-Auth-Token': job_auth_token}
+                headers={'Job-Auth-Token': job_auth_token},
             )
             presigned_s3_download_link_response: PresignedS3DownloadLinkResponse = response.json()
             presigned_download_url = presigned_s3_download_link_response['presigned_download_url']
@@ -151,21 +123,20 @@
 
             return presigned_download_url
 
-        except requests.exceptions.HTTPError as error:
-            status_code = error.response.status_code
+        except HttpError as error:
             if storage_type == 'results':
-                if status_code == 401:
+                if error.code == 401:
                     raise JobResultPermissionError('You must be signed in to get result of the job') from None
-                elif status_code == 403:
+                elif error.code == 403:
                     raise JobResultPermissionError(
                         'Cannot get result of job. Maybe the job was created without being signed in?'
                     ) from None
-                elif status_code == 404:
+                elif error.code == 404:
                     raise JobResultNotFound('Job result not found') from None
                 else:
                     raise JobResultError('Failed to get result of job') from error
             else:
-                raise StorageDownloadFailed(error.response.content) from error
+                raise StorageDownloadFailed(f'Failed to download result of job got error: {error}') from error
 
         except Exception as error:  # pylint: disable=broad-except
             if storage_type == 'results':
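
create_cloud_job drops its hand-rolled requests loop in favour of the shared API client, and the download-URL error handling switches from requests.exceptions.HTTPError to the internal HttpError. Since HttpError subclasses urllib.error.HTTPError, the HTTP status lives on error.code. A sketch of the new pattern; the job UUID is a placeholder:

    import biolib.api
    from biolib._internal.http_client import HttpError
    from biolib.biolib_errors import JobResultNotFound

    job_uuid = '<job-uuid>'  # placeholder
    try:
        response = biolib.api.client.get(path=f'/jobs/{job_uuid}/storage/results/download/')
    except HttpError as error:
        # urllib-style errors carry the status as error.code,
        # not error.response.status_code as with requests.
        if error.code == 404:
            raise JobResultNotFound('Job result not found') from None
        raise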

biolib/compute_node/cloud_utils/cloud_utils.py
@@ -6,12 +6,10 @@ import subprocess
 import time
 from datetime import datetime
 from socket import gethostbyname, gethostname
-import requests
 
-from biolib import utils
-from biolib.biolib_errors import BioLibError
+from biolib import utils, api
 from biolib.biolib_logging import logger_no_user_data
-from biolib.typing_utils import Optional, List
+from biolib.typing_utils import Optional, List, Dict, cast
 from biolib.biolib_api_client import BiolibApiClient
 from biolib.compute_node.webserver.webserver_types import WebserverConfig, ComputeNodeInfo, ShutdownTimes
 
@@ -74,18 +72,15 @@ class CloudUtils:
         if utils.IS_RUNNING_IN_CLOUD:
             config = CloudUtils.get_webserver_config()
             try:
-                response = requests.post(url=f'{config["base_url"]}/api/jobs/deregister/', json={
-                    'auth_token': config["compute_node_info"]["auth_token"],
-                    'public_id': config["compute_node_info"]["public_id"],
-                })
-
-                if not response.ok:
-                    response_content = response.content.decode()
-                    logger_no_user_data.error(
-                        f'Failed to deregister got status {response.status_code} and response {response_content}'
-                    )
-
-            except Exception as error:  # pylint: disable=broad-except
+                api.client.post(
+                    authenticate=False,
+                    path='/jobs/deregister/',
+                    data={
+                        'auth_token': config['compute_node_info']['auth_token'],
+                        'public_id': config['compute_node_info']['public_id'],
+                    },
+                )
+            except BaseException as error:
                 logger_no_user_data.error(f'Failed to deregister got error: {error}')
         else:
             logger_no_user_data.error("Not deregistering as environment is not cloud")
@@ -120,34 +115,26 @@
             )
             return
 
-        config = CloudUtils.get_webserver_config()
-
-        for _ in range(100):
-            try:
-                response = requests.post(
-                    url=f'{config["base_url"]}/api/jobs/cloud/finish/',
-                    json={
-                        'auth_token': config["compute_node_info"]["auth_token"],
-                        'cloud_job_id': cloud_job_id,
-                        'system_exception_code': system_exception_code,
-                        'exit_code': exit_code
-                    },
-                    timeout=10,
-                )
-
-                response.raise_for_status()
-
-                opt_error_string = f' with error code {system_exception_code}' if system_exception_code else ''
-                logger_no_user_data.debug(
-                    f'Cloud Job "{cloud_job_id}" was reported as finished' + opt_error_string
-                )
-                return
+        logger_no_user_data.debug(
+            f'Reporting CloudJob "{cloud_job_id}" as finished with exit code: {exit_code} '
+            f'and system exception code: {system_exception_code}'
+        )
 
-            except Exception as error:  # pylint: disable=broad-except
-                logger_no_user_data.debug(
-                    f'Could not finish cloud job "{cloud_job_id}" due to {error}, retrying...'
-                )
-                time.sleep(10)
+        config = CloudUtils.get_webserver_config()
+        try:
+            api.client.post(
+                authenticate=False,
+                path='/jobs/cloud/finish/',
+                retries=100,
+                data={
+                    'auth_token': config['compute_node_info']['auth_token'],
+                    'cloud_job_id': cloud_job_id,
+                    'system_exception_code': system_exception_code,
+                    'exit_code': exit_code
+                },
+            )
+        except BaseException as error:
+            logger_no_user_data.debug(f'Failed to finish CloudJob "{cloud_job_id}" due to: {error}')
 
     @staticmethod
     def _report_availability() -> None:
@@ -158,26 +145,11 @@
         logger_no_user_data.debug(
             f'Registering with {compute_node_info} to host {api_client.base_url} at {datetime.now()}'
         )
-
-        response: Optional[requests.Response] = None
-        max_retries = 5
-        for retry_count in range(max_retries):
-            try:
-                response = requests.post(
-                    url=f'{api_client.base_url}/api/jobs/report_available/',
-                    json=compute_node_info,
-                )
-                break
-            except Exception as error:  # pylint: disable=broad-except
-                logger_no_user_data.error(f'Self-registering failed with error: {error}')
-                if retry_count < max_retries - 1:
-                    seconds_to_sleep = 1
-                    logger_no_user_data.info(f'Retrying self-registering in {seconds_to_sleep} seconds')
-                    time.sleep(seconds_to_sleep)
-
-        if not response:
-            raise BioLibError('Failed to register. Max retry limit reached')
-
+        response = api.client.post(
+            authenticate=False,
+            path='/jobs/report_available/',
+            data=cast(Dict[str, str], compute_node_info),
+        )
         if response.status_code != 201:
             raise Exception("Non 201 error code")
         else:
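
Both deregister and finish_cloud_job now delegate their retry loops to the HTTP layer: the hundred-iteration loop with time.sleep(10) collapses into a single call with retries=100, since HttpClient.request already loops for retry_count in range(retries + 1). A trimmed sketch of the equivalence, with a placeholder payload:

    from biolib import api

    api.client.post(
        authenticate=False,
        path='/jobs/cloud/finish/',
        retries=100,  # replaces the explicit for-loop and sleep
        data={'cloud_job_id': '<id>', 'exit_code': 0},  # placeholder payload
    )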

biolib/compute_node/job_worker/job_storage.py
@@ -1,9 +1,8 @@
 import os
 
-import requests
-
 from biolib import utils
-from biolib.biolib_api_client import BiolibApiClient, CreatedJobDict
+from biolib._internal.http_client import HttpClient
+from biolib.biolib_api_client import CreatedJobDict
 from biolib.biolib_api_client.biolib_job_api import BiolibJobApi
 from biolib.compute_node.cloud_utils import CloudUtils
 from biolib.biolib_logging import logger_no_user_data
@@ -15,24 +14,23 @@ class JobStorage:
 
     @staticmethod
     def upload_module_input(job: CreatedJobDict, module_input_serialized: bytes) -> None:
-        base_url = BiolibApiClient.get().base_url
         job_uuid = job['public_id']
         headers = {'Job-Auth-Token': job['auth_token']}
 
         multipart_uploader = utils.MultiPartUploader(
             start_multipart_upload_request=dict(
                 requires_biolib_auth=False,
-                url=f'{base_url}/api/jobs/{job_uuid}/storage/input/start_upload/',
+                path=f'/jobs/{job_uuid}/storage/input/start_upload/',
                 headers=headers
             ),
             get_presigned_upload_url_request=dict(
                 requires_biolib_auth=False,
-                url=f'{base_url}/api/jobs/{job_uuid}/storage/input/presigned_upload_url/',
+                path=f'/jobs/{job_uuid}/storage/input/presigned_upload_url/',
                 headers=headers
            ),
            complete_upload_request=dict(
                requires_biolib_auth=False,
-                url=f'{base_url}/api/jobs/{job_uuid}/storage/input/complete_upload/',
+                path=f'/jobs/{job_uuid}/storage/input/complete_upload/',
                headers=headers
            ),
        )
@@ -60,7 +58,6 @@
 
     @staticmethod
     def _get_module_output_uploader(job_uuid: str) -> utils.MultiPartUploader:
-        base_url = BiolibApiClient.get().base_url
         config = CloudUtils.get_webserver_config()
         compute_node_auth_token = config['compute_node_info']['auth_token']  # pylint: disable=unsubscriptable-object
         headers = {'Compute-Node-Auth-Token': compute_node_auth_token}
@@ -68,17 +65,17 @@
         return utils.MultiPartUploader(
             start_multipart_upload_request=dict(
                 requires_biolib_auth=False,
-                url=f'{base_url}/api/jobs/{job_uuid}/storage/results/start_upload/',
+                path=f'/jobs/{job_uuid}/storage/results/start_upload/',
                 headers=headers,
             ),
             get_presigned_upload_url_request=dict(
                 requires_biolib_auth=False,
-                url=f'{base_url}/api/jobs/{job_uuid}/storage/results/presigned_upload_url/',
+                path=f'/jobs/{job_uuid}/storage/results/presigned_upload_url/',
                 headers=headers,
             ),
             complete_upload_request=dict(
                 requires_biolib_auth=False,
-                url=f'{base_url}/api/jobs/{job_uuid}/storage/results/complete_upload/',
+                path=f'/jobs/{job_uuid}/storage/results/complete_upload/',
                 headers=headers,
             ),
         )
@@ -92,8 +89,7 @@
             job_auth_token=job['auth_token'],
             storage_type='input',
         )
-        response = requests.get(url=presigned_download_url)
-        response.raise_for_status()
+        response = HttpClient.request(url=presigned_download_url)
         data: bytes = response.content
         logger_no_user_data.debug(f'Job "{job_uuid}" module input downloaded')
         return data

biolib/compute_node/job_worker/job_worker.py
@@ -12,9 +12,9 @@ import os
 import signal
 from types import FrameType
 
-import requests
 from docker.models.networks import Network  # type: ignore
 
+from biolib._internal.http_client import HttpClient
 from biolib.biolib_binary_format.stdout_and_stderr import StdoutAndStderr
 from biolib.compute_node.job_worker.job_legacy_input_wait_timeout_thread import JobLegacyInputWaitTimeout
 from biolib.compute_node.job_worker.job_storage import JobStorage
@@ -526,7 +526,7 @@ class JobWorker:
         start_time = time()
         logger_no_user_data.debug(f'Job "{root_job_id}" downloading runtime zip...')
         try:
-            runtime_zip_bytes = requests.get(runtime_zip_url).content
+            runtime_zip_bytes = HttpClient.request(url=runtime_zip_url).content
         except Exception as exception:
             raise ComputeProcessException(
                 exception,

biolib/jobs/job.py
@@ -6,7 +6,6 @@ from pathlib import Path
 from collections import OrderedDict
 from urllib.parse import urlparse
 
-import requests
 from biolib import api, utils
 from biolib._internal.http_client import HttpClient
 from biolib.biolib_api_client.biolib_job_api import BiolibJobApi
@@ -127,8 +126,7 @@ class Job:
             job_auth_token=self._job_dict['auth_token'],
             storage_type='input',
         )
-        response = requests.get(url=presigned_download_url)
-        response.raise_for_status()
+        response = HttpClient.request(url=presigned_download_url)
         module_input_serialized: bytes = response.content
         return ModuleInput(module_input_serialized).deserialize()
 

biolib/lfs/utils.py
@@ -6,9 +6,8 @@ from collections import namedtuple
 from pathlib import Path
 from struct import Struct
 
-import requests
-
 from biolib import utils
+from biolib._internal.http_client import HttpClient
 from biolib.app import BioLibApp
 from biolib.biolib_api_client.biolib_account_api import BiolibAccountApi
 from biolib.biolib_api_client.biolib_large_file_system_api import BiolibLargeFileSystemApi
@@ -143,18 +142,17 @@ def push_large_file_system(lfs_uri: str, input_dir: str, chunk_size_in_mb: Optio
 
     iterable_zip_stream = get_iterable_zip_stream(files=files_to_zip, chunk_size=chunk_size_in_bytes)
 
-    base_url = BiolibApiClient.get().base_url
     multipart_uploader = utils.MultiPartUploader(
         use_process_pool=True,
         get_presigned_upload_url_request=dict(
             headers=None,
             requires_biolib_auth=True,
-            url=f'{base_url}/api/lfs/versions/{lfs_resource_version_uuid}/presigned_upload_url/',
+            path=f'/lfs/versions/{lfs_resource_version_uuid}/presigned_upload_url/',
         ),
         complete_upload_request=dict(
             headers=None,
             requires_biolib_auth=True,
-            url=f'{base_url}/api/lfs/versions/{lfs_resource_version_uuid}/complete_upload/',
+            path=f'/lfs/versions/{lfs_resource_version_uuid}/complete_upload/',
         ),
     )
 
@@ -221,25 +219,19 @@ def get_file_data_from_large_file_system(lfs_uri: str, file_path: str) -> bytes:
     local_file_header_start = file_info['header_offset'] + len(local_file_header_signature_bytes)
     local_file_header_end = local_file_header_start + local_file_header_struct.size
 
-    local_file_header_response = requests.get(
+    local_file_header_response = HttpClient.request(
         url=lfs_url,
-        stream=True,
         headers={'range': f'bytes={local_file_header_start}-{local_file_header_end - 1}'},
-        timeout=300,
+        timeout_in_seconds=300,
     )
-    local_file_header_response.raise_for_status()
-    local_file_header_bytes: bytes = local_file_header_response.raw.data
-    local_file_header = LocalFileHeader._make(local_file_header_struct.unpack(local_file_header_bytes))
+    local_file_header = LocalFileHeader._make(local_file_header_struct.unpack(local_file_header_response.content))
 
     file_start = local_file_header_end + local_file_header.file_name_len + local_file_header.extra_field_len
     file_end = file_start + file_info['file_size']
 
-    response = requests.get(
+    response = HttpClient.request(
         url=lfs_url,
-        stream=True,
         headers={'range': f'bytes={file_start}-{file_end - 1}'},
-        timeout=300,  # timeout after 5 min
+        timeout_in_seconds=300,  # timeout after 5 min
     )
-    response.raise_for_status()
-    data: bytes = response.raw.data
-    return data
+    return response.content
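
get_file_data_from_large_file_system reads one member out of a remote ZIP with two HTTP range requests: one for the variable-length local file header, one for the file data itself. With HttpClient the body arrives fully buffered in response.content, which is why the stream=True and raw.data handling disappears. A bare sketch of such a ranged read, against a placeholder URL:

    from biolib._internal.http_client import HttpClient

    response = HttpClient.request(
        url='https://example.com/data.zip',  # placeholder
        headers={'range': 'bytes=0-29'},     # first 30 bytes only
        timeout_in_seconds=300,
    )
    first_bytes: bytes = response.content   # buffered body, no raw stream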

biolib/utils/__init__.py
@@ -1,22 +1,19 @@
 import collections.abc
 import multiprocessing
 import os
-import time
 import socket
 import sys
 
 from typing import Optional
-import requests
 from importlib_metadata import version, PackageNotFoundError
 
 from biolib.utils.seq_util import SeqUtil, SeqUtilRecord
-
-# try fetching version, if it fails (usually when in dev), add default
-from biolib.biolib_errors import BioLibError
+from biolib._internal.http_client import HttpClient
 from biolib.biolib_logging import logger_no_user_data, logger
 from biolib.typing_utils import Tuple, Iterator
 from .multipart_uploader import MultiPartUploader, get_chunk_iterator_from_bytes
 
+# try fetching version, if it fails (usually when in dev), add default
 try:
     BIOLIB_PACKAGE_VERSION = version('pybiolib')
 except PackageNotFoundError:
@@ -87,38 +84,17 @@ DownloadChunkInputTuple = Tuple[ByteRangeTuple, str]
 
 
 def _download_chunk(input_tuple: DownloadChunkInputTuple) -> bytes:
-    max_download_retries = 10
-
     byte_range, presigned_url = input_tuple
     start, end = byte_range
 
-    for retry_attempt in range(max_download_retries):
-        if retry_attempt > 0:
-            logger_no_user_data.debug(f'Attempt number {retry_attempt} for part {start}')
-        try:
-            response = requests.get(
-                url=presigned_url,
-                stream=True,
-                headers={'range': f'bytes={start}-{end}'},
-                timeout=300,  # timeout after 5 min
-            )
-            if response.ok:
-                return_value: bytes = response.raw.data
-                logger_no_user_data.debug(f'Returning raw data for part {start}')
-                return return_value
-            else:
-                logger_no_user_data.warning(
-                    f'Got not ok response when downloading part {start}:{end}. '
-                    f'Got response status {response.status_code} and content: {response.content.decode()} '
-                    f'Retrying...'
-                )
-        except Exception:  # pylint: disable=broad-except
-            logger_no_user_data.warning(f'Encountered error when downloading part {start}:{end}. Retrying...')
-
-        time.sleep(5)
-
-    logger_no_user_data.debug(f'Max retries hit, when downloading part {start}:{end}. Exiting...')
-    raise BioLibError(f'Max retries hit, when downloading part {start}:{end}. Exiting...')
+    response = HttpClient.request(
+        url=presigned_url,
+        headers={'range': f'bytes={start}-{end}'},
+        timeout_in_seconds=300,  # timeout after 5 min
+        retries=10,
+    )
+    logger_no_user_data.debug(f'Returning raw data for part {start}')
+    return response.content
 
 
 class ChunkIterator(collections.abc.Iterator):
@@ -153,11 +129,8 @@ class ChunkIterator(collections.abc.Iterator):
 def download_presigned_s3_url(presigned_url: str, output_file_path: str) -> None:
     chunk_size = 50_000_000
 
-    with requests.get(presigned_url, stream=True, headers={'range': 'bytes=0-1'}) as response:
-        if not response.ok:
-            raise Exception(f'Got response status code {response.status_code} and content {response.content.decode()}')
-
-        file_size = int(response.headers['Content-Range'].split('/')[1])
+    response = HttpClient.request(url=presigned_url, headers={'range': 'bytes=0-1'})
+    file_size = int(response.headers['Content-Range'].split('/')[1])
 
     chunk_iterator = ChunkIterator(file_size, chunk_size, presigned_url)
 
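
download_presigned_s3_url learns the object size from a two-byte probe: servers answer a ranged request with a header of the form Content-Range: bytes 0-1/<total>, and the total is everything after the slash. The new HttpResponse.headers dict is what makes this possible without requests. A worked example of the parsing, with an example header value:

    header_value = 'bytes 0-1/1048576'           # example Content-Range value
    file_size = int(header_value.split('/')[1])  # take the total after '/'
    assert file_size == 1048576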

biolib/utils/multipart_uploader.py
@@ -5,10 +5,9 @@ import os
 import time
 from urllib.parse import urlparse
 
-import requests
-
+import biolib.api
+from biolib._internal.http_client import HttpClient
 from biolib.biolib_api_client import BiolibApiClient
-from biolib.biolib_api_client.auth import BearerAuth
 from biolib.biolib_errors import BioLibError
 from biolib.biolib_logging import logger, logger_no_user_data
 from biolib.typing_utils import TypedDict, List, Iterator, Tuple, Optional, Dict
@@ -33,7 +32,7 @@ def get_chunk_iterator_from_file_object(file_object, chunk_size_in_bytes: int =
 class RequestOptions(TypedDict):
     headers: Optional[Dict[str, str]]
     requires_biolib_auth: bool
-    url: str
+    path: str
 
 
 class _PartMetadata(TypedDict):
@@ -67,20 +66,15 @@ class MultiPartUploader:
         logger_no_user_data.debug(f'Starting multipart upload of payload with size {payload_size_in_bytes} bytes')
 
         if self._start_multipart_upload_request:
-            requires_biolib_auth = self._start_multipart_upload_request['requires_biolib_auth']
-            start_multipart_upload = requests.post(
-                auth=BearerAuth(BiolibApiClient.get().access_token) if requires_biolib_auth else None,
-                headers=self._start_multipart_upload_request['headers'],
-                timeout=30,
-                url=self._start_multipart_upload_request['url'],
-            )
-            if start_multipart_upload.ok:
-                logger_no_user_data.debug('Multipart upload started')
-            else:
-                logger_no_user_data.debug(
-                    f'Failed to start multipart upload got response status: {start_multipart_upload.status_code}'
+            try:
+                biolib.api.client.post(
+                    authenticate=self._start_multipart_upload_request['requires_biolib_auth'],
+                    headers=self._start_multipart_upload_request['headers'],
+                    path=self._start_multipart_upload_request['path'],
                 )
-                raise Exception('Failed to start multipart upload')
+            except BaseException as error:
+                logger_no_user_data.debug(f'Failed to start multipart upload got error: {error}')
+                raise error
 
         # if multiprocessing start method is spawn or we are running in a daemon process,
         # multiprocessing.Pool may fail when called from script
@@ -116,30 +110,12 @@
             BiolibApiClient.refresh_auth_token()
 
         logger_no_user_data.debug(f'Uploaded {len(parts)} parts, now calling complete upload...')
-        for index in range(3):
-            try:
-                complete_upload_response = requests.post(
-                    auth=BearerAuth(BiolibApiClient.get().access_token) if requires_biolib_auth else None,
-                    headers=self._complete_upload_request['headers'],
-                    json={'parts': parts, 'size_bytes': self._bytes_uploaded},
-                    timeout=30,
-                    url=self._complete_upload_request['url'],
-                )
-                if complete_upload_response.ok:
-                    logger_no_user_data.debug('Multipart upload completed returning')
-                    return
-
-                logger_no_user_data.warning(
-                    f'Failed to complete multipart upload got response status {complete_upload_response.status_code}. '
-                    f'Retrying...'
-                )
-
-            except Exception as error:  # pylint: disable=broad-except
-                logger_no_user_data.warning('Encountered error when completing multipart upload. Retrying...')
-                logger.debug(f'Multipart complete error: {error}')
-                time.sleep(index * index + 2)
-
-        raise BioLibError('Max retries hit, when completing multipart upload')
+        biolib.api.client.post(
+            authenticate=requires_biolib_auth,
+            headers=self._complete_upload_request['headers'],
+            data={'parts': parts, 'size_bytes': self._bytes_uploaded},
+            path=self._complete_upload_request['path'],
+        )
 
     def _upload_chunk(self, _input: _UploadChunkInputType) -> _UploadChunkReturnType:
         part_number, chunk = _input
@@ -152,18 +128,12 @@
         logger_no_user_data.debug(f'Uploading part number {part_number} with size {len(chunk)} bytes...')
         try:
            logger_no_user_data.debug(f'Getting upload URL for chunk {part_number}...')
-            get_url_response = requests.get(
-                auth=BearerAuth(BiolibApiClient.get().access_token) if requires_biolib_auth else None,
+            get_url_response = biolib.api.client.get(
+                authenticate=requires_biolib_auth,
                 headers=self._get_presigned_upload_url_request['headers'],
                 params={'part_number': part_number},
-                timeout=30,
-                url=self._get_presigned_upload_url_request['url'],
+                path=self._get_presigned_upload_url_request['path'],
             )
-            if not get_url_response.ok:
-                raise Exception(
-                    f'Failed to get upload URL for part {part_number} got response status code '
-                    f'{get_url_response.status_code}'
-                )
 
             presigned_upload_url = get_url_response.json()['presigned_upload_url']
 
@@ -174,16 +144,13 @@
             presigned_upload_url = \
                 f'{app_caller_proxy_job_storage_base_url}{parsed_url.path}?{parsed_url.query}'
 
-            put_chunk_response = requests.put(url=presigned_upload_url, data=chunk, timeout=300)
-
-            if put_chunk_response.ok:
-                return _PartMetadata(PartNumber=part_number, ETag=put_chunk_response.headers['ETag']), len(chunk)
-            else:
-                logger_no_user_data.warning(
-                    f'Got response with status {put_chunk_response.status_code} when uploading part {part_number}. '
-                    'Retrying...'
-                )
-                logger.debug(f'Response content: {put_chunk_response.content.decode()}')
+            put_chunk_response = HttpClient.request(
+                url=presigned_upload_url,
+                data=chunk,
+                method='PUT',
+                timeout_in_seconds=300,
+            )
+            return _PartMetadata(PartNumber=part_number, ETag=put_chunk_response.headers['ETag']), len(chunk)
 
         except Exception as error:  # pylint: disable=broad-except
             logger_no_user_data.warning(f'Encountered error when uploading part {part_number}. Retrying...')
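
The RequestOptions dicts that configure MultiPartUploader now carry an API path instead of an absolute URL; the base URL is resolved by biolib.api.client at request time, and auth, retrying, and the PUT of each chunk all go through the shared clients. A sketch of constructing an uploader under the new scheme, modeled on the job_storage.py call sites above (the UUID and token are placeholders):

    from biolib import utils

    job_uuid = '<job-uuid>'                  # placeholder
    headers = {'Job-Auth-Token': '<token>'}  # placeholder
    uploader = utils.MultiPartUploader(
        start_multipart_upload_request=dict(
            requires_biolib_auth=False,
            path=f'/jobs/{job_uuid}/storage/input/start_upload/',
            headers=headers,
        ),
        get_presigned_upload_url_request=dict(
            requires_biolib_auth=False,
            path=f'/jobs/{job_uuid}/storage/input/presigned_upload_url/',
            headers=headers,
        ),
        complete_upload_request=dict(
            requires_biolib_auth=False,
            path=f'/jobs/{job_uuid}/storage/input/complete_upload/',
            headers=headers,
        ),
    )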

pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pybiolib"
-version = "1.1.1711"
+version = "1.1.1730"
 description = "BioLib Python Client"
 readme = "README.md"
 license = "MIT"