pybiolib 1.1.1747__py3-none-any.whl → 1.1.1881__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. biolib/__init__.py +8 -2
  2. biolib/_internal/data_record/__init__.py +1 -0
  3. biolib/_internal/data_record/data_record.py +153 -0
  4. biolib/_internal/data_record/remote_storage_endpoint.py +27 -0
  5. biolib/_internal/http_client.py +14 -15
  6. biolib/_internal/push_application.py +22 -37
  7. biolib/_internal/runtime.py +73 -0
  8. biolib/_internal/utils/__init__.py +18 -0
  9. biolib/app/app.py +6 -1
  10. biolib/app/search_apps.py +8 -12
  11. biolib/biolib_api_client/api_client.py +14 -9
  12. biolib/biolib_api_client/app_types.py +1 -0
  13. biolib/biolib_api_client/biolib_app_api.py +1 -1
  14. biolib/biolib_binary_format/utils.py +19 -2
  15. biolib/cli/__init__.py +6 -2
  16. biolib/cli/auth.py +58 -0
  17. biolib/cli/data_record.py +43 -0
  18. biolib/cli/download_container.py +3 -1
  19. biolib/cli/init.py +1 -0
  20. biolib/cli/lfs.py +39 -9
  21. biolib/cli/push.py +1 -1
  22. biolib/cli/run.py +3 -2
  23. biolib/cli/start.py +1 -0
  24. biolib/compute_node/cloud_utils/cloud_utils.py +2 -2
  25. biolib/compute_node/job_worker/cache_state.py +1 -1
  26. biolib/compute_node/job_worker/executors/docker_executor.py +9 -7
  27. biolib/compute_node/job_worker/job_worker.py +8 -2
  28. biolib/compute_node/remote_host_proxy.py +30 -2
  29. biolib/jobs/job.py +28 -29
  30. biolib/lfs/__init__.py +0 -2
  31. biolib/lfs/utils.py +23 -107
  32. biolib/runtime/__init__.py +13 -1
  33. biolib/sdk/__init__.py +17 -4
  34. biolib/user/sign_in.py +8 -12
  35. biolib/utils/__init__.py +1 -1
  36. biolib/utils/app_uri.py +11 -4
  37. biolib/utils/cache_state.py +2 -2
  38. biolib/utils/seq_util.py +15 -10
  39. {pybiolib-1.1.1747.dist-info → pybiolib-1.1.1881.dist-info}/METADATA +1 -1
  40. {pybiolib-1.1.1747.dist-info → pybiolib-1.1.1881.dist-info}/RECORD +43 -39
  41. {pybiolib-1.1.1747.dist-info → pybiolib-1.1.1881.dist-info}/WHEEL +1 -1
  42. biolib/biolib_api_client/biolib_account_api.py +0 -8
  43. biolib/biolib_api_client/biolib_large_file_system_api.py +0 -34
  44. biolib/runtime/results.py +0 -20
  45. {pybiolib-1.1.1747.dist-info → pybiolib-1.1.1881.dist-info}/LICENSE +0 -0
  46. {pybiolib-1.1.1747.dist-info → pybiolib-1.1.1881.dist-info}/entry_points.txt +0 -0
biolib/biolib_binary_format/utils.py CHANGED
@@ -1,5 +1,6 @@
 from abc import ABC, abstractmethod
 import io
+from typing import Optional, Callable
 
 from biolib._internal.http_client import HttpClient
 
@@ -106,10 +107,18 @@ class InMemoryIndexableBuffer(IndexableBuffer):
 
 class LazyLoadedFile:
 
-    def __init__(self, path: str, buffer: IndexableBuffer, start: int, length: int):
+    def __init__(
+        self,
+        path: str,
+        buffer: IndexableBuffer,
+        start: Optional[int],
+        length: int,
+        start_func: Optional[Callable[[], int]] = None,
+    ):
         self._path = path
         self._buffer = buffer
         self._start = start
+        self._start_func = start_func
         self._length = length
 
     def __repr__(self) -> str:
@@ -119,8 +128,16 @@ class LazyLoadedFile:
     def path(self) -> str:
         return self._path
 
+    @property
+    def name(self) -> str:
+        return self._path.split('/')[-1]
+
     @property
     def start(self) -> int:
+        if self._start is None:
+            assert self._start_func is not None, 'No start function or start value'
+            self._start = self._start_func()
+
         return self._start
 
     @property
@@ -131,4 +148,4 @@ class LazyLoadedFile:
         return io.BytesIO(self.get_data())
 
     def get_data(self) -> bytes:
-        return self._buffer.get_data(start=self._start, length=self._length)
+        return self._buffer.get_data(start=self.start, length=self._length)
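Note on the change above: the new start_func parameter lets a caller defer computing a file's byte offset until it is first needed, and the start property memoizes the result so the callback runs at most once. A minimal sketch of the behaviour, assuming InMemoryIndexableBuffer wraps a bytes object and exposes get_data(start, length) as its name and this hunk suggest:

    from biolib.biolib_binary_format.utils import InMemoryIndexableBuffer, LazyLoadedFile

    buffer = InMemoryIndexableBuffer(b'0123456789')  # assumed constructor

    def compute_start() -> int:
        print('resolving offset...')  # stands in for an expensive index lookup
        return 4

    lazy_file = LazyLoadedFile(path='dir/example.txt', buffer=buffer, start=None, length=3, start_func=compute_start)
    print(lazy_file.name)        # 'example.txt' (new name property)
    print(lazy_file.get_data())  # triggers compute_start once, then reads b'456'
    print(lazy_file.start)       # 4, memoized; compute_start is not called again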
biolib/cli/__init__.py CHANGED
@@ -5,7 +5,7 @@ import click
 
 from biolib import utils
 from biolib.biolib_logging import logger, logger_no_user_data
-from biolib.cli import init, lfs, push, run, start, runtime, download_container
+from biolib.cli import auth, data_record, download_container, init, lfs, push, run, runtime, start
 
 
 @click.version_option(version=utils.BIOLIB_PACKAGE_VERSION, prog_name='pybiolib')
@@ -20,13 +20,17 @@ def cli() -> None:
     logger_no_user_data.configure(default_log_level=logging.WARNING)
 
 
+cli.add_command(auth.login)
+cli.add_command(auth.logout)
+cli.add_command(auth.whoami)
+cli.add_command(download_container.download_container)
 cli.add_command(init.init)
 cli.add_command(lfs.lfs)
 cli.add_command(push.push)
 cli.add_command(run.run)
 cli.add_command(runtime.runtime)
 cli.add_command(start.start)
-cli.add_command(download_container.download_container)
+cli.add_command(data_record.data_record)
 
 # allow this script to be called without poetry in dev e.g. by an IDE debugger
 if utils.IS_DEV and __name__ == '__main__':
biolib/cli/auth.py ADDED
@@ -0,0 +1,58 @@
+import logging
+import sys
+
+import click
+
+from biolib import api, biolib_errors
+from biolib.biolib_api_client.api_client import BiolibApiClient
+from biolib.biolib_logging import logger, logger_no_user_data
+from biolib.user import sign_in, sign_out
+
+
+@click.command(help='Log in to your BioLib account with your web browser')
+@click.option(
+    '-w',
+    is_flag=True,
+    default=False,
+    required=False,
+    type=bool,
+    help='Automatically open the login page in the default web browser',
+)
+def login(w: bool) -> None:  # pylint: disable=invalid-name
+    logger.configure(default_log_level=logging.INFO)
+    logger_no_user_data.configure(default_log_level=logging.INFO)
+    sign_in(open_in_default_browser=w)
+
+
+@click.command(help='Log out of your BioLib account')
+def logout() -> None:
+    logger.configure(default_log_level=logging.INFO)
+    logger_no_user_data.configure(default_log_level=logging.INFO)
+    sign_out()
+
+
+@click.command(help='Print the full name of the logged-in user')
+def whoami() -> None:
+    client = BiolibApiClient.get()
+    if client.is_signed_in:
+        user_uuid = None
+        if client.access_token is None:
+            print('Unable to fetch user credentials. Please try logging out and logging in again.')
+            exit(1)
+        try:
+            user_uuid = client.decode_jwt_without_checking_signature(jwt=client.access_token)['payload']['public_id']
+        except biolib_errors.BioLibError as error:
+            print(
+                f'Unable to reference user public_id in access token:\n {error.message}',
+                file=sys.stderr,
+            )
+            exit(1)
+        response = api.client.get(path=f'/user/{user_uuid}/')
+        user_dict = response.json()
+        email = user_dict['email']
+        intrinsic_account = [account for account in user_dict['accounts'] if account['role'] == 'intrinsic'][0]
+        display_name = intrinsic_account['display_name']
+        print(f'Name: {display_name}\nEmail: {email}')
+    else:
+        print('Not logged in', file=sys.stderr)
+        exit(1)
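The whoami command relies on decode_jwt_without_checking_signature to read the user's public_id out of the access token. That method's implementation is not part of this diff, but the general technique is simply base64url-decoding the middle JWT segment; a self-contained sketch (not the actual method):

    import base64
    import json

    def decode_jwt_payload_unverified(jwt: str) -> dict:
        _header, payload_b64, _signature = jwt.split('.')
        padded = payload_b64 + '=' * (-len(payload_b64) % 4)  # restore stripped base64url padding
        return json.loads(base64.urlsafe_b64decode(padded))

No signature verification happens here, which is acceptable only because the token is inspected locally rather than trusted for authorization.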
biolib/cli/data_record.py ADDED
@@ -0,0 +1,43 @@
+import logging
+import os
+
+import click
+
+from biolib._internal.data_record import DataRecord
+from biolib.biolib_logging import logger, logger_no_user_data
+from biolib.typing_utils import Optional
+
+
+@click.group(help='Data Records')
+def data_record() -> None:
+    logger.configure(default_log_level=logging.INFO)
+    logger_no_user_data.configure(default_log_level=logging.INFO)
+
+
+@data_record.command(help='Create a Data Record')
+@click.option('--destination', type=str, required=True)
+@click.option('--data-path', required=True, type=click.Path(exists=True))
+@click.option('--name', type=str, required=False)
+def create(destination: str, data_path: str, name: Optional[str] = None) -> None:
+    DataRecord.create(destination, data_path, name)
+
+
+@data_record.command(help='Download files from a Data Record')
+@click.argument('uri', required=True)
+@click.option('--file', required=False, type=str)
+@click.option('--path-filter', required=False, type=str, hide_input=True)
+def download(uri: str, file: Optional[str], path_filter: Optional[str]) -> None:
+    record = DataRecord(uri=uri)
+    if file is not None:
+        try:
+            file_obj = [file_obj for file_obj in record.list_files() if file_obj.path == file][0]
+        except IndexError:
+            raise Exception('File not found in data record') from None
+
+        assert not os.path.exists(file_obj.name), 'File already exists in current directory'
+        with open(file_obj.name, 'wb') as file_handle:
+            file_handle.write(file_obj.get_data())
+
+    else:
+        assert not os.path.exists(record.name), f'Directory with name {record.name} already exists in current directory'
+        record.save_files(output_dir=record.name, path_filter=path_filter)
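The same operations are available programmatically through the DataRecord class this command imports; a usage sketch with placeholder URIs and paths:

    from biolib._internal.data_record import DataRecord

    # Create a record from a local directory (the destination URI is a placeholder)
    DataRecord.create('my-account/my-record', '/path/to/data', 'my-record')

    # List and download files from an existing record
    record = DataRecord(uri='my-account/my-record')
    for file in record.list_files():
        print(file.path, file.length)
    record.save_files(output_dir=record.name, path_filter=None)  # path_filter optionally restricts which files are saved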
biolib/cli/download_container.py CHANGED
@@ -1,10 +1,12 @@
 import logging
+
 import click
+
 from biolib.biolib_download_container import download_container_from_uri
 from biolib.biolib_logging import logger, logger_no_user_data
 
 
-@click.command(help='Push an application to BioLib', name='download-container')
+@click.command(help='Pull an application from BioLib', name='download-container', hidden=True)
 @click.argument('uri')
 def download_container(uri: str) -> None:
     logger.configure(default_log_level=logging.INFO)
biolib/cli/init.py CHANGED
@@ -2,6 +2,7 @@ import os
 import sys
 
 import click
+
 from biolib import templates
 
 
biolib/cli/lfs.py CHANGED
@@ -1,12 +1,15 @@
+import json
 import logging
+import os
 import sys
+from typing import Dict, List
 
 import click
 
-import biolib.lfs
 from biolib import biolib_errors
-from biolib.biolib_logging import logger_no_user_data, logger
-from biolib.lfs import push_large_file_system, create_large_file_system, describe_large_file_system, prune_lfs_cache
+from biolib._internal.data_record import DataRecord
+from biolib.biolib_logging import logger, logger_no_user_data
+from biolib.lfs import create_large_file_system, prune_lfs_cache, push_large_file_system
 from biolib.typing_utils import Optional
 
 
@@ -44,9 +47,16 @@ def download_file(uri: str, file_path: str) -> None:
     logger.configure(default_log_level=logging.INFO)
     logger_no_user_data.configure(default_log_level=logging.INFO)
     try:
-        data = biolib.lfs.get_file_data_from_large_file_system(lfs_uri=uri, file_path=file_path)
-        with open(file_path, mode='wb') as file:
-            file.write(data)
+        record = DataRecord(uri=uri)
+        try:
+            file_obj = [file_obj for file_obj in record.list_files() if file_obj.path == file_path][0]
+        except IndexError:
+            raise Exception('File not found in data record') from None
+
+        assert not os.path.exists(file_obj.name), 'File already exists in current directory'
+        with open(file_obj.name, 'wb') as file_handle:
+            file_handle.write(file_obj.get_data())
+
     except biolib_errors.BioLibError as error:
         print(f'An error occurred:\n{error.message}', file=sys.stderr)
         exit(1)
@@ -54,9 +64,29 @@ def download_file(uri: str, file_path: str) -> None:
 
 @lfs.command(help='Describe a Large File System')
 @click.argument('uri', required=True)
-@click.option('--json', is_flag=True, default=False, required=False, help='Format output as JSON')
-def describe(uri: str, json: bool) -> None:
-    describe_large_file_system(lfs_uri=uri, output_as_json=json)
+@click.option('--json', 'output_as_json', is_flag=True, default=False, required=False, help='Format output as JSON')
+def describe(uri: str, output_as_json: bool) -> None:
+    data_record = DataRecord(uri)
+    files_info: List[Dict] = []
+    total_size_in_bytes = 0
+    for file in data_record.list_files():
+        files_info.append({'path': file.path, 'size_bytes': file.length})
+        total_size_in_bytes += file.length
+
+    if output_as_json:
+        print(
+            json.dumps(
+                obj={'uri': data_record.uri, 'size_bytes': total_size_in_bytes, 'files': files_info},
+                indent=4,
+            )
+        )
+    else:
+        print(f'Large File System {data_record.uri}\ntotal {total_size_in_bytes} bytes\n')
+        print('size bytes path')
+        for file_info in files_info:
+            size_string = str(file_info['size_bytes'])
+            leading_space_string = ' ' * (10 - len(size_string))
+            print(f"{leading_space_string}{size_string} {file_info['path']}")
 
 
 @lfs.command(help='Prune LFS cache', hidden=True)
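In the describe rewrite above, the flag keeps its CLI spelling while its Python-side name changes: passing a second string to click.option binds --json to the parameter output_as_json, so the newly imported json module is no longer shadowed inside the function. A generic illustration of that Click pattern:

    import click

    @click.command()
    @click.option('--json', 'output_as_json', is_flag=True, default=False)  # flag stays '--json'
    def describe(output_as_json: bool) -> None:
        click.echo(f'as json: {output_as_json}')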
biolib/cli/push.py CHANGED
@@ -3,8 +3,8 @@ from typing import Optional
 
 import click
 
-from biolib.biolib_logging import logger, logger_no_user_data
 from biolib._internal.push_application import push_application
+from biolib.biolib_logging import logger, logger_no_user_data
 
 
 @click.command(help='Push an application to BioLib')
biolib/cli/run.py CHANGED
@@ -1,4 +1,5 @@
 import sys
+
 import click
 
 from biolib import biolib_errors, utils
@@ -27,13 +28,13 @@ def run(local: bool, non_blocking: bool, uri: str, args: Tuple[str]) -> None:
         stdin = sys.stdin.read()
         return stdin
 
-    blocking = False if non_blocking else True
+    blocking = not non_blocking
     job = app.cli(
         args=list(args),
         stdin=_get_stdin(),
         files=None,
         machine=('local' if local else ''),
-        blocking=blocking
+        blocking=blocking,
     )
 
     if blocking:
biolib/cli/start.py CHANGED
@@ -24,6 +24,7 @@ def start(host: str, port: int, tls_certificate: Optional[str], tls_key: Optiona
 
     try:
         from biolib.compute_node.webserver import webserver  # pylint: disable=import-outside-toplevel
+
         webserver.start_webserver(
             host=host,
             port=port,
biolib/compute_node/cloud_utils/cloud_utils.py CHANGED
@@ -81,8 +81,8 @@ class CloudUtils:
                        'error': error,
                    },
                )
            except BaseException as error_object:
                logger_no_user_data.error(f'Failed to deregister got error: {error_object}')
        else:
            logger_no_user_data.error("Not deregistering as environment is not cloud")
 
biolib/compute_node/job_worker/cache_state.py CHANGED
@@ -16,7 +16,7 @@ class DockerCacheStateError(CacheStateError):
 
 class LfsCacheState(CacheState):
 
-    def __init__(self):
+    def __init__(self) -> None:
         super().__init__()
 
         self._storage_path_for_write: str = self._get_storage_path_for_write()
biolib/compute_node/job_worker/executors/docker_executor.py CHANGED
@@ -18,6 +18,8 @@ from docker.errors import ImageNotFound, APIError  # type: ignore
 from docker.models.containers import Container  # type: ignore
 
 from biolib import utils
+
+from biolib._internal.runtime import RuntimeJobDataDict
 from biolib.biolib_binary_format import ModuleInput, ModuleOutputV2
 from biolib.biolib_docker_client import BiolibDockerClient
 from biolib.biolib_errors import DockerContainerNotFoundDuringExecutionException, BioLibError
@@ -37,7 +39,7 @@ from biolib.typing_utils import List, Dict, Optional
 
 class DockerExecutor:
 
-    def __init__(self, options: LocalExecutorOptions):
+    def __init__(self, options: LocalExecutorOptions) -> None:
         self._options: LocalExecutorOptions = options
         self._is_cleaning_up = False
 
@@ -267,12 +269,12 @@ class DockerExecutor:
         internal_network = self._options['internal_network']
         extra_hosts: Dict[str, str] = {}
 
-        biolib_system_secret = {
-            'version': '1.0.0',
-            'job_requested_machine': self._options['job']['requested_machine'],
-            'job_uuid': self._options['job']['public_id'],
-            'job_auth_token': self._options['job']['auth_token'],
-        }
+        biolib_system_secret = RuntimeJobDataDict(
+            version='1.0.0',
+            job_requested_machine=self._options['job']['requested_machine'],
+            job_uuid=self._options['job']['public_id'],
+            job_auth_token=self._options['job']['auth_token'],
+        )
         secrets: Dict[str, str] = dict(
             **module.get('secrets', {}),
             biolib_system_secret=json.dumps(biolib_system_secret, indent=4),
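json.dumps accepts the new RuntimeJobDataDict directly because a TypedDict is an ordinary dict at runtime; the constructor-style call only adds static key and type checking. The real definition lives in the new biolib/_internal/runtime.py, which is not shown in this diff; a plausible sketch of its shape:

    import json
    from typing import TypedDict

    class RuntimeJobDataDict(TypedDict):  # assumed shape, for illustration only
        version: str
        job_requested_machine: str
        job_uuid: str
        job_auth_token: str

    secret = RuntimeJobDataDict(
        version='1.0.0',
        job_requested_machine='local',
        job_uuid='00000000-0000-0000-0000-000000000000',
        job_auth_token='placeholder-token',
    )
    print(json.dumps(secret, indent=4))  # serializes like any dict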
biolib/compute_node/job_worker/job_worker.py CHANGED
@@ -97,7 +97,11 @@ class JobWorker:
         if socket_port:
             self._connect_to_parent()
 
-    def _handle_exit_gracefully(self, signum: int, frame: FrameType) -> None:  # pylint: disable=unused-argument
+    def _handle_exit_gracefully(
+        self,
+        signum: int,
+        frame: Optional[FrameType],  # pylint: disable=unused-argument
+    ) -> None:
         job_id = self._root_job_wrapper["job"]["public_id"] if self._root_job_wrapper else None
         logger_no_user_data.debug(
             f'_JobWorker ({job_id}) got exit signal {signal.Signals(signum).name}'  # pylint: disable=no-member
@@ -246,6 +250,7 @@ class JobWorker:
 
     def _start_network_and_remote_host_proxies(self, job: CreatedJobDict) -> None:
         app_version = job['app_version']
+        app = app_version.get('app', {})
         job_id = job['public_id']
         remote_hosts = app_version['remote_hosts']
         if utils.IS_RUNNING_IN_CLOUD:
@@ -307,7 +312,8 @@ class JobWorker:
                     self._public_network,
                     self._internal_network,
                     job_id,
-                    ports
+                    ports,
+                    can_push_data_record_for_user=app.get('can_push_data_record_for_user', False),
                 )
                 remote_host_proxy.start()
                 self._remote_host_proxies.append(remote_host_proxy)
biolib/compute_node/remote_host_proxy.py CHANGED
@@ -31,8 +31,10 @@ class RemoteHostProxy:
         public_network: Network,
         internal_network: Optional[Network],
         job_id: str,
-        ports: List[int]
+        ports: List[int],
+        can_push_data_record_for_user: bool,
     ):
+        self._can_push_data_record_for_user: bool = can_push_data_record_for_user
         self.is_app_caller_proxy = remote_host['hostname'] == 'AppCallerProxy'
 
         # Default to port 443 for now until backend serves remote_hosts with port specified
@@ -165,7 +167,7 @@ class RemoteHostProxy:
 
         if utils.IS_RUNNING_IN_CLOUD:
             config = CloudUtils.get_webserver_config()
-            s3_results_bucket_name = config['s3_general_storage_bucket_name']  # pylint: disable=unsubscriptable-object
+            s3_results_bucket_name = config['s3_general_storage_bucket_name']
             s3_results_base_url = f'https://{s3_results_bucket_name}.s3.amazonaws.com'
         else:
             if base_url in ('https://biolib.com', 'https://staging-elb.biolib.com'):
@@ -205,6 +207,12 @@ http {{
         default "";
     }}
 
+    map $request_method $bearer_token_on_post_and_get {{
+        POST "{bearer_token}";
+        GET "{bearer_token}";
+        default "";
+    }}
+
     server {{
         listen 80;
         resolver 127.0.0.11 valid=30s;
@@ -279,6 +287,13 @@ http {{
             proxy_ssl_server_name on;
         }}
 
+        location /api/lfs/ {{
+            proxy_pass {base_url}/api/lfs/;
+            proxy_set_header authorization {'$bearer_token_on_post_and_get' if self._can_push_data_record_for_user else '""'};
+            proxy_set_header cookie "";
+            proxy_ssl_server_name on;
+        }}
+
         location /api/ {{
             proxy_pass {base_url}/api/;
             proxy_set_header authorization "";
@@ -307,12 +322,25 @@ http {{
             proxy_ssl_server_name on;
         }}
 
+        {f"""
+        location /proxy/storage/lfs/versions/ {{
+            proxy_pass {cloud_base_url}/proxy/storage/lfs/versions/;
+            proxy_set_header authorization "";
+            proxy_set_header cookie "";
+            proxy_ssl_server_name on;
+        }}
+        """ if self._can_push_data_record_for_user else ''}
+
         location /proxy/cloud/ {{
             proxy_pass {cloud_base_url}/proxy/cloud/;
             proxy_set_header authorization "";
             proxy_set_header cookie "";
             proxy_ssl_server_name on;
         }}
+
+        location / {{
+            return 404 "Not found";
+        }}
     }}
 }}
 '''
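Two details in the generated proxy config above are easy to miss: the nginx map on $request_method forwards the bearer token only for POST and GET requests, and whole location blocks are spliced in conditionally with a Python f-string expression. A stripped-down sketch of the f-string pattern (names and URLs are placeholders):

    can_push_data_record_for_user = True
    cloud_base_url = 'https://example.com'

    nginx_config = f'''
    server {{
        {f"""
        location /proxy/storage/lfs/versions/ {{
            proxy_pass {cloud_base_url}/proxy/storage/lfs/versions/;
        }}
        """ if can_push_data_record_for_user else ''}
    }}
    '''
    print(nginx_config)

The doubled braces escape literal nginx braces inside the f-string, and the inner triple-quoted f-string evaluates to an empty string when the flag is off, so the block disappears entirely. The new catch-all location / returning 404 also tightens the proxy: requests matching no explicit location are rejected instead of being passed upstream.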
biolib/jobs/job.py CHANGED
@@ -8,6 +8,8 @@ from urllib.parse import urlparse
 
 from biolib import api, utils
 from biolib._internal.http_client import HttpClient
+from biolib._internal.utils import open_browser_window_from_notebook
+from biolib.biolib_api_client import BiolibApiClient
 from biolib.biolib_api_client.biolib_job_api import BiolibJobApi
 from biolib.biolib_binary_format import LazyLoadedFile, ModuleOutputV2, ModuleInput, ModuleInputDict
 from biolib.biolib_binary_format.stdout_and_stderr import StdoutAndStderr
@@ -23,12 +25,14 @@ from biolib.utils import IS_RUNNING_IN_NOTEBOOK
 
 class Job:
     # Columns to print in table when showing Job
-    table_columns_to_row_map = OrderedDict({
-        'ID': {'key': 'uuid', 'params': {'width': 36}},
-        'Application': {'key': 'app_uri', 'params': {}},
-        'Status': {'key': 'state', 'params': {}},
-        'Started At': {'key': 'started_at', 'params': {}},
-    })
+    table_columns_to_row_map = OrderedDict(
+        {
+            'ID': {'key': 'uuid', 'params': {'width': 36}},
+            'Application': {'key': 'app_uri', 'params': {}},
+            'Status': {'key': 'state', 'params': {}},
+            'Started At': {'key': 'started_at', 'params': {}},
+        }
+    )
 
     def __init__(self, job_dict: JobDict):
         self._uuid: str = job_dict['uuid']
@@ -172,6 +176,17 @@ class Job:
             time.sleep(2)
         logger.info(f'Job {self.id} has finished.')
 
+    def open_browser(self) -> None:
+        api_client = BiolibApiClient.get()
+        results_url_to_open = f'{api_client.base_url}/results/{self.id}/?token={self._auth_token}'
+        if IS_RUNNING_IN_NOTEBOOK:
+            print(f'Opening results page at: {results_url_to_open}')
+            print('If your browser does not open automatically, click on the link above.')
+            open_browser_window_from_notebook(results_url_to_open)
+        else:
+            print('Please copy and paste the following link into your browser:')
+            print(results_url_to_open)
+
     def _get_cloud_job(self) -> CloudJobDict:
         self._refetch_job_dict(force_refetch=True)
         if self._job_dict['cloud_job'] is None:
@@ -190,20 +205,11 @@ class Job:
     @staticmethod
     def show_jobs(count: int = 25) -> None:
         job_dicts = Job._get_job_dicts(count)
-        BioLibTable(
-            columns_to_row_map=Job.table_columns_to_row_map,
-            rows=job_dicts,
-            title='Jobs'
-        ).print_table()
+        BioLibTable(columns_to_row_map=Job.table_columns_to_row_map, rows=job_dicts, title='Jobs').print_table()
 
     @staticmethod
     def _get_job_dicts(count: int) -> List['JobDict']:
-        job_dicts: List['JobDict'] = api.client.get(
-            path='/jobs/',
-            params={
-                'page_size': str(count)
-            }
-        ).json()['results']
+        job_dicts: List['JobDict'] = api.client.get(path='/jobs/', params={'page_size': str(count)}).json()['results']
         return job_dicts
 
     @staticmethod
@@ -235,9 +241,7 @@ class Job:
     def show(self) -> None:
         self._refetch_job_dict()
         BioLibTable(
-            columns_to_row_map=Job.table_columns_to_row_map,
-            rows=[self._job_dict],
-            title=f'Job: {self._uuid}'
+            columns_to_row_map=Job.table_columns_to_row_map, rows=[self._job_dict], title=f'Job: {self._uuid}'
         ).print_table()
 
     def stream_logs(self) -> None:
@@ -305,12 +309,10 @@ class Job:
 
     def _print_full_logs(self, node_url: str) -> None:
         try:
-            response_json = HttpClient.request(
-                url=f'{node_url}/v1/job/{self._uuid}/status/?logs=full'
-            ).json()
+            response_json = HttpClient.request(url=f'{node_url}/v1/job/{self._uuid}/status/?logs=full').json()
         except Exception as error:
             logger.error(f'Could not get full streamed logs due to: {error}')
-            raise BioLibError from error
+            raise BioLibError('Could not get full streamed logs') from error
 
         for status_update in response_json.get('previous_status_updates', []):
             logger.info(f'Cloud: {status_update["log_message"]}')
@@ -336,9 +338,7 @@ class Job:
     def _get_job_status_from_compute_node(self, compute_node_url):
         for _ in range(15):
             try:
-                return HttpClient.request(
-                    url=f'{compute_node_url}/v1/job/{self._uuid}/status/'
-                ).json()
+                return HttpClient.request(url=f'{compute_node_url}/v1/job/{self._uuid}/status/').json()
             except Exception:  # pylint: disable=broad-except
                 cloud_job = self._get_cloud_job()
                 logger.debug("Failed to get status from compute node, retrying...")
@@ -346,8 +346,7 @@
                 logger.debug("Job no longer exists on compute node, checking for error...")
                 if cloud_job['error_code'] != SystemExceptionCodes.COMPLETED_SUCCESSFULLY.value:
                     error_message = SystemExceptionCodeMap.get(
-                        cloud_job['error_code'],
-                        f'Unknown error code {cloud_job["error_code"]}'
+                        cloud_job['error_code'], f'Unknown error code {cloud_job["error_code"]}'
                     )
                     raise BioLibError(f'Cloud: {error_message}') from None
                 else:
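A usage sketch for the new open_browser method; the app URI is a placeholder, and the remaining app.cli arguments are assumed to default sensibly:

    import biolib

    app = biolib.load('author/application')  # placeholder URI
    job = app.cli(args=['--help'], blocking=False)
    job.open_browser()  # prints the results URL; opens it automatically when run in a notebook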
biolib/lfs/__init__.py CHANGED
@@ -1,6 +1,4 @@
 from .cache import prune_lfs_cache
 from .utils import \
-    describe_large_file_system, \
-    get_file_data_from_large_file_system, \
     push_large_file_system, \
     create_large_file_system