pybiolib 0.2.951__py3-none-any.whl → 1.2.1890__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
Files changed (262)
  1. biolib/__init__.py +357 -11
  2. biolib/_data_record/data_record.py +380 -0
  3. biolib/_index/__init__.py +0 -0
  4. biolib/_index/index.py +55 -0
  5. biolib/_index/query_result.py +103 -0
  6. biolib/_internal/__init__.py +0 -0
  7. biolib/_internal/add_copilot_prompts.py +58 -0
  8. biolib/_internal/add_gui_files.py +81 -0
  9. biolib/_internal/data_record/__init__.py +1 -0
  10. biolib/_internal/data_record/data_record.py +85 -0
  11. biolib/_internal/data_record/push_data.py +116 -0
  12. biolib/_internal/data_record/remote_storage_endpoint.py +43 -0
  13. biolib/_internal/errors.py +5 -0
  14. biolib/_internal/file_utils.py +125 -0
  15. biolib/_internal/fuse_mount/__init__.py +1 -0
  16. biolib/_internal/fuse_mount/experiment_fuse_mount.py +209 -0
  17. biolib/_internal/http_client.py +159 -0
  18. biolib/_internal/lfs/__init__.py +1 -0
  19. biolib/_internal/lfs/cache.py +51 -0
  20. biolib/_internal/libs/__init__.py +1 -0
  21. biolib/_internal/libs/fusepy/__init__.py +1257 -0
  22. biolib/_internal/push_application.py +488 -0
  23. biolib/_internal/runtime.py +22 -0
  24. biolib/_internal/string_utils.py +13 -0
  25. biolib/_internal/templates/__init__.py +1 -0
  26. biolib/_internal/templates/copilot_template/.github/instructions/general-app-knowledge.instructions.md +10 -0
  27. biolib/_internal/templates/copilot_template/.github/instructions/style-general.instructions.md +20 -0
  28. biolib/_internal/templates/copilot_template/.github/instructions/style-python.instructions.md +16 -0
  29. biolib/_internal/templates/copilot_template/.github/instructions/style-react-ts.instructions.md +47 -0
  30. biolib/_internal/templates/copilot_template/.github/prompts/biolib_app_inputs.prompt.md +11 -0
  31. biolib/_internal/templates/copilot_template/.github/prompts/biolib_onboard_repo.prompt.md +19 -0
  32. biolib/_internal/templates/copilot_template/.github/prompts/biolib_run_apps.prompt.md +12 -0
  33. biolib/_internal/templates/dashboard_template/.biolib/config.yml +5 -0
  34. biolib/_internal/templates/github_workflow_template/.github/workflows/biolib.yml +21 -0
  35. biolib/_internal/templates/gitignore_template/.gitignore +10 -0
  36. biolib/_internal/templates/gui_template/.yarnrc.yml +1 -0
  37. biolib/_internal/templates/gui_template/App.tsx +53 -0
  38. biolib/_internal/templates/gui_template/Dockerfile +27 -0
  39. biolib/_internal/templates/gui_template/biolib-sdk.ts +82 -0
  40. biolib/_internal/templates/gui_template/dev-data/output.json +7 -0
  41. biolib/_internal/templates/gui_template/index.css +5 -0
  42. biolib/_internal/templates/gui_template/index.html +13 -0
  43. biolib/_internal/templates/gui_template/index.tsx +10 -0
  44. biolib/_internal/templates/gui_template/package.json +27 -0
  45. biolib/_internal/templates/gui_template/tsconfig.json +24 -0
  46. biolib/_internal/templates/gui_template/vite-plugin-dev-data.ts +50 -0
  47. biolib/_internal/templates/gui_template/vite.config.mts +10 -0
  48. biolib/_internal/templates/init_template/.biolib/config.yml +19 -0
  49. biolib/_internal/templates/init_template/Dockerfile +14 -0
  50. biolib/_internal/templates/init_template/requirements.txt +1 -0
  51. biolib/_internal/templates/init_template/run.py +12 -0
  52. biolib/_internal/templates/init_template/run.sh +4 -0
  53. biolib/_internal/templates/templates.py +25 -0
  54. biolib/_internal/tree_utils.py +106 -0
  55. biolib/_internal/utils/__init__.py +65 -0
  56. biolib/_internal/utils/auth.py +46 -0
  57. biolib/_internal/utils/job_url.py +33 -0
  58. biolib/_internal/utils/multinode.py +263 -0
  59. biolib/_runtime/runtime.py +157 -0
  60. biolib/_session/session.py +44 -0
  61. biolib/_shared/__init__.py +0 -0
  62. biolib/_shared/types/__init__.py +74 -0
  63. biolib/_shared/types/account.py +12 -0
  64. biolib/_shared/types/account_member.py +8 -0
  65. biolib/_shared/types/app.py +9 -0
  66. biolib/_shared/types/data_record.py +40 -0
  67. biolib/_shared/types/experiment.py +32 -0
  68. biolib/_shared/types/file_node.py +17 -0
  69. biolib/_shared/types/push.py +6 -0
  70. biolib/_shared/types/resource.py +37 -0
  71. biolib/_shared/types/resource_deploy_key.py +11 -0
  72. biolib/_shared/types/resource_permission.py +14 -0
  73. biolib/_shared/types/resource_version.py +19 -0
  74. biolib/_shared/types/result.py +14 -0
  75. biolib/_shared/types/typing.py +10 -0
  76. biolib/_shared/types/user.py +19 -0
  77. biolib/_shared/utils/__init__.py +7 -0
  78. biolib/_shared/utils/resource_uri.py +75 -0
  79. biolib/api/__init__.py +6 -0
  80. biolib/api/client.py +168 -0
  81. biolib/app/app.py +252 -49
  82. biolib/app/search_apps.py +45 -0
  83. biolib/biolib_api_client/api_client.py +126 -31
  84. biolib/biolib_api_client/app_types.py +24 -4
  85. biolib/biolib_api_client/auth.py +31 -8
  86. biolib/biolib_api_client/biolib_app_api.py +147 -52
  87. biolib/biolib_api_client/biolib_job_api.py +161 -141
  88. biolib/biolib_api_client/job_types.py +21 -5
  89. biolib/biolib_api_client/lfs_types.py +7 -23
  90. biolib/biolib_api_client/user_state.py +56 -0
  91. biolib/biolib_binary_format/__init__.py +1 -4
  92. biolib/biolib_binary_format/file_in_container.py +105 -0
  93. biolib/biolib_binary_format/module_input.py +24 -7
  94. biolib/biolib_binary_format/module_output_v2.py +149 -0
  95. biolib/biolib_binary_format/remote_endpoints.py +34 -0
  96. biolib/biolib_binary_format/remote_stream_seeker.py +59 -0
  97. biolib/biolib_binary_format/saved_job.py +3 -2
  98. biolib/biolib_binary_format/{attestation_document.py → stdout_and_stderr.py} +8 -8
  99. biolib/biolib_binary_format/system_status_update.py +3 -2
  100. biolib/biolib_binary_format/utils.py +175 -0
  101. biolib/biolib_docker_client/__init__.py +11 -2
  102. biolib/biolib_errors.py +36 -0
  103. biolib/biolib_logging.py +27 -10
  104. biolib/cli/__init__.py +38 -0
  105. biolib/cli/auth.py +46 -0
  106. biolib/cli/data_record.py +164 -0
  107. biolib/cli/index.py +32 -0
  108. biolib/cli/init.py +421 -0
  109. biolib/cli/lfs.py +101 -0
  110. biolib/cli/push.py +50 -0
  111. biolib/cli/run.py +63 -0
  112. biolib/cli/runtime.py +14 -0
  113. biolib/cli/sdk.py +16 -0
  114. biolib/cli/start.py +56 -0
  115. biolib/compute_node/cloud_utils/cloud_utils.py +110 -161
  116. biolib/compute_node/job_worker/cache_state.py +66 -88
  117. biolib/compute_node/job_worker/cache_types.py +1 -6
  118. biolib/compute_node/job_worker/docker_image_cache.py +112 -37
  119. biolib/compute_node/job_worker/executors/__init__.py +0 -3
  120. biolib/compute_node/job_worker/executors/docker_executor.py +532 -199
  121. biolib/compute_node/job_worker/executors/docker_types.py +9 -1
  122. biolib/compute_node/job_worker/executors/types.py +19 -9
  123. biolib/compute_node/job_worker/job_legacy_input_wait_timeout_thread.py +30 -0
  124. biolib/compute_node/job_worker/job_max_runtime_timer_thread.py +3 -5
  125. biolib/compute_node/job_worker/job_storage.py +108 -0
  126. biolib/compute_node/job_worker/job_worker.py +397 -212
  127. biolib/compute_node/job_worker/large_file_system.py +87 -38
  128. biolib/compute_node/job_worker/network_alloc.py +99 -0
  129. biolib/compute_node/job_worker/network_buffer.py +240 -0
  130. biolib/compute_node/job_worker/utilization_reporter_thread.py +197 -0
  131. biolib/compute_node/job_worker/utils.py +9 -24
  132. biolib/compute_node/remote_host_proxy.py +400 -98
  133. biolib/compute_node/utils.py +31 -9
  134. biolib/compute_node/webserver/compute_node_results_proxy.py +189 -0
  135. biolib/compute_node/webserver/proxy_utils.py +28 -0
  136. biolib/compute_node/webserver/webserver.py +130 -44
  137. biolib/compute_node/webserver/webserver_types.py +2 -6
  138. biolib/compute_node/webserver/webserver_utils.py +77 -12
  139. biolib/compute_node/webserver/worker_thread.py +183 -42
  140. biolib/experiments/__init__.py +0 -0
  141. biolib/experiments/experiment.py +356 -0
  142. biolib/jobs/__init__.py +1 -0
  143. biolib/jobs/job.py +741 -0
  144. biolib/jobs/job_result.py +185 -0
  145. biolib/jobs/types.py +50 -0
  146. biolib/py.typed +0 -0
  147. biolib/runtime/__init__.py +14 -0
  148. biolib/sdk/__init__.py +91 -0
  149. biolib/tables.py +34 -0
  150. biolib/typing_utils.py +2 -7
  151. biolib/user/__init__.py +1 -0
  152. biolib/user/sign_in.py +54 -0
  153. biolib/utils/__init__.py +162 -0
  154. biolib/utils/cache_state.py +94 -0
  155. biolib/utils/multipart_uploader.py +194 -0
  156. biolib/utils/seq_util.py +150 -0
  157. biolib/utils/zip/remote_zip.py +640 -0
  158. pybiolib-1.2.1890.dist-info/METADATA +41 -0
  159. pybiolib-1.2.1890.dist-info/RECORD +177 -0
  160. {pybiolib-0.2.951.dist-info → pybiolib-1.2.1890.dist-info}/WHEEL +1 -1
  161. pybiolib-1.2.1890.dist-info/entry_points.txt +2 -0
  162. README.md +0 -17
  163. biolib/app/app_result.py +0 -68
  164. biolib/app/utils.py +0 -62
  165. biolib/biolib-js/0-biolib.worker.js +0 -1
  166. biolib/biolib-js/1-biolib.worker.js +0 -1
  167. biolib/biolib-js/2-biolib.worker.js +0 -1
  168. biolib/biolib-js/3-biolib.worker.js +0 -1
  169. biolib/biolib-js/4-biolib.worker.js +0 -1
  170. biolib/biolib-js/5-biolib.worker.js +0 -1
  171. biolib/biolib-js/6-biolib.worker.js +0 -1
  172. biolib/biolib-js/index.html +0 -10
  173. biolib/biolib-js/main-biolib.js +0 -1
  174. biolib/biolib_api_client/biolib_account_api.py +0 -21
  175. biolib/biolib_api_client/biolib_large_file_system_api.py +0 -108
  176. biolib/biolib_binary_format/aes_encrypted_package.py +0 -42
  177. biolib/biolib_binary_format/module_output.py +0 -58
  178. biolib/biolib_binary_format/rsa_encrypted_aes_package.py +0 -57
  179. biolib/biolib_push.py +0 -114
  180. biolib/cli.py +0 -203
  181. biolib/cli_utils.py +0 -273
  182. biolib/compute_node/cloud_utils/enclave_parent_types.py +0 -7
  183. biolib/compute_node/enclave/__init__.py +0 -2
  184. biolib/compute_node/enclave/enclave_remote_hosts.py +0 -53
  185. biolib/compute_node/enclave/nitro_secure_module_utils.py +0 -64
  186. biolib/compute_node/job_worker/executors/base_executor.py +0 -18
  187. biolib/compute_node/job_worker/executors/pyppeteer_executor.py +0 -173
  188. biolib/compute_node/job_worker/executors/remote/__init__.py +0 -1
  189. biolib/compute_node/job_worker/executors/remote/nitro_enclave_utils.py +0 -81
  190. biolib/compute_node/job_worker/executors/remote/remote_executor.py +0 -51
  191. biolib/lfs.py +0 -196
  192. biolib/pyppeteer/.circleci/config.yml +0 -100
  193. biolib/pyppeteer/.coveragerc +0 -3
  194. biolib/pyppeteer/.gitignore +0 -89
  195. biolib/pyppeteer/.pre-commit-config.yaml +0 -28
  196. biolib/pyppeteer/CHANGES.md +0 -253
  197. biolib/pyppeteer/CONTRIBUTING.md +0 -26
  198. biolib/pyppeteer/LICENSE +0 -12
  199. biolib/pyppeteer/README.md +0 -137
  200. biolib/pyppeteer/docs/Makefile +0 -177
  201. biolib/pyppeteer/docs/_static/custom.css +0 -28
  202. biolib/pyppeteer/docs/_templates/layout.html +0 -10
  203. biolib/pyppeteer/docs/changes.md +0 -1
  204. biolib/pyppeteer/docs/conf.py +0 -299
  205. biolib/pyppeteer/docs/index.md +0 -21
  206. biolib/pyppeteer/docs/make.bat +0 -242
  207. biolib/pyppeteer/docs/reference.md +0 -211
  208. biolib/pyppeteer/docs/server.py +0 -60
  209. biolib/pyppeteer/poetry.lock +0 -1699
  210. biolib/pyppeteer/pyppeteer/__init__.py +0 -135
  211. biolib/pyppeteer/pyppeteer/accessibility.py +0 -286
  212. biolib/pyppeteer/pyppeteer/browser.py +0 -401
  213. biolib/pyppeteer/pyppeteer/browser_fetcher.py +0 -194
  214. biolib/pyppeteer/pyppeteer/command.py +0 -22
  215. biolib/pyppeteer/pyppeteer/connection/__init__.py +0 -242
  216. biolib/pyppeteer/pyppeteer/connection/cdpsession.py +0 -101
  217. biolib/pyppeteer/pyppeteer/coverage.py +0 -346
  218. biolib/pyppeteer/pyppeteer/device_descriptors.py +0 -787
  219. biolib/pyppeteer/pyppeteer/dialog.py +0 -79
  220. biolib/pyppeteer/pyppeteer/domworld.py +0 -597
  221. biolib/pyppeteer/pyppeteer/emulation_manager.py +0 -53
  222. biolib/pyppeteer/pyppeteer/errors.py +0 -48
  223. biolib/pyppeteer/pyppeteer/events.py +0 -63
  224. biolib/pyppeteer/pyppeteer/execution_context.py +0 -156
  225. biolib/pyppeteer/pyppeteer/frame/__init__.py +0 -299
  226. biolib/pyppeteer/pyppeteer/frame/frame_manager.py +0 -306
  227. biolib/pyppeteer/pyppeteer/helpers.py +0 -245
  228. biolib/pyppeteer/pyppeteer/input.py +0 -371
  229. biolib/pyppeteer/pyppeteer/jshandle.py +0 -598
  230. biolib/pyppeteer/pyppeteer/launcher.py +0 -683
  231. biolib/pyppeteer/pyppeteer/lifecycle_watcher.py +0 -169
  232. biolib/pyppeteer/pyppeteer/models/__init__.py +0 -103
  233. biolib/pyppeteer/pyppeteer/models/_protocol.py +0 -12460
  234. biolib/pyppeteer/pyppeteer/multimap.py +0 -82
  235. biolib/pyppeteer/pyppeteer/network_manager.py +0 -678
  236. biolib/pyppeteer/pyppeteer/options.py +0 -8
  237. biolib/pyppeteer/pyppeteer/page.py +0 -1728
  238. biolib/pyppeteer/pyppeteer/pipe_transport.py +0 -59
  239. biolib/pyppeteer/pyppeteer/target.py +0 -147
  240. biolib/pyppeteer/pyppeteer/task_queue.py +0 -24
  241. biolib/pyppeteer/pyppeteer/timeout_settings.py +0 -36
  242. biolib/pyppeteer/pyppeteer/tracing.py +0 -93
  243. biolib/pyppeteer/pyppeteer/us_keyboard_layout.py +0 -305
  244. biolib/pyppeteer/pyppeteer/util.py +0 -18
  245. biolib/pyppeteer/pyppeteer/websocket_transport.py +0 -47
  246. biolib/pyppeteer/pyppeteer/worker.py +0 -101
  247. biolib/pyppeteer/pyproject.toml +0 -97
  248. biolib/pyppeteer/spell.txt +0 -137
  249. biolib/pyppeteer/tox.ini +0 -72
  250. biolib/pyppeteer/utils/generate_protocol_types.py +0 -603
  251. biolib/start_cli.py +0 -7
  252. biolib/utils.py +0 -47
  253. biolib/validators/validate_app_version.py +0 -183
  254. biolib/validators/validate_argument.py +0 -134
  255. biolib/validators/validate_module.py +0 -323
  256. biolib/validators/validate_zip_file.py +0 -40
  257. biolib/validators/validator_utils.py +0 -103
  258. pybiolib-0.2.951.dist-info/LICENSE +0 -21
  259. pybiolib-0.2.951.dist-info/METADATA +0 -61
  260. pybiolib-0.2.951.dist-info/RECORD +0 -153
  261. pybiolib-0.2.951.dist-info/entry_points.txt +0 -3
  262. /LICENSE → /pybiolib-1.2.1890.dist-info/licenses/LICENSE +0 -0
biolib/jobs/job.py ADDED
@@ -0,0 +1,741 @@
import base64
import sys
import time
from collections import OrderedDict
from datetime import datetime, timedelta, timezone
from pathlib import Path
from urllib.parse import urlparse

import biolib.api.client
from biolib import utils
from biolib._internal.http_client import HttpClient
from biolib._internal.tree_utils import build_tree_from_files, build_tree_str
from biolib._internal.utils import PathFilter, filter_lazy_loaded_files, open_browser_window_from_notebook
from biolib._shared.utils import parse_resource_uri
from biolib.api.client import ApiClient
from biolib.biolib_api_client import BiolibApiClient, CreatedJobDict
from biolib.biolib_api_client.biolib_app_api import BiolibAppApi
from biolib.biolib_api_client.biolib_job_api import BiolibJobApi
from biolib.biolib_binary_format import LazyLoadedFile, ModuleInput, ModuleInputDict, ModuleOutputV2
from biolib.biolib_binary_format.remote_endpoints import RemoteJobStorageEndpoint
from biolib.biolib_binary_format.stdout_and_stderr import StdoutAndStderr
from biolib.biolib_binary_format.utils import InMemoryIndexableBuffer
from biolib.biolib_errors import BioLibError, CloudJobFinishedError
from biolib.biolib_logging import logger, logger_no_user_data
from biolib.compute_node.job_worker.job_storage import JobStorage
from biolib.compute_node.utils import SystemExceptionCodeMap, SystemExceptionCodes
from biolib.jobs.job_result import JobResult
from biolib.jobs.types import CloudJobDict, CloudJobStartedDict, JobDict
from biolib.tables import BioLibTable
from biolib.typing_utils import Dict, Generator, List, Optional, Tuple, Union, cast
from biolib.utils import IS_RUNNING_IN_NOTEBOOK


class Result:
    # Columns to print in table when showing Result
    table_columns_to_row_map = OrderedDict(
        {
            'ID': {'key': 'uuid', 'params': {'width': 36}},
            'Name': {'key': 'main_result.name', 'params': {}},
            'Application': {'key': 'app_uri', 'params': {}},
            'Status': {'key': 'state', 'params': {}},
            'Started At': {'key': 'started_at', 'params': {}},
        }
    )

    def __init__(self, job_dict: JobDict, _api_client: Optional[ApiClient] = None):
        self._api_client: Optional[ApiClient] = _api_client

        self._uuid: str = job_dict['uuid']
        self._auth_token: str = job_dict['auth_token']

        self._job_dict: JobDict = job_dict
        self._job_dict_last_fetched_at: datetime = datetime.now(timezone.utc)
        self._result: Optional[JobResult] = None
        self._cached_input_arguments: Optional[List[str]] = None

    def __str__(self):
        return f"Result of {self._job_dict['app_uri']} created at {self._job_dict['created_at']} ({self._uuid})"

    def __repr__(self):
        # Get job status and shareable link
        status = self.get_status()
        shareable_link = self.get_shareable_link()

        # ANSI color codes for terminal output
        blue = '\033[34m'
        white = '\033[90m'
        reset = '\033[0m'

        # Start with the header section
        output_lines = [
            '--- BioLib Result ---',
            f'ID: {self._uuid}',
            f'Status: {status}',
            f'Link: {shareable_link}',
        ]

        # Only show output files if the job is not pending
        if not self.is_pending():
            output_lines.append('Output Files:')

            try:
                # Get files from the job
                files = self.list_output_files()

                # If no files, indicate that
                if not files:
                    output_lines.append('No output files')
                    return '\n'.join(output_lines)

                # If more than 25 files, show simplified message
                if len(files) > 25:
                    output_lines.append(f'{len(files)} output files in result.')
                    return '\n'.join(output_lines)

                # Build the tree representation
                tree_data = build_tree_from_files(files)
                output_lines.extend(build_tree_str(tree_data, blue=blue, white=white, reset=reset))
            except Exception:
                output_lines.append('Error accessing output files')

        return '\n'.join(output_lines)

    @property
    def id(self) -> str:  # pylint: disable=invalid-name
        return self._uuid

    @property
    def result(self) -> JobResult:
        if not self._result:
            self._result = JobResult(job_uuid=self._uuid, job_auth_token=self._auth_token)

        return self._result

    @property
    def stdout(self) -> bytes:
        logger.warning('The property .stdout is deprecated, please use .get_stdout()')
        return self.result.get_stdout()

    @property
    def stderr(self) -> bytes:
        logger.warning('The property .stderr is deprecated, please use .get_stderr()')
        return self.result.get_stderr()

    @property
    def exitcode(self) -> int:
        logger.warning('The property .exitcode is deprecated, please use .get_exit_code()')
        return self.result.get_exit_code()

    def is_finished(self) -> bool:
        if self._job_dict['ended_at']:
            return True

        self._refetch_job_dict()
        return bool(self._job_dict['ended_at'])

    def is_pending(self) -> bool:
        """Returns whether the result is in a pending state.

        A result is considered pending if it's not finished yet.
        The result state is re-fetched when this method is called.

        Returns:
            bool: True if the result is in a pending state, False otherwise.

        Example::
            >>> result = biolib.get_result("result_id")
            >>> if result.is_pending():
            >>>     print("Result is still running")
            >>> else:
            >>>     print("Result has finished")
        """
        return not self.is_finished()

    def get_name(self) -> str:
        self._refetch_job_dict()
        return self._job_dict['main_result']['name']

    def to_dict(self) -> Dict:
        # Construct user facing dict with friendly named keys
        return dict(
            app_uri=self._job_dict['app_uri'],
            created_at=self._job_dict['created_at'],
            finished_at=self._job_dict['ended_at'],
            job_id=self._job_dict['uuid'],
            started_at=self._job_dict['started_at'],
            state=self._job_dict['state'],
        )

    def list_output_files(
        self,
        path_filter: Optional[PathFilter] = None,
    ) -> List[LazyLoadedFile]:
        """List output files from the result.

        Args:
            path_filter (PathFilter, optional): Filter to apply to the output files.
                Can be a string glob pattern or a callable that takes a path string and returns a boolean.

        Returns:
            List[LazyLoadedFile]: List of output files.

        Example::
            >>> result = biolib.get_result("result_id")
            >>> output_files = result.list_output_files()
            >>> # Filter files with a glob pattern
            >>> output_files = result.list_output_files("*.pdb")
        """
        return self.result.list_output_files(path_filter=path_filter)

    def list_input_files(
        self,
        path_filter: Optional[PathFilter] = None,
    ) -> List[LazyLoadedFile]:
        """List input files from the result.

        Args:
            path_filter (PathFilter, optional): Filter to apply to the input files.
                Can be a string glob pattern or a callable that takes a path string and returns a boolean.

        Returns:
            List[LazyLoadedFile]: List of input files.

        Example::
            >>> result = biolib.get_result("result_id")
            >>> input_files = result.list_input_files()
            >>> # Filter files with a glob pattern
            >>> input_files = result.list_input_files("*.txt")
        """
        presigned_download_url = BiolibJobApi.get_job_storage_download_url(
            job_uuid=self.id,
            job_auth_token=self._auth_token,
            storage_type='input',
        )
        response = HttpClient.request(url=presigned_download_url)
        module_input_serialized: bytes = response.content
        module_input = ModuleInput(module_input_serialized).deserialize()

        files = []
        for path, data in module_input['files'].items():
            buffer = InMemoryIndexableBuffer(data)
            lazy_file = LazyLoadedFile(path=path, buffer=buffer, start=0, length=len(data))
            files.append(lazy_file)

        if not path_filter:
            return files

        return filter_lazy_loaded_files(files, path_filter)

    def get_output_file(self, filename: str) -> LazyLoadedFile:
        return self.result.get_output_file(filename=filename)

    def load_file_as_numpy(self, *args, **kwargs):
        try:
            import numpy  # type: ignore # pylint: disable=import-outside-toplevel,import-error
        except ImportError:  # pylint: disable=raise-missing-from
            raise Exception('Failed to import numpy, please make sure it is installed.') from None
        file_handle = self.result.get_output_file(*args, **kwargs).get_file_handle()
        return numpy.load(file_handle, allow_pickle=False)  # type: ignore

    def get_stdout(self) -> bytes:
        return self.result.get_stdout()

    def get_stderr(self) -> bytes:
        return self.result.get_stderr()

    def get_exit_code(self) -> int:
        return self.result.get_exit_code()

    def _get_module_input(self) -> ModuleInputDict:
        self._refetch_job_dict()
        presigned_download_url = BiolibJobApi.get_job_storage_download_url(
            job_uuid=self._job_dict['uuid'],
            job_auth_token=self._job_dict['auth_token'],
            storage_type='input',
        )
        response = HttpClient.request(url=presigned_download_url)
        module_input_serialized: bytes = response.content
        return ModuleInput(module_input_serialized).deserialize()

    def get_input_arguments(self) -> List[str]:
        if self._cached_input_arguments is None:
            logger.debug('Fetching input arguments...')
            module_input = self._get_module_input()
            self._cached_input_arguments = module_input['arguments']

        return self._cached_input_arguments

    def save_input_files(self, output_dir: str, overwrite: bool = False) -> None:
        logger.info('Downloading input files...')
        module_input = self._get_module_input()

        files = module_input['files'].items()
        logger.info(f'Saving input {len(files)} files to "{output_dir}"...')
        for path, data in files:
            # Remove leading slash of file_path
            destination_file_path = Path(output_dir) / Path(path.lstrip('/'))
            if destination_file_path.exists():
                if not overwrite:
                    raise BioLibError(f'File {destination_file_path} already exists. Set overwrite=True to overwrite.')
                else:
                    destination_file_path.rename(
                        f'{destination_file_path}.biolib-renamed.{time.strftime("%Y%m%d%H%M%S")}'
                    )

            dir_path = destination_file_path.parent
            if dir_path:
                dir_path.mkdir(parents=True, exist_ok=True)

            with open(destination_file_path, mode='wb') as file_handler:
                file_handler.write(data)

            logger.info(f' - {destination_file_path}')

    def save_files(
        self,
        output_dir: str,
        path_filter: Optional[PathFilter] = None,
        skip_file_if_exists: bool = False,
        overwrite: bool = False,
        flat: bool = False,
    ) -> None:
        """Save output files from the result to a local directory.

        Args:
            output_dir (str): Directory path where files will be saved.
            path_filter (PathFilter, optional): Filter to apply to output files.
                Can be a string glob pattern or a callable that takes a path and returns a boolean.
            skip_file_if_exists (bool, optional): If True, skip files that already exist locally.
                Defaults to False.
            overwrite (bool, optional): If True, overwrite existing files by renaming them with a timestamp.
                Defaults to False.
            flat (bool, optional): If True, save all files directly to output_dir using only their basenames,
                without creating subdirectories. When enabled, raises an error if duplicate basenames exist
                in the filtered output or if any basename already exists in output_dir. Defaults to False.

        Raises:
            BioLibError: If flat=True and duplicate basenames are found in filtered output.
            BioLibError: If flat=True and a file with the same basename already exists in output_dir.
            BioLibError: If a file already exists and neither skip_file_if_exists nor overwrite is True.

        Example::
            >>> result = biolib.get_result("result_id")
            >>> # Save all files preserving directory structure
            >>> result.save_files("./output")
            >>> # Save files flat without subdirectories
            >>> result.save_files("./output", flat=True)
            >>> # Save only specific files
            >>> result.save_files("./output", path_filter="*.txt")
        """
        self.result.save_files(
            output_dir=output_dir,
            path_filter=path_filter,
            skip_file_if_exists=skip_file_if_exists,
            overwrite=overwrite,
            flat=flat,
        )

    def get_status(self) -> str:
        self._refetch_job_dict()
        return self._job_dict['state']

    def wait(self):
        logger.info(f'Waiting for job {self.id} to finish...')
        while not self.is_finished():
            time.sleep(2)
        logger.info(f'Result {self.id} has finished.')

    def get_shareable_link(self, embed_view: Optional[bool] = None) -> str:
        api_client = BiolibApiClient.get()
        prefix = '/embed' if embed_view else ''
        shareable_link = f'{api_client.base_url}{prefix}/results/{self.id}/?token={self._auth_token}'
        return shareable_link

    def open_browser(self) -> None:
        results_url_to_open = self.get_shareable_link()
        if IS_RUNNING_IN_NOTEBOOK:
            print(f'Opening results page at: {results_url_to_open}')
            print('If your browser does not open automatically, click on the link above.')
            open_browser_window_from_notebook(results_url_to_open)
        else:
            print('Please copy and paste the following link into your browser:')
            print(results_url_to_open)

    def cancel(self) -> None:
        try:
            biolib.api.client.patch(
                path=f'/jobs/{self._uuid}/',
                headers={'Job-Auth-Token': self._auth_token} if self._auth_token else None,
                data={'state': 'cancelled'},
            )
            logger.info(f'Result {self._uuid} canceled')
        except Exception as error:
            logger.error(f'Failed to cancel result {self._uuid} due to: {error}')

    def delete(self) -> None:
        """Delete the result.

        Example::
            >>> result = biolib.get_result("result_id")
            >>> result.delete()
        """
        try:
            biolib.api.client.delete(path=f'/jobs/{self._uuid}/')
            logger.info(f'Result {self._uuid} deleted')
        except Exception as error:
            raise BioLibError(f'Failed to delete job {self._uuid} due to: {error}') from error

    def rename(self, name: str) -> None:
        try:
            biolib.api.client.patch(
                path=f'/jobs/{self._uuid}/main_result/',
                headers={'Job-Auth-Token': self._auth_token} if self._auth_token else None,
                data={'result_name_prefix': name},
            )
            self._refetch_job_dict(force_refetch=True)
            updated_name = self.get_name()
            logger.info(f'Result {self._uuid} renamed to "{updated_name}"')
        except Exception as error:
            raise BioLibError(f'Failed to rename job {self._uuid} due to: {error}') from error

    def recompute(
        self,
        app_uri: Optional[str] = None,
        machine: Optional[str] = None,
        blocking: bool = True,
        arguments: Optional[List[str]] = None,
    ) -> 'Result':
        """Recompute the result with the same input files but potentially different arguments.

        Args:
            app_uri (Optional[str], optional): The URI of the app to use for recomputation.
                If None, uses the original app URI. Defaults to None.
            machine (Optional[str], optional): The machine to run the result on.
                If None, uses the original requested machine. Defaults to None.
            blocking (bool, optional): Whether to block until the result completes.
                If True, streams logs until completion. Defaults to True.
            arguments (Optional[List[str]], optional): New arguments to use for the result.
                If None, uses the original arguments. Defaults to None.

        Returns:
            Result: A new Result instance for the recomputed result.

        Example::
            >>> result = biolib.get_result("result_id")
            >>> # Recompute with the same arguments
            >>> new_result = result.recompute()
            >>> # Recompute with different arguments
            >>> new_result = result.recompute(arguments=["--new-arg", "value"])
        """
        self._refetch_job_dict()
        app_response = BiolibAppApi.get_by_uri(uri=app_uri or self._job_dict['app_uri'])

        job_storage_input = RemoteJobStorageEndpoint(
            job_auth_token=self._auth_token,
            job_uuid=self._uuid,
            storage_type='input',
        )
        http_response = HttpClient.request(url=job_storage_input.get_remote_url())
        module_input_serialized = http_response.content

        # If arguments are provided, deserialize the module input, update the arguments, and serialize it again
        if arguments is not None:
            module_input = ModuleInput(module_input_serialized)
            module_input_dict = module_input.deserialize()

            # Create a new ModuleInput with updated arguments
            module_input_serialized = ModuleInput().serialize(
                stdin=module_input_dict['stdin'], arguments=arguments, files=module_input_dict['files']
            )

        original_requested_machine = (
            self._job_dict['requested_machine'] if self._job_dict['requested_machine'] else None
        )
        job = self._start_job_in_cloud(
            app_uri=app_response['app_uri'],
            app_version_uuid=app_response['app_version']['public_id'],
            module_input_serialized=module_input_serialized,
            override_command=self._job_dict['arguments_override_command'],
            machine=machine if machine else original_requested_machine,
        )
        if blocking:
            job.stream_logs()

        return job

    def _get_cloud_job(self) -> CloudJobDict:
        self._refetch_job_dict(force_refetch=True)
        if self._job_dict['cloud_job'] is None:
            raise BioLibError(f'Result {self._uuid} did not register correctly. Try creating a new result.')

        return self._job_dict['cloud_job']

    def _set_result_module_output(self, module_output: ModuleOutputV2) -> None:
        self._result = JobResult(job_uuid=self._uuid, job_auth_token=self._auth_token, module_output=module_output)

    @staticmethod
    def fetch_jobs(count: int, status: Optional[str] = None) -> List['Result']:
        job_dicts = Result._get_job_dicts(count, status)
        return [Result(job_dict) for job_dict in job_dicts]

    @staticmethod
    def show_jobs(count: int = 25) -> None:
        job_dicts = Result._get_job_dicts(count)
        BioLibTable(columns_to_row_map=Job.table_columns_to_row_map, rows=job_dicts, title='Jobs').print_table()

    @staticmethod
    def _get_job_dicts(count: int, status: Optional[str] = None) -> List['JobDict']:
        job_states = ['in_progress', 'completed', 'failed', 'cancelled']
        if status is not None and status not in job_states:
            raise Exception('Invalid status filter')

        page_size = min(count, 1_000)
        params: Dict[str, Union[str, int]] = dict(page_size=page_size)
        if status:
            params['state'] = status

        api_path = '/jobs/'
        response = biolib.api.client.get(api_path, params=params).json()
        jobs = [job_dict for job_dict in response['results']]

        for page_number in range(2, response['page_count'] + 1):
            if len(jobs) >= count:
                break
            page_response = biolib.api.client.get(path=api_path, params=dict(**params, page=page_number)).json()
            jobs.extend([job_dict for job_dict in page_response['results']])

        return jobs[:count]

    @staticmethod
    def _get_job_dict(uuid: str, auth_token: Optional[str] = None, api_client: Optional[ApiClient] = None) -> JobDict:
        api = api_client or biolib.api.client
        job_dict: JobDict = api.get(
            path=f'/jobs/{uuid}/',
            headers={'Job-Auth-Token': auth_token} if auth_token else None,
        ).json()

        return job_dict

    @staticmethod
    def create_from_uuid(uuid: str, auth_token: Optional[str] = None) -> 'Result':
        job_dict = Result._get_job_dict(uuid=uuid, auth_token=auth_token)
        return Result(job_dict)

    @staticmethod
    def _yield_logs_packages(stdout_and_stderr_packages_b64) -> Generator[Tuple[str, bytes], None, None]:
        for stdout_and_stderr_package_b64 in stdout_and_stderr_packages_b64:
            stdout_and_stderr_package = base64.b64decode(stdout_and_stderr_package_b64)
            stdout_and_stderr = StdoutAndStderr(stdout_and_stderr_package).deserialize()
            yield ('stdout', stdout_and_stderr)

    def show(self) -> None:
        self._refetch_job_dict()
        BioLibTable(
            columns_to_row_map=Result.table_columns_to_row_map,
            rows=[self._job_dict],
            title=f'Result: {self._uuid}',
        ).print_table()

    def stream_logs(self, as_iterator: bool = False):
        if as_iterator:
            return self._iter_logs()
        self._stream_logs()
        return None

    def _stream_logs(self, enable_print: bool = True) -> None:
        try:
            for stream_type, data in self._iter_logs(enable_print=enable_print):
                if stream_type == 'stdout':
                    if IS_RUNNING_IN_NOTEBOOK:
                        sys.stdout.write(data.decode(encoding='utf-8', errors='replace'))
                        # Note: we avoid flush() in notebook as that breaks \r handling
                    else:
                        sys.stdout.buffer.write(data)
                        sys.stdout.buffer.flush()
                elif stream_type == 'stderr':
                    if IS_RUNNING_IN_NOTEBOOK:
                        sys.stderr.write(data.decode(encoding='utf-8', errors='replace'))
                        # Note: we avoid flush() in notebook as that breaks \r handling
                    else:
                        sys.stderr.buffer.write(data)
                        sys.stderr.buffer.flush()
        finally:
            # Flush after having processed all packages
            if IS_RUNNING_IN_NOTEBOOK:
                sys.stdout.flush()
                sys.stderr.flush()

    def _iter_logs(self, enable_print: bool = True) -> Generator[Tuple[str, bytes], None, None]:
        try:
            cloud_job = self._get_cloud_job_awaiting_started()
        except CloudJobFinishedError:
            logger.info(f'--- The result {self.id} has already completed (no streaming will take place) ---')
            logger.info('--- The stdout log is printed below: ---')
            yield ('stdout', self.get_stdout())
            logger.info('--- The stderr log is printed below: ---')
            yield ('stderr', self.get_stderr())
            logger.info(f'--- The job {self.id} has already completed. Its output was printed above. ---')
            return

        compute_node_url = cloud_job['compute_node_url']
        logger_no_user_data.debug(f'Using compute node URL "{compute_node_url}"')

        if utils.BIOLIB_CLOUD_BASE_URL:
            compute_node_url = utils.BIOLIB_CLOUD_BASE_URL + str(urlparse(compute_node_url).path)
            logger_no_user_data.debug(f'Using cloud proxy URL from env var BIOLIB_CLOUD_BASE_URL: {compute_node_url}')

        if enable_print:
            yield from self._yield_full_logs(node_url=compute_node_url)

        final_status_messages: List[str] = []
        while True:
            time.sleep(2)
            status_json = self._get_job_status_from_compute_node(compute_node_url)
            if not status_json:
                # this can happen if the job is finished but already removed from the compute node
                logger.warning('WARN: We were unable to retrieve the full log of the job, please try again')
                break
            job_is_completed = status_json['is_completed']
            for status_update in status_json['status_updates']:
                # If the job is completed, print the log messages after all stdout and stderr has been written
                if job_is_completed:
                    final_status_messages.append(status_update['log_message'])
                else:
                    # Print the status before writing stdout and stderr
                    logger.info(f'Cloud: {status_update["log_message"]}')

            if enable_print:
                yield from self._yield_logs_packages(status_json['stdout_and_stderr_packages_b64'])

            if 'error_code' in status_json:
                error_code = status_json['error_code']
                error_message = SystemExceptionCodeMap.get(error_code, f'Unknown error code {error_code}')

                raise BioLibError(f'Cloud: {error_message}')

            if job_is_completed:
                break

        # Print the final log messages after stdout and stderr has been written
        for message in final_status_messages:
            logger.info(f'Cloud: {message}')

        self.wait()  # Wait for compute node to tell the backend that the job is finished

    def _yield_full_logs(self, node_url: str) -> Generator[Tuple[str, bytes], None, None]:
        try:
            response_json = HttpClient.request(url=f'{node_url}/v1/job/{self._uuid}/status/?logs=full').json()
        except Exception as error:
            logger.error(f'Could not get full streamed logs due to: {error}')
            raise BioLibError('Could not get full streamed logs') from error

        for status_update in response_json.get('previous_status_updates', []):
            logger.info(f'Cloud: {status_update["log_message"]}')

        yield from self._yield_logs_packages(response_json['streamed_logs_packages_b64'])

    def _get_cloud_job_awaiting_started(self) -> CloudJobStartedDict:
        retry_count = 0
        while True:
            retry_count += 1
            time.sleep(min(10, retry_count))
            cloud_job = self._get_cloud_job()

            if cloud_job['finished_at']:
                raise CloudJobFinishedError()

            if cloud_job and cloud_job['started_at']:
                if not cloud_job['compute_node_url']:
                    raise BioLibError(f'Failed to get URL to compute node for job {self._uuid}')

                return cast(CloudJobStartedDict, cloud_job)

            logger.info('Cloud: The job has been queued. Please wait...')

    def _get_job_status_from_compute_node(self, compute_node_url):
        for _ in range(15):
            try:
                return HttpClient.request(url=f'{compute_node_url}/v1/job/{self._uuid}/status/').json()
            except Exception:  # pylint: disable=broad-except
                cloud_job = self._get_cloud_job()
                logger.debug('Failed to get status from compute node, retrying...')
                if cloud_job['finished_at']:
                    logger.debug('Result no longer exists on compute node, checking for error...')
                    if cloud_job['error_code'] != SystemExceptionCodes.COMPLETED_SUCCESSFULLY.value:
                        error_message = SystemExceptionCodeMap.get(
                            cloud_job['error_code'], f'Unknown error code {cloud_job["error_code"]}'
                        )
                        raise BioLibError(f'Cloud: {error_message}') from None
                    else:
                        logger.info(f'The job {self._uuid} is finished. Get its output by calling `.result()`')
                        return

                time.sleep(2)

        raise BioLibError(
            'Failed to stream logs, did you lose internet connection?\n'
            'Call `.stream_logs()` on your job to resume streaming logs.'
        )

    def _refetch_job_dict(self, force_refetch: Optional[bool] = False) -> None:
        if not force_refetch and self._job_dict_last_fetched_at > datetime.now(timezone.utc) - timedelta(seconds=2):
            return

        self._job_dict = self._get_job_dict(self._uuid, self._auth_token)
        self._job_dict_last_fetched_at = datetime.now(timezone.utc)

    @staticmethod
    def _start_job_in_cloud(
        app_uri: str,
        app_version_uuid: str,
        module_input_serialized: bytes,
        override_command: bool = False,
        machine: Optional[str] = None,
        experiment_id: Optional[str] = None,
        result_prefix: Optional[str] = None,
        timeout: Optional[int] = None,
        notify: bool = False,
        requested_machine_count: Optional[int] = None,
        temporary_client_secrets: Optional[Dict[str, str]] = None,
        api_client: Optional[ApiClient] = None,
    ) -> 'Result':
        if len(module_input_serialized) < 500_000 and temporary_client_secrets is None:
            _job_dict = BiolibJobApi.create_job_with_data(
                app_version_uuid=app_version_uuid,
                app_resource_name_prefix=parse_resource_uri(app_uri)['resource_prefix'],
                arguments_override_command=override_command,
                experiment_uuid=experiment_id,
                module_input_serialized=module_input_serialized,
                notify=notify,
                requested_machine=machine,
                requested_timeout_seconds=timeout,
                result_name_prefix=result_prefix,
                requested_machine_count=requested_machine_count,
                api_client=api_client,
            )
            return Result(cast(JobDict, _job_dict))

        job_dict: CreatedJobDict = BiolibJobApi.create(
            app_version_id=app_version_uuid,
            app_resource_name_prefix=parse_resource_uri(app_uri)['resource_prefix'],
            experiment_uuid=experiment_id,
            machine=machine,
            notify=notify,
            override_command=override_command,
            timeout=timeout,
            requested_machine_count=requested_machine_count,
            temporary_client_secrets=temporary_client_secrets,
            api_client=api_client,
        )
        JobStorage.upload_module_input(job=job_dict, module_input_serialized=module_input_serialized)
        cloud_job = BiolibJobApi.create_cloud_job(job_id=job_dict['public_id'], result_name_prefix=result_prefix)
        logger.debug(f"Cloud: Job created with id {cloud_job['public_id']}")
        return Result(cast(JobDict, job_dict), _api_client=api_client)


class Job(Result):
    """
    Deprecated class. `Job` extends the `Result` class and is retained for backward compatibility.
    Please use the `Result` class instead.
    """