pybiolib 1.2.611__py3-none-any.whl → 1.2.659__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
biolib/__init__.py CHANGED
@@ -4,6 +4,7 @@ from urllib.parse import urlparse as _urlparse
 
 from biolib import typing_utils as _typing_utils
 from biolib.app import BioLibApp as _BioLibApp
+
 # TODO: Fix ignore of type
 from biolib.app.search_apps import search_apps # type: ignore
 from biolib.biolib_errors import BioLibError
@@ -24,6 +25,7 @@ import biolib.utils
 
 # ------------------------------------ Function definitions for public Python API ------------------------------------
 
+
 def call_cli() -> None:
     biolib.cli.cli()
 
@@ -51,9 +53,9 @@ def load(uri: str) -> _BioLibApp:
 
 
 def search(
-    search_query: Optional[str] = None,
-    team: Optional[str] = None,
-    count: int = 100,
+    search_query: Optional[str] = None,
+    team: Optional[str] = None,
+    count: int = 100,
 ) -> List[str]:
     r"""Search for BioLib applications.
 
@@ -98,6 +100,26 @@ def get_job(job_id: str, job_token: Optional[str] = None) -> _Job:
     return _Job.create_from_uuid(uuid=job_id, auth_token=job_token)
 
 
+def get_result(result_id: str, result_token: Optional[str] = None) -> _Job:
+    r"""Get a result by its ID.
+
+    Args:
+        result_id (str): The UUID of the result to retrieve
+        result_token (str, optional): Authentication token for accessing the result.
+            Only needed for results that aren't owned by the current user.
+
+    Returns:
+        Result: The result object
+
+    Example::
+
+        >>> result = biolib.get_result('abc123')
+        >>> # Access shared result
+        >>> result = biolib.get_result('abc123', result_token='xyz789')
+    """
+    return _Job.create_from_uuid(uuid=result_id, auth_token=result_token)
+
+
 def get_data_record(uri: str) -> _DataRecord:
     r"""Get a data record by its URI.
 
@@ -255,7 +277,7 @@ def set_api_base_url(api_base_url: str) -> None:
     biolib.utils.BIOLIB_BASE_URL = api_base_url
     biolib.utils.BIOLIB_SITE_HOSTNAME = _urlparse(api_base_url).hostname
     biolib.utils.BASE_URL_IS_PUBLIC_BIOLIB = api_base_url.endswith('biolib.com') or (
-        os.environ.get('BIOLIB_ENVIRONMENT_IS_PUBLIC_BIOLIB', '').upper() == 'TRUE'
+        os.environ.get('BIOLIB_ENVIRONMENT_IS_PUBLIC_BIOLIB', '').upper() == 'TRUE'
     )
 
 
biolib/_internal/llm_instructions/.github/copilot-instructions.md ADDED
@@ -0,0 +1,7 @@
+Variable names are allowed to be verbose, and should be descriptive.
+
+Code comments should only be added for complex logic or unintuitive code that is not adequately explained by the function names themselves.
+
+Unit tests are not necessary. Tests should instead be written as simple examples demonstrating the functionality of relevant functions.
+
+Always use 4 spaces for indentation when writing code.
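
As a minimal illustration of these conventions (the function below is hypothetical and not part of the package), code following these instructions might look like:

```python
# Hypothetical example: descriptive, verbose variable names, 4-space
# indentation, and a simple usage example in place of a formal unit test.
def calculate_average_sequence_length(sequence_lengths):
    total_sequence_length = sum(sequence_lengths)
    return total_sequence_length / len(sequence_lengths)


# Example demonstrating the function instead of a unit test
example_sequence_lengths = [120, 95, 310]
print(calculate_average_sequence_length(example_sequence_lengths))
```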
biolib/_internal/llm_instructions/.github/prompts/biolib_app_inputs.prompt.md ADDED
@@ -0,0 +1,60 @@
+# Main task
+Your task is to make sure that all inputs are handled correctly in the given Python script and/or biolib config file.
+
+Inputs are defined in the [config.yml](../../.biolib/config.yml) file and the main Python script, usually found in [run.py](../../run.py) or [main.py](../../main.py).
+
+# Syntax of config.yml
+The file config.yml contains the information needed to render and run an application on BioLib. This configuration defines the entry to your application and what input arguments the user can set. When you edit an application using the graphical interface on BioLib, the config.yml file is automatically updated.
+
+The config file and Python script specify how input options and settings are rendered to the user of the application, and how inputs are parsed. Each input field should follow this structure:
+
+```
+arguments:
+  - key: --data  # required
+    description: 'Input Dropdown'  # required
+    key_value_separator: ' '  # optional, default is ' '
+    default_value: ''  # optional, default is ''
+    type: dropdown  # required
+    options:
+      'This will be shown as option one': 'value1'
+      'This will be shown as option two': 'value2'
+    required: true  # optional, default is true
+```
+
+Under `type` you have the following options:
+
+* `text` provides a text input field
+* `file` provides a file select where users can upload an input file
+* `text-file` provides both a text input field and a file select, allowing the user to supply either
+* `sequence` works like text-file, with checks for valid FASTA input characters
+* `hidden` allows the application creator to provide a default input argument without it being shown to the end-user
+* `toggle` provides a toggle switch where users can choose between two options. Note that the options need to be named 'on': 'value1' and 'off': 'value2'
+* `number` provides a number input field
+* `radio` provides a "radio select" where users can select one amongst a number of prespecified options
+* `dropdown` provides a dropdown menu where users can select one amongst a number of prespecified options
+* `multiselect` provides a dropdown menu where users can select one or more prespecified options
+
+`sub_arguments`: Allows you to specify arguments that are only rendered if a user chooses a particular option in the parent argument. For example, an application might allow the user to run one of two commands, where each of these commands needs different input arguments:
+
+```
+arguments:
+  - key: --function
+    description: 'Choose a function'
+    key_value_separator: ''
+    default_value: ''
+    type: dropdown
+    options:
+      'Command A': a
+      'Command B': b
+    sub_arguments:
+      a:
+        - key: --argument_a
+          description: "Argument A takes a file input"
+          type: file
+      b:
+        - key: --argument_b
+          description: 'Argument B takes a text input'
+          type: text
+```
+
+Inputs in the Python script should be parsed with argparse, and should enforce the same requirements and defaults so that usage is identical between the frontend and Python.
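
As a minimal sketch of that correspondence (a hypothetical run.py mirroring the `--data` dropdown from the structure example above):

```python
import argparse

# Hypothetical run.py mirroring the --data dropdown defined in config.yml above:
# same key, choices restricted to the dropdown's values, and the same required flag.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--data',
    choices=['value1', 'value2'],  # the option values from config.yml
    required=True,                 # matches required: true in config.yml
)
parsed_arguments = parser.parse_args()
print(f'Selected data option: {parsed_arguments.data}')
```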
biolib/_internal/llm_instructions/.github/prompts/biolib_run_apps.prompt.md ADDED
@@ -0,0 +1,302 @@
+# BioLib Run Apps Prompt
+Your task is to run apps at the user's discretion. Here are instructions on how running an app works.
+
+## Login
+
+You need to be logged in, unless this is running as part of a different app. To log in with your BioLib account in a Python notebook, run the code below and follow the instructions shown:
+
+```python
+import biolib
+biolib.login()
+```
+
+Alternatively, you can use an API token and set it as the `BIOLIB_TOKEN` environment variable. If the user requests this, direct them [here](https://biolib.com/settings/api-tokens/).
+
+## Run using .cli()
+
+To load an application into your Python script, add the following:
+
+```python
+import biolib
+app = biolib.load('author/application')
+```
+
+To run an application, call the function `.cli()` on the application you loaded above. For instance, to run samtools with the `--help` command:
+
+```python
+import biolib
+
+samtools = biolib.load('samtools/samtools')
+job = samtools.cli(args='--help')
+print(job.get_stdout().decode())
+```
+
+Running an application returns a job object, which allows you to monitor progress and save results.
+
+## Non blocking
+
+By default, calling the function `.cli()` blocks until the application is finished. You can pass the keyword argument `blocking=False` to return immediately. For example, the code below will print "in_progress":
+
+```python
+import biolib
+
+samtools = biolib.load('samtools/samtools')
+job = samtools.cli(args='--help', blocking=False)
+print(job.get_status())
+```
+
+## Result prefix
+
+You can annotate the result with a custom name when calling `.cli()` using the keyword argument `result_prefix`:
+
+```python
+import biolib
+
+samtools = biolib.load('samtools/samtools')
+job = samtools.cli(args='--help', result_prefix='my_help_test')
+```
+
+Setting the result prefix makes it easy to distinguish results from one another on the result page.
+
+## Run using .run()
+
+The `.run()` function is a more Pythonic way to run applications, where all keyword arguments are passed to the application as command line arguments. This function blocks and waits until the application is finished.
+
+```python
+samtools = biolib.load('samtools/samtools')
+job = samtools.run()
+```
+
+## Run using .start()
+
+The `.start()` function is a more Pythonic way to run applications, where all keyword arguments are passed to the application as command line arguments. This function returns immediately when the job is created.
+
+```python
+samtools = biolib.load('samtools/samtools')
+job = samtools.start()
+```
+
+## Search
+
+To search for applications on BioLib, use the function `biolib.search()`, which takes a search query as the first argument:
+
+```python
+app_list = biolib.search('samtools')
+print(app_list)
+```
+
+This should print something like the following:
+
+```
+['samtools/samtools',
+ 'samtools/samtools-fixmate',
+ 'samtools/samtools-stats',
+ 'samtools/samtools-collate',
+ 'samtools/samtools-fastq',
+ ...]
+```
+
+To run a specific application, you can pass a value from the list above to `biolib.load()` and then call `app.cli()`:
+
+```python
+app = biolib.load(app_list[0])
+job = app.cli('--help')
+```
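
The example above uses only a search query. Judging from the `biolib.search()` signature shown earlier in this diff, optional `team` and `count` parameters are also accepted; a minimal sketch (the team name below is a hypothetical placeholder):

```python
import biolib

# Restrict the search to a specific team and cap the number of returned URIs.
# 'samtools' is used here as a hypothetical team name.
team_app_list = biolib.search('fastq', team='samtools', count=10)
print(team_app_list)
```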
+
+## Results
+
+When a job has completed, its outputs can be accessed by the following functions:
+
+```python
+job.wait()  # Wait until done
+job.get_stdout()  # Returns stdout as bytes
+job.get_stderr()  # Returns stderr as bytes
+job.get_exit_code()  # Returns exit code of the application as an integer
+```
+
+## Save files to disk
+
+To save the output files to a local directory like "result_files", run:
+
+```python
+job.save_files(output_dir='result_files')
+```
+
+The `.save_files()` function also takes an optional `path_filter` argument as a glob pattern. For example, to save all `.pdb` files from a result you can run:
+
+```python
+job.save_files(output_dir='result_files', path_filter='*.pdb')
+```
+
+## In memory files
+
+You can work with result files without saving them to disk. To list the output files from a job:
+
+```python
+job.list_output_files()
+```
+
+To load a single file into memory, without saving it to disk, run:
+
+```python
+my_csv_file = job.get_output_file('/my_file.csv')
+```
+
+To pass an output file to a library like Pandas or BioPython, run `.get_file_handle()` on the object:
+
+```python
+import pandas as pd
+my_dataframe = pd.read_csv(my_csv_file.get_file_handle())
+```
+
+## Jobs
+
+A job object refers to a specific run of an application. It holds progress information about the application run and the result when the job has completed.
+
+### List jobs
+
+When signed in, you can print a table of your jobs by running:
+
+```python
+biolib.show_jobs(count=25)
+```
+
+where count refers to the number of jobs you want to show.
+
+### Retrieve a job
+
+To retrieve a Job in Python, call `biolib.get_job()` with the Job's ID.
+
+```python
+job = biolib.get_job(job_id)
+print(job.get_status())
+```
+
+You can use this to determine whether a job has completed or is still in progress.
+
+### Open in browser
+
+You can open the job in your web browser to view the graphical and interactive output files.
+
+```python
+job.open_browser()
+```
+
+### Stream output
+
+If your Job is still running, you can attach to its stdout and stderr by running:
+
+```python
+job.stream_logs()
+```
+
+This will print the current output and keep streaming stdout and stderr until the job has finished.
+
+### Download output files
+
+You can download job output files using the job ID. The job ID can be found under "Details" on the Results page, or in the share link:
+
+```python
+job_id = '1a234567-b89...'
+job = biolib.get_job(job_id)
+job.save_files('job_output/')
+```
+
+### Download input files
+
+To download the input files of a job:
+
+```python
+job_id = '1a234567-b89...'
+job = biolib.get_job(job_id)
+job.save_input_files(output_dir='input_files')
+```
+
+## Start jobs in parallel
+
+Pass the `blocking=False` argument to `.cli()` on an application to get the job immediately, without having to wait for the application to finish.
+
+This feature allows for parallelized workflows like the one below:
+
+```python
+samtools = biolib.load('samtools/samtools')
+my_fasta_files = ['seq1.fasta', 'seq2.fasta']
+
+my_jobs = []
+for file in my_fasta_files:
+    job = samtools.cli(file, blocking=False)
+    my_jobs.append(job)
+```
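
Once these parallel jobs have been started, their results can be collected with the job functions documented above; a minimal sketch (the output directory name is arbitrary):

```python
# Wait for each job started above, then print its output and save its files,
# using job.wait(), job.get_stdout() and job.save_files() as documented above.
for job in my_jobs:
    job.wait()
    print(job.get_stdout().decode())
    job.save_files(output_dir='parallel_results')
```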
+
+## Experiments
+
+An Experiment is a collection of jobs that you can retrieve together. To group jobs in an Experiment, use the following syntax:
+
+```python
+with biolib.Experiment('my-experiment-name'):
+    my_application.cli(input_1)  # these two jobs will be
+    my_application.cli(input_2)  # grouped in the same Experiment
+```
+
+All jobs started under the with statement will be grouped under the Experiment's ID (in this case `my-experiment-name`).
+
+### List experiments
+
+When logged in, you can print a table of your experiments by running:
+
+```python
+biolib.show_experiments(count=10)
+```
+
+where count refers to the number of experiments you want to show.
+
+### Retrieve an experiment
+
+To load an Experiment in Python, run the following:
+
+```python
+my_experiment = biolib.get_experiment('my-experiment-name')
+print(my_experiment)
+```
+
+### Wait for all jobs
+
+To block and wait until all jobs of an experiment have finished, use the `.wait()` function:
+
+```python
+my_experiment.wait()
+```
+
+### Retrieve jobs
+
+To get a list of the Job objects contained in an Experiment, run:
+
+```python
+my_jobs = my_experiment.get_jobs()
+for job in my_jobs:
+    # Print output
+    if job.get_status() == 'completed':
+        print(job.get_stdout())
+    else:
+        job.stream_logs()
+
+    # Save output files
+    job.save_files('my_results')
+```
+
+### List jobs in an Experiment
+
+To show an overview of the jobs in your experiment, run:
+
+```python
+my_experiment.show_jobs()
+```
+
+This prints a table of the jobs contained in your experiment.
+
+### Mount files
+
+Using `.mount_files()` you can mount all jobs of the experiment and their output files to a local directory. This allows you to explore all the files in the experiment using your file browser.
+
+```python
+my_experiment.mount_files(mount_path='my_local_directory')
biolib/cli/__init__.py CHANGED
@@ -5,7 +5,7 @@ import click
 
 from biolib import utils
 from biolib.biolib_logging import logger, logger_no_user_data
-from biolib.cli import auth, data_record, download_container, init, lfs, push, run, runtime, start
+from biolib.cli import auth, data_record, download_container, init, lfs, push, run, runtime, sdk, start
 
 
 @click.version_option(version=utils.BIOLIB_PACKAGE_VERSION, prog_name='pybiolib')
@@ -31,6 +31,7 @@ cli.add_command(run.run)
 cli.add_command(runtime.runtime)
 cli.add_command(start.start)
 cli.add_command(data_record.data_record)
+cli.add_command(sdk.sdk)
 
 # allow this script to be called without poetry in dev e.g. by an IDE debugger
 if utils.IS_DEV and __name__ == '__main__':
biolib/cli/sdk.py ADDED
@@ -0,0 +1,53 @@
+import os
+import shutil
+import sys
+
+import click
+
+from biolib._internal import llm_instructions
+
+
+@click.group(name='sdk', help='Advanced commands for developers')
+def sdk():
+    pass
+
+
+@sdk.command(
+    name='add-copilot-prompts', help='Add BioLib-specific GitHub Copilot prompts and instructions to your repository'
+)
+@click.option('--force', is_flag=True, help='Force overwrite existing files.')
+def add_copilot_prompts(force: bool) -> None:
+    current_working_directory = os.getcwd()
+    config_file_path = f'{current_working_directory}/.biolib/config.yml'
+    if not os.path.exists(config_file_path):
+        err_string = """
+Error: Current directory has not been initialized as a BioLib application.
+Please run the "biolib init" command first"""
+        click.echo(err_string, file=sys.stderr)
+        exit(1)
+    source_path = os.path.join(os.path.dirname(llm_instructions.__file__), '.github')
+    destination_path = os.path.join(current_working_directory, '.github')
+
+    conflicting_files = []
+
+    for root, _, files in os.walk(source_path):
+        relative_dir = os.path.relpath(root, source_path)
+        destination_dir = os.path.join(destination_path, relative_dir)
+        for file in files:
+            source_file = os.path.join(root, file)
+            destination_file = os.path.join(destination_dir, file)
+            if os.path.exists(destination_file) and not force:
+                with open(source_file, 'rb') as fsrc, open(destination_file, 'rb') as fdest:
+                    if fsrc.read() != fdest.read():
+                        conflicting_files.append(os.path.relpath(destination_file, current_working_directory))
+            else:
+                os.makedirs(destination_dir, exist_ok=True)
+                shutil.copy2(source_file, destination_file)
+
+    if conflicting_files:
+        click.echo('The following files were not overwritten. Use --force to override them:', file=sys.stderr)
+        for conflicting_file in conflicting_files:
+            click.echo(f' {conflicting_file}', file=sys.stderr)
+        exit(1)
+
+    click.echo(f'Prompt and instruction files added to {destination_path}/')
biolib/compute_node/job_worker/job_worker.py CHANGED
@@ -195,11 +195,13 @@ class JobWorker:
     def _cleanup(self) -> None:
         self.is_cleaning_up = True
 
+        logger_no_user_data.debug('Cleaning up executers...')
         for executor in self._executors:
             executor.cleanup()
 
         proxy_count = len(self._remote_host_proxies)
         if proxy_count > 0:
+            logger_no_user_data.debug('Cleaning up proxies...')
            proxy_cleanup_start_time = time()
 
            for proxy in self._remote_host_proxies:
@@ -212,10 +214,12 @@ class JobWorker:
            self._remote_host_proxies = []
            logger_no_user_data.debug(f'Cleaned up {proxy_count} proxies in {time() - proxy_cleanup_start_time}')
 
+        logger_no_user_data.debug('Cleaning up networks...')
         self._cleanup_network(self._internal_network)
         self._internal_network = None
         self._cleanup_network(self._public_network)
         self._public_network = None
+        logger_no_user_data.debug('Cleaned up networks...')
 
     @staticmethod
     def _cleanup_network(network: Optional[Network]) -> None:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pybiolib
-Version: 1.2.611
+Version: 1.2.659
 Summary: BioLib Python Client
 License: MIT
 Keywords: biolib
@@ -1,4 +1,4 @@
-biolib/__init__.py,sha256=nPgMXXoJ4wqGwDqMgwtAYrw_-GVMdaWwKemOpRKKljU,9774
+biolib/__init__.py,sha256=9uAH7V0r3t3tEA3Lx0mbd9b9rTI1fRt-rGzkRvwMfZw,10402
 biolib/_data_record/data_record.py,sha256=zKvnh5T-dIVY46-kgVzMBoZ666ZhcTCFQnWvZT0D6RM,12026
 biolib/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 biolib/_internal/data_record/__init__.py,sha256=fGdME6JGRU_2VxpJbYpGXYndjN-feUkmKY4fuMyq3cg,76
@@ -13,6 +13,10 @@ biolib/_internal/lfs/__init__.py,sha256=gSWo_xg61UniYgD7yNYxeT4I9uaXBCBSi3_nmZjn
 biolib/_internal/lfs/cache.py,sha256=pQS2np21rdJ6I3DpoOutnzPHpLOZgUIS8TMltUJk_k4,2226
 biolib/_internal/libs/__init__.py,sha256=Jdf4tNPqe_oIIf6zYml6TiqhL_02Vyqwge6IELrAFhw,98
 biolib/_internal/libs/fusepy/__init__.py,sha256=AWDzNFS-XV_5yKb0Qx7kggIhPzq1nj_BZS5y2Nso08k,41944
+biolib/_internal/llm_instructions/.github/copilot-instructions.md,sha256=7xW_wXe-2QAJnMnyANGLd_RTB7Nw0BX_2crKLQUUMYc,403
+biolib/_internal/llm_instructions/.github/prompts/biolib_app_inputs.prompt.md,sha256=DcBUNaViuNfXoomj-IVUcIcBtJlXuIYOroUfDM_R6wU,3212
+biolib/_internal/llm_instructions/.github/prompts/biolib_run_apps.prompt.md,sha256=HYv-MgpQKIU7Hv90SvZ3tqXY6KWxYVnqamXDRyInOHM,7926
+biolib/_internal/llm_instructions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 biolib/_internal/push_application.py,sha256=Ffj3iZ0nma4KHTB-PKCeFvhCHHhAEhgKClxnfBUpL8U,12296
 biolib/_internal/runtime.py,sha256=BiHl4klUHr36MCpqKaUso4idHeBZfPAahLYRQrabFqA,486
 biolib/_internal/types/__init__.py,sha256=xLgOQJFh3GRtiqIJq7MaqHReZx4pp34_zcaFQ_JjuJ4,198
@@ -58,7 +62,7 @@ biolib/biolib_docker_client/__init__.py,sha256=aBfA6mtWSI5dBEfNNMD6bIZzCPloW4ghK
 biolib/biolib_download_container.py,sha256=8TmBV8iv3bCvkNlHa1SSsc4zl0wX_eaxhfnW5rvFIh8,1779
 biolib/biolib_errors.py,sha256=eyQolC4oGi350BDEMOd-gAHYVqo2L2lYgv6c4ZdXPjQ,1184
 biolib/biolib_logging.py,sha256=S2y6fk7jjxZQMN2BtKkHBb4JbVQQSSU97T7ep4P_QCE,2855
-biolib/cli/__init__.py,sha256=0v3c_J-U0k46c5ZWeQjLG_kTaKDJm81LBxQpDO2B_aI,1286
+biolib/cli/__init__.py,sha256=IHC2bEyA27pvgp-18SGfFVJOP456elanz7suDP8D084,1316
 biolib/cli/auth.py,sha256=rpWGmXs6Fz6CGrO9K8ibPRszOdXG78Vig_boKaVCD9A,2082
 biolib/cli/data_record.py,sha256=t8DfJK2EZ_SNZ9drDA_N5Jqy8DNwf9f5SlFrIaOvtv0,3501
 biolib/cli/download_container.py,sha256=HIZVHOPmslGE5M2Dsp9r2cCkAEJx__vcsDz5Wt5LRos,483
@@ -67,6 +71,7 @@ biolib/cli/lfs.py,sha256=z2qHUwink85mv9yDgifbVKkVwuyknGhMDTfly_gLKJM,4151
 biolib/cli/push.py,sha256=RxB4RHpjtL27Fpq7WRGojJ53R9jUuE1Cq9_NmHFzNsM,1306
 biolib/cli/run.py,sha256=MCo0ZqW2pHBxOoCI3i5gAx5D0auW9fmxHqkAF4TRhms,2134
 biolib/cli/runtime.py,sha256=Xv-nrma5xX8NidWcvbUKcUvuN5TCarZa4A8mPVmF-z0,361
+biolib/cli/sdk.py,sha256=YTeriA-jC5XuEbhMMQ4DlOZXM2y0vN5YsKDNoAeZao8,2140
 biolib/cli/start.py,sha256=rg8VVY8rboFhf1iQo3zE3WA5oh_R1VWWfYJEO1gMReY,1737
 biolib/compute_node/.gitignore,sha256=GZdZ4g7HftqfOfasFpBC5zV1YQAbht1a7EzcXD6f3zg,45
 biolib/compute_node/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -84,7 +89,7 @@ biolib/compute_node/job_worker/executors/types.py,sha256=wbjWZZ2f9FttjqUCCOeZmn7
 biolib/compute_node/job_worker/job_legacy_input_wait_timeout_thread.py,sha256=_cvEiZbOwfkv6fYmfrvdi_FVviIEYr_dSClQcOQaUWM,1198
 biolib/compute_node/job_worker/job_max_runtime_timer_thread.py,sha256=K_xgz7IhiIjpLlXRk8sqaMyLoApcidJkgu29sJX0gb8,1174
 biolib/compute_node/job_worker/job_storage.py,sha256=lScHI3ubcHKagSEW243tgbIWXUfbWDHDjEOPMvXxJE8,4603
-biolib/compute_node/job_worker/job_worker.py,sha256=xp1laEWUhUsKzF4ITdsTWcQlSn133pAjkeiPMAG5jqQ,28892
+biolib/compute_node/job_worker/job_worker.py,sha256=3m6_ZzsSbuygbh9UmQ2RQrPZgJVsjcSR5V57ddECcqo,29139
 biolib/compute_node/job_worker/large_file_system.py,sha256=XXqRlVtYhs-Ji9zQGIk5KQPXFO_Q5jJH0nnlw4GkeMY,10461
 biolib/compute_node/job_worker/mappings.py,sha256=Z48Kg4nbcOvsT2-9o3RRikBkqflgO4XeaWxTGz-CNvI,2499
 biolib/compute_node/job_worker/utilization_reporter_thread.py,sha256=7tm5Yk9coqJ9VbEdnO86tSXI0iM0omwIyKENxdxiVXk,8575
@@ -120,8 +125,8 @@ biolib/utils/cache_state.py,sha256=u256F37QSRIVwqKlbnCyzAX4EMI-kl6Dwu6qwj-Qmag,3
 biolib/utils/multipart_uploader.py,sha256=XvGP1I8tQuKhAH-QugPRoEsCi9qvbRk-DVBs5PNwwJo,8452
 biolib/utils/seq_util.py,sha256=Ozk0blGtPur_D9MwShD02r_mphyQmgZkx-lOHOwnlIM,6730
 biolib/utils/zip/remote_zip.py,sha256=0wErYlxir5921agfFeV1xVjf29l9VNgGQvNlWOlj2Yc,23232
-pybiolib-1.2.611.dist-info/LICENSE,sha256=F2h7gf8i0agDIeWoBPXDMYScvQOz02pAWkKhTGOHaaw,1067
-pybiolib-1.2.611.dist-info/METADATA,sha256=RvKSHD8jbb_CwygaoekwwPbMLUqGkYWYwokOpkEAebw,1570
-pybiolib-1.2.611.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-pybiolib-1.2.611.dist-info/entry_points.txt,sha256=p6DyaP_2kctxegTX23WBznnrDi4mz6gx04O5uKtRDXg,42
-pybiolib-1.2.611.dist-info/RECORD,,
+pybiolib-1.2.659.dist-info/LICENSE,sha256=F2h7gf8i0agDIeWoBPXDMYScvQOz02pAWkKhTGOHaaw,1067
+pybiolib-1.2.659.dist-info/METADATA,sha256=qpaLR_QUpfGpkRX0F9kNFxh5_I5GGlh1eLnIfzopGs4,1570
+pybiolib-1.2.659.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+pybiolib-1.2.659.dist-info/entry_points.txt,sha256=p6DyaP_2kctxegTX23WBznnrDi4mz6gx04O5uKtRDXg,42
+pybiolib-1.2.659.dist-info/RECORD,,