pybiolib 1.2.1056__py3-none-any.whl → 1.2.1642__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pybiolib might be problematic.

Files changed (86)
  1. biolib/__init__.py +33 -10
  2. biolib/_data_record/data_record.py +24 -11
  3. biolib/_index/__init__.py +0 -0
  4. biolib/_index/index.py +51 -0
  5. biolib/_index/types.py +7 -0
  6. biolib/_internal/data_record/data_record.py +1 -1
  7. biolib/_internal/data_record/push_data.py +1 -1
  8. biolib/_internal/data_record/remote_storage_endpoint.py +3 -3
  9. biolib/_internal/file_utils.py +7 -4
  10. biolib/_internal/index/__init__.py +1 -0
  11. biolib/_internal/index/index.py +18 -0
  12. biolib/_internal/lfs/cache.py +4 -2
  13. biolib/_internal/push_application.py +89 -23
  14. biolib/_internal/runtime.py +2 -0
  15. biolib/_internal/templates/gui_template/App.tsx +38 -2
  16. biolib/_internal/templates/gui_template/Dockerfile +2 -0
  17. biolib/_internal/templates/gui_template/biolib-sdk.ts +37 -0
  18. biolib/_internal/templates/gui_template/dev-data/output.json +7 -0
  19. biolib/_internal/templates/gui_template/package.json +1 -0
  20. biolib/_internal/templates/gui_template/vite-plugin-dev-data.ts +49 -0
  21. biolib/_internal/templates/gui_template/vite.config.mts +2 -1
  22. biolib/_internal/templates/init_template/.github/workflows/biolib.yml +6 -1
  23. biolib/_internal/templates/init_template/Dockerfile +2 -0
  24. biolib/_internal/utils/__init__.py +25 -0
  25. biolib/_internal/utils/job_url.py +33 -0
  26. biolib/_runtime/runtime.py +9 -0
  27. biolib/_session/session.py +7 -5
  28. biolib/_shared/__init__.py +0 -0
  29. biolib/_shared/types/__init__.py +69 -0
  30. biolib/_shared/types/resource.py +17 -0
  31. biolib/_shared/types/resource_deploy_key.py +11 -0
  32. biolib/{_internal → _shared}/types/resource_permission.py +1 -1
  33. biolib/_shared/utils/__init__.py +7 -0
  34. biolib/_shared/utils/resource_uri.py +75 -0
  35. biolib/api/client.py +1 -1
  36. biolib/app/app.py +56 -23
  37. biolib/biolib_api_client/app_types.py +1 -6
  38. biolib/biolib_api_client/biolib_app_api.py +17 -0
  39. biolib/biolib_binary_format/module_input.py +8 -0
  40. biolib/biolib_binary_format/remote_endpoints.py +3 -3
  41. biolib/biolib_binary_format/remote_stream_seeker.py +39 -25
  42. biolib/cli/__init__.py +2 -1
  43. biolib/cli/data_record.py +17 -0
  44. biolib/cli/index.py +32 -0
  45. biolib/cli/lfs.py +1 -1
  46. biolib/cli/start.py +14 -1
  47. biolib/compute_node/job_worker/executors/docker_executor.py +31 -9
  48. biolib/compute_node/job_worker/executors/docker_types.py +1 -1
  49. biolib/compute_node/job_worker/executors/types.py +6 -5
  50. biolib/compute_node/job_worker/job_worker.py +149 -93
  51. biolib/compute_node/job_worker/large_file_system.py +2 -6
  52. biolib/compute_node/job_worker/network_alloc.py +99 -0
  53. biolib/compute_node/job_worker/network_buffer.py +240 -0
  54. biolib/compute_node/job_worker/utilization_reporter_thread.py +2 -2
  55. biolib/compute_node/remote_host_proxy.py +125 -67
  56. biolib/compute_node/utils.py +2 -0
  57. biolib/compute_node/webserver/compute_node_results_proxy.py +188 -0
  58. biolib/compute_node/webserver/proxy_utils.py +28 -0
  59. biolib/compute_node/webserver/webserver.py +64 -19
  60. biolib/experiments/experiment.py +98 -16
  61. biolib/jobs/job.py +119 -29
  62. biolib/jobs/job_result.py +70 -33
  63. biolib/jobs/types.py +1 -0
  64. biolib/sdk/__init__.py +17 -2
  65. biolib/typing_utils.py +1 -1
  66. biolib/utils/cache_state.py +2 -2
  67. biolib/utils/seq_util.py +1 -1
  68. {pybiolib-1.2.1056.dist-info → pybiolib-1.2.1642.dist-info}/METADATA +4 -2
  69. {pybiolib-1.2.1056.dist-info → pybiolib-1.2.1642.dist-info}/RECORD +84 -66
  70. {pybiolib-1.2.1056.dist-info → pybiolib-1.2.1642.dist-info}/WHEEL +1 -1
  71. biolib/_internal/types/__init__.py +0 -6
  72. biolib/utils/app_uri.py +0 -57
  73. /biolib/{_internal → _shared}/types/account.py +0 -0
  74. /biolib/{_internal → _shared}/types/account_member.py +0 -0
  75. /biolib/{_internal → _shared}/types/app.py +0 -0
  76. /biolib/{_internal → _shared}/types/data_record.py +0 -0
  77. /biolib/{_internal → _shared}/types/experiment.py +0 -0
  78. /biolib/{_internal → _shared}/types/file_node.py +0 -0
  79. /biolib/{_internal → _shared}/types/push.py +0 -0
  80. /biolib/{_internal/types/resource.py → _shared/types/resource_types.py} +0 -0
  81. /biolib/{_internal → _shared}/types/resource_version.py +0 -0
  82. /biolib/{_internal → _shared}/types/result.py +0 -0
  83. /biolib/{_internal → _shared}/types/typing.py +0 -0
  84. /biolib/{_internal → _shared}/types/user.py +0 -0
  85. {pybiolib-1.2.1056.dist-info → pybiolib-1.2.1642.dist-info}/entry_points.txt +0 -0
  86. {pybiolib-1.2.1056.dist-info → pybiolib-1.2.1642.dist-info/licenses}/LICENSE +0 -0
biolib/compute_node/job_worker/network_buffer.py
@@ -0,0 +1,240 @@
+import contextlib
+import json
+import os
+import socket
+import time
+from typing import List, Optional
+
+from docker.errors import NotFound
+from docker.models.networks import Network
+
+from biolib import utils
+from biolib.biolib_docker_client import BiolibDockerClient
+from biolib.biolib_logging import logger_no_user_data
+from biolib.compute_node.job_worker.network_alloc import _allocate_network_with_retries
+
+
+class NetworkBuffer:
+    BUFFER_SIZE = 25
+    NETWORK_NAME_PREFIX = 'biolib-remote-host-network-'
+    NETWORK_LABEL = 'biolib-role=remote-host-network'
+
+    _BIOLIB_DIR = '/biolib' if utils.IS_RUNNING_IN_CLOUD else '/tmp/biolib'
+    _NETWORKS_FILE = os.path.join(_BIOLIB_DIR, 'remote-host-networks.json')
+    _LOCK_FILE = os.path.join(_BIOLIB_DIR, 'remote-host-networks.lock')
+    _LOCK_TIMEOUT_SECONDS = 60
+    _STALE_LOCK_THRESHOLD_SECONDS = 600
+
+    _instance: Optional['NetworkBuffer'] = None
+
+    def __init__(self):
+        os.makedirs(self._BIOLIB_DIR, exist_ok=True)
+        self._docker = BiolibDockerClient.get_docker_client()
+
+    @classmethod
+    def get_instance(cls) -> 'NetworkBuffer':
+        if cls._instance is None:
+            cls._instance = cls()
+        return cls._instance
+
+    def _acquire_lock(self) -> None:
+        start_time = time.time()
+        retry_count = 0
+
+        while time.time() - start_time < self._LOCK_TIMEOUT_SECONDS:
+            try:
+                with open(self._LOCK_FILE, 'x') as lock_file:
+                    lock_info = {
+                        'pid': os.getpid(),
+                        'hostname': socket.gethostname(),
+                        'started_at': time.time(),
+                    }
+                    json.dump(lock_info, lock_file)
+                return
+            except FileExistsError:
+                if retry_count == 0:
+                    self._check_and_remove_stale_lock()
+
+                time.sleep(0.5)
+                retry_count += 1
+
+        raise RuntimeError(
+            f'Failed to acquire network buffer lock after {self._LOCK_TIMEOUT_SECONDS}s: {self._LOCK_FILE}'
+        )
+
+    def _check_and_remove_stale_lock(self) -> None:
+        try:
+            if not os.path.exists(self._LOCK_FILE):
+                return
+
+            lock_mtime = os.path.getmtime(self._LOCK_FILE)
+            lock_age = time.time() - lock_mtime
+
+            if lock_age > self._STALE_LOCK_THRESHOLD_SECONDS:
+                try:
+                    with open(self._LOCK_FILE) as f:
+                        lock_info = json.load(f)
+                        lock_pid = lock_info.get('pid')
+
+                        if lock_pid:
+                            try:
+                                os.kill(lock_pid, 0)
+                                logger_no_user_data.warning(
+                                    f'Lock file is old ({lock_age:.0f}s) but process {lock_pid} is still alive'
+                                )
+                                return
+                            except (OSError, ProcessLookupError):
+                                pass
+
+                except (json.JSONDecodeError, KeyError, ValueError):
+                    pass
+
+                logger_no_user_data.warning(
+                    f'Removing stale lock file (age: {lock_age:.0f}s, threshold: {self._STALE_LOCK_THRESHOLD_SECONDS}s)'
+                )
+                os.remove(self._LOCK_FILE)
+
+        except Exception as error:
+            logger_no_user_data.debug(f'Error checking stale lock: {error}')
+
+    def _release_lock(self) -> None:
+        with contextlib.suppress(FileNotFoundError):
+            os.remove(self._LOCK_FILE)
+
+    def _read_available_networks(self) -> List[str]:
+        if not os.path.exists(self._NETWORKS_FILE):
+            return []
+
+        try:
+            with open(self._NETWORKS_FILE) as f:
+                network_ids = json.load(f)
+                if not isinstance(network_ids, list):
+                    logger_no_user_data.error(
+                        f'Invalid network buffer file format (expected list, got {type(network_ids).__name__})'
+                    )
+                    self._backup_corrupted_file()
+                    return []
+                return network_ids
+        except json.JSONDecodeError as error:
+            logger_no_user_data.error(f'Corrupted network buffer file: {error}')
+            self._backup_corrupted_file()
+            return []
+        except Exception as error:
+            logger_no_user_data.error(f'Failed to read network buffer file: {error}')
+            return []
+
+    def _write_available_networks(self, network_ids: List[str]) -> None:
+        temp_file = f'{self._NETWORKS_FILE}.tmp'
+        try:
+            with open(temp_file, 'w') as f:
+                json.dump(network_ids, f, indent=2)
+                f.flush()
+                os.fsync(f.fileno())
+
+            os.replace(temp_file, self._NETWORKS_FILE)
+        except Exception as error:
+            logger_no_user_data.error(f'Failed to write network buffer file: {error}')
+            with contextlib.suppress(FileNotFoundError):
+                os.remove(temp_file)
+            raise
+
+    def _backup_corrupted_file(self) -> None:
+        try:
+            timestamp = int(time.time())
+            backup_path = f'{self._NETWORKS_FILE}.corrupt-{timestamp}'
+            os.rename(self._NETWORKS_FILE, backup_path)
+            logger_no_user_data.error(f'Backed up corrupted file to {backup_path}')
+        except Exception as error:
+            logger_no_user_data.error(f'Failed to backup corrupted file: {error}')
+
+    def allocate_networks(self, job_id: str, count: int) -> List[Network]:
+        try:
+            self._acquire_lock()
+
+            available_ids = self._read_available_networks()
+            allocated: List[Network] = []
+
+            for _ in range(count):
+                network = None
+
+                while available_ids and network is None:
+                    net_id = available_ids.pop(0)
+                    try:
+                        network = self._docker.networks.get(net_id)
+                        logger_no_user_data.debug(
+                            f'Allocated network {network.id} ({network.name}) from buffer for job {job_id}'
+                        )
+                    except NotFound:
+                        logger_no_user_data.warning(
+                            f'Network {net_id} in buffer file no longer exists in Docker, skipping'
+                        )
+                        network = None
+
+                if network is None:
+                    logger_no_user_data.debug(f'Buffer exhausted, creating network on-the-fly for job {job_id}')
+                    network = self._create_network()
+
+                allocated.append(network)
+
+            self._write_available_networks(available_ids)
+            return allocated
+
+        except RuntimeError as error:
+            logger_no_user_data.warning(f'Lock acquisition failed: {error}. Creating networks on-the-fly.')
+            allocated = []
+            for _ in range(count):
+                network = self._create_network()
+                allocated.append(network)
+            return allocated
+
+        finally:
+            self._release_lock()
+
+    def fill_buffer(self) -> int:
+        try:
+            self._acquire_lock()
+
+            available_ids = self._read_available_networks()
+            current_count = len(available_ids)
+            needed = self.BUFFER_SIZE - current_count
+
+            if needed <= 0:
+                logger_no_user_data.debug(
+                    f'Buffer already has {current_count} available networks (target: {self.BUFFER_SIZE})'
+                )
+                return 0
+
+            logger_no_user_data.debug(
+                f'Filling buffer: current={current_count}, target={self.BUFFER_SIZE}, creating={needed}'
+            )
+
+            created_count = 0
+            for _ in range(needed):
+                try:
+                    network = self._create_network()
+                    if network.id:
+                        available_ids.append(network.id)
+                        created_count += 1
+                        logger_no_user_data.debug(f'Created buffer network {network.id} ({created_count}/{needed})')
+                    else:
+                        logger_no_user_data.error('Created network has no ID, skipping')
+                except Exception as error:
+                    logger_no_user_data.error(f'Failed to create buffer network: {error}')
+                    continue
+
+            self._write_available_networks(available_ids)
+            logger_no_user_data.debug(f'Buffer fill complete: created {created_count} networks')
+            return created_count
+
+        finally:
+            self._release_lock()
+
+    def _create_network(self) -> Network:
+        network = _allocate_network_with_retries(
+            name_prefix=self.NETWORK_NAME_PREFIX,
+            docker_client=self._docker,
+            internal=True,
+            driver='bridge',
+            labels={'biolib-role': 'remote-host-network'},
+        )
+        return network
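
For context on how this buffer is meant to be used: a minimal sketch, assuming a periodic background fill on the compute node (the cadence and the caller below are illustrative, not taken from this diff):

import time

from biolib.compute_node.job_worker.network_buffer import NetworkBuffer


def keep_buffer_filled(interval_seconds: int = 60) -> None:
    # Top the buffer up to BUFFER_SIZE (25) on a fixed cadence.
    buffer = NetworkBuffer.get_instance()
    while True:
        buffer.fill_buffer()
        time.sleep(interval_seconds)


# At job start, take pre-created networks off the buffer instead of paying the
# Docker network-creation cost on the critical path; allocate_networks falls
# back to creating networks on the fly when the buffer is empty or the file
# lock cannot be acquired.
networks = NetworkBuffer.get_instance().allocate_networks(job_id='example-job-uuid', count=3)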
biolib/compute_node/job_worker/utilization_reporter_thread.py
@@ -1,7 +1,7 @@
 import threading
 import time
 import subprocess
-from datetime import datetime
+from datetime import datetime, timezone
 
 from docker.models.containers import Container  # type: ignore
 
@@ -173,7 +173,7 @@ class UtilizationReporterThread(threading.Thread):
             gpu_max_usage_in_percent=gpu_max_usage_in_percent,
             memory_average_usage_in_percent=memory_average_usage_in_percent,
             memory_max_usage_in_percent=memory_max_usage_in_percent,
-            recorded_at=datetime.utcnow().isoformat(),
+            recorded_at=datetime.now(timezone.utc).isoformat(),
             sampling_period_in_milliseconds=self._sampling_period_in_milliseconds * self._samples_between_writes,
         )
 
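This change matters beyond style: datetime.utcnow() returns a naive datetime (no tzinfo) and is deprecated since Python 3.12, so its isoformat() output carries no UTC offset. A quick standard-library illustration:

from datetime import datetime, timezone

# Naive timestamp, no offset marker (deprecated since Python 3.12):
datetime.utcnow().isoformat()           # e.g. '2024-05-01T12:00:00.000000'

# Timezone-aware replacement used above; isoformat() appends '+00:00':
datetime.now(timezone.utc).isoformat()  # e.g. '2024-05-01T12:00:00.000000+00:00'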
biolib/compute_node/remote_host_proxy.py
@@ -1,14 +1,13 @@
 import base64
 import io
-import subprocess
+import ipaddress
 import tarfile
 import time
 from urllib.parse import urlparse
 
-from docker.errors import ImageNotFound  # type: ignore
-from docker.models.containers import Container  # type: ignore
-from docker.models.images import Image  # type: ignore
-from docker.models.networks import Network  # type: ignore
+from docker.models.containers import Container
+from docker.models.networks import Network
+from docker.types import EndpointConfig
 
 from biolib import utils
 from biolib.biolib_api_client import BiolibApiClient, RemoteHost
@@ -16,7 +15,22 @@ from biolib.biolib_docker_client import BiolibDockerClient
 from biolib.biolib_errors import BioLibError
 from biolib.biolib_logging import logger_no_user_data
 from biolib.compute_node.cloud_utils import CloudUtils
-from biolib.typing_utils import List, Optional
+from biolib.compute_node.utils import BIOLIB_PROXY_NETWORK_NAME
+from biolib.compute_node.webserver.proxy_utils import get_biolib_nginx_proxy_image
+from biolib.typing_utils import Dict, List, Optional
+
+
+def get_static_ip_from_network(network: Network, offset: int = 2) -> str:
+    ipam_config = network.attrs['IPAM']['Config']
+    if not ipam_config:
+        raise BioLibError(f'Network {network.name} has no IPAM configuration')
+
+    subnet_str = ipam_config[0]['Subnet']
+    subnet = ipaddress.ip_network(subnet_str, strict=False)
+
+    static_ip = str(subnet.network_address + offset)
+
+    return static_ip
 
 
 # Prepare for remote hosts with specified port
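
A worked example of get_static_ip_from_network, using a hypothetical IPAM subnet (Docker assigns the real one when the network is created):

import ipaddress

# With an IPAM subnet of 172.20.0.0/24 and the default offset of 2:
subnet = ipaddress.ip_network('172.20.0.0/24', strict=False)
print(subnet.network_address + 2)  # -> 172.20.0.2
# Offset 2 skips the network address (.0) and the gateway address Docker
# typically claims (.1), leaving .2 free as the proxy's static IP.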
@@ -24,78 +38,98 @@ class RemoteHostExtended(RemoteHost):
     ports: List[int]
 
 
+class RemoteHostMapping:
+    def __init__(self, hostname: str, ports: List[int], network: Network, static_ip: str):
+        self.hostname = hostname
+        self.ports = ports
+        self.network = network
+        self.static_ip = static_ip
+
+
 class RemoteHostProxy:
     def __init__(
         self,
-        remote_host: RemoteHost,
-        public_network: Network,
-        internal_network: Optional[Network],
+        remote_host_mappings: List[RemoteHostMapping],
         job_id: str,
-        ports: List[int],
+        app_caller_network: Optional[Network] = None,
     ):
-        self.is_app_caller_proxy = remote_host['hostname'] == 'AppCallerProxy'
-        self._remote_host: RemoteHostExtended = RemoteHostExtended(hostname=remote_host['hostname'], ports=ports)
-        self._public_network: Network = public_network
-        self._internal_network: Optional[Network] = internal_network
+        self._remote_host_mappings = remote_host_mappings
+        self._app_caller_network = app_caller_network
+        self.is_app_caller_proxy = app_caller_network is not None
 
         if not job_id:
             raise Exception('RemoteHostProxy missing argument "job_id"')
 
-        self._name = f'biolib-remote-host-proxy-{job_id}-{self.hostname}'
+        suffix = '-AppCallerProxy' if app_caller_network else ''
+        self._name = f'biolib-remote-host-proxy-{job_id}{suffix}'
         self._job_uuid = job_id
         self._container: Optional[Container] = None
-        self._enclave_traffic_forwarder_processes: List[subprocess.Popen] = []
         self._docker = BiolibDockerClient().get_docker_client()
 
-    @property
-    def hostname(self) -> str:
-        return self._remote_host['hostname']
+    def get_hostname_to_ip_mapping(self) -> Dict[str, str]:
+        return {mapping.hostname: mapping.static_ip for mapping in self._remote_host_mappings}
+
+    def get_remote_host_networks(self) -> List[Network]:
+        networks = [mapping.network for mapping in self._remote_host_mappings]
+        return networks
 
     def get_ip_address_on_network(self, network: Network) -> str:
         if not self._container:
-            raise Exception('RemoteHostProxy not yet started')
+            raise BioLibError('RemoteHostProxy not yet started')
 
         container_networks = self._container.attrs['NetworkSettings']['Networks']
         if network.name in container_networks:
             ip_address: str = container_networks[network.name]['IPAddress']
+            if not ip_address:
+                raise BioLibError(f'No IP address found for network {network.name}')
             return ip_address
 
-        raise Exception(f'RemoteHostProxy not connected to network {network.name}')
+        raise BioLibError(f'RemoteHostProxy not connected to network {network.name}')
 
     def start(self) -> None:
-        # TODO: Implement nice error handling in this method
-
-        upstream_server_name = self._remote_host['hostname']
-        upstream_server_ports = self._remote_host['ports']
-
         docker = BiolibDockerClient.get_docker_client()
 
+        networking_config: Optional[Dict[str, EndpointConfig]] = (
+            None
+            if not self.is_app_caller_proxy
+            else {
+                BIOLIB_PROXY_NETWORK_NAME: docker.api.create_endpoint_config(
+                    aliases=[f'biolib-app-caller-proxy-{self._job_uuid}']
+                )
+            }
+        )
+
         for index in range(3):
             logger_no_user_data.debug(f'Attempt {index} at creating RemoteHostProxy container "{self._name}"...')
             try:
                 self._container = docker.containers.create(
                     detach=True,
-                    image=self._get_biolib_remote_host_proxy_image(),
+                    image=get_biolib_nginx_proxy_image(),
                     name=self._name,
-                    network=self._public_network.name,
+                    network=BIOLIB_PROXY_NETWORK_NAME,
+                    networking_config=networking_config,
                 )
                 break
-            except Exception as error:  # pylint: disable=broad-except
+            except Exception as error:
                 logger_no_user_data.exception(f'Failed to create container "{self._name}" hit error: {error}')
 
                 logger_no_user_data.debug('Sleeping before re-trying container creation...')
                 time.sleep(3)
 
-        if not self._container:
+        if not self._container or not self._container.id:
            raise BioLibError(f'Exceeded re-try limit for creating container {self._name}')
 
-        self._write_nginx_config_to_container(
-            upstream_server_name,
-            upstream_server_ports,
-        )
+        for mapping in self._remote_host_mappings:
+            mapping.network.connect(self._container.id, ipv4_address=mapping.static_ip)
+            logger_no_user_data.debug(
+                f'Connected proxy to network {mapping.network.name} with static IP {mapping.static_ip}'
+            )
+
+        if self._app_caller_network:
+            self._app_caller_network.connect(self._container.id)
+            logger_no_user_data.debug(f'Connected app caller proxy to network {self._app_caller_network.name}')
 
-        if self._internal_network:
-            self._internal_network.connect(self._container.id)
+        self._write_nginx_config_to_container()
 
         self._container.start()
@@ -121,28 +155,7 @@
         if self._container:
             self._container.remove(force=True)
 
-        for process in self._enclave_traffic_forwarder_processes:
-            process.terminate()
-
-    def _get_biolib_remote_host_proxy_image(self) -> Image:
-        if utils.IS_RUNNING_IN_CLOUD:
-            try:
-                logger_no_user_data.debug('Getting local Docker image for remote host proxy')
-                return self._docker.images.get('biolib-remote-host-proxy:latest')
-            except ImageNotFound:
-                logger_no_user_data.debug(
-                    'Local Docker image for remote host proxy not available. Falling back to public image...'
-                )
-
-        public_image_uri = 'public.ecr.aws/h5y4b3l1/biolib-remote-host-proxy:latest'
-        try:
-            logger_no_user_data.debug('Getting public Docker image for remote host proxy')
-            return self._docker.images.get(public_image_uri)
-        except ImageNotFound:
-            logger_no_user_data.debug('Pulling public Docker image for remote host proxy')
-            return self._docker.images.pull(public_image_uri)
-
-    def _write_nginx_config_to_container(self, upstream_server_name: str, upstream_server_ports: List[int]) -> None:
+    def _write_nginx_config_to_container(self) -> None:
         if not self._container:
             raise Exception('RemoteHostProxy container not defined when attempting to write NGINX config')
 
@@ -328,6 +341,22 @@ http {{
             proxy_ssl_server_name on;
         }}
 
+        location /api/proxy/index/ {{
+            proxy_pass https://$upstream_hostname$request_uri;
+            proxy_set_header authorization "Basic {biolib_index_basic_auth_base64}";
+            proxy_set_header cookie "";
+            proxy_ssl_server_name on;
+        }}
+
+        location ~* "^/api/accounts/(?<account_id>[a-z0-9-]{{36}})/metrics/jobs/$" {{
+            proxy_pass https://$upstream_hostname/api/accounts/$account_id/metrics/jobs/$is_args$args;
+            proxy_set_header authorization "";
+            proxy_set_header compute-node-auth-token "{compute_node_auth_token}";
+            proxy_set_header job-uuid "{self._job_uuid}";
+            proxy_set_header cookie "";
+            proxy_ssl_server_name on;
+        }}
+
         location /api/ {{
             proxy_pass https://$upstream_hostname$request_uri;
             proxy_set_header authorization "";
@@ -367,28 +396,57 @@ http {{
                 return 404 "Not found";
             }}
         }}
+
+        server {{
+            listen 1080;
+            resolver 127.0.0.11 ipv6=off valid=30s;
+
+            if ($http_biolib_result_uuid != "{self._job_uuid}") {{
+                return 403 "Invalid or missing biolib-result-uuid header";
+            }}
+
+            if ($http_biolib_result_port = "") {{
+                return 400 "Missing biolib-result-port header";
+            }}
+
+            location / {{
+                proxy_pass http://main:$http_biolib_result_port$request_uri;
+                proxy_set_header biolib-result-uuid "";
+                proxy_set_header biolib-result-port "";
+                proxy_pass_request_headers on;
+            }}
+        }}
     }}
 """
         else:
+            port_to_mappings: Dict[int, List[RemoteHostMapping]] = {}
+            for mapping in self._remote_host_mappings:
+                for port in mapping.ports:
+                    if port not in port_to_mappings:
+                        port_to_mappings[port] = []
+                    port_to_mappings[port].append(mapping)
+
             nginx_config = """
 events {}
 error_log /dev/stdout info;
 stream {
     resolver 127.0.0.11 valid=30s;"""
-            for idx, upstream_server_port in enumerate(upstream_server_ports):
+
+            for port, mappings in port_to_mappings.items():
                 nginx_config += f"""
-    map "" $upstream_{idx} {{
-        default {upstream_server_name}:{upstream_server_port};
-    }}
+    map $server_addr $backend_{port} {{"""
+                for mapping in mappings:
+                    nginx_config += f'\n        {mapping.static_ip} {mapping.hostname}:{port};'
 
+                nginx_config += f"""
+    }}
     server {{
-        listen {self._remote_host['ports'][idx]};
-        proxy_pass $upstream_{idx};
+        listen 0.0.0.0:{port};
+        proxy_pass $backend_{port};
     }}
-
     server {{
-        listen {self._remote_host['ports'][idx]} udp;
-        proxy_pass $upstream_{idx};
+        listen 0.0.0.0:{port} udp;
+        proxy_pass $backend_{port};
     }}"""
 
            nginx_config += """
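
To visualize what the stream template above renders, a small sketch that mirrors the loop for one hypothetical mapping (the static IP and hostname are placeholders):

port = 21
entries = [('172.20.0.2', 'ftp.example.org')]  # (static_ip, hostname) pairs, illustrative

# Mirrors the rendering loop above: NGINX keys the upstream choice on
# $server_addr, i.e. the static IP the client connected to on its per-host
# network selects which remote host the traffic is forwarded to.
nginx_config = f"""
    map $server_addr $backend_{port} {{"""
for static_ip, hostname in entries:
    nginx_config += f'\n        {static_ip} {hostname}:{port};'
nginx_config += f"""
    }}
    server {{
        listen 0.0.0.0:{port};
        proxy_pass $backend_{port};
    }}"""

print(nginx_config)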
biolib/compute_node/utils.py
@@ -4,6 +4,8 @@ from enum import Enum
 
 from biolib.biolib_logging import logger
 
+BIOLIB_PROXY_NETWORK_NAME = 'biolib-proxy-network'
+
 
 def get_package_type(package):
     package_type = int.from_bytes(package[1:2], 'big')