tetra-rp 0.10.0__py3-none-any.whl → 0.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,10 +17,9 @@ from ..utils.backoff import get_backoff_delay
 from .base import DeployableResource
 from .cloud import runpod
 from .constants import CONSOLE_URL
-from .cpu import CpuInstanceType
 from .environment import EnvironmentVars
 from .gpu import GpuGroup
-from .network_volume import NetworkVolume
+from .network_volume import NetworkVolume, DataCenter
 from .template import KeyValuePair, PodTemplate
 
 
@@ -65,6 +64,7 @@ class ServerlessResource(DeployableResource):
     _input_only = {
         "id",
         "cudaVersions",
+        "datacenter",
         "env",
         "gpus",
         "flashboot",
@@ -78,14 +78,13 @@ class ServerlessResource(DeployableResource):
     flashboot: Optional[bool] = True
     gpus: Optional[List[GpuGroup]] = [GpuGroup.ANY]  # for gpuIds
     imageName: Optional[str] = ""  # for template.imageName
-
     networkVolume: Optional[NetworkVolume] = None
+    datacenter: DataCenter = Field(default=DataCenter.EU_RO_1)
 
     # === Input Fields ===
     executionTimeoutMs: Optional[int] = None
     gpuCount: Optional[int] = 1
     idleTimeout: Optional[int] = 5
-    instanceIds: Optional[List[CpuInstanceType]] = None
     locations: Optional[str] = None
     name: str
     networkVolumeId: Optional[str] = None
@@ -133,15 +132,6 @@ class ServerlessResource(DeployableResource):
         """Convert ServerlessScalerType enum to string."""
         return value.value if value is not None else None
 
-    @field_serializer("instanceIds")
-    def serialize_instance_ids(
-        self, value: Optional[List[CpuInstanceType]]
-    ) -> Optional[List[str]]:
-        """Convert CpuInstanceType enums to strings."""
-        if value is None:
-            return None
-        return [item.value if hasattr(item, "value") else str(item) for item in value]
-
     @field_validator("gpus")
     @classmethod
     def validate_gpus(cls, value: List[GpuGroup]) -> List[GpuGroup]:
@@ -156,14 +146,24 @@ class ServerlessResource(DeployableResource):
         if self.flashboot:
             self.name += "-fb"
 
+        # Sync datacenter to locations field for API
+        if not self.locations:
+            self.locations = self.datacenter.value
+
+        # Validate datacenter consistency between endpoint and network volume
+        if self.networkVolume and self.networkVolume.dataCenterId != self.datacenter:
+            raise ValueError(
+                f"Network volume datacenter ({self.networkVolume.dataCenterId.value}) "
+                f"must match endpoint datacenter ({self.datacenter.value})"
+            )
+
         if self.networkVolume and self.networkVolume.is_created:
             # Volume already exists, use its ID
             self.networkVolumeId = self.networkVolume.id
 
-        if self.instanceIds:
-            return self._sync_input_fields_cpu()
-        else:
-            return self._sync_input_fields_gpu()
+        self._sync_input_fields_gpu()
+
+        return self
 
     def _sync_input_fields_gpu(self):
         # GPU-specific fields
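
Taken together, the additions in this hunk mean the new datacenter field now drives the locations API field and must agree with any attached network volume. A minimal sketch of both paths, assuming the imports from this file; the endpoint and volume names are illustrative:

endpoint = ServerlessResource(
    name="example",
    imageName="example/image:latest",
    datacenter=DataCenter.EU_RO_1,
    networkVolume=NetworkVolume(name="example-volume"),  # its dataCenterId must also be EU_RO_1
)
# locations should be back-filled by the validator when unset:
assert endpoint.locations == DataCenter.EU_RO_1.value

# A volume pinned to a different DataCenter raises at validation time:
#   ValueError: Network volume datacenter (...) must match endpoint datacenter (...)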
@@ -187,27 +187,16 @@ class ServerlessResource(DeployableResource):
 
         return self
 
-    def _sync_input_fields_cpu(self):
-        # Override GPU-specific fields for CPU
-        self.gpuCount = 0
-        self.allowedCudaVersions = ""
-        self.gpuIds = ""
-
-        return self
-
     async def _ensure_network_volume_deployed(self) -> None:
         """
-        Ensures network volume is deployed and ready.
+        Ensures network volume is deployed and ready if one is specified.
         Updates networkVolumeId with the deployed volume ID.
         """
         if self.networkVolumeId:
             return
 
-        if not self.networkVolume:
-            log.info(f"{self.name} requires a default network volume")
-            self.networkVolume = NetworkVolume(name=f"{self.name}-volume")
-
-        if deployedNetworkVolume := await self.networkVolume.deploy():
+        if self.networkVolume:
+            deployedNetworkVolume = await self.networkVolume.deploy()
             self.networkVolumeId = deployedNetworkVolume.id
 
     def is_deployed(self) -> bool:
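
One behavioral consequence of this hunk: an endpoint no longer conjures a default "{name}-volume" when none is given; a volume is deployed only if explicitly attached. A sketch of the now-explicit pattern (names illustrative):

volume = NetworkVolume(name="my-endpoint-volume")
endpoint = ServerlessEndpoint(
    name="my-endpoint",
    imageName="example/image:latest",
    networkVolume=volume,  # omit this and no volume is created or attached
)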
@@ -265,7 +254,7 @@
             )
 
         try:
-            # log.debug(f"[{log_group}] Payload: {payload}")
+            # log.debug(f"[{self}] Payload: {payload}")
 
             log.info(f"{self} | API /run_sync")
             response = await asyncio.to_thread(_fetch_job)
@@ -346,6 +335,26 @@ class ServerlessEndpoint(ServerlessResource):
     Inherits from ServerlessResource.
     """
 
+    def _create_new_template(self) -> PodTemplate:
+        """Create a new PodTemplate with standard configuration."""
+        return PodTemplate(
+            name=self.resource_id,
+            imageName=self.imageName,
+            env=KeyValuePair.from_dict(self.env or get_env_vars()),
+        )
+
+    def _configure_existing_template(self) -> None:
+        """Configure an existing template with necessary overrides."""
+        if self.template is None:
+            return
+
+        self.template.name = f"{self.resource_id}__{self.template.resource_id}"
+
+        if self.imageName:
+            self.template.imageName = self.imageName
+        if self.env:
+            self.template.env = KeyValuePair.from_dict(self.env)
+
     @model_validator(mode="after")
     def set_serverless_template(self):
         if not any([self.imageName, self.template, self.templateId]):
@@ -354,32 +363,13 @@
             )
 
         if not self.templateId and not self.template:
-            self.template = PodTemplate(
-                name=self.resource_id,
-                imageName=self.imageName,
-                env=KeyValuePair.from_dict(self.env or get_env_vars()),
-            )
-
+            self.template = self._create_new_template()
         elif self.template:
-            self.template.name = f"{self.resource_id}__{self.template.resource_id}"
-            if self.imageName:
-                self.template.imageName = self.imageName
-            if self.env:
-                self.template.env = KeyValuePair.from_dict(self.env)
+            self._configure_existing_template()
 
         return self
 
 
-class CpuServerlessEndpoint(ServerlessEndpoint):
-    """
-    Convenience class for CPU serverless endpoint.
-    Represents a CPU-only serverless endpoint distinct from a live serverless.
-    Inherits from ServerlessEndpoint.
-    """
-
-    instanceIds: Optional[List[CpuInstanceType]] = [CpuInstanceType.CPU3G_2_8]
-
-
 class JobOutput(BaseModel):
     id: str
     workerId: str
@@ -389,7 +379,7 @@ class JobOutput(BaseModel):
     output: Optional[Any] = None
     error: Optional[str] = ""
 
-    def model_post_init(self, __context):
+    def model_post_init(self, _: Any) -> None:
         log_group = f"Worker:{self.workerId}"
         log.info(f"{log_group} | Delay Time: {self.delayTime} ms")
         log.info(f"{log_group} | Execution Time: {self.executionTime} ms")
@@ -0,0 +1,154 @@
+"""
+CPU-specific serverless endpoint classes.
+
+This module contains all CPU-related serverless functionality, separate from GPU serverless.
+"""
+
+from typing import List, Optional
+
+from pydantic import field_serializer, model_validator
+
+from .cpu import (
+    CpuInstanceType,
+    CPU_INSTANCE_DISK_LIMITS,
+    get_max_disk_size_for_instances,
+)
+from .serverless import ServerlessEndpoint, get_env_vars
+from .template import KeyValuePair, PodTemplate
+
+
+class CpuEndpointMixin:
+    """Mixin class that provides CPU-specific functionality for serverless endpoints."""
+
+    instanceIds: Optional[List[CpuInstanceType]]
+
+    def _is_cpu_endpoint(self) -> bool:
+        """Check if this is a CPU endpoint (has instanceIds)."""
+        return (
+            hasattr(self, "instanceIds")
+            and self.instanceIds is not None
+            and len(self.instanceIds) > 0
+        )
+
+    def _get_cpu_container_disk_size(self) -> Optional[int]:
+        """Get the appropriate container disk size for CPU instances."""
+        if not self._is_cpu_endpoint():
+            return None
+        return get_max_disk_size_for_instances(self.instanceIds)
+
+    def _apply_cpu_disk_sizing(self, template: PodTemplate) -> None:
+        """Apply CPU disk sizing to a template if it's using the default size."""
+        if not self._is_cpu_endpoint():
+            return
+
+        # Only auto-size if template is using the default value
+        default_disk_size = PodTemplate.model_fields["containerDiskInGb"].default
+        if template.containerDiskInGb == default_disk_size:
+            cpu_disk_size = self._get_cpu_container_disk_size()
+            if cpu_disk_size is not None:
+                template.containerDiskInGb = cpu_disk_size
+
+    def validate_cpu_container_disk_size(self) -> None:
+        """
+        Validate that container disk size doesn't exceed limits for CPU instances.
+
+        Raises:
+            ValueError: If container disk size exceeds the limit for any CPU instance
+        """
+        if (
+            not self._is_cpu_endpoint()
+            or not hasattr(self, "template")
+            or not self.template
+            or not self.template.containerDiskInGb
+        ):
+            return
+
+        max_allowed_disk_size = self._get_cpu_container_disk_size()
+        if max_allowed_disk_size is None:
+            return
+
+        if self.template.containerDiskInGb > max_allowed_disk_size:
+            instance_limits = []
+            for instance_type in self.instanceIds:
+                limit = CPU_INSTANCE_DISK_LIMITS[instance_type]
+                instance_limits.append(f"{instance_type.value}: max {limit}GB")
+
+            raise ValueError(
+                f"Container disk size {self.template.containerDiskInGb}GB exceeds the maximum "
+                f"allowed for CPU instances. Instance limits: {', '.join(instance_limits)}. "
+                f"Maximum allowed: {max_allowed_disk_size}GB"
+            )
+
+    def _sync_cpu_fields(self):
+        """Sync CPU-specific fields, overriding GPU defaults."""
+        # Override GPU-specific fields for CPU
+        if hasattr(self, "gpuCount"):
+            self.gpuCount = 0
+        if hasattr(self, "allowedCudaVersions"):
+            self.allowedCudaVersions = ""
+        if hasattr(self, "gpuIds"):
+            self.gpuIds = ""
+
+    @field_serializer("instanceIds")
+    def serialize_instance_ids(
+        self, value: Optional[List[CpuInstanceType]]
+    ) -> Optional[List[str]]:
+        """Convert CpuInstanceType enums to strings."""
+        if value is None:
+            return None
+        return [item.value if hasattr(item, "value") else str(item) for item in value]
+
+
+class CpuServerlessEndpoint(CpuEndpointMixin, ServerlessEndpoint):
+    """
+    CPU-only serverless endpoint with automatic disk sizing and validation.
+    Represents a CPU-only serverless endpoint distinct from a live serverless.
+    """
+
+    instanceIds: Optional[List[CpuInstanceType]] = [CpuInstanceType.CPU3G_2_8]
+
+    def _create_new_template(self) -> PodTemplate:
+        """Create a new PodTemplate with CPU-appropriate disk sizing."""
+        template = PodTemplate(
+            name=self.resource_id,
+            imageName=self.imageName,
+            env=KeyValuePair.from_dict(self.env or get_env_vars()),
+        )
+        # Apply CPU-specific disk sizing
+        self._apply_cpu_disk_sizing(template)
+        return template
+
+    def _configure_existing_template(self) -> None:
+        """Configure an existing template with necessary overrides and CPU sizing."""
+        if self.template is None:
+            return
+
+        self.template.name = f"{self.resource_id}__{self.template.resource_id}"
+
+        if self.imageName:
+            self.template.imageName = self.imageName
+        if self.env:
+            self.template.env = KeyValuePair.from_dict(self.env)
+
+        # Apply CPU-specific disk sizing
+        self._apply_cpu_disk_sizing(self.template)
+
+    @model_validator(mode="after")
+    def set_serverless_template(self):
+        # Sync CPU-specific fields first
+        self._sync_cpu_fields()
+
+        if not any([self.imageName, self.template, self.templateId]):
+            raise ValueError(
+                "Either imageName, template, or templateId must be provided"
+            )
+
+        if not self.templateId and not self.template:
+            self.template = self._create_new_template()
+        elif self.template:
+            self._configure_existing_template()
+
+        # Validate container disk size for CPU instances
+        self.validate_cpu_container_disk_size()
+
+        return self
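
A sketch of how this relocated CPU functionality is meant to be used, inferred from the validators above; instance types and sizes are illustrative, and the actual limits come from CPU_INSTANCE_DISK_LIMITS:

# Defaults to CpuInstanceType.CPU3G_2_8; _apply_cpu_disk_sizing() replaces the
# PodTemplate default containerDiskInGb with the instances' maximum.
cpu_endpoint = CpuServerlessEndpoint(
    name="cpu-example",
    imageName="example/image:latest",
)

# An explicit template whose disk exceeds the per-instance limit fails fast:
big = PodTemplate(name="big", imageName="example/image:latest", containerDiskInGb=500)
try:
    CpuServerlessEndpoint(name="cpu-example", template=big)
except ValueError as e:
    print(e)  # "Container disk size 500GB exceeds the maximum allowed..."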
@@ -22,7 +22,7 @@ class KeyValuePair(BaseModel):
 class PodTemplate(BaseResource):
     advancedStart: Optional[bool] = False
     config: Optional[Dict[str, Any]] = {}
-    containerDiskInGb: Optional[int] = 10
+    containerDiskInGb: Optional[int] = 64
     containerRegistryAuthId: Optional[str] = ""
     dockerArgs: Optional[str] = ""
     env: Optional[List[KeyValuePair]] = []
@@ -0,0 +1,260 @@
+"""
+Cross-platform file locking utilities.
+
+Provides unified file locking interface that works across Windows, macOS, and Linux.
+Uses platform-appropriate locking mechanisms:
+- Windows: msvcrt.locking()
+- Unix/Linux/macOS: fcntl.flock()
+- Fallback: Basic file existence checking (limited protection)
+"""
+
+import contextlib
+import logging
+import platform
+import time
+from pathlib import Path
+from typing import BinaryIO, Optional
+
+log = logging.getLogger(__name__)
+
+# Platform detection
+_IS_WINDOWS = platform.system() == "Windows"
+_IS_UNIX = platform.system() in ("Linux", "Darwin")
+
+# Initialize availability flags
+_WINDOWS_LOCKING_AVAILABLE = False
+_UNIX_LOCKING_AVAILABLE = False
+
+# Import platform-specific modules
+if _IS_WINDOWS:
+    try:
+        import msvcrt
+
+        _WINDOWS_LOCKING_AVAILABLE = True
+    except ImportError:
+        msvcrt = None
+        log.warning("msvcrt not available on Windows platform")
+
+if _IS_UNIX:
+    try:
+        import fcntl
+
+        _UNIX_LOCKING_AVAILABLE = True
+    except ImportError:
+        fcntl = None
+        log.warning("fcntl not available on Unix platform")
+
+
+class FileLockError(Exception):
+    """Exception raised when file locking operations fail."""
+
+    pass
+
+
+class FileLockTimeout(FileLockError):
+    """Exception raised when file locking times out."""
+
+    pass
+
+
+@contextlib.contextmanager
+def file_lock(
+    file_handle: BinaryIO,
+    exclusive: bool = True,
+    timeout: Optional[float] = 10.0,
+    retry_interval: float = 0.1,
+):
+    """
+    Cross-platform file locking context manager.
+
+    Args:
+        file_handle: Open file handle to lock
+        exclusive: True for exclusive lock, False for shared lock
+        timeout: Maximum seconds to wait for lock (None = no timeout)
+        retry_interval: Seconds to wait between lock attempts
+
+    Raises:
+        FileLockTimeout: If lock cannot be acquired within timeout
+        FileLockError: If locking operation fails
+
+    Usage:
+        with open("file.dat", "rb") as f:
+            with file_lock(f, exclusive=False):  # Shared read lock
+                data = f.read()
+
+        with open("file.dat", "wb") as f:
+            with file_lock(f, exclusive=True):  # Exclusive write lock
+                f.write(data)
+    """
+    lock_acquired = False
+    start_time = time.time()
+
+    try:
+        # Platform-specific locking
+        while not lock_acquired:
+            try:
+                if _IS_WINDOWS and _WINDOWS_LOCKING_AVAILABLE:
+                    _acquire_windows_lock(file_handle, exclusive)
+                elif _IS_UNIX and _UNIX_LOCKING_AVAILABLE:
+                    _acquire_unix_lock(file_handle, exclusive)
+                else:
+                    # Fallback - limited protection via file existence
+                    _acquire_fallback_lock(file_handle, exclusive, timeout)
+
+                lock_acquired = True
+                log.debug(f"File lock acquired (exclusive={exclusive})")
+
+            except (OSError, IOError, FileLockError) as e:
+                # Check timeout
+                if timeout is not None and (time.time() - start_time) >= timeout:
+                    raise FileLockTimeout(
+                        f"Could not acquire file lock within {timeout} seconds: {e}"
+                    ) from e
+
+                # Retry after interval
+                time.sleep(retry_interval)
+
+        # Lock acquired successfully
+        yield
+
+    finally:
+        # Release lock
+        if lock_acquired:
+            try:
+                if _IS_WINDOWS and _WINDOWS_LOCKING_AVAILABLE:
+                    _release_windows_lock(file_handle)
+                elif _IS_UNIX and _UNIX_LOCKING_AVAILABLE:
+                    _release_unix_lock(file_handle)
+                else:
+                    _release_fallback_lock(file_handle)
+
+                log.debug("File lock released")
+
+            except Exception as e:
+                log.error(f"Error releasing file lock: {e}")
+                # Don't raise - we're in cleanup
+
+
+def _acquire_windows_lock(file_handle: BinaryIO, exclusive: bool) -> None:
+    """Acquire Windows file lock using msvcrt.locking()."""
+    if not _WINDOWS_LOCKING_AVAILABLE:
+        raise FileLockError("Windows file locking not available (msvcrt missing)")
+
+    # Windows locking modes
+    if exclusive:
+        lock_mode = msvcrt.LK_NBLCK  # Non-blocking exclusive lock
+    else:
+        # Windows doesn't have shared locks in msvcrt
+        # Fall back to exclusive for compatibility
+        lock_mode = msvcrt.LK_NBLCK
+        log.debug("Windows: Using exclusive lock instead of shared (msvcrt limitation)")
+
+    try:
+        # Lock the entire file (position 0, length 1)
+        file_handle.seek(0)
+        msvcrt.locking(file_handle.fileno(), lock_mode, 1)
+    except OSError as e:
+        raise FileLockError(f"Failed to acquire Windows file lock: {e}") from e
+
+
+def _release_windows_lock(file_handle: BinaryIO) -> None:
+    """Release Windows file lock."""
+    if not _WINDOWS_LOCKING_AVAILABLE:
+        return
+
+    try:
+        file_handle.seek(0)
+        msvcrt.locking(file_handle.fileno(), msvcrt.LK_UNLCK, 1)
+    except OSError as e:
+        raise FileLockError(f"Failed to release Windows file lock: {e}") from e
+
+
+def _acquire_unix_lock(file_handle: BinaryIO, exclusive: bool) -> None:
+    """Acquire Unix file lock using fcntl.flock()."""
+    if not _UNIX_LOCKING_AVAILABLE:
+        raise FileLockError("Unix file locking not available (fcntl missing)")
+
+    # Unix locking modes
+    if exclusive:
+        lock_mode = fcntl.LOCK_EX | fcntl.LOCK_NB  # Non-blocking exclusive
+    else:
+        lock_mode = fcntl.LOCK_SH | fcntl.LOCK_NB  # Non-blocking shared
+
+    try:
+        fcntl.flock(file_handle.fileno(), lock_mode)
+    except (OSError, IOError) as e:
+        raise FileLockError(f"Failed to acquire Unix file lock: {e}") from e
+
+
+def _release_unix_lock(file_handle: BinaryIO) -> None:
+    """Release Unix file lock."""
+    if not _UNIX_LOCKING_AVAILABLE:
+        return
+
+    try:
+        fcntl.flock(file_handle.fileno(), fcntl.LOCK_UN)
+    except (OSError, IOError) as e:
+        raise FileLockError(f"Failed to release Unix file lock: {e}") from e
+
+
+def _acquire_fallback_lock(
+    file_handle: BinaryIO, exclusive: bool, timeout: Optional[float]
+) -> None:
+    """
+    Fallback locking using lock files.
+
+    This provides minimal protection but doesn't prevent all race conditions.
+    It's better than no locking but not as robust as OS-level file locks.
+    """
+    log.warning(
+        "Using fallback file locking - limited protection against race conditions"
+    )
+
+    # Create lock file based on the original file
+    file_path = (
+        Path(file_handle.name) if hasattr(file_handle, "name") else Path("unknown")
+    )
+    lock_file = file_path.with_suffix(file_path.suffix + ".lock")
+
+    start_time = time.time()
+
+    while True:
+        try:
+            # Try to create lock file atomically
+            lock_file.touch(mode=0o600, exist_ok=False)
+            log.debug(f"Fallback lock file created: {lock_file}")
+            return
+
+        except FileExistsError:
+            # Lock file exists, check timeout
+            if timeout is not None and (time.time() - start_time) >= timeout:
+                raise FileLockError(f"Fallback lock timeout: {lock_file} exists")
+
+            # Wait and retry
+            time.sleep(0.1)
+
+
+def _release_fallback_lock(file_handle: BinaryIO) -> None:
+    """Release fallback lock by removing lock file."""
+    try:
+        file_path = (
+            Path(file_handle.name) if hasattr(file_handle, "name") else Path("unknown")
+        )
+        lock_file = file_path.with_suffix(file_path.suffix + ".lock")
+
+        if lock_file.exists():
+            lock_file.unlink()
+            log.debug(f"Fallback lock file removed: {lock_file}")
+
+    except Exception as e:
+        log.error(f"Failed to remove fallback lock file: {e}")
+
+
+def get_platform_info() -> dict:
+    """Get information about current platform and available locking mechanisms."""
+    return {
+        "platform": platform.system(),
+        "windows_locking": _IS_WINDOWS and _WINDOWS_LOCKING_AVAILABLE,
+        "unix_locking": _IS_UNIX and _UNIX_LOCKING_AVAILABLE,
+        "fallback_only": not (_WINDOWS_LOCKING_AVAILABLE or _UNIX_LOCKING_AVAILABLE),
+    }
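
Beyond the docstring's read/write examples, a short sketch of the contention path and the platform probe, using only names defined in this module (the file name is illustrative):

# A second process that cannot acquire the exclusive lock within `timeout`
# seconds gets FileLockTimeout (a FileLockError subclass).
with open("cache.dat", "wb") as f:
    try:
        with file_lock(f, exclusive=True, timeout=2.0):
            f.write(b"payload")
    except FileLockTimeout:
        print("another process holds the lock")

print(get_platform_info())
# e.g. {'platform': 'Linux', 'windows_locking': False,
#       'unix_locking': True, 'fallback_only': False}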
@@ -1,7 +1,21 @@
+import threading
+
+
 class SingletonMixin:
+    """Thread-safe singleton mixin class.
+
+    Uses threading.Lock to ensure only one instance is created
+    per class, even under concurrent access.
+    """
+
     _instances = {}
+    _lock = threading.Lock()
 
     def __new__(cls, *args, **kwargs):
+        # Use double-checked locking pattern for performance
         if cls not in cls._instances:
-            cls._instances[cls] = super().__new__(cls)
+            with cls._lock:
+                # Check again inside the lock (double-checked locking)
+                if cls not in cls._instances:
+                    cls._instances[cls] = super().__new__(cls)
         return cls._instances[cls]
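
The guarantee the lock adds, in a short sketch (the Config class is illustrative):

import threading

class Config(SingletonMixin):
    pass

instances = []
threads = [threading.Thread(target=lambda: instances.append(Config())) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
# One instance per class, even when __new__ races across threads:
assert all(i is instances[0] for i in instances)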
tetra_rp/execute_class.py CHANGED
@@ -202,6 +202,8 @@ def create_remote_class(
         resource_config: ServerlessResource,
         dependencies: Optional[List[str]],
         system_dependencies: Optional[List[str]],
+        accelerate_downloads: bool,
+        hf_models_to_cache: Optional[List[str]],
         extra: dict,
     ):
         """
@@ -219,6 +221,8 @@ def create_remote_class(
             self._resource_config = resource_config
             self._dependencies = dependencies or []
             self._system_dependencies = system_dependencies or []
+            self._accelerate_downloads = accelerate_downloads
+            self._hf_models_to_cache = hf_models_to_cache
             self._extra = extra
             self._constructor_args = args
             self._constructor_kwargs = kwargs
@@ -302,6 +306,8 @@ def create_remote_class(
                 constructor_kwargs=constructor_kwargs,
                 dependencies=self._dependencies,
                 system_dependencies=self._system_dependencies,
+                accelerate_downloads=self._accelerate_downloads,
+                hf_models_to_cache=self._hf_models_to_cache,
                 instance_id=self._instance_id,
                 create_new_instance=not hasattr(
                     self, "_stub"