tetra-rp 0.17.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of tetra-rp might be problematic.

Files changed (66)
  1. tetra_rp/__init__.py +43 -0
  2. tetra_rp/cli/__init__.py +0 -0
  3. tetra_rp/cli/commands/__init__.py +1 -0
  4. tetra_rp/cli/commands/build.py +534 -0
  5. tetra_rp/cli/commands/deploy.py +370 -0
  6. tetra_rp/cli/commands/init.py +119 -0
  7. tetra_rp/cli/commands/resource.py +191 -0
  8. tetra_rp/cli/commands/run.py +100 -0
  9. tetra_rp/cli/main.py +85 -0
  10. tetra_rp/cli/utils/__init__.py +1 -0
  11. tetra_rp/cli/utils/conda.py +127 -0
  12. tetra_rp/cli/utils/deployment.py +172 -0
  13. tetra_rp/cli/utils/ignore.py +139 -0
  14. tetra_rp/cli/utils/skeleton.py +184 -0
  15. tetra_rp/cli/utils/skeleton_template/.env.example +3 -0
  16. tetra_rp/cli/utils/skeleton_template/.flashignore +40 -0
  17. tetra_rp/cli/utils/skeleton_template/.gitignore +44 -0
  18. tetra_rp/cli/utils/skeleton_template/README.md +256 -0
  19. tetra_rp/cli/utils/skeleton_template/main.py +43 -0
  20. tetra_rp/cli/utils/skeleton_template/requirements.txt +1 -0
  21. tetra_rp/cli/utils/skeleton_template/workers/__init__.py +0 -0
  22. tetra_rp/cli/utils/skeleton_template/workers/cpu/__init__.py +20 -0
  23. tetra_rp/cli/utils/skeleton_template/workers/cpu/endpoint.py +38 -0
  24. tetra_rp/cli/utils/skeleton_template/workers/gpu/__init__.py +20 -0
  25. tetra_rp/cli/utils/skeleton_template/workers/gpu/endpoint.py +62 -0
  26. tetra_rp/client.py +128 -0
  27. tetra_rp/config.py +29 -0
  28. tetra_rp/core/__init__.py +0 -0
  29. tetra_rp/core/api/__init__.py +6 -0
  30. tetra_rp/core/api/runpod.py +319 -0
  31. tetra_rp/core/exceptions.py +50 -0
  32. tetra_rp/core/resources/__init__.py +37 -0
  33. tetra_rp/core/resources/base.py +47 -0
  34. tetra_rp/core/resources/cloud.py +4 -0
  35. tetra_rp/core/resources/constants.py +4 -0
  36. tetra_rp/core/resources/cpu.py +146 -0
  37. tetra_rp/core/resources/environment.py +41 -0
  38. tetra_rp/core/resources/gpu.py +68 -0
  39. tetra_rp/core/resources/live_serverless.py +62 -0
  40. tetra_rp/core/resources/network_volume.py +148 -0
  41. tetra_rp/core/resources/resource_manager.py +145 -0
  42. tetra_rp/core/resources/serverless.py +463 -0
  43. tetra_rp/core/resources/serverless_cpu.py +162 -0
  44. tetra_rp/core/resources/template.py +94 -0
  45. tetra_rp/core/resources/utils.py +50 -0
  46. tetra_rp/core/utils/__init__.py +0 -0
  47. tetra_rp/core/utils/backoff.py +43 -0
  48. tetra_rp/core/utils/constants.py +10 -0
  49. tetra_rp/core/utils/file_lock.py +260 -0
  50. tetra_rp/core/utils/json.py +33 -0
  51. tetra_rp/core/utils/lru_cache.py +75 -0
  52. tetra_rp/core/utils/singleton.py +21 -0
  53. tetra_rp/core/validation.py +44 -0
  54. tetra_rp/execute_class.py +319 -0
  55. tetra_rp/logger.py +34 -0
  56. tetra_rp/protos/__init__.py +0 -0
  57. tetra_rp/protos/remote_execution.py +148 -0
  58. tetra_rp/stubs/__init__.py +5 -0
  59. tetra_rp/stubs/live_serverless.py +155 -0
  60. tetra_rp/stubs/registry.py +117 -0
  61. tetra_rp/stubs/serverless.py +30 -0
  62. tetra_rp-0.17.1.dist-info/METADATA +976 -0
  63. tetra_rp-0.17.1.dist-info/RECORD +66 -0
  64. tetra_rp-0.17.1.dist-info/WHEEL +5 -0
  65. tetra_rp-0.17.1.dist-info/entry_points.txt +2 -0
  66. tetra_rp-0.17.1.dist-info/top_level.txt +1 -0
tetra_rp/core/resources/template.py
@@ -0,0 +1,94 @@
+ import requests
+ from typing import Dict, List, Optional, Any
+ from pydantic import BaseModel, model_validator
+ from .base import BaseResource
+
+
+ class KeyValuePair(BaseModel):
+     key: str
+     value: str
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, str]) -> "List[KeyValuePair]":
+         """
+         Create a list of KeyValuePair instances from a dictionary.
+         """
+         if not isinstance(data, dict):
+             raise ValueError("Input must be a dictionary.")
+
+         return [cls(key=key, value=value) for key, value in data.items()]
+
+
+ class PodTemplate(BaseResource):
+     advancedStart: Optional[bool] = False
+     config: Optional[Dict[str, Any]] = {}
+     containerDiskInGb: Optional[int] = 64
+     containerRegistryAuthId: Optional[str] = ""
+     dockerArgs: Optional[str] = ""
+     env: Optional[List[KeyValuePair]] = []
+     imageName: Optional[str] = ""
+     name: Optional[str] = ""
+     ports: Optional[str] = ""
+     startScript: Optional[str] = ""
+
+     @model_validator(mode="after")
+     def sync_input_fields(self):
+         self.name = f"{self.name}__{self.resource_id}"
+         return self
+
+
+ def update_system_dependencies(
+     template_id, token, system_dependencies, base_entry_cmd=None
+ ):
+     """
+     Updates Runpod template with system dependencies installed via apt-get,
+     and appends the app start command.
+
+     Args:
+         template_id (str): Runpod template ID.
+         token (str): Runpod API token.
+         system_dependencies (List[str]): List of apt packages to install.
+         base_entry_cmd (List[str]): The default command to run the app, e.g. ["uv", "run", "handler.py"]
+     Returns:
+         dict: API response JSON or error info.
+     """
+
+     # Compose apt-get install command if any packages specified
+     apt_cmd = ""
+     if system_dependencies:
+         joined_pkgs = " ".join(system_dependencies)
+         apt_cmd = f"apt-get update && apt-get install -y {joined_pkgs} && "
+
+     # Default start command if not provided
+     app_cmd = base_entry_cmd or ["uv", "run", "handler.py"]
+     app_cmd_str = " ".join(app_cmd)
+
+     # Full command to run in entrypoint shell
+     full_cmd = f"{apt_cmd}exec {app_cmd_str}"
+
+     payload = {
+         # other required fields like disk, env, image, etc., should be fetched or passed in real usage
+         "dockerEntrypoint": ["/bin/bash", "-c", full_cmd],
+         "dockerStartCmd": [],
+         # placeholder values, replace as needed or fetch from current template state
+         "containerDiskInGb": 50,
+         "containerRegistryAuthId": "",
+         "env": {},
+         "imageName": "your-image-name",
+         "isPublic": False,
+         "name": "your-template-name",
+         "ports": ["8888/http", "22/tcp"],
+         "readme": "",
+         "volumeInGb": 20,
+         "volumeMountPath": "/workspace",
+     }
+
+     headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
+
+     url = f"https://rest.runpod.io/v1/templates/{template_id}/update"
+     response = requests.post(url, json=payload, headers=headers)
+
+     try:
+         return response.json()
+     except Exception:
+         return {"error": "Invalid JSON response", "text": response.text}
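For orientation, a minimal usage sketch of the models above (values are illustrative; `PodTemplate` relies on the `resource_id` that `BaseResource` evidently provides, which the validator appends to the name):

```python
from tetra_rp.core.resources.template import KeyValuePair, PodTemplate

# Turn a plain dict of env vars into the KeyValuePair models the template expects
env = KeyValuePair.from_dict({"HF_TOKEN": "xxx", "PORT": "8000"})
# -> [KeyValuePair(key='HF_TOKEN', value='xxx'), KeyValuePair(key='PORT', value='8000')]

# sync_input_fields runs after validation and suffixes the name,
# yielding something like "my-worker__<resource_id>"
template = PodTemplate(name="my-worker", imageName="myorg/my-image:latest", env=env)
```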
tetra_rp/core/resources/utils.py
@@ -0,0 +1,50 @@
+ from typing import Callable, Any, List, Union
+ from pydantic import BaseModel
+ from .gpu import GpuType, GpuTypeDetail
+ from .serverless import ServerlessEndpoint
+
+
+ """
+ Define the mapping for the methods and their return types
+ Only include methods from runpod.*
+ """
+ RUNPOD_TYPED_OPERATIONS = {
+     "get_gpus": List[GpuType],
+     "get_gpu": GpuTypeDetail,
+     "get_endpoints": List[ServerlessEndpoint],
+ }
+
+
+ def inquire(method: Callable, *args, **kwargs) -> Union[List[Any], Any]:
+     """
+     This function dynamically determines the return type of the provided method
+     based on a predefined mapping (`RUNPOD_TYPED_OPERATIONS`) and validates the result using
+     Pydantic models if applicable.
+
+     Refer to `RUNPOD_TYPED_OPERATIONS` for the mapping.
+
+     Example:
+     ----------
+     >>> import runpod
+     >>> inquire(runpod.get_gpus)
+     [
+         GpuType(id='NVIDIA A100 80GB', displayName='A100 80GB', memoryInGb=80),
+         GpuType(id='NVIDIA A100 40GB', displayName='A100 40GB', memoryInGb=40),
+         GpuType(id='NVIDIA A10', displayName='A10', memoryInGb=24)
+     ]
+     """
+     method_name = method.__name__
+     return_type = RUNPOD_TYPED_OPERATIONS.get(method_name)
+
+     raw_result = method(*args, **kwargs)
+
+     if hasattr(return_type, "__origin__") and return_type.__origin__ is list:
+         # List case
+         model_type = return_type.__args__[0]
+         if issubclass(model_type, BaseModel):
+             return [model_type.model_validate(item) for item in raw_result]
+     elif isinstance(return_type, type) and issubclass(return_type, BaseModel):
+         # Single object case
+         return return_type.model_validate(raw_result)
+     else:
+         raise ValueError(f"Unsupported return type for method '{method_name}'")
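A hedged usage sketch: `inquire` wraps a `runpod` SDK call and validates the raw result against the mapped Pydantic type (this assumes the SDK exposes `get_gpus`/`get_gpu` as the mapping implies); any method outside `RUNPOD_TYPED_OPERATIONS` falls through to the `ValueError`:

```python
import runpod
from tetra_rp.core.resources.utils import inquire

gpus = inquire(runpod.get_gpus)  # List[GpuType], each item Pydantic-validated
detail = inquire(runpod.get_gpu, "NVIDIA A100 80GB")  # single GpuTypeDetail

# A method missing from RUNPOD_TYPED_OPERATIONS raises:
# ValueError: Unsupported return type for method '...'
```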
tetra_rp/core/utils/__init__.py
File without changes
tetra_rp/core/utils/backoff.py
@@ -0,0 +1,43 @@
+ import math
+ import random
+ from enum import Enum
+
+
+ class BackoffStrategy(str, Enum):
+     EXPONENTIAL = "exponential"
+     LINEAR = "linear"
+     LOGARITHMIC = "logarithmic"
+
+
+ def get_backoff_delay(
+     attempt: int,
+     base: float = 0.1,
+     max_seconds: float = 10.0,
+     jitter: float = 0.2,
+     strategy: BackoffStrategy = BackoffStrategy.EXPONENTIAL,
+ ) -> float:
+     """
+     Returns a backoff delay in seconds based on the number of attempts and strategy.
+
+     Parameters:
+     - attempt (int): The number of failed attempts or polls.
+     - base (float): The base delay time in seconds.
+     - max_seconds (float): The maximum delay.
+     - jitter (float): Random jitter as a fraction (e.g., 0.2 = ±20%); prevents thundering herd.
+     - strategy (BackoffStrategy): The backoff curve to apply.
+
+     Returns:
+     - float: The delay in seconds.
+     """
+     if strategy == BackoffStrategy.EXPONENTIAL:
+         delay = base * (2**attempt)
+     elif strategy == BackoffStrategy.LINEAR:
+         delay = base + (attempt * base)
+     elif strategy == BackoffStrategy.LOGARITHMIC:
+         delay = base * math.log2(attempt + 2)
+     else:
+         raise ValueError(f"Unsupported backoff strategy: {strategy}")
+
+     # Clamp to max and apply jitter
+     delay = min(delay, max_seconds)
+     return delay * random.uniform(1 - jitter, 1 + jitter)
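A minimal sketch of the intended call pattern: a polling loop that sleeps for `get_backoff_delay(attempt)` between tries (the `poll_until` helper below is hypothetical, not part of the package):

```python
import time

from tetra_rp.core.utils.backoff import BackoffStrategy, get_backoff_delay

def poll_until(check, max_attempts: int = 8):
    """Hypothetical helper: retry `check()` with exponential backoff."""
    for attempt in range(max_attempts):
        result = check()
        if result:
            return result
        # Delays grow 0.1s, 0.2s, 0.4s, ... capped at 10s, with ±20% jitter
        time.sleep(get_backoff_delay(attempt, strategy=BackoffStrategy.EXPONENTIAL))
    raise TimeoutError(f"no result after {max_attempts} attempts")
```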
tetra_rp/core/utils/constants.py
@@ -0,0 +1,10 @@
+ """
+ Constants for utility modules and caching configurations.
+
+ This module contains configurable constants used across the tetra-rp codebase
+ to ensure consistency and easy maintenance.
+ """
+
+ # Cache key generation constants
+ HASH_TRUNCATE_LENGTH = 16  # Length to truncate hash values for cache keys
+ UUID_FALLBACK_LENGTH = 8  # Length to truncate UUID values for fallback keys
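How these constants are applied is not shown in this hunk; a purely hypothetical sketch of the pattern the comments describe (truncated digest for cache keys, truncated UUID as fallback):

```python
import hashlib
import uuid

from tetra_rp.core.utils.constants import HASH_TRUNCATE_LENGTH, UUID_FALLBACK_LENGTH

def make_cache_key(payload: str) -> str:
    # Illustration only, not the package's actual key derivation
    if payload:
        return hashlib.sha256(payload.encode()).hexdigest()[:HASH_TRUNCATE_LENGTH]
    return uuid.uuid4().hex[:UUID_FALLBACK_LENGTH]
```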
tetra_rp/core/utils/file_lock.py
@@ -0,0 +1,260 @@
+ """
+ Cross-platform file locking utilities.
+
+ Provides unified file locking interface that works across Windows, macOS, and Linux.
+ Uses platform-appropriate locking mechanisms:
+ - Windows: msvcrt.locking()
+ - Unix/Linux/macOS: fcntl.flock()
+ - Fallback: Basic file existence checking (limited protection)
+ """
+
+ import contextlib
+ import logging
+ import platform
+ import time
+ from pathlib import Path
+ from typing import BinaryIO, Optional
+
+ log = logging.getLogger(__name__)
+
+ # Platform detection
+ _IS_WINDOWS = platform.system() == "Windows"
+ _IS_UNIX = platform.system() in ("Linux", "Darwin")
+
+ # Initialize availability flags
+ _WINDOWS_LOCKING_AVAILABLE = False
+ _UNIX_LOCKING_AVAILABLE = False
+
+ # Import platform-specific modules
+ if _IS_WINDOWS:
+     try:
+         import msvcrt
+
+         _WINDOWS_LOCKING_AVAILABLE = True
+     except ImportError:
+         msvcrt = None
+         log.warning("msvcrt not available on Windows platform")
+
+ if _IS_UNIX:
+     try:
+         import fcntl
+
+         _UNIX_LOCKING_AVAILABLE = True
+     except ImportError:
+         fcntl = None
+         log.warning("fcntl not available on Unix platform")
+
+
+ class FileLockError(Exception):
+     """Exception raised when file locking operations fail."""
+
+     pass
+
+
+ class FileLockTimeout(FileLockError):
+     """Exception raised when file locking times out."""
+
+     pass
+
+
+ @contextlib.contextmanager
+ def file_lock(
+     file_handle: BinaryIO,
+     exclusive: bool = True,
+     timeout: Optional[float] = 10.0,
+     retry_interval: float = 0.1,
+ ):
+     """
+     Cross-platform file locking context manager.
+
+     Args:
+         file_handle: Open file handle to lock
+         exclusive: True for exclusive lock, False for shared lock
+         timeout: Maximum seconds to wait for lock (None = no timeout)
+         retry_interval: Seconds to wait between lock attempts
+
+     Raises:
+         FileLockTimeout: If lock cannot be acquired within timeout
+         FileLockError: If locking operation fails
+
+     Usage:
+         with open("file.dat", "rb") as f:
+             with file_lock(f, exclusive=False):  # Shared read lock
+                 data = f.read()
+
+         with open("file.dat", "wb") as f:
+             with file_lock(f, exclusive=True):  # Exclusive write lock
+                 f.write(data)
+     """
+     lock_acquired = False
+     start_time = time.time()
+
+     try:
+         # Platform-specific locking
+         while not lock_acquired:
+             try:
+                 if _IS_WINDOWS and _WINDOWS_LOCKING_AVAILABLE:
+                     _acquire_windows_lock(file_handle, exclusive)
+                 elif _IS_UNIX and _UNIX_LOCKING_AVAILABLE:
+                     _acquire_unix_lock(file_handle, exclusive)
+                 else:
+                     # Fallback - limited protection via file existence
+                     _acquire_fallback_lock(file_handle, exclusive, timeout)
+
+                 lock_acquired = True
+                 log.debug(f"File lock acquired (exclusive={exclusive})")
+
+             except (OSError, IOError, FileLockError) as e:
+                 # Check timeout
+                 if timeout is not None and (time.time() - start_time) >= timeout:
+                     raise FileLockTimeout(
+                         f"Could not acquire file lock within {timeout} seconds: {e}"
+                     ) from e
+
+                 # Retry after interval
+                 time.sleep(retry_interval)
+
+         # Lock acquired successfully
+         yield
+
+     finally:
+         # Release lock
+         if lock_acquired:
+             try:
+                 if _IS_WINDOWS and _WINDOWS_LOCKING_AVAILABLE:
+                     _release_windows_lock(file_handle)
+                 elif _IS_UNIX and _UNIX_LOCKING_AVAILABLE:
+                     _release_unix_lock(file_handle)
+                 else:
+                     _release_fallback_lock(file_handle)
+
+                 log.debug("File lock released")
+
+             except Exception as e:
+                 log.error(f"Error releasing file lock: {e}")
+                 # Don't raise - we're in cleanup
+
+
+ def _acquire_windows_lock(file_handle: BinaryIO, exclusive: bool) -> None:
+     """Acquire Windows file lock using msvcrt.locking()."""
+     if not _WINDOWS_LOCKING_AVAILABLE:
+         raise FileLockError("Windows file locking not available (msvcrt missing)")
+
+     # Windows locking modes
+     if exclusive:
+         lock_mode = msvcrt.LK_NBLCK  # Non-blocking exclusive lock
+     else:
+         # Windows doesn't have shared locks in msvcrt
+         # Fall back to exclusive for compatibility
+         lock_mode = msvcrt.LK_NBLCK
+         log.debug("Windows: Using exclusive lock instead of shared (msvcrt limitation)")
+
+     try:
+         # Lock the entire file (position 0, length 1)
+         file_handle.seek(0)
+         msvcrt.locking(file_handle.fileno(), lock_mode, 1)
+     except OSError as e:
+         raise FileLockError(f"Failed to acquire Windows file lock: {e}") from e
+
+
+ def _release_windows_lock(file_handle: BinaryIO) -> None:
+     """Release Windows file lock."""
+     if not _WINDOWS_LOCKING_AVAILABLE:
+         return
+
+     try:
+         file_handle.seek(0)
+         msvcrt.locking(file_handle.fileno(), msvcrt.LK_UNLCK, 1)
+     except OSError as e:
+         raise FileLockError(f"Failed to release Windows file lock: {e}") from e
+
+
+ def _acquire_unix_lock(file_handle: BinaryIO, exclusive: bool) -> None:
+     """Acquire Unix file lock using fcntl.flock()."""
+     if not _UNIX_LOCKING_AVAILABLE:
+         raise FileLockError("Unix file locking not available (fcntl missing)")
+
+     # Unix locking modes
+     if exclusive:
+         lock_mode = fcntl.LOCK_EX | fcntl.LOCK_NB  # Non-blocking exclusive
+     else:
+         lock_mode = fcntl.LOCK_SH | fcntl.LOCK_NB  # Non-blocking shared
+
+     try:
+         fcntl.flock(file_handle.fileno(), lock_mode)
+     except (OSError, IOError) as e:
+         raise FileLockError(f"Failed to acquire Unix file lock: {e}") from e
+
+
+ def _release_unix_lock(file_handle: BinaryIO) -> None:
+     """Release Unix file lock."""
+     if not _UNIX_LOCKING_AVAILABLE:
+         return
+
+     try:
+         fcntl.flock(file_handle.fileno(), fcntl.LOCK_UN)
+     except (OSError, IOError) as e:
+         raise FileLockError(f"Failed to release Unix file lock: {e}") from e
+
+
+ def _acquire_fallback_lock(
+     file_handle: BinaryIO, exclusive: bool, timeout: Optional[float]
+ ) -> None:
+     """
+     Fallback locking using lock files.
+
+     This provides minimal protection but doesn't prevent all race conditions.
+     It's better than no locking but not as robust as OS-level file locks.
+     """
+     log.warning(
+         "Using fallback file locking - limited protection against race conditions"
+     )
+
+     # Create lock file based on the original file
+     file_path = (
+         Path(file_handle.name) if hasattr(file_handle, "name") else Path("unknown")
+     )
+     lock_file = file_path.with_suffix(file_path.suffix + ".lock")
+
+     start_time = time.time()
+
+     while True:
+         try:
+             # Try to create lock file atomically
+             lock_file.touch(mode=0o600, exist_ok=False)
+             log.debug(f"Fallback lock file created: {lock_file}")
+             return
+
+         except FileExistsError:
+             # Lock file exists, check timeout
+             if timeout is not None and (time.time() - start_time) >= timeout:
+                 raise FileLockError(f"Fallback lock timeout: {lock_file} exists")
+
+             # Wait and retry
+             time.sleep(0.1)
+
+
+ def _release_fallback_lock(file_handle: BinaryIO) -> None:
+     """Release fallback lock by removing lock file."""
+     try:
+         file_path = (
+             Path(file_handle.name) if hasattr(file_handle, "name") else Path("unknown")
+         )
+         lock_file = file_path.with_suffix(file_path.suffix + ".lock")
+
+         if lock_file.exists():
+             lock_file.unlink()
+             log.debug(f"Fallback lock file removed: {lock_file}")
+
+     except Exception as e:
+         log.error(f"Failed to remove fallback lock file: {e}")
+
+
+ def get_platform_info() -> dict:
+     """Get information about current platform and available locking mechanisms."""
+     return {
+         "platform": platform.system(),
+         "windows_locking": _IS_WINDOWS and _WINDOWS_LOCKING_AVAILABLE,
+         "unix_locking": _IS_UNIX and _UNIX_LOCKING_AVAILABLE,
+         "fallback_only": not (_WINDOWS_LOCKING_AVAILABLE or _UNIX_LOCKING_AVAILABLE),
+     }
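A short sketch of the context manager in use, including the timeout path (assumes `state.bin` already exists so it can be opened `r+b`):

```python
from tetra_rp.core.utils.file_lock import FileLockTimeout, file_lock, get_platform_info

print(get_platform_info())  # e.g. {'platform': 'Linux', 'unix_locking': True, ...}

try:
    with open("state.bin", "r+b") as f:
        with file_lock(f, exclusive=True, timeout=5.0):
            data = f.read()
            f.seek(0)
            f.write(data)  # read-modify-write under an exclusive lock
except FileLockTimeout:
    print("another process held the lock for more than 5 seconds")
```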
tetra_rp/core/utils/json.py
@@ -0,0 +1,33 @@
+ from enum import Enum
+ from typing import Any
+ from pydantic import BaseModel
+
+
+ def normalize_for_json(obj: Any) -> Any:
+     """
+     Recursively normalizes an object for JSON serialization.
+
+     This function handles various data types and ensures that objects
+     are converted into JSON-serializable formats. It supports the following:
+     - `BaseModel` instances: Converts them to dictionaries using `model_dump()`.
+     - `Enum` members: Replaces them with their underlying `value`.
+     - Dictionaries: Recursively normalizes their values.
+     - Lists and tuples: Recursively normalizes their elements, preserving the container type.
+     - Other types: Returns the object as is.
+
+     Args:
+         obj (Any): The object to normalize.
+
+     Returns:
+         Any: A JSON-serializable representation of the input object.
+     """
+     if isinstance(obj, BaseModel):
+         return normalize_for_json(obj.model_dump())
+     elif isinstance(obj, Enum):
+         return obj.value
+     elif isinstance(obj, dict):
+         return {k: normalize_for_json(v) for k, v in obj.items()}
+     elif isinstance(obj, (list, tuple)):
+         return type(obj)(normalize_for_json(i) for i in obj)
+     else:
+         return obj
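A quick demonstration of why the recursion matters: `model_dump()` can leave `Enum` members inside the resulting dict, which `normalize_for_json` then resolves to plain values (the model names here are illustrative):

```python
import json
from enum import Enum

from pydantic import BaseModel

from tetra_rp.core.utils.json import normalize_for_json

class Color(Enum):
    RED = "red"

class Item(BaseModel):
    name: str
    color: Color

payload = {"items": [Item(name="a", color=Color.RED)], "pair": (1, 2)}
print(json.dumps(normalize_for_json(payload)))
# {"items": [{"name": "a", "color": "red"}], "pair": [1, 2]}
```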
tetra_rp/core/utils/lru_cache.py
@@ -0,0 +1,75 @@
+ """
+ LRU Cache implementation using OrderedDict for memory-efficient caching with automatic eviction.
+
+ This module provides a Least Recently Used (LRU) cache implementation that automatically
+ manages memory by evicting the least recently used items when the cache exceeds its
+ maximum size limit. It maintains O(1) access time and provides a dict-like interface.
+ Thread-safe for concurrent access.
+ """
+
+ import threading
+ from collections import OrderedDict
+ from typing import Any, Dict, Optional
+
+
+ class LRUCache:
+     """
+     A Least Recently Used (LRU) cache implementation using OrderedDict.
+
+     Automatically evicts the least recently used items when the cache exceeds
+     the maximum size limit. Provides dict-like interface with O(1) operations.
+     Thread-safe for concurrent access using RLock.
+
+     Args:
+         max_size: Maximum number of items to store in cache (default: 1000)
+     """
+
+     def __init__(self, max_size: int = 1000):
+         self.max_size = max_size
+         self.cache = OrderedDict()
+         self._lock = threading.RLock()
+
+     def get(self, key: str) -> Optional[Dict[str, Any]]:
+         """Get item from cache, moving it to end (most recent) if found."""
+         with self._lock:
+             if key in self.cache:
+                 self.cache.move_to_end(key)
+                 return self.cache[key]
+             return None
+
+     def set(self, key: str, value: Dict[str, Any]) -> None:
+         """Set item in cache, evicting oldest if at capacity."""
+         with self._lock:
+             if key in self.cache:
+                 self.cache.move_to_end(key)
+             else:
+                 if len(self.cache) >= self.max_size:
+                     self.cache.popitem(last=False)  # Remove oldest
+             self.cache[key] = value
+
+     def clear(self) -> None:
+         """Clear all items from cache."""
+         with self._lock:
+             self.cache.clear()
+
+     def __contains__(self, key: str) -> bool:
+         """Check if key exists in cache."""
+         with self._lock:
+             return key in self.cache
+
+     def __len__(self) -> int:
+         """Return number of items in cache."""
+         with self._lock:
+             return len(self.cache)
+
+     def __getitem__(self, key: str) -> Dict[str, Any]:
+         """Get item using bracket notation, moving to end if found."""
+         with self._lock:
+             if key in self.cache:
+                 self.cache.move_to_end(key)
+                 return self.cache[key]
+             raise KeyError(key)
+
+     def __setitem__(self, key: str, value: Dict[str, Any]) -> None:
+         """Set item using bracket notation."""
+         self.set(key, value)
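The eviction behavior in brief: a `get` (or bracket access) refreshes an entry's recency, so an insert at capacity drops the least recently used key:

```python
from tetra_rp.core.utils.lru_cache import LRUCache

cache = LRUCache(max_size=2)
cache["a"] = {"value": 1}
cache["b"] = {"value": 2}
cache.get("a")             # touching "a" makes "b" the LRU entry
cache["c"] = {"value": 3}  # at capacity: evicts "b"
assert "b" not in cache and len(cache) == 2
```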
tetra_rp/core/utils/singleton.py
@@ -0,0 +1,21 @@
+ import threading
+
+
+ class SingletonMixin:
+     """Thread-safe singleton mixin class.
+
+     Uses threading.Lock to ensure only one instance is created
+     per class, even under concurrent access.
+     """
+
+     _instances = {}
+     _lock = threading.Lock()
+
+     def __new__(cls, *args, **kwargs):
+         # Use double-checked locking pattern for performance
+         if cls not in cls._instances:
+             with cls._lock:
+                 # Check again inside the lock (double-checked locking)
+                 if cls not in cls._instances:
+                     cls._instances[cls] = super().__new__(cls)
+         return cls._instances[cls]
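Usage sketch (the class name is hypothetical): every instantiation returns the same object, though `__init__` still runs on each call, so initialization should be idempotent:

```python
from tetra_rp.core.utils.singleton import SingletonMixin

class ApiClient(SingletonMixin):
    def __init__(self):
        # Runs on every ApiClient() call even though __new__
        # returns the cached instance; keep this idempotent.
        self.connected = getattr(self, "connected", False)

assert ApiClient() is ApiClient()
```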
tetra_rp/core/validation.py
@@ -0,0 +1,44 @@
+ """Validation utilities for tetra_rp configuration.
+
+ Provides validation functions for required environment variables and configuration.
+ """
+
+ import os
+
+ from tetra_rp.core.exceptions import RunpodAPIKeyError
+
+
+ def validate_api_key() -> str:
+     """Validate that RUNPOD_API_KEY environment variable is set.
+
+     Returns:
+         The API key value if present.
+
+     Raises:
+         RunpodAPIKeyError: If RUNPOD_API_KEY is not set or is empty.
+     """
+     api_key = os.getenv("RUNPOD_API_KEY")
+
+     if not api_key or not api_key.strip():
+         raise RunpodAPIKeyError()
+
+     return api_key
+
+
+ def validate_api_key_with_context(operation: str) -> str:
+     """Validate API key with additional context about the operation.
+
+     Args:
+         operation: Description of what operation requires the API key.
+
+     Returns:
+         The API key value if present.
+
+     Raises:
+         RunpodAPIKeyError: If RUNPOD_API_KEY is not set, with operation context.
+     """
+     try:
+         return validate_api_key()
+     except RunpodAPIKeyError as e:
+         context_message = f"Cannot {operation}: {str(e)}"
+         raise RunpodAPIKeyError(context_message) from e
+ raise RunpodAPIKeyError(context_message) from e