tetra-rp 0.10.0__tar.gz → 0.11.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (41)
  1. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/PKG-INFO +2 -1
  2. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/pyproject.toml +2 -1
  3. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/__init__.py +2 -0
  4. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/client.py +25 -3
  5. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/__init__.py +2 -1
  6. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/network_volume.py +7 -11
  7. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/serverless.py +17 -8
  8. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/template.py +1 -1
  9. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/execute_class.py +6 -0
  10. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/protos/remote_execution.py +36 -12
  11. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/stubs/live_serverless.py +12 -1
  12. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/stubs/registry.py +14 -2
  13. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp.egg-info/PKG-INFO +2 -1
  14. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp.egg-info/requires.txt +1 -0
  15. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/README.md +0 -0
  16. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/setup.cfg +0 -0
  17. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/__init__.py +0 -0
  18. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/api/__init__.py +0 -0
  19. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/api/runpod.py +0 -0
  20. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/base.py +0 -0
  21. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/cloud.py +0 -0
  22. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/constants.py +0 -0
  23. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/cpu.py +0 -0
  24. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/environment.py +0 -0
  25. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/gpu.py +0 -0
  26. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/live_serverless.py +0 -0
  27. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/resource_manager.py +0 -0
  28. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/resources/utils.py +0 -0
  29. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/utils/__init__.py +0 -0
  30. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/utils/backoff.py +0 -0
  31. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/utils/constants.py +0 -0
  32. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/utils/json.py +0 -0
  33. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/utils/lru_cache.py +0 -0
  34. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/core/utils/singleton.py +0 -0
  35. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/logger.py +0 -0
  36. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/protos/__init__.py +0 -0
  37. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/stubs/__init__.py +0 -0
  38. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp/stubs/serverless.py +0 -0
  39. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp.egg-info/SOURCES.txt +0 -0
  40. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp.egg-info/dependency_links.txt +0 -0
  41. {tetra_rp-0.10.0 → tetra_rp-0.11.0}/src/tetra_rp.egg-info/top_level.txt +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tetra_rp
-Version: 0.10.0
+Version: 0.11.0
 Summary: A Python library for distributed inference and serving of machine learning models
 Author-email: Marut Pandya <pandyamarut@gmail.com>, Patrick Rachford <prachford@icloud.com>, Dean Quinanola <dean.quinanola@runpod.io>
 License: MIT
@@ -13,6 +13,7 @@ Description-Content-Type: text/markdown
 Requires-Dist: cloudpickle>=3.1.1
 Requires-Dist: runpod
 Requires-Dist: python-dotenv>=1.0.0
+Requires-Dist: pydantic>=2.0.0
 
 # Tetra: Serverless computing for AI workloads
 
pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "tetra_rp"
-version = "0.10.0"
+version = "0.11.0"
 description = "A Python library for distributed inference and serving of machine learning models"
 authors = [
     { name = "Marut Pandya", email = "pandyamarut@gmail.com" },
@@ -21,6 +21,7 @@ dependencies = [
     "cloudpickle>=3.1.1",
     "runpod",
     "python-dotenv>=1.0.0",
+    "pydantic>=2.0.0",
 ]
 
 [dependency-groups]
src/tetra_rp/__init__.py

@@ -14,6 +14,7 @@ from .core.resources import (  # noqa: E402
     CpuServerlessEndpoint,
     CpuInstanceType,
     CudaVersion,
+    DataCenter,
     GpuGroup,
     LiveServerless,
     PodTemplate,
@@ -29,6 +30,7 @@ __all__ = [
     "CpuServerlessEndpoint",
     "CpuInstanceType",
     "CudaVersion",
+    "DataCenter",
     "GpuGroup",
     "LiveServerless",
     "PodTemplate",
src/tetra_rp/client.py

@@ -14,6 +14,8 @@ def remote(
     resource_config: ServerlessResource,
     dependencies: Optional[List[str]] = None,
     system_dependencies: Optional[List[str]] = None,
+    accelerate_downloads: bool = True,
+    hf_models_to_cache: Optional[List[str]] = None,
     **extra,
 ):
     """
@@ -22,10 +24,17 @@ def remote(
     This decorator allows a function to be executed in a remote serverless environment, with support for
     dynamic resource provisioning and installation of required dependencies.
 
+    Args:
         resource_config (ServerlessResource): Configuration object specifying the serverless resource
             to be provisioned or used.
         dependencies (List[str], optional): A list of pip package names to be installed in the remote
             environment before executing the function. Defaults to None.
+        system_dependencies (List[str], optional): A list of system packages to be installed in the remote
+            environment before executing the function. Defaults to None.
+        accelerate_downloads (bool, optional): Enable download acceleration for dependencies and models.
+            Defaults to True.
+        hf_models_to_cache (List[str], optional): List of HuggingFace model IDs to pre-cache using
+            download acceleration. Defaults to None.
         extra (dict, optional): Additional parameters for the execution of the resource. Defaults to an empty dict.
 
     Returns:
@@ -37,7 +46,8 @@ def remote(
         @remote(
             resource_config=my_resource_config,
             dependencies=["numpy", "pandas"],
-            sync=True  # Optional, to run synchronously
+            accelerate_downloads=True,
+            hf_models_to_cache=["gpt2", "bert-base-uncased"]
         )
         async def my_function(data):
             # Function logic here
@@ -49,7 +59,13 @@ def remote(
     if inspect.isclass(func_or_class):
         # Handle class decoration
         return create_remote_class(
-            func_or_class, resource_config, dependencies, system_dependencies, extra
+            func_or_class,
+            resource_config,
+            dependencies,
+            system_dependencies,
+            accelerate_downloads,
+            hf_models_to_cache,
+            extra,
         )
     else:
         # Handle function decoration (unchanged)
@@ -62,7 +78,13 @@ def remote(
 
         stub = stub_resource(remote_resource, **extra)
         return await stub(
-            func_or_class, dependencies, system_dependencies, *args, **kwargs
+            func_or_class,
+            dependencies,
+            system_dependencies,
+            accelerate_downloads,
+            hf_models_to_cache,
+            *args,
+            **kwargs,
         )
 
     return wrapper
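
The diff above threads two new decorator parameters through both the function and class paths. A hedged usage sketch follows; the endpoint name, dependency list, and function body are illustrative placeholders, and it assumes `remote` and `LiveServerless` are importable from the package root as in the README:

```python
from tetra_rp import remote, LiveServerless

endpoint = LiveServerless(name="example-endpoint")  # placeholder endpoint name

@remote(
    resource_config=endpoint,
    dependencies=["transformers"],
    accelerate_downloads=True,    # new in 0.11.0, enabled by default
    hf_models_to_cache=["gpt2"],  # new in 0.11.0, pre-caches HF models on the worker
)
async def generate(prompt: str) -> str:
    from transformers import pipeline
    return pipeline("text-generation", model="gpt2")(prompt, max_new_tokens=20)[0]["generated_text"]
```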
src/tetra_rp/core/resources/__init__.py

@@ -12,7 +12,7 @@ from .serverless import (
     CudaVersion,
 )
 from .template import PodTemplate
-from .network_volume import NetworkVolume
+from .network_volume import NetworkVolume, DataCenter
 
 
 __all__ = [
@@ -21,6 +21,7 @@ __all__ = [
     "CpuInstanceType",
     "CpuServerlessEndpoint",
     "CudaVersion",
+    "DataCenter",
     "DeployableResource",
     "GpuGroup",
     "GpuType",
src/tetra_rp/core/resources/network_volume.py

@@ -38,8 +38,8 @@ class NetworkVolume(DeployableResource):
     dataCenterId: DataCenter = Field(default=DataCenter.EU_RO_1, frozen=True)
 
     id: Optional[str] = Field(default=None)
-    name: Optional[str] = None
-    size: Optional[int] = Field(default=50, gt=0)  # Size in GB
+    name: str
+    size: Optional[int] = Field(default=100, gt=0)  # Size in GB
 
     def __str__(self) -> str:
         return f"{self.__class__.__name__}:{self.id}"
@@ -47,15 +47,11 @@ class NetworkVolume(DeployableResource):
     @property
     def resource_id(self) -> str:
         """Unique resource ID based on name and datacenter for idempotent behavior."""
-        if self.name:
-            # Use name + datacenter for volumes with names to ensure idempotence
-            resource_type = self.__class__.__name__
-            config_key = f"{self.name}:{self.dataCenterId.value}"
-            hash_obj = hashlib.md5(f"{resource_type}:{config_key}".encode())
-            return f"{resource_type}_{hash_obj.hexdigest()}"
-        else:
-            # Fall back to default behavior for unnamed volumes
-            return super().resource_id
+        # Use name + datacenter to ensure idempotence
+        resource_type = self.__class__.__name__
+        config_key = f"{self.name}:{self.dataCenterId.value}"
+        hash_obj = hashlib.md5(f"{resource_type}:{config_key}".encode())
+        return f"{resource_type}_{hash_obj.hexdigest()}"
 
     @field_serializer("dataCenterId")
     def serialize_data_center_id(self, value: Optional[DataCenter]) -> Optional[str]:
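
The rewritten resource_id makes name a required field and derives a deterministic ID from the volume name and datacenter. A standalone sketch of the same hashing scheme, where the datacenter strings are assumptions about the enum values:

```python
import hashlib

def volume_resource_id(name: str, datacenter: str = "EU-RO-1") -> str:
    """Mirror of NetworkVolume.resource_id: md5 over '<ClassName>:<name>:<datacenter>'."""
    resource_type = "NetworkVolume"
    config_key = f"{name}:{datacenter}"
    hash_obj = hashlib.md5(f"{resource_type}:{config_key}".encode())
    return f"{resource_type}_{hash_obj.hexdigest()}"

# Re-declaring the same named volume in the same datacenter yields the same ID,
# which is what makes repeated deploys idempotent.
assert volume_resource_id("model-cache") == volume_resource_id("model-cache")
# A different datacenter string (hypothetical value) yields a different ID.
assert volume_resource_id("model-cache") != volume_resource_id("model-cache", "US-TX-1")
```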
src/tetra_rp/core/resources/serverless.py

@@ -20,7 +20,7 @@ from .constants import CONSOLE_URL
 from .cpu import CpuInstanceType
 from .environment import EnvironmentVars
 from .gpu import GpuGroup
-from .network_volume import NetworkVolume
+from .network_volume import NetworkVolume, DataCenter
 from .template import KeyValuePair, PodTemplate
 
 
@@ -65,6 +65,7 @@ class ServerlessResource(DeployableResource):
     _input_only = {
         "id",
         "cudaVersions",
+        "datacenter",
         "env",
         "gpus",
         "flashboot",
@@ -78,8 +79,8 @@ class ServerlessResource(DeployableResource):
     flashboot: Optional[bool] = True
     gpus: Optional[List[GpuGroup]] = [GpuGroup.ANY]  # for gpuIds
     imageName: Optional[str] = ""  # for template.imageName
-
     networkVolume: Optional[NetworkVolume] = None
+    datacenter: DataCenter = Field(default=DataCenter.EU_RO_1)
 
     # === Input Fields ===
     executionTimeoutMs: Optional[int] = None
@@ -156,6 +157,17 @@ class ServerlessResource(DeployableResource):
         if self.flashboot:
             self.name += "-fb"
 
+        # Sync datacenter to locations field for API
+        if not self.locations:
+            self.locations = self.datacenter.value
+
+        # Validate datacenter consistency between endpoint and network volume
+        if self.networkVolume and self.networkVolume.dataCenterId != self.datacenter:
+            raise ValueError(
+                f"Network volume datacenter ({self.networkVolume.dataCenterId.value}) "
+                f"must match endpoint datacenter ({self.datacenter.value})"
+            )
+
         if self.networkVolume and self.networkVolume.is_created:
             # Volume already exists, use its ID
             self.networkVolumeId = self.networkVolume.id
@@ -197,17 +209,14 @@ class ServerlessResource(DeployableResource):
 
     async def _ensure_network_volume_deployed(self) -> None:
         """
-        Ensures network volume is deployed and ready.
+        Ensures network volume is deployed and ready if one is specified.
         Updates networkVolumeId with the deployed volume ID.
         """
         if self.networkVolumeId:
             return
 
-        if not self.networkVolume:
-            log.info(f"{self.name} requires a default network volume")
-            self.networkVolume = NetworkVolume(name=f"{self.name}-volume")
-
-        if deployedNetworkVolume := await self.networkVolume.deploy():
+        if self.networkVolume:
+            deployedNetworkVolume = await self.networkVolume.deploy()
             self.networkVolumeId = deployedNetworkVolume.id
 
     def is_deployed(self) -> bool:
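
These serverless changes add an explicit datacenter field and enforce that any attached network volume lives in the same datacenter; volumes are also no longer auto-created. A hedged configuration sketch, assuming NetworkVolume and LiveServerless are exported from the package root and that LiveServerless inherits these ServerlessResource fields:

```python
from tetra_rp import DataCenter, LiveServerless, NetworkVolume

# dataCenterId defaults to DataCenter.EU_RO_1; size now defaults to 100 GB.
volume = NetworkVolume(name="shared-cache", size=100)

endpoint = LiveServerless(
    name="inference",                # placeholder endpoint name
    datacenter=DataCenter.EU_RO_1,   # must match volume.dataCenterId, else ValueError
    networkVolume=volume,
)

# A mismatched pairing (volume in EU-RO-1, endpoint elsewhere) would raise:
# ValueError: Network volume datacenter (...) must match endpoint datacenter (...)
```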
src/tetra_rp/core/resources/template.py

@@ -22,7 +22,7 @@ class KeyValuePair(BaseModel):
 class PodTemplate(BaseResource):
     advancedStart: Optional[bool] = False
     config: Optional[Dict[str, Any]] = {}
-    containerDiskInGb: Optional[int] = 10
+    containerDiskInGb: Optional[int] = 64
     containerRegistryAuthId: Optional[str] = ""
     dockerArgs: Optional[str] = ""
     env: Optional[List[KeyValuePair]] = []
src/tetra_rp/execute_class.py

@@ -202,6 +202,8 @@ def create_remote_class(
     resource_config: ServerlessResource,
     dependencies: Optional[List[str]],
     system_dependencies: Optional[List[str]],
+    accelerate_downloads: bool,
+    hf_models_to_cache: Optional[List[str]],
     extra: dict,
 ):
     """
@@ -219,6 +221,8 @@ def create_remote_class(
             self._resource_config = resource_config
             self._dependencies = dependencies or []
             self._system_dependencies = system_dependencies or []
+            self._accelerate_downloads = accelerate_downloads
+            self._hf_models_to_cache = hf_models_to_cache
             self._extra = extra
             self._constructor_args = args
             self._constructor_kwargs = kwargs
@@ -302,6 +306,8 @@ def create_remote_class(
                 constructor_kwargs=constructor_kwargs,
                 dependencies=self._dependencies,
                 system_dependencies=self._system_dependencies,
+                accelerate_downloads=self._accelerate_downloads,
+                hf_models_to_cache=self._hf_models_to_cache,
                 instance_id=self._instance_id,
                 create_new_instance=not hasattr(
                     self, "_stub"
src/tetra_rp/protos/remote_execution.py

@@ -1,11 +1,22 @@
-# TODO: generate using betterproto
+"""Remote execution protocol definitions using Pydantic models.
+
+This module defines the request/response protocol for remote function and class execution.
+The models align with the protobuf schema for communication with remote workers.
+"""
+
 from abc import ABC, abstractmethod
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional
 
 from pydantic import BaseModel, Field, model_validator
 
 
 class FunctionRequest(BaseModel):
+    """Request model for remote function or class execution.
+
+    Supports both function-based execution and class instantiation with method calls.
+    All serialized data (args, kwargs, etc.) are base64-encoded cloudpickle strings.
+    """
+
     # MADE OPTIONAL - can be None for class-only execution
     function_name: Optional[str] = Field(
         default=None,
@@ -15,19 +26,19 @@ class FunctionRequest(BaseModel):
         default=None,
         description="Source code of the function to execute",
     )
-    args: List = Field(
+    args: List[str] = Field(
         default_factory=list,
         description="List of base64-encoded cloudpickle-serialized arguments",
     )
-    kwargs: Dict = Field(
+    kwargs: Dict[str, str] = Field(
         default_factory=dict,
         description="Dictionary of base64-encoded cloudpickle-serialized keyword arguments",
     )
-    dependencies: Optional[List] = Field(
+    dependencies: Optional[List[str]] = Field(
         default=None,
         description="Optional list of pip packages to install before executing the function",
     )
-    system_dependencies: Optional[List] = Field(
+    system_dependencies: Optional[List[str]] = Field(
         default=None,
         description="Optional list of system dependencies to install before executing the function",
     )
@@ -44,11 +55,11 @@ class FunctionRequest(BaseModel):
         default=None,
         description="Source code of the class to instantiate (for class execution)",
     )
-    constructor_args: Optional[List] = Field(
+    constructor_args: List[str] = Field(
         default_factory=list,
         description="List of base64-encoded cloudpickle-serialized constructor arguments",
     )
-    constructor_kwargs: Optional[Dict] = Field(
+    constructor_kwargs: Dict[str, str] = Field(
         default_factory=dict,
         description="Dictionary of base64-encoded cloudpickle-serialized constructor keyword arguments",
     )
@@ -65,6 +76,16 @@ class FunctionRequest(BaseModel):
         description="Whether to create a new instance or reuse existing one",
     )
 
+    # Download acceleration fields
+    accelerate_downloads: bool = Field(
+        default=True,
+        description="Enable download acceleration for dependencies and models",
+    )
+    hf_models_to_cache: Optional[List[str]] = Field(
+        default=None,
+        description="List of HuggingFace model IDs to pre-cache using acceleration",
+    )
+
     @model_validator(mode="after")
     def validate_execution_requirements(self) -> "FunctionRequest":
         """Validate that required fields are provided based on execution_type"""
@@ -92,7 +113,12 @@ class FunctionRequest(BaseModel):
 
 
 class FunctionResponse(BaseModel):
-    # EXISTING FIELDS (unchanged)
+    """Response model for remote function or class execution results.
+
+    Contains execution results, error information, and metadata about class instances
+    when applicable. The result field contains base64-encoded cloudpickle data.
+    """
+
     success: bool = Field(
         description="Indicates if the function execution was successful",
     )
@@ -108,12 +134,10 @@ class FunctionRequest(BaseModel):
         default=None,
         description="Captured standard output from the function execution",
     )
-
-    # NEW FIELDS FOR CLASS SUPPORT
     instance_id: Optional[str] = Field(
         default=None, description="ID of the class instance that was used/created"
     )
-    instance_info: Optional[Dict] = Field(
+    instance_info: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Metadata about the class instance (creation time, call count, etc.)",
    )
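
FunctionRequest now constrains its serialized payloads to strings and carries the acceleration flags. A minimal sketch of building a request by hand; the helper and sample function are illustrative, real callers go through the stubs, and the validator may require fields (such as an execution type) not visible in this hunk:

```python
import base64
import cloudpickle
from tetra_rp.protos.remote_execution import FunctionRequest

def encode(obj) -> str:
    """Serialize an object the way the protocol expects: cloudpickle, then base64 text."""
    return base64.b64encode(cloudpickle.dumps(obj)).decode("utf-8")

request = FunctionRequest(
    function_name="add",
    function_code="def add(a, b):\n    return a + b\n",
    args=[encode(2), encode(3)],  # List[str] of encoded positional args
    kwargs={},                    # Dict[str, str] of encoded keyword args
    accelerate_downloads=True,    # new field, defaults to True
    hf_models_to_cache=None,      # new field, optional list of HF model IDs
)
```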
src/tetra_rp/stubs/live_serverless.py

@@ -60,13 +60,24 @@ class LiveServerlessStub(RemoteExecutorStub):
     def __init__(self, server: LiveServerless):
         self.server = server
 
-    def prepare_request(self, func, dependencies, system_dependencies, *args, **kwargs):
+    def prepare_request(
+        self,
+        func,
+        dependencies,
+        system_dependencies,
+        accelerate_downloads,
+        hf_models_to_cache,
+        *args,
+        **kwargs,
+    ):
         source, src_hash = get_function_source(func)
 
         request = {
             "function_name": func.__name__,
             "dependencies": dependencies,
             "system_dependencies": system_dependencies,
+            "accelerate_downloads": accelerate_downloads,
+            "hf_models_to_cache": hf_models_to_cache,
         }
 
         # check if the function is already cached
src/tetra_rp/stubs/registry.py

@@ -26,13 +26,25 @@ def _(resource, **extra):
 
     # Function execution
     async def stubbed_resource(
-        func, dependencies, system_dependencies, *args, **kwargs
+        func,
+        dependencies,
+        system_dependencies,
+        accelerate_downloads,
+        hf_models_to_cache,
+        *args,
+        **kwargs,
     ) -> dict:
         if args == (None,):
             args = []
 
         request = stub.prepare_request(
-            func, dependencies, system_dependencies, *args, **kwargs
+            func,
+            dependencies,
+            system_dependencies,
+            accelerate_downloads,
+            hf_models_to_cache,
+            *args,
+            **kwargs,
         )
         response = await stub.ExecuteFunction(request)
         return stub.handle_response(response)
src/tetra_rp.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tetra_rp
-Version: 0.10.0
+Version: 0.11.0
 Summary: A Python library for distributed inference and serving of machine learning models
 Author-email: Marut Pandya <pandyamarut@gmail.com>, Patrick Rachford <prachford@icloud.com>, Dean Quinanola <dean.quinanola@runpod.io>
 License: MIT
@@ -13,6 +13,7 @@ Description-Content-Type: text/markdown
 Requires-Dist: cloudpickle>=3.1.1
 Requires-Dist: runpod
 Requires-Dist: python-dotenv>=1.0.0
+Requires-Dist: pydantic>=2.0.0
 
 # Tetra: Serverless computing for AI workloads
 
src/tetra_rp.egg-info/requires.txt

@@ -1,3 +1,4 @@
 cloudpickle>=3.1.1
 runpod
 python-dotenv>=1.0.0
+pydantic>=2.0.0