tetra-rp 0.6.0__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. tetra_rp/__init__.py +109 -19
  2. tetra_rp/cli/commands/__init__.py +1 -0
  3. tetra_rp/cli/commands/apps.py +143 -0
  4. tetra_rp/cli/commands/build.py +1082 -0
  5. tetra_rp/cli/commands/build_utils/__init__.py +1 -0
  6. tetra_rp/cli/commands/build_utils/handler_generator.py +176 -0
  7. tetra_rp/cli/commands/build_utils/lb_handler_generator.py +309 -0
  8. tetra_rp/cli/commands/build_utils/manifest.py +430 -0
  9. tetra_rp/cli/commands/build_utils/mothership_handler_generator.py +75 -0
  10. tetra_rp/cli/commands/build_utils/scanner.py +596 -0
  11. tetra_rp/cli/commands/deploy.py +580 -0
  12. tetra_rp/cli/commands/init.py +123 -0
  13. tetra_rp/cli/commands/resource.py +108 -0
  14. tetra_rp/cli/commands/run.py +296 -0
  15. tetra_rp/cli/commands/test_mothership.py +458 -0
  16. tetra_rp/cli/commands/undeploy.py +533 -0
  17. tetra_rp/cli/main.py +97 -0
  18. tetra_rp/cli/utils/__init__.py +1 -0
  19. tetra_rp/cli/utils/app.py +15 -0
  20. tetra_rp/cli/utils/conda.py +127 -0
  21. tetra_rp/cli/utils/deployment.py +530 -0
  22. tetra_rp/cli/utils/ignore.py +143 -0
  23. tetra_rp/cli/utils/skeleton.py +184 -0
  24. tetra_rp/cli/utils/skeleton_template/.env.example +4 -0
  25. tetra_rp/cli/utils/skeleton_template/.flashignore +40 -0
  26. tetra_rp/cli/utils/skeleton_template/.gitignore +44 -0
  27. tetra_rp/cli/utils/skeleton_template/README.md +263 -0
  28. tetra_rp/cli/utils/skeleton_template/main.py +44 -0
  29. tetra_rp/cli/utils/skeleton_template/mothership.py +55 -0
  30. tetra_rp/cli/utils/skeleton_template/pyproject.toml +58 -0
  31. tetra_rp/cli/utils/skeleton_template/requirements.txt +1 -0
  32. tetra_rp/cli/utils/skeleton_template/workers/__init__.py +0 -0
  33. tetra_rp/cli/utils/skeleton_template/workers/cpu/__init__.py +19 -0
  34. tetra_rp/cli/utils/skeleton_template/workers/cpu/endpoint.py +36 -0
  35. tetra_rp/cli/utils/skeleton_template/workers/gpu/__init__.py +19 -0
  36. tetra_rp/cli/utils/skeleton_template/workers/gpu/endpoint.py +61 -0
  37. tetra_rp/client.py +136 -33
  38. tetra_rp/config.py +29 -0
  39. tetra_rp/core/api/runpod.py +591 -39
  40. tetra_rp/core/deployment.py +232 -0
  41. tetra_rp/core/discovery.py +425 -0
  42. tetra_rp/core/exceptions.py +50 -0
  43. tetra_rp/core/resources/__init__.py +27 -9
  44. tetra_rp/core/resources/app.py +738 -0
  45. tetra_rp/core/resources/base.py +139 -4
  46. tetra_rp/core/resources/constants.py +21 -0
  47. tetra_rp/core/resources/cpu.py +115 -13
  48. tetra_rp/core/resources/gpu.py +182 -16
  49. tetra_rp/core/resources/live_serverless.py +153 -16
  50. tetra_rp/core/resources/load_balancer_sls_resource.py +440 -0
  51. tetra_rp/core/resources/network_volume.py +126 -31
  52. tetra_rp/core/resources/resource_manager.py +436 -35
  53. tetra_rp/core/resources/serverless.py +537 -120
  54. tetra_rp/core/resources/serverless_cpu.py +201 -0
  55. tetra_rp/core/resources/template.py +1 -59
  56. tetra_rp/core/utils/constants.py +10 -0
  57. tetra_rp/core/utils/file_lock.py +260 -0
  58. tetra_rp/core/utils/http.py +67 -0
  59. tetra_rp/core/utils/lru_cache.py +75 -0
  60. tetra_rp/core/utils/singleton.py +36 -1
  61. tetra_rp/core/validation.py +44 -0
  62. tetra_rp/execute_class.py +301 -0
  63. tetra_rp/protos/remote_execution.py +98 -9
  64. tetra_rp/runtime/__init__.py +1 -0
  65. tetra_rp/runtime/circuit_breaker.py +274 -0
  66. tetra_rp/runtime/config.py +12 -0
  67. tetra_rp/runtime/exceptions.py +49 -0
  68. tetra_rp/runtime/generic_handler.py +206 -0
  69. tetra_rp/runtime/lb_handler.py +189 -0
  70. tetra_rp/runtime/load_balancer.py +160 -0
  71. tetra_rp/runtime/manifest_fetcher.py +192 -0
  72. tetra_rp/runtime/metrics.py +325 -0
  73. tetra_rp/runtime/models.py +73 -0
  74. tetra_rp/runtime/mothership_provisioner.py +512 -0
  75. tetra_rp/runtime/production_wrapper.py +266 -0
  76. tetra_rp/runtime/reliability_config.py +149 -0
  77. tetra_rp/runtime/retry_manager.py +118 -0
  78. tetra_rp/runtime/serialization.py +124 -0
  79. tetra_rp/runtime/service_registry.py +346 -0
  80. tetra_rp/runtime/state_manager_client.py +248 -0
  81. tetra_rp/stubs/live_serverless.py +35 -17
  82. tetra_rp/stubs/load_balancer_sls.py +357 -0
  83. tetra_rp/stubs/registry.py +145 -19
  84. {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/METADATA +398 -60
  85. tetra_rp-0.24.0.dist-info/RECORD +99 -0
  86. {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/WHEEL +1 -1
  87. tetra_rp-0.24.0.dist-info/entry_points.txt +2 -0
  88. tetra_rp/core/pool/cluster_manager.py +0 -177
  89. tetra_rp/core/pool/dataclass.py +0 -18
  90. tetra_rp/core/pool/ex.py +0 -38
  91. tetra_rp/core/pool/job.py +0 -22
  92. tetra_rp/core/pool/worker.py +0 -19
  93. tetra_rp/core/resources/utils.py +0 -50
  94. tetra_rp/core/utils/json.py +0 -33
  95. tetra_rp-0.6.0.dist-info/RECORD +0 -39
  96. /tetra_rp/{core/pool → cli}/__init__.py +0 -0
  97. {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/top_level.txt +0 -0
tetra_rp/cli/utils/skeleton_template/main.py ADDED
@@ -0,0 +1,44 @@
+ import logging
+ import os
+
+ from fastapi import FastAPI
+
+ from workers.cpu import cpu_router
+ from workers.gpu import gpu_router
+
+ logger = logging.getLogger(__name__)
+
+
+ app = FastAPI(
+     title="Flash Application",
+     description="Distributed GPU and CPU computing with Runpod Flash",
+     version="0.1.0",
+ )
+
+ # Include routers
+ app.include_router(gpu_router, prefix="/gpu", tags=["GPU Workers"])
+ app.include_router(cpu_router, prefix="/cpu", tags=["CPU Workers"])
+
+
+ @app.get("/")
+ def home():
+     return {
+         "message": "Flash Application",
+         "docs": "/docs",
+         "endpoints": {"gpu_hello": "/gpu/hello", "cpu_hello": "/cpu/hello"},
+     }
+
+
+ @app.get("/ping")
+ def ping():
+     return {"status": "healthy"}
+
+
+ if __name__ == "__main__":
+     import uvicorn
+
+     host = os.getenv("FLASH_HOST", "localhost")
+     port = int(os.getenv("FLASH_PORT", 8888))
+     logger.info(f"Starting Flash server on {host}:{port}")
+
+     uvicorn.run(app, host=host, port=port)
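A quick way to exercise the skeleton app above once it is running locally. This is a minimal sketch assuming the default `FLASH_HOST`/`FLASH_PORT` values from the `__main__` block; `requests` is not a dependency of the generated project and is used here purely for illustration.

```python
# Smoke-test the skeleton app, assuming it is serving on localhost:8888
# (the defaults in main.py). Route paths come from the router prefixes.
import requests

BASE = "http://localhost:8888"

# Health check
print(requests.get(f"{BASE}/ping").json())  # {'status': 'healthy'}

# Both worker routes accept a JSON body matching their MessageRequest model
print(requests.post(f"{BASE}/cpu/hello", json={"message": "hi"}).json())
print(requests.post(f"{BASE}/gpu/hello", json={"message": "hi"}).json())
```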
tetra_rp/cli/utils/skeleton_template/mothership.py ADDED
@@ -0,0 +1,55 @@
+ """
+ Mothership Endpoint Configuration
+
+ The mothership endpoint serves your FastAPI application routes.
+ It is automatically deployed as a CPU-optimized load-balanced endpoint.
+
+ To customize this configuration:
+ - Modify worker scaling: change workersMin and workersMax values
+ - Use GPU load balancer: import LiveLoadBalancer instead of CpuLiveLoadBalancer
+ - Change endpoint name: update the 'name' parameter
+
+ To disable mothership deployment:
+ - Delete this file, or
+ - Comment out the 'mothership' variable below
+
+ Documentation: https://docs.runpod.io/flash/mothership
+ """
+
+ from tetra_rp import CpuLiveLoadBalancer
+
+ # Mothership endpoint configuration
+ # This serves your FastAPI app routes from main.py
+ mothership = CpuLiveLoadBalancer(
+     name="mothership",
+     workersMin=1,
+     workersMax=3,
+ )
+
+ # Examples of customization:
+
+ # Increase scaling for high traffic
+ # mothership = CpuLiveLoadBalancer(
+ #     name="mothership",
+ #     workersMin=2,
+ #     workersMax=10,
+ # )
+
+ # Use GPU-based load balancer instead of CPU
+ # (requires importing LiveLoadBalancer)
+ # from tetra_rp import LiveLoadBalancer
+ # mothership = LiveLoadBalancer(
+ #     name="mothership",
+ #     gpus=[GpuGroup.ANY],
+ # )
+
+ # Custom endpoint name
+ # mothership = CpuLiveLoadBalancer(
+ #     name="my-api-gateway",
+ #     workersMin=1,
+ #     workersMax=3,
+ # )
+
+ # To disable mothership:
+ # - Delete this entire file, or
+ # - Comment out the 'mothership' variable above
tetra_rp/cli/utils/skeleton_template/pyproject.toml ADDED
@@ -0,0 +1,58 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "{{project_name}}"
+ version = "0.1.0"
+ description = "Flash serverless application"
+ readme = "README.md"
+ requires-python = ">=3.11"
+ dependencies = [
+     "tetra-rp",
+     "fastapi>=0.104.0",
+     "uvicorn>=0.24.0",
+ ]
+
+ [project.optional-dependencies]
+ dev = [
+     "pytest>=7.0",
+     "pytest-asyncio>=0.21",
+     "pytest-cov>=4.0",
+     "ruff>=0.1",
+     "mypy>=1.0",
+ ]
+
+ [tool.ruff]
+ line-length = 100
+ target-version = "py311"
+
+ [tool.ruff.lint]
+ select = ["E", "F", "I", "N", "W"]
+ ignore = ["E501"]
+
+ [tool.pytest.ini_options]
+ testpaths = ["tests"]
+ python_files = ["test_*.py", "*_test.py"]
+ python_classes = ["Test*"]
+ python_functions = ["test_*"]
+ asyncio_mode = "auto"
+
+ [tool.mypy]
+ python_version = "3.11"
+ warn_return_any = false
+ warn_unused_configs = true
+ disallow_untyped_defs = false
+
+ [tool.coverage.run]
+ source = ["src"]
+ omit = ["*/tests/*"]
+
+ [tool.coverage.report]
+ exclude_lines = [
+     "pragma: no cover",
+     "def __repr__",
+     "raise AssertionError",
+     "raise NotImplementedError",
+     "if __name__ == .__main__.:",
+ ]
tetra_rp/cli/utils/skeleton_template/requirements.txt ADDED
@@ -0,0 +1 @@
+ tetra_rp
tetra_rp/cli/utils/skeleton_template/workers/cpu/__init__.py ADDED
@@ -0,0 +1,19 @@
+ from fastapi import APIRouter
+ from pydantic import BaseModel
+
+ from .endpoint import cpu_hello
+
+ cpu_router = APIRouter()
+
+
+ class MessageRequest(BaseModel):
+     """Request model for CPU worker."""
+
+     message: str = "Hello from CPU!"
+
+
+ @cpu_router.post("/hello")
+ async def hello(request: MessageRequest):
+     """Simple CPU worker endpoint."""
+     result = await cpu_hello({"message": request.message})
+     return result
tetra_rp/cli/utils/skeleton_template/workers/cpu/endpoint.py ADDED
@@ -0,0 +1,36 @@
+ from tetra_rp import CpuLiveServerless, remote
+
+ cpu_config = CpuLiveServerless(
+     name="cpu_worker",
+     workersMin=0,
+     workersMax=5,
+     idleTimeout=5,
+ )
+
+
+ @remote(resource_config=cpu_config)
+ async def cpu_hello(input_data: dict) -> dict:
+     """Simple CPU worker example."""
+     import platform
+     from datetime import datetime
+
+     message = input_data.get("message", "Hello from CPU worker!")
+
+     return {
+         "status": "success",
+         "message": message,
+         "worker_type": "CPU",
+         "timestamp": datetime.now().isoformat(),
+         "platform": platform.system(),
+         "python_version": platform.python_version(),
+     }
+
+
+ # Test locally with: python -m workers.cpu.endpoint
+ if __name__ == "__main__":
+     import asyncio
+
+     test_payload = {"message": "Testing CPU worker"}
+     print(f"Testing CPU worker with payload: {test_payload}")
+     result = asyncio.run(cpu_hello(test_payload))
+     print(f"Result: {result}")
tetra_rp/cli/utils/skeleton_template/workers/gpu/__init__.py ADDED
@@ -0,0 +1,19 @@
+ from fastapi import APIRouter
+ from pydantic import BaseModel
+
+ from .endpoint import gpu_hello
+
+ gpu_router = APIRouter()
+
+
+ class MessageRequest(BaseModel):
+     """Request model for GPU worker."""
+
+     message: str = "Hello from GPU!"
+
+
+ @gpu_router.post("/hello")
+ async def hello(request: MessageRequest):
+     """Simple GPU worker endpoint."""
+     result = await gpu_hello({"message": request.message})
+     return result
tetra_rp/cli/utils/skeleton_template/workers/gpu/endpoint.py ADDED
@@ -0,0 +1,61 @@
+ from tetra_rp import GpuGroup, LiveServerless, remote
+
+ gpu_config = LiveServerless(
+     name="gpu_worker",
+     gpus=[GpuGroup.ANY],
+     workersMin=0,
+     workersMax=3,
+     idleTimeout=5,
+ )
+
+
+ @remote(resource_config=gpu_config, dependencies=["torch"])
+ async def gpu_hello(input_data: dict) -> dict:
+     """Simple GPU worker example with GPU detection."""
+     import platform
+     from datetime import datetime
+
+     try:
+         import torch
+
+         gpu_available = torch.cuda.is_available()
+         if gpu_available:
+             gpu_name = torch.cuda.get_device_name(0)
+             gpu_count = torch.cuda.device_count()
+             gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
+         else:
+             gpu_name = "No GPU detected"
+             gpu_count = 0
+             gpu_memory = 0
+     except Exception as e:
+         gpu_available = False
+         gpu_name = f"Error detecting GPU: {str(e)}"
+         gpu_count = 0
+         gpu_memory = 0
+
+     message = input_data.get("message", "Hello from GPU worker!")
+
+     return {
+         "status": "success",
+         "message": message,
+         "worker_type": "GPU",
+         "gpu_info": {
+             "available": gpu_available,
+             "name": gpu_name,
+             "count": gpu_count,
+             "memory_gb": round(gpu_memory, 2) if gpu_memory else 0,
+         },
+         "timestamp": datetime.now().isoformat(),
+         "platform": platform.system(),
+         "python_version": platform.python_version(),
+     }
+
+
+ # Test locally with: python -m workers.gpu.endpoint
+ if __name__ == "__main__":
+     import asyncio
+
+     test_payload = {"message": "Testing GPU worker"}
+     print(f"Testing GPU worker with payload: {test_payload}")
+     result = asyncio.run(gpu_hello(test_payload))
+     print(f"Result: {result}")
tetra_rp/client.py CHANGED
@@ -1,71 +1,174 @@
+ import os
+ import inspect
  import logging
  from functools import wraps
  from typing import List, Optional
- from .core.resources import ServerlessResource, ResourceManager, NetworkVolume
- from .stubs import stub_resource

+ from .core.resources import LoadBalancerSlsResource, ResourceManager, ServerlessResource
+ from .execute_class import create_remote_class
+ from .stubs import stub_resource

  log = logging.getLogger(__name__)


  def remote(
      resource_config: ServerlessResource,
-     dependencies: List[str] = None,
-     system_dependencies: List[str] = None,
-     mount_volume: Optional[NetworkVolume] = None,
+     dependencies: Optional[List[str]] = None,
+     system_dependencies: Optional[List[str]] = None,
+     accelerate_downloads: bool = True,
+     local: bool = False,
+     method: Optional[str] = None,
+     path: Optional[str] = None,
      **extra,
  ):
      """
      Decorator to enable dynamic resource provisioning and dependency management for serverless functions.

      This decorator allows a function to be executed in a remote serverless environment, with support for
-     dynamic resource provisioning and installation of required dependencies.
+     dynamic resource provisioning and installation of required dependencies. It can also bypass remote
+     execution entirely for local testing.
+
+     Supports both sync and async function definitions:
+     - `def my_function(...)` - Regular synchronous function
+     - `async def my_function(...)` - Asynchronous function
+
+     In both cases, the decorated function returns an awaitable that must be called with `await`.

+     Args:
          resource_config (ServerlessResource): Configuration object specifying the serverless resource
-             to be provisioned or used.
+             to be provisioned or used. Not used when local=True.
          dependencies (List[str], optional): A list of pip package names to be installed in the remote
-             environment before executing the function. Defaults to None.
-         mount_volume (NetworkVolume, optional): Configuration for creating and mounting a network volume.
-             Should contain 'size', 'datacenter_id', and 'name' keys. Defaults to None.
+             environment before executing the function. Not used when local=True. Defaults to None.
+         system_dependencies (List[str], optional): A list of system packages to be installed in the remote
+             environment before executing the function. Not used when local=True. Defaults to None.
+         accelerate_downloads (bool, optional): Enable download acceleration for dependencies and models.
+             Only applies to remote execution. Defaults to True.
+         local (bool, optional): Execute function/class locally instead of provisioning remote servers.
+             Returns the unwrapped function/class for direct local execution. Users must ensure all required
+             dependencies are already installed in their local environment. Defaults to False.
+         method (str, optional): HTTP method for load-balanced endpoints (LoadBalancerSlsResource).
+             Required for LoadBalancerSlsResource: "GET", "POST", "PUT", "DELETE", "PATCH".
+             Ignored for queue-based endpoints. Defaults to None.
+         path (str, optional): HTTP path for load-balanced endpoints (LoadBalancerSlsResource).
+             Required for LoadBalancerSlsResource. Must start with "/". Example: "/api/process".
+             Ignored for queue-based endpoints. Defaults to None.
          extra (dict, optional): Additional parameters for the execution of the resource. Defaults to an empty dict.

      Returns:
-         Callable: A decorator that wraps the target function, enabling remote execution with the
-             specified resource configuration and dependencies.
+         Callable: A decorator that wraps the target function, enabling remote execution with the specified
+             resource configuration and dependencies, or returns the unwrapped function/class for local execution.

      Example:
          ```python
+         # Queue-based endpoint (recommended for reliability)
+         @remote(
+             resource_config=LiveServerless(name="gpu_worker"),
+             dependencies=["torch>=2.0.0"],
+         )
+         async def gpu_task(data: dict) -> dict:
+             import torch
+             # GPU processing here
+             return {"result": "processed"}
+
+         # Load-balanced endpoint (for low-latency APIs)
+         @remote(
+             resource_config=LoadBalancerSlsResource(name="api-service"),
+             method="POST",
+             path="/api/process",
+         )
+         async def api_endpoint(x: int, y: int) -> dict:
+             return {"result": x + y}
+
+         # Local execution (testing/development)
          @remote(
              resource_config=my_resource_config,
              dependencies=["numpy", "pandas"],
-             sync=True # Optional, to run synchronously
+             local=True,
          )
-         async def my_function(data):
-             # Function logic here
+         async def my_test_function(data):
+             # Runs locally - dependencies must be pre-installed
              pass
          ```
      """

-     def decorator(func):
-         @wraps(func)
-         async def wrapper(*args, **kwargs):
-             # Create netowrk volume if mount_volume is provided
-             if mount_volume:
-                 try:
-                     network_volume = await mount_volume.deploy()
-                     resource_config.networkVolumeId = network_volume.id
-                 except Exception as e:
-                     log.error(f"Failed to create or mount network volume: {e}")
-                     raise
-
-             resource_manager = ResourceManager()
-             remote_resource = await resource_manager.get_or_deploy_resource(
-                 resource_config
+     def decorator(func_or_class):
+         # Validate HTTP routing parameters for LoadBalancerSlsResource
+         is_lb_resource = isinstance(resource_config, LoadBalancerSlsResource)
+
+         if is_lb_resource:
+             if not method or not path:
+                 raise ValueError(
+                     f"LoadBalancerSlsResource requires both 'method' and 'path' parameters. "
+                     f"Got method={method}, path={path}. "
+                     f"Example: @remote(resource_config, method='POST', path='/api/process')"
+                 )
+             if not path.startswith("/"):
+                 raise ValueError(f"path must start with '/'. Got: {path}")
+             valid_methods = {"GET", "POST", "PUT", "DELETE", "PATCH"}
+             if method not in valid_methods:
+                 raise ValueError(
+                     f"method must be one of {valid_methods}. Got: {method}"
+                 )
+         elif method or path:
+             log.warning(
+                 f"HTTP routing parameters (method={method}, path={path}) are only used "
+                 f"with LoadBalancerSlsResource, but resource_config is {type(resource_config).__name__}. "
+                 f"They will be ignored."
+             )
+
+         # Store routing metadata for scanner and build system
+         routing_config = {
+             "resource_config": resource_config,
+             "method": method,
+             "path": path,
+             "dependencies": dependencies,
+             "system_dependencies": system_dependencies,
+         }
+
+         if os.getenv("RUNPOD_POD_ID") or os.getenv("RUNPOD_ENDPOINT_ID"):
+             # Worker mode when running on RunPod platform
+             func_or_class.__remote_config__ = routing_config
+             return func_or_class
+
+         # Local execution mode - execute without provisioning remote servers
+         if local:
+             func_or_class.__remote_config__ = routing_config
+             return func_or_class
+
+         # Remote execution mode
+         if inspect.isclass(func_or_class):
+             # Handle class decoration
+             wrapped_class = create_remote_class(
+                 func_or_class,
+                 resource_config,
+                 dependencies,
+                 system_dependencies,
+                 accelerate_downloads,
+                 extra,
              )
+             wrapped_class.__remote_config__ = routing_config
+             return wrapped_class
+         else:
+             # Handle function decoration
+             @wraps(func_or_class)
+             async def wrapper(*args, **kwargs):
+                 resource_manager = ResourceManager()
+                 remote_resource = await resource_manager.get_or_deploy_resource(
+                     resource_config
+                 )

-             stub = stub_resource(remote_resource, **extra)
-             return await stub(func, dependencies, system_dependencies, *args, **kwargs)
+                 stub = stub_resource(remote_resource, **extra)
+                 return await stub(
+                     func_or_class,
+                     dependencies,
+                     system_dependencies,
+                     accelerate_downloads,
+                     *args,
+                     **kwargs,
+                 )

-         return wrapper
+             # Store routing metadata on wrapper for scanner
+             wrapper.__remote_config__ = routing_config
+             return wrapper

      return decorator
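The rewritten decorator attaches a `__remote_config__` dict to every decorated object so the build scanner can discover routing metadata without executing anything. The sketch below shows what that looks like from the caller's side, using `local=True` so nothing is provisioned; the `demo` function is hypothetical, while the attribute name and the config keys come from the diff above.

```python
# Sketch: inspecting the metadata @remote attaches for the scanner/build system.
# With local=True the decorator returns the original function unwrapped, with
# __remote_config__ set; no remote resources are deployed.
from tetra_rp import CpuLiveServerless, remote

@remote(resource_config=CpuLiveServerless(name="demo"), local=True)
async def demo(data: dict) -> dict:
    return data

cfg = demo.__remote_config__
print(cfg["method"], cfg["path"])             # None None (queue-based, no HTTP routing)
print(type(cfg["resource_config"]).__name__)  # CpuLiveServerless
print(cfg["dependencies"])                    # None
```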
tetra_rp/config.py ADDED
@@ -0,0 +1,29 @@
+ """Configuration management for tetra-rp CLI."""
+
+ from pathlib import Path
+ from typing import NamedTuple
+
+
+ class TetraPaths(NamedTuple):
+     """Paths for tetra-rp configuration and data."""
+
+     tetra_dir: Path
+     config_file: Path
+     deployments_file: Path
+
+     def ensure_tetra_dir(self) -> None:
+         """Ensure the .tetra directory exists."""
+         self.tetra_dir.mkdir(exist_ok=True)
+
+
+ def get_paths() -> TetraPaths:
+     """Get standardized paths for tetra-rp configuration."""
+     tetra_dir = Path.cwd() / ".tetra"
+     config_file = tetra_dir / "config.json"
+     deployments_file = tetra_dir / "deployments.json"
+
+     return TetraPaths(
+         tetra_dir=tetra_dir,
+         config_file=config_file,
+         deployments_file=deployments_file,
+     )
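The new `config.py` centralizes where the CLI keeps its per-project state (a `.tetra/` directory under the current working directory). A short usage sketch: only `get_paths()`, `ensure_tetra_dir()`, and the file names come from the module above; the JSON handling is illustrative.

```python
# Sketch: consuming the path helpers from tetra_rp.config.
import json

from tetra_rp.config import get_paths

paths = get_paths()       # paths resolved under <cwd>/.tetra/
paths.ensure_tetra_dir()  # create .tetra/ if it does not exist yet

# config.json and deployments.json both live inside .tetra/
deployments = (
    json.loads(paths.deployments_file.read_text())
    if paths.deployments_file.exists()
    else {}
)
print(paths.config_file)  # <cwd>/.tetra/config.json
```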