podstack-1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- podstack/__init__.py +222 -0
- podstack/annotations.py +725 -0
- podstack/client.py +322 -0
- podstack/exceptions.py +125 -0
- podstack/execution.py +291 -0
- podstack/gpu_runner.py +1141 -0
- podstack/models.py +274 -0
- podstack/notebook.py +410 -0
- podstack/registry/__init__.py +402 -0
- podstack/registry/client.py +957 -0
- podstack/registry/exceptions.py +107 -0
- podstack/registry/experiment.py +227 -0
- podstack/registry/model.py +273 -0
- podstack/registry/model_utils.py +231 -0
- podstack-1.2.0.dist-info/METADATA +299 -0
- podstack-1.2.0.dist-info/RECORD +27 -0
- podstack-1.2.0.dist-info/WHEEL +5 -0
- podstack-1.2.0.dist-info/licenses/LICENSE +21 -0
- podstack-1.2.0.dist-info/top_level.txt +2 -0
- podstack_gpu/__init__.py +126 -0
- podstack_gpu/app.py +675 -0
- podstack_gpu/exceptions.py +35 -0
- podstack_gpu/image.py +325 -0
- podstack_gpu/runner.py +746 -0
- podstack_gpu/secret.py +189 -0
- podstack_gpu/utils.py +203 -0
- podstack_gpu/volume.py +198 -0
podstack/__init__.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Podstack Python SDK
|
|
3
|
+
|
|
4
|
+
High-performance GPU platform SDK for ML workloads with experiment tracking
|
|
5
|
+
and model registry.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
import podstack
|
|
9
|
+
|
|
10
|
+
# Initialize (required for GPU execution and registry)
|
|
11
|
+
podstack.init(
|
|
12
|
+
api_key="your-api-key",
|
|
13
|
+
project_id="your-project-id"
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
# GPU Execution - Actually runs on remote GPU!
|
|
17
|
+
@podstack.gpu(type="L40S", fraction=100)
|
|
18
|
+
def train():
|
|
19
|
+
import torch
|
|
20
|
+
print(f"Running on: {torch.cuda.get_device_name(0)}")
|
|
21
|
+
return {"trained": True}
|
|
22
|
+
|
|
23
|
+
result = train() # Executes on remote GPU
|
|
24
|
+
|
|
25
|
+
# Or run code directly
|
|
26
|
+
result = podstack.run_on_gpu('''
|
|
27
|
+
import torch
|
|
28
|
+
print(f"GPU: {torch.cuda.get_device_name(0)}")
|
|
29
|
+
''', gpu="L40S")
|
|
30
|
+
|
|
31
|
+
Registry (Experiment Tracking & Model Management):
|
|
32
|
+
from podstack import registry
|
|
33
|
+
|
|
34
|
+
registry.init(api_key="your-api-key", project_id="your-project-id")
|
|
35
|
+
registry.set_experiment("my-experiment")
|
|
36
|
+
|
|
37
|
+
with registry.start_run(name="training") as run:
|
|
38
|
+
registry.log_params({"lr": 0.001})
|
|
39
|
+
registry.log_metrics({"loss": 0.5}, step=1)
|
|
40
|
+
|
|
41
|
+
registry.register_model(name="my-model", run_id=run.id)
|
|
42
|
+
|
|
43
|
+
Decorators:
|
|
44
|
+
import podstack
|
|
45
|
+
|
|
46
|
+
@podstack.gpu(type="L40S", fraction=100)
|
|
47
|
+
@podstack.experiment(name="my-experiment")
|
|
48
|
+
@podstack.run(name="training-v1", track_gpu=True)
|
|
49
|
+
def train():
|
|
50
|
+
...
|
|
51
|
+
|
|
52
|
+
@podstack.model.register(name="my-model")
|
|
53
|
+
def save_model():
|
|
54
|
+
...
|
|
55
|
+
"""
|
|
56
|
+
|
|
57
|
+
__version__ = "1.2.0"
|
|
58
|
+
|
|
59
|
+
from typing import Optional

from .client import Client
from .notebook import Notebook, NotebookStatus
from .execution import Execution, ExecutionStatus
from .exceptions import (
    PodstackError,
    AuthenticationError,
    NotFoundError,
    RateLimitError,
    GPUNotAvailableError,
    ExecutionTimeoutError,
)
from .models import (
    GPUType,
    Environment,
    Project,
    Version,
    WalletBalance,
)

# Registry module import
from . import registry

# GPU Runner module import
from . import gpu_runner
from .gpu_runner import (
    GPURunner,
    GPUExecutionResult,
    run as run_on_gpu,
)

# Annotations module import
from . import annotations
from .annotations import (
    gpu,
    environment,
    auto_shutdown,
    experiment,
    run,
    model,
    get_gpu_config,
    get_environment,
    get_auto_shutdown_minutes,
    enable_remote_execution,
)
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def init(
    api_key: Optional[str] = None,
    project_id: Optional[str] = None,
    api_url: Optional[str] = None,
    registry_url: Optional[str] = None,
) -> None:
    """
    Initialize the Podstack SDK.

    Sets up both the GPU execution runner and the registry service in one
    call, so GPU decorators and experiment tracking share credentials.

    Args:
        api_key: API key for authentication (or set PODSTACK_API_KEY env var)
        project_id: Project ID (or set PODSTACK_PROJECT_ID env var)
        api_url: Notebook service URL (optional)
        registry_url: Registry service URL (optional)

    Example:
        import podstack

        podstack.init(
            api_key="your-api-key",
            project_id="your-project-id"
        )

        # Now you can use GPU execution
        @podstack.gpu(type="L40S")
        def train():
            ...
    """
    # Initialize GPU runner (backs @podstack.gpu and run_on_gpu).
    gpu_runner.init(
        api_key=api_key,
        project_id=project_id,
        api_url=api_url,
    )

    # Initialize registry (experiment tracking & model management).
    # Note: the registry has its own service URL, independent of api_url.
    registry.init(
        api_key=api_key,
        project_id=project_id,
        api_url=registry_url,
    )
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def auto_init():
    """
    Auto-initialize the SDK from environment variables.

    Reads PODSTACK_API_KEY and PODSTACK_PROJECT_ID; when both are present,
    forwards them (plus the optional PODSTACK_API_URL / PODSTACK_REGISTRY_URL
    overrides) to ``init``. Called automatically at import time when
    PODSTACK_AUTO_INIT=1 or when running inside a Podstack notebook
    (PODSTACK_NOTEBOOK_ID is set).
    """
    import os

    env = os.getenv
    key = env("PODSTACK_API_KEY")
    project = env("PODSTACK_PROJECT_ID")

    # Credentials incomplete -> leave the SDK uninitialized.
    if not (key and project):
        return

    init(
        api_key=key,
        project_id=project,
        api_url=env("PODSTACK_API_URL"),
        registry_url=env("PODSTACK_REGISTRY_URL"),
    )
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
# Trigger auto-initialization at import time when running inside a Podstack
# notebook (PODSTACK_NOTEBOOK_ID present) or when the user opts in explicitly
# via PODSTACK_AUTO_INIT=1.
import os as _os
if _os.getenv("PODSTACK_NOTEBOOK_ID") or _os.getenv("PODSTACK_AUTO_INIT") == "1":
    auto_init()
del _os  # keep the aliased module out of the public package namespace
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
# Public API of the package: controls `from podstack import *` and documents
# the supported import surface; keep in sync with the imports above.
__all__ = [
    # Version
    "__version__",
    # Initialization
    "init",
    "auto_init",
    # Core
    "Client",
    "Notebook",
    "NotebookStatus",
    "Execution",
    "ExecutionStatus",
    # Exceptions
    "PodstackError",
    "AuthenticationError",
    "NotFoundError",
    "RateLimitError",
    "GPUNotAvailableError",
    "ExecutionTimeoutError",
    # Models
    "GPUType",
    "Environment",
    "Project",
    "Version",
    "WalletBalance",
    # Registry
    "registry",
    # GPU Runner
    "gpu_runner",
    "GPURunner",
    "GPUExecutionResult",
    "run_on_gpu",
    # Annotations
    "annotations",
    "gpu",
    "environment",
    "auto_shutdown",
    "experiment",
    "run",
    "model",
    "get_gpu_config",
    "get_environment",
    "get_auto_shutdown_minutes",
    "enable_remote_execution",
]
|