tetra-rp 0.6.0__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. tetra_rp/__init__.py +109 -19
  2. tetra_rp/cli/commands/__init__.py +1 -0
  3. tetra_rp/cli/commands/apps.py +143 -0
  4. tetra_rp/cli/commands/build.py +1082 -0
  5. tetra_rp/cli/commands/build_utils/__init__.py +1 -0
  6. tetra_rp/cli/commands/build_utils/handler_generator.py +176 -0
  7. tetra_rp/cli/commands/build_utils/lb_handler_generator.py +309 -0
  8. tetra_rp/cli/commands/build_utils/manifest.py +430 -0
  9. tetra_rp/cli/commands/build_utils/mothership_handler_generator.py +75 -0
  10. tetra_rp/cli/commands/build_utils/scanner.py +596 -0
  11. tetra_rp/cli/commands/deploy.py +580 -0
  12. tetra_rp/cli/commands/init.py +123 -0
  13. tetra_rp/cli/commands/resource.py +108 -0
  14. tetra_rp/cli/commands/run.py +296 -0
  15. tetra_rp/cli/commands/test_mothership.py +458 -0
  16. tetra_rp/cli/commands/undeploy.py +533 -0
  17. tetra_rp/cli/main.py +97 -0
  18. tetra_rp/cli/utils/__init__.py +1 -0
  19. tetra_rp/cli/utils/app.py +15 -0
  20. tetra_rp/cli/utils/conda.py +127 -0
  21. tetra_rp/cli/utils/deployment.py +530 -0
  22. tetra_rp/cli/utils/ignore.py +143 -0
  23. tetra_rp/cli/utils/skeleton.py +184 -0
  24. tetra_rp/cli/utils/skeleton_template/.env.example +4 -0
  25. tetra_rp/cli/utils/skeleton_template/.flashignore +40 -0
  26. tetra_rp/cli/utils/skeleton_template/.gitignore +44 -0
  27. tetra_rp/cli/utils/skeleton_template/README.md +263 -0
  28. tetra_rp/cli/utils/skeleton_template/main.py +44 -0
  29. tetra_rp/cli/utils/skeleton_template/mothership.py +55 -0
  30. tetra_rp/cli/utils/skeleton_template/pyproject.toml +58 -0
  31. tetra_rp/cli/utils/skeleton_template/requirements.txt +1 -0
  32. tetra_rp/cli/utils/skeleton_template/workers/__init__.py +0 -0
  33. tetra_rp/cli/utils/skeleton_template/workers/cpu/__init__.py +19 -0
  34. tetra_rp/cli/utils/skeleton_template/workers/cpu/endpoint.py +36 -0
  35. tetra_rp/cli/utils/skeleton_template/workers/gpu/__init__.py +19 -0
  36. tetra_rp/cli/utils/skeleton_template/workers/gpu/endpoint.py +61 -0
  37. tetra_rp/client.py +136 -33
  38. tetra_rp/config.py +29 -0
  39. tetra_rp/core/api/runpod.py +591 -39
  40. tetra_rp/core/deployment.py +232 -0
  41. tetra_rp/core/discovery.py +425 -0
  42. tetra_rp/core/exceptions.py +50 -0
  43. tetra_rp/core/resources/__init__.py +27 -9
  44. tetra_rp/core/resources/app.py +738 -0
  45. tetra_rp/core/resources/base.py +139 -4
  46. tetra_rp/core/resources/constants.py +21 -0
  47. tetra_rp/core/resources/cpu.py +115 -13
  48. tetra_rp/core/resources/gpu.py +182 -16
  49. tetra_rp/core/resources/live_serverless.py +153 -16
  50. tetra_rp/core/resources/load_balancer_sls_resource.py +440 -0
  51. tetra_rp/core/resources/network_volume.py +126 -31
  52. tetra_rp/core/resources/resource_manager.py +436 -35
  53. tetra_rp/core/resources/serverless.py +537 -120
  54. tetra_rp/core/resources/serverless_cpu.py +201 -0
  55. tetra_rp/core/resources/template.py +1 -59
  56. tetra_rp/core/utils/constants.py +10 -0
  57. tetra_rp/core/utils/file_lock.py +260 -0
  58. tetra_rp/core/utils/http.py +67 -0
  59. tetra_rp/core/utils/lru_cache.py +75 -0
  60. tetra_rp/core/utils/singleton.py +36 -1
  61. tetra_rp/core/validation.py +44 -0
  62. tetra_rp/execute_class.py +301 -0
  63. tetra_rp/protos/remote_execution.py +98 -9
  64. tetra_rp/runtime/__init__.py +1 -0
  65. tetra_rp/runtime/circuit_breaker.py +274 -0
  66. tetra_rp/runtime/config.py +12 -0
  67. tetra_rp/runtime/exceptions.py +49 -0
  68. tetra_rp/runtime/generic_handler.py +206 -0
  69. tetra_rp/runtime/lb_handler.py +189 -0
  70. tetra_rp/runtime/load_balancer.py +160 -0
  71. tetra_rp/runtime/manifest_fetcher.py +192 -0
  72. tetra_rp/runtime/metrics.py +325 -0
  73. tetra_rp/runtime/models.py +73 -0
  74. tetra_rp/runtime/mothership_provisioner.py +512 -0
  75. tetra_rp/runtime/production_wrapper.py +266 -0
  76. tetra_rp/runtime/reliability_config.py +149 -0
  77. tetra_rp/runtime/retry_manager.py +118 -0
  78. tetra_rp/runtime/serialization.py +124 -0
  79. tetra_rp/runtime/service_registry.py +346 -0
  80. tetra_rp/runtime/state_manager_client.py +248 -0
  81. tetra_rp/stubs/live_serverless.py +35 -17
  82. tetra_rp/stubs/load_balancer_sls.py +357 -0
  83. tetra_rp/stubs/registry.py +145 -19
  84. {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/METADATA +398 -60
  85. tetra_rp-0.24.0.dist-info/RECORD +99 -0
  86. {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/WHEEL +1 -1
  87. tetra_rp-0.24.0.dist-info/entry_points.txt +2 -0
  88. tetra_rp/core/pool/cluster_manager.py +0 -177
  89. tetra_rp/core/pool/dataclass.py +0 -18
  90. tetra_rp/core/pool/ex.py +0 -38
  91. tetra_rp/core/pool/job.py +0 -22
  92. tetra_rp/core/pool/worker.py +0 -19
  93. tetra_rp/core/resources/utils.py +0 -50
  94. tetra_rp/core/utils/json.py +0 -33
  95. tetra_rp-0.6.0.dist-info/RECORD +0 -39
  96. /tetra_rp/{core/pool → cli}/__init__.py +0 -0
  97. {tetra_rp-0.6.0.dist-info → tetra_rp-0.24.0.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tetra_rp
- Version: 0.6.0
+ Version: 0.24.0
  Summary: A Python library for distributed inference and serving of machine learning models
  Author-email: Marut Pandya <pandyamarut@gmail.com>, Patrick Rachford <prachford@icloud.com>, Dean Quinanola <dean.quinanola@runpod.io>
  License: MIT
@@ -8,43 +8,62 @@ Classifier: Development Status :: 3 - Alpha
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
- Requires-Python: <3.14,>=3.9
+ Requires-Python: <3.15,>=3.10
  Description-Content-Type: text/markdown
  Requires-Dist: cloudpickle>=3.1.1
- Requires-Dist: runpod~=1.7.9
+ Requires-Dist: runpod
  Requires-Dist: python-dotenv>=1.0.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: rich>=14.0.0
+ Requires-Dist: typer>=0.12.0
+ Requires-Dist: questionary>=2.0.0
+ Requires-Dist: pathspec>=0.11.0
+ Requires-Dist: tomli>=2.0.0; python_version < "3.11"
 
- # Tetra: Serverless computing for AI workloads
+ # Flash: Serverless computing for AI workloads
 
- Tetra is a Python SDK that streamlines the development and deployment of AI workflows on Runpod's [Serverless infrastructure](http://docs.runpod.io/serverless/overview). Write Python functions locally, and Tetra handles the infrastructure, provisioning GPUs and CPUs, managing dependencies, and transferring data, allowing you to focus on building AI applications.
+ Runpod Flash is a Python SDK that streamlines the development and deployment of AI workflows on Runpod's [Serverless infrastructure](http://docs.runpod.io/serverless/overview). Write Python functions locally, and Flash handles the infrastructure, provisioning GPUs and CPUs, managing dependencies, and transferring data, allowing you to focus on building AI applications.
 
- You can find a repository of prebuilt Tetra examples at [runpod/tetra-examples](https://github.com/runpod/tetra-examples).
+ You can find a repository of prebuilt Flash examples at [runpod/flash-examples](https://github.com/runpod/flash-examples).
 
  > [!Note]
  > **New feature - Consolidated template management:** `PodTemplate` overrides now seamlessly integrate with `ServerlessResource` defaults, providing more consistent resource configuration and reducing deployment complexity.
 
  ## Table of contents
 
- - [Requirements](#requirements)
- - [Getting started](#getting-started)
+ - [Overview](#overview)
+ - [Get started](#get-started)
+ - [Create Flash API endpoints](#create-flash-api-endpoints)
  - [Key concepts](#key-concepts)
  - [How it works](#how-it-works)
- - [Use cases](#use-cases)
  - [Advanced features](#advanced-features)
  - [Configuration](#configuration)
  - [Workflow examples](#workflow-examples)
+ - [Use cases](#use-cases)
+ - [Limitations](#limitations)
  - [Contributing](#contributing)
  - [Troubleshooting](#troubleshooting)
 
- ## Getting started
+ ## Overview
+
+ There are two basic modes for using Flash. You can:
+
+ - Build and run standalone Python scripts using the `@remote` decorator.
+ - Create Flash API endpoints with FastAPI (using the same script syntax).
+
+ Follow the steps in the next section to install Flash and create your first script before learning how to [create Flash API endpoints](#create-flash-api-endpoints).
 
- Before you can use Tetra, you'll need:
+ To learn more about how Flash works, see [Key concepts](#key-concepts).
+
+ ## Get started
+
+ Before you can use Flash, you'll need:
 
  - Python 3.10 (or higher) installed on your local machine.
  - A Runpod account with API key ([sign up here](https://runpod.io/console)).
  - Basic knowledge of Python and async programming.
 
- ### Step 1: Install Tetra
+ ### Step 1: Install Flash
 
  ```bash
  pip install tetra_rp
@@ -64,16 +83,20 @@ Or save it in a `.env` file in your project directory:
  echo "RUNPOD_API_KEY=[YOUR_API_KEY]" > .env
  ```
 
- ### Step 3: Write your first Tetra function
+ ### Step 3: Create your first Flash function
 
  Add the following code to a new Python file:
 
  ```python
  import asyncio
  from tetra_rp import remote, LiveServerless
+ from dotenv import load_dotenv
+
+ # Uncomment if using a .env file
+ # load_dotenv()
 
  # Configure GPU resources
- gpu_config = LiveServerless(name="tetra-quickstart")
+ gpu_config = LiveServerless(name="flash-quickstart")
 
  @remote(
      resource_config=gpu_config,
@@ -108,11 +131,169 @@ Run the example:
  python your_script.py
  ```
 
+ The first time you run the script, it will take significantly longer than subsequent runs (roughly a minute on the first run versus about a second afterward), because your endpoint must first be initialized.
+
+ When it's finished, you should see output similar to this:
+
+ ```bash
+ 2025-11-19 12:35:15,109 | INFO | Created endpoint: rb50waqznmn2kg - flash-quickstart-fb
+ 2025-11-19 12:35:15,112 | INFO | URL: https://console.runpod.io/serverless/user/endpoint/rb50waqznmn2kg
+ 2025-11-19 12:35:15,114 | INFO | LiveServerless:rb50waqznmn2kg | API /run
+ 2025-11-19 12:35:15,655 | INFO | LiveServerless:rb50waqznmn2kg | Started Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2
+ 2025-11-19 12:35:15,762 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | Status: IN_QUEUE
+ 2025-11-19 12:35:16,301 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | .
+ 2025-11-19 12:35:17,756 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | ..
+ 2025-11-19 12:35:22,610 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | ...
+ 2025-11-19 12:35:37,163 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | ....
+ 2025-11-19 12:35:59,248 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | .....
+ 2025-11-19 12:36:09,983 | INFO | Job:b0b341e7-e460-4305-9acd-fc2dfd1bd65c-u2 | Status: COMPLETED
+ 2025-11-19 12:36:10,068 | INFO | Worker:icmkdgnrmdf8gz | Delay Time: 51842 ms
+ 2025-11-19 12:36:10,068 | INFO | Worker:icmkdgnrmdf8gz | Execution Time: 1533 ms
+ 2025-11-19 17:36:07,485 | INFO | Installing Python dependencies: ['torch', 'numpy']
+ Sum: 15
+ Computed on: NVIDIA GeForce RTX 4090
+ ```
+
+ ## Create Flash API endpoints
+
+ > [!Note]
+ > **Flash API endpoints are currently only available for local testing:** Using `flash run` will start the API server on your local machine. Future updates will add the ability to build and deploy API servers for production deployments.
+
+ You can use Flash to deploy and serve API endpoints that compute responses using GPU and CPU Serverless workers. These endpoints run scripts using the same `@remote` decorator [demonstrated above](#get-started).
+
+ ### Step 1: Initialize a new project
+
+ Use the `flash init` command to generate a structured project template with a preconfigured FastAPI application entry point.
+
+ Run this command to initialize a new project directory:
+
+ ```bash
+ flash init my_project
+ ```
+
+ You can also initialize your current directory:
+
+ ```bash
+ flash init
+ ```
+
+ ### Step 2: Explore the project template
+
+ This is the structure of the project template created by `flash init`:
+
+ ```txt
+ my_project/
+ ├── main.py              # FastAPI application entry point
+ ├── workers/
+ │   ├── gpu/             # GPU worker example
+ │   │   ├── __init__.py  # FastAPI router
+ │   │   └── endpoint.py  # GPU script with @remote decorated function
+ │   └── cpu/             # CPU worker example
+ │       ├── __init__.py  # FastAPI router
+ │       └── endpoint.py  # CPU script with @remote decorated function
+ ├── .env                 # Environment variable template
+ ├── .gitignore           # Git ignore patterns
+ ├── .flashignore         # Flash deployment ignore patterns
+ ├── requirements.txt     # Python dependencies
+ └── README.md            # Project documentation
+ ```
+
+ This template includes:
+
+ - A FastAPI application entry point and routers.
+ - Templates for Python dependencies, `.env`, `.gitignore`, etc.
+ - Flash scripts (`endpoint.py`) for both GPU and CPU workers, which include:
+   - Pre-configured worker scaling limits using the `LiveServerless()` object.
+   - A `@remote` decorated function that returns a response from a worker.
+
+ When you start the FastAPI server, it creates API endpoints at `/gpu/hello` and `/cpu/hello`, which call the remote function defined in their respective `endpoint.py` files.
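+ As a concrete illustration, a worker router along these lines could wire a remote function into FastAPI (a hedged sketch only; the function name `hello` and the exact router layout are assumptions, not the literal template code):
+
+ ```python
+ # workers/gpu/__init__.py -- hypothetical sketch of a generated router
+ from fastapi import APIRouter
+
+ from .endpoint import hello  # assumed @remote decorated function in endpoint.py
+
+ router = APIRouter(prefix="/gpu", tags=["GPU Workers"])
+
+ @router.post("/hello")
+ async def gpu_hello(payload: dict):
+     # Awaiting the @remote function runs it on a Serverless worker
+     # and returns the result to this local FastAPI process.
+     result = await hello(payload.get("message", ""))
+     return {"result": result}
+ ```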
209
+
210
+ ### Step 3: Install Python dependencies
211
+
212
+ After initializing the project, navigate into the project directory:
213
+
214
+ ```bash
215
+ cd my_project
216
+ ```
217
+
218
+ Install required dependencies:
219
+
220
+ ```bash
221
+ pip install -r requirements.txt
222
+ ```
223
+
224
+ ### Step 4: Configure your API key
225
+
226
+ Open the `.env` template file in a text editor and add your [Runpod API key](https://docs.runpod.io/get-started/api-keys):
227
+
228
+ ```bash
229
+ # Use your text editor of choice, e.g.
230
+ cursor .env
231
+ ```
232
+
233
+ Remove the `#` symbol from the beginning of the `RUNPOD_API_KEY` line and replace `your_api_key_here` with your actual Runpod API key:
234
+
235
+ ```txt
236
+ RUNPOD_API_KEY=your_api_key_here
237
+ # FLASH_HOST=localhost
238
+ # FLASH_PORT=8888
239
+ # LOG_LEVEL=INFO
240
+ ```
241
+
242
+ Save the file and close it.
243
+
244
+ ### Step 5: Start the local API server
245
+
246
+ Use `flash run` to start the API server:
247
+
248
+ ```bash
249
+ flash run
250
+ ```
251
+
252
+ Open a new terminal tab or window and test your GPU API using cURL:
253
+
254
+ ```bash
255
+ curl -X POST http://localhost:8888/gpu/hello \
256
+ -H "Content-Type: application/json" \
257
+ -d '{"message": "Hello from the GPU!"}'
258
+ ```
259
+
260
+ If you switch back to the terminal tab where you used `flash run`, you'll see the details of the job's progress.
261
+
262
+ ### Faster testing with auto-provisioning
263
+
264
+ For development with multiple endpoints, use `--auto-provision` to deploy all resources before testing:
265
+
266
+ ```bash
267
+ flash run --auto-provision
268
+ ```
269
+
270
+ This eliminates cold-start delays by provisioning all serverless endpoints upfront. Endpoints are cached and reused across server restarts, making subsequent runs much faster. Resources are identified by name, so the same endpoint won't be re-deployed if configuration hasn't changed.
271
+
272
+ ### Step 6: Open the API explorer
273
+
274
+ Besides starting the API server, `flash run` also starts an interactive API explorer. Point your web browser at [http://localhost:8888/docs](http://localhost:8888/docs) to explore the API.
275
+
276
+ To run remote functions in the explorer:
277
+
278
+ 1. Expand one of the functions under **GPU Workers** or **CPU Workers**.
279
+ 2. Click **Try it out** and then **Execute**
280
+
281
+ You'll get a response from your workers right in the explorer.
282
+
283
+ ### Step 7: Customize your API
284
+
285
+ To customize your API endpoint and functionality:
286
+
287
+ 1. Add/edit remote functions in your `endpoint.py` files.
288
+ 2. Test the scripts individually by running `python endpoint.py`.
289
+ 3. Configure your FastAPI routers by editing the `__init__.py` files.
290
+ 4. Add any new endpoints to your `main.py` file.
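+
+ Wiring an additional router into `main.py` might look like this (an illustrative sketch; `workers.gpu` and `workers.cpu` follow the template layout above, while `workers.audio` is a hypothetical new worker):
+
+ ```python
+ # main.py -- hedged sketch, not the literal generated file
+ from fastapi import FastAPI
+
+ from workers.gpu import router as gpu_router
+ from workers.cpu import router as cpu_router
+ # from workers.audio import router as audio_router  # hypothetical addition
+
+ app = FastAPI(title="My Flash API")
+ app.include_router(gpu_router)
+ app.include_router(cpu_router)
+ # app.include_router(audio_router)
+ ```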
+
  ## Key concepts
 
  ### Remote functions
 
- Tetra's `@remote` decorator marks functions for execution on Runpod's infrastructure. Everything inside the decorated function runs remotely, while code outside runs locally.
+ The Flash `@remote` decorator marks functions for execution on Runpod's infrastructure. Everything inside the decorated function runs remotely, while code outside runs locally.
 
  ```python
  @remote(resource_config=config, dependencies=["pandas"])
@@ -129,7 +310,7 @@ async def main():
 
  ### Resource configuration
 
- Tetra provides fine-grained control over hardware allocation through configuration objects:
+ Flash provides fine-grained control over hardware allocation through configuration objects:
 
  ```python
  from tetra_rp import LiveServerless, GpuGroup, CpuInstanceType, PodTemplate
@@ -152,7 +333,7 @@ cpu_config = LiveServerless(
 
  ### Dependency management
 
- Specify Python packages in the decorator, and Tetra installs them automatically:
+ Specify Python packages in the decorator, and Flash installs them automatically:
 
  ```python
  @remote(
@@ -181,35 +362,58 @@ results = await asyncio.gather(
  )
  ```
 
+ ### Load-balanced endpoints with HTTP routing
+
+ For API endpoints requiring low-latency HTTP access with direct routing, use load-balanced endpoints:
+
+ ```python
+ from tetra_rp import LiveLoadBalancer, remote
+
+ api = LiveLoadBalancer(name="api-service")
+
+ @remote(api, method="POST", path="/api/process")
+ async def process_data(x: int, y: int):
+     return {"result": x + y}
+
+ @remote(api, method="GET", path="/api/health")
+ def health_check():
+     return {"status": "ok"}
+
+ # Call functions directly
+ result = await process_data(5, 3)  # → {"result": 8}
+ ```
+
+ **Key differences from queue-based endpoints:**
+ - **Direct HTTP routing** - Requests are routed directly to workers, with no queue.
+ - **Lower latency** - No queuing overhead.
+ - **Custom HTTP methods** - GET, POST, PUT, DELETE, and PATCH support.
+ - **No automatic retries** - Users handle errors directly.
+
+ Load-balanced endpoints are ideal for REST APIs, webhooks, and real-time services. Queue-based endpoints are better for batch processing and fault-tolerant workflows.
+
+ For detailed information:
+ - **User guide:** [Using @remote with Load-Balanced Endpoints](docs/Using_Remote_With_LoadBalancer.md)
+ - **Runtime architecture:** [LoadBalancer Runtime Architecture](docs/LoadBalancer_Runtime_Architecture.md) - details on deployment, request flows, and execution
+
  ## How it works
 
- Tetra orchestrates workflow execution through a sophisticated multi-step process:
+ Flash orchestrates workflow execution through a multi-step process:
 
- 1. **Function identification**: The `@remote` decorator marks functions for remote execution, enabling Tetra to distinguish between local and remote operations.
- 2. **Dependency analysis**: Tetra automatically analyzes function dependencies to construct an optimal execution order, ensuring data flows correctly between sequential and parallel operations.
- 3. **Resource provisioning and execution**: For each remote function, Tetra:
+ 1. **Function identification**: The `@remote` decorator marks functions for remote execution, enabling Flash to distinguish between local and remote operations.
+ 2. **Dependency analysis**: Flash automatically analyzes function dependencies to construct an optimal execution order, ensuring data flows correctly between sequential and parallel operations.
+ 3. **Resource provisioning and execution**: For each remote function, Flash:
     - Dynamically provisions endpoint and worker resources on Runpod's infrastructure.
     - Serializes and securely transfers input data to the remote worker.
     - Executes the function on the remote infrastructure with the specified GPU or CPU resources.
     - Returns results to your local environment for further processing.
  4. **Data orchestration**: Results flow seamlessly between functions according to your local Python code structure, maintaining the same programming model whether functions run locally or remotely. The sketch below illustrates this flow.
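+ This flow, in heavily hedged pseudocode (the helper names `serialize_args` and `call_remote` and the `endpoint.run` client API are assumptions for illustration, not the library's real internals):
+
+ ```python
+ # Hypothetical sketch of what a @remote call does under the hood.
+ import base64
+
+ import cloudpickle  # declared dependency of tetra_rp
+
+ def serialize_args(args, kwargs):
+     # Inputs are pickled and encoded for transport to the worker.
+     return base64.b64encode(cloudpickle.dumps((args, kwargs))).decode()
+
+ async def call_remote(endpoint, func_name, *args, **kwargs):
+     payload = {"function": func_name, "data": serialize_args(args, kwargs)}
+     raw = await endpoint.run(payload)  # assumed endpoint client API
+     # The worker executes the function and returns a serialized result.
+     return cloudpickle.loads(base64.b64decode(raw))
+ ```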
 
- ## Use cases
-
- Tetra is well-suited for a diverse range of AI and data processing workloads:
-
- - **Multi-modal AI pipelines**: Orchestrate unified workflows combining text, image, and audio models with GPU acceleration.
- - **Distributed model training**: Scale training operations across multiple GPU workers for faster model development.
- - **AI research experimentation**: Rapidly prototype and test complex model combinations without infrastructure overhead.
- - **Production inference systems**: Deploy sophisticated multi-stage inference pipelines for real-world applications.
- - **Data processing workflows**: Efficiently process large datasets using CPU workers for general computation and GPU workers for accelerated tasks.
- - **Hybrid GPU/CPU workflows**: Optimize cost and performance by combining CPU preprocessing with GPU inference.
 
  ## Advanced features
 
  ### Custom Docker images
 
- `LiveServerless` resources use a fixed Docker image that's optimized for Tetra runtime, and supports full remote code execution. For specialized environments that require a custom Docker image, use `ServerlessEndpoint` or `CpuServerlessEndpoint`:
+ `LiveServerless` resources use a fixed Docker image that's optimized for the Flash runtime and supports full remote code execution. For specialized environments that require a custom Docker image, use `ServerlessEndpoint` or `CpuServerlessEndpoint`:
 
  ```python
  from tetra_rp import ServerlessEndpoint
@@ -223,9 +427,9 @@ custom_gpu = ServerlessEndpoint(
 
  Unlike `LiveServerless`, these endpoints only support dictionary payloads in the form of `{"input": {...}}` (similar to a traditional [Serverless endpoint request](https://docs.runpod.io/serverless/endpoints/send-requests)), and cannot execute arbitrary Python functions remotely.
 
- ### Persistent storage
+ ### Persistent storage with network volumes
 
- Attach network volumes for model caching:
+ Attach [network volumes](https://docs.runpod.io/storage/network-volumes) for persistent storage across workers and endpoints:
 
  ```python
  config = LiveServerless(
@@ -246,11 +450,134 @@ config = LiveServerless(
  )
  ```
 
+ Environment variables are excluded from configuration hashing, so changing environment values won't trigger endpoint recreation. This allows different processes to load environment variables from `.env` files without causing false drift detection. Only structural changes (such as GPU type, image, or template modifications) trigger endpoint updates.
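+
+ The idea can be pictured with a small sketch (a hedged illustration only; Flash's real hashing code and field names are not shown here):
+
+ ```python
+ # Hypothetical sketch: hash only structural fields so env changes don't cause drift.
+ import hashlib
+ import json
+
+ def config_hash(config: dict) -> str:
+     structural = {k: v for k, v in config.items() if k != "env"}
+     canonical = json.dumps(structural, sort_keys=True)
+     return hashlib.sha256(canonical.encode()).hexdigest()
+
+ a = config_hash({"name": "api", "gpus": ["ADA_24"], "env": {"LOG_LEVEL": "INFO"}})
+ b = config_hash({"name": "api", "gpus": ["ADA_24"], "env": {"LOG_LEVEL": "DEBUG"}})
+ assert a == b  # env differences don't change the identity hash
+ ```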
+
+ ### Build process and handler generation
+
+ Flash uses a multi-stage build process to package your application for deployment. Understanding how handlers are generated helps you debug issues and optimize your deployments.
+
+ #### How Flash builds your application
+
+ When you run `flash build`, the following happens:
+
+ 1. **Discovery**: Flash scans your code for `@remote` decorated functions.
+ 2. **Grouping**: Functions are grouped by their `resource_config` (see the sketch after this list).
+ 3. **Handler generation**: For each resource config, Flash generates a lightweight handler file.
+ 4. **Manifest creation**: A `flash_manifest.json` file maps functions to their endpoints.
+ 5. **Dependency installation**: Python packages are installed with Linux x86_64 compatibility.
+ 6. **Packaging**: Everything is bundled into `archive.tar.gz` for deployment.
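+
+ The grouping step can be pictured like this (an illustrative sketch under assumed attribute names; the real scanner in `tetra_rp/cli/commands/build_utils/scanner.py` is more involved):
+
+ ```python
+ # Hypothetical sketch: group discovered @remote functions by resource config.
+ from collections import defaultdict
+
+ def group_by_resource(functions):
+     groups = defaultdict(list)
+     for func in functions:
+         # Assumes the decorator records its config on the wrapped function.
+         config = getattr(func, "resource_config", None)
+         groups[getattr(config, "name", "default")].append(func.__name__)
+     return dict(groups)
+
+ # e.g. {"gpu_config": ["inference"], "cpu_config": ["preprocess"]}
+ ```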
+
+ #### Cross-platform builds
+
+ Flash automatically handles cross-platform builds, ensuring your deployments work correctly regardless of your development platform:
+
+ - **Automatic platform targeting**: Dependencies are installed for Linux x86_64 (Runpod's serverless platform), even when building on macOS or Windows.
+ - **Python version matching**: The build uses your current Python version to ensure package compatibility.
+ - **Binary wheel enforcement**: Only pre-built binary wheels are used, preventing platform-specific compilation issues.
+
+ This means you can build on macOS ARM64, Windows, or any other platform, and the resulting package will run correctly on Runpod serverless.
+
+ #### Handler architecture
+
+ Flash uses a factory pattern for handlers to eliminate code duplication:
+
+ ```python
+ # Generated handler (handler_gpu_config.py)
+ from tetra_rp.runtime.generic_handler import create_handler
+ from workers.gpu import process_data
+
+ FUNCTION_REGISTRY = {
+     "process_data": process_data,
+ }
+
+ handler = create_handler(FUNCTION_REGISTRY)
+ ```
+
+ This approach provides:
+ - **Single source of truth**: All handler logic lives in one place.
+ - **Easier maintenance**: Bug fixes don't require rebuilding projects.
+
+ #### Cross-endpoint function calls
+
+ Flash enables functions on different endpoints to call each other. The runtime automatically discovers endpoints using the manifest and routes calls appropriately:
+
+ ```python
+ # CPU endpoint function
+ @remote(resource_config=cpu_config)
+ def preprocess(data):
+     clean_data = [item for item in data if item is not None]
+     return clean_data
+
+ # GPU endpoint function
+ @remote(resource_config=gpu_config)
+ async def inference(data):
+     # Can call the CPU endpoint function transparently
+     clean = preprocess(data)
+     return {"model_input": clean}
+ ```
+
+ The runtime wrapper handles service discovery and routing automatically.
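+
+ Put differently, when `inference` calls `preprocess`, the wrapper looks up which endpoint hosts `preprocess` and forwards the call there. A rough sketch of that lookup (hypothetical manifest schema; not the actual runtime code):
+
+ ```python
+ # Hypothetical sketch of manifest-based call routing.
+ import json
+
+ def resolve_endpoint(func_name: str, manifest_path: str = "flash_manifest.json") -> str:
+     with open(manifest_path) as f:
+         manifest = json.load(f)
+     # Assumes the manifest maps function names to their endpoint URLs.
+     return manifest["functions"][func_name]["endpoint_url"]
+ ```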
+
+ #### Build artifacts
+
+ After `flash build` completes:
+ - `.flash/.build/`: Temporary build directory (removed unless you pass `--keep-build`).
+ - `.flash/archive.tar.gz`: Deployment package.
+ - `.flash/flash_manifest.json`: Service discovery configuration.
+
+ For more details on the handler architecture, see [docs/Runtime_Generic_Handler.md](docs/Runtime_Generic_Handler.md).
+
+ For information on load-balanced endpoints (required for Mothership and HTTP services), see [docs/Load_Balancer_Endpoints.md](docs/Load_Balancer_Endpoints.md).
+
+ #### Troubleshooting build issues
+
+ **No @remote functions found:**
+ - Ensure your functions are decorated with `@remote(resource_config)`.
+ - Check that Python files are not excluded by `.gitignore` or `.flashignore`.
+ - Verify function decorators have valid syntax.
+
+ **Handler generation failed:**
+ - Check for syntax errors in your Python files (these will be logged).
+ - Verify all imports in your worker modules are available.
+ - Ensure resource config variables (e.g., `gpu_config`) are defined before functions reference them.
+ - Use `--keep-build` to inspect generated handler files in `.flash/.build/`.
+
+ **Build succeeded but deployment failed:**
+ - Verify all function imports work in the deployment environment.
+ - Check that environment variables required by your functions are available.
+ - Review the generated `flash_manifest.json` for correct function mappings.
+
+ **Dependency installation failed:**
+ - If a package doesn't have pre-built Linux x86_64 wheels, the build will fail with an error.
+ - For newer Python versions (3.13+), some packages may require manylinux_2_27 or higher.
+ - Ensure you have standard pip installed (`python -m ensurepip --upgrade`) for best compatibility.
+ - `uv pip` has known issues with newer manylinux tags, so standard pip is recommended.
+ - Check PyPI to verify the package supports your Python version on Linux.
+
+ #### Managing bundle size
+
+ Runpod serverless has a **500MB deployment limit**. Exceeding this limit will cause deployment failures.
+
+ Use `--exclude` to skip packages already present in your worker-tetra Docker image:
+
+ ```bash
+ # For GPU deployments (PyTorch pre-installed)
+ flash build --exclude torch,torchvision,torchaudio
+
+ # Check your resource config to determine which base image you're using
+ ```
+
+ **Which packages to exclude depends on your resource config:**
+ - **GPU resources** → PyTorch images have torch/torchvision/torchaudio pre-installed.
+ - **CPU resources** → Python slim images have no ML frameworks pre-installed.
+ - **Load-balanced** → Same as above, depending on the GPU vs. CPU variant.
+
+ See [worker-tetra](https://github.com/runpod-workers/worker-tetra) for base image details.
+
  ## Configuration
 
  ### GPU configuration parameters
 
- The following parameters can be used with `LiveServerless` (full remote code execution) and `ServerlessEndpoint` (Dictionary payload only) to configure your Runpod GPU endpoints:
+ The following parameters can be used with `LiveServerless` (full remote code execution) and `ServerlessEndpoint` (dictionary payload only) to configure your Runpod GPU endpoints:
 
  | Parameter | Description | Default | Example Values |
  |--------------------|-------------------------------------------------|---------------|-------------------------------------|
@@ -281,7 +608,7 @@ The same GPU configuration parameters above apply to `LiveServerless` (full remo
  | Feature | LiveServerless | ServerlessEndpoint | CpuServerlessEndpoint |
  |---------|----------------|-------------------|----------------------|
- | **Remote code execution** | ✅ Full Python function execution | ❌ Dictionary payloads only | ❌ Dictionary payloads only |
+ | **Remote code execution** | ✅ Full Python function execution | ❌ Dictionary payload only | ❌ Dictionary payload only |
  | **Custom Docker images** | ❌ Fixed optimized images | ✅ Any Docker image | ✅ Any Docker image |
  | **Use case** | Dynamic remote functions | Traditional API endpoints | Traditional CPU endpoints |
  | **Function returns** | Any Python object | Dictionary only | Dictionary only |
@@ -299,6 +626,7 @@ Some common GPU groups available through `GpuGroup`:
 
 
  ### Available CPU instance types
+
  - `CpuInstanceType.CPU3G_1_4` - (cpu3g-1-4) 3rd gen general purpose, 1 vCPU, 4GB RAM
  - `CpuInstanceType.CPU3G_2_8` - (cpu3g-2-8) 3rd gen general purpose, 2 vCPU, 8GB RAM
  - `CpuInstanceType.CPU3G_4_16` - (cpu3g-4-16) 3rd gen general purpose, 4 vCPU, 16GB RAM
@@ -358,6 +686,7 @@ if __name__ == "__main__":
  ```python
  import asyncio
  from tetra_rp import remote, LiveServerless, GpuGroup, PodTemplate
+ import base64
 
  # Advanced GPU configuration with consolidated template overrides
  sd_config = LiveServerless(
@@ -735,39 +1064,48 @@ async def text_classification_pipeline(train_texts, train_labels, test_texts):
 
  ### More examples
 
- You can find many more examples in the [tetra-examples repository](https://github.com/runpod/tetra-examples).
+ You can find many more examples in the [flash-examples repository](https://github.com/runpod/flash-examples).
 
- You can also install the examples as a submodule:
+ ## Use cases
 
- ```bash
- git clone https://github.com/runpod/tetra-examples.git
- cd tetra-examples
- python -m examples.example
- python -m examples.image_gen
- python -m examples.matrix_operations
- ```
+ Flash is well-suited for a diverse range of AI and data processing workloads:
+
+ - **Multi-modal AI pipelines**: Orchestrate unified workflows combining text, image, and audio models with GPU acceleration.
+ - **Distributed model training**: Scale training operations across multiple GPU workers for faster model development.
+ - **AI research experimentation**: Rapidly prototype and test complex model combinations without infrastructure overhead.
+ - **Production inference systems**: Deploy sophisticated multi-stage inference pipelines for real-world applications.
+ - **Data processing workflows**: Efficiently process large datasets using CPU workers for general computation and GPU workers for accelerated tasks.
+ - **Hybrid GPU/CPU workflows**: Optimize cost and performance by combining CPU preprocessing with GPU inference.
+
+ ## Limitations
+
+ - Serverless deployments using Flash are currently restricted to the `EU-RO-1` datacenter.
+ - Flash is designed primarily for local development and live-testing workflows.
+ - While Flash supports provisioning traditional Serverless endpoints (non-Live endpoints), the interface for interacting with these resources will change in upcoming releases. For now, focus on using `LiveServerless` for the most stable development experience, as it provides full remote code execution without requiring custom Docker images.
+ - As you work through the Flash examples repository, you'll accumulate multiple endpoints in your Runpod account. These endpoints persist until manually deleted through the Runpod console. A `flash undeploy` command is in development to streamline cleanup, but for now, regular manual deletion of unused endpoints is recommended to avoid unnecessary charges.
+ - Finally, be aware of your account's maximum worker capacity limits. Flash can rapidly scale workers across multiple endpoints, and you may hit capacity constraints faster than with traditional deployment patterns. If you find yourself consistently reaching worker limits, contact Runpod support to increase your account's capacity allocation.
 
 
  ## Contributing
 
- We welcome contributions to Tetra! Whether you're fixing bugs, adding features, or improving documentation, your help makes this project better.
+ We welcome contributions to Flash! Whether you're fixing bugs, adding features, or improving documentation, your help makes this project better.
 
- ### Development Setup
+ ### Development setup
 
- 1. Fork and clone the repository
- 2. Set up your development environment following the project guidelines
- 3. Make your changes following our coding standards
- 4. Test your changes thoroughly
- 5. Submit a pull request
+ 1. Fork and clone the repository.
+ 2. Set up your development environment following the project guidelines.
+ 3. Make your changes following our coding standards.
+ 4. Test your changes thoroughly.
+ 5. Submit a pull request.
 
- ### Release Process
+ ### Release process
 
  This project uses an automated release system built on Release Please. For detailed information about how releases work, including conventional commits, versioning, and the CI/CD pipeline, see our [Release System Documentation](RELEASE_SYSTEM.md).
 
  **Quick reference for contributors:**
  - Use conventional commits: `feat:`, `fix:`, `docs:`, etc.
- - CI automatically runs quality checks on all PRs
- - Release PRs are created automatically when changes are merged to main
- - Releases are published to PyPI automatically when release PRs are merged
+ - CI automatically runs quality checks on all PRs.
+ - Release PRs are created automatically when changes are merged to main.
+ - Releases are published to PyPI automatically when release PRs are merged.
 
  ## Troubleshooting
@@ -792,15 +1130,15 @@ def fetch_data(url):
 
  ### Performance optimization
 
- - Set `workersMin=1` to keep workers warm and avoid cold starts
- - Use `idleTimeout` to balance cost and responsiveness
- - Choose appropriate GPU types for your workload
+ - Set `workersMin=1` to keep workers warm and avoid cold starts.
+ - Use `idleTimeout` to balance cost and responsiveness.
+ - Choose appropriate GPU types for your workload.
 
  ## License
 
  This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
 
  <p align="center">
-   <a href="https://github.com/yourusername/tetra">Tetra</a> •
+   <a href="https://github.com/runpod/tetra-rp">Flash</a> •
    <a href="https://runpod.io">Runpod</a>
  </p>