@aws/ml-container-creator 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +202 -0
- package/LICENSE-THIRD-PARTY +68620 -0
- package/NOTICE +2 -0
- package/README.md +106 -0
- package/bin/cli.js +365 -0
- package/config/defaults.json +32 -0
- package/config/presets/transformers-djl.json +26 -0
- package/config/presets/transformers-gpu.json +24 -0
- package/config/presets/transformers-lmi.json +27 -0
- package/package.json +129 -0
- package/servers/README.md +419 -0
- package/servers/base-image-picker/catalogs/model-servers.json +1191 -0
- package/servers/base-image-picker/catalogs/python-slim.json +38 -0
- package/servers/base-image-picker/catalogs/triton-backends.json +51 -0
- package/servers/base-image-picker/catalogs/triton.json +38 -0
- package/servers/base-image-picker/index.js +495 -0
- package/servers/base-image-picker/manifest.json +17 -0
- package/servers/base-image-picker/package.json +15 -0
- package/servers/hyperpod-cluster-picker/LICENSE +202 -0
- package/servers/hyperpod-cluster-picker/index.js +424 -0
- package/servers/hyperpod-cluster-picker/manifest.json +14 -0
- package/servers/hyperpod-cluster-picker/package.json +17 -0
- package/servers/instance-recommender/LICENSE +202 -0
- package/servers/instance-recommender/catalogs/instances.json +852 -0
- package/servers/instance-recommender/index.js +284 -0
- package/servers/instance-recommender/manifest.json +16 -0
- package/servers/instance-recommender/package.json +15 -0
- package/servers/lib/LICENSE +202 -0
- package/servers/lib/bedrock-client.js +160 -0
- package/servers/lib/custom-validators.js +46 -0
- package/servers/lib/dynamic-resolver.js +36 -0
- package/servers/lib/package.json +11 -0
- package/servers/lib/schemas/image-catalog.schema.json +185 -0
- package/servers/lib/schemas/instances.schema.json +124 -0
- package/servers/lib/schemas/manifest.schema.json +64 -0
- package/servers/lib/schemas/model-catalog.schema.json +91 -0
- package/servers/lib/schemas/regions.schema.json +26 -0
- package/servers/lib/schemas/triton-backends.schema.json +51 -0
- package/servers/model-picker/catalogs/jumpstart-public.json +66 -0
- package/servers/model-picker/catalogs/popular-diffusors.json +88 -0
- package/servers/model-picker/catalogs/popular-transformers.json +226 -0
- package/servers/model-picker/index.js +1693 -0
- package/servers/model-picker/manifest.json +18 -0
- package/servers/model-picker/package.json +20 -0
- package/servers/region-picker/LICENSE +202 -0
- package/servers/region-picker/catalogs/regions.json +263 -0
- package/servers/region-picker/index.js +230 -0
- package/servers/region-picker/manifest.json +16 -0
- package/servers/region-picker/package.json +15 -0
- package/src/app.js +1007 -0
- package/src/copy-tpl.js +77 -0
- package/src/lib/accelerator-validator.js +39 -0
- package/src/lib/asset-manager.js +385 -0
- package/src/lib/aws-profile-parser.js +181 -0
- package/src/lib/bootstrap-command-handler.js +1647 -0
- package/src/lib/bootstrap-config.js +238 -0
- package/src/lib/ci-register-helpers.js +124 -0
- package/src/lib/ci-report-helpers.js +158 -0
- package/src/lib/ci-stage-helpers.js +268 -0
- package/src/lib/cli-handler.js +529 -0
- package/src/lib/comment-generator.js +544 -0
- package/src/lib/community-reports-validator.js +91 -0
- package/src/lib/config-manager.js +2106 -0
- package/src/lib/configuration-exporter.js +204 -0
- package/src/lib/configuration-manager.js +695 -0
- package/src/lib/configuration-matcher.js +221 -0
- package/src/lib/cpu-validator.js +36 -0
- package/src/lib/cuda-validator.js +57 -0
- package/src/lib/deployment-config-resolver.js +103 -0
- package/src/lib/deployment-entry-schema.js +125 -0
- package/src/lib/deployment-registry.js +598 -0
- package/src/lib/docker-introspection-validator.js +51 -0
- package/src/lib/engine-prefix-resolver.js +60 -0
- package/src/lib/huggingface-client.js +172 -0
- package/src/lib/key-value-parser.js +37 -0
- package/src/lib/known-flags-validator.js +200 -0
- package/src/lib/manifest-cli.js +280 -0
- package/src/lib/mcp-client.js +303 -0
- package/src/lib/mcp-command-handler.js +532 -0
- package/src/lib/neuron-validator.js +80 -0
- package/src/lib/parameter-schema-validator.js +284 -0
- package/src/lib/prompt-runner.js +1349 -0
- package/src/lib/prompts.js +1138 -0
- package/src/lib/registry-command-handler.js +519 -0
- package/src/lib/registry-loader.js +198 -0
- package/src/lib/rocm-validator.js +80 -0
- package/src/lib/schema-validator.js +157 -0
- package/src/lib/sensitive-redactor.js +59 -0
- package/src/lib/template-engine.js +156 -0
- package/src/lib/template-manager.js +341 -0
- package/src/lib/validation-engine.js +314 -0
- package/src/prompt-adapter.js +63 -0
- package/templates/Dockerfile +300 -0
- package/templates/IAM_PERMISSIONS.md +84 -0
- package/templates/MIGRATION.md +488 -0
- package/templates/PROJECT_README.md +439 -0
- package/templates/TEMPLATE_SYSTEM.md +243 -0
- package/templates/buildspec.yml +64 -0
- package/templates/code/chat_template.jinja +1 -0
- package/templates/code/flask/gunicorn_config.py +35 -0
- package/templates/code/flask/wsgi.py +10 -0
- package/templates/code/model_handler.py +387 -0
- package/templates/code/serve +300 -0
- package/templates/code/serve.py +175 -0
- package/templates/code/serving.properties +105 -0
- package/templates/code/start_server.py +39 -0
- package/templates/code/start_server.sh +39 -0
- package/templates/diffusors/Dockerfile +72 -0
- package/templates/diffusors/patch_image_api.py +35 -0
- package/templates/diffusors/serve +115 -0
- package/templates/diffusors/start_server.sh +114 -0
- package/templates/do/.gitkeep +1 -0
- package/templates/do/README.md +541 -0
- package/templates/do/build +83 -0
- package/templates/do/ci +681 -0
- package/templates/do/clean +811 -0
- package/templates/do/config +260 -0
- package/templates/do/deploy +1560 -0
- package/templates/do/export +306 -0
- package/templates/do/logs +319 -0
- package/templates/do/manifest +12 -0
- package/templates/do/push +119 -0
- package/templates/do/register +580 -0
- package/templates/do/run +113 -0
- package/templates/do/submit +417 -0
- package/templates/do/test +1147 -0
- package/templates/hyperpod/configmap.yaml +24 -0
- package/templates/hyperpod/deployment.yaml +71 -0
- package/templates/hyperpod/pvc.yaml +42 -0
- package/templates/hyperpod/service.yaml +17 -0
- package/templates/nginx-diffusors.conf +74 -0
- package/templates/nginx-predictors.conf +47 -0
- package/templates/nginx-tensorrt.conf +74 -0
- package/templates/requirements.txt +61 -0
- package/templates/sample_model/test_inference.py +123 -0
- package/templates/sample_model/train_abalone.py +252 -0
- package/templates/test/test_endpoint.sh +79 -0
- package/templates/test/test_local_image.sh +80 -0
- package/templates/test/test_model_handler.py +180 -0
- package/templates/triton/Dockerfile +128 -0
- package/templates/triton/config.pbtxt +163 -0
- package/templates/triton/model.py +130 -0
- package/templates/triton/requirements.txt +11 -0
package/servers/base-image-picker/catalogs/model-servers.json
@@ -0,0 +1,1191 @@
+{
+  "vllm": [
+    {
+      "image": "vllm/vllm-openai:v0.10.1",
+      "tag": "v0.10.1",
+      "architecture": "amd64",
+      "created": "2025-01-15T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.4",
+        "python_version": "3.12",
+        "framework_version": "0.10.1"
+      },
+      "registry": "dockerhub",
+      "repository": "vllm/vllm-openai",
+      "defaults": {
+        "envVars": {
+          "VLLM_TENSOR_PARALLEL_SIZE": "1",
+          "VLLM_GPU_MEMORY_UTILIZATION": "0.9",
+          "VLLM_MAX_NUM_SEQS": "256",
+          "VLLM_MAX_MODEL_LEN": "4096",
+          "VLLM_ENABLE_PREFIX_CACHING": "true"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-1",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.1",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.3"
+        }
+      },
+      "validationLevel": "tested",
+      "profiles": {
+        "low-latency": {
+          "displayName": "Low Latency",
+          "description": "Optimized for single-request latency with prefix caching",
+          "envVars": {
+            "VLLM_MAX_NUM_SEQS": "32",
+            "VLLM_GPU_MEMORY_UTILIZATION": "0.85",
+            "VLLM_ENABLE_PREFIX_CACHING": "true"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge"
+          ],
+          "notes": "Prefix caching improves latency for repeated prompts"
+        },
+        "high-throughput": {
+          "displayName": "High Throughput",
+          "description": "Optimized for batch processing with continuous batching",
+          "envVars": {
+            "VLLM_MAX_NUM_SEQS": "512",
+            "VLLM_GPU_MEMORY_UTILIZATION": "0.95",
+            "VLLM_MAX_MODEL_LEN": "2048",
+            "VLLM_ENABLE_PREFIX_CACHING": "false"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.4xlarge",
+            "ml.g5.12xlarge"
+          ],
+          "notes": "Continuous batching maximizes GPU utilization"
+        },
+        "multi-gpu": {
+          "displayName": "Multi-GPU",
+          "description": "Tensor parallel across multiple GPUs for large models",
+          "envVars": {
+            "VLLM_TENSOR_PARALLEL_SIZE": "4",
+            "VLLM_GPU_MEMORY_UTILIZATION": "0.9",
+            "VLLM_MAX_NUM_SEQS": "256"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Requires instance with 4+ GPUs. Set TENSOR_PARALLEL_SIZE to match GPU count"
+        }
+      },
+      "notes": "vLLM 0.4.0 adds prefix caching and improved performance. Requires CUDA 12.0+"
+    },
+    {
+      "image": "vllm/vllm-openai:v0.9.1",
+      "tag": "v0.9.1",
+      "architecture": "amd64",
+      "created": "2024-12-10T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.1",
+        "python_version": "3.12",
+        "framework_version": "0.9.1"
+      },
+      "registry": "dockerhub",
+      "repository": "vllm/vllm-openai",
+      "defaults": {
+        "envVars": {
+          "VLLM_TENSOR_PARALLEL_SIZE": "1",
+          "VLLM_GPU_MEMORY_UTILIZATION": "0.9",
+          "VLLM_MAX_NUM_SEQS": "256",
+          "VLLM_MAX_MODEL_LEN": "4096",
+          "VLLM_ENABLE_PREFIX_CACHING": "true"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-1",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.1",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.3"
+        }
+      },
+      "validationLevel": "tested",
+      "profiles": {
+        "low-latency": {
+          "displayName": "Low Latency",
+          "description": "Optimized for single-request latency with prefix caching",
+          "envVars": {
+            "VLLM_MAX_NUM_SEQS": "32",
+            "VLLM_GPU_MEMORY_UTILIZATION": "0.85",
+            "VLLM_ENABLE_PREFIX_CACHING": "true"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge"
+          ],
+          "notes": "Prefix caching improves latency for repeated prompts"
+        },
+        "high-throughput": {
+          "displayName": "High Throughput",
+          "description": "Optimized for batch processing with continuous batching",
+          "envVars": {
+            "VLLM_MAX_NUM_SEQS": "512",
+            "VLLM_GPU_MEMORY_UTILIZATION": "0.95",
+            "VLLM_MAX_MODEL_LEN": "2048",
+            "VLLM_ENABLE_PREFIX_CACHING": "false"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.4xlarge",
+            "ml.g5.12xlarge"
+          ],
+          "notes": "Continuous batching maximizes GPU utilization"
+        },
+        "multi-gpu": {
+          "displayName": "Multi-GPU",
+          "description": "Tensor parallel across multiple GPUs for large models",
+          "envVars": {
+            "VLLM_TENSOR_PARALLEL_SIZE": "4",
+            "VLLM_GPU_MEMORY_UTILIZATION": "0.9",
+            "VLLM_MAX_NUM_SEQS": "256"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Requires instance with 4+ GPUs. Set TENSOR_PARALLEL_SIZE to match GPU count"
+        }
+      },
+      "notes": "vLLM 0.4.0 adds prefix caching and improved performance. Requires CUDA 12.0+"
+    }
+  ],
+  "sglang": [
+    {
+      "image": "lmsysorg/sglang:v0.5.4.post1-cu121",
+      "tag": "v0.5.4.post1-cu121",
+      "architecture": "amd64",
+      "created": "2025-01-20T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.1",
+        "python_version": "3.10",
+        "framework_version": "0.5.4"
+      },
+      "registry": "dockerhub",
+      "repository": "lmsysorg/sglang",
+      "defaults": {
+        "envVars": {
+          "SGLANG_TENSOR_PARALLEL_SIZE": "1",
+          "SGLANG_MEM_FRACTION": "0.9",
+          "SGLANG_MAX_RUNNING_REQUESTS": "256",
+          "SGLANG_CONTEXT_LENGTH": "4096"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-1",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.1",
+        "versionRange": {
+          "min": "11.8",
+          "max": "12.2"
+        }
+      },
+      "validationLevel": "experimental",
+      "profiles": {
+        "default": {
+          "displayName": "Default Configuration",
+          "description": "Balanced configuration for general use",
+          "envVars": {
+            "SGLANG_MAX_RUNNING_REQUESTS": "256",
+            "SGLANG_MEM_FRACTION": "0.9"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "Good starting point for most workloads"
+        },
+        "high-throughput": {
+          "displayName": "High Throughput",
+          "description": "Optimized for maximum throughput with RadixAttention",
+          "envVars": {
+            "SGLANG_MAX_RUNNING_REQUESTS": "512",
+            "SGLANG_MEM_FRACTION": "0.95",
+            "SGLANG_CONTEXT_LENGTH": "2048",
+            "SGLANG_ENABLE_RADIX_CACHE": "true"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.4xlarge",
+            "ml.g5.12xlarge"
+          ],
+          "notes": "RadixAttention provides automatic KV cache reuse for improved throughput"
+        }
+      },
+      "notes": "SGLang 0.2.0 features RadixAttention for automatic KV cache reuse. Experimental support"
+    },
+    {
+      "image": "lmsysorg/sglang:v0.4.6-cu121",
+      "tag": "v0.4.6-cu121",
+      "architecture": "amd64",
+      "created": "2024-11-15T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.1",
+        "python_version": "3.10",
+        "framework_version": "0.4.6"
+      },
+      "registry": "dockerhub",
+      "repository": "lmsysorg/sglang",
+      "defaults": {
+        "envVars": {
+          "SGLANG_TENSOR_PARALLEL_SIZE": "1",
+          "SGLANG_MEM_FRACTION": "0.9",
+          "SGLANG_MAX_RUNNING_REQUESTS": "256",
+          "SGLANG_CONTEXT_LENGTH": "4096"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-1",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.1",
+        "versionRange": {
+          "min": "11.8",
+          "max": "12.2"
+        }
+      },
+      "validationLevel": "experimental",
+      "profiles": {
+        "default": {
+          "displayName": "Default Configuration",
+          "description": "Balanced configuration for general use",
+          "envVars": {
+            "SGLANG_MAX_RUNNING_REQUESTS": "256",
+            "SGLANG_MEM_FRACTION": "0.9"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "Good starting point for most workloads"
+        },
+        "high-throughput": {
+          "displayName": "High Throughput",
+          "description": "Optimized for maximum throughput with RadixAttention",
+          "envVars": {
+            "SGLANG_MAX_RUNNING_REQUESTS": "512",
+            "SGLANG_MEM_FRACTION": "0.95",
+            "SGLANG_CONTEXT_LENGTH": "2048",
+            "SGLANG_ENABLE_RADIX_CACHE": "true"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.4xlarge",
+            "ml.g5.12xlarge"
+          ],
+          "notes": "RadixAttention provides automatic KV cache reuse for improved throughput"
+        }
+      },
+      "notes": "SGLang 0.2.0 features RadixAttention for automatic KV cache reuse. Experimental support"
+    }
+  ],
+  "tensorrt-llm": [
+    {
+      "image": "nvcr.io/nvidia/tensorrt-llm/release:1.2.0rc8",
+      "tag": "1.2.0rc8",
+      "architecture": "amd64",
+      "created": "2025-01-05T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.4",
+        "python_version": "3.10",
+        "framework_version": "1.2.0"
+      },
+      "registry": "ngc",
+      "repository": "nvidia/tensorrt-llm",
+      "defaults": {
+        "envVars": {
+          "TRTLLM_TENSOR_PARALLEL_SIZE": "1",
+          "TRTLLM_PIPELINE_PARALLEL_SIZE": "1",
+          "TRTLLM_MAX_BATCH_SIZE": "8",
+          "TRTLLM_MAX_INPUT_LEN": "2048",
+          "TRTLLM_MAX_OUTPUT_LEN": "512",
+          "TRTLLM_ENABLE_CHUNKED_CONTEXT": "true",
+          "UCX_MEMTYPE_CACHE": "n"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge",
+          "ml.g5.48xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.2",
+        "versionRange": {
+          "min": "12.1",
+          "max": "12.3"
+        }
+      },
+      "validationLevel": "tested",
+      "profiles": {
+        "fp16": {
+          "displayName": "FP16 Precision",
+          "description": "Half-precision inference with chunked context support",
+          "envVars": {
+            "TRTLLM_DTYPE": "float16",
+            "TRTLLM_MAX_BATCH_SIZE": "16",
+            "TRTLLM_ENABLE_CHUNKED_CONTEXT": "true"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "Chunked context allows processing longer sequences"
+        },
+        "int8": {
+          "displayName": "INT8 Quantization",
+          "description": "8-bit quantization with weight-only quantization",
+          "envVars": {
+            "TRTLLM_DTYPE": "int8",
+            "TRTLLM_MAX_BATCH_SIZE": "32",
+            "TRTLLM_USE_WEIGHT_ONLY": "true",
+            "TRTLLM_WEIGHT_ONLY_PRECISION": "int8"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "Weight-only quantization provides best speed/accuracy tradeoff"
+        },
+        "int4": {
+          "displayName": "INT4 Quantization",
+          "description": "4-bit quantization for maximum memory efficiency",
+          "envVars": {
+            "TRTLLM_DTYPE": "int4",
+            "TRTLLM_MAX_BATCH_SIZE": "64",
+            "TRTLLM_USE_WEIGHT_ONLY": "true",
+            "TRTLLM_WEIGHT_ONLY_PRECISION": "int4"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge"
+          ],
+          "notes": "Enables running larger models on smaller instances with acceptable accuracy"
+        }
+      },
+      "notes": "TensorRT-LLM 1.0.0 adds chunked context and INT4 support. Requires CUDA 12.1+"
+    },
+    {
+      "image": "nvcr.io/nvidia/tensorrt-llm/release:1.1.0",
+      "tag": "1.1.0",
+      "architecture": "amd64",
+      "created": "2024-10-20T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.1",
+        "python_version": "3.10",
+        "framework_version": "1.1.0"
+      },
+      "registry": "ngc",
+      "repository": "nvidia/tensorrt-llm",
+      "defaults": {
+        "envVars": {
+          "TRTLLM_TENSOR_PARALLEL_SIZE": "1",
+          "TRTLLM_PIPELINE_PARALLEL_SIZE": "1",
+          "TRTLLM_MAX_BATCH_SIZE": "8",
+          "TRTLLM_MAX_INPUT_LEN": "2048",
+          "TRTLLM_MAX_OUTPUT_LEN": "512",
+          "TRTLLM_ENABLE_CHUNKED_CONTEXT": "true",
+          "UCX_MEMTYPE_CACHE": "n"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge",
+          "ml.g5.48xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.2",
+        "versionRange": {
+          "min": "12.1",
+          "max": "12.3"
+        }
+      },
+      "validationLevel": "tested",
+      "profiles": {
+        "fp16": {
+          "displayName": "FP16 Precision",
+          "description": "Half-precision inference with chunked context support",
+          "envVars": {
+            "TRTLLM_DTYPE": "float16",
+            "TRTLLM_MAX_BATCH_SIZE": "16",
+            "TRTLLM_ENABLE_CHUNKED_CONTEXT": "true"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "Chunked context allows processing longer sequences"
+        },
+        "int8": {
+          "displayName": "INT8 Quantization",
+          "description": "8-bit quantization with weight-only quantization",
+          "envVars": {
+            "TRTLLM_DTYPE": "int8",
+            "TRTLLM_MAX_BATCH_SIZE": "32",
+            "TRTLLM_USE_WEIGHT_ONLY": "true",
+            "TRTLLM_WEIGHT_ONLY_PRECISION": "int8"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "Weight-only quantization provides best speed/accuracy tradeoff"
+        },
+        "int4": {
+          "displayName": "INT4 Quantization",
+          "description": "4-bit quantization for maximum memory efficiency",
+          "envVars": {
+            "TRTLLM_DTYPE": "int4",
+            "TRTLLM_MAX_BATCH_SIZE": "64",
+            "TRTLLM_USE_WEIGHT_ONLY": "true",
+            "TRTLLM_WEIGHT_ONLY_PRECISION": "int4"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge"
+          ],
+          "notes": "Enables running larger models on smaller instances with acceptable accuracy"
+        }
+      },
+      "notes": "TensorRT-LLM 1.0.0 adds chunked context and INT4 support. Requires CUDA 12.1+"
+    }
+  ],
+  "lmi": [
+    {
+      "image": "763104351884.dkr.ecr.us-east-1.amazonaws.com/djl-inference:0.32.0-lmi14.0.0-cu126",
+      "tag": "0.32.0-lmi14.0.0-cu126",
+      "architecture": "amd64",
+      "created": "2025-01-12T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.6",
+        "python_version": "3.10",
+        "framework_version": "14.0.0"
+      },
+      "registry": "ecr",
+      "repository": "djl-inference",
+      "defaults": {
+        "envVars": {
+          "SERVING_PORT": "8080",
+          "OPTION_TENSOR_PARALLEL_DEGREE": "1",
+          "OPTION_MAX_ROLLING_BATCH_SIZE": "32",
+          "OPTION_DTYPE": "fp16"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.6",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "tested",
+      "profiles": {
+        "vllm-backend": {
+          "displayName": "vLLM Backend",
+          "description": "Use vLLM as the inference backend for LMI",
+          "envVars": {
+            "OPTION_ROLLING_BATCH": "vllm",
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "32",
+            "OPTION_DTYPE": "fp16"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "vLLM backend provides excellent performance for most models"
+        },
+        "tensorrt-backend": {
+          "displayName": "TensorRT-LLM Backend",
+          "description": "Use TensorRT-LLM for maximum performance",
+          "envVars": {
+            "OPTION_ROLLING_BATCH": "tensorrt-llm",
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "16",
+            "OPTION_DTYPE": "fp16"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "TensorRT-LLM provides best performance but requires model compilation"
+        },
+        "lmi-dist": {
+          "displayName": "LMI-Dist (DeepSpeed)",
+          "description": "Use LMI-Dist with DeepSpeed for large models",
+          "envVars": {
+            "OPTION_ROLLING_BATCH": "lmi-dist",
+            "OPTION_TENSOR_PARALLEL_DEGREE": "4",
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "64"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Best for very large models requiring multi-GPU tensor parallelism"
+        },
+        "auto": {
+          "displayName": "Auto Backend Selection",
+          "description": "Let LMI automatically select the best backend",
+          "envVars": {
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "32",
+            "OPTION_DTYPE": "fp16"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "LMI will analyze your model and select the optimal backend automatically"
+        }
+      },
+      "notes": "AWS Large Model Inference (LMI) 14.0.0 with automatic backend selection. Supports vLLM, TensorRT-LLM, LMI-Dist, and Transformers NeuronX backends"
+    },
+    {
+      "image": "763104351884.dkr.ecr.us-east-1.amazonaws.com/djl-inference:0.31.0-lmi13.0.0-cu124",
+      "tag": "0.31.0-lmi13.0.0-cu124",
+      "architecture": "amd64",
+      "created": "2024-11-01T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.4",
+        "python_version": "3.10",
+        "framework_version": "13.0.0"
+      },
+      "registry": "ecr",
+      "repository": "djl-inference",
+      "defaults": {
+        "envVars": {
+          "SERVING_PORT": "8080",
+          "OPTION_TENSOR_PARALLEL_DEGREE": "1",
+          "OPTION_MAX_ROLLING_BATCH_SIZE": "32",
+          "OPTION_DTYPE": "fp16"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.6",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "tested",
+      "profiles": {
+        "vllm-backend": {
+          "displayName": "vLLM Backend",
+          "description": "Use vLLM as the inference backend for LMI",
+          "envVars": {
+            "OPTION_ROLLING_BATCH": "vllm",
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "32",
+            "OPTION_DTYPE": "fp16"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "vLLM backend provides excellent performance for most models"
+        },
+        "tensorrt-backend": {
+          "displayName": "TensorRT-LLM Backend",
+          "description": "Use TensorRT-LLM for maximum performance",
+          "envVars": {
+            "OPTION_ROLLING_BATCH": "tensorrt-llm",
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "16",
+            "OPTION_DTYPE": "fp16"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "TensorRT-LLM provides best performance but requires model compilation"
+        },
+        "lmi-dist": {
+          "displayName": "LMI-Dist (DeepSpeed)",
+          "description": "Use LMI-Dist with DeepSpeed for large models",
+          "envVars": {
+            "OPTION_ROLLING_BATCH": "lmi-dist",
+            "OPTION_TENSOR_PARALLEL_DEGREE": "4",
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "64"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Best for very large models requiring multi-GPU tensor parallelism"
+        },
+        "auto": {
+          "displayName": "Auto Backend Selection",
+          "description": "Let LMI automatically select the best backend",
+          "envVars": {
+            "OPTION_MAX_ROLLING_BATCH_SIZE": "32",
+            "OPTION_DTYPE": "fp16"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "LMI will analyze your model and select the optimal backend automatically"
+        }
+      },
+      "notes": "AWS Large Model Inference (LMI) 14.0.0 with automatic backend selection. Supports vLLM, TensorRT-LLM, LMI-Dist, and Transformers NeuronX backends"
+    }
+  ],
+  "djl": [
+    {
+      "image": "deepjavalibrary/djl-serving:0.36.0-pytorch-gpu",
+      "tag": "0.36.0-pytorch-gpu",
+      "architecture": "amd64",
+      "created": "2025-01-08T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.6",
+        "python_version": "3.10",
+        "framework_version": "0.36.0"
+      },
+      "registry": "dockerhub",
+      "repository": "deepjavalibrary/djl-serving",
+      "defaults": {
+        "envVars": {
+          "SERVING_PORT": "8080",
+          "OPTION_TENSOR_PARALLEL_DEGREE": "1",
+          "OPTION_DEVICE_MAP": "auto"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.6",
+        "versionRange": {
+          "min": "11.8",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "community-validated",
+      "profiles": {
+        "pytorch": {
+          "displayName": "PyTorch Engine",
+          "description": "Use PyTorch as the inference engine",
+          "envVars": {
+            "ENGINE": "Python",
+            "OPTION_DEVICE_MAP": "auto",
+            "BATCH_SIZE": "1"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "PyTorch engine provides good compatibility with HuggingFace models"
+        },
+        "multi-gpu": {
+          "displayName": "Multi-GPU",
+          "description": "Tensor parallel across multiple GPUs",
+          "envVars": {
+            "ENGINE": "Python",
+            "OPTION_TENSOR_PARALLEL_DEGREE": "4",
+            "OPTION_DEVICE_MAP": "auto"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Distribute model across multiple GPUs for large models"
+        }
+      },
+      "notes": "DJL Serving 0.32.0 with PyTorch backend. Flexible Java-based serving framework with Python engine support"
+    },
+    {
+      "image": "deepjavalibrary/djl-serving:0.35.0-pytorch-gpu",
+      "tag": "0.35.0-pytorch-gpu",
+      "architecture": "amd64",
+      "created": "2024-10-15T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.4",
+        "python_version": "3.10",
+        "framework_version": "0.35.0"
+      },
+      "registry": "dockerhub",
+      "repository": "deepjavalibrary/djl-serving",
+      "defaults": {
+        "envVars": {
+          "SERVING_PORT": "8080",
+          "OPTION_TENSOR_PARALLEL_DEGREE": "1",
+          "OPTION_DEVICE_MAP": "auto"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.6",
+        "versionRange": {
+          "min": "11.8",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "community-validated",
+      "profiles": {
+        "pytorch": {
+          "displayName": "PyTorch Engine",
+          "description": "Use PyTorch as the inference engine",
+          "envVars": {
+            "ENGINE": "Python",
+            "OPTION_DEVICE_MAP": "auto",
+            "BATCH_SIZE": "1"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.xlarge",
+            "ml.g5.2xlarge"
+          ],
+          "notes": "PyTorch engine provides good compatibility with HuggingFace models"
+        },
+        "multi-gpu": {
+          "displayName": "Multi-GPU",
+          "description": "Tensor parallel across multiple GPUs",
+          "envVars": {
+            "ENGINE": "Python",
+            "OPTION_TENSOR_PARALLEL_DEGREE": "4",
+            "OPTION_DEVICE_MAP": "auto"
+          },
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Distribute model across multiple GPUs for large models"
+        }
+      },
+      "notes": "DJL Serving 0.32.0 with PyTorch backend. Flexible Java-based serving framework with Python engine support"
+    }
+  ],
+  "vllm-omni": [
+    {
+      "image": "vllm/vllm-omni:v0.16.0",
+      "tag": "v0.16.0",
+      "architecture": "amd64",
+      "created": "2026-02-01T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.4",
+        "python_version": "3.12",
+        "framework_version": "0.16.0"
+      },
+      "registry": "dockerhub",
+      "repository": "vllm/vllm-omni",
+      "defaults": {
+        "envVars": {
+          "HF_TOKEN": "${hfToken}",
+          "VLLM_WORKER_MULTIPROC_METHOD": "spawn"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.4",
+        "versionRange": {
+          "min": "12.1",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "profiles": {
+        "quality": {
+          "displayName": "Quality",
+          "description": "Higher step count for better image quality",
+          "envVars": {},
+          "recommendedInstanceTypes": [
+            "ml.g5.4xlarge",
+            "ml.g5.12xlarge"
+          ],
+          "notes": "Best image quality, no cache acceleration, VAE tiling for memory efficiency"
+        },
+        "speed": {
+          "displayName": "Speed",
+          "description": "Cache acceleration for faster generation",
+          "envVars": {},
+          "recommendedInstanceTypes": [
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "TeaCache acceleration reduces redundant computation between denoising steps"
+        },
+        "multi-gpu": {
+          "displayName": "Multi-GPU",
+          "description": "Sequence parallelism for large diffusion models",
+          "envVars": {},
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Ulysses sequence parallelism for large models like FLUX on multi-GPU instances"
+        }
+      },
+      "notes": "vLLM-Omni (separate project from vLLM) with diffusion model support. Requires CUDA 12.1+ and GPU instance. Supports FLUX, SD3.5, Qwen-Image, Z-Image-Turbo, Bagel, and other DiT model families. Uses --omni flag for serving. Default port 8000 remapped to 8080 for SageMaker."
+    },
+    {
+      "image": "vllm/vllm-omni:v0.14.0",
+      "tag": "v0.14.0",
+      "architecture": "amd64",
+      "created": "2025-11-15T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.4",
+        "python_version": "3.12",
+        "framework_version": "0.14.0"
+      },
+      "registry": "dockerhub",
+      "repository": "vllm/vllm-omni",
+      "defaults": {
+        "envVars": {
+          "HF_TOKEN": "${hfToken}",
+          "VLLM_WORKER_MULTIPROC_METHOD": "spawn"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.4",
+        "versionRange": {
+          "min": "12.1",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "profiles": {
+        "quality": {
+          "displayName": "Quality",
+          "description": "Higher step count for better image quality",
+          "envVars": {},
+          "recommendedInstanceTypes": [
+            "ml.g5.4xlarge",
+            "ml.g5.12xlarge"
+          ],
+          "notes": "Best image quality, no cache acceleration, VAE tiling for memory efficiency"
+        },
+        "speed": {
+          "displayName": "Speed",
+          "description": "Cache acceleration for faster generation",
+          "envVars": {},
+          "recommendedInstanceTypes": [
+            "ml.g5.2xlarge",
+            "ml.g5.4xlarge"
+          ],
+          "notes": "TeaCache acceleration reduces redundant computation between denoising steps"
+        },
+        "multi-gpu": {
+          "displayName": "Multi-GPU",
+          "description": "Sequence parallelism for large diffusion models",
+          "envVars": {},
+          "recommendedInstanceTypes": [
+            "ml.g5.12xlarge",
+            "ml.g5.48xlarge"
+          ],
+          "notes": "Ulysses sequence parallelism for large models like FLUX on multi-GPU instances"
+        }
+      },
+      "notes": "vLLM-Omni (separate project from vLLM) with diffusion model support. Requires CUDA 12.1+ and GPU instance. Supports FLUX, SD3.5, Qwen-Image, Z-Image-Turbo, Bagel, and other DiT model families. Uses --omni flag for serving. Default port 8000 remapped to 8080 for SageMaker."
+    }
+  ],
+  "triton-fil": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton FIL backend for tree-based models (XGBoost, LightGBM). GPU optional but recommended for performance"
+    }
+  ],
+  "triton-onnxruntime": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton ONNX Runtime backend for ONNX models. GPU optional but recommended for performance"
+    }
+  ],
+  "triton-tensorflow": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton TensorFlow backend for SavedModel format. GPU optional but recommended for performance"
+    }
+  ],
+  "triton-pytorch": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton PyTorch backend for TorchScript models. GPU recommended for performance"
+    }
+  ],
+  "triton-vllm": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton vLLM backend for LLM serving. Requires GPU instance"
+    }
+  ],
+  "triton-tensorrtllm": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.2xlarge",
+          "ml.g5.4xlarge",
+          "ml.g5.12xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton TensorRT-LLM backend for optimized LLM serving. Requires GPU instance"
+    }
+  ],
+  "triton-python": [
+    {
+      "image": "nvcr.io/nvidia/tritonserver:24.08-py3",
+      "tag": "24.08",
+      "architecture": "amd64",
+      "created": "2026-03-25T00:00:00Z",
+      "labels": {
+        "cuda_version": "12.5",
+        "python_version": "3.10",
+        "framework_version": "24.08"
+      },
+      "registry": "ngc",
+      "repository": "nvcr.io/nvidia/tritonserver",
+      "defaults": {
+        "envVars": {
+          "TRITON_MODEL_REPOSITORY": "/opt/ml/model/model_repository"
+        },
+        "inferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-3-2",
+        "recommendedInstanceTypes": [
+          "ml.g5.xlarge",
+          "ml.g5.2xlarge"
+        ]
+      },
+      "accelerator": {
+        "type": "cuda",
+        "version": "12.5",
+        "versionRange": {
+          "min": "12.0",
+          "max": "12.6"
+        }
+      },
+      "validationLevel": "experimental",
+      "notes": "Triton Python backend for custom model serving with TritonPythonModel interface. GPU optional"
+    }
+  ]
+}
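For orientation, the sketch below shows one way a consumer of this catalog might resolve a base image: filter a server family's entries by host CUDA compatibility against accelerator.versionRange, then take the newest build by created date. This is a minimal illustration only; pickImage and cudaInRange are hypothetical helper names, not the package's actual API (the real selection logic lives in package/servers/base-image-picker/index.js, whose contents are not shown in this diff).

// Illustrative sketch, not the package's real resolver.
const catalog = require('./model-servers.json');

// Compare dotted version strings numerically, e.g. "12.4" < "12.10".
function cmpVersion(a, b) {
  const pa = a.split('.').map(Number);
  const pb = b.split('.').map(Number);
  for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
    const d = (pa[i] || 0) - (pb[i] || 0);
    if (d !== 0) return d;
  }
  return 0;
}

// True when hostCuda falls inside the entry's accelerator.versionRange.
function cudaInRange(entry, hostCuda) {
  const range = entry.accelerator && entry.accelerator.versionRange;
  if (!range) return true;
  return cmpVersion(hostCuda, range.min) >= 0 && cmpVersion(hostCuda, range.max) <= 0;
}

// Newest compatible image for a server family ("vllm", "lmi", ...).
function pickImage(server, hostCuda) {
  const candidates = (catalog[server] || []).filter((e) => cudaInRange(e, hostCuda));
  candidates.sort((a, b) => new Date(b.created) - new Date(a.created));
  return candidates[0]; // undefined when nothing matches
}

// Example: a host with CUDA 12.1 matches both vllm entries; the
// v0.10.1 entry wins on its newer "created" timestamp.
const choice = pickImage('vllm', '12.1');
if (choice) {
  console.log(choice.image, choice.defaults.envVars);
}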