jetson-examples 0.1.7-py3-none-any.whl → 0.1.8-py3-none-any.whl
This diff represents the content of two publicly released versions of the package and reflects the changes between them as they appear in their public registry.
- {jetson_examples-0.1.7.dist-info → jetson_examples-0.1.8.dist-info}/LICENSE +21 -21
- {jetson_examples-0.1.7.dist-info → jetson_examples-0.1.8.dist-info}/METADATA +8 -8
- jetson_examples-0.1.8.dist-info/RECORD +107 -0
- {jetson_examples-0.1.7.dist-info → jetson_examples-0.1.8.dist-info}/WHEEL +1 -1
- reComputer/__init__.py +1 -1
- reComputer/main.py +60 -60
- reComputer/scripts/MoveNet-Lightning/clean.sh +8 -8
- reComputer/scripts/MoveNet-Lightning/getVersion.sh +59 -59
- reComputer/scripts/MoveNet-Lightning/init.sh +6 -6
- reComputer/scripts/MoveNet-Lightning/readme.md +30 -30
- reComputer/scripts/MoveNet-Lightning/run.sh +19 -19
- reComputer/scripts/MoveNet-Thunder/clean.sh +7 -7
- reComputer/scripts/MoveNet-Thunder/getVersion.sh +59 -59
- reComputer/scripts/MoveNet-Thunder/init.sh +6 -6
- reComputer/scripts/MoveNet-Thunder/readme.md +31 -31
- reComputer/scripts/MoveNet-Thunder/run.sh +18 -18
- reComputer/scripts/MoveNetJS/clean.sh +4 -4
- reComputer/scripts/MoveNetJS/readme.md +56 -56
- reComputer/scripts/MoveNetJS/run.sh +13 -13
- reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/init.sh +16 -16
- reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh +8 -8
- reComputer/scripts/audiocraft/README.md +35 -35
- reComputer/scripts/audiocraft/clean.sh +5 -5
- reComputer/scripts/audiocraft/init.sh +16 -16
- reComputer/scripts/audiocraft/run.sh +7 -7
- reComputer/scripts/check.sh +4 -4
- reComputer/scripts/clean.sh +33 -33
- reComputer/scripts/comfyui/LICENSE +21 -21
- reComputer/scripts/comfyui/README.md +127 -127
- reComputer/scripts/comfyui/clean.sh +6 -6
- reComputer/scripts/comfyui/config.yaml +29 -29
- reComputer/scripts/comfyui/init.sh +163 -163
- reComputer/scripts/comfyui/run.sh +30 -30
- reComputer/scripts/depth-anything/Dockerfile +5 -5
- reComputer/scripts/depth-anything/LICENSE +21 -21
- reComputer/scripts/depth-anything/README.md +135 -135
- reComputer/scripts/depth-anything/clean.sh +7 -7
- reComputer/scripts/depth-anything/config.yaml +31 -31
- reComputer/scripts/depth-anything/init.sh +164 -164
- reComputer/scripts/depth-anything/run.sh +22 -22
- reComputer/scripts/depth-anything-v2/Dockerfile +5 -5
- reComputer/scripts/depth-anything-v2/LICENSE +21 -21
- reComputer/scripts/depth-anything-v2/README.md +135 -135
- reComputer/scripts/depth-anything-v2/clean.sh +7 -7
- reComputer/scripts/depth-anything-v2/config.yaml +31 -31
- reComputer/scripts/depth-anything-v2/init.sh +164 -164
- reComputer/scripts/depth-anything-v2/run.sh +22 -22
- reComputer/scripts/live-llava/init.sh +16 -16
- reComputer/scripts/live-llava/run.sh +278 -278
- reComputer/scripts/llama-factory/README.md +68 -68
- reComputer/scripts/llama-factory/clean.sh +4 -4
- reComputer/scripts/llama-factory/init.sh +52 -52
- reComputer/scripts/llama-factory/run.sh +10 -10
- reComputer/scripts/llama3/clean.sh +22 -22
- reComputer/scripts/llama3/init.sh +16 -16
- reComputer/scripts/llama3/run.sh +13 -13
- reComputer/scripts/llava/clean.sh +3 -3
- reComputer/scripts/llava/init.sh +16 -16
- reComputer/scripts/llava/run.sh +9 -9
- reComputer/scripts/llava-v1.5-7b/init.sh +16 -16
- reComputer/scripts/llava-v1.5-7b/run.sh +9 -9
- reComputer/scripts/llava-v1.6-vicuna-7b/init.sh +16 -16
- reComputer/scripts/llava-v1.6-vicuna-7b/run.sh +10 -10
- reComputer/scripts/nanodb/init.sh +16 -16
- reComputer/scripts/nanodb/readme.md +10 -10
- reComputer/scripts/nanodb/run.sh +90 -90
- reComputer/scripts/nanoowl/init.sh +16 -16
- reComputer/scripts/nanoowl/run.sh +7 -7
- reComputer/scripts/ollama/clean.sh +22 -22
- reComputer/scripts/ollama/init.sh +16 -16
- reComputer/scripts/ollama/run.sh +10 -10
- reComputer/scripts/parler-tts/clean.sh +7 -7
- reComputer/scripts/parler-tts/getVersion.sh +59 -59
- reComputer/scripts/parler-tts/init.sh +8 -8
- reComputer/scripts/parler-tts/readme.md +63 -63
- reComputer/scripts/parler-tts/run.sh +17 -17
- reComputer/scripts/run.sh +48 -48
- reComputer/scripts/stable-diffusion-webui/init.sh +16 -16
- reComputer/scripts/stable-diffusion-webui/run.sh +6 -6
- reComputer/scripts/text-generation-webui/init.sh +16 -16
- reComputer/scripts/text-generation-webui/run.sh +11 -11
- reComputer/scripts/ultralytics-yolo/LICENSE +21 -21
- reComputer/scripts/ultralytics-yolo/README.md +124 -124
- reComputer/scripts/ultralytics-yolo/clean.sh +6 -6
- reComputer/scripts/ultralytics-yolo/config.yaml +31 -32
- reComputer/scripts/ultralytics-yolo/init.sh +4 -176
- reComputer/scripts/ultralytics-yolo/run.sh +26 -25
- reComputer/scripts/update.sh +26 -26
- reComputer/scripts/utils.sh +166 -0
- reComputer/scripts/whisper/init.sh +16 -16
- reComputer/scripts/whisper/run.sh +7 -7
- reComputer/scripts/yolov10/Dockerfile +13 -13
- reComputer/scripts/yolov10/README.md +71 -71
- reComputer/scripts/yolov10/clean.sh +4 -4
- reComputer/scripts/yolov10/config.yaml +31 -0
- reComputer/scripts/yolov10/init.sh +20 -52
- reComputer/scripts/yolov10/run.sh +7 -7
- reComputer/scripts/yolov8-rail-inspection/config.yaml +31 -0
- reComputer/scripts/yolov8-rail-inspection/init.sh +5 -0
- reComputer/scripts/yolov8-rail-inspection/readme.md +35 -35
- reComputer/scripts/yolov8-rail-inspection/run.sh +21 -21
- jetson_examples-0.1.7.dist-info/RECORD +0 -103
- {jetson_examples-0.1.7.dist-info → jetson_examples-0.1.8.dist-info}/entry_points.txt +0 -0
- {jetson_examples-0.1.7.dist-info → jetson_examples-0.1.8.dist-info}/top_level.txt +0 -0
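To reproduce this comparison locally, both wheels can be fetched and unpacked with standard Python packaging tools; a minimal sketch (directory names are arbitrary):

```sh
# Download both released wheels without installing them, then unpack and compare.
pip download jetson-examples==0.1.7 --no-deps -d old
pip download jetson-examples==0.1.8 --no-deps -d new
unzip -q old/jetson_examples-0.1.7-py3-none-any.whl -d old/unpacked
unzip -q new/jetson_examples-0.1.8-py3-none-any.whl -d new/unpacked
diff -ru old/unpacked new/unpacked   # file-level diff of the two versions
```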
@@ -1,135 +1,135 @@
(Every line in this hunk is removed and re-added; the removed and re-added text is identical as rendered here, so the file content is shown once below.)

# Jetson-Example: Run Depth Anything on NVIDIA Jetson Orin 🚀

This project provides a one-click deployment of the Depth Anything monocular depth estimation model developed by Hong Kong University and ByteDance. The deployment is visualized on [reComputer J4012](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) (Jetson Orin NX 16GB, 100 TOPS) and includes a WebUI for model conversion to TensorRT and real-time depth estimation.

<p align="center">
<img src="images/WebUI.png" alt="WebUI">
</p>

All models and the inference engine used in this project come from the official [Depth Anything](https://depth-anything.github.io/) project.

## 🔥Features

- One-click deployment for Depth Anything models.
- WebUI for model conversion and depth estimation.
- Support for uploading videos/images or using the local camera.
- Supports the S, B, and L Depth Anything models with input sizes of 308, 384, 406, and 518.

### 🗝️WebUI Features

- **Choose model**: Select the Depth Anything model to use (S, B, or L, e.g. depth_anything_vits14).
- **Choose input size**: Select the desired input size (308, 384, 406, 518).
- **Grayscale option**: Option to display the output in grayscale.
- **Choose source**: Select the input source (Video, Image, Camera).
- **Export Model**: Automatically download and convert the model from PyTorch (.pth) to TensorRT format (a reference sketch of this conversion follows this section).
- **Start Estimation**: Begin depth estimation using the selected model and input source.
- **Stop Estimation**: Stop the ongoing depth estimation process.

<p align="center">
<img src="images/Opr.png" alt="Depthanything" width="320" height="360">
</p>
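For reference, the kind of conversion **Export Model** performs can also be done by hand with TensorRT's `trtexec`, given an ONNX export of the chosen checkpoint; a minimal sketch (file names are illustrative, not taken from this project):

```sh
# Illustrative only: build a TensorRT engine from an ONNX export of a Depth Anything checkpoint.
trtexec --onnx=depth_anything_vits14.onnx \
        --saveEngine=depth_anything_vits14.engine \
        --fp16   # half precision is the usual choice on Jetson Orin
```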
## 🥳Getting Started

### 📜Prerequisites

- reComputer J4012 [(🛒Buy Here)](https://www.seeedstudio.com/reComputer-J4012-p-5586.html)
- Docker installed on the reComputer
- USB camera (optional)

### Modify Docker Daemon Configuration (Optional)

To speed up model loading in Docker, add the following content to the `/etc/docker/daemon.json` file:

```json
{
  "default-runtime": "nvidia",
  "runtimes": {
    "nvidia": {
      "path": "nvidia-container-runtime",
      "runtimeArgs": []
    }
  },
  "storage-driver": "overlay2",
  "data-root": "/var/lib/docker",
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  },
  "no-new-privileges": true,
  "experimental": false
}
```

After modifying the `daemon.json` file, restart the Docker service to apply the configuration:

```sh
sudo systemctl restart docker
```
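A quick way to confirm the new configuration took effect is a standard Docker query (not part of the project's scripts):

```sh
# Should report "Default Runtime: nvidia" after the restart.
sudo docker info | grep -i "default runtime"
```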
### 🚀Installation

PyPI (recommended)

```sh
pip install jetson-examples
```

Linux (install script from GitHub)

```sh
curl -fsSL https://raw.githubusercontent.com/Seeed-Projects/jetson-examples/main/install.sh | sh
```

GitHub (for developers)

```sh
git clone https://github.com/Seeed-Projects/jetson-examples
cd jetson-examples
pip install .
```
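Whichever route is used, a standard pip query confirms the install (not specific to this project):

```sh
pip show jetson-examples   # prints the installed version and location
```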
### 📋Usage

1. Run the example:
   ```sh
   reComputer run depth-anything
   ```
2. Open a web browser and go to **http://{reComputer ip}:5000** (see the note after these steps for finding the device's IP). Use the WebUI to select the model, input size, and source.
3. Click **Export Model** to download and convert the model.
4. Click **Start Estimation** to begin the depth estimation process.
5. View the real-time depth estimation results in the WebUI.
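If the reComputer's IP address is not known, it can be read on the device itself with a standard Linux command (not part of the example's scripts):

```sh
hostname -I   # lists the device's IP addresses; use the first one in the URL above
```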
## ⛏️Applications

- **Security**: Enhance surveillance systems with depth perception.
<p align="center">
<img src="images/Security.png" alt="Security" width="500" height="150">
</p>
- **Autonomous Driving**: Improve environmental sensing for autonomous vehicles.
<p align="center">
<img src="images/Autonomous Driving.png" alt="Autonomous Driving" width="500" height="150">
</p>
- **Underwater Scenes**: Apply depth estimation in underwater exploration.
<p align="center">
<img src="images/Underwater Scenes.png" alt="Underwater Scenes" width="500" height="150">
</p>
- **Indoor Scenes**: Use depth estimation for indoor navigation and analysis.
<p align="center">
<img src="images/Indoor Scenes.png" alt="Indoor Scenes" width="500" height="150">
</p>

## Further Development 🔧

- [Depth Anything Official](https://depth-anything.github.io/)
- [Depth Anything TensorRT](https://github.com/spacewalk01/depth-anything-tensorrt)
- [Depth Anything ONNX](https://github.com/fabio-sim/Depth-Anything-ONNX)
- [Depth Anything ROS](https://github.com/scepter914/DepthAnything-ROS)
- [Depth Anything Android](https://github.com/FeiGeChuanShu/ncnn-android-depth_anything)

## 🙏🏻Contributing

We welcome contributions from the community. Please fork the repository and create a pull request with your changes.

## ✅License

This project is licensed under the MIT License.

## 🏷️Acknowledgements

- Depth Anything [project](https://depth-anything.github.io/) by Hong Kong University and ByteDance.
- Seeed Studio team for their [support and resources](https://github.com/Seeed-Projects/jetson-examples).
@@ -1,8 +1,8 @@
(Lines 1–7 are removed and re-added with identical text as rendered here; line 8 is unchanged context. The script is shown once below.)

#!/bin/bash

CONTAINER_NAME="depth-anything"
IMAGE_NAME="yaohui1998/depthanything-on-jetson-orin:latest"

sudo docker stop $CONTAINER_NAME
sudo docker rm $CONTAINER_NAME
sudo docker rmi $IMAGE_NAMEs
@@ -1,31 +1,31 @@
(Every line in this hunk is removed and re-added; the removed and re-added text is identical as rendered here, so the file content is shown once below.)

allowed_l4t_versions:
  - 35.3.1
  - 35.4.1
  - 35.5.0
required_disk_space: 20 # in GB
min_mem_gb: 4
min_swap_gb: 2
nvidia_jetson_package: "nvidia-jetpack"
packages:
  #- "ros-noetic-ros-base"
  #- "flask"
docker:
  desired_daemon_json: |
    {
      "default-runtime": "nvidia",
      "runtimes": {
        "nvidia": {
          "path": "nvidia-container-runtime",
          "runtimeArgs": []
        }
      },
      "storage-driver": "overlay2",
      "data-root": "/var/lib/docker",
      "log-driver": "json-file",
      "log-opts": {
        "max-size": "100m",
        "max-file": "3"
      },
      "no-new-privileges": true,
      "experimental": false
    }
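The `allowed_l4t_versions` list above is the kind of host requirement the example scripts check before installing anything, likely via the new `reComputer/scripts/utils.sh` helper added in this release. A minimal sketch of such a version gate, assuming the usual `/etc/nv_tegra_release` format on Jetson; the parsing here is illustrative and may differ from the package's own logic:

```sh
# Illustrative only: compare the host's L4T release against the allowed list above.
# The release file normally starts with a line like "# R35 (release), REVISION: 3.1, ...".
L4T_VERSION=$(head -n 1 /etc/nv_tegra_release | sed 's/# R\([0-9]*\).*REVISION: \([0-9.]*\),.*/\1.\2/')
case "$L4T_VERSION" in
  35.3.1|35.4.1|35.5.0) echo "L4T $L4T_VERSION is supported" ;;
  *) echo "L4T $L4T_VERSION is not in the allowed list" ;;
esac
```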