jetson-examples 0.1.8-py3-none-any.whl → 0.1.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. {jetson_examples-0.1.8.dist-info → jetson_examples-0.1.9.dist-info}/LICENSE +21 -21
  2. {jetson_examples-0.1.8.dist-info → jetson_examples-0.1.9.dist-info}/METADATA +1 -1
  3. jetson_examples-0.1.9.dist-info/RECORD +109 -0
  4. reComputer/__init__.py +1 -1
  5. reComputer/main.py +60 -60
  6. reComputer/scripts/MoveNet-Lightning/clean.sh +8 -8
  7. reComputer/scripts/MoveNet-Lightning/getVersion.sh +59 -59
  8. reComputer/scripts/MoveNet-Lightning/init.sh +6 -6
  9. reComputer/scripts/MoveNet-Lightning/readme.md +30 -30
  10. reComputer/scripts/MoveNet-Lightning/run.sh +19 -19
  11. reComputer/scripts/MoveNet-Thunder/clean.sh +7 -7
  12. reComputer/scripts/MoveNet-Thunder/getVersion.sh +59 -59
  13. reComputer/scripts/MoveNet-Thunder/init.sh +6 -6
  14. reComputer/scripts/MoveNet-Thunder/readme.md +31 -31
  15. reComputer/scripts/MoveNet-Thunder/run.sh +18 -18
  16. reComputer/scripts/MoveNetJS/clean.sh +4 -4
  17. reComputer/scripts/MoveNetJS/readme.md +56 -56
  18. reComputer/scripts/MoveNetJS/run.sh +13 -13
  19. reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/init.sh +16 -16
  20. reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh +8 -8
  21. reComputer/scripts/audiocraft/README.md +35 -35
  22. reComputer/scripts/audiocraft/clean.sh +5 -5
  23. reComputer/scripts/audiocraft/init.sh +16 -16
  24. reComputer/scripts/audiocraft/run.sh +7 -7
  25. reComputer/scripts/check.sh +4 -4
  26. reComputer/scripts/clean.sh +33 -33
  27. reComputer/scripts/comfyui/LICENSE +21 -21
  28. reComputer/scripts/comfyui/README.md +127 -127
  29. reComputer/scripts/comfyui/clean.sh +9 -7
  30. reComputer/scripts/comfyui/config.yaml +30 -29
  31. reComputer/scripts/comfyui/init.sh +9 -163
  32. reComputer/scripts/comfyui/run.sh +30 -30
  33. reComputer/scripts/depth-anything/Dockerfile +5 -5
  34. reComputer/scripts/depth-anything/LICENSE +21 -21
  35. reComputer/scripts/depth-anything/README.md +135 -135
  36. reComputer/scripts/depth-anything/clean.sh +7 -7
  37. reComputer/scripts/depth-anything/config.yaml +31 -31
  38. reComputer/scripts/depth-anything/init.sh +164 -164
  39. reComputer/scripts/depth-anything/run.sh +22 -22
  40. reComputer/scripts/depth-anything-v2/Dockerfile +5 -5
  41. reComputer/scripts/depth-anything-v2/LICENSE +21 -21
  42. reComputer/scripts/depth-anything-v2/README.md +135 -135
  43. reComputer/scripts/depth-anything-v2/clean.sh +7 -7
  44. reComputer/scripts/depth-anything-v2/config.yaml +31 -31
  45. reComputer/scripts/depth-anything-v2/init.sh +164 -164
  46. reComputer/scripts/depth-anything-v2/run.sh +22 -22
  47. reComputer/scripts/live-llava/init.sh +16 -16
  48. reComputer/scripts/live-llava/run.sh +278 -278
  49. reComputer/scripts/llama-factory/README.md +68 -68
  50. reComputer/scripts/llama-factory/clean.sh +4 -4
  51. reComputer/scripts/llama-factory/init.sh +52 -52
  52. reComputer/scripts/llama-factory/run.sh +10 -10
  53. reComputer/scripts/llama3/clean.sh +22 -22
  54. reComputer/scripts/llama3/config.yaml +31 -0
  55. reComputer/scripts/llama3/init.sh +19 -16
  56. reComputer/scripts/llama3/run.sh +13 -13
  57. reComputer/scripts/llava/clean.sh +3 -3
  58. reComputer/scripts/llava/init.sh +16 -16
  59. reComputer/scripts/llava/run.sh +9 -9
  60. reComputer/scripts/llava-v1.5-7b/init.sh +16 -16
  61. reComputer/scripts/llava-v1.5-7b/run.sh +9 -9
  62. reComputer/scripts/llava-v1.6-vicuna-7b/init.sh +16 -16
  63. reComputer/scripts/llava-v1.6-vicuna-7b/run.sh +10 -10
  64. reComputer/scripts/nanodb/init.sh +16 -16
  65. reComputer/scripts/nanodb/readme.md +10 -10
  66. reComputer/scripts/nanodb/run.sh +90 -90
  67. reComputer/scripts/nanoowl/init.sh +16 -16
  68. reComputer/scripts/nanoowl/run.sh +7 -7
  69. reComputer/scripts/ollama/clean.sh +22 -22
  70. reComputer/scripts/ollama/config.yaml +31 -0
  71. reComputer/scripts/ollama/init.sh +19 -16
  72. reComputer/scripts/ollama/run.sh +10 -10
  73. reComputer/scripts/parler-tts/clean.sh +7 -7
  74. reComputer/scripts/parler-tts/getVersion.sh +59 -59
  75. reComputer/scripts/parler-tts/init.sh +8 -8
  76. reComputer/scripts/parler-tts/readme.md +63 -63
  77. reComputer/scripts/parler-tts/run.sh +17 -17
  78. reComputer/scripts/run.sh +48 -48
  79. reComputer/scripts/stable-diffusion-webui/init.sh +16 -16
  80. reComputer/scripts/stable-diffusion-webui/run.sh +6 -6
  81. reComputer/scripts/text-generation-webui/init.sh +16 -16
  82. reComputer/scripts/text-generation-webui/run.sh +11 -11
  83. reComputer/scripts/ultralytics-yolo/LICENSE +21 -21
  84. reComputer/scripts/ultralytics-yolo/README.md +124 -124
  85. reComputer/scripts/ultralytics-yolo/clean.sh +6 -6
  86. reComputer/scripts/ultralytics-yolo/config.yaml +31 -31
  87. reComputer/scripts/ultralytics-yolo/init.sh +4 -4
  88. reComputer/scripts/ultralytics-yolo/run.sh +26 -26
  89. reComputer/scripts/update.sh +26 -26
  90. reComputer/scripts/utils.sh +168 -166
  91. reComputer/scripts/whisper/init.sh +16 -16
  92. reComputer/scripts/whisper/run.sh +7 -7
  93. reComputer/scripts/yolov10/Dockerfile +13 -13
  94. reComputer/scripts/yolov10/README.md +71 -71
  95. reComputer/scripts/yolov10/clean.sh +4 -4
  96. reComputer/scripts/yolov10/config.yaml +31 -31
  97. reComputer/scripts/yolov10/init.sh +20 -20
  98. reComputer/scripts/yolov10/run.sh +7 -7
  99. reComputer/scripts/yolov8-rail-inspection/config.yaml +31 -31
  100. reComputer/scripts/yolov8-rail-inspection/init.sh +5 -5
  101. reComputer/scripts/yolov8-rail-inspection/readme.md +35 -35
  102. reComputer/scripts/yolov8-rail-inspection/run.sh +21 -21
  103. jetson_examples-0.1.8.dist-info/RECORD +0 -107
  104. {jetson_examples-0.1.8.dist-info → jetson_examples-0.1.9.dist-info}/WHEEL +0 -0
  105. {jetson_examples-0.1.8.dist-info → jetson_examples-0.1.9.dist-info}/entry_points.txt +0 -0
  106. {jetson_examples-0.1.8.dist-info → jetson_examples-0.1.9.dist-info}/top_level.txt +0 -0
reComputer/scripts/parler-tts/run.sh CHANGED
@@ -1,17 +1,17 @@
- #!/bin/bash
-
- MODELS_DIR=/home/$USER/models
-
- # get L4T version
- # it exports a variable IMAGE_TAG
- source ./getVersion.sh
-
- # pull docker image
- echo "docker push feiticeir0/parler_tts:${IMAGE_TAG}"
-
- docker run \
- --rm \
- -p 7860:7860 \
- --runtime=nvidia \
- -v $(MODELS_DIR):/app \
- feiticeir0/parler_tts:${IMAGE_TAG}
+ #!/bin/bash
+
+ MODELS_DIR=/home/$USER/models
+
+ # get L4T version
+ # it exports a variable IMAGE_TAG
+ source ./getVersion.sh
+
+ # pull docker image
+ echo "docker push feiticeir0/parler_tts:${IMAGE_TAG}"
+
+ docker run \
+ --rm \
+ -p 7860:7860 \
+ --runtime=nvidia \
+ -v $(MODELS_DIR):/app \
+ feiticeir0/parler_tts:${IMAGE_TAG}
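In both the 0.1.8 and 0.1.9 copies of this script, the "pull" step only echoes a `docker push` command and the volume mount uses `$(MODELS_DIR)` (command substitution) where variable expansion appears to be intended. A minimal hand-corrected sketch of what the script seems to aim for is shown below; it is illustrative only and is not part of either released version.

```sh
#!/bin/bash
# Hypothetical corrected sketch -- not shipped in 0.1.8 or 0.1.9.
MODELS_DIR=/home/$USER/models

# getVersion.sh exports IMAGE_TAG for the detected L4T release.
source ./getVersion.sh

# Pull the image (the released script only echoes a command here).
docker pull feiticeir0/parler_tts:${IMAGE_TAG}

# Run it with the models directory mounted into the container.
docker run \
    --rm \
    -p 7860:7860 \
    --runtime=nvidia \
    -v ${MODELS_DIR}:/app \
    feiticeir0/parler_tts:${IMAGE_TAG}
```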
reComputer/scripts/run.sh CHANGED
@@ -1,48 +1,48 @@
- #!/bin/bash
- handle_error() {
- echo "An error occurred. Exiting..."
- exit 1
- }
- trap 'handle_error' ERR
-
- check_is_jetson_or_not() {
- model_file="/proc/device-tree/model"
-
- if [ -f "/proc/device-tree/model" ]; then
- model=$(tr -d '\0' < /proc/device-tree/model | tr '[:upper:]' '[:lower:]')
- if [[ $model =~ jetson|orin|nv|agx ]]; then
- echo "INFO: machine[$model] confirmed..."
- else
- echo "WARNING: machine[$model] maybe not support..."
- exit 1
- fi
- else
- echo "ERROR: machine[$model] not support this..."
- exit 1
- fi
- }
- check_is_jetson_or_not
-
- echo "run example:$1"
- BASE_PATH=/home/$USER/reComputer
-
-
- cd $JETSON_REPO_PATH
- script_dir=$(dirname "$0")
-
- init_script=$script_dir/$1/init.sh
- if [ -f $init_script ]; then
- echo "----example init----"
- bash $init_script
- else
- echo "WARN: Example[$1] init.sh Not Found."
- fi
-
- start_script=$script_dir/$1/run.sh
- if [ -f $start_script ]; then
- echo "----example start----"
- bash $start_script
- else
- echo "ERROR: Example[$1] run.sh Not Found."
- fi
- echo "----example done----"
+ #!/bin/bash
+ handle_error() {
+ echo "An error occurred. Exiting..."
+ exit 1
+ }
+ trap 'handle_error' ERR
+
+ check_is_jetson_or_not() {
+ model_file="/proc/device-tree/model"
+
+ if [ -f "/proc/device-tree/model" ]; then
+ model=$(tr -d '\0' < /proc/device-tree/model | tr '[:upper:]' '[:lower:]')
+ if [[ $model =~ jetson|orin|nv|agx ]]; then
+ echo "INFO: machine[$model] confirmed..."
+ else
+ echo "WARNING: machine[$model] maybe not support..."
+ exit 1
+ fi
+ else
+ echo "ERROR: machine[$model] not support this..."
+ exit 1
+ fi
+ }
+ check_is_jetson_or_not
+
+ echo "run example:$1"
+ BASE_PATH=/home/$USER/reComputer
+
+
+ cd $JETSON_REPO_PATH
+ script_dir=$(dirname "$0")
+
+ init_script=$script_dir/$1/init.sh
+ if [ -f $init_script ]; then
+ echo "----example init----"
+ bash $init_script
+ else
+ echo "WARN: Example[$1] init.sh Not Found."
+ fi
+
+ start_script=$script_dir/$1/run.sh
+ if [ -f $start_script ]; then
+ echo "----example start----"
+ bash $start_script
+ else
+ echo "ERROR: Example[$1] run.sh Not Found."
+ fi
+ echo "----example done----"
reComputer/scripts/stable-diffusion-webui/init.sh CHANGED
@@ -1,16 +1,16 @@
- #!/bin/bash
-
-
- BASE_PATH=/home/$USER/reComputer
- mkdir -p $BASE_PATH/
- JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
- BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
- if [ -d $JETSON_REPO_PATH ]; then
- echo "jetson-ai-lab existed."
- else
- echo "jetson-ai-lab does not installed. start init..."
- cd $BASE_PATH/
- git clone --depth=1 $BASE_JETSON_LAB_GIT
- cd $JETSON_REPO_PATH
- bash install.sh
- fi
+ #!/bin/bash
+
+
+ BASE_PATH=/home/$USER/reComputer
+ mkdir -p $BASE_PATH/
+ JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
+ BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
+ if [ -d $JETSON_REPO_PATH ]; then
+ echo "jetson-ai-lab existed."
+ else
+ echo "jetson-ai-lab does not installed. start init..."
+ cd $BASE_PATH/
+ git clone --depth=1 $BASE_JETSON_LAB_GIT
+ cd $JETSON_REPO_PATH
+ bash install.sh
+ fi
reComputer/scripts/stable-diffusion-webui/run.sh CHANGED
@@ -1,7 +1,7 @@
- #!/bin/bash
-
- BASE_PATH=/home/$USER/reComputer
- JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
- cd $JETSON_REPO_PATH
-
+ #!/bin/bash
+
+ BASE_PATH=/home/$USER/reComputer
+ JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
+ cd $JETSON_REPO_PATH
+
  ./run.sh $(./autotag stable-diffusion-webui)
reComputer/scripts/text-generation-webui/init.sh CHANGED
@@ -1,16 +1,16 @@
- #!/bin/bash
-
-
- BASE_PATH=/home/$USER/reComputer
- mkdir -p $BASE_PATH/
- JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
- BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
- if [ -d $JETSON_REPO_PATH ]; then
- echo "jetson-ai-lab existed."
- else
- echo "jetson-ai-lab does not installed. start init..."
- cd $BASE_PATH/
- git clone --depth=1 $BASE_JETSON_LAB_GIT
- cd $JETSON_REPO_PATH
- bash install.sh
- fi
+ #!/bin/bash
+
+
+ BASE_PATH=/home/$USER/reComputer
+ mkdir -p $BASE_PATH/
+ JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
+ BASE_JETSON_LAB_GIT="https://github.com/dusty-nv/jetson-containers"
+ if [ -d $JETSON_REPO_PATH ]; then
+ echo "jetson-ai-lab existed."
+ else
+ echo "jetson-ai-lab does not installed. start init..."
+ cd $BASE_PATH/
+ git clone --depth=1 $BASE_JETSON_LAB_GIT
+ cd $JETSON_REPO_PATH
+ bash install.sh
+ fi
reComputer/scripts/text-generation-webui/run.sh CHANGED
@@ -1,12 +1,12 @@
- #!/bin/bash
-
- BASE_PATH=/home/$USER/reComputer
- JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
- cd $JETSON_REPO_PATH
-
- # download llm model
- ./run.sh --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \
- 'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'
-
- # run text-generation-webui
+ #!/bin/bash
+
+ BASE_PATH=/home/$USER/reComputer
+ JETSON_REPO_PATH="$BASE_PATH/jetson-containers"
+ cd $JETSON_REPO_PATH
+
+ # download llm model
+ ./run.sh --workdir=/opt/text-generation-webui $(./autotag text-generation-webui) /bin/bash -c \
+ 'python3 download-model.py --output=/data/models/text-generation-webui TheBloke/Llama-2-7b-Chat-GPTQ'
+
+ # run text-generation-webui
  ./run.sh $(./autotag text-generation-webui)
reComputer/scripts/ultralytics-yolo/LICENSE CHANGED
@@ -1,21 +1,21 @@
- MIT License
-
- Copyright (c) [2024] [Seeed Studio]
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
+ MIT License
+
+ Copyright (c) [2024] [Seeed Studio]
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
reComputer/scripts/ultralytics-yolo/README.md CHANGED
@@ -1,124 +1,124 @@
- # Jetson-Example: Run Ultralytics YOLO Platform Service on NVIDIA Jetson Orin 🚀
-
- ## "One-Click Quick Deployment of Plug-and-Play Ultralytics YOLOv8 for All Task Models with Web UI and HTTP API Interface"
- <p align="center">
- <img src="images/Ultralytics-yolo.gif" alt="Ultralytics YOLO">
- </p>
-
- ## Introduction 📘
- In this project, you can quickly deploy all YOLOv8 task models on Nvidia Jetson Orin devices with one click. This setup enables object detection, segmentation, human pose estimation, and classification. It supports uploading local videos, images, and using a webcam, and also allows one-click TensorRT model conversion. By accessing [http://127.0.0.1:5001](http://127.0.0.1:5001) on your local machine or within the same LAN, you can quickly start using Ultralytics YOLO. Additionally, an HTTP API method has been added at [http://127.0.0.1:5001/results](http://127.0.0.1:5001/results) to display detection data results for any task, and an additional Python script is provided to read YOLOv8 detection data within Docker.
-
- ## **Key Features**:
-
- 1. **One-Click Deployment and Plug-and-Play**: Quickly deploy all YOLOv8 task models on Nvidia Jetson Orin devices.
- 2. **Comprehensive Task Support**: Enables object detection, segmentation, human pose estimation, and classification.
- 3. **Versatile Input Options**: Supports uploading local videos, images, and using a webcam.
- 4. **TensorRT Model Conversion**: Allows one-click conversion of models to TensorRT.
- 5. **Web UI Access**: Easy access via [`http://127.0.0.1:5001`](http://127.0.0.1:5001) on the local machine or within the same LAN.
- 6. **HTTP API Interface**: Added HTTP API at [`http://127.0.0.1:5001/results`](http://127.0.0.1:5001/results) to display detection data results.
- 7. **Python Script Support**: Provides an additional Python script to read YOLOv8 detection data within Docker.
-
- [![My Project](images/tasks.png)](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models)
- All models implemented in this project are from the official [Ultralytics Yolo](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models).
-
- ### Get a Jetson Orin Device 🛒
- | Device Model | Description | Link |
- |--------------|-------------|------|
- | Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |
- | reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
-
- ## Quickstart ⚡
-
- ### Modify Docker Daemon Configuration (Optional)
- To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
-
- ```json
- {
- "default-runtime": "nvidia",
- "runtimes": {
- "nvidia": {
- "path": "nvidia-container-runtime",
- "runtimeArgs": []
- }
- },
- "storage-driver": "overlay2",
- "data-root": "/var/lib/docker",
- "log-driver": "json-file",
- "log-opts": {
- "max-size": "100m",
- "max-file": "3"
- },
- "no-new-privileges": true,
- "experimental": false
- }
- ```
-
- After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
-
- ```sh
- sudo systemctl restart docker
- ```
-
- ### Installation via PyPI (Recommended) 🐍
- 1. Install the package:
- ```sh
- pip install jetson-examples
- ```
-
- 2. Restart your reComputer:
- ```sh
- sudo reboot
- ```
-
- 3. Run Ultralytics YOLO on Jetson with one command:
- ```sh
- reComputer run ultralytics-yolo
- ```
- 4. "Enter [`http://127.0.0.1:5001`](http://127.0.0.1:5001) or http://device_IP:5001 in your browser to access the Web UI."
- <p align="center">
- <img src="images/ultralytics_fig1.png" alt="Ultralytics YOLO">
- </p>
-
- - **Choose Model**: Select YOLOv8 n, s, l, m, x models and various tasks such as object detection, classification, segmentation, human pose estimation, OBB, etc.
- - **Upload Custom Model**: Users can upload their own trained YOLOv8 models.
- - **Choose Input Type**: Users can select to input locally uploaded images, videos, or real-time camera devices.
- - **Enable TensorRT**: Choose whether to convert and use the TensorRT model. The initial conversion may require varying amounts of time.
-
- 5. If you want to see the detection result data, you can enter [`http://127.0.0.1:5001/results`](http://127.0.0.1:5001/results) in your browser to view the `JSON` formatted data results. These results include `boxes` for object detection, `masks` for segmentation, `keypoints` for human pose estimation, and the `names` corresponding to all numerical categories.
- <p align="center">
- <img src="images/ultralytics_fig2.png" alt="Ultralytics YOLO">
- </p>
- We also provide a Python script to help users integrate the data into their own programs.
-
- ```python
- import requests
-
- def fetch_results():
- response = requests.get('http://localhost:5001/results')
- if response.status_code == 200:
- results = response.json()
- return results
- else:
- print('Failed to fetch results')
- return None
-
- results = fetch_results()
- print(results)
- ```
-
-
- ## Notes 📝
- - To stop detection at any time, press the Stop button.
- - When accessing the WebUI from other devices within the same LAN, use the URL: `http://{Jetson_IP}:5001`.
- - You can view the JSON formatted detection results by accessing http://{Jetson_IP}:5001/results.
- - The first model conversion may require different amounts of time depending on the hardware and network environment, so please be patient.
-
-
- ## Further Development 🔧
- - [Training a YOLOv8 Model](https://wiki.seeedstudio.com/How_to_Train_and_Deploy_YOLOv8_on_reComputer/)
- - [TensorRT Acceleration](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/)
- - [Multistreams using Deepstream](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/#multistream-model-benchmarks) Tutorials.
-
- ## License
-
- This project is licensed under the MIT License.
+ # Jetson-Example: Run Ultralytics YOLO Platform Service on NVIDIA Jetson Orin 🚀
+
+ ## "One-Click Quick Deployment of Plug-and-Play Ultralytics YOLOv8 for All Task Models with Web UI and HTTP API Interface"
+ <p align="center">
+ <img src="images/Ultralytics-yolo.gif" alt="Ultralytics YOLO">
+ </p>
+
+ ## Introduction 📘
+ In this project, you can quickly deploy all YOLOv8 task models on Nvidia Jetson Orin devices with one click. This setup enables object detection, segmentation, human pose estimation, and classification. It supports uploading local videos, images, and using a webcam, and also allows one-click TensorRT model conversion. By accessing [http://127.0.0.1:5001](http://127.0.0.1:5001) on your local machine or within the same LAN, you can quickly start using Ultralytics YOLO. Additionally, an HTTP API method has been added at [http://127.0.0.1:5001/results](http://127.0.0.1:5001/results) to display detection data results for any task, and an additional Python script is provided to read YOLOv8 detection data within Docker.
+
+ ## **Key Features**:
+
+ 1. **One-Click Deployment and Plug-and-Play**: Quickly deploy all YOLOv8 task models on Nvidia Jetson Orin devices.
+ 2. **Comprehensive Task Support**: Enables object detection, segmentation, human pose estimation, and classification.
+ 3. **Versatile Input Options**: Supports uploading local videos, images, and using a webcam.
+ 4. **TensorRT Model Conversion**: Allows one-click conversion of models to TensorRT.
+ 5. **Web UI Access**: Easy access via [`http://127.0.0.1:5001`](http://127.0.0.1:5001) on the local machine or within the same LAN.
+ 6. **HTTP API Interface**: Added HTTP API at [`http://127.0.0.1:5001/results`](http://127.0.0.1:5001/results) to display detection data results.
+ 7. **Python Script Support**: Provides an additional Python script to read YOLOv8 detection data within Docker.
+
+ [![My Project](images/tasks.png)](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models)
+ All models implemented in this project are from the official [Ultralytics Yolo](https://github.com/ultralytics/ultralytics?tab=readme-ov-file#models).
+
+ ### Get a Jetson Orin Device 🛒
+ | Device Model | Description | Link |
+ |--------------|-------------|------|
+ | Jetson Orin Nano Dev Kit, Orin Nano 8GB, 40TOPS | Developer kit for NVIDIA Jetson Orin Nano | [Buy Here](https://www.seeedstudio.com/NVIDIAr-Jetson-Orintm-Nano-Developer-Kit-p-5617.html) |
+ | reComputer J4012, powered by Orin NX 16GB, 100 TOPS | Embedded computer powered by Orin NX | [Buy Here](https://www.seeedstudio.com/reComputer-J4012-p-5586.html) |
+
+ ## Quickstart ⚡
+
+ ### Modify Docker Daemon Configuration (Optional)
+ To enhance the experience of quickly loading models in Docker, you need to add the following content to the `/etc/docker/daemon.json` file:
+
+ ```json
+ {
+ "default-runtime": "nvidia",
+ "runtimes": {
+ "nvidia": {
+ "path": "nvidia-container-runtime",
+ "runtimeArgs": []
+ }
+ },
+ "storage-driver": "overlay2",
+ "data-root": "/var/lib/docker",
+ "log-driver": "json-file",
+ "log-opts": {
+ "max-size": "100m",
+ "max-file": "3"
+ },
+ "no-new-privileges": true,
+ "experimental": false
+ }
+ ```
+
+ After modifying the `daemon.json` file, you need to restart the Docker service to apply the configuration:
+
+ ```sh
+ sudo systemctl restart docker
+ ```
+
+ ### Installation via PyPI (Recommended) 🐍
+ 1. Install the package:
+ ```sh
+ pip install jetson-examples
+ ```
+
+ 2. Restart your reComputer:
+ ```sh
+ sudo reboot
+ ```
+
+ 3. Run Ultralytics YOLO on Jetson with one command:
+ ```sh
+ reComputer run ultralytics-yolo
+ ```
+ 4. "Enter [`http://127.0.0.1:5001`](http://127.0.0.1:5001) or http://device_IP:5001 in your browser to access the Web UI."
+ <p align="center">
+ <img src="images/ultralytics_fig1.png" alt="Ultralytics YOLO">
+ </p>
+
+ - **Choose Model**: Select YOLOv8 n, s, l, m, x models and various tasks such as object detection, classification, segmentation, human pose estimation, OBB, etc.
+ - **Upload Custom Model**: Users can upload their own trained YOLOv8 models.
+ - **Choose Input Type**: Users can select to input locally uploaded images, videos, or real-time camera devices.
+ - **Enable TensorRT**: Choose whether to convert and use the TensorRT model. The initial conversion may require varying amounts of time.
+
+ 5. If you want to see the detection result data, you can enter [`http://127.0.0.1:5001/results`](http://127.0.0.1:5001/results) in your browser to view the `JSON` formatted data results. These results include `boxes` for object detection, `masks` for segmentation, `keypoints` for human pose estimation, and the `names` corresponding to all numerical categories.
+ <p align="center">
+ <img src="images/ultralytics_fig2.png" alt="Ultralytics YOLO">
+ </p>
+ We also provide a Python script to help users integrate the data into their own programs.
+
+ ```python
+ import requests
+
+ def fetch_results():
+ response = requests.get('http://localhost:5001/results')
+ if response.status_code == 200:
+ results = response.json()
+ return results
+ else:
+ print('Failed to fetch results')
+ return None
+
+ results = fetch_results()
+ print(results)
+ ```
+
+
+ ## Notes 📝
+ - To stop detection at any time, press the Stop button.
+ - When accessing the WebUI from other devices within the same LAN, use the URL: `http://{Jetson_IP}:5001`.
+ - You can view the JSON formatted detection results by accessing http://{Jetson_IP}:5001/results.
+ - The first model conversion may require different amounts of time depending on the hardware and network environment, so please be patient.
+
+
+ ## Further Development 🔧
+ - [Training a YOLOv8 Model](https://wiki.seeedstudio.com/How_to_Train_and_Deploy_YOLOv8_on_reComputer/)
+ - [TensorRT Acceleration](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/)
+ - [Multistreams using Deepstream](https://wiki.seeedstudio.com/YOLOv8-DeepStream-TRT-Jetson/#multistream-model-benchmarks) Tutorials.
+
+ ## License
+
+ This project is licensed under the MIT License.
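The `/results` endpoint documented in the README can also be checked from a shell on the Jetson (or any machine on the same LAN), equivalent to the Python snippet in the README:

```sh
# Fetch the latest detection results as JSON.
curl -s http://127.0.0.1:5001/results

# From another device on the LAN, substitute the Jetson's IP address.
curl -s http://<Jetson_IP>:5001/results
```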
reComputer/scripts/ultralytics-yolo/clean.sh CHANGED
@@ -1,7 +1,7 @@
- #!/bin/bash
- CONTAINER_NAME="ultralytics-yolo"
- IMAGE_NAME="yaohui1998/ultralytics-yolo:latest"
-
- sudo docker stop $CONTAINER_NAME
- sudo docker rm $CONTAINER_NAME
+ #!/bin/bash
+ CONTAINER_NAME="ultralytics-yolo"
+ IMAGE_NAME="yaohui1998/ultralytics-yolo:latest"
+
+ sudo docker stop $CONTAINER_NAME
+ sudo docker rm $CONTAINER_NAME
  sudo docker rmi $IMAGE_NAME
reComputer/scripts/ultralytics-yolo/config.yaml CHANGED
@@ -1,31 +1,31 @@
- # The tested JetPack versions.
- ALLOWED_L4T_VERSIONS:
- - 35.3.1
- - 35.4.1
- - 35.5.0
- - 36.3.0
- REQUIRED_DISK_SPACE: 20 # in GB
- REQUIRED_MEM_SPACE: 4
- PACKAGES:
- - nvidia-jetpack
- DOCKER:
- ENABLE: true
- DAEMON: |
- {
- "default-runtime": "nvidia",
- "runtimes": {
- "nvidia": {
- "path": "nvidia-container-runtime",
- "runtimeArgs": []
- }
- },
- "storage-driver": "overlay2",
- "data-root": "/var/lib/docker",
- "log-driver": "json-file",
- "log-opts": {
- "max-size": "100m",
- "max-file": "3"
- },
- "no-new-privileges": true,
- "experimental": false
- }
+ # The tested JetPack versions.
+ ALLOWED_L4T_VERSIONS:
+ - 35.3.1
+ - 35.4.1
+ - 35.5.0
+ - 36.3.0
+ REQUIRED_DISK_SPACE: 20 # in GB
+ REQUIRED_MEM_SPACE: 4
+ PACKAGES:
+ - nvidia-jetpack
+ DOCKER:
+ ENABLE: true
+ DAEMON: |
+ {
+ "default-runtime": "nvidia",
+ "runtimes": {
+ "nvidia": {
+ "path": "nvidia-container-runtime",
+ "runtimeArgs": []
+ }
+ },
+ "storage-driver": "overlay2",
+ "data-root": "/var/lib/docker",
+ "log-driver": "json-file",
+ "log-opts": {
+ "max-size": "100m",
+ "max-file": "3"
+ },
+ "no-new-privileges": true,
+ "experimental": false
+ }
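The `config.yaml` above is consumed by `check_base_env` in `reComputer/scripts/utils.sh` (see the `init.sh` hunk that follows). The packaged implementation is not shown in this diff; the sketch below only illustrates the kind of check the fields imply, with the grep-based parsing and the `nvidia-l4t-core` version lookup as assumptions:

```sh
#!/bin/bash
# Illustrative sketch only -- not the utils.sh shipped in the package.
CONFIG=config.yaml

# Read the running L4T release (e.g. 36.3.0) from the JetPack core package.
L4T_VERSION=$(dpkg-query --showformat='${Version}' --show nvidia-l4t-core 2>/dev/null | cut -d- -f1)

# Warn if it is not one of the tested ALLOWED_L4T_VERSIONS entries.
if ! grep -qE "^[[:space:]]*-[[:space:]]*${L4T_VERSION}[[:space:]]*$" "$CONFIG"; then
    echo "WARNING: L4T ${L4T_VERSION} is not in the tested list."
fi

# Fail if free disk space is below REQUIRED_DISK_SPACE (GB).
REQUIRED_GB=$(grep -oP 'REQUIRED_DISK_SPACE:\s*\K[0-9]+' "$CONFIG")
FREE_GB=$(df -BG --output=avail / | tail -1 | tr -dc '0-9')
if [ "$FREE_GB" -lt "$REQUIRED_GB" ]; then
    echo "ERROR: need ${REQUIRED_GB}GB free on /, only ${FREE_GB}GB available."
    exit 1
fi
```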
reComputer/scripts/ultralytics-yolo/init.sh CHANGED
@@ -1,4 +1,4 @@
- #!/bin/bash
-
- source $(dirname "$(realpath "$0")")/../utils.sh
- check_base_env "$(dirname "$(realpath "$0")")/config.yaml"
+ #!/bin/bash
+
+ source $(dirname "$(realpath "$0")")/../utils.sh
+ check_base_env "$(dirname "$(realpath "$0")")/config.yaml"