jetson-examples 0.0.6__tar.gz → 0.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/PKG-INFO +12 -9
  2. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/README.md +11 -8
  3. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/PKG-INFO +12 -9
  4. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/SOURCES.txt +0 -2
  5. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/pyproject.toml +1 -1
  6. jetson_examples-0.0.7/reComputer/__init__.py +1 -0
  7. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/nanodb/run.sh +25 -0
  8. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/run.sh +0 -25
  9. jetson_examples-0.0.6/reComputer/__init__.py +0 -1
  10. jetson_examples-0.0.6/reComputer/scripts/hello-world/readme.md +0 -3
  11. jetson_examples-0.0.6/reComputer/scripts/hello-world/run.sh +0 -9
  12. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/LICENSE +0 -0
  13. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/dependency_links.txt +0 -0
  14. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/entry_points.txt +0 -0
  15. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/top_level.txt +0 -0
  16. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/main.py +0 -0
  17. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh +0 -0
  18. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/check.sh +0 -0
  19. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/live-llava/run.sh +0 -0
  20. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/llama3/run.sh +0 -0
  21. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/llava/run.sh +0 -0
  22. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/llava-v1.5-7b/run.sh +0 -0
  23. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/llava-v1.6-vicuna-7b/run.sh +0 -0
  24. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/nanodb/readme.md +0 -0
  25. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/nanoowl/run.sh +0 -0
  26. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/ollama/run.sh +0 -0
  27. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/stable-diffusion-webui/run.sh +0 -0
  28. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/text-generation-webui/run.sh +0 -0
  29. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/update.sh +0 -0
  30. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/whisper/run.sh +0 -0
  31. {jetson_examples-0.0.6 → jetson_examples-0.0.7}/setup.cfg +0 -0
{jetson_examples-0.0.6 → jetson_examples-0.0.7}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: jetson-examples
- Version: 0.0.6
+ Version: 0.0.7
  Summary: Running Gen AI models and applications on NVIDIA Jetson devices with one-line command
  Author-email: luozhixin <zhixin.luo@seeed.cc>
  Project-URL: Homepage, https://github.com/Seeed-Projects/jetson-examples
@@ -33,6 +33,7 @@ pip install jetson-examples
  ```

  - [more installation methods](./docs/install.md)
+ - If you have already installed, you can use `pip install jetson-examples --upgrade` to update.

  ## Quickstart

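The upgrade line added above can be exercised directly from a shell. A minimal sketch, assuming jetson-examples was installed from PyPI with pip:

```bash
# Minimal sketch of the upgrade path described in the added README/PKG-INFO line
# (assumes a standard pip install of jetson-examples from PyPI).
pip show jetson-examples | grep Version   # reports 0.0.6 before upgrading
pip install --upgrade jetson-examples     # fetches the 0.0.7 release
pip show jetson-examples | grep Version   # should now report 0.0.7
```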
@@ -48,14 +49,16 @@ reComputer supports a list of examples from [jetson-ai-lab](https://www.jetson-a

  Here are some examples that can be run:

- | Example | Type | Model/Data Size | Image Size | Command |
- | ---------------------- | ------------------------ | --------------- | ---------- | --------------------------------------- |
- | text-generation-webui | Text (LLM) | 3.9GB | 14.8GB | `reComputer run text-generation-webui` |
- | LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` |
- | stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` |
- | nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` |
- | nanodb | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` |
- | whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` |
+ | Example | Type | Model/Data Size | Image Size | Command |
+ | ------------------------------------------------ | ------------------------ | --------------- | ---------- | --------------------------------------- |
+ | 🆕 llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` |
+ | 🆕 [ollama](https://github.com/ollama/ollama) | Inference Server | * | 10.5GB | `reComputer run ollama` |
+ | LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` |
+ | Live LLaVA | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run live-llava` |
+ | stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` |
+ | nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` |
+ | [nanodb](../reComputer/scripts/nanodb/readme.md) | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` |
+ | whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` |

  > Note: You should have enough space to run example, like `LLaVA`, at least `27.4GB` totally

{jetson_examples-0.0.6 → jetson_examples-0.0.7}/README.md
@@ -18,6 +18,7 @@ pip install jetson-examples
  ```

  - [more installation methods](./docs/install.md)
+ - If you have already installed, you can use `pip install jetson-examples --upgrade` to update.

  ## Quickstart

@@ -33,14 +34,16 @@ reComputer supports a list of examples from [jetson-ai-lab](https://www.jetson-a

  Here are some examples that can be run:

- | Example | Type | Model/Data Size | Image Size | Command |
- | ---------------------- | ------------------------ | --------------- | ---------- | --------------------------------------- |
- | text-generation-webui | Text (LLM) | 3.9GB | 14.8GB | `reComputer run text-generation-webui` |
- | LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` |
- | stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` |
- | nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` |
- | nanodb | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` |
- | whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` |
+ | Example | Type | Model/Data Size | Image Size | Command |
+ | ------------------------------------------------ | ------------------------ | --------------- | ---------- | --------------------------------------- |
+ | 🆕 llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` |
+ | 🆕 [ollama](https://github.com/ollama/ollama) | Inference Server | * | 10.5GB | `reComputer run ollama` |
+ | LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` |
+ | Live LLaVA | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run live-llava` |
+ | stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` |
+ | nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` |
+ | [nanodb](../reComputer/scripts/nanodb/readme.md) | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` |
+ | whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` |

  > Note: You should have enough space to run example, like `LLaVA`, at least `27.4GB` totally

{jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: jetson-examples
- Version: 0.0.6
+ Version: 0.0.7
  Summary: Running Gen AI models and applications on NVIDIA Jetson devices with one-line command
  Author-email: luozhixin <zhixin.luo@seeed.cc>
  Project-URL: Homepage, https://github.com/Seeed-Projects/jetson-examples
@@ -33,6 +33,7 @@ pip install jetson-examples
  ```

  - [more installation methods](./docs/install.md)
+ - If you have already installed, you can use `pip install jetson-examples --upgrade` to update.

  ## Quickstart

@@ -48,14 +49,16 @@ reComputer supports a list of examples from [jetson-ai-lab](https://www.jetson-a

  Here are some examples that can be run:

- | Example | Type | Model/Data Size | Image Size | Command |
- | ---------------------- | ------------------------ | --------------- | ---------- | --------------------------------------- |
- | text-generation-webui | Text (LLM) | 3.9GB | 14.8GB | `reComputer run text-generation-webui` |
- | LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` |
- | stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` |
- | nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` |
- | nanodb | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` |
- | whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` |
+ | Example | Type | Model/Data Size | Image Size | Command |
+ | ------------------------------------------------ | ------------------------ | --------------- | ---------- | --------------------------------------- |
+ | 🆕 llama3 | Text (LLM) | 4.9GB | 10.5GB | `reComputer run llama3` |
+ | 🆕 [ollama](https://github.com/ollama/ollama) | Inference Server | * | 10.5GB | `reComputer run ollama` |
+ | LLaVA | Text + Vision (VLM) | 13GB | 14.4GB | `reComputer run llava` |
+ | Live LLaVA | Text + Vision (VLM) | 13GB | 20.3GB | `reComputer run live-llava` |
+ | stable-diffusion-webui | Image Generation | 3.97G | 7.3GB | `reComputer run stable-diffusion-webui` |
+ | nanoowl | Vision Transformers(ViT) | 613MB | 15.1GB | `reComputer run nanoowl` |
+ | [nanodb](../reComputer/scripts/nanodb/readme.md) | Vector Database | 76GB | 7.0GB | `reComputer run nanodb` |
+ | whisper | Audio | 1.5GB | 6.0GB | `reComputer run whisper` |

  > Note: You should have enough space to run example, like `LLaVA`, at least `27.4GB` totally

{jetson_examples-0.0.6 → jetson_examples-0.0.7}/jetson_examples.egg-info/SOURCES.txt
@@ -12,8 +12,6 @@ reComputer/scripts/check.sh
  reComputer/scripts/run.sh
  reComputer/scripts/update.sh
  reComputer/scripts/Sheared-LLaMA-2.7B-ShareGPT/run.sh
- reComputer/scripts/hello-world/readme.md
- reComputer/scripts/hello-world/run.sh
  reComputer/scripts/live-llava/run.sh
  reComputer/scripts/llama3/run.sh
  reComputer/scripts/llava/run.sh
{jetson_examples-0.0.6 → jetson_examples-0.0.7}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "jetson-examples"
- version = "0.0.6"
+ version = "0.0.7"
  authors = [{ name = "luozhixin", email = "zhixin.luo@seeed.cc" }]
  description = "Running Gen AI models and applications on NVIDIA Jetson devices with one-line command"
  readme = "README.md"
jetson_examples-0.0.7/reComputer/__init__.py
@@ -0,0 +1 @@
+ __version__ = "0.0.7"
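The new `reComputer/__init__.py` exposes the release number as a module attribute. A quick hedged check, assuming the 0.0.7 package is installed:

```bash
# The attribute comes straight from the one-line __init__.py added above.
python3 -c 'import reComputer; print(reComputer.__version__)'   # expected: 0.0.7
```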
{jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/nanodb/run.sh
@@ -3,6 +3,31 @@
  BASE_PATH=/home/$USER/reComputer
  JETSON_REPO_PATH="$BASE_PATH/jetson-containers"

+ check_disk_space() {
+     directory="$1"         # a directory
+     required_space_gb="$2" # how many GB we need
+
+     # get disk of directory
+     device=$(df -P "$directory" | awk 'NR==2 {print $1}')
+     echo $device
+
+     # get free space in KB
+     free_space=$(df -P "$device" | awk 'NR==2 {print $4}')
+     echo $free_space
+
+     # change unit to GB
+     free_space_gb=$(echo "scale=2; $free_space / 1024 / 1024" | bc)
+     echo $free_space_gb
+
+     # check and fast-fail
+     if (( $(echo "$free_space_gb >= $required_space_gb" | bc -l) )); then
+         echo "disk space ($1) enough, keep going."
+     else
+         echo "disk space ($1) not enough!! we need $2 GB!!"
+         exit 1
+     fi
+ }
+
  # check data files TODO: support params to force download
  DATA_PATH="$JETSON_REPO_PATH/data/datasets/coco/2017"
  if [ ! -d $DATA_PATH ]; then
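This hunk adds the `check_disk_space` helper to `nanodb/run.sh` but does not show where it is called. A hedged usage sketch; the data path and the 76GB threshold are taken from the nanodb row of the README table rather than from this diff:

```bash
# Hypothetical call site (not shown in the hunk): guard the nanodb/COCO data
# download with the helper, using the ~76GB figure from the README table.
check_disk_space "$JETSON_REPO_PATH/data" 76
```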
{jetson_examples-0.0.6 → jetson_examples-0.0.7}/reComputer/scripts/run.sh
@@ -18,31 +18,6 @@ check_is_jetson_or_not() {
  }
  check_is_jetson_or_not

- check_disk_space() {
-     directory="$1"         # a directory
-     required_space_gb="$2" # how many GB we need
-
-     # get disk of directory
-     device=$(df -P "$directory" | awk 'NR==2 {print $1}')
-     echo $device
-
-     # get free space in KB
-     free_space=$(df -P "$device" | awk 'NR==2 {print $4}')
-     echo $free_space
-
-     # change unit to GB
-     free_space_gb=$(echo "scale=2; $free_space / 1024 / 1024" | bc)
-     echo $free_space_gb
-
-     # check and fast-fail
-     if (( $(echo "$free_space_gb >= $required_space_gb" | bc -l) )); then
-         echo "disk space ($1) enough, keep going."
-     else
-         echo "disk space ($1) not enough!! we need $2 GB!!"
-         exit 1
-     fi
- }
-
  echo "run example:$1"
  BASE_PATH=/home/$USER/reComputer

jetson_examples-0.0.6/reComputer/__init__.py
@@ -1 +0,0 @@
- __version__ = "0.0.6"
jetson_examples-0.0.6/reComputer/scripts/hello-world/readme.md
@@ -1,3 +0,0 @@
- # hello-world
-
- - print `hello-world` to show how to add your project into this package
jetson_examples-0.0.6/reComputer/scripts/hello-world/run.sh
@@ -1,9 +0,0 @@
- #!/bin/bash
- ./run.sh $(./autotag llava) \
-     python3 -m nano_llm.agents.video_query --api=mlc \
-       --model Efficient-Large-Model/VILA-2.7b \
-       --max-context-len 768 \
-       --max-new-tokens 32 \
-       --video-input /dev/video0 \
-       --video-output webrtc://@:8554/output \
-       --nanodb /data/nanodb/coco/2017