inference-cli 0.9.0 (inference_cli-0.9.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. cli/__init__.py +0 -0
  2. cli/inference_cli/__init__.py +0 -0
  3. cli/inference_cli/actions.py +31 -0
  4. cli/inference_cli/lib/__init__.py +1 -0
  5. cli/inference_cli/lib/container_adapter.py +112 -0
  6. cli/inference_cli/main.py +46 -0
  7. cli/inference_cli/server.py +40 -0
  8. cli/inference_cli/version.py +5 -0
  9. cli/setup.py +49 -0
  10. inference/__init__.py +0 -0
  11. inference/cli/__init__.py +0 -0
  12. inference/cli/inference_cli/__init__.py +0 -0
  13. inference/cli/inference_cli/actions.py +31 -0
  14. inference/cli/inference_cli/lib/__init__.py +1 -0
  15. inference/cli/inference_cli/lib/container_adapter.py +112 -0
  16. inference/cli/inference_cli/main.py +51 -0
  17. inference/cli/inference_cli/version.py +5 -0
  18. inference/cli/setup.py +49 -0
  19. inference/core/__init__.py +52 -0
  20. inference/core/cache/__init__.py +8 -0
  21. inference/core/cache/base.py +88 -0
  22. inference/core/cache/memory.py +151 -0
  23. inference/core/cache/redis.py +142 -0
  24. inference/core/data_models.py +855 -0
  25. inference/core/devices/__init__.py +0 -0
  26. inference/core/devices/utils.py +140 -0
  27. inference/core/env.py +287 -0
  28. inference/core/exceptions.py +126 -0
  29. inference/core/interfaces/__init__.py +0 -0
  30. inference/core/interfaces/base.py +8 -0
  31. inference/core/interfaces/camera/__init__.py +0 -0
  32. inference/core/interfaces/camera/camera.py +100 -0
  33. inference/core/interfaces/http/__init__.py +0 -0
  34. inference/core/interfaces/http/http_api.py +988 -0
  35. inference/core/interfaces/udp/__init__.py +0 -0
  36. inference/core/interfaces/udp/udp_stream.py +258 -0
  37. inference/core/logger.py +10 -0
  38. inference/core/managers/__init__.py +1 -0
  39. inference/core/managers/base.py +259 -0
  40. inference/core/managers/decorators/__init__.py +0 -0
  41. inference/core/managers/decorators/base.py +147 -0
  42. inference/core/managers/decorators/fixed_size_cache.py +82 -0
  43. inference/core/managers/decorators/logger.py +51 -0
  44. inference/core/managers/entities.py +11 -0
  45. inference/core/managers/metrics.py +79 -0
  46. inference/core/managers/pingback.py +153 -0
  47. inference/core/models/__init__.py +0 -0
  48. inference/core/models/base.py +148 -0
  49. inference/core/models/classification_base.py +336 -0
  50. inference/core/models/instance_segmentation_base.py +276 -0
  51. inference/core/models/mixins.py +162 -0
  52. inference/core/models/object_detection_base.py +258 -0
  53. inference/core/models/roboflow.py +837 -0
  54. inference/core/models/types.py +3 -0
  55. inference/core/nms.py +154 -0
  56. inference/core/registries/__init__.py +1 -0
  57. inference/core/registries/base.py +32 -0
  58. inference/core/registries/decorators/__init__.py +1 -0
  59. inference/core/registries/decorators/base.py +36 -0
  60. inference/core/registries/roboflow.py +127 -0
  61. inference/core/usage.py +63 -0
  62. inference/core/utils/__init__.py +0 -0
  63. inference/core/utils/image_utils.py +207 -0
  64. inference/core/utils/onnx.py +17 -0
  65. inference/core/utils/postprocess.py +476 -0
  66. inference/core/utils/preprocess.py +965 -0
  67. inference/core/utils/url_utils.py +21 -0
  68. inference/core/version.py +5 -0
  69. inference/models/__init__.py +24 -0
  70. inference/models/clip/__init__.py +1 -0
  71. inference/models/clip/clip_model.py +351 -0
  72. inference/models/gaze/__init__.py +1 -0
  73. inference/models/gaze/gaze.py +364 -0
  74. inference/models/gaze/l2cs.py +84 -0
  75. inference/models/sam/__init__.py +1 -0
  76. inference/models/sam/segment_anything.py +312 -0
  77. inference/models/utils.py +136 -0
  78. inference/models/vit/__init__.py +1 -0
  79. inference/models/vit/vit_classification.py +42 -0
  80. inference/models/yolact/__init__.py +1 -0
  81. inference/models/yolact/yolact_instance_segmentation.py +321 -0
  82. inference/models/yolov5/__init__.py +4 -0
  83. inference/models/yolov5/yolov5_instance_segmentation.py +39 -0
  84. inference/models/yolov5/yolov5_object_detection.py +39 -0
  85. inference/models/yolov7/__init__.py +3 -0
  86. inference/models/yolov7/yolov7_instance_segmentation.py +32 -0
  87. inference/models/yolov8/__init__.py +5 -0
  88. inference/models/yolov8/yolov8_classification.py +13 -0
  89. inference/models/yolov8/yolov8_instance_segmentation.py +50 -0
  90. inference/models/yolov8/yolov8_object_detection.py +47 -0
  91. inference_cli/__init__.py +0 -0
  92. inference_cli/actions.py +31 -0
  93. inference_cli/lib/__init__.py +5 -0
  94. inference_cli/lib/container_adapter.py +142 -0
  95. inference_cli/lib/infer_adapter.py +27 -0
  96. inference_cli/main.py +46 -0
  97. inference_cli/server.py +40 -0
  98. inference_cli/setup.py +101 -0
  99. inference_cli/version.py +5 -0
  100. inference_cli-0.9.0.dist-info/LICENSE +3 -0
  101. inference_cli-0.9.0.dist-info/LICENSE.core +201 -0
  102. inference_cli-0.9.0.dist-info/METADATA +187 -0
  103. inference_cli-0.9.0.dist-info/RECORD +138 -0
  104. inference_cli-0.9.0.dist-info/WHEEL +5 -0
  105. inference_cli-0.9.0.dist-info/entry_points.txt +2 -0
  106. inference_cli-0.9.0.dist-info/top_level.txt +1 -0
  107. inference_client/__init__.py +10 -0
  108. inference_client/http/__init__.py +0 -0
  109. inference_client/http/client.py +389 -0
  110. inference_client/http/entities.py +181 -0
  111. inference_client/http/errors.py +69 -0
  112. inference_client/http/utils/__init__.py +0 -0
  113. inference_client/http/utils/encoding.py +45 -0
  114. inference_client/http/utils/iterables.py +13 -0
  115. inference_client/http/utils/loaders.py +126 -0
  116. inference_client/http/utils/post_processing.py +152 -0
  117. inference_client/http/utils/pre_processing.py +60 -0
  118. tests/__init__.py +0 -0
  119. tests/inference/__init__.py +0 -0
  120. tests/inference/clip_test.py +81 -0
  121. tests/inference/gaze_test.py +110 -0
  122. tests/inference/populate_expected_responses.py +62 -0
  123. tests/inference/populate_expected_responses_clip.py +44 -0
  124. tests/inference/populate_expected_responses_sam.py +44 -0
  125. tests/inference/regression_test.py +474 -0
  126. tests/inference/sam_test.py +82 -0
  127. tests/inference_client/__init__.py +0 -0
  128. tests/inference_client/unit_tests/__init__.py +0 -0
  129. tests/inference_client/unit_tests/http/__init__.py +0 -0
  130. tests/inference_client/unit_tests/http/test_client.py +1038 -0
  131. tests/inference_client/unit_tests/http/test_entities.py +123 -0
  132. tests/inference_client/unit_tests/http/utils/__init__.py +0 -0
  133. tests/inference_client/unit_tests/http/utils/conftest.py +36 -0
  134. tests/inference_client/unit_tests/http/utils/test_encoding.py +114 -0
  135. tests/inference_client/unit_tests/http/utils/test_iterables.py +62 -0
  136. tests/inference_client/unit_tests/http/utils/test_loaders.py +360 -0
  137. tests/inference_client/unit_tests/http/utils/test_postprocessing.py +387 -0
  138. tests/inference_client/unit_tests/http/utils/test_preprocessing.py +82 -0
cli/__init__.py ADDED
File without changes
cli/inference_cli/__init__.py ADDED
File without changes
cli/inference_cli/actions.py ADDED
@@ -0,0 +1,31 @@
+ import base64
+ import requests
+
+ def is_url(string: str):
+     return string.startswith("http")
+
+ def infer(image, project_id, model_version, api_key, host):
+     endpoint = f"{host}/{project_id}/{model_version}"
+     headers = {
+         'Content-Type': 'application/json'
+     }
+
+     if is_url(image):
+         image_type = "url"
+         params = {
+             "api_key": api_key,
+             "image": image,
+             "image_type": image_type
+         }
+         response = requests.post(endpoint, headers=headers, params=params).json()
+     else:
+         with open(image, 'rb') as image_file:
+             image = base64.b64encode(image_file.read()).decode('utf-8')
+         image_type = "base64"
+         params = {
+             "api_key": api_key,
+             "image_type": image_type
+         }
+         response = requests.post(endpoint, headers=headers, params=params, data=image).json()
+
+     print(response)
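
For orientation, a minimal sketch of calling this helper directly, assuming the package is installed and an inference server is already reachable; the image URL, project id, model version, and API key below are placeholders:

# Hypothetical direct call to the helper defined above; all values are placeholders.
from inference_cli.actions import infer

infer(
    image="https://example.com/dog.jpg",  # a URL, so the "url" branch is taken
    project_id="my-project",
    model_version="1",
    api_key="MY_API_KEY",
    host="http://localhost:9001",
)
# Note the helper prints the server's JSON response rather than returning it.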
cli/inference_cli/lib/__init__.py ADDED
@@ -0,0 +1 @@
+ from inference_cli.lib.container_adapter import start_inference_container
cli/inference_cli/lib/container_adapter.py ADDED
@@ -0,0 +1,112 @@
+ import subprocess
+
+ import docker
+ import typer
+
+ docker_client = docker.from_env()
+
+
+ def ask_user_to_kill_container(c):
+     name = c.attrs.get("Name", "")
+     env_vars = c.attrs.get("Config", {}).get("Env", {})
+     port = 9001
+     for var in env_vars:
+         if var.startswith("PORT="):
+             port = var.split("=")[1]
+     should_delete = typer.confirm(
+         f" An inference server is already running in container {name} on port {port}. Are you sure you want to delete it?"
+     )
+     return should_delete
+
+
+ def is_inference_server_container(container):
+     image_tags = container.image.tags
+     for t in image_tags:
+         if t.startswith("roboflow/roboflow-inference-server"):
+             return True
+     return False
+
+
+ def handle_existing_containers(containers):
+     has_existing_containers = False
+     for c in containers:
+         if is_inference_server_container(c):
+             has_existing_containers = True
+             if c.attrs.get("State", {}).get("Status", "").lower() == "running":
+                 should_kill = ask_user_to_kill_container(c)
+                 if should_kill:
+                     c.kill()
+                     has_existing_containers = False
+     return has_existing_containers
+
+
+ def find_existing_containers():
+     containers = []
+     for c in docker_client.containers.list():
+         if is_inference_server_container(c):
+             if c.attrs.get("State", {}).get("Status", "").lower() == "running":
+                 containers.append(c)
+     return containers
+
+
+ def get_image():
+     try:
+         subprocess.check_output("nvidia-smi")
+         print("GPU detected. Using a GPU image.")
+         return "roboflow/roboflow-inference-server-gpu:latest"
+     except:
+         print("No GPU detected. Using a CPU image.")
+         return "roboflow/roboflow-inference-server-cpu:latest"
+
+
+ def start_inference_container(
+     api_key,
+     image=None,
+     port=9001,
+     labels=None,
+     project="roboflow-platform",
+     metrics_enabled=True,
+     device_id=None,
+     num_workers=1,
+ ):
+     containers = find_existing_containers()
+     if len(containers) > 0:
+         still_has_containers = handle_existing_containers(containers)
+         if still_has_containers:
+             print("Please kill the existing containers and try again.")
+             return
+
+     if image is None:
+         image = get_image()
+
+     device_requests = None
+     privileged = False
+     if "gpu" in image:
+         privileged = True
+         device_requests = (
+             [docker.types.DeviceRequest(device_ids=["all"], capabilities=[["gpu"]])],
+         )
+
+     print(f"Starting inference server container...")
+     docker_client.containers.run(
+         image=image,
+         privileged=privileged,
+         detach=True,
+         labels=labels,
+         ports={"9001": port},
+         # network="host",
+         device_requests=device_requests,
+         environment=[
+             "HOST=0.0.0.0",
+             f"PORT={port}",
+             f"PROJECT={project}",
+             f"METRICS_ENABLED={metrics_enabled}",
+             f"DEVICE_ID={device_id}",
+             f"API_KEY={api_key}",
+             f"NUM_WORKERS={num_workers}",
+         ],
+     )
+
+
+ if __name__ == "__main__":
+     start_inference_container("my_api_key")
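
As a sketch of how this adapter is meant to be driven (a local Docker daemon is required; the API key is a placeholder):

# Hypothetical invocation of the adapter above. It auto-selects a GPU or CPU
# server image when none is given and publishes container port 9001 to the host.
from inference_cli.lib.container_adapter import start_inference_container

start_inference_container(
    api_key="MY_API_KEY",         # placeholder; forwarded as the API_KEY env var
    port=9001,                    # host port to publish
    project="roboflow-platform",  # forwarded as the PROJECT env var
)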
cli/inference_cli/main.py ADDED
@@ -0,0 +1,46 @@
+ import typer
+ from typing_extensions import Annotated
+ from inference_cli.server import server_app
+ import inference_cli.lib
+
+ app = typer.Typer()
+
+ app.add_typer(server_app, name="server")
+
+
+ @app.command()
+ def infer(
+     image: Annotated[
+         str, typer.Argument(help="URL or local path of image to run inference on.")
+     ],
+     project_id: Annotated[
+         str,
+         typer.Option(
+             "--project-id", "-p", help="Roboflow project to run inference with."
+         ),
+     ],
+     model_version: Annotated[
+         str,
+         typer.Option(
+             "--model-version",
+             "-v",
+             help="Version of model to run inference with.",
+         ),
+     ],
+     api_key: Annotated[
+         str,
+         typer.Option("--api-key", "-a", help="Roboflow API key for your workspace."),
+     ],
+     host: Annotated[
+         str,
+         typer.Option("--host", "-h", help="Host to run inference on."),
+     ] = "http://localhost:9001",
+ ):
+     typer.echo(
+         f"Running inference on image {image}, using model ({project_id}/{model_version}), and host ({host})"
+     )
+     inference_cli.lib.infer(image, project_id, model_version, api_key, host)
+
+
+ if __name__ == "__main__":
+     app()
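
Since the commands are plain Typer apps, they can be smoke-tested without a shell via typer's test runner; a hedged sketch with placeholder arguments, assuming an inference server is reachable on the default host:

# Hypothetical smoke test of the `infer` command using typer.testing.
from typer.testing import CliRunner

from inference_cli.main import app

runner = CliRunner()
result = runner.invoke(
    app,
    [
        "infer",
        "https://example.com/dog.jpg",  # positional image argument
        "--project-id", "my-project",
        "--model-version", "1",
        "--api-key", "MY_API_KEY",
    ],
)
print(result.output)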
cli/inference_cli/server.py ADDED
@@ -0,0 +1,40 @@
+ import typer
+ from typing_extensions import Annotated
+ from inference_cli.lib import start_inference_container, check_inference_server_status
+
+ server_app = typer.Typer(
+     help="""Commands for running the inference server locally. \n
+     Supported devices targets are x86 CPU, ARM64 CPU, and NVIDIA GPU."""
+ )
+
+
+ @server_app.command()
+ def start(
+     port: Annotated[
+         int,
+         typer.Option(
+             "--port",
+             "-p",
+             help="Port to run the inference server on (default is 9001).",
+         ),
+     ] = 9001,
+     rf_env: Annotated[
+         str,
+         typer.Option(
+             "--rf-env",
+             "-rfe",
+             help="Roboflow environment to run the inference server with (default is roboflow-platform).",
+         ),
+     ] = "roboflow-platform",
+ ):
+     start_inference_container("", port=port, project=rf_env)
+
+
+ @server_app.command()
+ def status():
+     print("Checking status of inference server.")
+     check_inference_server_status()
+
+
+ if __name__ == "__main__":
+     server_app()
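
This sub-app is mounted under the name "server" in main.py, so the same runner can exercise it; whether `status` succeeds depends on check_inference_server_status, whose implementation is not shown in this diff:

# Hypothetical exercise of the server sub-commands (requires a local Docker daemon).
from typer.testing import CliRunner

from inference_cli.main import app

runner = CliRunner()
runner.invoke(app, ["server", "start", "--port", "9001"])
print(runner.invoke(app, ["server", "status"]).output)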
cli/inference_cli/version.py ADDED
@@ -0,0 +1,5 @@
+ __version__ = "0.0.11"
+
+
+ if __name__ == "__main__":
+     print(__version__)
cli/setup.py ADDED
@@ -0,0 +1,49 @@
+ import os
+ import setuptools
+ from setuptools import find_packages
+ import sys
+ import shutil
+
+ root = os.path.abspath(os.path.join(os.path.dirname(__file__)))
+ sys.path.append(root)
+
+ shutil.copyfile(
+     os.path.join(root, "../inference/core/version.py"),
+     os.path.join(root, "inference_cli/version.py"),
+ )
+ from inference_cli.version import __version__
+
+ with open("README.md", "r") as fh:
+     long_description = fh.read()
+
+
+ def read_requirements(path):
+     with open(os.path.join(root, path)) as fh:
+         return [line.strip() for line in fh]
+
+
+ setuptools.setup(
+     name="inference-cli",
+     version=__version__,
+     author="Roboflow",
+     author_email="help@roboflow.com",
+     description="With no prior knowledge of machine learning or device-specific deployment, you can deploy a computer vision model to a range of devices and environments using Roboflow Inference CLI.",
+     long_description=long_description,
+     long_description_content_type="text/markdown",
+     url="https://github.com/roboflow/inference",
+     packages=find_packages(
+         where="./",
+     ),
+     entry_points={
+         "console_scripts": [
+             "inference=inference_cli.main:app",
+         ],
+     },
+     install_requires=read_requirements("requirements.txt"),
+     classifiers=[
+         "Programming Language :: Python :: 3",
+         "License :: OSI Approved :: Apache Software License",
+         "Operating System :: OS Independent",
+     ],
+     python_requires=">=3.7",
+ )
inference/__init__.py ADDED
File without changes
inference/cli/__init__.py ADDED
File without changes
inference/cli/inference_cli/__init__.py ADDED
File without changes
inference/cli/inference_cli/actions.py ADDED
@@ -0,0 +1,31 @@
+ import base64
+ import requests
+
+ def is_url(string: str):
+     return string.startswith("http")
+
+ def infer(image, project_id, model_version, api_key, host):
+     endpoint = f"{host}/{project_id}/{model_version}"
+     headers = {
+         'Content-Type': 'application/json'
+     }
+
+     if is_url(image):
+         image_type = "url"
+         params = {
+             "api_key": api_key,
+             "image": image,
+             "image_type": image_type
+         }
+         response = requests.post(endpoint, headers=headers, params=params).json()
+     else:
+         with open(image, 'rb') as image_file:
+             image = base64.b64encode(image_file.read()).decode('utf-8')
+         image_type = "base64"
+         params = {
+             "api_key": api_key,
+             "image_type": image_type
+         }
+         response = requests.post(endpoint, headers=headers, params=params, data=image).json()
+
+     print(response)
inference/cli/inference_cli/lib/__init__.py ADDED
@@ -0,0 +1 @@
+ from inference_cli.lib.container_adapter import start_inference_container
inference/cli/inference_cli/lib/container_adapter.py ADDED
@@ -0,0 +1,112 @@
+ import subprocess
+
+ import docker
+ import typer
+
+ docker_client = docker.from_env()
+
+
+ def ask_user_to_kill_container(c):
+     name = c.attrs.get("Name", "")
+     env_vars = c.attrs.get("Config", {}).get("Env", {})
+     port = 9001
+     for var in env_vars:
+         if var.startswith("PORT="):
+             port = var.split("=")[1]
+     should_delete = typer.confirm(
+         f" An inference server is already running in container {name} on port {port}. Are you sure you want to delete it?"
+     )
+     return should_delete
+
+
+ def is_inference_server_container(container):
+     image_tags = container.image.tags
+     for t in image_tags:
+         if t.startswith("roboflow/roboflow-inference-server"):
+             return True
+     return False
+
+
+ def handle_existing_containers(containers):
+     has_existing_containers = False
+     for c in containers:
+         if is_inference_server_container(c):
+             has_existing_containers = True
+             if c.attrs.get("State", {}).get("Status", "").lower() == "running":
+                 should_kill = ask_user_to_kill_container(c)
+                 if should_kill:
+                     c.kill()
+                     has_existing_containers = False
+     return has_existing_containers
+
+
+ def find_existing_containers():
+     containers = []
+     for c in docker_client.containers.list():
+         if is_inference_server_container(c):
+             if c.attrs.get("State", {}).get("Status", "").lower() == "running":
+                 containers.append(c)
+     return containers
+
+
+ def get_image():
+     try:
+         subprocess.check_output("nvidia-smi")
+         print("GPU detected. Using a GPU image.")
+         return "roboflow/roboflow-inference-server-gpu:latest"
+     except:
+         print("No GPU detected. Using a CPU image.")
+         return "roboflow/roboflow-inference-server-cpu:latest"
+
+
+ def start_inference_container(
+     api_key,
+     image=None,
+     port=9001,
+     labels=None,
+     project="roboflow-platform",
+     metrics_enabled=True,
+     device_id=None,
+     num_workers=1,
+ ):
+     containers = find_existing_containers()
+     if len(containers) > 0:
+         still_has_containers = handle_existing_containers(containers)
+         if still_has_containers:
+             print("Please kill the existing containers and try again.")
+             return
+
+     if image is None:
+         image = get_image()
+
+     device_requests = None
+     privileged = False
+     if "gpu" in image:
+         privileged = True
+         device_requests = (
+             [docker.types.DeviceRequest(device_ids=["all"], capabilities=[["gpu"]])],
+         )
+
+     print(f"Starting inference server container...")
+     docker_client.containers.run(
+         image=image,
+         privileged=privileged,
+         detach=True,
+         labels=labels,
+         ports={"9001": port},
+         # network="host",
+         device_requests=device_requests,
+         environment=[
+             "HOST=0.0.0.0",
+             f"PORT={port}",
+             f"PROJECT={project}",
+             f"METRICS_ENABLED={metrics_enabled}",
+             f"DEVICE_ID={device_id}",
+             f"API_KEY={api_key}",
+             f"NUM_WORKERS={num_workers}",
+         ],
+     )
+
+
+ if __name__ == "__main__":
+     start_inference_container("my_api_key")
inference/cli/inference_cli/main.py ADDED
@@ -0,0 +1,51 @@
+ import typer
+ import inference_cli.actions
+ from inference_cli.lib import start_inference_container
+
+ app = typer.Typer()
+
+
+ @app.command()
+ def serve(
+     port: int = typer.Option(
+         9001,
+         "-p",
+         "--port",
+         help="Port to run the inference server on (default is 9001).",
+     ),
+     rf_env: str = typer.Option(
+         "roboflow-platform",
+         "-rfe",
+         "--rf-env",
+         help="Roboflow environment to run the inference server with (default is roboflow-platform).",
+     ),
+ ):
+     start_inference_container("", port=port, project=rf_env)
+
+
+ @app.command()
+ def infer(
+     image: str = typer.Option(
+         None, "-i", "--image", help="URL or local path of image to run inference on."
+     ),
+     project_id: str = typer.Option(
+         None, "-p", "--project-id", help="Roboflow project to run inference with."
+     ),
+     model_version: str = typer.Option(
+         None, "-v", "--model-version", help="Version of model to run inference with."
+     ),
+     api_key: str = typer.Option(
+         None, "-a", "--api-key", help="Roboflow API key for your workspace."
+     ),
+     host: str = typer.Option(
+         "http://localhost:9001", "-h", "--host", help="Host to run inference on."
+     ),
+ ):
+     typer.echo(
+         f"Running inference on image {image}, using model ({project_id}/{model_version}), and host ({host})"
+     )
+     inference_cli.actions.infer(image, project_id, model_version, api_key, host)
+
+
+ if __name__ == "__main__":
+     app()
inference/cli/inference_cli/version.py ADDED
@@ -0,0 +1,5 @@
+ __version__ = "0.8.9"
+
+
+ if __name__ == "__main__":
+     print(__version__)
inference/cli/setup.py ADDED
@@ -0,0 +1,49 @@
+ import os
+ import setuptools
+ from setuptools import find_packages
+ import sys
+ import shutil
+
+ root = os.path.abspath(os.path.join(os.path.dirname(__file__)))
+ sys.path.append(root)
+
+ shutil.copyfile(
+     os.path.join(root, "../inference/core/version.py"),
+     os.path.join(root, "inference_cli/version.py"),
+ )
+ from inference_cli.version import __version__
+
+ with open("README.md", "r") as fh:
+     long_description = fh.read()
+
+
+ def read_requirements(path):
+     with open(os.path.join(root, path)) as fh:
+         return [line.strip() for line in fh]
+
+
+ setuptools.setup(
+     name="inference-cli",
+     version=__version__,
+     author="Roboflow",
+     author_email="help@roboflow.com",
+     description="With no prior knowledge of machine learning or device-specific deployment, you can deploy a computer vision model to a range of devices and environments using Roboflow Inference CLI.",
+     long_description=long_description,
+     long_description_content_type="text/markdown",
+     url="https://github.com/roboflow/inference",
+     packages=find_packages(
+         where="./",
+     ),
+     entry_points={
+         "console_scripts": [
+             "inference=inference_cli.main:app",
+         ],
+     },
+     install_requires=read_requirements("requirements.txt"),
+     classifiers=[
+         "Programming Language :: Python :: 3",
+         "License :: OSI Approved :: Apache Software License",
+         "Operating System :: OS Independent",
+     ],
+     python_requires=">=3.7",
+ )
inference/core/__init__.py ADDED
@@ -0,0 +1,52 @@
+ import threading
+ import time
+
+ import requests
+
+ from inference.core.env import DISABLE_VERSION_CHECK, VERSION_CHECK_MODE
+ from inference.core.logger import logger
+ from inference.core.version import __version__
+
+ latest_release = None
+ last_checked = 0
+ cache_duration = 86400  # 24 hours
+ log_frequency = 300  # 5 minutes
+
+
+ def get_latest_release_version():
+     global latest_release, last_checked
+     now = time.time()
+     if latest_release is None or now - last_checked > cache_duration:
+         try:
+             logger.debug("Checking for latest inference release version...")
+             response = requests.get(
+                 "https://api.github.com/repos/roboflow/inference/releases/latest"
+             )
+             response.raise_for_status()
+             latest_release = response.json()["tag_name"].lstrip("v")
+             last_checked = now
+         except requests.exceptions.RequestException:
+             pass
+
+
+ def check_latest_release_against_current():
+     get_latest_release_version()
+     if latest_release is not None and latest_release != __version__:
+         logger.warning(
+             f"Your inference package version {__version__} is out of date! Please upgrade to version {latest_release} of inference for the latest features and bug fixes by running `pip install --upgrade roboflow-inference`."
+         )
+
+
+ def check_latest_release_against_current_continuous():
+     while True:
+         check_latest_release_against_current()
+         time.sleep(log_frequency)
+
+
+ if not DISABLE_VERSION_CHECK:
+     if VERSION_CHECK_MODE == "continuous":
+         t = threading.Thread(target=check_latest_release_against_current_continuous)
+         t.daemon = True
+         t.start()
+     else:
+         check_latest_release_against_current()
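
Because this check is wired up at import time, it can only be controlled through the environment before `inference` is first imported; a hedged sketch (the accepted spellings are an assumption, since the parsing in inference/core/env.py is not shown in this diff):

# Hypothetical: suppress the update check before the first import.
import os

os.environ["DISABLE_VERSION_CHECK"] = "true"  # assumed truthy spelling
os.environ["VERSION_CHECK_MODE"] = "once"     # any value other than "continuous" means a single check

import inference.core  # the check (or its suppression) happens here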
inference/core/cache/__init__.py ADDED
@@ -0,0 +1,8 @@
+ from inference.core.cache.memory import MemoryCache
+ from inference.core.cache.redis import RedisCache
+ from inference.core.env import REDIS_HOST, REDIS_PORT
+
+ if REDIS_HOST is not None:
+     cache = RedisCache(host=REDIS_HOST, port=REDIS_PORT)
+ else:
+     cache = MemoryCache()
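
The module-level `cache` is therefore chosen once, at import time; a hypothetical way to confirm which backend was selected (assuming an unset REDIS_HOST maps to None in inference/core/env.py):

# Hypothetical check of the fallback path.
import os

os.environ.pop("REDIS_HOST", None)  # assumption: unset REDIS_HOST selects the in-memory cache
from inference.core.cache import cache

print(type(cache).__name__)  # expected: MemoryCache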