orbitals 0.1.1.tar.gz → 0.1.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {orbitals-0.1.1 → orbitals-0.1.2}/PKG-INFO +16 -10
  2. {orbitals-0.1.1 → orbitals-0.1.2}/README.md +10 -4
  3. {orbitals-0.1.1 → orbitals-0.1.2}/pyproject.toml +3 -3
  4. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/cli/serve.py +1 -1
  5. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/base.py +4 -4
  6. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/hf.py +1 -1
  7. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/vllm.py +3 -3
  8. {orbitals-0.1.1 → orbitals-0.1.2}/uv.lock +444 -262
  9. {orbitals-0.1.1 → orbitals-0.1.2}/.dockerignore +0 -0
  10. {orbitals-0.1.1 → orbitals-0.1.2}/.gitignore +0 -0
  11. {orbitals-0.1.1 → orbitals-0.1.2}/AGENTS.md +0 -0
  12. {orbitals-0.1.1 → orbitals-0.1.2}/CONTRIBUTING.md +0 -0
  13. {orbitals-0.1.1 → orbitals-0.1.2}/Dockerfile +0 -0
  14. {orbitals-0.1.1 → orbitals-0.1.2}/Dockerfile copy +0 -0
  15. {orbitals-0.1.1 → orbitals-0.1.2}/LICENSE +0 -0
  16. {orbitals-0.1.1 → orbitals-0.1.2}/README.scope-guard.md +0 -0
  17. {orbitals-0.1.1 → orbitals-0.1.2}/assets/orbitals-banner.png +0 -0
  18. {orbitals-0.1.1 → orbitals-0.1.2}/assets/orbitals.svg +0 -0
  19. {orbitals-0.1.1 → orbitals-0.1.2}/assets/scope-guard.svg +0 -0
  20. {orbitals-0.1.1 → orbitals-0.1.2}/examples/scope-guard/api.py +0 -0
  21. {orbitals-0.1.1 → orbitals-0.1.2}/examples/scope-guard/async_api.py +0 -0
  22. {orbitals-0.1.1 → orbitals-0.1.2}/examples/scope-guard/local.py +0 -0
  23. {orbitals-0.1.1 → orbitals-0.1.2}/examples/scope-guard/test.py +0 -0
  24. {orbitals-0.1.1 → orbitals-0.1.2}/examples/scope-guard/vllm_serve.py +0 -0
  25. {orbitals-0.1.1 → orbitals-0.1.2}/src/hf_pipeline/scope_guard.py +0 -0
  26. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/__init__.py +0 -0
  27. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/cli/__init__.py +0 -0
  28. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/cli/main.py +0 -0
  29. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/__init__.py +0 -0
  30. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/cli/__init__.py +0 -0
  31. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/cli/convert_default_model_name.py +0 -0
  32. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/cli/main.py +0 -0
  33. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/__init__.py +0 -0
  34. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/api.py +0 -0
  35. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/modeling.py +0 -0
  36. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/prompting.py +0 -0
  37. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/serving/__init__.py +0 -0
  38. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/serving/main.py +0 -0
  39. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/serving/vllm_logging_config.json +0 -0
  40. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/types.py +0 -0
  41. {orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/utils.py +0 -0
  42. {orbitals-0.1.1 → orbitals-0.1.2}/src/scripts/playground.ipynb +0 -0
  43. {orbitals-0.1.1 → orbitals-0.1.2}/src/scripts/push_hf_pipeline.py +0 -0
  44. {orbitals-0.1.1 → orbitals-0.1.2}/src/scripts/push_model.py +0 -0
{orbitals-0.1.1 → orbitals-0.1.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: orbitals
-Version: 0.1.1
+Version: 0.1.2
 Summary: LLM Guardrails tailored to your Principles
 Author-email: Luigi Procopio <luigi@principled-intelligence.com>, Edoardo Barba <edoardo@principled-intelligence.com>
 License: Apache-2.0
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
-Requires-Python: ==3.13
+Requires-Python: >=3.10
 Requires-Dist: aiohttp
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: requests
@@ -22,7 +22,7 @@ Requires-Dist: fastapi[standard]>=0.119.1; extra == 'all'
 Requires-Dist: nvidia-ml-py; extra == 'all'
 Requires-Dist: transformers<5.0.0,>=4.47.0; extra == 'all'
 Requires-Dist: uvicorn>=0.29.0; extra == 'all'
-Requires-Dist: vllm<0.13.0,>=0.11.0; extra == 'all'
+Requires-Dist: vllm>=0.11.0; extra == 'all'
 Requires-Dist: xgrammar; extra == 'all'
 Provides-Extra: scope-guard-all
 Requires-Dist: accelerate>=1.11.0; extra == 'scope-guard-all'
@@ -30,7 +30,7 @@ Requires-Dist: fastapi[standard]>=0.119.1; extra == 'scope-guard-all'
 Requires-Dist: nvidia-ml-py; extra == 'scope-guard-all'
 Requires-Dist: transformers<5.0.0,>=4.47.0; extra == 'scope-guard-all'
 Requires-Dist: uvicorn>=0.29.0; extra == 'scope-guard-all'
-Requires-Dist: vllm<0.13.0,>=0.11.0; extra == 'scope-guard-all'
+Requires-Dist: vllm>=0.11.0; extra == 'scope-guard-all'
 Requires-Dist: xgrammar; extra == 'scope-guard-all'
 Provides-Extra: scope-guard-hf
 Requires-Dist: accelerate>=1.11.0; extra == 'scope-guard-hf'
@@ -41,12 +41,12 @@ Requires-Dist: fastapi[standard]>=0.119.1; extra == 'scope-guard-serve'
 Requires-Dist: nvidia-ml-py; extra == 'scope-guard-serve'
 Requires-Dist: transformers<5.0.0,>=4.47.0; extra == 'scope-guard-serve'
 Requires-Dist: uvicorn>=0.29.0; extra == 'scope-guard-serve'
-Requires-Dist: vllm<0.13.0,>=0.11.0; extra == 'scope-guard-serve'
+Requires-Dist: vllm>=0.11.0; extra == 'scope-guard-serve'
 Requires-Dist: xgrammar; extra == 'scope-guard-serve'
 Provides-Extra: scope-guard-vllm
 Requires-Dist: nvidia-ml-py; extra == 'scope-guard-vllm'
 Requires-Dist: transformers<5.0.0,>=4.47.0; extra == 'scope-guard-vllm'
-Requires-Dist: vllm<0.13.0,>=0.11.0; extra == 'scope-guard-vllm'
+Requires-Dist: vllm>=0.11.0; extra == 'scope-guard-vllm'
 Requires-Dist: xgrammar; extra == 'scope-guard-vllm'
 Provides-Extra: serving
 Requires-Dist: fastapi[standard]>=0.119.1; extra == 'serving'
@@ -54,7 +54,7 @@ Requires-Dist: uvicorn>=0.29.0; extra == 'serving'
 Description-Content-Type: text/markdown
 
 <div align="center">
-  <img src="assets/orbitals-banner.png" width="70%" />
+  <img src="https://raw.githubusercontent.com/Principled-Intelligence/orbitals/refs/heads/main/assets/orbitals-banner.png" width="70%" />
   <h3 align="center">
     <p>
       <b>LLM Guardrails tailored to your Principles</b>
@@ -63,10 +63,16 @@ Description-Content-Type: text/markdown
 </div>
 
 <p align="center">
-  <img src="https://img.shields.io/pypi/v/orbitals?color=green" alt="PyPI Version">
+  <a href="https://pypi.org/project/orbitals/">
+    <img src="https://img.shields.io/pypi/v/orbitals?color=green" alt="PyPI Version">
+  </a>
   <!-- <img src="https://img.shields.io/badge/type%20checked-ty-blue.svg?color=green" alt="Type Checked with ty"> -->
-  <img src="https://img.shields.io/pypi/pyversions/orbitals" alt="Python Versions">
-  <img src="https://img.shields.io/github/license/principled-intelligence/orbitals" alt="GitHub License">
+  <a href="https://pypi.org/project/orbitals/">
+    <img src="https://img.shields.io/pypi/pyversions/orbitals" alt="Python Versions">
+  </a>
+  <a href="https://raw.githubusercontent.com/Principled-Intelligence/orbitals/refs/heads/main/LICENSE">
+    <img src="https://img.shields.io/github/license/principled-intelligence/orbitals" alt="GitHub License">
+  </a>
 </p>
 
 ## Overview
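
The metadata hunks above relax both the interpreter pin (`Requires-Python: >=3.10`) and the vLLM upper bound across every extra. A quick way to confirm what an installed copy actually advertises is to read the same fields back through the standard library; the sketch below is illustrative only and assumes orbitals 0.1.2 is already installed in the current environment.

```python
# Minimal sketch (not part of the package): read back the PKG-INFO fields
# changed above from an installed orbitals distribution.
from importlib import metadata

info = metadata.metadata("orbitals")      # parsed PKG-INFO / METADATA
print(info["Version"])                    # expected: 0.1.2
print(info["Requires-Python"])            # expected: >=3.10 (was ==3.13)

# Requirement strings, including the extras markers shown above, e.g.
# "vllm>=0.11.0; extra == 'scope-guard-vllm'".
for req in metadata.requires("orbitals") or []:
    if req.startswith("vllm"):
        print(req)
```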
{orbitals-0.1.1 → orbitals-0.1.2}/README.md

@@ -1,5 +1,5 @@
 <div align="center">
-  <img src="assets/orbitals-banner.png" width="70%" />
+  <img src="https://raw.githubusercontent.com/Principled-Intelligence/orbitals/refs/heads/main/assets/orbitals-banner.png" width="70%" />
   <h3 align="center">
     <p>
       <b>LLM Guardrails tailored to your Principles</b>
@@ -8,10 +8,16 @@
 </div>
 
 <p align="center">
-  <img src="https://img.shields.io/pypi/v/orbitals?color=green" alt="PyPI Version">
+  <a href="https://pypi.org/project/orbitals/">
+    <img src="https://img.shields.io/pypi/v/orbitals?color=green" alt="PyPI Version">
+  </a>
   <!-- <img src="https://img.shields.io/badge/type%20checked-ty-blue.svg?color=green" alt="Type Checked with ty"> -->
-  <img src="https://img.shields.io/pypi/pyversions/orbitals" alt="Python Versions">
-  <img src="https://img.shields.io/github/license/principled-intelligence/orbitals" alt="GitHub License">
+  <a href="https://pypi.org/project/orbitals/">
+    <img src="https://img.shields.io/pypi/pyversions/orbitals" alt="Python Versions">
+  </a>
+  <a href="https://raw.githubusercontent.com/Principled-Intelligence/orbitals/refs/heads/main/LICENSE">
+    <img src="https://img.shields.io/github/license/principled-intelligence/orbitals" alt="GitHub License">
+  </a>
 </p>
 
 ## Overview
{orbitals-0.1.1 → orbitals-0.1.2}/pyproject.toml

@@ -4,11 +4,11 @@ build-backend = "hatchling.build"
 
 [project]
 name = "orbitals"
-version = "0.1.1"
+version = "0.1.2"
 description = "LLM Guardrails tailored to your Principles"
 readme = "README.md"
 
-requires-python = "==3.13"
+requires-python = ">=3.10"
 license = { text = "Apache-2.0" }
 
 authors = [
@@ -37,7 +37,7 @@ scope-guard-hf = [
 ]
 scope-guard-vllm = [
     "transformers>=4.47.0,<5.0.0",
-    "vllm>=0.11.0,<0.13.0",
+    "vllm>=0.11.0",
     "xgrammar",
     "nvidia-ml-py",
 ]
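
To make the effect of the `requires-python` change concrete, the sketch below compares a few interpreter versions against the old and new specifiers; it uses the third-party `packaging` library purely for illustration and is not something shipped by orbitals.

```python
# Illustration only: which interpreters satisfy the old pin vs. the new range.
from packaging.specifiers import SpecifierSet

old_pin, new_range = SpecifierSet("==3.13"), SpecifierSet(">=3.10")
for py in ("3.10", "3.11", "3.12", "3.13", "3.14"):
    print(f"{py}: old={py in old_pin} new={py in new_range}")
# Only 3.13 satisfied the old pin; all of the above satisfy >=3.10.
```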
{orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/cli/serve.py

@@ -38,7 +38,7 @@ def serve(
         "0.0.0.0", "-h", "--host", help="The host to use for the server"
     ),
     vllm_port: int = typer.Option(8001, help="The port to use for the vLLM server"),
-    vllm_max_model_len: int = typer.Option(10000, help="Maximum model length for vLLM"),
+    vllm_max_model_len: int = typer.Option(15000, help="Maximum model length for vLLM"),
     vllm_max_num_seqs: int = typer.Option(
         2, help="Maximum number of sequences for vLLM"
     ),
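
The serve command declares its vLLM settings as Typer options, so the bumped default stays overridable per invocation. The standalone sketch below mirrors that option pattern under Typer's usual name-to-flag derivation; it is not the orbitals CLI itself, and the flag names are only what Typer would derive from these parameter names.

```python
# Standalone sketch mirroring the option pattern in serve.py (not the orbitals CLI).
import typer

app = typer.Typer()

@app.command()
def serve(
    vllm_max_model_len: int = typer.Option(15000, help="Maximum model length for vLLM"),
    vllm_max_num_seqs: int = typer.Option(2, help="Maximum number of sequences for vLLM"),
) -> None:
    # Typer exposes these as --vllm-max-model-len / --vllm-max-num-seqs, so the
    # new 15000 default can still be overridden at the command line.
    typer.echo(f"max_model_len={vllm_max_model_len} max_num_seqs={vllm_max_num_seqs}")

if __name__ == "__main__":
    app()
```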
{orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/base.py

@@ -139,7 +139,7 @@ class ScopeGuard(BaseScopeGuard):
         backend: Literal["hf"] = "hf",
         model: DefaultModel | str = "scope-guard",
         skip_evidences: bool = False,
-        max_new_tokens: int = 10_000,
+        max_new_tokens: int = 3000,
         do_sample: bool = False,
         **kwargs,
     ) -> HuggingFaceScopeGuard: ...
@@ -151,8 +151,8 @@ class ScopeGuard(BaseScopeGuard):
         model: DefaultModel | str = "scope-guard",
         skip_evidences: bool = False,
         temperature: float = 0.0,
-        max_tokens: int = 10_000,
-        max_model_len: int = 10_000,
+        max_tokens: int = 3000,
+        max_model_len: int = 15_000,
         max_num_seqs: int = 2,
     ) -> VLLMScopeGuard: ...
 
@@ -227,7 +227,7 @@ class AsyncScopeGuard(BaseScopeGuard):
         skip_evidences: bool = False,
         vllm_serving_url: str = "http://localhost:8000",
         temperature: float = 0.0,
-        max_tokens: int = 10_000,
+        max_tokens: int = 3000,
     ) -> AsyncVLLMApiScopeGuard: ...
 
     @overload
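
These overloads describe a constructor that dispatches on `backend`, so the new defaults surface directly in user code. The sketch below spells them out as explicit keyword arguments; it assumes `ScopeGuard` is importable from `orbitals.scope_guard` and that `backend="vllm"` selects the `VLLMScopeGuard` overload (only the `"hf"` literal is visible in these hunks), so treat both as assumptions rather than documented API.

```python
# Hedged sketch: the import path and the "vllm" backend literal are assumptions;
# keyword names and defaults come from the overloads above.
from orbitals.scope_guard import ScopeGuard

# HF backend, with the new max_new_tokens default (3000, down from 10_000) explicit.
hf_guard = ScopeGuard(
    backend="hf",
    model="scope-guard",
    skip_evidences=False,
    max_new_tokens=3000,
    do_sample=False,
)

# vLLM backend, with the new max_tokens / max_model_len defaults made explicit.
vllm_guard = ScopeGuard(
    backend="vllm",
    model="scope-guard",
    temperature=0.0,
    max_tokens=3000,
    max_model_len=15_000,
    max_num_seqs=2,
)
```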
{orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/hf.py

@@ -23,7 +23,7 @@ class HuggingFaceScopeGuard(ScopeGuard):
         backend: Literal["hf"] = "hf",
         model: DefaultModel | str = "scope-guard",
         skip_evidences: bool = False,
-        max_new_tokens: int = 10_000,
+        max_new_tokens: int = 3000,
         do_sample: bool = False,
         **kwargs,
     ):
{orbitals-0.1.1 → orbitals-0.1.2}/src/orbitals/scope_guard/guards/vllm.py

@@ -26,8 +26,8 @@ class VLLMScopeGuard(ScopeGuard):
         model: DefaultModel | str = "scope-guard",
         skip_evidences: bool = False,
         temperature: float = 0.0,
-        max_tokens: int = 10_000,
-        max_model_len: int = 10_000,
+        max_tokens: int = 3000,
+        max_model_len: int = 15_000,
         max_num_seqs: int = 2,
         gpu_memory_utilization: float = 0.9,
     ):
@@ -131,7 +131,7 @@ class AsyncVLLMApiScopeGuard(AsyncScopeGuard):
         skip_evidences: bool = False,
         vllm_serving_url: str = "http://localhost:8000",
         temperature: float = 0.0,
-        max_tokens: int = 10_000,
+        max_tokens: int = 3000,
     ):
         import transformers
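
The two knobs move in opposite directions here: `max_model_len` (raised to 15,000) bounds the whole context window the vLLM engine allocates per sequence, while `max_tokens` (lowered to 3,000) only caps generation per request. The sketch below illustrates that split with vLLM's own API rather than the orbitals wrapper; the model name is a placeholder, not the scope-guard checkpoint.

```python
# Illustration with vLLM's own API (not the orbitals wrapper).
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen2.5-0.5B-Instruct",  # placeholder model, not the scope-guard checkpoint
    max_model_len=15_000,                # total prompt + completion budget per sequence
    max_num_seqs=2,                      # concurrent sequences, matching the defaults above
)
params = SamplingParams(temperature=0.0, max_tokens=3000)  # caps generation only
outputs = llm.generate(["Is this request in scope?"], params)
print(outputs[0].outputs[0].text)
```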