vllm-judge 0.1.0__tar.gz → 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/PKG-INFO +14 -15
  2. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/README.md +6 -6
  3. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/pyproject.toml +9 -10
  4. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/__init__.py +1 -1
  5. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge.egg-info/PKG-INFO +14 -15
  6. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge.egg-info/requires.txt +2 -3
  7. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/setup.cfg +0 -0
  8. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/api/__init__.py +0 -0
  9. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/api/client.py +0 -0
  10. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/api/models.py +0 -0
  11. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/api/server.py +0 -0
  12. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/batch.py +0 -0
  13. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/cli.py +0 -0
  14. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/client.py +0 -0
  15. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/exceptions.py +0 -0
  16. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/judge.py +0 -0
  17. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/metrics.py +0 -0
  18. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/models.py +0 -0
  19. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/prompts.py +0 -0
  20. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge/templating.py +0 -0
  21. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge.egg-info/SOURCES.txt +0 -0
  22. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge.egg-info/dependency_links.txt +0 -0
  23. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge.egg-info/entry_points.txt +0 -0
  24. {vllm_judge-0.1.0 → vllm_judge-0.1.1}/src/vllm_judge.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: vllm_judge
3
- Version: 0.1.0
3
+ Version: 0.1.1
4
4
  Summary: LLM-as-a-Judge evaluations for vLLM hosted models
5
5
  Author: TrustyAI team
6
6
  Author-email: Sai Chandra Pandraju <saichandrapandraju@gmail.com>
@@ -20,14 +20,6 @@ Requires-Dist: uvicorn[standard]>=0.22.0; extra == "api"
20
20
  Requires-Dist: websockets>=11.0; extra == "api"
21
21
  Provides-Extra: jinja2
22
22
  Requires-Dist: jinja2>=3.0.0; extra == "jinja2"
23
- Provides-Extra: dev
24
- Requires-Dist: pytest>=7.0.0; extra == "dev"
25
- Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
26
- Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
27
- Requires-Dist: black>=23.0.0; extra == "dev"
28
- Requires-Dist: isort>=5.12.0; extra == "dev"
29
- Requires-Dist: flake8>=6.0.0; extra == "dev"
30
- Requires-Dist: mypy>=1.0.0; extra == "dev"
31
23
  Provides-Extra: test
32
24
  Requires-Dist: pytest>=7.0.0; extra == "test"
33
25
  Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
@@ -37,10 +29,17 @@ Provides-Extra: docs
37
29
  Requires-Dist: mkdocs>=1.5.0; extra == "docs"
38
30
  Requires-Dist: mkdocs-material>=9.0.0; extra == "docs"
39
31
  Requires-Dist: mkdocstrings[python]>=0.24.0; extra == "docs"
32
+ Requires-Dist: mkdocs-material-extensions>=1.3.1; extra == "docs"
33
+ Provides-Extra: dev
34
+ Requires-Dist: vllm_judge[api,docs,jinja2,test]; extra == "dev"
35
+ Requires-Dist: black>=23.0.0; extra == "dev"
36
+ Requires-Dist: isort>=5.12.0; extra == "dev"
37
+ Requires-Dist: flake8>=6.0.0; extra == "dev"
38
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
40
39
 
41
40
  # vLLM Judge
42
41
 
43
- A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models.
42
+ A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models. Please refer to the [documentation](https://saichandrapandraju.github.io/vllm_judge/) for usage details.
44
43
 
45
44
  ## Features
46
45
 
@@ -55,16 +54,16 @@ A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models.
55
54
 
56
55
  ```bash
57
56
  # Basic installation
58
- pip install vllm_judge
57
+ pip install vllm-judge
59
58
 
60
59
  # With API support
61
- pip install vllm_judge[api]
60
+ pip install vllm-judge[api]
62
61
 
63
62
  # With Jinja2 template support
64
- pip install vllm_judge[jinja2]
63
+ pip install vllm-judge[jinja2]
65
64
 
66
65
  # Everything
67
- pip install vllm_judge[api,jinja2]
66
+ pip install vllm-judge[dev]
68
67
  ```
69
68
 
70
69
  ## Quick Start
@@ -73,7 +72,7 @@ pip install vllm_judge[api,jinja2]
73
72
  from vllm_judge import Judge
74
73
 
75
74
  # Initialize with vLLM url
76
- judge = await Judge.from_url("http://localhost:8000")
75
+ judge = Judge.from_url("http://localhost:8000")
77
76
 
78
77
  # Simple evaluation
79
78
  result = await judge.evaluate(
@@ -1,6 +1,6 @@
1
1
  # vLLM Judge
2
2
 
3
- A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models.
3
+ A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models. Please refer to the [documentation](https://saichandrapandraju.github.io/vllm_judge/) for usage details.
4
4
 
5
5
  ## Features
6
6
 
@@ -15,16 +15,16 @@ A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models.
15
15
 
16
16
  ```bash
17
17
  # Basic installation
18
- pip install vllm_judge
18
+ pip install vllm-judge
19
19
 
20
20
  # With API support
21
- pip install vllm_judge[api]
21
+ pip install vllm-judge[api]
22
22
 
23
23
  # With Jinja2 template support
24
- pip install vllm_judge[jinja2]
24
+ pip install vllm-judge[jinja2]
25
25
 
26
26
  # Everything
27
- pip install vllm_judge[api,jinja2]
27
+ pip install vllm-judge[dev]
28
28
  ```
29
29
 
30
30
  ## Quick Start
@@ -33,7 +33,7 @@ pip install vllm_judge[api,jinja2]
33
33
  from vllm_judge import Judge
34
34
 
35
35
  # Initialize with vLLM url
36
- judge = await Judge.from_url("http://localhost:8000")
36
+ judge = Judge.from_url("http://localhost:8000")
37
37
 
38
38
  # Simple evaluation
39
39
  result = await judge.evaluate(
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "vllm_judge"
7
- version = "0.1.0"
7
+ version = "0.1.1"
8
8
  description = "LLM-as-a-Judge evaluations for vLLM hosted models"
9
9
  readme = "README.md"
10
10
  authors = [
@@ -40,15 +40,6 @@ api = [
40
40
  jinja2 = [
41
41
  "jinja2>=3.0.0",
42
42
  ]
43
- dev = [
44
- "pytest>=7.0.0",
45
- "pytest-asyncio>=0.21.0",
46
- "pytest-cov>=4.0.0",
47
- "black>=23.0.0",
48
- "isort>=5.12.0",
49
- "flake8>=6.0.0",
50
- "mypy>=1.0.0",
51
- ]
52
43
  test = [
53
44
  "pytest>=7.0.0",
54
45
  "pytest-asyncio>=0.21.0",
@@ -59,6 +50,14 @@ docs = [
59
50
  "mkdocs>=1.5.0",
60
51
  "mkdocs-material>=9.0.0",
61
52
  "mkdocstrings[python]>=0.24.0",
53
+ "mkdocs-material-extensions>=1.3.1"
54
+ ]
55
+ dev = [
56
+ "vllm_judge[api,jinja2,test,docs]",
57
+ "black>=23.0.0",
58
+ "isort>=5.12.0",
59
+ "flake8>=6.0.0",
60
+ "mypy>=1.0.0",
62
61
  ]
63
62
 
64
63
  [project.scripts]
@@ -5,7 +5,7 @@ A lightweight library for evaluating text responses using self-hosted language m
5
5
  via vLLM's OpenAI-compatible API.
6
6
  """
7
7
 
8
- __version__ = "0.1.0"
8
+ __version__ = "0.1.1"
9
9
 
10
10
  from vllm_judge.judge import Judge
11
11
  from vllm_judge.models import (
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: vllm_judge
3
- Version: 0.1.0
3
+ Version: 0.1.1
4
4
  Summary: LLM-as-a-Judge evaluations for vLLM hosted models
5
5
  Author: TrustyAI team
6
6
  Author-email: Sai Chandra Pandraju <saichandrapandraju@gmail.com>
@@ -20,14 +20,6 @@ Requires-Dist: uvicorn[standard]>=0.22.0; extra == "api"
20
20
  Requires-Dist: websockets>=11.0; extra == "api"
21
21
  Provides-Extra: jinja2
22
22
  Requires-Dist: jinja2>=3.0.0; extra == "jinja2"
23
- Provides-Extra: dev
24
- Requires-Dist: pytest>=7.0.0; extra == "dev"
25
- Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
26
- Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
27
- Requires-Dist: black>=23.0.0; extra == "dev"
28
- Requires-Dist: isort>=5.12.0; extra == "dev"
29
- Requires-Dist: flake8>=6.0.0; extra == "dev"
30
- Requires-Dist: mypy>=1.0.0; extra == "dev"
31
23
  Provides-Extra: test
32
24
  Requires-Dist: pytest>=7.0.0; extra == "test"
33
25
  Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
@@ -37,10 +29,17 @@ Provides-Extra: docs
37
29
  Requires-Dist: mkdocs>=1.5.0; extra == "docs"
38
30
  Requires-Dist: mkdocs-material>=9.0.0; extra == "docs"
39
31
  Requires-Dist: mkdocstrings[python]>=0.24.0; extra == "docs"
32
+ Requires-Dist: mkdocs-material-extensions>=1.3.1; extra == "docs"
33
+ Provides-Extra: dev
34
+ Requires-Dist: vllm_judge[api,docs,jinja2,test]; extra == "dev"
35
+ Requires-Dist: black>=23.0.0; extra == "dev"
36
+ Requires-Dist: isort>=5.12.0; extra == "dev"
37
+ Requires-Dist: flake8>=6.0.0; extra == "dev"
38
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
40
39
 
41
40
  # vLLM Judge
42
41
 
43
- A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models.
42
+ A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models. Please refer to the [documentation](https://saichandrapandraju.github.io/vllm_judge/) for usage details.
44
43
 
45
44
  ## Features
46
45
 
@@ -55,16 +54,16 @@ A lightweight library for LLM-as-a-Judge evaluations using vLLM hosted models.
55
54
 
56
55
  ```bash
57
56
  # Basic installation
58
- pip install vllm_judge
57
+ pip install vllm-judge
59
58
 
60
59
  # With API support
61
- pip install vllm_judge[api]
60
+ pip install vllm-judge[api]
62
61
 
63
62
  # With Jinja2 template support
64
- pip install vllm_judge[jinja2]
63
+ pip install vllm-judge[jinja2]
65
64
 
66
65
  # Everything
67
- pip install vllm_judge[api,jinja2]
66
+ pip install vllm-judge[dev]
68
67
  ```
69
68
 
70
69
  ## Quick Start
@@ -73,7 +72,7 @@ pip install vllm_judge[api,jinja2]
73
72
  from vllm_judge import Judge
74
73
 
75
74
  # Initialize with vLLM url
76
- judge = await Judge.from_url("http://localhost:8000")
75
+ judge = Judge.from_url("http://localhost:8000")
77
76
 
78
77
  # Simple evaluation
79
78
  result = await judge.evaluate(
@@ -9,9 +9,7 @@ uvicorn[standard]>=0.22.0
9
9
  websockets>=11.0
10
10
 
11
11
  [dev]
12
- pytest>=7.0.0
13
- pytest-asyncio>=0.21.0
14
- pytest-cov>=4.0.0
12
+ vllm_judge[api,docs,jinja2,test]
15
13
  black>=23.0.0
16
14
  isort>=5.12.0
17
15
  flake8>=6.0.0
@@ -21,6 +19,7 @@ mypy>=1.0.0
21
19
  mkdocs>=1.5.0
22
20
  mkdocs-material>=9.0.0
23
21
  mkdocstrings[python]>=0.24.0
22
+ mkdocs-material-extensions>=1.3.1
24
23
 
25
24
  [jinja2]
26
25
  jinja2>=3.0.0
File without changes