ehs-llm-client 0.1.1__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/PKG-INFO +8 -8
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/README.md +6 -6
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client.egg-info/PKG-INFO +8 -8
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/pyproject.toml +2 -2
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client/__init__.py +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client/client.py +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client/config.py +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client/exceptions.py +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client/utils.py +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client.egg-info/SOURCES.txt +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client.egg-info/dependency_links.txt +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client.egg-info/requires.txt +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/ehs_llm_client.egg-info/top_level.txt +0 -0
- {ehs_llm_client-0.1.1 → ehs_llm_client-0.1.4}/setup.cfg +0 -0
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ehs-llm-client
|
|
3
|
-
Version: 0.1.1
|
|
3
|
+
Version: 0.1.4
|
|
4
4
|
Summary: Unified LLM client. Currently supports Openai, Azure Openai and Google Gemini
|
|
5
5
|
Author-email: Andersen Huang <andersen.huang@ehsanalytics.com>
|
|
6
|
-
License: MIT
|
|
6
|
+
License-Expression: MIT
|
|
7
7
|
Description-Content-Type: text/markdown
|
|
8
8
|
Requires-Dist: openai
|
|
9
9
|
Requires-Dist: google-genai
|
|
@@ -15,7 +15,7 @@ Requires-Dist: python-dateutil
|
|
|
15
15
|
[](https://pypi.org/project/ehs-llm-client/)
|
|
16
16
|
[](https://www.python.org/downloads/)
|
|
17
17
|
|
|
18
|
-
`llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
|
|
18
|
+
`ehs-llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
|
|
19
19
|
It supports **single calls**, **structured JSON outputs**, and **batch processing**, designed for **production-ready, reusable code** in applications, scripts, or pipelines.
|
|
20
20
|
|
|
21
21
|
---
|
|
@@ -36,7 +36,7 @@ It supports **single calls**, **structured JSON outputs**, and **batch processin
|
|
|
36
36
|
|
|
37
37
|
```bash
|
|
38
38
|
# Install from PyPI
|
|
39
|
-
pip install llm-client
|
|
39
|
+
pip install ehs-llm-client
|
|
40
40
|
```
|
|
41
41
|
|
|
42
42
|
Or install editable version during development:
|
|
@@ -50,7 +50,7 @@ pip install -e .
|
|
|
50
50
|
|
|
51
51
|
## Configuration
|
|
52
52
|
|
|
53
|
-
`llm-client` supports **three configuration modes**:
|
|
53
|
+
`ehs-llm-client` supports **three configuration modes**:
|
|
54
54
|
|
|
55
55
|
### 1️⃣ Config file (`.cfg`)
|
|
56
56
|
|
|
@@ -70,7 +70,7 @@ model_batch = gpt-4.1-mini
|
|
|
70
70
|
Usage:
|
|
71
71
|
|
|
72
72
|
```python
|
|
73
|
-
from llm_client import LLM
|
|
73
|
+
from ehs_llm_client import LLM
|
|
74
74
|
|
|
75
75
|
llm = LLM("prod", config_file_path="llmconfig.cfg")
|
|
76
76
|
```
|
|
@@ -80,7 +80,7 @@ llm = LLM("prod", config_file_path="llmconfig.cfg")
|
|
|
80
80
|
### 2️⃣ Config dictionary (for tests / CI)
|
|
81
81
|
|
|
82
82
|
```python
|
|
83
|
-
from llm_client import LLM
|
|
83
|
+
from ehs_llm_client import LLM
|
|
84
84
|
|
|
85
85
|
llm = LLM(
|
|
86
86
|
"default",
|
|
@@ -115,7 +115,7 @@ llm = LLM("default")
|
|
|
115
115
|
|
|
116
116
|
```python
|
|
117
117
|
import asyncio
|
|
118
|
-
from llm_client import LLM
|
|
118
|
+
from ehs_llm_client import LLM
|
|
119
119
|
|
|
120
120
|
async def main():
|
|
121
121
|
llm = LLM("prod")
|
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
[](https://pypi.org/project/ehs-llm-client/)
|
|
4
4
|
[](https://www.python.org/downloads/)
|
|
5
5
|
|
|
6
|
-
`llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
|
|
6
|
+
`ehs-llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
|
|
7
7
|
It supports **single calls**, **structured JSON outputs**, and **batch processing**, designed for **production-ready, reusable code** in applications, scripts, or pipelines.
|
|
8
8
|
|
|
9
9
|
---
|
|
@@ -24,7 +24,7 @@ It supports **single calls**, **structured JSON outputs**, and **batch processin
|
|
|
24
24
|
|
|
25
25
|
```bash
|
|
26
26
|
# Install from PyPI
|
|
27
|
-
pip install llm-client
|
|
27
|
+
pip install ehs-llm-client
|
|
28
28
|
```
|
|
29
29
|
|
|
30
30
|
Or install editable version during development:
|
|
@@ -38,7 +38,7 @@ pip install -e .
|
|
|
38
38
|
|
|
39
39
|
## Configuration
|
|
40
40
|
|
|
41
|
-
`llm-client` supports **three configuration modes**:
|
|
41
|
+
`ehs-llm-client` supports **three configuration modes**:
|
|
42
42
|
|
|
43
43
|
### 1️⃣ Config file (`.cfg`)
|
|
44
44
|
|
|
@@ -58,7 +58,7 @@ model_batch = gpt-4.1-mini
|
|
|
58
58
|
Usage:
|
|
59
59
|
|
|
60
60
|
```python
|
|
61
|
-
from llm_client import LLM
|
|
61
|
+
from ehs_llm_client import LLM
|
|
62
62
|
|
|
63
63
|
llm = LLM("prod", config_file_path="llmconfig.cfg")
|
|
64
64
|
```
|
|
@@ -68,7 +68,7 @@ llm = LLM("prod", config_file_path="llmconfig.cfg")
|
|
|
68
68
|
### 2️⃣ Config dictionary (for tests / CI)
|
|
69
69
|
|
|
70
70
|
```python
|
|
71
|
-
from llm_client import LLM
|
|
71
|
+
from ehs_llm_client import LLM
|
|
72
72
|
|
|
73
73
|
llm = LLM(
|
|
74
74
|
"default",
|
|
@@ -103,7 +103,7 @@ llm = LLM("default")
|
|
|
103
103
|
|
|
104
104
|
```python
|
|
105
105
|
import asyncio
|
|
106
|
-
from llm_client import LLM
|
|
106
|
+
from ehs_llm_client import LLM
|
|
107
107
|
|
|
108
108
|
async def main():
|
|
109
109
|
llm = LLM("prod")
|
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: ehs-llm-client
|
|
3
|
-
Version: 0.1.1
|
|
3
|
+
Version: 0.1.4
|
|
4
4
|
Summary: Unified LLM client. Currently supports Openai, Azure Openai and Google Gemini
|
|
5
5
|
Author-email: Andersen Huang <andersen.huang@ehsanalytics.com>
|
|
6
|
-
License: MIT
|
|
6
|
+
License-Expression: MIT
|
|
7
7
|
Description-Content-Type: text/markdown
|
|
8
8
|
Requires-Dist: openai
|
|
9
9
|
Requires-Dist: google-genai
|
|
@@ -15,7 +15,7 @@ Requires-Dist: python-dateutil
|
|
|
15
15
|
[](https://pypi.org/project/ehs-llm-client/)
|
|
16
16
|
[](https://www.python.org/downloads/)
|
|
17
17
|
|
|
18
|
-
`llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
|
|
18
|
+
`ehs-llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
|
|
19
19
|
It supports **single calls**, **structured JSON outputs**, and **batch processing**, designed for **production-ready, reusable code** in applications, scripts, or pipelines.
|
|
20
20
|
|
|
21
21
|
---
|
|
@@ -36,7 +36,7 @@ It supports **single calls**, **structured JSON outputs**, and **batch processin
|
|
|
36
36
|
|
|
37
37
|
```bash
|
|
38
38
|
# Install from PyPI
|
|
39
|
-
pip install llm-client
|
|
39
|
+
pip install ehs-llm-client
|
|
40
40
|
```
|
|
41
41
|
|
|
42
42
|
Or install editable version during development:
|
|
@@ -50,7 +50,7 @@ pip install -e .
|
|
|
50
50
|
|
|
51
51
|
## Configuration
|
|
52
52
|
|
|
53
|
-
`llm-client` supports **three configuration modes**:
|
|
53
|
+
`ehs-llm-client` supports **three configuration modes**:
|
|
54
54
|
|
|
55
55
|
### 1️⃣ Config file (`.cfg`)
|
|
56
56
|
|
|
@@ -70,7 +70,7 @@ model_batch = gpt-4.1-mini
|
|
|
70
70
|
Usage:
|
|
71
71
|
|
|
72
72
|
```python
|
|
73
|
-
from llm_client import LLM
|
|
73
|
+
from ehs_llm_client import LLM
|
|
74
74
|
|
|
75
75
|
llm = LLM("prod", config_file_path="llmconfig.cfg")
|
|
76
76
|
```
|
|
@@ -80,7 +80,7 @@ llm = LLM("prod", config_file_path="llmconfig.cfg")
|
|
|
80
80
|
### 2️⃣ Config dictionary (for tests / CI)
|
|
81
81
|
|
|
82
82
|
```python
|
|
83
|
-
from llm_client import LLM
|
|
83
|
+
from ehs_llm_client import LLM
|
|
84
84
|
|
|
85
85
|
llm = LLM(
|
|
86
86
|
"default",
|
|
@@ -115,7 +115,7 @@ llm = LLM("default")
|
|
|
115
115
|
|
|
116
116
|
```python
|
|
117
117
|
import asyncio
|
|
118
|
-
from llm_client import LLM
|
|
118
|
+
from ehs_llm_client import LLM
|
|
119
119
|
|
|
120
120
|
async def main():
|
|
121
121
|
llm = LLM("prod")
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "ehs-llm-client"
|
|
3
|
-
version = "0.1.1"
|
|
3
|
+
version = "0.1.4"
|
|
4
4
|
description = "Unified LLM client. Currently supports Openai, Azure Openai and Google Gemini"
|
|
5
5
|
dependencies = [
|
|
6
6
|
"openai",
|
|
@@ -9,7 +9,7 @@ dependencies = [
|
|
|
9
9
|
"python-dateutil"
|
|
10
10
|
]
|
|
11
11
|
readme = { file = "README.md", content-type = "text/markdown" }
|
|
12
|
-
license = { text = "MIT" }
|
|
12
|
+
license = "MIT"
|
|
13
13
|
authors = [
|
|
14
14
|
{ name="Andersen Huang", email="andersen.huang@ehsanalytics.com" }
|
|
15
15
|
]
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|