ehs-llm-client 0.1.0__tar.gz → 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ehs_llm_client-0.1.2/PKG-INFO +179 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/README.md +1 -1
- ehs_llm_client-0.1.2/ehs_llm_client.egg-info/PKG-INFO +179 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/pyproject.toml +6 -2
- ehs_llm_client-0.1.0/PKG-INFO +0 -8
- ehs_llm_client-0.1.0/ehs_llm_client.egg-info/PKG-INFO +0 -8
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client/__init__.py +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client/client.py +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client/config.py +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client/exceptions.py +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client/utils.py +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client.egg-info/SOURCES.txt +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client.egg-info/dependency_links.txt +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client.egg-info/requires.txt +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/ehs_llm_client.egg-info/top_level.txt +0 -0
- {ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/setup.cfg +0 -0
ehs_llm_client-0.1.2/PKG-INFO

@@ -0,0 +1,179 @@

Metadata-Version: 2.4
Name: ehs-llm-client
Version: 0.1.2
Summary: Unified LLM client. Currently supports Openai, Azure Openai and Google Gemini
Author-email: Andersen Huang <andersen.huang@ehsanalytics.com>
License: MIT
Description-Content-Type: text/markdown
Requires-Dist: openai
Requires-Dist: google-genai
Requires-Dist: python-dotenv
Requires-Dist: python-dateutil

# llm-client

[PyPI](https://pypi.org/project/llm-client/)
[Python](https://www.python.org/downloads/)

`llm-client` is a unified **async Python client** for interacting with multiple LLM providers, including **OpenAI**, **Azure OpenAI**, and **Google Gemini**.
It supports **single calls**, **structured JSON outputs**, and **batch processing**, and is designed for **production-ready, reusable code** in applications, scripts, and pipelines.

---

## Features

- Async support for multiple LLM providers
- Unified interface across OpenAI, Azure, and Google Gemini
- Structured JSON responses via schemas
- Retry and timeout handling
- Batch processing support
- Environment variable configuration for API keys
- Easy integration into existing projects or monorepos

---

## Installation

```bash
# Install from PyPI
pip install ehs-llm-client
```

Or install an editable version during development:

```bash
# From the project root
pip install -e .
```

---

## Configuration

`llm-client` supports **three configuration modes**:

### 1️⃣ Config file (`.cfg`)

Example: `llmconfig.cfg`

```ini
[default_settings]
provider = openai
model = gpt-4.1-mini

[prod]
provider = openai
model = gpt-4.1
model_batch = gpt-4.1-mini
```

Usage:

```python
from ehs_llm_client import LLM

llm = LLM("prod", config_file_path="llmconfig.cfg")
```

---

### 2️⃣ Config dictionary (for tests / CI)

```python
from ehs_llm_client import LLM

llm = LLM(
    "default",
    config={
        "default_settings": {
            "provider": "openai",
            "model": "gpt-4.1-mini"
        }
    }
)
```

---

### 3️⃣ Environment variable fallback

```bash
export LLM_PROVIDER=openai
export LLM_MODEL=gpt-4.1-mini
export OPENAI_API_KEY=sk-...
```

```python
llm = LLM("default")
```

---

## Usage

### Single async call

```python
import asyncio
from ehs_llm_client import LLM

async def main():
    llm = LLM("prod")
    messages = [{"role": "user", "content": "Say hello in JSON"}]

    response, in_tokens, out_tokens = await llm.get_response_async(
        messages, schema="return json"
    )

    print(response)

asyncio.run(main())
```
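
The call above asks for JSON output, so `response` usually needs to be parsed before use. A minimal sketch, assuming `response` comes back as a JSON-formatted string (the exact return type is not documented here and may vary by provider):

```python
import json

# Hypothetical continuation of main() above: parse the model's JSON reply.
try:
    data = json.loads(response)
    print(data)
except json.JSONDecodeError:
    # Models can still emit malformed JSON; keep the raw text for debugging.
    print("Model did not return valid JSON:", response)
```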

---

### Batch request

```python
batch_line = llm.create_batch_request(
    custom_id="test1",
    messages=[{"role": "user", "content": "Give me a JSON object"}],
    schema="return json"
)

batch_id = await llm.run_batch_process([batch_line], submission_id="batch123")
status = await llm.get_batch_status(batch_id)
print(status)
```
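
Provider batch jobs complete asynchronously, so `get_batch_status` generally has to be polled until the job reaches a terminal state. A minimal polling sketch; the status strings below mirror OpenAI's batch states and are an assumption, not something this client documents:

```python
import asyncio

async def wait_for_batch(llm, batch_id, interval_s=30):
    # Re-check the batch until the provider reports a terminal state.
    terminal = {"completed", "failed", "expired", "cancelled"}  # assumed values
    while True:
        status = await llm.get_batch_status(batch_id)
        if status in terminal:
            return status
        await asyncio.sleep(interval_s)
```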

---

## API Key Management

Keep API keys **out of code**. Recommended environment variables:

| Provider | Env var name |
|----------|-------------|
| OpenAI | `OPENAI_API_KEY` |
| Azure OpenAI | `AZURE_OPENAI_API_KEY` |
| Google Gemini | `GOOGLE_API_KEY` |
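
Because `python-dotenv` is a declared dependency, these keys can also live in a local `.env` file and be loaded at startup. A minimal sketch (using a `.env` file is a convention here, not something this package requires):

```python
from dotenv import load_dotenv

# Reads key=value pairs from a local .env file (e.g. OPENAI_API_KEY=sk-...)
# into os.environ, without overwriting variables that are already set.
load_dotenv()
```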

---

## Development

```bash
# Create a virtual environment
python -m venv .venv
source .venv/bin/activate   # macOS/Linux
.venv\Scripts\activate      # Windows

# Install dependencies
pip install -e .
```

---

## License

MIT License © 2026 Your Name

ehs_llm_client-0.1.2/ehs_llm_client.egg-info/PKG-INFO

@@ -0,0 +1,179 @@

Contents identical to ehs_llm_client-0.1.2/PKG-INFO above.

{ehs_llm_client-0.1.0 → ehs_llm_client-0.1.2}/pyproject.toml

@@ -1,6 +1,6 @@

 [project]
 name = "ehs-llm-client"
-version = "0.1.0"
+version = "0.1.2"
 description = "Unified LLM client. Currently supports Openai, Azure Openai and Google Gemini"
 dependencies = [
     "openai",

@@ -8,7 +8,11 @@ dependencies = [

     "python-dotenv",
     "python-dateutil"
 ]
-
+readme = { file = "README.md", content-type = "text/markdown" }
+license = { text = "MIT" }
+authors = [
+    { name="Andersen Huang", email="andersen.huang@ehsanalytics.com" }
+]
 [tool.setuptools.packages.find]
 where = ["."]
 include = ["ehs_llm_client*"]

ehs_llm_client-0.1.0/PKG-INFO
DELETED

@@ -1,8 +0,0 @@

Metadata-Version: 2.4
Name: ehs-llm-client
Version: 0.1.0
Summary: Unified LLM client. Currently supports Openai, Azure Openai and Google Gemini
Requires-Dist: openai
Requires-Dist: google-genai
Requires-Dist: python-dotenv
Requires-Dist: python-dateutil

ehs_llm_client-0.1.0/ehs_llm_client.egg-info/PKG-INFO
DELETED

@@ -1,8 +0,0 @@

Contents identical to ehs_llm_client-0.1.0/PKG-INFO above.
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|