generic-llm-api-client 0.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. generic_llm_api_client-0.1.1/LICENSE +21 -0
  2. generic_llm_api_client-0.1.1/PKG-INFO +477 -0
  3. generic_llm_api_client-0.1.1/README.md +427 -0
  4. generic_llm_api_client-0.1.1/ai_client/__init__.py +66 -0
  5. generic_llm_api_client-0.1.1/ai_client/base_client.py +353 -0
  6. generic_llm_api_client-0.1.1/ai_client/claude_client.py +283 -0
  7. generic_llm_api_client-0.1.1/ai_client/deepseek_client.py +28 -0
  8. generic_llm_api_client-0.1.1/ai_client/gemini_client.py +269 -0
  9. generic_llm_api_client-0.1.1/ai_client/mistral_client.py +191 -0
  10. generic_llm_api_client-0.1.1/ai_client/openai_client.py +299 -0
  11. generic_llm_api_client-0.1.1/ai_client/qwen_client.py +28 -0
  12. generic_llm_api_client-0.1.1/ai_client/response.py +89 -0
  13. generic_llm_api_client-0.1.1/ai_client/utils.py +151 -0
  14. generic_llm_api_client-0.1.1/generic_llm_api_client.egg-info/PKG-INFO +477 -0
  15. generic_llm_api_client-0.1.1/generic_llm_api_client.egg-info/SOURCES.txt +28 -0
  16. generic_llm_api_client-0.1.1/generic_llm_api_client.egg-info/dependency_links.txt +1 -0
  17. generic_llm_api_client-0.1.1/generic_llm_api_client.egg-info/requires.txt +24 -0
  18. generic_llm_api_client-0.1.1/generic_llm_api_client.egg-info/top_level.txt +1 -0
  19. generic_llm_api_client-0.1.1/pyproject.toml +106 -0
  20. generic_llm_api_client-0.1.1/setup.cfg +4 -0
  21. generic_llm_api_client-0.1.1/tests/test_async.py +130 -0
  22. generic_llm_api_client-0.1.1/tests/test_base_client.py +180 -0
  23. generic_llm_api_client-0.1.1/tests/test_claude_client.py +133 -0
  24. generic_llm_api_client-0.1.1/tests/test_integration_basic.py +363 -0
  25. generic_llm_api_client-0.1.1/tests/test_integration_multimodal.py +306 -0
  26. generic_llm_api_client-0.1.1/tests/test_integration_structured.py +368 -0
  27. generic_llm_api_client-0.1.1/tests/test_openai_client.py +171 -0
  28. generic_llm_api_client-0.1.1/tests/test_other_clients.py +160 -0
  29. generic_llm_api_client-0.1.1/tests/test_response.py +151 -0
  30. generic_llm_api_client-0.1.1/tests/test_utils.py +193 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Sorin
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,477 @@
1
+ Metadata-Version: 2.4
2
+ Name: generic-llm-api-client
3
+ Version: 0.1.1
4
+ Summary: A unified, provider-agnostic Python client for multiple LLM APIs
5
+ Author-email: Sorin Marti <sorin.marti@gmail.com>
6
+ Maintainer-email: Sorin Marti <sorin.marti@gmail.com>
7
+ License: MIT
8
+ Project-URL: Homepage, https://github.com/RISE-UNIBAS/generic_llm_api_client
9
+ Project-URL: Documentation, https://github.com/RISE-UNIBAS/generic_llm_api_client#readme
10
+ Project-URL: Repository, https://github.com/RISE-UNIBAS/generic_llm_api_client
11
+ Project-URL: Issues, https://github.com/RISE-UNIBAS/generic_llm_api_client/issues
12
+ Keywords: llm,ai,openai,anthropic,claude,gemini,mistral,deepseek,qwen,api-client,humanities,research,benchmarking
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Intended Audience :: Science/Research
16
+ Classifier: License :: OSI Approved :: MIT License
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
23
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
24
+ Requires-Python: >=3.9
25
+ Description-Content-Type: text/markdown
26
+ License-File: LICENSE
27
+ Requires-Dist: anthropic~=0.71.0
28
+ Requires-Dist: openai~=2.6.1
29
+ Requires-Dist: mistralai~=1.9.11
30
+ Requires-Dist: google-genai~=1.46.0
31
+ Requires-Dist: requests~=2.32.5
32
+ Provides-Extra: dev
33
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
34
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
35
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
36
+ Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
37
+ Requires-Dist: python-dotenv>=1.0.0; extra == "dev"
38
+ Requires-Dist: Pillow>=10.0.0; extra == "dev"
39
+ Requires-Dist: black>=23.0.0; extra == "dev"
40
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
41
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
42
+ Provides-Extra: test
43
+ Requires-Dist: pytest>=7.0.0; extra == "test"
44
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
45
+ Requires-Dist: pytest-cov>=4.0.0; extra == "test"
46
+ Requires-Dist: pytest-mock>=3.10.0; extra == "test"
47
+ Requires-Dist: python-dotenv>=1.0.0; extra == "test"
48
+ Requires-Dist: Pillow>=10.0.0; extra == "test"
49
+ Dynamic: license-file
50
+
51
+ # Generic LLM API Client
52
+
53
+ A unified, provider-agnostic Python client for multiple LLM APIs. Query any LLM (OpenAI, Anthropic Claude, Google Gemini, Mistral, DeepSeek, Qwen, OpenRouter, and more) through a single, consistent interface.
54
+
55
+ **Perfect for**: Research workflows, benchmarking studies, automated testing, and applications that need to work with multiple LLM providers without dealing with their individual APIs.
56
+
57
+ ## Important Note
58
+
59
+ This package is a **convenience wrapper** for working with multiple LLM providers through a unified interface. It is **not intended as a replacement** for the official provider libraries (openai, anthropic, google-genai, etc.).
60
+
61
+ ### Use this package when:
62
+ - You need to query multiple LLM providers in the same project
63
+ - You're building benchmarking or comparison tools
64
+ - You want a consistent interface across providers
65
+ - You need provider-agnostic code for research workflows
66
+
67
+ ### Use the official libraries when:
68
+ - You need cutting-edge features on day one of release
69
+ - You require provider-specific advanced features
70
+ - You only work with a single provider
71
+
72
+ **Update pace:** This package is maintained by a small team and may not immediately support every new feature from upstream providers. We prioritize stability and cross-provider compatibility over bleeding-edge feature coverage.
73
+
74
+ ## Features
75
+
76
+ - **Provider-Agnostic**: Single interface for OpenAI, Anthropic, Google, Mistral, DeepSeek, Qwen, and OpenRouter
77
+ - **Multimodal Support**: Text + images across all providers that support them
78
+ - **Structured Output**: Unified Pydantic model support across providers
79
+ - **Rich Response Objects**: Detailed token usage, costs, timing, and metadata
80
+ - **Async Support**: Parallel processing for faster benchmarks
81
+ - **Built-in Retry Logic**: Automatic exponential backoff for rate limits
82
+ - **Custom Base URLs**: Easy integration with OpenRouter, sciCORE, and other OpenAI-compatible APIs
83
+
84
+ ## Installation
85
+
86
+ ```bash
87
+ pip install generic-llm-api-client
88
+ ```
89
+
90
+ ## Quick Start
91
+
92
+ ```python
93
+ from ai_client import create_ai_client
94
+
95
+ # Create a client for any provider
96
+ client = create_ai_client('openai', api_key='sk-...')
97
+
98
+ # Send a prompt
99
+ response, duration = client.prompt('gpt-4', 'What is 2+2?')
100
+
101
+ print(f"Response: {response.text}")
102
+ print(f"Tokens used: {response.usage.total_tokens}")
103
+ print(f"Time: {duration:.2f}s")
104
+ ```
105
+
106
+ ## Supported Providers
107
+
108
+ | Provider | ID | Multimodal | Structured Output |
109
+ |----------|-----|-----------|-------------------|
110
+ | OpenAI | `openai` | Yes | Yes |
111
+ | Anthropic Claude | `anthropic` | Yes | Yes (via tools) |
112
+ | Google Gemini | `genai` | Yes | Yes |
113
+ | Mistral | `mistral` | Yes | Yes |
114
+ | DeepSeek | `deepseek` | Yes | Yes |
115
+ | Qwen | `qwen` | Yes | Yes |
116
+ | OpenRouter | `openrouter` | Yes | Yes |
117
+ | sciCORE | `scicore` | Yes | Yes |
118
+
119
+ ## Usage Examples
120
+
121
+ ### Basic Text Prompt
122
+
123
+ ```python
124
+ from ai_client import create_ai_client
125
+
126
+ client = create_ai_client('anthropic', api_key='sk-ant-...')
127
+ response, duration = client.prompt(
128
+ 'claude-3-5-sonnet-20241022',
129
+ 'Explain quantum computing in simple terms'
130
+ )
131
+
132
+ print(response.text)
133
+ ```
134
+
135
+ ### Multimodal (Text + Images)
136
+
137
+ ```python
138
+ from ai_client import create_ai_client
139
+
140
+ client = create_ai_client('openai', api_key='sk-...')
141
+
142
+ response, duration = client.prompt(
143
+ 'gpt-4o',
144
+ 'Describe this image in detail',
145
+ images=['path/to/image.jpg']
146
+ )
147
+
148
+ print(response.text)
149
+ ```
150
+
151
+ ### Multiple Images
152
+
153
+ ```python
154
+ response, duration = client.prompt(
155
+ 'gpt-4o',
156
+ 'Compare these two images',
157
+ images=['image1.jpg', 'image2.jpg']
158
+ )
159
+ ```
160
+
161
+ ### Structured Output with Pydantic
162
+
163
+ ```python
164
+ from pydantic import BaseModel
165
+ from ai_client import create_ai_client
166
+
167
+ class Person(BaseModel):
168
+ name: str
169
+ age: int
170
+ occupation: str
171
+
172
+ client = create_ai_client('openai', api_key='sk-...')
173
+
174
+ response, duration = client.prompt(
175
+ 'gpt-4',
176
+ 'Extract: John Smith is a 35-year-old software engineer',
177
+ response_format=Person
178
+ )
179
+
180
+ # Parse the response
181
+ import json
182
+ person_data = json.loads(response.text)
183
+ person = Person(**person_data)
184
+
185
+ print(f"{person.name}, {person.age}, {person.occupation}")
186
+ ```
187
+
188
+ ### Async for Parallel Processing
189
+
190
+ ```python
191
+ import asyncio
192
+ from ai_client import create_ai_client
193
+
194
+ async def process_batch():
195
+ client = create_ai_client('openai', api_key='sk-...')
196
+
197
+ # Process multiple prompts in parallel
198
+ tasks = [
199
+ client.prompt_async('gpt-4', f'Tell me about {topic}')
200
+ for topic in ['Python', 'JavaScript', 'Rust']
201
+ ]
202
+
203
+ results = await asyncio.gather(*tasks)
204
+
205
+ for response, duration in results:
206
+ print(f"({duration:.2f}s) {response.text[:100]}...")
207
+
208
+ asyncio.run(process_batch())
209
+ ```
210
+
211
+ ### Custom Base URLs (OpenRouter, sciCORE)
212
+
213
+ ```python
214
+ from ai_client import create_ai_client
215
+
216
+ # OpenRouter - access to 100+ models
217
+ client = create_ai_client(
218
+ 'openrouter',
219
+ api_key='sk-or-...',
220
+ base_url='https://openrouter.ai/api/v1',
221
+ default_headers={
222
+ "HTTP-Referer": "https://your-site.com",
223
+ "X-Title": "Your App"
224
+ }
225
+ )
226
+
227
+ response, _ = client.prompt('anthropic/claude-3-opus', 'Hello!')
228
+
229
+ # sciCORE (University HPC)
230
+ client = create_ai_client(
231
+ 'scicore',
232
+ api_key='your-key',
233
+ base_url='https://llm-api-h200.ceda.unibas.ch/litellm/v1'
234
+ )
235
+
236
+ response, _ = client.prompt('deepseek/deepseek-chat', 'Hello!')
237
+ ```
238
+
239
+ ### Accessing Response Metadata
240
+
241
+ ```python
242
+ response, duration = client.prompt('gpt-4', 'Hello')
243
+
244
+ # Response text
245
+ print(response.text)
246
+
247
+ # Token usage
248
+ print(f"Input tokens: {response.usage.input_tokens}")
249
+ print(f"Output tokens: {response.usage.output_tokens}")
250
+ print(f"Total tokens: {response.usage.total_tokens}")
251
+
252
+ # Metadata
253
+ print(f"Model: {response.model}")
254
+ print(f"Provider: {response.provider}")
255
+ print(f"Finish reason: {response.finish_reason}")
256
+ print(f"Duration: {response.duration}s")
257
+
258
+ # Raw provider response (for detailed analysis)
259
+ raw = response.raw_response
260
+
261
+ # Convert to dict (for JSON serialization)
262
+ response_dict = response.to_dict()
263
+ ```
264
+
265
+ ## Configuration
266
+
267
+ ### Provider-Specific Settings
268
+
269
+ ```python
270
+ from ai_client import create_ai_client
271
+
272
+ # OpenAI
273
+ client = create_ai_client(
274
+ 'openai',
275
+ api_key='sk-...',
276
+ temperature=0.7,
277
+ max_tokens=500,
278
+ frequency_penalty=0.5
279
+ )
280
+
281
+ # Claude
282
+ client = create_ai_client(
283
+ 'anthropic',
284
+ api_key='sk-ant-...',
285
+ temperature=1.0,
286
+ max_tokens=4096,
287
+ top_k=40
288
+ )
289
+
290
+ # Settings can also be passed per-request
291
+ response, _ = client.prompt(
292
+ 'gpt-4',
293
+ 'Hello',
294
+ temperature=0.9,
295
+ max_tokens=100
296
+ )
297
+ ```
298
+
299
+ ### Custom System Prompts
300
+
301
+ ```python
302
+ from ai_client import create_ai_client
303
+
304
+ client = create_ai_client(
305
+ 'openai',
306
+ api_key='sk-...',
307
+ system_prompt="You are a helpful coding assistant specialized in Python."
308
+ )
309
+
310
+ # Override for specific request
311
+ response, _ = client.prompt(
312
+ 'gpt-4',
313
+ 'Write a haiku',
314
+ system_prompt="You are a poetic assistant."
315
+ )
316
+ ```
317
+
318
+ ## Use Case: Benchmarking
319
+
320
+ Perfect for research workflows that need to evaluate multiple models:
321
+
322
+ ```python
323
+ from ai_client import create_ai_client
324
+ import asyncio
325
+
326
+ async def benchmark_models():
327
+ providers = [
328
+ ('openai', 'gpt-4'),
329
+ ('anthropic', 'claude-3-5-sonnet-20241022'),
330
+ ('genai', 'gemini-2.0-flash-exp'),
331
+ ]
332
+
333
+ prompt = 'Explain quantum entanglement'
334
+
335
+ for provider_id, model in providers:
336
+ client = create_ai_client(provider_id, api_key=f'{provider_id}_key')
337
+
338
+ response, duration = await client.prompt_async(model, prompt)
339
+
340
+ print(f"\n=== {provider_id}/{model} ===")
341
+ print(f"Duration: {duration:.2f}s")
342
+ print(f"Tokens: {response.usage.total_tokens}")
343
+ print(f"Response: {response.text[:200]}...")
344
+
345
+ asyncio.run(benchmark_models())
346
+ ```
347
+
348
+ ## Error Handling
349
+
350
+ The package includes built-in retry logic with exponential backoff:
351
+
352
+ ```python
353
+ from ai_client import create_ai_client, RateLimitError, APIError
354
+
355
+ client = create_ai_client('openai', api_key='sk-...')
356
+
357
+ try:
358
+ response, duration = client.prompt('gpt-4', 'Hello')
359
+ # Automatically retries up to 3 times on rate limit errors
360
+ except RateLimitError as e:
361
+ print(f"Rate limited after retries: {e}")
362
+ except APIError as e:
363
+ print(f"API error: {e}")
364
+ except Exception as e:
365
+ print(f"Unknown error: {e}")
366
+ ```
367
+
368
+ ## Advanced Features
369
+
370
+ ### Get Available Models
371
+
372
+ ```python
373
+ from ai_client import create_ai_client
374
+
375
+ client = create_ai_client('openai', api_key='sk-...')
376
+ models = client.get_model_list()
377
+
378
+ for model_id, created_date in models:
379
+ print(f"{model_id} (created: {created_date})")
380
+ ```
381
+
382
+ ### Check Multimodal Support
383
+
384
+ ```python
385
+ client = create_ai_client('openai', api_key='sk-...')
386
+
387
+ if client.has_multimodal_support():
388
+ print("This provider supports images!")
389
+ ```
390
+
391
+ ## Package Structure
392
+
393
+ ```
394
+ ai_client/
395
+ __init__.py # Package exports
396
+ base_client.py # BaseAIClient + factory
397
+ response.py # LLMResponse, Usage dataclasses
398
+ utils.py # Retry logic, exceptions, utilities
399
+ openai_client.py # OpenAI implementation
400
+ claude_client.py # Anthropic Claude
401
+ gemini_client.py # Google Gemini
402
+ mistral_client.py # Mistral AI
403
+ deepseek_client.py # DeepSeek
404
+ qwen_client.py # Qwen
405
+ ```
406
+
407
+ ## Requirements
408
+
409
+ - Python >=3.9
410
+ - anthropic ~=0.71.0
411
+ - openai ~=2.6.1
412
+ - mistralai ~=1.9.11
413
+ - google-genai ~=1.46.0
414
+ - requests ~=2.32.5
415
+
416
+ ## Development
417
+
418
+ ```bash
419
+ # Clone the repository
420
+ git clone https://github.com/RISE-UNIBAS/generic_llm_api_client.git
422
+ cd generic_llm_api_client
422
+
423
+ # Install in development mode
424
+ pip install -e ".[dev]"
425
+
426
+ # Run tests
427
+ pytest
428
+
429
+ # Run integration tests (requires API keys)
430
+ pytest -m integration
431
+
432
+ # Format code
433
+ black ai_client tests
434
+
435
+ # Type checking
436
+ mypy ai_client/
437
+ ```
438
+
439
+ ## Documentation
440
+
441
+ - **[EXAMPLES.md](EXAMPLES.md)** - Comprehensive usage examples
442
+ - **[PUBLISHING.md](PUBLISHING.md)** - Guide for maintainers on publishing releases
443
+
444
+ ## Contributing
445
+
446
+ Contributions are welcome! Please feel free to submit a Pull Request.
447
+
448
+ ## License
449
+
450
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
451
+
452
+ ## Citation
453
+
454
+ If you use this package in your research, please cite:
455
+
456
+ ```bibtex
457
+ @software{generic_llm_api_client,
458
+ author = {Sorin Marti},
459
+ title = {Generic LLM API Client: A Unified Interface for Multiple LLM Providers},
460
+ year = {2025},
461
+ url = {https://github.com/RISE-UNIBAS/generic_llm_api_client}
462
+ }
463
+ ```
464
+
465
+ ## Support
466
+
467
+ - GitHub Issues: [Report bugs or request features](https://github.com/RISE-UNIBAS/generic_llm_api_client/issues)
468
+ - Documentation: [Full documentation](https://github.com/RISE-UNIBAS/generic_llm_api_client#readme)
469
+
470
+ ## Roadmap
471
+
472
+ - [ ] Tool use / function calling support
473
+ - [ ] Streaming support
474
+ - [ ] Conversation history management
475
+ - [ ] More providers (Cohere, AI21, etc.)
476
+ - [ ] Cost estimation utilities
477
+ - [ ] Prompt caching support