donkit-llm 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,442 @@
1
+ #!/usr/bin/env python3
2
+ """Test script for Ollama LLM provider integration."""
3
+
4
+ import asyncio
5
+ import json
6
+
7
+ import pytest
8
+
9
+ from llm_gate.models import ModelCapability
10
+ from .factory import ModelFactory
11
+ from .model_abstract import (
12
+ ContentPart,
13
+ ContentType,
14
+ EmbeddingRequest,
15
+ GenerateRequest,
16
+ Message,
17
+ )
18
+ from .openai_model import OpenAIModel
19
+
20
+
21
def test_ollama_model_creation() -> None:
    """Test that Ollama model initializes correctly."""
    banner = "=" * 60
    print(banner)
    print("TEST 1: Ollama Model Creation via Factory")
    print(banner)

    creds = {
        "api_key": "ollama",
        "base_url": "http://localhost:11434/v1",
    }

    try:
        model = ModelFactory.create_model(
            provider="ollama",
            model_name="gpt-oss:20b-cloud",
            credentials=creds,
        )
        # Ollama is expected to be served through the OpenAI-compatible adapter.
        print(f"✅ Model created: {model.__class__.__name__}")
        print(f" Model name: {model.model_name}")
        print(f" Is OpenAI model: {isinstance(model, OpenAIModel)}")
    except Exception as e:
        print(f"❌ Failed to create model: {e}")
        raise
44
+
45
+
46
def test_openai_model_direct_creation() -> None:
    """Test direct OpenAI model creation for Ollama."""
    rule = "=" * 60
    print("\n" + rule)
    print("TEST 2: Direct OpenAI Model Creation for Ollama")
    print(rule)

    try:
        # Bypass the generic factory entry point and build the OpenAI-style
        # model directly, pointing it at the local Ollama endpoint.
        model = ModelFactory.create_openai_model(
            model_name="gpt-oss:20b-cloud",
            api_key="ollama",
            base_url="http://localhost:11434/v1",
        )
        print(f"✅ Model created: {model.__class__.__name__}")
        print(f" Model name: {model.model_name}")
        print(f" Has client: {hasattr(model, 'client')}")
        print(f" Client type: {type(model.client).__name__}")
    except Exception as e:
        print(f"❌ Failed to create model: {e}")
        raise
65
+
66
+
67
def test_embedding_model_creation() -> None:
    """Test embedding model creation for Ollama."""
    rule = "=" * 60
    print("\n" + rule)
    print("TEST 3: Embedding Model Creation")
    print(rule)

    try:
        # The "openai" embedding provider is reused for Ollama by overriding
        # the base URL to the local Ollama OpenAI-compatible endpoint.
        model = ModelFactory.create_embedding_model(
            provider="openai",
            model_name="nomic-embed-text",
            api_key="ollama",
            base_url="http://localhost:11434/v1",
        )
        print(f"✅ Embedding model created: {model.__class__.__name__}")
        print(f" Model name: {model.model_name}")
    except Exception as e:
        print(f"❌ Failed to create embedding model: {e}")
        raise
86
+
87
+
88
def test_model_configuration() -> None:
    """Test model configuration."""
    rule = "=" * 60
    print("\n" + rule)
    print("TEST 4: Model Configuration")
    print(rule)

    try:
        model = ModelFactory.create_openai_model(
            model_name="gpt-oss:20b-cloud",
            api_key="ollama",
            base_url="http://localhost:11434/v1",
        )

        # Snapshot the observable model attributes as plain JSON-friendly data.
        config = dict(
            model_name=model.model_name,
            model_type=model.__class__.__name__,
            has_client=hasattr(model, "client"),
            capabilities=str(model.capabilities),
        )

        print("✅ Model configuration:")
        # default=str guards against any non-serializable attribute values.
        print(json.dumps(config, indent=2, default=str))
    except Exception as e:
        print(f"❌ Failed to get model configuration: {e}")
        raise
113
+
114
+
115
def test_factory_provider_support() -> None:
    """Test that factory supports ollama provider."""
    rule = "=" * 60
    print("\n" + rule)
    print("TEST 5: Factory Provider Support")
    print(rule)

    creds = {
        "api_key": "ollama",
        "base_url": "http://localhost:11434/v1",
    }

    try:
        # Every provider listed here must resolve to a concrete model class.
        for provider in ("openai", "ollama"):
            model = ModelFactory.create_model(
                provider=provider,
                model_name="test-model",
                credentials=creds,
            )
            print(f"✅ Provider '{provider}' supported: {model.__class__.__name__}")
    except Exception as e:
        print(f"❌ Provider support test failed: {e}")
        raise
139
+
140
+
141
@pytest.mark.asyncio
async def test_text_generation() -> None:
    """Test text generation with Ollama."""
    rule = "=" * 60
    print("\n" + rule)
    print("TEST 6: Text Generation Request")
    print(rule)

    creds = {
        "api_key": "ollama",
        "base_url": "http://localhost:11434/v1",
    }

    try:
        model = ModelFactory.create_model(
            provider="ollama",
            model_name="gpt-oss:20b-cloud",
            credentials=creds,
        )

        prompt = Message(
            role="user", content="Say 'Hello from Ollama!' in one sentence."
        )
        request = GenerateRequest(
            messages=[prompt],
            temperature=0.7,
            max_tokens=100,
        )

        print("📤 Sending request to Ollama...")
        print(" Model: gpt-oss:20b-cloud")
        print(f" Message: {request.messages[0].content}")

        # Actual network call to the local Ollama server.
        response = await model.generate(request)

        print("✅ Generation successful")
        print(f" Response: {response.content}")

    except Exception as e:
        print(f"❌ Generation failed: {e}")
        raise
182
+
183
+
184
@pytest.mark.asyncio
async def test_streaming_generation() -> None:
    """Test streaming text generation with Ollama."""
    rule = "=" * 60
    print("\n" + rule)
    print("TEST 7: Streaming Text Generation")
    print(rule)

    creds = {
        "api_key": "ollama",
        "base_url": "http://localhost:11434/v1",
    }

    try:
        model = ModelFactory.create_model(
            provider="ollama",
            model_name="gpt-oss:20b-cloud",
            credentials=creds,
        )

        request = GenerateRequest(
            messages=[
                Message(role="user", content="Count from 1 to 5, one number per line.")
            ],
            temperature=0.5,
            max_tokens=100,
        )

        print("📤 Sending streaming request to Ollama...")
        print(" Model: gpt-oss:20b-cloud")
        print(f" Message: {request.messages[0].content}")
        print("\n Response stream:")

        # Accumulate streamed chunks so we can report the final length.
        pieces: list[str] = []
        async for chunk in model.generate_stream(request):
            if chunk.content:
                print(f" {chunk.content}", end="", flush=True)
                pieces.append(chunk.content)
        full_content = "".join(pieces)

        print("\n\n✅ Streaming complete")
        print(f" Total content length: {len(full_content)} characters")

    except Exception as e:
        print(f"❌ Streaming failed: {e}")
        raise
228
+
229
+
230
@pytest.mark.asyncio
async def test_embedding_generation() -> None:
    """Test embedding generation with Ollama.

    Creates an OpenAI-style embedding model pointed at the local Ollama
    endpoint, embeds a small batch of texts, and prints the resulting
    embedding count/dimension.
    """
    print("\n" + "=" * 60)
    print("TEST 8: Embedding Generation")
    print("=" * 60)
    try:
        model = ModelFactory.create_embedding_model(
            provider="openai",
            model_name="embeddinggemma",
            api_key="ollama",
            base_url="http://localhost:11434/v1",
        )

        texts = [
            "Hello from Ollama!",
            "This is a test embedding.",
            "Embedding models are useful for semantic search.",
        ]

        request = EmbeddingRequest(input=texts)

        print("📤 Sending embedding request to Ollama...")
        # FIX: previously hard-coded "nomic-embed-text", which no longer
        # matched the model actually created above ("embeddinggemma").
        # Report the real model name so the log cannot drift again.
        print(f" Model: {model.model_name}")
        print(f" Texts to embed: {len(texts)}")
        for i, text in enumerate(texts, 1):
            print(f" {i}. {text}")

        # Actual network call to the local Ollama server.
        response = await model.embed(request)

        print("✅ Embedding successful")
        print(f" Number of embeddings: {len(response.embeddings)}")
        print(f" Embedding dimension: {len(response.embeddings[0])}")
        print(f" First embedding (first 5 values): {response.embeddings[0][:5]}")

    except Exception as e:
        print(f"❌ Embedding failed: {e}")
        raise
268
+
269
+
270
@pytest.mark.asyncio
async def test_multimodal_vision() -> None:
    """Test multimodal vision capabilities with Ollama.

    Sends a text part plus an image-URL part to a vision-capable model and
    prints the model's description of the image.
    """
    print("\n" + "=" * 60)
    print("TEST 9: Multimodal Vision (Image Understanding)")
    print("=" * 60)

    credentials = {
        "api_key": "ollama",
        "base_url": "http://localhost:11434/v1",
    }

    try:
        model = ModelFactory.create_model(
            provider="ollama",
            model_name="qwen3-vl:235b-cloud",
            credentials=credentials,
        )

        # Check if model supports vision
        print("📋 Model capabilities:")
        print(f" Vision support: {model.supports_capability(ModelCapability.VISION)}")
        print(
            f" Multimodal input: {model.supports_capability(ModelCapability.MULTIMODAL_INPUT)}"
        )

        # Create a message with image URL (using a public test image)
        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Cat03.jpg/1200px-Cat03.jpg"

        content_parts = [
            ContentPart(
                content_type=ContentType.TEXT,
                content="What do you see in this image? Describe it briefly.",
            ),
            ContentPart(
                content_type=ContentType.IMAGE_URL,
                content=image_url,
                mime_type="image/jpeg",
            ),
        ]

        message = Message(role="user", content=content_parts)

        request = GenerateRequest(
            messages=[message],
            temperature=0.7,
            max_tokens=200,
        )

        print("\n📤 Sending multimodal request to Ollama...")
        # FIX: previously printed the stale name "gpt-oss:20b-cloud"; the
        # model actually created above is "qwen3-vl:235b-cloud". Use the
        # model's own name so the log always matches reality.
        print(f" Model: {model.model_name}")
        print(f" Message parts: {len(content_parts)}")
        print(" - Text: What do you see in this image?")
        print(f" - Image: {image_url}")

        # Actual network call to the local Ollama server.
        response = await model.generate(request)

        print("\n✅ Vision analysis successful")
        print(f" Response: {response.content}")

    except Exception as e:
        print(f"❌ Vision test failed: {e}")
        raise
333
+
334
+
335
@pytest.mark.asyncio
async def test_multimodal_with_base64() -> None:
    """Test multimodal with base64 encoded image.

    Sends a text part plus an inline base64-encoded PNG to a vision-capable
    model and prints the model's analysis.
    """
    print("\n" + "=" * 60)
    print("TEST 10: Multimodal with Base64 Image")
    print("=" * 60)

    credentials = {
        "api_key": "ollama",
        "base_url": "http://localhost:11434/v1",
    }

    try:
        model = ModelFactory.create_model(
            provider="ollama",
            model_name="qwen3-vl:235b-cloud",
            credentials=credentials,
        )

        # For this test, we'll use a simple base64 encoded 1x1 pixel image
        # In real usage, you would encode an actual image
        base64_image = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg=="

        content_parts = [
            ContentPart(
                content_type=ContentType.TEXT,
                content="Analyze this image and tell me what you see.",
            ),
            ContentPart(
                content_type=ContentType.IMAGE_BASE64,
                content=base64_image,
                mime_type="image/png",
            ),
        ]

        message = Message(role="user", content=content_parts)

        request = GenerateRequest(
            messages=[message],
            temperature=0.5,
            max_tokens=100,
        )

        print("📤 Sending base64 image request to Ollama...")
        # FIX: previously printed the stale name "gpt-oss:20b-cloud"; the
        # model actually created above is "qwen3-vl:235b-cloud". Use the
        # model's own name so the log always matches reality.
        print(f" Model: {model.model_name}")
        print(f" Message parts: {len(content_parts)}")
        print(" - Text: Analyze this image")
        print(f" - Image (base64): {len(base64_image)} characters")

        # Actual network call to the local Ollama server.
        response = await model.generate(request)

        print("✅ Base64 image analysis successful")
        print(f" Response: {response.content}")

    except Exception as e:
        print(f"❌ Base64 image test failed: {e}")
        raise
392
+
393
+
394
async def run_async_tests() -> None:
    """Run async tests.

    Only the multimodal tests are currently enabled; the generation,
    streaming, and embedding tests are intentionally left disabled here.
    """
    try:
        # Disabled for now — re-enable as needed:
        # await test_text_generation()
        # await test_streaming_generation()
        # await test_embedding_generation()
        for coro_fn in (test_multimodal_vision, test_multimodal_with_base64):
            await coro_fn()
    except Exception as e:
        print(f"\n❌ Async tests failed: {e}")
        raise
405
+
406
+
407
def main() -> None:
    """Run all tests."""
    # Draw a boxed title banner (58-char interior width).
    width = 58
    print("\n")
    print("╔" + "=" * width + "╗")
    print("║" + " " * width + "║")
    print("║" + " OLLAMA LLM PROVIDER INTEGRATION TESTS".center(width) + "║")
    print("║" + " " * width + "║")
    print("╚" + "=" * width + "╝")

    rule = "=" * 60
    try:
        # Sync tests are currently disabled — uncomment to run:
        # test_ollama_model_creation()
        # test_model_configuration()
        # test_embedding_model_creation()
        # test_factory_provider_support()
        # test_openai_model_direct_creation()

        # Async tests (actual API calls)
        print("\n" + rule)
        print("Running async tests (actual API calls)...")
        print(rule)
        asyncio.run(run_async_tests())

        print("\n" + rule)
        print("✅ ALL TESTS PASSED!")
        print(rule + "\n")

    except Exception as e:
        print("\n" + rule)
        print(f"❌ TESTS FAILED: {e}")
        print(rule + "\n")
        raise
439
+
440
+
441
# Allow running this integration script directly (not just under pytest).
if __name__ == "__main__":
    main()