django-cfg 1.1.75__py3-none-any.whl → 1.1.76__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- django_cfg/__init__.py +1 -1
- django_cfg/modules/django_llm/llm/client.py +38 -26
- django_cfg/modules/django_llm/llm/models.py +114 -0
- {django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/METADATA +1 -1
- {django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/RECORD +8 -7
- {django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/WHEEL +0 -0
- {django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/entry_points.txt +0 -0
- {django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/licenses/LICENSE +0 -0
django_cfg/__init__.py
CHANGED
@@ -38,7 +38,7 @@ default_app_config = "django_cfg.apps.DjangoCfgConfig"
 from typing import TYPE_CHECKING
 
 # Version information
-__version__ = "1.1.75"
+__version__ = "1.1.76"
 __author__ = "Unrealos Team"
 __email__ = "info@unrealos.com"
 __license__ = "MIT"
django_cfg/modules/django_llm/llm/client.py
CHANGED
@@ -26,6 +26,17 @@ from .models_cache import ModelsCache, ModelInfo
 from .costs import calculate_chat_cost, calculate_embedding_cost, estimate_cost
 from .tokenizer import Tokenizer
 from .extractor import JSONExtractor
+from .models import (
+    EmbeddingResponse,
+    ChatCompletionResponse,
+    TokenUsage,
+    ChatChoice,
+    LLMStats,
+    CostEstimate,
+    ValidationResult,
+    CacheInfo,
+    LLMError
+)
 
 logger = logging.getLogger(__name__)
 
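With the response models importable from one module, downstream code can annotate against them as well. A minimal sketch of a hypothetical consumer (the wrapper name and error handling are illustrative, not package API; it assumes the client re-raises provider errors):

```python
from typing import Union

from django_cfg.modules.django_llm.llm.models import EmbeddingResponse, LLMError

def embed_or_error(client, text: str) -> Union[EmbeddingResponse, LLMError]:
    """Hypothetical wrapper: normalize failures into the typed LLMError model."""
    try:
        return client.generate_embedding(text)
    except Exception as exc:  # assuming the client re-raises API failures
        return LLMError(error_type=type(exc).__name__, message=str(exc))
```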
@@ -458,7 +469,7 @@ class LLMClient:
         self.models_cache.clear_cache()
         logger.info("LLM client cache cleared")
 
-    def generate_embedding(self, text: str, model: str = "text-embedding-ada-002") ->
+    def generate_embedding(self, text: str, model: str = "text-embedding-ada-002") -> EmbeddingResponse:
         """
         Generate embedding for text.
 
@@ -484,7 +495,8 @@ class LLMClient:
         if cached_response:
             logger.debug("Cache hit for embedding generation")
             self.stats['cache_hits'] += 1
-            return cached_response
+            # Convert cached dict back to Pydantic model
+            return EmbeddingResponse(**cached_response)
 
         self.stats['cache_misses'] += 1
         self.stats['total_requests'] += 1
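The cache layer keeps storing plain dicts; only the boundary changed, with model_dump() on the way in and **cached_response on the way out. A standalone sketch of that round-trip, using a trimmed copy of EmbeddingResponse (the field values are made up):

```python
from typing import List, Optional

from pydantic import BaseModel

class EmbeddingResponse(BaseModel):
    """Trimmed copy of the model added in models.py below."""
    embedding: List[float]
    tokens: int
    cost: float
    model: str
    text_length: int
    dimension: int
    response_time: float
    warning: Optional[str] = None

original = EmbeddingResponse(
    embedding=[0.1, 0.2], tokens=3, cost=1e-6, model="text-embedding-ada-002",
    text_length=11, dimension=2, response_time=0.05,
)
cached = original.model_dump()          # plain dict, JSON/pickle friendly
restored = EmbeddingResponse(**cached)  # what the cache-hit branch now returns
assert restored == original
```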
@@ -526,16 +538,16 @@ class LLMClient:
                 tokens_used = len(text.split())  # Rough estimate
                 cost = calculate_embedding_cost(tokens_used, model, self.models_cache)
 
-                result =
-
-
-
-
-
-
-
-
-
+                result = EmbeddingResponse(
+                    embedding=mock_embedding,
+                    tokens=tokens_used,
+                    cost=cost,
+                    model=model,
+                    text_length=len(text),
+                    dimension=len(mock_embedding),
+                    response_time=time.time() - start_time,
+                    warning="This is a mock embedding, not a real one. OpenRouter doesn't support embedding models."
+                )
             else:
                 # Use real OpenAI embedding API
                 response = self.client.embeddings.create(
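Note the warning field on this branch: OpenRouter has no embeddings endpoint, so the client fabricates a vector and flags it rather than failing. Callers can gate on that flag; a hedged sketch (safe_embedding is an illustrative helper, not package API):

```python
import logging

logger = logging.getLogger(__name__)

def safe_embedding(client, text: str):
    """Return a real embedding vector, or None if the client mocked one."""
    result = client.generate_embedding(text)  # EmbeddingResponse
    if result.warning:
        # Mock vector from the OpenRouter branch; not suitable for storage.
        logger.warning("Discarding mock embedding: %s", result.warning)
        return None
    return result.embedding
```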
@@ -551,25 +563,25 @@ class LLMClient:
                 tokens_used = response.usage.total_tokens
                 cost = calculate_embedding_cost(tokens_used, model, self.models_cache)
 
-                result =
-
-
-
-
-
-
-
-
+                result = EmbeddingResponse(
+                    embedding=embedding_vector,
+                    tokens=tokens_used,
+                    cost=cost,
+                    model=model,
+                    text_length=len(text),
+                    dimension=len(embedding_vector),
+                    response_time=time.time() - start_time
+                )
 
             # Update statistics
             self.stats['successful_requests'] += 1
-            self.stats['total_tokens_used'] += result['tokens']
-            self.stats['total_cost_usd'] += result['cost']
+            self.stats['total_tokens_used'] += result.tokens
+            self.stats['total_cost_usd'] += result.cost
 
-            # Cache the result
-            self.cache.set_response(request_hash, result, model)
+            # Cache the result (convert to dict for caching)
+            self.cache.set_response(request_hash, result.model_dump(), model)
 
-            logger.debug(f"Generated embedding: {result['tokens']} tokens, ${result['cost']:.6f}")
+            logger.debug(f"Generated embedding: {result.tokens} tokens, ${result.cost:.6f}")
             return result
 
         except Exception as e:
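The stats and logging lines switch from dict indexing to attribute access, which is the visible contract change for callers too: the response object is no longer subscriptable. A hedged caller-side sketch (the function name and printing are illustrative):

```python
def report_embedding(client, text: str) -> dict:
    """Log the typed response fields and return a JSON-safe dict."""
    resp = client.generate_embedding(text)  # EmbeddingResponse, not a dict
    # Pre-1.1.76 callers likely indexed resp["tokens"]; that now raises TypeError.
    print(f"{resp.tokens} tokens, ${resp.cost:.6f}, dim={resp.dimension}")
    return resp.model_dump()  # dict form, e.g. for JSON responses
```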
django_cfg/modules/django_llm/llm/models.py
ADDED
@@ -0,0 +1,114 @@
+"""
+Pydantic models for LLM client responses.
+"""
+
+from typing import List, Optional, Dict, Any, Union
+from pydantic import BaseModel, Field
+from datetime import datetime
+
+
+class TokenUsage(BaseModel):
+    """Token usage information."""
+    prompt_tokens: int = Field(default=0, description="Number of tokens in the prompt")
+    completion_tokens: int = Field(default=0, description="Number of tokens in the completion")
+    total_tokens: int = Field(default=0, description="Total number of tokens used")
+
+
+class ChatChoice(BaseModel):
+    """Chat completion choice."""
+    index: int = Field(description="Choice index")
+    message: Dict[str, Any] = Field(description="Message content")
+    finish_reason: Optional[str] = Field(default=None, description="Reason for finishing")
+
+
+class ChatCompletionResponse(BaseModel):
+    """Chat completion response from LLM."""
+    id: str = Field(description="Response ID")
+    model: str = Field(description="Model used")
+    created: str = Field(description="Creation timestamp")
+    choices: List[ChatChoice] = Field(default_factory=list, description="Response choices")
+    usage: Optional[TokenUsage] = Field(default=None, description="Token usage")
+    finish_reason: Optional[str] = Field(default=None, description="Finish reason")
+    content: str = Field(description="Response content")
+    tokens_used: int = Field(default=0, description="Total tokens used")
+    cost_usd: float = Field(default=0.0, description="Cost in USD")
+    processing_time: float = Field(default=0.0, description="Processing time in seconds")
+    extracted_json: Optional[Dict[str, Any]] = Field(default=None, description="Extracted JSON if any")
+
+
+class EmbeddingResponse(BaseModel):
+    """Embedding generation response from LLM."""
+    embedding: List[float] = Field(description="Generated embedding vector")
+    tokens: int = Field(description="Number of tokens processed")
+    cost: float = Field(description="Cost in USD")
+    model: str = Field(description="Model used for embedding")
+    text_length: int = Field(description="Length of input text")
+    dimension: int = Field(description="Embedding vector dimension")
+    response_time: float = Field(description="Response time in seconds")
+    warning: Optional[str] = Field(default=None, description="Warning message if any")
+
+    class Config:
+        """Pydantic configuration."""
+        json_encoders = {
+            # Custom encoders if needed
+        }
+
+
+class LLMStats(BaseModel):
+    """LLM client statistics."""
+    successful_requests: int = Field(default=0, description="Number of successful requests")
+    failed_requests: int = Field(default=0, description="Number of failed requests")
+    total_tokens_used: int = Field(default=0, description="Total tokens used")
+    total_cost_usd: float = Field(default=0.0, description="Total cost in USD")
+    model_usage: Dict[str, int] = Field(default_factory=dict, description="Usage per model")
+    provider_usage: Dict[str, int] = Field(default_factory=dict, description="Usage per provider")
+    cache_hits: int = Field(default=0, description="Number of cache hits")
+    cache_misses: int = Field(default=0, description="Number of cache misses")
+
+
+class ModelInfo(BaseModel):
+    """Model information."""
+    id: str = Field(description="Model ID")
+    name: str = Field(description="Model name")
+    provider: str = Field(description="Provider name")
+    max_tokens: int = Field(description="Maximum tokens")
+    input_cost_per_token: float = Field(description="Input cost per token")
+    output_cost_per_token: float = Field(description="Output cost per token")
+    supports_functions: bool = Field(default=False, description="Supports function calling")
+    supports_vision: bool = Field(default=False, description="Supports vision")
+    context_window: int = Field(description="Context window size")
+
+
+class CostEstimate(BaseModel):
+    """Cost estimation result."""
+    estimated_cost: float = Field(description="Estimated cost in USD")
+    input_tokens: int = Field(description="Estimated input tokens")
+    output_tokens: int = Field(description="Estimated output tokens")
+    total_tokens: int = Field(description="Total estimated tokens")
+    model: str = Field(description="Model used for estimation")
+
+
+class ValidationResult(BaseModel):
+    """Validation result for requests."""
+    is_valid: bool = Field(description="Whether the request is valid")
+    errors: List[str] = Field(default_factory=list, description="Validation errors")
+    warnings: List[str] = Field(default_factory=list, description="Validation warnings")
+    estimated_tokens: Optional[int] = Field(default=None, description="Estimated token count")
+    estimated_cost: Optional[float] = Field(default=None, description="Estimated cost")
+
+
+class CacheInfo(BaseModel):
+    """Cache information."""
+    hit: bool = Field(description="Whether it was a cache hit")
+    key: str = Field(description="Cache key")
+    ttl: Optional[int] = Field(default=None, description="Time to live in seconds")
+    size: Optional[int] = Field(default=None, description="Cache entry size")
+
+
+class LLMError(BaseModel):
+    """LLM error information."""
+    error_type: str = Field(description="Type of error")
+    message: str = Field(description="Error message")
+    code: Optional[str] = Field(default=None, description="Error code")
+    details: Optional[Dict[str, Any]] = Field(default=None, description="Additional error details")
+    retry_after: Optional[int] = Field(default=None, description="Retry after seconds")
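Because these are ordinary Pydantic models, consumers get eager validation for free: malformed data fails at construction time rather than deep inside a handler. A standalone sketch with a trimmed copy of CostEstimate (assuming Pydantic v2, which the model_dump() call in client.py implies; the model name and the failing call are illustrative):

```python
from pydantic import BaseModel, Field, ValidationError

class CostEstimate(BaseModel):
    """Trimmed copy of the model above; all fields are required."""
    estimated_cost: float = Field(description="Estimated cost in USD")
    input_tokens: int = Field(description="Estimated input tokens")
    output_tokens: int = Field(description="Estimated output tokens")
    total_tokens: int = Field(description="Total estimated tokens")
    model: str = Field(description="Model used for estimation")

ok = CostEstimate(estimated_cost=0.0003, input_tokens=120,
                  output_tokens=80, total_tokens=200, model="gpt-4o-mini")
print(ok.model_dump())

try:
    CostEstimate(estimated_cost="cheap", input_tokens=120)  # bad type + missing fields
except ValidationError as exc:
    print(exc.error_count(), "errors")  # 1 type error + 3 missing required fields
```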
{django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: django-cfg
-Version: 1.1.75
+Version: 1.1.76
 Summary: 🚀 Production-ready Django configuration framework with type-safe settings, smart automation, and modern developer experience
 Project-URL: Homepage, https://github.com/markolofsen/django-cfg
 Project-URL: Documentation, https://django-cfg.readthedocs.io
{django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 django_cfg/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-django_cfg/__init__.py,sha256=
+django_cfg/__init__.py,sha256=5HQBbQwX5UmQp5IlOokrIJ5gAjYEWnrSwKTMPFZ4k00,14288
 django_cfg/apps.py,sha256=k84brkeXJI7EgKZLEpTkM9YFZofKI4PzhFOn1cl9Msc,1656
 django_cfg/exceptions.py,sha256=RTQEoU3PfR8lqqNNv5ayd_HY2yJLs3eioqUy8VM6AG4,10378
 django_cfg/integration.py,sha256=jUO-uZXLmBXy9iugqgsl_xnYA_xoH3LZg5RxZbobVrc,4988
@@ -215,9 +215,10 @@ django_cfg/modules/django_llm/__init__.py,sha256=Nirl7Ap3sv5qS5EWM0IEUl_ul-mSYac
 django_cfg/modules/django_llm/example.py,sha256=uL3hbRHHuvmWrNzMI7uSV5wrbIck5yqcgrfRGmA76Wg,13066
 django_cfg/modules/django_llm/llm/__init__.py,sha256=sLx3bbLUx1h-k5aYoljlAeIg9tDzyd5C9ZFJvccbNSA,192
 django_cfg/modules/django_llm/llm/cache.py,sha256=cYcbGpVF7zLUvLVvbqKtrJJnAPwno4ubL77UBI7x4bo,5653
-django_cfg/modules/django_llm/llm/client.py,sha256=
+django_cfg/modules/django_llm/llm/client.py,sha256=MLAboZIGB0MFclvTtKZx1sVEVCmCujPULSRYkzxSoQY,22117
 django_cfg/modules/django_llm/llm/costs.py,sha256=1L5YTlIIJTWmY0_jKC8sEMZs1YRMDeStz-r-BpyZ2Vo,7490
 django_cfg/modules/django_llm/llm/extractor.py,sha256=6LCq3IZUO5zKefwNEQ4EkszLGLGEA_YFLvAUPoRBdMc,2694
+django_cfg/modules/django_llm/llm/models.py,sha256=6MxlRp2ll09mbSZrG6kNH6W_NZcLn_zHJ7d8YWAk1Ac,5529
 django_cfg/modules/django_llm/llm/models_cache.py,sha256=RnW9Q147I8YFwZWeceYZ6p32wra3WLKFNTo-o4NT_3s,20675
 django_cfg/modules/django_llm/llm/tokenizer.py,sha256=MMrb34Xl1u0yMUGQ4TRW40fP4NWbnOuwjBKkoOswu9g,2678
 django_cfg/modules/django_llm/translator/__init__.py,sha256=_gMsHBWEfiTFQvTk4UsHcXCyQBf2ilF82ISFRBbRBSU,247
@@ -264,8 +265,8 @@ django_cfg/templates/emails/base_email.html,sha256=TWcvYa2IHShlF_E8jf1bWZStRO0v8
 django_cfg/utils/__init__.py,sha256=64wwXJuXytvwt8Ze_erSR2HmV07nGWJ6DV5wloRBvYE,435
 django_cfg/utils/path_resolution.py,sha256=eML-6-RIGTs5TePktIQN8nxfDUEFJ3JA0AzWBcihAbs,13894
 django_cfg/utils/smart_defaults.py,sha256=-qaoiOQ1HKDOzwK2uxoNlmrOX6l8zgGlVPgqtdj3y4g,22319
-django_cfg-1.1.75.dist-info/METADATA,sha256=
-django_cfg-1.1.75.dist-info/WHEEL,sha256=
-django_cfg-1.1.75.dist-info/entry_points.txt,sha256=
-django_cfg-1.1.75.dist-info/licenses/LICENSE,sha256=
-django_cfg-1.1.75.dist-info/RECORD,,
+django_cfg-1.1.76.dist-info/METADATA,sha256=3PX5-rA1Dv_hRUi0Nc_qrVImDe9Wsg92YBKYHqBsZoM,45783
+django_cfg-1.1.76.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+django_cfg-1.1.76.dist-info/entry_points.txt,sha256=Ucmde4Z2wEzgb4AggxxZ0zaYDb9HpyE5blM3uJ0_VNg,56
+django_cfg-1.1.76.dist-info/licenses/LICENSE,sha256=xHuytiUkSZCRG3N11nk1X6q1_EGQtv6aL5O9cqNRhKE,1071
+django_cfg-1.1.76.dist-info/RECORD,,
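Each RECORD row is path, sha256=digest, size-in-bytes, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with padding stripped, per the wheel spec. A small sketch for reproducing a row locally (the example path is illustrative):

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel RECORD line: path,sha256=<urlsafe b64 digest>,<size>."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. record_entry("django_cfg/__init__.py") should reproduce the entry above
```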
{django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/WHEEL
File without changes
{django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/entry_points.txt
File without changes
{django_cfg-1.1.75.dist-info → django_cfg-1.1.76.dist-info}/licenses/LICENSE
File without changes