langfuse-prompt-library-iauro 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfuse_prompt_library/__init__.py +54 -0
- langfuse_prompt_library/config.py +153 -0
- langfuse_prompt_library/exceptions.py +95 -0
- langfuse_prompt_library/manager.py +663 -0
- langfuse_prompt_library/models.py +42 -0
- langfuse_prompt_library_iauro-0.1.0.dist-info/METADATA +252 -0
- langfuse_prompt_library_iauro-0.1.0.dist-info/RECORD +13 -0
- langfuse_prompt_library_iauro-0.1.0.dist-info/WHEEL +5 -0
- langfuse_prompt_library_iauro-0.1.0.dist-info/licenses/LICENSE +21 -0
- langfuse_prompt_library_iauro-0.1.0.dist-info/top_level.txt +2 -0
- utils/__init__.py +1 -0
- utils/logger.py +122 -0
- utils/utility.py +302 -0
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Langfuse Library - A production-ready wrapper for Langfuse prompts and tracing.
|
|
3
|
+
|
|
4
|
+
This library provides a robust, enterprise-grade interface for:
|
|
5
|
+
- Fetching and managing prompts from Langfuse with caching
|
|
6
|
+
- Calling LLMs with automatic tracing and retry logic
|
|
7
|
+
- Token tracking and observability
|
|
8
|
+
- Error handling and validation
|
|
9
|
+
- Performance metrics
|
|
10
|
+
|
|
11
|
+
Example:
|
|
12
|
+
>>> from langfuse_prompt_library import LangfuseManager
|
|
13
|
+
>>>
|
|
14
|
+
>>> lf = LangfuseManager()
|
|
15
|
+
>>> response = lf.call_llm(
|
|
16
|
+
... prompt_name="customer_support_agent",
|
|
17
|
+
... user_input="How do I reset my password?",
|
|
18
|
+
... prompt_label="production"
|
|
19
|
+
... )
|
|
20
|
+
>>> print(response.content)
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
from .manager import LangfuseManager
|
|
24
|
+
from .models import LLMResponse
|
|
25
|
+
from .config import LangfuseConfig
|
|
26
|
+
from utils.logger import get_logger
|
|
27
|
+
from .exceptions import (
|
|
28
|
+
LangfuseLibraryError,
|
|
29
|
+
ConfigurationError,
|
|
30
|
+
PromptNotFoundError,
|
|
31
|
+
ProviderError,
|
|
32
|
+
APITimeoutError,
|
|
33
|
+
RateLimitError,
|
|
34
|
+
CacheError,
|
|
35
|
+
ValidationError,
|
|
36
|
+
TracingError
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
__version__ = "0.1.0"
|
|
40
|
+
__all__ = [
|
|
41
|
+
"LangfuseManager",
|
|
42
|
+
"LLMResponse",
|
|
43
|
+
"LangfuseConfig",
|
|
44
|
+
"get_logger",
|
|
45
|
+
"LangfuseLibraryError",
|
|
46
|
+
"ConfigurationError",
|
|
47
|
+
"PromptNotFoundError",
|
|
48
|
+
"ProviderError",
|
|
49
|
+
"APITimeoutError",
|
|
50
|
+
"RateLimitError",
|
|
51
|
+
"CacheError",
|
|
52
|
+
"ValidationError",
|
|
53
|
+
"TracingError"
|
|
54
|
+
]
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Configuration management for Langfuse library.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import logging
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class LangfuseConfig:
    """Configuration for Langfuse client.

    Attributes:
        secret_key: Langfuse secret key (required)
        public_key: Langfuse public key (required)
        host: Langfuse host URL (required)
        openai_api_key: OpenAI API key (optional)
        anthropic_api_key: Anthropic API key (optional)
        enable_caching: Enable prompt caching (default: True)
        cache_ttl: Cache time-to-live in seconds (default: 300)
        debug: Enable debug logging (default: False)
        log_level: Logging level (default: INFO)
        request_timeout: Default timeout for API requests in seconds (default: 30)
        max_retries: Maximum number of retries for failed API calls (default: 3)
        retry_delay: Initial delay between retries in seconds (default: 1)
        enable_metrics: Enable performance metrics collection (default: False)
        flush_at_shutdown: Automatically flush traces at shutdown (default: True)
        connection_pool_size: Max connections in pool (default: 10)
        validate_prompts: Validate prompt compilation (default: True)
    """

    secret_key: Optional[str] = None
    public_key: Optional[str] = None
    host: Optional[str] = None
    openai_api_key: Optional[str] = None
    anthropic_api_key: Optional[str] = None
    enable_caching: bool = True
    cache_ttl: int = 300
    debug: bool = False
    log_level: int = logging.INFO
    request_timeout: float = 30.0
    max_retries: int = 3
    retry_delay: float = 1.0
    enable_metrics: bool = False
    flush_at_shutdown: bool = True
    connection_pool_size: int = 10
    validate_prompts: bool = True

    @staticmethod
    def _env_flag(name: str, default: str) -> bool:
        """Interpret env var *name* as a boolean: true iff its value is "true" (case-insensitive)."""
        return os.getenv(name, default).lower() == "true"

    @classmethod
    def from_env(cls) -> 'LangfuseConfig':
        """Load configuration from environment variables.

        Environment variables:
            LANGFUSE_SECRET_KEY: Langfuse secret key
            LANGFUSE_PUBLIC_KEY: Langfuse public key
            LANGFUSE_BASE_URL: Langfuse host URL
            OPENAI_API_KEY: OpenAI API key (optional)
            ANTHROPIC_API_KEY: Anthropic API key (optional)
            LANGFUSE_ENABLE_CACHE: Enable caching (default: "true")
            LANGFUSE_CACHE_TTL: Cache TTL in seconds (default: "300")
            LANGFUSE_DEBUG: Enable debug mode (default: "false")
            LANGFUSE_LOG_LEVEL: Logging level (default: "INFO")
            LANGFUSE_REQUEST_TIMEOUT: Request timeout in seconds (default: "30")
            LANGFUSE_MAX_RETRIES: Max retry attempts (default: "3")
            LANGFUSE_RETRY_DELAY: Initial retry delay in seconds (default: "1")
            LANGFUSE_ENABLE_METRICS: Enable metrics collection (default: "false")
            LANGFUSE_FLUSH_AT_SHUTDOWN: Auto-flush at shutdown (default: "true")
            LANGFUSE_CONNECTION_POOL_SIZE: Connection pool size (default: "10")
            LANGFUSE_VALIDATE_PROMPTS: Validate prompts (default: "true")

        Returns:
            LangfuseConfig instance with values from environment
        """
        # Map the level *name* to its numeric value; unknown names fall back
        # to INFO rather than raising.
        log_level_str = os.getenv("LANGFUSE_LOG_LEVEL", "INFO").upper()
        log_level = getattr(logging, log_level_str, logging.INFO)

        return cls(
            secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
            public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
            host=os.getenv("LANGFUSE_BASE_URL"),
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            enable_caching=cls._env_flag("LANGFUSE_ENABLE_CACHE", "true"),
            cache_ttl=int(os.getenv("LANGFUSE_CACHE_TTL", "300")),
            debug=cls._env_flag("LANGFUSE_DEBUG", "false"),
            log_level=log_level,
            request_timeout=float(os.getenv("LANGFUSE_REQUEST_TIMEOUT", "30")),
            max_retries=int(os.getenv("LANGFUSE_MAX_RETRIES", "3")),
            retry_delay=float(os.getenv("LANGFUSE_RETRY_DELAY", "1")),
            enable_metrics=cls._env_flag("LANGFUSE_ENABLE_METRICS", "false"),
            flush_at_shutdown=cls._env_flag("LANGFUSE_FLUSH_AT_SHUTDOWN", "true"),
            connection_pool_size=int(os.getenv("LANGFUSE_CONNECTION_POOL_SIZE", "10")),
            validate_prompts=cls._env_flag("LANGFUSE_VALIDATE_PROMPTS", "true")
        )

    def validate(self, require_provider: bool = True) -> None:
        """Validate that all required configuration fields are set.

        Args:
            require_provider: If True, requires at least one provider API key

        Raises:
            ConfigurationError: If any required field is missing or invalid
        """
        # Local import avoids a circular dependency between config and exceptions.
        from .exceptions import ConfigurationError

        # Always required: Langfuse keys
        required_fields = {
            "secret_key": self.secret_key,
            "public_key": self.public_key,
            "host": self.host
        }

        # NOTE: loop variable renamed from `field` to avoid shadowing the
        # `dataclasses.field` import at module scope.
        missing_fields = [
            name for name, value in required_fields.items()
            if not value
        ]

        if missing_fields:
            raise ConfigurationError(
                f"Missing required Langfuse configuration: {', '.join(missing_fields)}. "
                f"Please set the corresponding environment variables."
            )

        # Validate numeric ranges
        if self.cache_ttl < 0:
            raise ConfigurationError("cache_ttl must be non-negative")

        if self.request_timeout <= 0:
            raise ConfigurationError("request_timeout must be positive")

        if self.max_retries < 0:
            raise ConfigurationError("max_retries must be non-negative")

        if self.retry_delay < 0:
            raise ConfigurationError("retry_delay must be non-negative")

        if self.connection_pool_size <= 0:
            raise ConfigurationError("connection_pool_size must be positive")

        # Check for at least one provider if required
        if require_provider:
            has_openai = bool(self.openai_api_key)
            has_anthropic = bool(self.anthropic_api_key)

            if not (has_openai or has_anthropic):
                raise ConfigurationError(
                    "No LLM provider configured. "
                    "Please set OPENAI_API_KEY or ANTHROPIC_API_KEY environment variable."
                )
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Custom exceptions for Langfuse library.
|
|
3
|
+
|
|
4
|
+
Provides specific exception types for better error handling and debugging.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class LangfuseLibraryError(Exception):
    """Root of the library's exception hierarchy; catch this to handle any library error."""
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ConfigurationError(LangfuseLibraryError):
    """Signals missing or invalid library configuration."""
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class PromptNotFoundError(LangfuseLibraryError):
    """Raised when a requested prompt cannot be found in Langfuse.

    The failed lookup's parameters (name plus optional version/label
    qualifiers) are stored on the instance for programmatic inspection.
    """

    def __init__(self, prompt_name: str, version: str = None, label: str = None):
        # Preserve the lookup parameters for callers that want to react to them.
        self.prompt_name = prompt_name
        self.version = version
        self.label = label

        # Build the message from parts: base text, then each qualifier that
        # was actually supplied.
        parts = [f"Prompt '{prompt_name}' not found"]
        if version:
            parts.append(f"(version: {version})")
        if label:
            parts.append(f"(label: {label})")

        super().__init__(" ".join(parts))
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class ProviderError(LangfuseLibraryError):
    """Raised when an LLM provider call fails.

    Carries the provider name and, when available, the underlying exception
    so callers can inspect the root cause.
    """

    def __init__(self, provider: str, message: str, original_error: Exception = None):
        self.provider = provider
        self.original_error = original_error

        detail = f"{provider} provider error: {message}"
        if original_error:
            # Append the wrapped exception's text for easier debugging.
            detail = f"{detail} | Original error: {str(original_error)}"

        super().__init__(detail)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class APITimeoutError(LangfuseLibraryError):
    """Raised when an API call exceeds its allotted time budget."""

    def __init__(self, operation: str, timeout: float):
        # Expose both pieces so callers can, e.g., retry with a larger budget.
        self.operation = operation
        self.timeout = timeout
        message = f"{operation} timed out after {timeout} seconds"
        super().__init__(message)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class RateLimitError(LangfuseLibraryError):
    """Raised when a provider reports that its rate limit was exceeded.

    NOTE(review): a ``retry_after`` of 0 is falsy and therefore omitted from
    the message, same as None — confirm that is the intended behavior.
    """

    def __init__(self, provider: str, retry_after: int = None):
        self.provider = provider
        self.retry_after = retry_after

        text = f"Rate limit exceeded for {provider}"
        if retry_after:
            # Surface the provider-supplied backoff hint when one was given.
            text = f"{text} | Retry after {retry_after} seconds"

        super().__init__(text)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class CacheError(LangfuseLibraryError):
    """Signals a failure in a prompt-cache operation."""
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class ValidationError(LangfuseLibraryError):
    """Raised when an input value fails validation.

    The offending field's name is kept on the instance for programmatic use.
    """

    def __init__(self, field: str, message: str):
        self.field = field
        text = f"Validation error for '{field}': {message}"
        super().__init__(text)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class TracingError(LangfuseLibraryError):
    """Raised when a tracing operation fails.

    Keeps the underlying exception (if any) on the instance and folds its
    text into the message.
    """

    def __init__(self, message: str, original_error: Exception = None):
        self.original_error = original_error

        detail = f"Tracing error: {message}"
        if original_error:
            detail = f"{detail} | Original error: {str(original_error)}"

        super().__init__(detail)
|