lucidicai 3.3.0__tar.gz → 3.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucidicai-3.3.0 → lucidicai-3.4.0}/PKG-INFO +1 -1
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/__init__.py +6 -1
- lucidicai-3.4.0/lucidicai/api/resources/prompt.py +162 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/client.py +1 -1
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai.egg-info/PKG-INFO +1 -1
- {lucidicai-3.3.0 → lucidicai-3.4.0}/setup.py +1 -1
- lucidicai-3.3.0/lucidicai/api/resources/prompt.py +0 -84
- {lucidicai-3.3.0 → lucidicai-3.4.0}/README.md +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/client.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/dataset.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/evals.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/event.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/experiment.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/feature_flag.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/api/resources/session.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/core/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/core/config.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/core/errors.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/core/types.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/integrations/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/integrations/livekit.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/context.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/decorators.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/error_boundary.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/event.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/event_builder.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/features/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/features/dataset.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/features/feature_flag.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/init.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/session.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/sdk/shutdown_manager.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/session_obj.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/context_bridge.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/context_capture_processor.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/extract.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/litellm_bridge.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/lucidic_exporter.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/openai_agents_instrumentor.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/openai_patch.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/openai_uninstrument.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/telemetry_init.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/telemetry_manager.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/utils/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/utils/model_pricing.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/telemetry/utils/provider.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/utils/__init__.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/utils/logger.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/utils/serialization.py +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai.egg-info/SOURCES.txt +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai.egg-info/requires.txt +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/setup.cfg +0 -0
- {lucidicai-3.3.0 → lucidicai-3.4.0}/tests/test_event_creation.py +0 -0
{lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/__init__.py

@@ -34,11 +34,14 @@ from .core.errors import (
     FeatureFlagError,
 )
 
+# Prompt object
+from .api.resources.prompt import Prompt
+
 # Integrations
 from .integrations.livekit import setup_livekit
 
 # Version
-__version__ = "3.3.0"
+__version__ = "3.4.0"
 
 # All exports
 __all__ = [
@@ -53,6 +56,8 @@ __all__ = [
     "InvalidOperationError",
     "PromptError",
     "FeatureFlagError",
+    # Prompt object
+    "Prompt",
     # Integrations
     "setup_livekit",
     # Version
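With this change the Prompt dataclass defined in lucidicai/api/resources/prompt.py (see the new file below) is re-exported from the package root. A minimal sketch of the object's shape; the field values here are hypothetical, and in practice a Prompt is built by PromptResource rather than by hand:

from lucidicai import Prompt

# Constructed by hand purely to illustrate the fields; normally returned by
# PromptResource.get()/aget() from the API response.
prompt = Prompt(
    raw_content="Hello {{name}}!",        # template as stored in the prompt database
    content="Hello Ada!",                 # template with variables substituted
    metadata={"label": "production"},     # hypothetical metadata payload
)

print(prompt.content)   # "Hello Ada!"
print(str(prompt))      # same string: __str__ returns .content for backward-compatible access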
lucidicai-3.4.0/lucidicai/api/resources/prompt.py

@@ -0,0 +1,162 @@
+"""Prompt resource API operations."""
+import logging
+import time
+from dataclasses import dataclass
+from typing import Any, Dict, Optional, Tuple, TYPE_CHECKING
+
+from ..client import HttpClient
+
+if TYPE_CHECKING:
+    from ...core.config import SDKConfig
+
+logger = logging.getLogger("Lucidic")
+
+
+@dataclass
+class Prompt:
+    """Represents a prompt retrieved from the Lucidic prompt database."""
+
+    raw_content: str
+    content: str
+    metadata: Dict[str, Any]
+
+    def __str__(self) -> str:
+        return self.content
+
+
+class PromptResource:
+    """Handle prompt-related API operations."""
+
+    def __init__(self, http: HttpClient, config: "SDKConfig", production: bool = False):
+        """Initialize prompt resource.
+
+        Args:
+            http: HTTP client instance
+            config: SDK configuration
+            production: Whether to suppress errors in production mode
+        """
+        self.http = http
+        self._config = config
+        self._production = production
+        self._cache: Dict[Tuple[str, str], Dict[str, Any]] = {}
+
+    def _is_cache_valid(self, cache_key: Tuple[str, str], cache_ttl: int) -> bool:
+        """Check if a cached prompt is still valid.
+
+        Args:
+            cache_key: The (prompt_name, label) tuple
+            cache_ttl: Cache TTL in seconds (-1 = indefinite, 0 = no cache)
+
+        Returns:
+            True if cache is valid, False otherwise
+        """
+        if cache_ttl == 0:
+            return False
+        if cache_key not in self._cache:
+            return False
+        if cache_ttl == -1:
+            return True
+        cached = self._cache[cache_key]
+        return (time.time() - cached["timestamp"]) < cache_ttl
+
+    def get(
+        self,
+        prompt_name: str,
+        variables: Optional[Dict[str, Any]] = None,
+        label: str = "production",
+        cache_ttl: int = 0,
+    ) -> Prompt:
+        """Get a prompt from the prompt database.
+
+        Args:
+            prompt_name: Name of the prompt.
+            variables: Variables to interpolate into the prompt.
+            label: Prompt version label (default: "production").
+            cache_ttl: Cache TTL in seconds. 0 = no cache, -1 = cache indefinitely,
+                positive value = seconds before refetching.
+
+        Returns:
+            A Prompt object with raw_content, content (with variables replaced),
+            and metadata. Use str(prompt) for backward-compatible string access.
+        """
+        try:
+            cache_key = (prompt_name, label)
+
+            # Check cache
+            if self._is_cache_valid(cache_key, cache_ttl):
+                raw_content = self._cache[cache_key]["content"]
+                metadata = self._cache[cache_key]["metadata"]
+            else:
+                response = self.http.get(
+                    "getprompt",
+                    {"prompt_name": prompt_name, "label": label, "agent_id": self._config.agent_id},
+                )
+                raw_content = response.get("prompt_content", "")
+                metadata = response.get("metadata", {})
+
+                # Store in cache if caching is enabled
+                if cache_ttl != 0:
+                    self._cache[cache_key] = {
+                        "content": raw_content,
+                        "metadata": metadata,
+                        "timestamp": time.time(),
+                    }
+
+            # Replace variables
+            content = raw_content
+            if variables:
+                for key, value in variables.items():
+                    content = content.replace(f"{{{{{key}}}}}", str(value))
+
+            return Prompt(raw_content=raw_content, content=content, metadata=metadata)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to get prompt: {e}")
+                return Prompt(raw_content="", content="", metadata={})
+            raise
+
+    async def aget(
+        self,
+        prompt_name: str,
+        variables: Optional[Dict[str, Any]] = None,
+        label: str = "production",
+        cache_ttl: int = 0,
+    ) -> Prompt:
+        """Get a prompt from the prompt database (asynchronous).
+
+        See get() for full documentation.
+        """
+        try:
+            cache_key = (prompt_name, label)
+
+            # Check cache
+            if self._is_cache_valid(cache_key, cache_ttl):
+                raw_content = self._cache[cache_key]["content"]
+                metadata = self._cache[cache_key]["metadata"]
+            else:
+                response = await self.http.aget(
+                    "getprompt",
+                    {"prompt_name": prompt_name, "label": label, "agent_id": self._config.agent_id},
+                )
+                raw_content = response.get("prompt_content", "")
+                metadata = response.get("metadata", {})
+
+                # Store in cache if caching is enabled
+                if cache_ttl != 0:
+                    self._cache[cache_key] = {
+                        "content": raw_content,
+                        "metadata": metadata,
+                        "timestamp": time.time(),
+                    }
+
+            content = raw_content
+            if variables:
+                for key, value in variables.items():
+                    content = content.replace(f"{{{{{key}}}}}", str(value))
+
+            return Prompt(raw_content=raw_content, content=content, metadata=metadata)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to get prompt: {e}")
+                return Prompt(raw_content="", content="", metadata={})
+            raise
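A minimal sketch of how the new cache_ttl and variable-interpolation behavior could be exercised, assuming the package is installed. StubHttp and the SimpleNamespace config are hypothetical stand-ins that only mimic the http.get("getprompt", params) -> dict call and the config.agent_id attribute used above; they are not part of the SDK:

from types import SimpleNamespace

from lucidicai.api.resources.prompt import PromptResource

class StubHttp:
    """Hypothetical stand-in for the SDK's HttpClient (illustration only)."""
    def __init__(self):
        self.calls = 0

    def get(self, path, params):
        # Mirrors the http.get("getprompt", {...}) -> dict call made by PromptResource.
        self.calls += 1
        return {"prompt_content": "Hello {{name}}!", "metadata": {"label": params["label"]}}

http = StubHttp()
config = SimpleNamespace(agent_id="agent-123")      # hypothetical agent id
prompts = PromptResource(http, config, production=True)

# cache_ttl=0 (the default): every call fetches from the backend, nothing is cached.
prompts.get("greeting", {"name": "Ada"})
prompts.get("greeting", {"name": "Bob"})
assert http.calls == 2

# cache_ttl=-1: cached indefinitely per (prompt_name, label), so only the first call fetches.
prompts.get("greeting", {"name": "Ada"}, cache_ttl=-1)
p = prompts.get("greeting", {"name": "Bob"}, cache_ttl=-1)
assert http.calls == 3

# {{variable}} placeholders are substituted in content; raw_content keeps the template.
assert p.raw_content == "Hello {{name}}!"
assert p.content == "Hello Bob!"
assert str(p) == p.content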
{lucidicai-3.3.0 → lucidicai-3.4.0}/lucidicai/client.py

@@ -155,7 +155,7 @@ class LucidicAI:
             "events": EventResource(self._http, self._production),
             "datasets": DatasetResource(self._http, self._config.agent_id, self._production),
             "experiments": ExperimentResource(self._http, self._config.agent_id, self._production),
-            "prompts": PromptResource(self._http, self._production),
+            "prompts": PromptResource(self._http, self._config, self._production),
             "feature_flags": FeatureFlagResource(self._http, self._config.agent_id, self._production),
             "evals": EvalsResource(self._http, self._production),
         }
lucidicai-3.3.0/lucidicai/api/resources/prompt.py

@@ -1,84 +0,0 @@
-"""Prompt resource API operations."""
-import logging
-from typing import Any, Dict, Optional
-
-from ..client import HttpClient
-
-logger = logging.getLogger("Lucidic")
-
-
-class PromptResource:
-    """Handle prompt-related API operations."""
-
-    def __init__(self, http: HttpClient, production: bool = False):
-        """Initialize prompt resource.
-
-        Args:
-            http: HTTP client instance
-            production: Whether to suppress errors in production mode
-        """
-        self.http = http
-        self._production = production
-
-    def get(
-        self,
-        prompt_name: str,
-        variables: Optional[Dict[str, Any]] = None,
-        label: str = "production",
-    ) -> str:
-        """Get a prompt from the prompt database.
-
-        Args:
-            prompt_name: Name of the prompt.
-            variables: Variables to interpolate into the prompt.
-            label: Prompt version label (default: "production").
-
-        Returns:
-            The prompt content with variables interpolated.
-        """
-        try:
-            response = self.http.get(
-                "getprompt",
-                {"prompt_name": prompt_name, "label": label},
-            )
-            prompt = response.get("prompt_content", "")
-
-            # Replace variables
-            if variables:
-                for key, value in variables.items():
-                    prompt = prompt.replace(f"{{{key}}}", str(value))
-
-            return prompt
-        except Exception as e:
-            if self._production:
-                logger.error(f"[PromptResource] Failed to get prompt: {e}")
-                return ""
-            raise
-
-    async def aget(
-        self,
-        prompt_name: str,
-        variables: Optional[Dict[str, Any]] = None,
-        label: str = "production",
-    ) -> str:
-        """Get a prompt from the prompt database (asynchronous).
-
-        See get() for full documentation.
-        """
-        try:
-            response = await self.http.aget(
-                "getprompt",
-                {"prompt_name": prompt_name, "label": label},
-            )
-            prompt = response.get("prompt_content", "")
-
-            if variables:
-                for key, value in variables.items():
-                    prompt = prompt.replace(f"{{{key}}}", str(value))
-
-            return prompt
-        except Exception as e:
-            if self._production:
-                logger.error(f"[PromptResource] Failed to get prompt: {e}")
-                return ""
-            raise
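Note that the placeholder syntax changes between the two versions: the removed 3.3.0 code replaced single-brace placeholders (the f-string f"{{{key}}}" expands to "{key}"), while the 3.4.0 code above replaces double-brace placeholders (f"{{{{{key}}}}}" expands to "{{key}}"). A short standalone illustration of the two expansions:

key, value = "name", "Ada"

old_placeholder = f"{{{key}}}"        # -> "{name}"   (3.3.0 behavior)
new_placeholder = f"{{{{{key}}}}}"    # -> "{{name}}" (3.4.0 behavior)

print("Hello {name}!".replace(old_placeholder, value))     # Hello Ada!
print("Hello {{name}}!".replace(new_placeholder, value))   # Hello Ada!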