lucidicai 2.1.3__py3-none-any.whl → 3.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +32 -390
- lucidicai/api/client.py +31 -2
- lucidicai/api/resources/__init__.py +16 -1
- lucidicai/api/resources/dataset.py +422 -82
- lucidicai/api/resources/evals.py +209 -0
- lucidicai/api/resources/event.py +399 -27
- lucidicai/api/resources/experiment.py +108 -0
- lucidicai/api/resources/feature_flag.py +78 -0
- lucidicai/api/resources/prompt.py +84 -0
- lucidicai/api/resources/session.py +545 -38
- lucidicai/client.py +408 -480
- lucidicai/core/config.py +73 -48
- lucidicai/core/errors.py +3 -3
- lucidicai/sdk/bound_decorators.py +321 -0
- lucidicai/sdk/context.py +20 -2
- lucidicai/sdk/decorators.py +283 -74
- lucidicai/sdk/event.py +538 -36
- lucidicai/sdk/event_builder.py +2 -4
- lucidicai/sdk/features/dataset.py +391 -1
- lucidicai/sdk/features/feature_flag.py +344 -3
- lucidicai/sdk/init.py +49 -347
- lucidicai/sdk/session.py +502 -0
- lucidicai/sdk/shutdown_manager.py +103 -46
- lucidicai/session_obj.py +321 -0
- lucidicai/telemetry/context_capture_processor.py +13 -6
- lucidicai/telemetry/extract.py +60 -63
- lucidicai/telemetry/litellm_bridge.py +3 -44
- lucidicai/telemetry/lucidic_exporter.py +143 -131
- lucidicai/telemetry/openai_agents_instrumentor.py +2 -2
- lucidicai/telemetry/openai_patch.py +7 -6
- lucidicai/telemetry/telemetry_manager.py +183 -0
- lucidicai/telemetry/utils/model_pricing.py +21 -30
- lucidicai/telemetry/utils/provider.py +77 -0
- lucidicai/utils/images.py +27 -11
- lucidicai/utils/serialization.py +27 -0
- {lucidicai-2.1.3.dist-info → lucidicai-3.1.0.dist-info}/METADATA +1 -1
- {lucidicai-2.1.3.dist-info → lucidicai-3.1.0.dist-info}/RECORD +39 -29
- {lucidicai-2.1.3.dist-info → lucidicai-3.1.0.dist-info}/WHEEL +0 -0
- {lucidicai-2.1.3.dist-info → lucidicai-3.1.0.dist-info}/top_level.txt +0 -0
lucidicai/api/resources/experiment.py (new file)
@@ -0,0 +1,108 @@
+"""Experiment resource API operations."""
+import logging
+from typing import Any, Dict, List, Optional
+
+from ..client import HttpClient
+
+logger = logging.getLogger("Lucidic")
+
+
+class ExperimentResource:
+    """Handle experiment-related API operations."""
+
+    def __init__(
+        self,
+        http: HttpClient,
+        agent_id: Optional[str] = None,
+        production: bool = False,
+    ):
+        """Initialize experiment resource.
+
+        Args:
+            http: HTTP client instance
+            agent_id: Default agent ID for experiments
+            production: Whether to suppress errors in production mode
+        """
+        self.http = http
+        self._agent_id = agent_id
+        self._production = production
+
+    def create(
+        self,
+        experiment_name: str,
+        description: Optional[str] = None,
+        tags: Optional[List[str]] = None,
+        LLM_boolean_evaluators: Optional[List[str]] = None,
+        LLM_numeric_evaluators: Optional[List[str]] = None,
+    ) -> Optional[str]:
+        """Create a new experiment.
+
+        Args:
+            experiment_name: Name of the experiment.
+            description: Optional description.
+            tags: Optional tags for filtering.
+            LLM_boolean_evaluators: Boolean evaluator names.
+            LLM_numeric_evaluators: Numeric evaluator names.
+
+        Returns:
+            The experiment ID if created successfully, None otherwise.
+        """
+        evaluator_names = []
+        if LLM_boolean_evaluators:
+            evaluator_names.extend(LLM_boolean_evaluators)
+        if LLM_numeric_evaluators:
+            evaluator_names.extend(LLM_numeric_evaluators)
+
+        try:
+            response = self.http.post(
+                "createexperiment",
+                {
+                    "agent_id": self._agent_id,
+                    "experiment_name": experiment_name,
+                    "description": description or "",
+                    "tags": tags or [],
+                    "evaluator_names": evaluator_names,
+                },
+            )
+            return response.get("experiment_id")
+        except Exception as e:
+            if self._production:
+                logger.error(f"[ExperimentResource] Failed to create experiment: {e}")
+                return None
+            raise
+
+    async def acreate(
+        self,
+        experiment_name: str,
+        description: Optional[str] = None,
+        tags: Optional[List[str]] = None,
+        LLM_boolean_evaluators: Optional[List[str]] = None,
+        LLM_numeric_evaluators: Optional[List[str]] = None,
+    ) -> Optional[str]:
+        """Create a new experiment (asynchronous).
+
+        See create() for full documentation.
+        """
+        evaluator_names = []
+        if LLM_boolean_evaluators:
+            evaluator_names.extend(LLM_boolean_evaluators)
+        if LLM_numeric_evaluators:
+            evaluator_names.extend(LLM_numeric_evaluators)
+
+        try:
+            response = await self.http.apost(
+                "createexperiment",
+                {
+                    "agent_id": self._agent_id,
+                    "experiment_name": experiment_name,
+                    "description": description or "",
+                    "tags": tags or [],
+                    "evaluator_names": evaluator_names,
+                },
+            )
+            return response.get("experiment_id")
+        except Exception as e:
+            if self._production:
+                logger.error(f"[ExperimentResource] Failed to create experiment: {e}")
+                return None
+            raise
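
For orientation, a minimal sketch of how the new ExperimentResource might be driven. It assumes an already-constructed `HttpClient` with an API key and base URL (the real constructor lives in `lucidicai/api/client.py` and is not part of this hunk); the experiment name, tags, and evaluator names are hypothetical.

```python
from lucidicai.api.client import HttpClient
from lucidicai.api.resources.experiment import ExperimentResource

# Assumption: HttpClient is constructed elsewhere in the SDK; the keyword
# arguments shown here are illustrative, not the real signature.
http = HttpClient(api_key="lk-...", base_url="https://api.lucidic.ai")

experiments = ExperimentResource(http, agent_id="agent-123", production=True)

# create() posts to "createexperiment" and returns the new experiment_id,
# or None when production=True swallows an API error.
experiment_id = experiments.create(
    experiment_name="prompt-rewrite-ab-test",
    description="Compare v2 prompt against baseline",
    tags=["ab-test", "prompts"],
    LLM_boolean_evaluators=["task_completed"],
    LLM_numeric_evaluators=["answer_quality"],
)
```

Note that the boolean and numeric evaluator lists are merged into a single `evaluator_names` field before the request is sent, so the distinction between the two kinds is not preserved on the wire.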
lucidicai/api/resources/feature_flag.py (new file)
@@ -0,0 +1,78 @@
+"""Feature flag resource API operations."""
+import logging
+from typing import Any, Dict, Optional
+
+from ..client import HttpClient
+
+logger = logging.getLogger("Lucidic")
+
+
+class FeatureFlagResource:
+    """Handle feature flag-related API operations."""
+
+    def __init__(
+        self,
+        http: HttpClient,
+        agent_id: Optional[str] = None,
+        production: bool = False,
+    ):
+        """Initialize feature flag resource.
+
+        Args:
+            http: HTTP client instance
+            agent_id: Default agent ID for feature flags
+            production: Whether to suppress errors in production mode
+        """
+        self.http = http
+        self._agent_id = agent_id
+        self._production = production
+
+    def get(
+        self,
+        flag_name: str,
+        default: Any = None,
+        context: Optional[Dict[str, Any]] = None,
+    ) -> Any:
+        """Get a feature flag value.
+
+        Args:
+            flag_name: Name of the feature flag.
+            default: Default value if flag is not found.
+            context: Optional context for flag evaluation.
+
+        Returns:
+            The flag value or default.
+        """
+        try:
+            response = self.http.get(
+                "featureflags",
+                {"flag_name": flag_name, "agent_id": self._agent_id},
+            )
+            return response.get("value", default)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[FeatureFlagResource] Failed to get feature flag: {e}")
+                return default
+            raise
+
+    async def aget(
+        self,
+        flag_name: str,
+        default: Any = None,
+        context: Optional[Dict[str, Any]] = None,
+    ) -> Any:
+        """Get a feature flag value (asynchronous).
+
+        See get() for full documentation.
+        """
+        try:
+            response = await self.http.aget(
+                "featureflags",
+                {"flag_name": flag_name, "agent_id": self._agent_id},
+            )
+            return response.get("value", default)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[FeatureFlagResource] Failed to get feature flag: {e}")
+                return default
+            raise
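
A similar sketch for FeatureFlagResource, with the same assumed `HttpClient` construction and a hypothetical flag name. One detail visible in the hunk: the `context` argument is accepted by both get() and aget() but is not included in the request payload in this version.

```python
import asyncio

from lucidicai.api.client import HttpClient
from lucidicai.api.resources.feature_flag import FeatureFlagResource

# Assumption: HttpClient construction as in the previous sketch.
http = HttpClient(api_key="lk-...", base_url="https://api.lucidic.ai")
flags = FeatureFlagResource(http, agent_id="agent-123", production=True)

# Synchronous lookup: returns the server value, or the default if the flag
# is missing or (with production=True) the request fails.
use_new_router = flags.get("use_new_router", default=False)

# Asynchronous variant via aget().
enabled = asyncio.run(flags.aget("use_new_router", default=False))
```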
lucidicai/api/resources/prompt.py (new file)
@@ -0,0 +1,84 @@
+"""Prompt resource API operations."""
+import logging
+from typing import Any, Dict, Optional
+
+from ..client import HttpClient
+
+logger = logging.getLogger("Lucidic")
+
+
+class PromptResource:
+    """Handle prompt-related API operations."""
+
+    def __init__(self, http: HttpClient, production: bool = False):
+        """Initialize prompt resource.
+
+        Args:
+            http: HTTP client instance
+            production: Whether to suppress errors in production mode
+        """
+        self.http = http
+        self._production = production
+
+    def get(
+        self,
+        prompt_name: str,
+        variables: Optional[Dict[str, Any]] = None,
+        label: str = "production",
+    ) -> str:
+        """Get a prompt from the prompt database.
+
+        Args:
+            prompt_name: Name of the prompt.
+            variables: Variables to interpolate into the prompt.
+            label: Prompt version label (default: "production").
+
+        Returns:
+            The prompt content with variables interpolated.
+        """
+        try:
+            response = self.http.get(
+                "getprompt",
+                {"prompt_name": prompt_name, "label": label},
+            )
+            prompt = response.get("prompt_content", "")
+
+            # Replace variables
+            if variables:
+                for key, value in variables.items():
+                    prompt = prompt.replace(f"{{{key}}}", str(value))
+
+            return prompt
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to get prompt: {e}")
+                return ""
+            raise
+
+    async def aget(
+        self,
+        prompt_name: str,
+        variables: Optional[Dict[str, Any]] = None,
+        label: str = "production",
+    ) -> str:
+        """Get a prompt from the prompt database (asynchronous).
+
+        See get() for full documentation.
+        """
+        try:
+            response = await self.http.aget(
+                "getprompt",
+                {"prompt_name": prompt_name, "label": label},
+            )
+            prompt = response.get("prompt_content", "")
+
+            if variables:
+                for key, value in variables.items():
+                    prompt = prompt.replace(f"{{{key}}}", str(value))
+
+            return prompt
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to get prompt: {e}")
+                return ""
+            raise
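
Finally, a sketch of the PromptResource interpolation behavior. The prompt name, label, and stored template are hypothetical; the `HttpClient` construction is assumed as in the earlier sketches.

```python
from lucidicai.api.client import HttpClient
from lucidicai.api.resources.prompt import PromptResource

# Assumption: HttpClient construction as in the earlier sketches.
http = HttpClient(api_key="lk-...", base_url="https://api.lucidic.ai")
prompts = PromptResource(http, production=True)

# If the stored "production" version of the prompt were
# "Summarize {document} in {style} style.", the placeholders would be
# filled by plain str.replace on "{document}" and "{style}".
text = prompts.get(
    "summarizer",
    variables={"document": "Q3 earnings call transcript", "style": "bullet-point"},
    label="production",
)
```

Because interpolation is a literal string replace, unknown placeholders are left untouched and brace characters inside variable values are not escaped; on failure with production=True the call logs the error and returns an empty string rather than raising.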