lucidicai 1.1.20__tar.gz → 3.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai-3.0.0/PKG-INFO +10 -0
- lucidicai-3.0.0/README.md +666 -0
- lucidicai-3.0.0/lucidicai/__init__.py +55 -0
- lucidicai-3.0.0/lucidicai/api/__init__.py +1 -0
- lucidicai-3.0.0/lucidicai/api/client.py +386 -0
- lucidicai-3.0.0/lucidicai/api/resources/__init__.py +16 -0
- lucidicai-3.0.0/lucidicai/api/resources/dataset.py +532 -0
- lucidicai-3.0.0/lucidicai/api/resources/event.py +460 -0
- lucidicai-3.0.0/lucidicai/api/resources/experiment.py +108 -0
- lucidicai-3.0.0/lucidicai/api/resources/feature_flag.py +78 -0
- lucidicai-3.0.0/lucidicai/api/resources/prompt.py +84 -0
- lucidicai-3.0.0/lucidicai/api/resources/session.py +633 -0
- lucidicai-3.0.0/lucidicai/client.py +428 -0
- lucidicai-3.0.0/lucidicai/core/__init__.py +1 -0
- lucidicai-3.0.0/lucidicai/core/config.py +248 -0
- lucidicai-3.0.0/lucidicai/core/errors.py +60 -0
- lucidicai-3.0.0/lucidicai/core/types.py +35 -0
- lucidicai-3.0.0/lucidicai/sdk/__init__.py +1 -0
- lucidicai-3.0.0/lucidicai/sdk/context.py +291 -0
- lucidicai-3.0.0/lucidicai/sdk/decorators.py +396 -0
- lucidicai-3.0.0/lucidicai/sdk/error_boundary.py +299 -0
- lucidicai-3.0.0/lucidicai/sdk/event.py +628 -0
- lucidicai-3.0.0/lucidicai/sdk/event_builder.py +302 -0
- lucidicai-3.0.0/lucidicai/sdk/features/__init__.py +1 -0
- lucidicai-3.0.0/lucidicai/sdk/features/dataset.py +781 -0
- lucidicai-3.0.0/lucidicai/sdk/features/feature_flag.py +724 -0
- lucidicai-3.0.0/lucidicai/sdk/init.py +137 -0
- lucidicai-3.0.0/lucidicai/sdk/session.py +502 -0
- lucidicai-3.0.0/lucidicai/sdk/shutdown_manager.py +359 -0
- lucidicai-3.0.0/lucidicai/session_obj.py +321 -0
- lucidicai-3.0.0/lucidicai/telemetry/context_bridge.py +82 -0
- lucidicai-3.0.0/lucidicai/telemetry/context_capture_processor.py +88 -0
- lucidicai-3.0.0/lucidicai/telemetry/extract.py +189 -0
- lucidicai-3.0.0/lucidicai/telemetry/litellm_bridge.py +359 -0
- lucidicai-3.0.0/lucidicai/telemetry/lucidic_exporter.py +318 -0
- lucidicai-3.0.0/lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai-3.0.0/lucidicai/telemetry/openai_patch.py +426 -0
- lucidicai-3.0.0/lucidicai/telemetry/openai_uninstrument.py +87 -0
- lucidicai-3.0.0/lucidicai/telemetry/telemetry_init.py +204 -0
- lucidicai-3.0.0/lucidicai/telemetry/telemetry_manager.py +183 -0
- lucidicai-3.0.0/lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai-3.0.0/lucidicai/telemetry/utils/model_pricing.py +269 -0
- lucidicai-3.0.0/lucidicai/telemetry/utils/provider.py +77 -0
- lucidicai-3.0.0/lucidicai/utils/__init__.py +1 -0
- lucidicai-3.0.0/lucidicai/utils/logger.py +168 -0
- lucidicai-3.0.0/lucidicai/utils/serialization.py +27 -0
- lucidicai-3.0.0/lucidicai.egg-info/PKG-INFO +10 -0
- lucidicai-3.0.0/lucidicai.egg-info/SOURCES.txt +53 -0
- lucidicai-3.0.0/lucidicai.egg-info/requires.txt +17 -0
- {lucidicai-1.1.20 → lucidicai-3.0.0}/setup.py +12 -4
- lucidicai-3.0.0/tests/test_event_creation.py +200 -0
- lucidicai-1.1.20/PKG-INFO +0 -29
- lucidicai-1.1.20/README.md +0 -1
- lucidicai-1.1.20/lucidicai/__init__.py +0 -446
- lucidicai-1.1.20/lucidicai/action.py +0 -8
- lucidicai-1.1.20/lucidicai/client.py +0 -155
- lucidicai-1.1.20/lucidicai/errors.py +0 -21
- lucidicai-1.1.20/lucidicai/event.py +0 -53
- lucidicai-1.1.20/lucidicai/image_upload.py +0 -74
- lucidicai-1.1.20/lucidicai/model_pricing.py +0 -50
- lucidicai-1.1.20/lucidicai/providers/anthropic_handler.py +0 -124
- lucidicai-1.1.20/lucidicai/providers/base_providers.py +0 -22
- lucidicai-1.1.20/lucidicai/providers/langchain.py +0 -583
- lucidicai-1.1.20/lucidicai/providers/openai_handler.py +0 -133
- lucidicai-1.1.20/lucidicai/session.py +0 -117
- lucidicai-1.1.20/lucidicai/singleton.py +0 -17
- lucidicai-1.1.20/lucidicai/state.py +0 -8
- lucidicai-1.1.20/lucidicai/step.py +0 -96
- lucidicai-1.1.20/lucidicai.egg-info/PKG-INFO +0 -29
- lucidicai-1.1.20/lucidicai.egg-info/SOURCES.txt +0 -24
- lucidicai-1.1.20/lucidicai.egg-info/requires.txt +0 -8
- lucidicai-1.1.20/tests/test_session.py +0 -424
- {lucidicai-1.1.20/lucidicai/providers → lucidicai-3.0.0/lucidicai/telemetry}/__init__.py +0 -0
- {lucidicai-1.1.20 → lucidicai-3.0.0}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-1.1.20 → lucidicai-3.0.0}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-1.1.20 → lucidicai-3.0.0}/setup.cfg +0 -0
lucidicai-3.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: lucidicai
|
|
3
|
+
Version: 3.0.0
|
|
4
|
+
Summary: Lucidic AI Python SDK
|
|
5
|
+
Author: Andy Liang
|
|
6
|
+
Author-email: andy@lucidic.ai
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Requires-Python: >=3.6
|
|
@@ -0,0 +1,666 @@
|
|
|
1
|
+
# Lucidic AI Python SDK
|
|
2
|
+
|
|
3
|
+
The official Python SDK for [Lucidic AI](https://lucidic.ai), providing comprehensive observability and analytics for LLM-powered applications.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Session & Event Tracking** - Track complex AI workflows with typed, immutable events and automatic nesting
|
|
8
|
+
- **Multi-Provider Support** - Automatic instrumentation for OpenAI, Anthropic, LangChain, Google Generative AI (Gemini), Vertex AI, AWS Bedrock, Cohere, Groq, and more
|
|
9
|
+
- **Real-time Analytics** - Monitor costs, performance, and behavior of your AI applications
|
|
10
|
+
- **Data Privacy** - Built-in masking functions to protect sensitive information
|
|
11
|
+
- **Screenshot Support** - Capture and analyze visual context in your AI workflows
|
|
12
|
+
- **Production Ready** - OpenTelemetry-based instrumentation for enterprise-scale applications
|
|
13
|
+
- **Decorators** - Pythonic decorators for effortless function tracking with automatic nesting
|
|
14
|
+
- **Async Support** - Full support for async/await patterns and concurrent execution
|
|
15
|
+
|
|
16
|
+
## Installation
|
|
17
|
+
|
|
18
|
+
```bash
|
|
19
|
+
pip install lucidicai
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## Quick Start
|
|
23
|
+
|
|
24
|
+
```python
|
|
25
|
+
import lucidicai as lai
|
|
26
|
+
from openai import OpenAI
|
|
27
|
+
|
|
28
|
+
# Initialize the SDK
|
|
29
|
+
session_id = lai.init(
|
|
30
|
+
session_name="My AI Assistant",
|
|
31
|
+
providers=["openai"]
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
# Use your LLM as normal - Lucidic automatically tracks the interaction
|
|
35
|
+
client = OpenAI()
|
|
36
|
+
response = client.chat.completions.create(
|
|
37
|
+
model="gpt-4",
|
|
38
|
+
messages=[{"role": "user", "content": "Hello, how are you?"}]
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
# Events are automatically created and queued for delivery
|
|
42
|
+
# Session automatically ends on process exit (auto_end=True by default)
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
### Quick Start with Context Manager
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
import lucidicai as lai
|
|
49
|
+
from openai import OpenAI
|
|
50
|
+
|
|
51
|
+
# All-in-one lifecycle: init → bind → run → auto-end at context exit
|
|
52
|
+
with lai.session(session_name="My AI Assistant", providers=["openai"]):
|
|
53
|
+
client = OpenAI()
|
|
54
|
+
response = client.chat.completions.create(
|
|
55
|
+
model="gpt-4",
|
|
56
|
+
messages=[{"role": "user", "content": "Hello, how are you?"}]
|
|
57
|
+
)
|
|
58
|
+
# Session automatically ends when exiting the context
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
## Configuration
|
|
62
|
+
|
|
63
|
+
### Environment Variables
|
|
64
|
+
|
|
65
|
+
Create a `.env` file or set these environment variables:
|
|
66
|
+
|
|
67
|
+
```bash
|
|
68
|
+
LUCIDIC_API_KEY=your_api_key # Required: Your Lucidic API key
|
|
69
|
+
LUCIDIC_AGENT_ID=your_agent_id # Required: Your agent identifier
|
|
70
|
+
LUCIDIC_DEBUG=False # Optional: Enable debug logging
|
|
71
|
+
LUCIDIC_VERBOSE=False # Optional: Enable verbose event logging
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
### Initialization Options
|
|
75
|
+
|
|
76
|
+
```python
|
|
77
|
+
lai.init(
|
|
78
|
+
session_name="My Session", # Optional: Name for this session
|
|
79
|
+
api_key="...", # Optional: Override env var
|
|
80
|
+
agent_id="...", # Optional: Override env var
|
|
81
|
+
providers=["openai", "anthropic"], # Optional: LLM providers to track
|
|
82
|
+
task="Process customer request", # Optional: High-level task description
|
|
83
|
+
auto_end=True, # Optional: Auto-end session on exit (default: True)
|
|
84
|
+
masking_function=my_mask_func, # Optional: Custom PII masking
|
|
85
|
+
tags=["customer-support", "v1.2"], # Optional: Session tags
|
|
86
|
+
evaluators=[...], # Optional: Evaluation criteria
|
|
87
|
+
experiment_id="...", # Optional: Link to experiment
|
|
88
|
+
capture_uncaught=True # Optional: Capture crash events (default: True)
|
|
89
|
+
)
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
## Core Concepts
|
|
93
|
+
|
|
94
|
+
### Sessions
|
|
95
|
+
A session represents a complete interaction or workflow. Sessions are automatically tracked and can be nested across threads and async tasks.
|
|
96
|
+
|
|
97
|
+
```python
|
|
98
|
+
# Start a new session
|
|
99
|
+
session_id = lai.init(session_name="Customer Support Chat")
|
|
100
|
+
|
|
101
|
+
# Update session metadata
|
|
102
|
+
lai.update_session(
|
|
103
|
+
task="Resolved billing issue",
|
|
104
|
+
session_eval=0.95,
|
|
105
|
+
is_successful=True
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# End session (or let auto_end handle it)
|
|
109
|
+
lai.end_session(is_successful=True, session_eval=0.9)
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
### Session Context Management (Thread & Async Safe)
|
|
113
|
+
|
|
114
|
+
Lucidic uses Python's `contextvars` to bind sessions to the current execution context, ensuring correct attribution in concurrent environments.
|
|
115
|
+
|
|
116
|
+
#### Pattern 1: Full Lifecycle Management
|
|
117
|
+
|
|
118
|
+
```python
|
|
119
|
+
import lucidicai as lai
|
|
120
|
+
from openai import OpenAI
|
|
121
|
+
|
|
122
|
+
# Synchronous version
|
|
123
|
+
with lai.session(session_name="order-flow", providers=["openai"]):
|
|
124
|
+
OpenAI().chat.completions.create(
|
|
125
|
+
model="gpt-4",
|
|
126
|
+
messages=[{"role": "user", "content": "Place order"}]
|
|
127
|
+
)
|
|
128
|
+
# Session automatically ends at context exit
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
```python
|
|
132
|
+
# Async version
|
|
133
|
+
import asyncio
|
|
134
|
+
from openai import AsyncOpenAI
|
|
135
|
+
|
|
136
|
+
async def main():
|
|
137
|
+
async with lai.session_async(session_name="async-flow", providers=["openai"]):
|
|
138
|
+
await AsyncOpenAI().chat.completions.create(
|
|
139
|
+
model="gpt-4",
|
|
140
|
+
messages=[{"role": "user", "content": "Hello"}]
|
|
141
|
+
)
|
|
142
|
+
|
|
143
|
+
asyncio.run(main())
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
#### Pattern 2: Session Binding (Without Auto-End)
|
|
147
|
+
|
|
148
|
+
```python
|
|
149
|
+
# Create session without auto-end
|
|
150
|
+
sid = lai.init(session_name="long-running", providers=["openai"], auto_end=False)
|
|
151
|
+
|
|
152
|
+
# Bind for specific operations
|
|
153
|
+
with lai.bind_session(sid):
|
|
154
|
+
# Operations here are attributed to this session
|
|
155
|
+
OpenAI().chat.completions.create(...)
|
|
156
|
+
|
|
157
|
+
# Session remains open - end manually when ready
|
|
158
|
+
lai.end_session()
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
#### Pattern 3: Function Wrappers
|
|
162
|
+
|
|
163
|
+
```python
|
|
164
|
+
def process_request():
|
|
165
|
+
from openai import OpenAI
|
|
166
|
+
return OpenAI().chat.completions.create(
|
|
167
|
+
model="gpt-4",
|
|
168
|
+
messages=[{"role": "user", "content": "Process this"}]
|
|
169
|
+
)
|
|
170
|
+
|
|
171
|
+
# Full lifecycle wrapper
|
|
172
|
+
result = lai.run_session(
|
|
173
|
+
process_request,
|
|
174
|
+
init_params={"session_name": "wrapped", "providers": ["openai"]}
|
|
175
|
+
)
|
|
176
|
+
|
|
177
|
+
# Bind-only wrapper
|
|
178
|
+
sid = lai.init(session_name="manual", providers=["openai"], auto_end=False)
|
|
179
|
+
result = lai.run_in_session(sid, process_request)
|
|
180
|
+
lai.end_session()
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
### Automatic Session Management
|
|
184
|
+
|
|
185
|
+
By default, Lucidic automatically ends your session when your process exits:
|
|
186
|
+
|
|
187
|
+
```python
|
|
188
|
+
# Default behavior - session auto-ends on exit
|
|
189
|
+
lai.init(session_name="My Session") # auto_end=True by default
|
|
190
|
+
|
|
191
|
+
# Disable auto-end for manual control
|
|
192
|
+
lai.init(session_name="My Session", auto_end=False)
|
|
193
|
+
# Must call lai.end_session() explicitly
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
### Events
|
|
197
|
+
|
|
198
|
+
Events are automatically created when using instrumented providers. All events are typed and immutable once created.
|
|
199
|
+
|
|
200
|
+
```python
|
|
201
|
+
# Manual event creation with typed payloads
|
|
202
|
+
event_id = lai.create_event(
|
|
203
|
+
type="function_call", # or "llm_generation", "error_traceback", "generic"
|
|
204
|
+
function_name="process_data",
|
|
205
|
+
arguments={"input": "data"},
|
|
206
|
+
return_value={"result": "success"},
|
|
207
|
+
duration=1.5
|
|
208
|
+
)
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
Event types and their payloads:
|
|
212
|
+
- **llm_generation**: LLM API calls with request/response/usage data
|
|
213
|
+
- **function_call**: Function executions with arguments and return values
|
|
214
|
+
- **error_traceback**: Errors with full traceback information
|
|
215
|
+
- **generic**: General events with custom details
|
|
216
|
+
|
|
217
|
+
## Provider Integration
|
|
218
|
+
|
|
219
|
+
### OpenAI
|
|
220
|
+
```python
|
|
221
|
+
from openai import OpenAI
|
|
222
|
+
|
|
223
|
+
lai.init(session_name="OpenAI Example", providers=["openai"])
|
|
224
|
+
client = OpenAI()
|
|
225
|
+
|
|
226
|
+
# All OpenAI API calls are automatically tracked
|
|
227
|
+
response = client.chat.completions.create(
|
|
228
|
+
model="gpt-4",
|
|
229
|
+
messages=[{"role": "user", "content": "Write a haiku about coding"}]
|
|
230
|
+
)
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
### Anthropic
|
|
234
|
+
```python
|
|
235
|
+
from anthropic import Anthropic
|
|
236
|
+
|
|
237
|
+
lai.init(session_name="Claude Example", providers=["anthropic"])
|
|
238
|
+
client = Anthropic()
|
|
239
|
+
|
|
240
|
+
# Anthropic API calls are automatically tracked
|
|
241
|
+
response = client.messages.create(
|
|
242
|
+
model="claude-3-opus-20240229",
|
|
243
|
+
messages=[{"role": "user", "content": "Explain quantum computing"}]
|
|
244
|
+
)
|
|
245
|
+
```
|
|
246
|
+
|
|
247
|
+
### LangChain
|
|
248
|
+
```python
|
|
249
|
+
from langchain_openai import ChatOpenAI
|
|
250
|
+
from langchain_core.messages import HumanMessage
|
|
251
|
+
|
|
252
|
+
lai.init(session_name="LangChain Example", providers=["langchain"])
|
|
253
|
+
|
|
254
|
+
# LangChain calls are automatically tracked
|
|
255
|
+
llm = ChatOpenAI(model="gpt-4")
|
|
256
|
+
response = llm.invoke([HumanMessage(content="Hello!")])
|
|
257
|
+
```
|
|
258
|
+
|
|
259
|
+
### Google Generative AI (Gemini)
|
|
260
|
+
```python
|
|
261
|
+
import google.generativeai as genai
|
|
262
|
+
|
|
263
|
+
lai.init(session_name="Gemini Example", providers=["google"])
|
|
264
|
+
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
|
265
|
+
|
|
266
|
+
model = genai.GenerativeModel("gemini-1.5-flash")
|
|
267
|
+
response = model.generate_content("Write a haiku about clouds")
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
### Vertex AI
|
|
271
|
+
```python
|
|
272
|
+
from google.cloud import aiplatform
|
|
273
|
+
from vertexai.generative_models import GenerativeModel
|
|
274
|
+
|
|
275
|
+
lai.init(session_name="Vertex Example", providers=["vertexai"])
|
|
276
|
+
aiplatform.init(project=os.getenv("GCP_PROJECT"), location="us-central1")
|
|
277
|
+
|
|
278
|
+
model = GenerativeModel("gemini-1.5-flash")
|
|
279
|
+
response = model.generate_content("Say hello")
|
|
280
|
+
```
|
|
281
|
+
|
|
282
|
+
### AWS Bedrock
|
|
283
|
+
```python
|
|
284
|
+
import boto3
|
|
285
|
+
|
|
286
|
+
lai.init(session_name="Bedrock Example", providers=["bedrock"])
|
|
287
|
+
client = boto3.client("bedrock-runtime", region_name="us-east-1")
|
|
288
|
+
|
|
289
|
+
response = client.invoke_model(
|
|
290
|
+
modelId="amazon.nova-lite-v1:0",
|
|
291
|
+
body=b'{"inputText": "Hello from Bedrock"}',
|
|
292
|
+
contentType="application/json",
|
|
293
|
+
accept="application/json"
|
|
294
|
+
)
|
|
295
|
+
```
|
|
296
|
+
|
|
297
|
+
### Cohere
|
|
298
|
+
```python
|
|
299
|
+
import cohere
|
|
300
|
+
|
|
301
|
+
lai.init(session_name="Cohere Example", providers=["cohere"])
|
|
302
|
+
co = cohere.ClientV2(api_key=os.getenv("COHERE_API_KEY"))
|
|
303
|
+
response = co.chat(
|
|
304
|
+
model="command-r",
|
|
305
|
+
messages=[{"role": "user", "content": "Hello"}]
|
|
306
|
+
)
|
|
307
|
+
```
|
|
308
|
+
|
|
309
|
+
### Groq
|
|
310
|
+
```python
|
|
311
|
+
from groq import Groq
|
|
312
|
+
|
|
313
|
+
lai.init(session_name="Groq Example", providers=["groq"])
|
|
314
|
+
client = Groq(api_key=os.getenv("GROQ_API_KEY"))
|
|
315
|
+
response = client.chat.completions.create(
|
|
316
|
+
model="llama-3.1-8b-instant",
|
|
317
|
+
messages=[{"role": "user", "content": "Hello from Groq"}]
|
|
318
|
+
)
|
|
319
|
+
```
|
|
320
|
+
|
|
321
|
+
## Advanced Features
|
|
322
|
+
|
|
323
|
+
### Function Tracking with Decorators
|
|
324
|
+
|
|
325
|
+
The `@lai.event` decorator automatically tracks function calls as nested events:
|
|
326
|
+
|
|
327
|
+
```python
|
|
328
|
+
@lai.event()
|
|
329
|
+
def process_data(input_data: dict) -> dict:
|
|
330
|
+
# Function automatically tracked with arguments and return value
|
|
331
|
+
result = transform(input_data)
|
|
332
|
+
return result
|
|
333
|
+
|
|
334
|
+
# Creates a FUNCTION_CALL event with full tracking
|
|
335
|
+
output = process_data({"key": "value"})
|
|
336
|
+
```
|
|
337
|
+
|
|
338
|
+
#### Nested Event Tracking
|
|
339
|
+
|
|
340
|
+
Events automatically nest when functions call other tracked functions:
|
|
341
|
+
|
|
342
|
+
```python
|
|
343
|
+
@lai.event()
|
|
344
|
+
def outer_function(data: str) -> dict:
|
|
345
|
+
# This creates a parent event
|
|
346
|
+
result = inner_function(data)
|
|
347
|
+
return {"processed": result}
|
|
348
|
+
|
|
349
|
+
@lai.event()
|
|
350
|
+
def inner_function(data: str) -> str:
|
|
351
|
+
# This creates a child event nested under outer_function
|
|
352
|
+
return data.upper()
|
|
353
|
+
|
|
354
|
+
# Creates nested events with parent-child relationship
|
|
355
|
+
output = outer_function("hello")
|
|
356
|
+
```
|
|
357
|
+
|
|
358
|
+
#### Error Tracking
|
|
359
|
+
|
|
360
|
+
The decorator automatically captures exceptions:
|
|
361
|
+
|
|
362
|
+
```python
|
|
363
|
+
@lai.event()
|
|
364
|
+
def risky_operation(value: int) -> int:
|
|
365
|
+
if value < 0:
|
|
366
|
+
raise ValueError("Value must be positive")
|
|
367
|
+
return value * 2
|
|
368
|
+
|
|
369
|
+
try:
|
|
370
|
+
risky_operation(-1)
|
|
371
|
+
except ValueError:
|
|
372
|
+
pass # Error is still tracked in the event
|
|
373
|
+
```
|
|
374
|
+
|
|
375
|
+
#### Async Function Support
|
|
376
|
+
|
|
377
|
+
```python
|
|
378
|
+
@lai.event()
|
|
379
|
+
async def async_process(url: str) -> dict:
|
|
380
|
+
async with aiohttp.ClientSession() as session:
|
|
381
|
+
async with session.get(url) as response:
|
|
382
|
+
return await response.json()
|
|
383
|
+
|
|
384
|
+
# Async functions are fully supported
|
|
385
|
+
result = await async_process("https://api.example.com/data")
|
|
386
|
+
```
|
|
387
|
+
|
|
388
|
+
### Data Masking
|
|
389
|
+
|
|
390
|
+
Protect sensitive information with custom masking functions:
|
|
391
|
+
|
|
392
|
+
```python
|
|
393
|
+
def mask_pii(text):
|
|
394
|
+
# Your PII masking logic here
|
|
395
|
+
import re
|
|
396
|
+
# Example: mask email addresses
|
|
397
|
+
text = re.sub(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b', '[EMAIL]', text)
|
|
398
|
+
# Example: mask phone numbers
|
|
399
|
+
text = re.sub(r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b', '[PHONE]', text)
|
|
400
|
+
return text
|
|
401
|
+
|
|
402
|
+
lai.init(
|
|
403
|
+
session_name="Secure Session",
|
|
404
|
+
masking_function=mask_pii
|
|
405
|
+
)
|
|
406
|
+
```
|
|
407
|
+
|
|
408
|
+
### Experiments
|
|
409
|
+
|
|
410
|
+
Create experiments to group and analyze multiple sessions:
|
|
411
|
+
|
|
412
|
+
```python
|
|
413
|
+
# Create an experiment
|
|
414
|
+
experiment_id = lai.create_experiment(
|
|
415
|
+
experiment_name="Prompt Optimization Test",
|
|
416
|
+
LLM_boolean_evaluators=["response_quality", "latency"],
|
|
417
|
+
LLM_numeric_evaluators=["coherence", "relevance"],
|
|
418
|
+
description="Testing different prompt strategies",
|
|
419
|
+
tags=["A/B-test", "prompts"]
|
|
420
|
+
)
|
|
421
|
+
|
|
422
|
+
# Link sessions to the experiment
|
|
423
|
+
lai.init(
|
|
424
|
+
session_name="Test Variant A",
|
|
425
|
+
experiment_id=experiment_id,
|
|
426
|
+
providers=["openai"]
|
|
427
|
+
)
|
|
428
|
+
```
|
|
429
|
+
|
|
430
|
+
### Prompt Management
|
|
431
|
+
|
|
432
|
+
Fetch and cache prompts from the Lucidic platform:
|
|
433
|
+
|
|
434
|
+
```python
|
|
435
|
+
prompt = lai.get_prompt(
|
|
436
|
+
prompt_name="customer_support",
|
|
437
|
+
variables={"issue_type": "billing", "customer_name": "John"},
|
|
438
|
+
cache_ttl=3600, # Cache for 1 hour
|
|
439
|
+
label="v1.2"
|
|
440
|
+
)
|
|
441
|
+
|
|
442
|
+
# Variables are replaced in the prompt template
|
|
443
|
+
# {{issue_type}} → "billing"
|
|
444
|
+
# {{customer_name}} → "John"
|
|
445
|
+
```
|
|
446
|
+
|
|
447
|
+
### Manual Flush
|
|
448
|
+
|
|
449
|
+
Force flush all pending telemetry data:
|
|
450
|
+
|
|
451
|
+
```python
|
|
452
|
+
# Ensure all events are sent immediately
|
|
453
|
+
lai.flush(timeout_seconds=2.0)
|
|
454
|
+
```
|
|
455
|
+
|
|
456
|
+
## Error Handling
|
|
457
|
+
|
|
458
|
+
The SDK provides specific exceptions for different error scenarios:
|
|
459
|
+
|
|
460
|
+
```python
|
|
461
|
+
from lucidicai.core.errors import (
|
|
462
|
+
APIKeyVerificationError,
|
|
463
|
+
InvalidOperationError,
|
|
464
|
+
LucidicNotInitializedError,
|
|
465
|
+
PromptError
|
|
466
|
+
)
|
|
467
|
+
|
|
468
|
+
try:
|
|
469
|
+
lai.init(session_name="My Session")
|
|
470
|
+
except APIKeyVerificationError:
|
|
471
|
+
print("Invalid API key - check your credentials")
|
|
472
|
+
except LucidicNotInitializedError:
|
|
473
|
+
print("SDK not initialized - call lai.init() first")
|
|
474
|
+
```
|
|
475
|
+
|
|
476
|
+
## Crash Event Capture
|
|
477
|
+
|
|
478
|
+
The SDK automatically captures uncaught exceptions and creates error events:
|
|
479
|
+
|
|
480
|
+
```python
|
|
481
|
+
lai.init(
|
|
482
|
+
session_name="my-session",
|
|
483
|
+
capture_uncaught=True # Default: True
|
|
484
|
+
)
|
|
485
|
+
|
|
486
|
+
# If an uncaught exception occurs:
|
|
487
|
+
# 1. An error_traceback event is created with the full traceback
|
|
488
|
+
# 2. The session is ended as unsuccessful
|
|
489
|
+
# 3. Telemetry is flushed before exit
|
|
490
|
+
```
|
|
491
|
+
|
|
492
|
+
This feature also handles:
|
|
493
|
+
- **SIGINT/SIGTERM signals**: Graceful shutdown with event creation
|
|
494
|
+
- **Thread exceptions**: Main thread exceptions trigger full shutdown
|
|
495
|
+
- **Masking**: Error messages are masked if a masking_function is provided
|
|
496
|
+
|
|
497
|
+
## Performance & Architecture
|
|
498
|
+
|
|
499
|
+
### Non-Blocking Event Delivery
|
|
500
|
+
- Events are queued and delivered asynchronously
|
|
501
|
+
- Returns immediately with client-side UUID
|
|
502
|
+
- Background worker handles batching and retries
|
|
503
|
+
|
|
504
|
+
### Efficient Batching
|
|
505
|
+
- Events batched every 100ms or 100 events
|
|
506
|
+
- Large payloads (>64KB) automatically compressed with gzip
|
|
507
|
+
- Automatic blob storage for oversized events
|
|
508
|
+
|
|
509
|
+
### Thread & Async Safety
|
|
510
|
+
- Context-aware session binding using contextvars
|
|
511
|
+
- Thread-safe singleton pattern
|
|
512
|
+
- Full async/await support
|
|
513
|
+
|
|
514
|
+
### OpenTelemetry Integration
|
|
515
|
+
- Industry-standard observability
|
|
516
|
+
- Automatic span → event conversion
|
|
517
|
+
- Configurable export intervals
|
|
518
|
+
|
|
519
|
+
## Best Practices
|
|
520
|
+
|
|
521
|
+
1. **Initialize Once**: Call `lai.init()` at the start of your application
|
|
522
|
+
2. **Use Context Managers**: Prefer `with lai.session()` for automatic lifecycle management
|
|
523
|
+
3. **Enable Auto-End**: Let the SDK handle session cleanup (default behavior)
|
|
524
|
+
4. **Handle Errors**: Wrap SDK calls in try-except blocks for production
|
|
525
|
+
5. **Mask Sensitive Data**: Always use masking functions when handling PII
|
|
526
|
+
6. **Leverage Decorators**: Use `@lai.event` for automatic function tracking
|
|
527
|
+
7. **Group Related Work**: Use experiments to analyze A/B tests and variants
|
|
528
|
+
|
|
529
|
+
## Examples
|
|
530
|
+
|
|
531
|
+
### Customer Support Bot
|
|
532
|
+
```python
|
|
533
|
+
import lucidicai as lai
|
|
534
|
+
from openai import OpenAI
|
|
535
|
+
|
|
536
|
+
# Initialize with context manager for automatic cleanup
|
|
537
|
+
with lai.session(
|
|
538
|
+
session_name="Customer Support",
|
|
539
|
+
providers=["openai"],
|
|
540
|
+
task="Handle customer inquiry",
|
|
541
|
+
tags=["support", "chat"]
|
|
542
|
+
):
|
|
543
|
+
@lai.event()
|
|
544
|
+
def analyze_issue(customer_message: str) -> str:
|
|
545
|
+
"""Analyze the customer's issue"""
|
|
546
|
+
# LLM call is automatically tracked
|
|
547
|
+
client = OpenAI()
|
|
548
|
+
response = client.chat.completions.create(
|
|
549
|
+
model="gpt-4",
|
|
550
|
+
messages=[
|
|
551
|
+
{"role": "system", "content": "You are a support analyst"},
|
|
552
|
+
{"role": "user", "content": f"Categorize this issue: {customer_message}"}
|
|
553
|
+
]
|
|
554
|
+
)
|
|
555
|
+
return response.choices[0].message.content
|
|
556
|
+
|
|
557
|
+
@lai.event()
|
|
558
|
+
def generate_response(issue_category: str) -> str:
|
|
559
|
+
"""Generate a helpful response"""
|
|
560
|
+
# Nested event tracking
|
|
561
|
+
# ... response generation logic ...
|
|
562
|
+
return "Response generated"
|
|
563
|
+
|
|
564
|
+
# Process customer request with automatic nesting
|
|
565
|
+
issue = analyze_issue("I can't login to my account")
|
|
566
|
+
response = generate_response(issue)
|
|
567
|
+
```
|
|
568
|
+
|
|
569
|
+
### Data Analysis Pipeline
|
|
570
|
+
```python
|
|
571
|
+
import lucidicai as lai
|
|
572
|
+
import pandas as pd
|
|
573
|
+
from typing import Dict, Any
|
|
574
|
+
|
|
575
|
+
lai.init(
|
|
576
|
+
session_name="Quarterly Sales Analysis",
|
|
577
|
+
providers=["openai"],
|
|
578
|
+
task="Generate sales insights",
|
|
579
|
+
auto_end=True # Session will end when process exits
|
|
580
|
+
)
|
|
581
|
+
|
|
582
|
+
@lai.event()
|
|
583
|
+
def load_data(file_path: str) -> pd.DataFrame:
|
|
584
|
+
"""Load and validate sales data"""
|
|
585
|
+
df = pd.read_csv(file_path)
|
|
586
|
+
# Data loading logic
|
|
587
|
+
return df
|
|
588
|
+
|
|
589
|
+
@lai.event()
|
|
590
|
+
def analyze_with_llm(data_summary: Dict[str, Any]) -> str:
|
|
591
|
+
"""Generate insights using GPT-4"""
|
|
592
|
+
from openai import OpenAI
|
|
593
|
+
client = OpenAI()
|
|
594
|
+
|
|
595
|
+
response = client.chat.completions.create(
|
|
596
|
+
model="gpt-4",
|
|
597
|
+
messages=[
|
|
598
|
+
{"role": "system", "content": "You are a data analyst"},
|
|
599
|
+
{"role": "user", "content": f"Analyze this sales data: {data_summary}"}
|
|
600
|
+
]
|
|
601
|
+
)
|
|
602
|
+
return response.choices[0].message.content
|
|
603
|
+
|
|
604
|
+
# Execute pipeline with automatic tracking
|
|
605
|
+
df = load_data("sales_q4.csv")
|
|
606
|
+
summary = df.describe().to_dict()
|
|
607
|
+
insights = analyze_with_llm(summary)
|
|
608
|
+
|
|
609
|
+
# Session automatically ends on process exit
|
|
610
|
+
```
|
|
611
|
+
|
|
612
|
+
### Concurrent Processing
|
|
613
|
+
```python
|
|
614
|
+
import lucidicai as lai
|
|
615
|
+
import asyncio
|
|
616
|
+
from typing import List
|
|
617
|
+
|
|
618
|
+
async def process_item(item_id: str, session_id: str) -> dict:
|
|
619
|
+
"""Process a single item with session binding"""
|
|
620
|
+
# Bind this coroutine to the session
|
|
621
|
+
with lai.bind_session(session_id):
|
|
622
|
+
@lai.event()
|
|
623
|
+
async def fetch_data(id: str) -> dict:
|
|
624
|
+
# Async operation tracked as nested event
|
|
625
|
+
await asyncio.sleep(0.1)
|
|
626
|
+
return {"id": id, "data": "processed"}
|
|
627
|
+
|
|
628
|
+
return await fetch_data(item_id)
|
|
629
|
+
|
|
630
|
+
async def main():
|
|
631
|
+
# Create session
|
|
632
|
+
session_id = lai.init(
|
|
633
|
+
session_name="Batch Processing",
|
|
634
|
+
providers=["openai"],
|
|
635
|
+
auto_end=False # Manual control for async
|
|
636
|
+
)
|
|
637
|
+
|
|
638
|
+
# Process items concurrently
|
|
639
|
+
items = ["item1", "item2", "item3"]
|
|
640
|
+
tasks = [process_item(item, session_id) for item in items]
|
|
641
|
+
results = await asyncio.gather(*tasks)
|
|
642
|
+
|
|
643
|
+
# End session manually
|
|
644
|
+
lai.end_session(is_successful=True)
|
|
645
|
+
return results
|
|
646
|
+
|
|
647
|
+
# Run async pipeline
|
|
648
|
+
asyncio.run(main())
|
|
649
|
+
```
|
|
650
|
+
|
|
651
|
+
## Environment Variables
|
|
652
|
+
|
|
653
|
+
| Variable | Default | Description |
|
|
654
|
+
|----------|---------|-------------|
|
|
655
|
+
| `LUCIDIC_API_KEY` | Required | API authentication key |
|
|
656
|
+
| `LUCIDIC_AGENT_ID` | Required | Agent identifier |
|
|
657
|
+
| `LUCIDIC_AUTO_END` | True | Auto-end sessions on exit |
|
|
658
|
+
|
|
659
|
+
## Support
|
|
660
|
+
|
|
661
|
+
- **Documentation**: [https://docs.lucidic.ai](https://docs.lucidic.ai)
|
|
662
|
+
- **Issues**: [GitHub Issues](https://github.com/Lucidic-AI/Lucidic-Python/issues)
|
|
663
|
+
|
|
664
|
+
## License
|
|
665
|
+
|
|
666
|
+
This SDK is distributed under the MIT License.
|