lucidicai 1.2.16__tar.gz → 1.2.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucidicai-1.2.16 → lucidicai-1.2.17}/PKG-INFO +1 -1
- lucidicai-1.2.17/README.md +492 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/__init__.py +93 -19
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/client.py +3 -2
- lucidicai-1.2.17/lucidicai/decorators.py +357 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/image_upload.py +24 -1
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/session.py +7 -0
- lucidicai-1.2.17/lucidicai/telemetry/lucidic_exporter.py +259 -0
- lucidicai-1.2.17/lucidicai/telemetry/lucidic_span_processor.py +665 -0
- lucidicai-1.2.17/lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai-1.2.17/lucidicai/telemetry/otel_handlers.py +266 -0
- lucidicai-1.2.17/lucidicai/telemetry/otel_init.py +197 -0
- lucidicai-1.2.17/lucidicai/telemetry/otel_provider.py +168 -0
- {lucidicai-1.2.16/lucidicai/providers → lucidicai-1.2.17/lucidicai/telemetry}/pydantic_ai_handler.py +1 -1
- lucidicai-1.2.17/lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai-1.2.17/lucidicai/telemetry/utils/image_storage.py +45 -0
- lucidicai-1.2.17/lucidicai/telemetry/utils/text_storage.py +53 -0
- lucidicai-1.2.17/lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai.egg-info/PKG-INFO +1 -1
- lucidicai-1.2.17/lucidicai.egg-info/SOURCES.txt +33 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/setup.py +1 -1
- lucidicai-1.2.16/lucidicai/providers/anthropic_handler.py +0 -260
- lucidicai-1.2.16/lucidicai/providers/langchain.py +0 -557
- lucidicai-1.2.16/lucidicai/providers/openai_agents_handler.py +0 -404
- lucidicai-1.2.16/lucidicai/providers/openai_handler.py +0 -702
- lucidicai-1.2.16/lucidicai.egg-info/SOURCES.txt +0 -31
- lucidicai-1.2.16/tests/test_anthropic_comprehensive.py +0 -503
- lucidicai-1.2.16/tests/test_anthropic_thinking.py +0 -325
- lucidicai-1.2.16/tests/test_event_display.py +0 -105
- lucidicai-1.2.16/tests/test_openai_agents_9_patterns_fixed.py +0 -590
- lucidicai-1.2.16/tests/test_openai_comprehensive.py +0 -427
- lucidicai-1.2.16/tests/test_pydantic_ai_comprehensive.py +0 -301
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/constants.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/errors.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/event.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/model_pricing.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/singleton.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/step.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai/streaming.py +0 -0
- {lucidicai-1.2.16/lucidicai/providers → lucidicai-1.2.17/lucidicai/telemetry}/__init__.py +0 -0
- /lucidicai-1.2.16/lucidicai/providers/base_providers.py → /lucidicai-1.2.17/lucidicai/telemetry/base_provider.py +0 -0
- {lucidicai-1.2.16/lucidicai/providers → lucidicai-1.2.17/lucidicai/telemetry}/opentelemetry_converter.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai.egg-info/requires.txt +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.17}/setup.cfg +0 -0
|
@@ -0,0 +1,492 @@
|
|
|
1
|
+
# Lucidic AI Python SDK
|
|
2
|
+
|
|
3
|
+
The official Python SDK for [Lucidic AI](https://lucidic.ai), providing comprehensive observability and analytics for LLM-powered applications.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Session & Step Tracking** - Track complex AI agent workflows with hierarchical session management
|
|
8
|
+
- **Multi-Provider Support** - Automatic instrumentation for OpenAI, Anthropic, LangChain, and more
|
|
9
|
+
- **Real-time Analytics** - Monitor costs, performance, and behavior of your AI applications
|
|
10
|
+
- **Data Privacy** - Built-in masking functions to protect sensitive information
|
|
11
|
+
- **Screenshot Support** - Capture and analyze visual context in your AI workflows
|
|
12
|
+
- **Production Ready** - OpenTelemetry-based instrumentation for enterprise-scale applications
|
|
13
|
+
- **Decorators** - Pythonic decorators for effortless step and event tracking
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pip install lucidicai
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Quick Start
|
|
22
|
+
|
|
23
|
+
```python
|
|
24
|
+
import lucidicai as lai
|
|
25
|
+
from openai import OpenAI
|
|
26
|
+
|
|
27
|
+
# Initialize the SDK
|
|
28
|
+
lai.init(
|
|
29
|
+
session_name="My AI Assistant",
|
|
30
|
+
providers=["openai"]
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
# Create a workflow step
|
|
34
|
+
lai.create_step(
|
|
35
|
+
state="Processing user query",
|
|
36
|
+
action="Generate response",
|
|
37
|
+
goal="Provide helpful answer"
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
# Use your LLM as normal - Lucidic automatically tracks the interaction
|
|
41
|
+
client = OpenAI()
|
|
42
|
+
response = client.chat.completions.create(
|
|
43
|
+
model="gpt-4",
|
|
44
|
+
messages=[{"role": "user", "content": "Hello, how are you?"}]
|
|
45
|
+
)
|
|
46
|
+
|
|
47
|
+
# End the step and session
|
|
48
|
+
lai.end_step()
|
|
49
|
+
lai.end_session(is_successful=True)
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## Configuration
|
|
53
|
+
|
|
54
|
+
### Environment Variables
|
|
55
|
+
|
|
56
|
+
Create a `.env` file or set these environment variables:
|
|
57
|
+
|
|
58
|
+
```bash
|
|
59
|
+
LUCIDIC_API_KEY=your_api_key # Required: Your Lucidic API key
|
|
60
|
+
LUCIDIC_AGENT_ID=your_agent_id # Required: Your agent identifier
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
### Initialization Options
|
|
64
|
+
|
|
65
|
+
```python
|
|
66
|
+
lai.init(
|
|
67
|
+
session_name="My Session", # Required: Name for this session
|
|
68
|
+
lucidic_api_key="...", # Optional: Override env var
|
|
69
|
+
agent_id="...", # Optional: Override env var
|
|
70
|
+
providers=["openai", "anthropic"], # Optional: LLM providers to track
|
|
71
|
+
task="Process customer request", # Optional: High-level task description
|
|
72
|
+
production_monitoring=False, # Optional: Production mode flag
|
|
73
|
+
auto_end=True, # Optional: Auto-end session on exit (default: True)
|
|
74
|
+
masking_function=my_mask_func, # Optional: Custom PII masking
|
|
75
|
+
tags=["customer-support", "v1.2"], # Optional: Session tags
|
|
76
|
+
rubrics=[...] # Optional: Evaluation criteria
|
|
77
|
+
)
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
## Core Concepts
|
|
81
|
+
|
|
82
|
+
### Sessions
|
|
83
|
+
A session represents a complete interaction or workflow, containing multiple steps and events.
|
|
84
|
+
|
|
85
|
+
```python
|
|
86
|
+
# Start a new session
|
|
87
|
+
session_id = lai.init(session_name="Customer Support Chat")
|
|
88
|
+
|
|
89
|
+
# Continue an existing session
|
|
90
|
+
lai.continue_session(session_id="existing-session-id")
|
|
91
|
+
|
|
92
|
+
# Update session metadata
|
|
93
|
+
lai.update_session(
|
|
94
|
+
task="Resolved billing issue",
|
|
95
|
+
session_eval=0.95,
|
|
96
|
+
is_successful=True
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
# End session
|
|
100
|
+
lai.end_session(is_successful=True, session_eval=0.9)
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
### Automatic Session Management (auto_end)
|
|
104
|
+
|
|
105
|
+
By default, Lucidic automatically ends your session when your process exits, ensuring no data is lost. This feature is enabled by default but can be controlled:
|
|
106
|
+
|
|
107
|
+
```python
|
|
108
|
+
# Default behavior - session auto-ends on exit
|
|
109
|
+
lai.init(session_name="My Session") # auto_end=True by default
|
|
110
|
+
|
|
111
|
+
# Disable auto-end if you want manual control
|
|
112
|
+
lai.init(session_name="My Session", auto_end=False)
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
The auto_end feature:
|
|
116
|
+
- Automatically calls `end_session()` when your Python process exits
|
|
117
|
+
- Works with normal exits, crashes, and interrupts (Ctrl+C)
|
|
118
|
+
- Prevents data loss from forgotten `end_session()` calls
|
|
119
|
+
- Can be disabled for cases where you need explicit control
|
|
120
|
+
|
|
121
|
+
### Steps
|
|
122
|
+
Steps break down complex workflows into discrete, trackable units.
|
|
123
|
+
|
|
124
|
+
```python
|
|
125
|
+
# Create a step
|
|
126
|
+
step_id = lai.create_step(
|
|
127
|
+
state="Current context or state",
|
|
128
|
+
action="What the agent is doing",
|
|
129
|
+
goal="What the agent aims to achieve",
|
|
130
|
+
screenshot_path="/path/to/screenshot.png" # Optional
|
|
131
|
+
)
|
|
132
|
+
|
|
133
|
+
# Update step progress
|
|
134
|
+
lai.update_step(
|
|
135
|
+
step_id=step_id,
|
|
136
|
+
eval_score=0.8,
|
|
137
|
+
eval_description="Partially completed task"
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
# End step
|
|
141
|
+
lai.end_step(step_id=step_id)
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
- NOTE: If no step exists when an LLM call is made (but Lucidic has already been initialized), Lucidic will automatically create a new step for that call. This step will contain exactly one event—the LLM call itself.
|
|
145
|
+
|
|
146
|
+
### Events
|
|
147
|
+
Events are automatically tracked when using instrumented providers, but can also be created manually.
|
|
148
|
+
|
|
149
|
+
```python
|
|
150
|
+
# Manual event creation
|
|
151
|
+
event_id = lai.create_event(
|
|
152
|
+
description="Generated summary",
|
|
153
|
+
result="Success",
|
|
154
|
+
cost_added=0.002,
|
|
155
|
+
model="gpt-4",
|
|
156
|
+
screenshots=["/path/to/image1.png", "/path/to/image2.png"]
|
|
157
|
+
)
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
## Provider Integration
|
|
161
|
+
|
|
162
|
+
### OpenAI
|
|
163
|
+
```python
|
|
164
|
+
from openai import OpenAI
|
|
165
|
+
|
|
166
|
+
lai.init(session_name="OpenAI Example", providers=["openai"])
|
|
167
|
+
client = OpenAI()
|
|
168
|
+
|
|
169
|
+
# All OpenAI API calls are automatically tracked
|
|
170
|
+
response = client.chat.completions.create(
|
|
171
|
+
model="gpt-4",
|
|
172
|
+
messages=[{"role": "user", "content": "Write a haiku about coding"}]
|
|
173
|
+
)
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
### Anthropic
|
|
177
|
+
```python
|
|
178
|
+
from anthropic import Anthropic
|
|
179
|
+
|
|
180
|
+
lai.init(session_name="Claude Example", providers=["anthropic"])
|
|
181
|
+
client = Anthropic()
|
|
182
|
+
|
|
183
|
+
# Anthropic API calls are automatically tracked
|
|
184
|
+
response = client.messages.create(
|
|
185
|
+
model="claude-3-opus-20240229",
|
|
186
|
+
messages=[{"role": "user", "content": "Explain quantum computing"}]
|
|
187
|
+
)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
### LangChain
|
|
191
|
+
```python
|
|
192
|
+
from langchain_openai import ChatOpenAI
|
|
193
|
+
from langchain_core.messages import HumanMessage
|
|
194
|
+
|
|
195
|
+
lai.init(session_name="LangChain Example", providers=["langchain"])
|
|
196
|
+
|
|
197
|
+
# LangChain calls are automatically tracked
|
|
198
|
+
llm = ChatOpenAI(model="gpt-4")
|
|
199
|
+
response = llm.invoke([HumanMessage(content="Hello!")])
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
## Advanced Features
|
|
203
|
+
|
|
204
|
+
### Decorators
|
|
205
|
+
Simplify your code with Python decorators for automatic tracking:
|
|
206
|
+
|
|
207
|
+
#### Step Decorator
|
|
208
|
+
Wrap functions to automatically create and manage steps:
|
|
209
|
+
|
|
210
|
+
```python
|
|
211
|
+
@lai.step(
|
|
212
|
+
# All parameters are optional and auto generated if not provided
|
|
213
|
+
state="Processing data",
|
|
214
|
+
action="Transform input",
|
|
215
|
+
goal="Generate output",
|
|
216
|
+
eval_score=1,
|
|
217
|
+
    eval_description="Data successfully processed",
|
|
218
|
+
screenshot_path="/path/to/image" # populates step image if provided. No image if not provided
|
|
219
|
+
)
|
|
220
|
+
def process_data(input_data: dict) -> dict:
|
|
221
|
+
# Your processing logic here
|
|
222
|
+
result = transform(input_data)
|
|
223
|
+
return result
|
|
224
|
+
|
|
225
|
+
# The function automatically creates a step, executes, and ends the step
|
|
226
|
+
output = process_data({"key": "value"})
|
|
227
|
+
```
|
|
228
|
+
|
|
229
|
+
#### Event Decorator
|
|
230
|
+
Track function calls as events with automatic input/output capture:
|
|
231
|
+
|
|
232
|
+
```python
|
|
233
|
+
@lai.event(
|
|
234
|
+
# All parameters are optional
|
|
235
|
+
description="Calculate statistics", # function inputs if not provided
|
|
236
|
+
    result="Stats calculated",             # function output if not provided
|
|
237
|
+
model="stats-engine", # Not shown if not provided
|
|
238
|
+
cost_added=0.001 # 0 if not provided
|
|
239
|
+
)
|
|
240
|
+
def calculate_stats(data: list) -> dict:
|
|
241
|
+
return {
|
|
242
|
+
'mean': sum(data) / len(data),
|
|
243
|
+
'max': max(data),
|
|
244
|
+
'min': min(data)
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
# Creates an event with function inputs and outputs
|
|
248
|
+
stats = calculate_stats([1, 2, 3, 4, 5])
|
|
249
|
+
```
|
|
250
|
+
|
|
251
|
+
#### Accessing Created Steps and Events
|
|
252
|
+
Within decorated functions, you can access and update the created step:
|
|
253
|
+
|
|
254
|
+
```python
|
|
255
|
+
from lucidicai.decorators import get_decorator_step
|
|
256
|
+
|
|
257
|
+
@lai.step(state="Initial state", action="Process")
|
|
258
|
+
def process_with_updates(data: dict) -> dict:
|
|
259
|
+
# Access the current step ID
|
|
260
|
+
step_id = get_decorator_step()
|
|
261
|
+
|
|
262
|
+
# Manually update the step - this overrides decorator parameters
|
|
263
|
+
lai.update_step(
|
|
264
|
+
step_id=step_id,
|
|
265
|
+
state="Processing in progress",
|
|
266
|
+
eval_score=0.5,
|
|
267
|
+
eval_description="Halfway complete"
|
|
268
|
+
)
|
|
269
|
+
|
|
270
|
+
# Do some processing...
|
|
271
|
+
result = transform(data)
|
|
272
|
+
|
|
273
|
+
# Update again before completion
|
|
274
|
+
lai.update_step(
|
|
275
|
+
step_id=step_id,
|
|
276
|
+
eval_score=1.0,
|
|
277
|
+
eval_description="Successfully completed transformation"
|
|
278
|
+
)
|
|
279
|
+
|
|
280
|
+
return result
|
|
281
|
+
|
|
282
|
+
# Any updates made within the decorated function overwrite the parameters passed into the decorator.
```
|
|
283
|
+
|
|
284
|
+
#### Nested Usage
|
|
285
|
+
Decorators can be nested for complex workflows:
|
|
286
|
+
|
|
287
|
+
```python
|
|
288
|
+
@lai.step(state="Main workflow", action="Process batch")
|
|
289
|
+
def process_batch(items: list) -> list:
|
|
290
|
+
results = []
|
|
291
|
+
|
|
292
|
+
@lai.event(description="Process single item")
|
|
293
|
+
def process_item(item):
|
|
294
|
+
# LLM calls here create their own events automatically
|
|
295
|
+
return transform(item)
|
|
296
|
+
|
|
297
|
+
for item in items:
|
|
298
|
+
results.append(process_item(item))
|
|
299
|
+
|
|
300
|
+
return results
|
|
301
|
+
```
|
|
302
|
+
|
|
303
|
+
#### Async Support
|
|
304
|
+
Both decorators fully support async functions:
|
|
305
|
+
|
|
306
|
+
```python
|
|
307
|
+
@lai.step(state="Async operation", action="Fetch data")
|
|
308
|
+
async def fetch_data(url: str) -> dict:
|
|
309
|
+
async with aiohttp.ClientSession() as session:
|
|
310
|
+
async with session.get(url) as response:
|
|
311
|
+
return await response.json()
|
|
312
|
+
|
|
313
|
+
@lai.event(description="Async processing")
|
|
314
|
+
async def process_async(data: dict) -> dict:
|
|
315
|
+
await asyncio.sleep(1)
|
|
316
|
+
return transform(data)
|
|
317
|
+
```
|
|
318
|
+
|
|
319
|
+
### Data Masking
|
|
320
|
+
Protect sensitive information with custom masking functions:
|
|
321
|
+
|
|
322
|
+
```python
|
|
323
|
+
def mask_pii(text):
|
|
324
|
+
# Your PII masking logic here
|
|
325
|
+
return text.replace("SSN:", "XXX-XX-")
|
|
326
|
+
|
|
327
|
+
lai.init(
|
|
328
|
+
session_name="Secure Session",
|
|
329
|
+
masking_function=mask_pii
|
|
330
|
+
)
|
|
331
|
+
```
|
|
332
|
+
|
|
333
|
+
### Image Analysis
|
|
334
|
+
Upload screenshots for visual context:
|
|
335
|
+
|
|
336
|
+
```python
|
|
337
|
+
# With step creation
|
|
338
|
+
lai.create_step(
|
|
339
|
+
state="Analyzing UI",
|
|
340
|
+
action="Check layout",
|
|
341
|
+
goal="Verify responsive design",
|
|
342
|
+
screenshot_path="/path/to/screenshot.png"
|
|
343
|
+
)
|
|
344
|
+
|
|
345
|
+
# With events
|
|
346
|
+
|
|
347
|
+
lai.create_event(
|
|
348
|
+
description="UI validation",
|
|
349
|
+
screenshots=[base64_encoded_image1, base64_encoded_image2]
|
|
350
|
+
)
|
|
351
|
+
```
|
|
352
|
+
|
|
353
|
+
### Prompt Management
|
|
354
|
+
Fetch and cache prompts from the Lucidic platform:
|
|
355
|
+
|
|
356
|
+
```python
|
|
357
|
+
prompt = lai.get_prompt(
|
|
358
|
+
prompt_name="customer_support",
|
|
359
|
+
variables={"issue_type": "billing"},
|
|
360
|
+
cache_ttl=3600, # Cache for 1 hour
|
|
361
|
+
label="v1.2"
|
|
362
|
+
)
|
|
363
|
+
```
|
|
364
|
+
|
|
365
|
+
### Mass Simulations
|
|
366
|
+
Run large-scale testing and evaluation:
|
|
367
|
+
|
|
368
|
+
```python
|
|
369
|
+
# Create a mass simulation
|
|
370
|
+
mass_sim_id = lai.create_mass_sim(
|
|
371
|
+
mass_sim_name="Load Test",
|
|
372
|
+
total_num_sessions=1000
|
|
373
|
+
)
|
|
374
|
+
|
|
375
|
+
# Initialize sessions with mass_sim_id
|
|
376
|
+
lai.init(
|
|
377
|
+
session_name="Test Session",
|
|
378
|
+
mass_sim_id=mass_sim_id
|
|
379
|
+
)
|
|
380
|
+
```
|
|
381
|
+
|
|
382
|
+
## Error Handling
|
|
383
|
+
|
|
384
|
+
The SDK provides specific exceptions for different error scenarios:
|
|
385
|
+
|
|
386
|
+
```python
|
|
387
|
+
from lucidicai.errors import (
|
|
388
|
+
APIKeyVerificationError,
|
|
389
|
+
InvalidOperationError,
|
|
390
|
+
LucidicNotInitializedError,
|
|
391
|
+
PromptError
|
|
392
|
+
)
|
|
393
|
+
|
|
394
|
+
try:
|
|
395
|
+
lai.init(session_name="My Session")
|
|
396
|
+
except APIKeyVerificationError:
|
|
397
|
+
print("Invalid API key - check your credentials")
|
|
398
|
+
except LucidicNotInitializedError:
|
|
399
|
+
print("SDK not initialized - call lai.init() first")
|
|
400
|
+
```
|
|
401
|
+
|
|
402
|
+
## Best Practices
|
|
403
|
+
|
|
404
|
+
1. **Initialize Once**: Call `lai.init()` at the start of your application or workflow
|
|
405
|
+
2. **Use Steps**: Break complex workflows into logical steps for better tracking
|
|
406
|
+
3. **Handle Errors**: Wrap SDK calls in try-except blocks for production applications
|
|
407
|
+
4. **Session Cleanup**: With `auto_end` enabled (default), sessions automatically end on exit. For manual control, set `auto_end=False` and call `lai.end_session()`
|
|
408
|
+
5. **Mask Sensitive Data**: Use masking functions to protect PII and confidential information
|
|
409
|
+
|
|
410
|
+
## Examples
|
|
411
|
+
|
|
412
|
+
### Customer Support Bot
|
|
413
|
+
```python
|
|
414
|
+
import lucidicai as lai
|
|
415
|
+
from openai import OpenAI
|
|
416
|
+
|
|
417
|
+
# Initialize for customer support workflow
|
|
418
|
+
lai.init(
|
|
419
|
+
session_name="Customer Support",
|
|
420
|
+
providers=["openai"],
|
|
421
|
+
task="Handle customer inquiry",
|
|
422
|
+
tags=["support", "chat"]
|
|
423
|
+
)
|
|
424
|
+
|
|
425
|
+
# Step 1: Understand the issue
|
|
426
|
+
lai.create_step(
|
|
427
|
+
state="Customer reported login issue",
|
|
428
|
+
action="Diagnose problem",
|
|
429
|
+
goal="Identify root cause"
|
|
430
|
+
)
|
|
431
|
+
|
|
432
|
+
client = OpenAI()
|
|
433
|
+
# ... your chatbot logic here ...
|
|
434
|
+
|
|
435
|
+
lai.end_step()
|
|
436
|
+
|
|
437
|
+
# Step 2: Provide solution
|
|
438
|
+
lai.create_step(
|
|
439
|
+
state="Issue identified as password reset",
|
|
440
|
+
action="Guide through reset process",
|
|
441
|
+
goal="Resolve customer issue"
|
|
442
|
+
)
|
|
443
|
+
|
|
444
|
+
# ... more chatbot logic ...
|
|
445
|
+
|
|
446
|
+
lai.end_step()
|
|
447
|
+
lai.end_session(is_successful=True, session_eval=0.95)
|
|
448
|
+
```
|
|
449
|
+
|
|
450
|
+
### Data Analysis Pipeline
|
|
451
|
+
```python
|
|
452
|
+
import lucidicai as lai
|
|
453
|
+
import pandas as pd
|
|
454
|
+
|
|
455
|
+
lai.init(
|
|
456
|
+
session_name="Quarterly Sales Analysis",
|
|
457
|
+
providers=["openai"],
|
|
458
|
+
task="Generate sales insights"
|
|
459
|
+
)
|
|
460
|
+
|
|
461
|
+
# Step 1: Data loading
|
|
462
|
+
lai.create_step(
|
|
463
|
+
state="Loading Q4 sales data",
|
|
464
|
+
action="Read and validate CSV files",
|
|
465
|
+
goal="Prepare data for analysis"
|
|
466
|
+
)
|
|
467
|
+
|
|
468
|
+
# ... data loading logic ...
|
|
469
|
+
|
|
470
|
+
lai.end_step()
|
|
471
|
+
|
|
472
|
+
# Step 2: Analysis
|
|
473
|
+
lai.create_step(
|
|
474
|
+
state="Data loaded successfully",
|
|
475
|
+
action="Generate insights using GPT-4",
|
|
476
|
+
goal="Create executive summary"
|
|
477
|
+
)
|
|
478
|
+
|
|
479
|
+
# ... LLM analysis logic ...
|
|
480
|
+
|
|
481
|
+
lai.end_step()
|
|
482
|
+
lai.end_session(is_successful=True)
|
|
483
|
+
```
|
|
484
|
+
|
|
485
|
+
## Support
|
|
486
|
+
|
|
487
|
+
- **Documentation**: [https://docs.lucidic.ai](https://docs.lucidic.ai)
|
|
488
|
+
- **Issues**: [GitHub Issues](https://github.com/Lucidic-AI/Lucidic-Python/issues)
|
|
489
|
+
|
|
490
|
+
## License
|
|
491
|
+
|
|
492
|
+
This SDK is distributed under the MIT License.
|
|
@@ -7,14 +7,24 @@ from typing import List, Literal, Optional
|
|
|
7
7
|
from .client import Client
|
|
8
8
|
from .errors import APIKeyVerificationError, InvalidOperationError, LucidicNotInitializedError, PromptError
|
|
9
9
|
from .event import Event
|
|
10
|
-
from .providers.anthropic_handler import AnthropicHandler
|
|
11
|
-
from .providers.langchain import LucidicLangchainHandler
|
|
12
|
-
from .providers.openai_handler import OpenAIHandler
|
|
13
|
-
from .providers.openai_agents_handler import OpenAIAgentsHandler
|
|
14
|
-
from .providers.pydantic_ai_handler import PydanticAIHandler
|
|
15
10
|
from .session import Session
|
|
16
11
|
from .step import Step
|
|
17
12
|
|
|
13
|
+
# Import OpenTelemetry-based handlers
|
|
14
|
+
from .telemetry.otel_handlers import (
|
|
15
|
+
OTelOpenAIHandler,
|
|
16
|
+
OTelAnthropicHandler,
|
|
17
|
+
OTelLangChainHandler,
|
|
18
|
+
OTelPydanticAIHandler,
|
|
19
|
+
OTelOpenAIAgentsHandler
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
# Import telemetry manager
|
|
23
|
+
from .telemetry.otel_init import LucidicTelemetry
|
|
24
|
+
|
|
25
|
+
# Import decorators
|
|
26
|
+
from .decorators import step, event
|
|
27
|
+
|
|
18
28
|
ProviderType = Literal["openai", "anthropic", "langchain", "pydantic_ai", "openai_agents"]
|
|
19
29
|
|
|
20
30
|
# Configure logging
|
|
@@ -37,31 +47,33 @@ def _setup_providers(client: Client, providers: List[ProviderType]) -> None:
|
|
|
37
47
|
# Track which providers have been set up to avoid duplication
|
|
38
48
|
setup_providers = set()
|
|
39
49
|
|
|
50
|
+
# Initialize telemetry if using OpenTelemetry
|
|
51
|
+
if providers:
|
|
52
|
+
telemetry = LucidicTelemetry()
|
|
53
|
+
if not telemetry.is_initialized():
|
|
54
|
+
telemetry.initialize(agent_id=client.agent_id)
|
|
55
|
+
|
|
40
56
|
for provider in providers:
|
|
41
57
|
if provider in setup_providers:
|
|
42
58
|
continue
|
|
43
59
|
|
|
44
60
|
if provider == "openai":
|
|
45
|
-
client.set_provider(
|
|
61
|
+
client.set_provider(OTelOpenAIHandler())
|
|
46
62
|
setup_providers.add("openai")
|
|
47
63
|
elif provider == "anthropic":
|
|
48
|
-
client.set_provider(
|
|
64
|
+
client.set_provider(OTelAnthropicHandler())
|
|
49
65
|
setup_providers.add("anthropic")
|
|
50
66
|
elif provider == "langchain":
|
|
67
|
+
client.set_provider(OTelLangChainHandler())
|
|
51
68
|
logger.info("For LangChain, make sure to create a handler and attach it to your top-level Agent class.")
|
|
52
69
|
setup_providers.add("langchain")
|
|
53
70
|
elif provider == "pydantic_ai":
|
|
54
|
-
client.set_provider(
|
|
71
|
+
client.set_provider(OTelPydanticAIHandler())
|
|
55
72
|
setup_providers.add("pydantic_ai")
|
|
56
73
|
elif provider == "openai_agents":
|
|
57
74
|
try:
|
|
58
|
-
|
|
59
|
-
client.set_provider(OpenAIAgentsHandler())
|
|
75
|
+
client.set_provider(OTelOpenAIAgentsHandler())
|
|
60
76
|
setup_providers.add("openai_agents")
|
|
61
|
-
# Also enable OpenAI handler if not already set up
|
|
62
|
-
if "openai" not in setup_providers:
|
|
63
|
-
client.set_provider(OpenAIHandler())
|
|
64
|
-
setup_providers.add("openai")
|
|
65
77
|
except Exception as e:
|
|
66
78
|
logger.error(f"Failed to set up OpenAI Agents provider: {e}")
|
|
67
79
|
raise
|
|
@@ -87,11 +99,8 @@ __all__ = [
|
|
|
87
99
|
'LucidicNotInitializedError',
|
|
88
100
|
'PromptError',
|
|
89
101
|
'InvalidOperationError',
|
|
90
|
-
'
|
|
91
|
-
'
|
|
92
|
-
'OpenAIHandler',
|
|
93
|
-
'OpenAIAgentsHandler',
|
|
94
|
-
'PydanticAIHandler'
|
|
102
|
+
'step',
|
|
103
|
+
'event',
|
|
95
104
|
]
|
|
96
105
|
|
|
97
106
|
|
|
@@ -106,6 +115,7 @@ def init(
|
|
|
106
115
|
rubrics: Optional[list] = None,
|
|
107
116
|
tags: Optional[list] = None,
|
|
108
117
|
masking_function = None,
|
|
118
|
+
auto_end: Optional[bool] = True,
|
|
109
119
|
) -> str:
|
|
110
120
|
"""
|
|
111
121
|
Initialize the Lucidic client.
|
|
@@ -120,6 +130,7 @@ def init(
|
|
|
120
130
|
rubrics: Optional rubrics for evaluation, list of strings.
|
|
121
131
|
tags: Optional tags for the session, list of strings.
|
|
122
132
|
masking_function: Optional function to mask sensitive data.
|
|
133
|
+
auto_end: If True, automatically end the session on process exit. Defaults to True.
|
|
123
134
|
|
|
124
135
|
Raises:
|
|
125
136
|
InvalidOperationError: If the client is already initialized.
|
|
@@ -147,6 +158,10 @@ def init(
|
|
|
147
158
|
else:
|
|
148
159
|
production_monitoring = False
|
|
149
160
|
|
|
161
|
+
# Handle auto_end with environment variable support
|
|
162
|
+
if auto_end is None:
|
|
163
|
+
auto_end = os.getenv("LUCIDIC_AUTO_END", "True").lower() == "true"
|
|
164
|
+
|
|
150
165
|
# Set up providers
|
|
151
166
|
_setup_providers(client, providers)
|
|
152
167
|
session_id = client.init_session(
|
|
@@ -159,6 +174,10 @@ def init(
|
|
|
159
174
|
)
|
|
160
175
|
if masking_function:
|
|
161
176
|
client.masking_function = masking_function
|
|
177
|
+
|
|
178
|
+
# Set the auto_end flag on the client
|
|
179
|
+
client.auto_end = auto_end
|
|
180
|
+
|
|
162
181
|
logger.info("Session initialized successfully")
|
|
163
182
|
return session_id
|
|
164
183
|
|
|
@@ -169,6 +188,7 @@ def continue_session(
|
|
|
169
188
|
agent_id: Optional[str] = None,
|
|
170
189
|
providers: Optional[List[ProviderType]] = [],
|
|
171
190
|
masking_function = None,
|
|
191
|
+
auto_end: Optional[bool] = True,
|
|
172
192
|
):
|
|
173
193
|
if lucidic_api_key is None:
|
|
174
194
|
lucidic_api_key = os.getenv("LUCIDIC_API_KEY", None)
|
|
@@ -188,11 +208,19 @@ def continue_session(
|
|
|
188
208
|
agent_id=agent_id,
|
|
189
209
|
)
|
|
190
210
|
|
|
211
|
+
# Handle auto_end with environment variable support
|
|
212
|
+
if auto_end is None:
|
|
213
|
+
auto_end = os.getenv("LUCIDIC_AUTO_END", "True").lower() == "true"
|
|
214
|
+
|
|
191
215
|
# Set up providers
|
|
192
216
|
_setup_providers(client, providers)
|
|
193
217
|
session_id = client.continue_session(session_id=session_id)
|
|
194
218
|
if masking_function:
|
|
195
219
|
client.masking_function = masking_function
|
|
220
|
+
|
|
221
|
+
# Set the auto_end flag on the client
|
|
222
|
+
client.auto_end = auto_end
|
|
223
|
+
|
|
196
224
|
logger.info(f"Session {session_id} continuing...")
|
|
197
225
|
return session_id # For consistency
|
|
198
226
|
|
|
@@ -249,9 +277,55 @@ def reset_sdk() -> None:
|
|
|
249
277
|
client = Client()
|
|
250
278
|
if not client.initialized:
|
|
251
279
|
return
|
|
280
|
+
|
|
281
|
+
# Shutdown OpenTelemetry if it was initialized
|
|
282
|
+
telemetry = LucidicTelemetry()
|
|
283
|
+
if telemetry.is_initialized():
|
|
284
|
+
telemetry.uninstrument_all()
|
|
285
|
+
|
|
252
286
|
client.clear()
|
|
253
287
|
|
|
254
288
|
|
|
289
|
+
def _cleanup_telemetry():
|
|
290
|
+
"""Cleanup function for OpenTelemetry shutdown"""
|
|
291
|
+
try:
|
|
292
|
+
telemetry = LucidicTelemetry()
|
|
293
|
+
if telemetry.is_initialized():
|
|
294
|
+
telemetry.uninstrument_all()
|
|
295
|
+
logger.info("OpenTelemetry instrumentation cleaned up")
|
|
296
|
+
except Exception as e:
|
|
297
|
+
logger.error(f"Error during telemetry cleanup: {e}")
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
def _auto_end_session():
|
|
301
|
+
"""Automatically end session on exit if auto_end is enabled"""
|
|
302
|
+
try:
|
|
303
|
+
client = Client()
|
|
304
|
+
if hasattr(client, 'auto_end') and client.auto_end and client.session and not client.session.is_finished:
|
|
305
|
+
logger.info("Auto-ending active session on exit")
|
|
306
|
+
end_session()
|
|
307
|
+
except Exception as e:
|
|
308
|
+
logger.debug(f"Error during auto-end session: {e}")
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
def _signal_handler(signum, frame):
|
|
312
|
+
"""Handle interruption signals"""
|
|
313
|
+
_auto_end_session()
|
|
314
|
+
_cleanup_telemetry()
|
|
315
|
+
# Re-raise the signal for default handling
|
|
316
|
+
signal.signal(signum, signal.SIG_DFL)
|
|
317
|
+
os.kill(os.getpid(), signum)
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
# Register cleanup functions (auto-end runs first due to LIFO order)
|
|
321
|
+
atexit.register(_cleanup_telemetry)
|
|
322
|
+
atexit.register(_auto_end_session)
|
|
323
|
+
|
|
324
|
+
# Register signal handlers for graceful shutdown
|
|
325
|
+
signal.signal(signal.SIGINT, _signal_handler)
|
|
326
|
+
signal.signal(signal.SIGTERM, _signal_handler)
|
|
327
|
+
|
|
328
|
+
|
|
255
329
|
def create_mass_sim(
|
|
256
330
|
mass_sim_name: str,
|
|
257
331
|
total_num_sessions: int,
|