lucidicai 1.2.16__tar.gz → 1.2.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucidicai-1.2.16 → lucidicai-1.2.18}/PKG-INFO +1 -1
- lucidicai-1.2.18/README.md +492 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/__init__.py +105 -30
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/client.py +10 -4
- lucidicai-1.2.18/lucidicai/decorators.py +357 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/image_upload.py +24 -1
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/session.py +9 -1
- lucidicai-1.2.18/lucidicai/telemetry/lucidic_exporter.py +259 -0
- lucidicai-1.2.18/lucidicai/telemetry/lucidic_span_processor.py +665 -0
- lucidicai-1.2.18/lucidicai/telemetry/openai_agents_instrumentor.py +306 -0
- lucidicai-1.2.18/lucidicai/telemetry/otel_handlers.py +266 -0
- lucidicai-1.2.18/lucidicai/telemetry/otel_init.py +197 -0
- lucidicai-1.2.18/lucidicai/telemetry/otel_provider.py +168 -0
- {lucidicai-1.2.16/lucidicai/providers → lucidicai-1.2.18/lucidicai/telemetry}/pydantic_ai_handler.py +1 -1
- lucidicai-1.2.18/lucidicai/telemetry/utils/__init__.py +0 -0
- lucidicai-1.2.18/lucidicai/telemetry/utils/image_storage.py +45 -0
- lucidicai-1.2.18/lucidicai/telemetry/utils/text_storage.py +53 -0
- lucidicai-1.2.18/lucidicai/telemetry/utils/universal_image_interceptor.py +276 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai.egg-info/PKG-INFO +1 -1
- lucidicai-1.2.18/lucidicai.egg-info/SOURCES.txt +33 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/setup.py +1 -1
- lucidicai-1.2.16/lucidicai/providers/anthropic_handler.py +0 -260
- lucidicai-1.2.16/lucidicai/providers/langchain.py +0 -557
- lucidicai-1.2.16/lucidicai/providers/openai_agents_handler.py +0 -404
- lucidicai-1.2.16/lucidicai/providers/openai_handler.py +0 -702
- lucidicai-1.2.16/lucidicai.egg-info/SOURCES.txt +0 -31
- lucidicai-1.2.16/tests/test_anthropic_comprehensive.py +0 -503
- lucidicai-1.2.16/tests/test_anthropic_thinking.py +0 -325
- lucidicai-1.2.16/tests/test_event_display.py +0 -105
- lucidicai-1.2.16/tests/test_openai_agents_9_patterns_fixed.py +0 -590
- lucidicai-1.2.16/tests/test_openai_comprehensive.py +0 -427
- lucidicai-1.2.16/tests/test_pydantic_ai_comprehensive.py +0 -301
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/constants.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/errors.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/event.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/model_pricing.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/singleton.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/step.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai/streaming.py +0 -0
- {lucidicai-1.2.16/lucidicai/providers → lucidicai-1.2.18/lucidicai/telemetry}/__init__.py +0 -0
- /lucidicai-1.2.16/lucidicai/providers/base_providers.py → /lucidicai-1.2.18/lucidicai/telemetry/base_provider.py +0 -0
- {lucidicai-1.2.16/lucidicai/providers → lucidicai-1.2.18/lucidicai/telemetry}/opentelemetry_converter.py +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai.egg-info/requires.txt +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-1.2.16 → lucidicai-1.2.18}/setup.cfg +0 -0

@@ -0,0 +1,492 @@

# Lucidic AI Python SDK

The official Python SDK for [Lucidic AI](https://lucidic.ai), providing comprehensive observability and analytics for LLM-powered applications.

## Features

- **Session & Step Tracking** - Track complex AI agent workflows with hierarchical session management
- **Multi-Provider Support** - Automatic instrumentation for OpenAI, Anthropic, LangChain, and more
- **Real-time Analytics** - Monitor costs, performance, and behavior of your AI applications
- **Data Privacy** - Built-in masking functions to protect sensitive information
- **Screenshot Support** - Capture and analyze visual context in your AI workflows
- **Production Ready** - OpenTelemetry-based instrumentation for enterprise-scale applications
- **Decorators** - Pythonic decorators for effortless step and event tracking

## Installation

```bash
pip install lucidicai
```

## Quick Start

```python
import lucidicai as lai
from openai import OpenAI

# Initialize the SDK
lai.init(
    session_name="My AI Assistant",
    providers=["openai"]
)

# Create a workflow step
lai.create_step(
    state="Processing user query",
    action="Generate response",
    goal="Provide helpful answer"
)

# Use your LLM as normal - Lucidic automatically tracks the interaction
client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello, how are you?"}]
)

# End the step and session
lai.end_step()
lai.end_session(is_successful=True)
```

## Configuration

### Environment Variables

Create a `.env` file or set these environment variables:

```bash
LUCIDIC_API_KEY=your_api_key     # Required: Your Lucidic API key
LUCIDIC_AGENT_ID=your_agent_id   # Required: Your agent identifier
```
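
If you keep these values in a `.env` file, one common way to load them before calling `lai.init()` is the third-party `python-dotenv` package. This is a minimal sketch under that assumption; it is not a dependency the SDK requires:

```python
# Sketch: load a .env file with python-dotenv (assumed installed separately)
# before initializing the SDK.
import os

from dotenv import load_dotenv
import lucidicai as lai

load_dotenv()  # copies LUCIDIC_API_KEY and LUCIDIC_AGENT_ID from .env into os.environ

# Passing the credentials explicitly is equivalent to relying on the env vars;
# either works once the values are present in the environment.
lai.init(
    session_name="Env Config Example",
    lucidic_api_key=os.environ["LUCIDIC_API_KEY"],
    agent_id=os.environ["LUCIDIC_AGENT_ID"],
)
```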

### Initialization Options

```python
lai.init(
    session_name="My Session",           # Required: Name for this session
    lucidic_api_key="...",               # Optional: Override env var
    agent_id="...",                      # Optional: Override env var
    providers=["openai", "anthropic"],   # Optional: LLM providers to track
    task="Process customer request",     # Optional: High-level task description
    production_monitoring=False,         # Optional: Production mode flag
    auto_end=True,                       # Optional: Auto-end session on exit (default: True)
    masking_function=my_mask_func,       # Optional: Custom PII masking
    tags=["customer-support", "v1.2"],   # Optional: Session tags
    rubrics=[...]                        # Optional: Evaluation criteria
)
```

## Core Concepts

### Sessions

A session represents a complete interaction or workflow, containing multiple steps and events.

```python
# Start a new session
session_id = lai.init(session_name="Customer Support Chat")

# Continue an existing session
lai.continue_session(session_id="existing-session-id")

# Update session metadata
lai.update_session(
    task="Resolved billing issue",
    session_eval=0.95,
    is_successful=True
)

# End session
lai.end_session(is_successful=True, session_eval=0.9)
```

### Automatic Session Management (auto_end)

By default, Lucidic automatically ends your session when your process exits, ensuring no data is lost. You can control this behavior at initialization:

```python
# Default behavior - session auto-ends on exit
lai.init(session_name="My Session")  # auto_end=True by default

# Disable auto-end if you want manual control
lai.init(session_name="My Session", auto_end=False)
```

The auto_end feature:
- Automatically calls `end_session()` when your Python process exits
- Works with normal exits, crashes, and interrupts (Ctrl+C)
- Prevents data loss from forgotten `end_session()` calls
- Can be disabled for cases where you need explicit control (see the sketch below)
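
If you disable `auto_end`, a `try`/`finally` block is one way to make sure the session is still closed when an exception escapes. This is a minimal sketch using only the calls shown above; the `success` flag is just this example's convention:

```python
import lucidicai as lai

lai.init(session_name="Manual Cleanup Example", auto_end=False)

success = False
try:
    lai.create_step(
        state="Running workflow",
        action="Do the work",
        goal="Finish cleanly"
    )
    # ... your workflow logic here ...
    lai.end_step()
    success = True
finally:
    # Runs whether or not an exception escaped, so the session is ended exactly once.
    lai.end_session(is_successful=success)
```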

### Steps

Steps break down complex workflows into discrete, trackable units.

```python
# Create a step
step_id = lai.create_step(
    state="Current context or state",
    action="What the agent is doing",
    goal="What the agent aims to achieve",
    screenshot_path="/path/to/screenshot.png"  # Optional
)

# Update step progress
lai.update_step(
    step_id=step_id,
    eval_score=0.8,
    eval_description="Partially completed task"
)

# End step
lai.end_step(step_id=step_id)
```

- NOTE: If no step exists when an LLM call is made (but Lucidic has already been initialized), Lucidic automatically creates a new step for that call. That step contains exactly one event: the LLM call itself.
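
For example, this is a minimal sketch of that behavior, assuming the OpenAI provider is enabled; no `create_step()` is called, so the LLM call lands in an auto-created step:

```python
import lucidicai as lai
from openai import OpenAI

lai.init(session_name="Auto Step Example", providers=["openai"])

# No create_step() here: Lucidic wraps this call in an automatically created
# step, and that step holds this single LLM event.
client = OpenAI()
client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Summarize auto-created steps."}]
)

lai.end_session(is_successful=True)
```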

### Events

Events are automatically tracked when using instrumented providers, but can also be created manually.

```python
# Manual event creation
event_id = lai.create_event(
    description="Generated summary",
    result="Success",
    cost_added=0.002,
    model="gpt-4",
    screenshots=["/path/to/image1.png", "/path/to/image2.png"]
)
```

## Provider Integration

### OpenAI

```python
from openai import OpenAI

lai.init(session_name="OpenAI Example", providers=["openai"])
client = OpenAI()

# All OpenAI API calls are automatically tracked
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a haiku about coding"}]
)
```

### Anthropic

```python
from anthropic import Anthropic

lai.init(session_name="Claude Example", providers=["anthropic"])
client = Anthropic()

# Anthropic API calls are automatically tracked
response = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=1024,  # required by the Anthropic Messages API
    messages=[{"role": "user", "content": "Explain quantum computing"}]
)
```

### LangChain

```python
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage

lai.init(session_name="LangChain Example", providers=["langchain"])

# LangChain calls are automatically tracked
llm = ChatOpenAI(model="gpt-4")
response = llm.invoke([HumanMessage(content="Hello!")])
```

## Advanced Features

### Decorators

Simplify your code with Python decorators for automatic tracking:

#### Step Decorator

Wrap functions to automatically create and manage steps:

```python
@lai.step(
    # All parameters are optional and auto-generated if not provided
    state="Processing data",
    action="Transform input",
    goal="Generate output",
    eval_score=1,
    eval_description="Data successfully processed",
    screenshot_path="/path/to/image"  # Sets the step image; no image if not provided
)
def process_data(input_data: dict) -> dict:
    # Your processing logic here
    result = transform(input_data)
    return result

# The function automatically creates a step, executes, and ends the step
output = process_data({"key": "value"})
```

#### Event Decorator

Track function calls as events with automatic input/output capture:

```python
@lai.event(
    # All parameters are optional
    description="Calculate statistics",  # Defaults to the function inputs if not provided
    result="Stats calculated",           # Defaults to the function output if not provided
    model="stats-engine",                # Not shown if not provided
    cost_added=0.001                     # Defaults to 0 if not provided
)
def calculate_stats(data: list) -> dict:
    return {
        'mean': sum(data) / len(data),
        'max': max(data),
        'min': min(data)
    }

# Creates an event with function inputs and outputs
stats = calculate_stats([1, 2, 3, 4, 5])
```

#### Accessing Created Steps and Events

Within decorated functions, you can access and update the created step:

```python
from lucidicai.decorators import get_decorator_step

@lai.step(state="Initial state", action="Process")
def process_with_updates(data: dict) -> dict:
    # Access the current step ID
    step_id = get_decorator_step()

    # Manually update the step - this overrides decorator parameters
    lai.update_step(
        step_id=step_id,
        state="Processing in progress",
        eval_score=0.5,
        eval_description="Halfway complete"
    )

    # Do some processing...
    result = transform(data)

    # Update again before completion
    lai.update_step(
        step_id=step_id,
        eval_score=1.0,
        eval_description="Successfully completed transformation"
    )

    return result
```

Any updates made within the decorated function overwrite the parameters passed into the decorator.

#### Nested Usage

Decorators can be nested for complex workflows:

```python
@lai.step(state="Main workflow", action="Process batch")
def process_batch(items: list) -> list:
    results = []

    @lai.event(description="Process single item")
    def process_item(item):
        # LLM calls here create their own events automatically
        return transform(item)

    for item in items:
        results.append(process_item(item))

    return results
```

#### Async Support

Both decorators fully support async functions:

```python
import asyncio

import aiohttp

@lai.step(state="Async operation", action="Fetch data")
async def fetch_data(url: str) -> dict:
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.json()

@lai.event(description="Async processing")
async def process_async(data: dict) -> dict:
    await asyncio.sleep(1)
    return transform(data)
```

### Data Masking

Protect sensitive information with custom masking functions:

```python
def mask_pii(text):
    # Your PII masking logic here
    return text.replace("SSN:", "XXX-XX-")

lai.init(
    session_name="Secure Session",
    masking_function=mask_pii
)
```
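
The `replace` call above is only a placeholder. A slightly more realistic sketch, assuming the masking function receives and returns plain strings as in the example, could redact SSN- and email-shaped substrings before they leave your process; the patterns and replacement tokens here are this example's choices, not SDK requirements:

```python
import re

import lucidicai as lai

SSN_RE = re.compile(r"\b\d{3}-\d{2}-\d{4}\b")
EMAIL_RE = re.compile(r"\b[\w.+-]+@[\w-]+\.[\w.-]+\b")

def mask_pii(text):
    # Redact anything that looks like an SSN or an email address.
    text = SSN_RE.sub("[REDACTED-SSN]", text)
    text = EMAIL_RE.sub("[REDACTED-EMAIL]", text)
    return text

lai.init(
    session_name="Secure Session",
    masking_function=mask_pii
)
```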

### Image Analysis

Upload screenshots for visual context:

```python
# With step creation
lai.create_step(
    state="Analyzing UI",
    action="Check layout",
    goal="Verify responsive design",
    screenshot_path="/path/to/screenshot.png"
)

# With events
lai.create_event(
    description="UI validation",
    screenshots=[base64_encoded_image1, base64_encoded_image2]
)
```

### Prompt Management

Fetch and cache prompts from the Lucidic platform:

```python
prompt = lai.get_prompt(
    prompt_name="customer_support",
    variables={"issue_type": "billing"},
    cache_ttl=3600,  # Cache for 1 hour
    label="v1.2"
)
```
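
The fetched prompt can then be passed straight into an instrumented provider call. This sketch assumes `get_prompt` returns the rendered prompt text as a string:

```python
from openai import OpenAI

prompt = lai.get_prompt(
    prompt_name="customer_support",
    variables={"issue_type": "billing"}
)

# Use the rendered prompt as the system message for a tracked LLM call.
client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": prompt},
        {"role": "user", "content": "I was charged twice this month."}
    ]
)
```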

### Mass Simulations

Run large-scale testing and evaluation:

```python
# Create a mass simulation
mass_sim_id = lai.create_mass_sim(
    mass_sim_name="Load Test",
    total_num_sessions=1000
)

# Initialize sessions with mass_sim_id
lai.init(
    session_name="Test Session",
    mass_sim_id=mass_sim_id
)
```
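
Each simulated run is an ordinary session attached to the same `mass_sim_id`. A sketch of driving several runs from one script might look like the following; it assumes the SDK allows starting a new session after the previous one has ended, and `run_scenario` is a hypothetical helper that exercises your agent:

```python
mass_sim_id = lai.create_mass_sim(
    mass_sim_name="Load Test",
    total_num_sessions=100
)

for i in range(100):
    # Every iteration is its own session, grouped under the simulation.
    lai.init(
        session_name=f"Load Test run {i}",
        providers=["openai"],
        mass_sim_id=mass_sim_id
    )
    passed = run_scenario(i)  # hypothetical helper returning True/False
    lai.end_session(is_successful=passed)
```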

## Error Handling

The SDK provides specific exceptions for different error scenarios:

```python
from lucidicai.errors import (
    APIKeyVerificationError,
    InvalidOperationError,
    LucidicNotInitializedError,
    PromptError
)

try:
    lai.init(session_name="My Session")
except APIKeyVerificationError:
    print("Invalid API key - check your credentials")
except LucidicNotInitializedError:
    print("SDK not initialized - call lai.init() first")
```
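
The other exceptions can be handled the same way. For instance, a sketch of guarding a prompt fetch, assuming `get_prompt` raises `PromptError` when the prompt cannot be fetched:

```python
from lucidicai.errors import PromptError

try:
    prompt = lai.get_prompt(prompt_name="customer_support")
except PromptError:
    # Fall back to a local default if the platform prompt is unavailable.
    prompt = "You are a helpful customer support assistant."
```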

## Best Practices

1. **Initialize Once**: Call `lai.init()` at the start of your application or workflow
2. **Use Steps**: Break complex workflows into logical steps for better tracking
3. **Handle Errors**: Wrap SDK calls in try-except blocks for production applications
4. **Session Cleanup**: With `auto_end` enabled (default), sessions automatically end on exit. For manual control, set `auto_end=False` and call `lai.end_session()`
5. **Mask Sensitive Data**: Use masking functions to protect PII and confidential information (a sketch tying these practices together follows this list)
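
A minimal application skeleton that applies these practices; the masking rule and workflow body are placeholders for this sketch, not SDK-mandated structure:

```python
import lucidicai as lai
from lucidicai.errors import APIKeyVerificationError

def mask_secrets(text):
    return text.replace("SECRET", "[REDACTED]")  # placeholder masking rule

def main():
    try:
        # Practice 1: initialize once, with masking (5) and manual cleanup (4).
        lai.init(
            session_name="Production Workflow",
            providers=["openai"],
            masking_function=mask_secrets,
            auto_end=False
        )
    except APIKeyVerificationError:
        print("Invalid API key - check your credentials")
        return

    success = False
    try:
        # Practice 2: one step per logical unit of work.
        lai.create_step(state="Start", action="Run workflow", goal="Finish")
        # ... your workflow logic here ...
        lai.end_step()
        success = True
    finally:
        lai.end_session(is_successful=success)

if __name__ == "__main__":
    main()
```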

## Examples

### Customer Support Bot

```python
import lucidicai as lai
from openai import OpenAI

# Initialize for customer support workflow
lai.init(
    session_name="Customer Support",
    providers=["openai"],
    task="Handle customer inquiry",
    tags=["support", "chat"]
)

# Step 1: Understand the issue
lai.create_step(
    state="Customer reported login issue",
    action="Diagnose problem",
    goal="Identify root cause"
)

client = OpenAI()
# ... your chatbot logic here ...

lai.end_step()

# Step 2: Provide solution
lai.create_step(
    state="Issue identified as password reset",
    action="Guide through reset process",
    goal="Resolve customer issue"
)

# ... more chatbot logic ...

lai.end_step()
lai.end_session(is_successful=True, session_eval=0.95)
```

### Data Analysis Pipeline

```python
import lucidicai as lai
import pandas as pd

lai.init(
    session_name="Quarterly Sales Analysis",
    providers=["openai"],
    task="Generate sales insights"
)

# Step 1: Data loading
lai.create_step(
    state="Loading Q4 sales data",
    action="Read and validate CSV files",
    goal="Prepare data for analysis"
)

# ... data loading logic ...

lai.end_step()

# Step 2: Analysis
lai.create_step(
    state="Data loaded successfully",
    action="Generate insights using GPT-4",
    goal="Create executive summary"
)

# ... LLM analysis logic ...

lai.end_step()
lai.end_session(is_successful=True)
```

## Support

- **Documentation**: [https://docs.lucidic.ai](https://docs.lucidic.ai)
- **Issues**: [GitHub Issues](https://github.com/Lucidic-AI/Lucidic-Python/issues)

## License

This SDK is distributed under the MIT License.