lucidicai 1.3.2__tar.gz → 1.3.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucidicai-1.3.2 → lucidicai-1.3.5}/PKG-INFO +1 -1
- {lucidicai-1.3.2 → lucidicai-1.3.5}/README.md +232 -2
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/__init__.py +257 -13
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/client.py +16 -1
- lucidicai-1.3.5/lucidicai/context.py +119 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/model_pricing.py +11 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/lucidic_exporter.py +16 -4
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/lucidic_span_processor.py +67 -49
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/otel_handlers.py +207 -59
- lucidicai-1.3.5/lucidicai/telemetry/otel_init.py +312 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/otel_provider.py +15 -5
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/utils/universal_image_interceptor.py +89 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai.egg-info/PKG-INFO +1 -1
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai.egg-info/SOURCES.txt +1 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/setup.py +1 -1
- lucidicai-1.3.2/lucidicai/telemetry/otel_init.py +0 -200
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/constants.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/decorators.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/errors.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/event.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/image_upload.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/lru.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/session.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/singleton.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/step.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/streaming.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/__init__.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/base_provider.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/litellm_bridge.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/openai_agents_instrumentor.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/opentelemetry_converter.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/pydantic_ai_handler.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/utils/__init__.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/utils/image_storage.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/telemetry/utils/text_storage.py +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai.egg-info/requires.txt +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-1.3.2 → lucidicai-1.3.5}/setup.cfg +0 -0
{lucidicai-1.3.2 → lucidicai-1.3.5}/README.md

@@ -5,7 +5,7 @@ The official Python SDK for [Lucidic AI](https://lucidic.ai), providing comprehe
 ## Features

 - **Session & Step Tracking** - Track complex AI agent workflows with hierarchical session management
-- **Multi-Provider Support** - Automatic instrumentation for OpenAI, Anthropic, LangChain, and more
+- **Multi-Provider Support** - Automatic instrumentation for OpenAI, Anthropic, LangChain, Google Generative AI (Gemini), Vertex AI, AWS Bedrock, Cohere, Groq, and more
 - **Real-time Analytics** - Monitor costs, performance, and behavior of your AI applications
 - **Data Privacy** - Built-in masking functions to protect sensitive information
 - **Screenshot Support** - Capture and analyze visual context in your AI workflows
@@ -49,6 +49,21 @@ lai.end_step()
 lai.end_session(is_successful=True)
 ```

+### Quick Start (context manager)
+
+```python
+import lucidicai as lai
+from openai import OpenAI
+
+# All-in-one lifecycle: init → bind → run → auto-end at context exit
+with lai.session(session_name="My AI Assistant", providers=["openai"]):
+    client = OpenAI()
+    response = client.chat.completions.create(
+        model="gpt-5",
+        messages=[{"role": "user", "content": "Hello, how are you?"}]
+    )
+```
+
 ## Configuration

 ### Environment Variables
@@ -67,7 +82,7 @@ lai.init(
     session_name="My Session",  # Required: Name for this session
     api_key="...",  # Optional: Override env var
     agent_id="...",  # Optional: Override env var
-    providers=["openai", "anthropic"],  # Optional: LLM providers to track
+    providers=["openai", "anthropic", "google", "vertexai", "bedrock", "cohere", "groq"],  # Optional: LLM providers to track
     task="Process customer request",  # Optional: High-level task description
     production_monitoring=False,  # Optional: Production mode flag
     auto_end=True,  # Optional: Auto-end session on exit (default: True)
@@ -100,6 +115,109 @@ lai.update_session(
 lai.end_session(is_successful=True, session_eval=0.9)
 ```

+### Session Context (async-safe)
+
+Lucidic uses Python contextvars to bind a session to the current execution context (threads/async tasks). This guarantees spans from concurrent requests are attributed to the correct session.
+
+There are three recommended patterns:
+
+1) Full lifecycle (auto-end on exit)
+
+```python
+import lucidicai as lai
+from openai import OpenAI
+
+with lai.session(session_name="order-flow", providers=["openai"]):
+    OpenAI().chat.completions.create(
+        model="gpt-5",
+        messages=[{"role":"user","content":"Place order"}]
+    )
+# Session automatically ends at context exit.
+# Note: any auto_end argument is ignored inside session(...).
+```
+
+Async variant:
+
+```python
+import lucidicai as lai
+from openai import AsyncOpenAI
+import asyncio
+
+async def main():
+    async with lai.session_async(session_name="async-flow", providers=["openai"]):
+        await AsyncOpenAI().chat.completions.create(
+            model="gpt-5",
+            messages=[{"role":"user","content":"Hello"}]
+        )
+
+asyncio.run(main())
+```
+
+2) Bind-only (does NOT end the session)
+
+```python
+import lucidicai as lai
+from openai import OpenAI
+
+sid = lai.init(session_name="request-123", providers=["openai"], auto_end=False)
+with lai.bind_session(sid):
+    OpenAI().chat.completions.create(
+        model="gpt-5",
+        messages=[{"role":"user","content":"..."}]
+    )
+# Session remains open. End explicitly when ready:
+lai.end_session()
+```
+
+Async variant:
+
+```python
+sid = lai.init(session_name="request-async", providers=["openai"], auto_end=False)
+
+async def run():
+    async with lai.bind_session_async(sid):
+        await AsyncOpenAI().chat.completions.create(
+            model="gpt-5",
+            messages=[{"role":"user","content":"..."}]
+        )
+
+asyncio.run(run())
+# End later
+lai.end_session()
+```
+
+3) Fully manual
+
+```python
+sid = lai.init(session_name="manual", providers=["openai"], auto_end=True)
+lai.set_active_session(sid)
+# ... your workflow ...
+lai.clear_active_session()
+# End now, or rely on auto_end at process exit
+lai.end_session()
+```
+
+Function wrappers are also provided:
+
+```python
+def do_work():
+    from openai import OpenAI
+    return OpenAI().chat.completions.create(model="gpt-5", messages=[{"role":"user","content":"wrapped"}])
+
+# Full lifecycle in one call
+result = lai.run_session(do_work, init_params={"session_name":"wrapped","providers":["openai"]})
+
+# Bind-only wrapper
+sid = lai.init(session_name="bound-only", providers=["openai"], auto_end=False)
+result = lai.run_in_session(sid, do_work)
+lai.end_session()
+```
+
+Notes:
+- The context managers are safe for threads and asyncio tasks.
+- `session(...)` always ends the session at context exit (ignores any provided auto_end).
+- Existing single-threaded usage (plain `init` + provider calls) remains supported.
+
 ### Automatic Session Management (auto_end)

 By default, Lucidic automatically ends your session when your process exits, ensuring no data is lost. This feature is enabled by default but can be controlled:
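The binding described in the hunk above rests on standard `contextvars` semantics: each asyncio task runs in a copy of the context that created it, so a session id set inside one task never leaks into a sibling. A minimal stdlib-only sketch of that behavior (illustrative only; the variable name is invented, not Lucidic code):

```python
import asyncio
import contextvars

# Hypothetical stand-in for the SDK's internal session binding.
_session_id = contextvars.ContextVar("session_id", default="<none>")

async def handle_request(name: str) -> None:
    _session_id.set(f"session-{name}")  # task-local: each task owns its copy
    await asyncio.sleep(0.01)           # yield so the two tasks interleave
    print(name, "->", _session_id.get())  # each task still sees its own id

async def main() -> None:
    # Two concurrent "requests"; neither binding overwrites the other.
    await asyncio.gather(handle_request("a"), handle_request("b"))

asyncio.run(main())
```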
@@ -118,6 +236,8 @@ The auto_end feature:
 - Prevents data loss from forgotten `end_session()` calls
 - Can be disabled for cases where you need explicit control

+When using `session(...)` or `session_async(...)`, the session will end at context exit regardless of the `auto_end` setting. A debug warning is logged if `auto_end` is provided in that context.
+
 ### Steps
 Steps break down complex workflows into discrete, trackable units.

@@ -199,6 +319,65 @@ llm = ChatOpenAI(model="gpt-4")
 response = llm.invoke([HumanMessage(content="Hello!")])
 ```

+### Google Generative AI (Gemini)
+```python
+import google.generativeai as genai
+
+lai.init(session_name="Gemini Example", providers=["google"])  # or "google_generativeai"
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+model = genai.GenerativeModel("gemini-1.5-flash")
+resp = model.generate_content("Write a haiku about clouds")
+```
+
+### Vertex AI
+```python
+from google.cloud import aiplatform
+from vertexai.generative_models import GenerativeModel
+
+lai.init(session_name="Vertex Example", providers=["vertexai"])  # or "vertex_ai"
+aiplatform.init(project=os.getenv("GCP_PROJECT"), location=os.getenv("GCP_REGION", "us-central1"))
+
+model = GenerativeModel("gemini-1.5-flash")
+resp = model.generate_content("Say hello")
+```
+
+### AWS Bedrock
+```python
+import boto3
+
+lai.init(session_name="Bedrock Example", providers=["bedrock"])  # or "aws_bedrock", "amazon_bedrock"
+client = boto3.client("bedrock-runtime", region_name=os.getenv("AWS_REGION", "us-east-1"))
+
+resp = client.invoke_model(
+    modelId=os.getenv("BEDROCK_MODEL_ID", "amazon.nova-lite-v1:0"),
+    body=b'{"inputText": "Hello from Bedrock"}',
+    contentType="application/json",
+    accept="application/json",
+)
+```
+
+### Cohere
+```python
+import cohere
+
+lai.init(session_name="Cohere Example", providers=["cohere"])
+co = cohere.ClientV2(api_key=os.getenv("COHERE_API_KEY"))
+resp = co.chat(model="command-r", messages=[{"role":"user","content":"Hello"}])
+```
+
+### Groq
+```python
+from groq import Groq
+
+lai.init(session_name="Groq Example", providers=["groq"])
+client = Groq(api_key=os.getenv("GROQ_API_KEY"))
+resp = client.chat.completions.create(
+    model="llama-3.1-8b-instant",
+    messages=[{"role":"user","content":"Hello from Groq"}],
+)
+```
+
 ## Advanced Features

 ### Decorators
@@ -399,6 +578,57 @@ except LucidicNotInitializedError:
     print("SDK not initialized - call lai.init() first")
 ```

+## Crash events on uncaught exceptions
+
+When the SDK is initialized, Lucidic will capture uncaught exceptions and create a final crash event before the process exits. This is enabled by default and requires no additional configuration.
+
+### Behavior
+
+- On an uncaught exception (main thread):
+  - A Lucidic event is created and linked to the active session.
+  - The event description contains the full Python traceback. If a `masking_function` was provided to `lai.init()`, it is applied; long descriptions are truncated to ~16K characters.
+  - The event result is set to: "process exited with code 1".
+  - The session is ended as unsuccessful with reason `uncaughtException` (independent of `auto_end`).
+  - The telemetry provider is best-effort flushed and shut down.
+  - Python's default exit behavior is preserved (exit code 1 and default exception printing).
+
+- On signals (`SIGINT`, `SIGTERM`):
+  - A final event is created with a description that includes the signal name and a best-effort stack snapshot.
+  - The event result is set to: `"process exited with code <128+signum>"` (e.g., 130 for SIGINT, 143 for SIGTERM).
+  - Existing auto-end and telemetry cleanup run, and default signal semantics are preserved.
+
+### Configuration
+
+- Enabled by default after `lai.init(...)`. To opt out:
+
+```python
+import lucidicai as lai
+
+lai.init(
+    session_name="my-session",
+    capture_uncaught=False,  # disables crash event capture
+)
+```
+
+This behavior is independent of `auto_end`; even when `auto_end` is `False`, the SDK will end the session as unsuccessful in this fatal path.
+
+### Caveats and lifecycle notes
+
+- Multiple handlers and ordering:
+  - If other libraries register their own handlers, ordering can affect which path runs first. Lucidic guards against duplication, but if another handler exits the process earlier, the crash event may not complete.
+
+- Main-thread semantics:
+  - Only uncaught exceptions on the main thread are treated as process-ending. Exceptions in worker threads do not exit the process by default and are not recorded as crash events by this mechanism.
+
+- Best-effort transport:
+  - Network issues or abrupt termination (e.g., forced container kill, `os._exit`) can prevent event delivery despite best efforts.
+
+- Exit semantics:
+  - We do not call `sys.exit(1)` from the handler; Python already exits with code 1 for uncaught exceptions, and default printing is preserved by chaining to the original `sys.excepthook`.
+
+- Not intercepted:
+  - `SystemExit` raised explicitly (e.g., `sys.exit(...)`) and `os._exit(...)` calls are not treated as uncaught exceptions and will not produce a crash event.
+
 ## Best Practices

 1. **Initialize Once**: Call `lai.init()` at the start of your application or workflow
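The "chaining" in the exit-semantics note above is the standard cooperative pattern for `sys.excepthook`: save the previous hook, do your own reporting, then call the saved hook so default traceback printing and the exit code are untouched. A generic sketch of the pattern (not the SDK's implementation, which appears in the `__init__.py` hunks below):

```python
import sys

_original_excepthook = sys.excepthook  # save so default behavior survives

def _reporting_hook(exc_type, exc, tb):
    try:
        # Best-effort reporting; never let it mask the real failure.
        print(f"[report] uncaught {exc_type.__name__}: {exc}", file=sys.stderr)
    except Exception:
        pass
    # Chain: Python still prints the traceback and exits with code 1.
    _original_excepthook(exc_type, exc, tb)

sys.excepthook = _reporting_hook
# Any uncaught exception now goes through _reporting_hook first.
```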
{lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/__init__.py

@@ -2,6 +2,9 @@ import atexit
 import logging
 import os
 import signal
+import sys
+import traceback
+import threading
 from typing import List, Literal, Optional

 from .client import Client
@@ -25,8 +28,35 @@ from .telemetry.otel_init import LucidicTelemetry

 # Import decorators
 from .decorators import step, event
+from .context import (
+    set_active_session,
+    bind_session,
+    bind_session_async,
+    clear_active_session,
+    current_session_id,
+    session,
+    session_async,
+    run_session,
+    run_in_session,
+)

-ProviderType = Literal[
+ProviderType = Literal[
+    "openai",
+    "anthropic",
+    "langchain",
+    "pydantic_ai",
+    "openai_agents",
+    "litellm",
+    "bedrock",
+    "aws_bedrock",
+    "amazon_bedrock",
+    "google",
+    "google_generativeai",
+    "vertexai",
+    "vertex_ai",
+    "cohere",
+    "groq",
+]

 # Configure logging
 logger = logging.getLogger("Lucidic")
@@ -38,6 +68,137 @@ if not logger.handlers:
     logger.setLevel(logging.INFO)


+# Crash/exit capture configuration
+MAX_ERROR_DESCRIPTION_LENGTH = 16384
+_crash_handlers_installed = False
+_original_sys_excepthook = None
+_original_threading_excepthook = None
+_shutdown_lock = threading.Lock()
+_is_shutting_down = False
+
+
+def _mask_and_truncate(text: Optional[str]) -> Optional[str]:
+    """Apply masking and truncate to a safe length. Best effort; never raises."""
+    if text is None:
+        return text
+    try:
+        masked = Client().mask(text)
+    except Exception:
+        masked = text
+    if masked is None:
+        return masked
+    return masked[:MAX_ERROR_DESCRIPTION_LENGTH]
+
+
+def _post_fatal_event(exit_code: int, description: str, extra: Optional[dict] = None) -> None:
+    """Best-effort creation of a final Lucidic event on fatal paths.
+
+    - Idempotent using a process-wide shutdown flag to avoid duplicates when
+      multiple hooks fire (signal + excepthook).
+    - Swallows all exceptions to avoid interfering with shutdown.
+    """
+    global _is_shutting_down
+    with _shutdown_lock:
+        if _is_shutting_down:
+            return
+        _is_shutting_down = True
+    try:
+        client = Client()
+        session = getattr(client, 'session', None)
+        if not session or getattr(session, 'is_finished', False):
+            return
+        arguments = {"exit_code": exit_code}
+        if extra:
+            try:
+                arguments.update(extra)
+            except Exception:
+                pass
+
+        event_id = session.create_event(
+            description=_mask_and_truncate(description),
+            result=f"process exited with code {exit_code}",
+            function_name="__process_exit__",
+            arguments=arguments,
+        )
+        session.update_event(event_id=event_id, is_finished=True)
+    except Exception:
+        # Never raise during shutdown
+        pass
+
+
+def _install_crash_handlers() -> None:
+    """Install global uncaught exception handlers (idempotent)."""
+    global _crash_handlers_installed, _original_sys_excepthook, _original_threading_excepthook
+    if _crash_handlers_installed:
+        return
+
+    _original_sys_excepthook = sys.excepthook
+
+    def _sys_hook(exc_type, exc, tb):
+        try:
+            trace_str = ''.join(traceback.format_exception(exc_type, exc, tb))
+        except Exception:
+            trace_str = f"Uncaught exception: {getattr(exc_type, '__name__', str(exc_type))}: {exc}"
+
+        # Emit final event and end the session as unsuccessful
+        _post_fatal_event(1, trace_str, {
+            "exception_type": getattr(exc_type, "__name__", str(exc_type)),
+            "exception_message": str(exc),
+            "thread_name": threading.current_thread().name,
+        })
+        try:
+            # Prevent auto_end double work
+            client = Client()
+            try:
+                client.auto_end = False
+            except Exception:
+                pass
+            # End session explicitly as unsuccessful
+            end_session()
+        except Exception:
+            pass
+        # Best-effort force flush and shutdown telemetry
+        try:
+            telemetry = LucidicTelemetry()
+            if telemetry.is_initialized():
+                try:
+                    telemetry.force_flush()
+                except Exception:
+                    pass
+                try:
+                    telemetry.uninstrument_all()
+                except Exception:
+                    pass
+        except Exception:
+            pass
+        # Chain to original to preserve default printing/behavior
+        try:
+            _original_sys_excepthook(exc_type, exc, tb)
+        except Exception:
+            # Avoid recursion/errors in fatal path
+            pass
+
+    sys.excepthook = _sys_hook
+
+    # For Python 3.8+, only treat main-thread exceptions as fatal (process-exiting)
+    if hasattr(threading, 'excepthook'):
+        _original_threading_excepthook = threading.excepthook
+
+        def _thread_hook(args):
+            try:
+                if args.thread is threading.main_thread():
+                    _sys_hook(args.exc_type, args.exc_value, args.exc_traceback)
+            except Exception:
+                pass
+            try:
+                _original_threading_excepthook(args)
+            except Exception:
+                pass
+
+        threading.excepthook = _thread_hook
+
+    _crash_handlers_installed = True
+
 def _setup_providers(client: Client, providers: List[ProviderType]) -> None:
     """Set up providers for the client, avoiding duplication

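One detail in the hunk above worth calling out: exceptions escaping worker threads are routed to `threading.excepthook`, not `sys.excepthook`, and they do not terminate the process, which is why `_thread_hook` only escalates when `args.thread is threading.main_thread()`. A standalone demonstration of that routing (stdlib only, unrelated to the SDK):

```python
import threading

def _hook(args):
    # Fires for exceptions escaping Thread.run(); the process keeps running.
    print(f"{args.thread.name} died: {args.exc_type.__name__}: {args.exc_value}")

threading.excepthook = _hook

t = threading.Thread(target=lambda: 1 / 0, name="worker-1")
t.start()
t.join()
print("main thread is still alive")
```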
@@ -81,6 +242,26 @@ def _setup_providers(client: Client, providers: List[ProviderType]) -> None:
         elif provider == "litellm":
             client.set_provider(OTelLiteLLMHandler())
             setup_providers.add("litellm")
+        elif provider in ("bedrock", "aws_bedrock", "amazon_bedrock"):
+            from .telemetry.otel_handlers import OTelBedrockHandler
+            client.set_provider(OTelBedrockHandler())
+            setup_providers.add("bedrock")
+        elif provider in ("google", "google_generativeai"):
+            from .telemetry.otel_handlers import OTelGoogleGenerativeAIHandler
+            client.set_provider(OTelGoogleGenerativeAIHandler())
+            setup_providers.add("google")
+        elif provider in ("vertexai", "vertex_ai"):
+            from .telemetry.otel_handlers import OTelVertexAIHandler
+            client.set_provider(OTelVertexAIHandler())
+            setup_providers.add("vertexai")
+        elif provider == "cohere":
+            from .telemetry.otel_handlers import OTelCohereHandler
+            client.set_provider(OTelCohereHandler())
+            setup_providers.add("cohere")
+        elif provider == "groq":
+            from .telemetry.otel_handlers import OTelGroqHandler
+            client.set_provider(OTelGroqHandler())
+            setup_providers.add("groq")

 __all__ = [
     'Client',
@@ -105,6 +286,14 @@ __all__ = [
     'InvalidOperationError',
     'step',
     'event',
+    'set_active_session',
+    'bind_session',
+    'bind_session_async',
+    'clear_active_session',
+    'session',
+    'session_async',
+    'run_session',
+    'run_in_session',
 ]


@@ -122,6 +311,7 @@ def init(
     tags: Optional[list] = None,
     masking_function = None,
     auto_end: Optional[bool] = True,
+    capture_uncaught: Optional[bool] = True,
 ) -> str:
     """
     Initialize the Lucidic client.
@@ -189,6 +379,17 @@ def init(

     # Set the auto_end flag on the client
     client.auto_end = auto_end
+    # Bind this session id to the current execution context for async-safety
+    try:
+        set_active_session(real_session_id)
+    except Exception:
+        pass
+    # Install crash handlers unless explicitly disabled
+    try:
+        if capture_uncaught:
+            _install_crash_handlers()
+    except Exception:
+        pass

     logger.info("Session initialized successfully")
     return real_session_id
@@ -232,6 +433,11 @@ def continue_session(
     client.auto_end = auto_end

     logger.info(f"Session {session_id} continuing...")
+    # Bind this session id to the current execution context for async-safety
+    try:
+        set_active_session(session_id)
+    except Exception:
+        pass
     return session_id  # For consistency


@@ -252,10 +458,20 @@ def update_session(
         is_successful: Whether the session was successful.
         is_successful_reason: Session success reason.
     """
+    # Prefer context-bound session over global active session
     client = Client()
-
+    target_sid = None
+    try:
+        target_sid = current_session_id.get(None)
+    except Exception:
+        target_sid = None
+    if not target_sid and client.session:
+        target_sid = client.session.session_id
+    if not target_sid:
         return
-
+    # Use ephemeral session facade to avoid mutating global state
+    session = client.session if (client.session and client.session.session_id == target_sid) else Session(agent_id=client.agent_id, session_id=target_sid)
+    session.update_session(**locals())


 def end_session(
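Taken together with the context helpers, the new resolution order in `update_session` means a context-bound session wins over the client's global one. A plausible call site, sketched from the API names in this diff:

```python
import lucidicai as lai

sid = lai.init(session_name="request-42", providers=["openai"], auto_end=False)
with lai.bind_session(sid):
    # Resolves the context-bound id first, so this updates "request-42"
    # even if another thread has since replaced the global active session.
    lai.update_session(is_successful=True)
lai.end_session()
```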
@@ -274,17 +490,31 @@ def end_session(
         is_successful_reason: Session success reason.
     """
     client = Client()
-
+    # Prefer context-bound session id
+    target_sid = None
+    try:
+        target_sid = current_session_id.get(None)
+    except Exception:
+        target_sid = None
+    if not target_sid and client.session:
+        target_sid = client.session.session_id
+    if not target_sid:
         return
-
-    # Wait for any pending LiteLLM callbacks before ending session
-    for provider in client.providers:
-        if hasattr(provider, '_callback') and hasattr(provider._callback, 'wait_for_pending_callbacks'):
-            logger.info("Waiting for LiteLLM callbacks to complete before ending session...")
-            provider._callback.wait_for_pending_callbacks(timeout=5.0)
-    client.session.update_session(is_finished=True, **locals())
-    client.clear()
-
+
+    # If ending the globally active session, keep existing cleanup behavior
+    if client.session and client.session.session_id == target_sid:
+        # Wait for any pending LiteLLM callbacks before ending session
+        for provider in client.providers:
+            if hasattr(provider, '_callback') and hasattr(provider._callback, 'wait_for_pending_callbacks'):
+                logger.info("Waiting for LiteLLM callbacks to complete before ending session...")
+                provider._callback.wait_for_pending_callbacks(timeout=5.0)
+        client.session.update_session(is_finished=True, **locals())
+        client.clear()
+        return
+
+    # Otherwise, end the specified session id without clearing global state
+    temp = Session(agent_id=client.agent_id, session_id=target_sid)
+    temp.update_session(is_finished=True, **locals())


 def reset_sdk() -> None:
@@ -330,6 +560,20 @@ def _auto_end_session():

 def _signal_handler(signum, frame):
     """Handle interruption signals"""
+    # Best-effort final event for signal exits
+    try:
+        try:
+            name = signal.Signals(signum).name
+        except Exception:
+            name = str(signum)
+        try:
+            stack_str = ''.join(traceback.format_stack(frame)) if frame else ''
+        except Exception:
+            stack_str = ''
+        desc = _mask_and_truncate(f"Received signal {name}\n{stack_str}")
+        _post_fatal_event(128 + signum, desc, {"signal": name, "signum": signum})
+    except Exception:
+        pass
     _auto_end_session()
     _cleanup_telemetry()
     # Re-raise the signal for default handling
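The `128 + signum` arithmetic in `_signal_handler` mirrors the shell convention for processes killed by a signal; on typical Linux signal numbers it yields exactly the codes quoted in the README section above:

```python
import signal

# Shell convention: a process killed by signal N exits with 128 + N.
print(128 + signal.SIGINT)   # 130 on Linux (SIGINT = 2), i.e. Ctrl-C
print(128 + signal.SIGTERM)  # 143 on Linux (SIGTERM = 15), i.e. default kill
```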
{lucidicai-1.3.2 → lucidicai-1.3.5}/lucidicai/client.py

@@ -69,6 +69,10 @@ class Client:

     def set_provider(self, provider: BaseProvider) -> None:
         """Set the LLM provider to track"""
+        # Avoid duplicate provider registration of the same class
+        for existing in self.providers:
+            if type(existing) is type(provider):
+                return
         self.providers.append(provider)
         provider.override()

@@ -134,6 +138,16 @@
         self.initialized = True
         return self.session.session_id

+    def create_event_for_session(self, session_id: str, **kwargs) -> str:
+        """Create an event for a specific session id without mutating global session.
+
+        This avoids cross-thread races by not switching the active session on
+        the singleton client. It constructs an ephemeral Session facade to send
+        requests under the provided session id.
+        """
+        temp_session = Session(agent_id=self.agent_id, session_id=session_id)
+        return temp_session.create_event(**kwargs)
+
     def continue_session(self, session_id: str):
         if session_id in self.custom_session_id_translations:
             session_id = self.custom_session_id_translations[session_id]
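A possible call site for the new `create_event_for_session` helper; this is a sketch rather than documented usage, and the kwargs are simply whatever `Session.create_event` accepts (the `__init__.py` hunks above pass `description`, `result`, `function_name`, and `arguments`):

```python
# Sketch only: log an event under a specific session id, e.g. from a worker
# thread, without touching the client's globally active session.
from lucidicai.client import Client

client = Client()
event_id = client.create_event_for_session(
    "your-session-id",  # assumed: a session id captured earlier from lai.init()
    description="background job finished",
    result="ok",
)
```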
@@ -149,7 +163,8 @@ class Client:
             agent_id=self.agent_id,
             session_id=real_session_id
         )
-
+        import logging as _logging
+        _logging.getLogger('Lucidic').info(f"Session {data.get('session_name', '')} continuing...")
         return self.session.session_id

     def init_mass_sim(self, **kwargs) -> str: