llumo 0.2.24__py3-none-any.whl → 0.2.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llumo/__init__.py +6 -3
- llumo/callback.py +480 -0
- llumo/callbacks-0.py +258 -0
- llumo/client.py +383 -204
- llumo/llumoLogger.py +57 -0
- llumo/llumoSessionContext.py +366 -0
- llumo/openai.py +196 -50
- {llumo-0.2.24.dist-info → llumo-0.2.26.dist-info}/METADATA +1 -1
- llumo-0.2.26.dist-info/RECORD +20 -0
- llumo-0.2.24.dist-info/RECORD +0 -16
- {llumo-0.2.24.dist-info → llumo-0.2.26.dist-info}/WHEEL +0 -0
- {llumo-0.2.24.dist-info → llumo-0.2.26.dist-info}/licenses/LICENSE +0 -0
- {llumo-0.2.24.dist-info → llumo-0.2.26.dist-info}/top_level.txt +0 -0
llumo/callbacks-0.py
ADDED
@@ -0,0 +1,258 @@
+from langchain_core.callbacks.base import BaseCallbackHandler
+from typing import Any, Dict, List, Optional, Union
+import time
+import json
+from llumo.llumoSessionContext import getSessionID, getLlumoRun
+from llumo.llumoSessionContext import LlumoSessionContext
+
+
+class LlumoCallbackHandler(BaseCallbackHandler):
+    """
+    LangChain callback handler that integrates with Llumo logging system.
+    Tracks LLM calls, tool usage, agent actions, and chains.
+    """
+
+    def __init__(self, logger):
+        self.logger = logger
+        self.start_times = {}  # Track start times for latency calculation
+        self.step_counters = {}  # Track step counts for agents
+
+    def _get_session_context(self) -> Optional[LlumoSessionContext]:
+        """Get the current Llumo session context from context variables."""
+        try:
+            session_id = getSessionID()
+            run = getLlumoRun()
+            if session_id and run:
+                # Create a temporary context object to access logging methods
+                ctx = LlumoSessionContext(self.logger, session_id)
+                return ctx
+        except Exception:
+            pass
+        return None
+
+    def _safe_serialize(self, obj: Any) -> str:
+        """Safely serialize objects to JSON string."""
+        try:
+            return json.dumps(obj, default=str, ensure_ascii=False)
+        except Exception:
+            return str(obj)
+
+    # LLM Callbacks
+    def on_llm_start(
+        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
+    ) -> None:
+        """Called when LLM starts generating."""
+        run_id = kwargs.get('run_id')
+        if run_id:
+            self.start_times[run_id] = time.time()
+
+        print("LLM started - prompts:", len(prompts))
+
+    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+        """Called when LLM generates a new token."""
+        # Optional: Could be used for streaming token tracking
+        pass
+
+    def on_llm_end(self, response: Any, **kwargs: Any) -> None:
+        """Called when LLM finishes generating."""
+        ctx = self._get_session_context()
+        if not ctx:
+            print("No Llumo session context available")
+            return
+
+        run_id = kwargs.get('run_id')
+        start_time = self.start_times.pop(run_id, time.time())
+        latency_ms = int((time.time() - start_time) * 1000)
+
+        # Extract LLM response details
+        model = getattr(response, 'model_name', 'unknown')
+
+        # Get token usage if available (llm_output may be None, so guard it)
+        token_usage = (getattr(response, 'llm_output', None) or {}).get('token_usage', {})
+        input_tokens = token_usage.get('prompt_tokens', 0)
+        output_tokens = token_usage.get('completion_tokens', 0)
+
+        # Get the generated text
+        if hasattr(response, 'generations') and response.generations:
+            output_text = response.generations[0][0].text if response.generations[0] else ""
+        else:
+            output_text = str(response)
+
+        # Get the original prompt
+        prompts = kwargs.get('prompts', [''])
+        query = prompts[0] if prompts else ""
+
+        try:
+            ctx.logLlmStep(
+                stepName=f"LLM Call - {model}",
+                model=model,
+                provider="langchain",
+                inputTokens=input_tokens,
+                outputTokens=output_tokens,
+                temperature=kwargs.get('temperature', 0.7),
+                promptTruncated=False,
+                latencyMs=latency_ms,
+                query=query,
+                output=output_text,
+                status="SUCCESS",
+                message=""
+            )
+            print(f"Logged LLM step: {model} ({latency_ms}ms)")
+        except Exception as e:
+            print(f"Failed to log LLM step: {e}")
+
+    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
+        """Called when LLM encounters an error."""
+        ctx = self._get_session_context()
+        if not ctx:
+            return
+
+        run_id = kwargs.get('run_id')
+        start_time = self.start_times.pop(run_id, time.time())
+        latency_ms = int((time.time() - start_time) * 1000)
+
+        prompts = kwargs.get('prompts', [''])
+        query = prompts[0] if prompts else ""
+
+        try:
+            ctx.logLlmStep(
+                stepName="LLM Call - Error",
+                model="unknown",
+                provider="langchain",
+                inputTokens=0,
+                outputTokens=0,
+                temperature=0.7,
+                promptTruncated=False,
+                latencyMs=latency_ms,
+                query=query,
+                output="",
+                status="FAILURE",
+                message=str(error)
+            )
+            print(f"Logged LLM error: {error}")
+        except Exception as e:
+            print(f"Failed to log LLM error: {e}")
+
+    # Chain Callbacks
+    def on_chain_start(
+        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
+    ) -> None:
+        pass
+
+    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
+        """Called when a chain ends."""
+        print("Chain execution completed")
+
+    def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
+        """Called when a chain encounters an error."""
+        print(f"Chain error: {error}")
+
+    # Tool Callbacks
+    def on_tool_start(
+        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
+    ) -> None:
+        """Called when a tool starts."""
+        run_id = kwargs.get('run_id')
+        if run_id:
+            self.start_times[run_id] = time.time()
+
+        tool_name = serialized.get('name', 'Unknown Tool')
+        print(f"Tool started: {tool_name}")
+
+    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+        """Called when a tool ends."""
+        ctx = self._get_session_context()
+        if not ctx:
+            return
+
+        run_id = kwargs.get('run_id')
+        start_time = self.start_times.pop(run_id, time.time())
+        latency_ms = int((time.time() - start_time) * 1000)
+
+        # Extract tool info from kwargs
+        serialized = kwargs.get('serialized', {})
+        tool_name = serialized.get('name', 'Unknown Tool')
+        input_str = kwargs.get('input_str', '')
+
+        try:
+            ctx.logToolStep(
+                stepName=f"Tool - {tool_name}",
+                toolName=tool_name,
+                input={"input": input_str},
+                output=output,
+                latencyMs=latency_ms,
+                status="SUCCESS",
+                message=""
+            )
+            print(f"Logged tool step: {tool_name} ({latency_ms}ms)")
+        except Exception as e:
+            print(f"Failed to log tool step: {e}")
+
+    def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
+        """Called when a tool encounters an error."""
+        ctx = self._get_session_context()
+        if not ctx:
+            return
+
+        run_id = kwargs.get('run_id')
+        start_time = self.start_times.pop(run_id, time.time())
+        latency_ms = int((time.time() - start_time) * 1000)
+
+        serialized = kwargs.get('serialized', {})
+        tool_name = serialized.get('name', 'Unknown Tool')
+        input_str = kwargs.get('input_str', '')
+
+        try:
+            ctx.logToolStep(
+                stepName=f"Tool - {tool_name} - Error",
+                toolName=tool_name,
+                input={"input": input_str},
+                output="",
+                latencyMs=latency_ms,
+                status="FAILURE",
+                message=str(error)
+            )
+            print(f"Logged tool error: {tool_name} - {error}")
+        except Exception as e:
+            print(f"Failed to log tool error: {e}")
+
+    # Agent Callbacks
+    def on_agent_action(self, action: Any, **kwargs: Any) -> None:
+        """Called when an agent takes an action."""
+        run_id = kwargs.get('run_id')
+
+        # Track agent step count
+        if run_id not in self.step_counters:
+            self.step_counters[run_id] = 0
+        self.step_counters[run_id] += 1
+
+        print(f"Agent action: {getattr(action, 'tool', 'unknown')}")
+
+    def on_agent_finish(self, finish: Any, **kwargs: Any) -> None:
+        """Called when an agent finishes."""
+        ctx = self._get_session_context()
+        if not ctx:
+            return
+
+        run_id = kwargs.get('run_id')
+        num_steps = self.step_counters.pop(run_id, 0)
+
+        try:
+            ctx.logAgentStep(
+                stepName="Agent Execution",
+                agentType="langchain_agent",
+                agentName="LangChain Agent",
+                numStepsTaken=num_steps,
+                tools=[],  # Could be populated if tool info is available
+                query=getattr(finish, 'log', ''),
+                status="SUCCESS",
+                message=""
+            )
+            print(f"Logged agent finish: {num_steps} steps")
+        except Exception as e:
+            print(f"Failed to log agent step: {e}")
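
For reference, a minimal sketch of how a LangChain callback handler like this one is typically attached to a run is shown below. It is an assumption-laden sketch, not documented llumo API: the diff shows that llumo/llumoLogger.py and llumo/callback.py exist in this release, but not their interfaces, so the LlumoLogger() constructor and the llumo.callback import path are hypothetical; only LlumoCallbackHandler(logger) matches the __init__ defined above. (llumo/callbacks-0.py itself cannot be imported as a module because of the hyphen in its name, which suggests it is a working copy of llumo/callback.py.)

    from langchain_openai import ChatOpenAI
    from llumo.llumoLogger import LlumoLogger        # hypothetical: constructor not shown in this diff
    from llumo.callback import LlumoCallbackHandler  # hypothetical import path

    logger = LlumoLogger()                  # assumption: default construction
    handler = LlumoCallbackHandler(logger)  # matches __init__(self, logger) above

    # Standard LangChain pattern: handlers passed via `callbacks` receive
    # on_llm_start / on_llm_end / on_tool_* events for this model's runs.
    llm = ChatOpenAI(model="gpt-4o-mini", callbacks=[handler])
    llm.invoke("Hello from the Llumo callback sketch.")

Note that the handler no-ops unless getSessionID() and getLlumoRun() return values, so a Llumo session context (presumably set up via llumo/llumoSessionContext.py) must be active for any step to be logged.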