llumo 0.2.24__py3-none-any.whl → 0.2.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llumo/__init__.py CHANGED
@@ -4,5 +4,8 @@ from .helpingFuntions import *
4
4
  from .models import AVAILABLEMODELS
5
5
  from .execution import ModelExecutor
6
6
  from .functionCalling import *
7
- from .openai import openai
8
- from .google import genai
7
+ from .openai import OpenAI
8
+ from .google import genai
9
+ from .llumoSessionContext import *
10
+ from .llumoLogger import *
11
+ from .callback import *
llumo/callback.py ADDED
@@ -0,0 +1,480 @@
1
+ from typing import Any, Dict, List
2
+ from langchain_core.callbacks.base import BaseCallbackHandler
3
+ from langchain_core.messages import BaseMessage
4
+ from langchain_core.outputs import LLMResult
5
+ from langchain_core.agents import AgentAction, AgentFinish
6
+ import json
7
+ from llumo.llumoLogger import LLUMOLogger
8
+ from llumo.llumoSessionContext import LlumoSessionContext
9
+ import time
10
+ import re
11
+
12
+
13
class LlumoCallbackHandler(BaseCallbackHandler):
    """LangChain callback handler that forwards LLM, tool, agent and chain
    events to a Llumo session logger, while accumulating a ReAct
    (thought/action/observation) trace for agent executions."""

    def __init__(self, session: LlumoSessionContext = None):
        # A session context is mandatory; the None default exists only so a
        # missing argument produces this clearer error.
        if session is None:
            raise ValueError("LlumoSessionContext is required")

        self.sessionLogger = session

        # Timing markers, one per event category.
        self.llmStartTime = None
        self.agentStartTime = None
        self.toolStartTime = None
        self.chainStartTime = None
        self.stepTime = None

        # Per-query tracking state.
        self.prompt = ""
        self.currentToolName = None
        self.currentToolInput = None
        self.currentAgentName = None
        self.currentChainTime = "unknown"
        self.agentsSteps = 0
        self.toolsUsed = []
        self.llmProvider = "unknown"

        # Error bookkeeping.
        self.hasErrors = False
        self.lastError = None

        # ReAct-specific trace state.
        self.reactSteps = []
        self.currentThought = ""
        self.currentAction = ""
        self.currentObservation = ""
        self.isAgentExecution = False
48
+ def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
49
+ """Called when a chain starts - this includes agent execution"""
50
+ try:
51
+ self.prompt = inputs.get("input", "")
52
+ self.chainStartTime = time.time()
53
+
54
+ # Reset all tracking variables for new query
55
+ self.agentsSteps = 0
56
+ self.toolsUsed = []
57
+ self.reactSteps = []
58
+ self.currentThought = ""
59
+ self.currentAction = ""
60
+ self.currentObservation = ""
61
+ self.currentToolName = None
62
+ self.currentToolInput = None
63
+ self.hasErrors = False
64
+ self.lastError = None
65
+
66
+ # Dynamically detect agent name from serialized data
67
+ if serialized is not None:
68
+ # Check for 'name' parameter passed during initialization
69
+ if "name" in serialized:
70
+ self.currentAgentName = serialized["name"]
71
+ elif "_type" in serialized:
72
+ self.currentAgentName = serialized["_type"]
73
+ elif "kwargs" in serialized and "name" in serialized["kwargs"]:
74
+ self.currentAgentName = serialized["kwargs"]["name"]
75
+ elif "id" in serialized and isinstance(serialized["id"], list):
76
+ self.currentAgentName = serialized["id"][-1] if serialized["id"] else "unknown"
77
+ else:
78
+ self.currentAgentName = "unknown"
79
+
80
+ # Check if this is agent execution
81
+ if ("agent" in str(self.currentAgentName).lower() or
82
+ (serialized and serialized.get("_type") == "agent_executor") or
83
+ any(key in str(serialized).lower() for key in ["agent", "react", "executor"])):
84
+
85
+ self.agentStartTime = time.time()
86
+ self.isAgentExecution = True
87
+ print(f"[DEBUG] Agent execution started: {self.currentAgentName} - Reset counters for new query")
88
+ else:
89
+ self.isAgentExecution = False
90
+
91
+ except Exception as e:
92
+ print(f"[ERROR] in on_chain_start: {e}")
93
+
94
+ def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
95
+ """Called when a chain ends"""
96
+ try:
97
+ if self.isAgentExecution and isinstance(outputs, dict) and "output" in outputs:
98
+ # Use logAgentStep for final completion
99
+ self.sessionLogger.logAgentStep(
100
+ stepName="Agent Execution Completed",
101
+ agentType="react_agent",
102
+ agentName=self.currentAgentName or "unknown",
103
+ numStepsTaken=self.agentsSteps,
104
+ tools=self.toolsUsed,
105
+ query=self.prompt,
106
+ status="SUCCESS",
107
+ message=f"Final output: {outputs['output']}. ReAct steps: {json.dumps(self.reactSteps)}",
108
+ )
109
+
110
+ # Reset execution state after chain ends
111
+ self.isAgentExecution = False
112
+
113
+ except Exception as e:
114
+ print(f"[ERROR] Failed to log chain output: {e}")
115
+
116
+ def on_llm_start(self, serialized: Dict[str, Any], prompts: List[Any], **kwargs: Any) -> None:
117
+ """Called when LLM starts"""
118
+ self.llmStartTime = time.time()
119
+ self.stepTime = time.time()
120
+
121
+ # Dynamically get model info
122
+ model = "unknown"
123
+ if serialized and "kwargs" in serialized:
124
+ model = serialized["kwargs"].get("model_name",serialized["kwargs"].get("model", "unknown"))
125
+
126
+ provider = "unknown"
127
+ if isinstance(serialized.get("id", []), list) and len(serialized["id"]) > 2:
128
+ provider = serialized["id"][2]
129
+ self.llmProvider = provider
130
+
131
+ def on_llm_end(self, response: Any, **kwargs: Any) -> None:
132
+ """Called when LLM completes"""
133
+ duration_ms = int((time.time() - self.llmStartTime) * 1000) if self.llmStartTime else 0
134
+
135
+ # Initialize default values
136
+ output = ""
137
+ model_name = "unknown"
138
+ input_tokens = 0
139
+ output_tokens = 0
140
+ status = "SUCCESS"
141
+ error_message = ""
142
+
143
+ try:
144
+ # Case 1: LLMResult object
145
+ if hasattr(response, 'generations'):
146
+ if response.generations and len(response.generations) > 0:
147
+ generation = response.generations[0][0] if len(response.generations[0]) > 0 else None
148
+ if generation:
149
+ # Handle message content
150
+ if hasattr(generation, 'message'):
151
+ message = generation.message
152
+ if hasattr(message, 'content'):
153
+ output = message.content
154
+ # Handle function calls
155
+ if hasattr(message, 'additional_kwargs'):
156
+ func_call = message.additional_kwargs.get('function_call')
157
+ if func_call:
158
+ output = (f"Function call: {func_call.get('name', 'unknown')} "
159
+ f"with arguments {func_call.get('arguments', '{}')}")
160
+ if not output:
161
+ output = str(message)
162
+ else:
163
+ output = getattr(generation, 'text', str(generation))
164
+
165
+ # Get token usage and model name
166
+ if hasattr(response, 'llm_output'):
167
+ model_name = response.llm_output.get("model_name", "unknown")
168
+ usage = response.llm_output.get("token_usage", {})
169
+ input_tokens = usage.get("prompt_tokens", usage.get("input_tokens", 0))
170
+ output_tokens = usage.get("completion_tokens", usage.get("output_tokens", 0))
171
+
172
+ # Case 2: Direct AIMessage
173
+ elif hasattr(response, 'content'):
174
+ output = response.content
175
+ # Handle function calls
176
+ if hasattr(response, 'additional_kwargs'):
177
+ func_call = response.additional_kwargs.get('function_call')
178
+ if func_call:
179
+ output = (f"Function call: {func_call.get('name', 'unknown')} "
180
+ f"with arguments {func_call.get('arguments', '{}')}")
181
+
182
+ # Get metadata
183
+ if hasattr(response, 'response_metadata'):
184
+ model_name = getattr(response.response_metadata, "model_name", "unknown")
185
+ token_usage = getattr(response.response_metadata, "token_usage", {})
186
+ if isinstance(token_usage, dict):
187
+ input_tokens = token_usage.get("prompt_tokens", token_usage.get("input_tokens", 0))
188
+ output_tokens = token_usage.get("completion_tokens", token_usage.get("output_tokens", 0))
189
+
190
+ # Case 3: Other types
191
+ else:
192
+ output = str(response)
193
+ if hasattr(response, 'model'):
194
+ model_name = response.model
195
+
196
+ except Exception as e:
197
+ error_message = f"Response processing error: {str(e)}"
198
+ status = "ERROR"
199
+
200
+ # Ensure we have string values
201
+ output = str(output) if output is not None else ""
202
+ model_name = str(model_name) if model_name is not None else "unknown"
203
+
204
+ # Parse ReAct reasoning from LLM output if we're in agent execution
205
+ if self.isAgentExecution and output:
206
+ self._parse_react_reasoning(output)
207
+
208
+ try:
209
+ self.sessionLogger.logLlmStep(
210
+ stepName="LLM Call Completed",
211
+ model=model_name,
212
+ provider=self.llmProvider,
213
+ inputTokens=int(input_tokens),
214
+ outputTokens=int(output_tokens),
215
+ temperature=float(kwargs.get("temperature", 0.7)),
216
+ promptTruncated=False,
217
+ latencyMs=duration_ms,
218
+ query=str(self.prompt),
219
+ output=output,
220
+ status=status,
221
+ message=error_message if status == "ERROR" else "",
222
+ )
223
+ except Exception as e:
224
+ print(f"[ERROR] Failed to log LLM end: {e}")
225
+
226
+ def _parse_react_reasoning(self, llm_output: str):
227
+ """Parse ReAct reasoning pattern from LLM output"""
228
+ try:
229
+ # Extract thought patterns
230
+ thought_match = re.search(r'Thought:\s*(.+?)(?=Action:|$)', llm_output, re.DOTALL)
231
+ if thought_match:
232
+ self.currentThought = thought_match.group(1).strip()
233
+
234
+ # Extract action patterns
235
+ action_match = re.search(r'Action:\s*(.+?)(?=Action Input:|Thought:|$)', llm_output, re.DOTALL)
236
+ if action_match:
237
+ self.currentAction = action_match.group(1).strip()
238
+
239
+ # Extract action input patterns
240
+ action_input_match = re.search(r'Action Input:\s*(.+?)(?=Observation:|Thought:|$)', llm_output, re.DOTALL)
241
+ action_input = ""
242
+ if action_input_match:
243
+ action_input = action_input_match.group(1).strip()
244
+
245
+ # Store the reasoning step for ReAct trace
246
+ if self.currentThought or self.currentAction:
247
+ reasoning_step = {
248
+ "step_number": self.agentsSteps + 1,
249
+ "thought": self.currentThought,
250
+ "planned_action": self.currentAction,
251
+ "action_input": action_input,
252
+ "full_llm_output": llm_output,
253
+ "timestamp": time.time()
254
+ }
255
+
256
+ # Add to react steps for complete trace
257
+ if not self.reactSteps or self.reactSteps[-1].get("step_number") != reasoning_step["step_number"]:
258
+ self.reactSteps.append(reasoning_step)
259
+ else:
260
+ # Update existing step
261
+ self.reactSteps[-1].update(reasoning_step)
262
+
263
+ except Exception as e:
264
+ print(f"[ERROR] Failed to parse ReAct reasoning: {e}")
265
+
266
+ def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
267
+ """Called when a tool starts executing"""
268
+ self.toolStartTime = time.time()
269
+ self.stepTime = time.time()
270
+
271
+ # Dynamically get tool name
272
+ self.currentToolName = (serialized.get("name") or
273
+ serialized.get("_type") or
274
+ "unknown")
275
+
276
+ # Handle the case where input_str is "None" or None
277
+ if input_str == "None" or input_str is None:
278
+ self.currentToolInput = {"input": ""}
279
+ else:
280
+ try:
281
+ # Try to parse as JSON if it looks like JSON
282
+ if input_str.startswith("{") and input_str.endswith("}"):
283
+ self.currentToolInput = json.loads(input_str)
284
+ else:
285
+ self.currentToolInput = {"input": input_str}
286
+ except:
287
+ self.currentToolInput = {"input": input_str}
288
+
289
+ # Track tools used
290
+ if self.currentToolName not in self.toolsUsed:
291
+ self.toolsUsed.append(self.currentToolName)
292
+
293
+ print(f"[DEBUG] Tool started: {self.currentToolName} with input: {input_str}")
294
+
295
+ def on_tool_end(self, output: Any, **kwargs: Any) -> None:
296
+ """Called when a tool completes execution"""
297
+ duration_ms = int((time.time() - self.toolStartTime) * 1000) if self.toolStartTime else 0
298
+
299
+ try:
300
+ # Ensure output is stringified safely
301
+ if output is None:
302
+ output_str = ""
303
+ elif isinstance(output, (dict, list)):
304
+ output_str = json.dumps(output)
305
+ else:
306
+ output_str = str(output)
307
+
308
+ # Store as observation for ReAct step
309
+ self.currentObservation = output_str
310
+
311
+ # Update the current ReAct step with observation
312
+ if self.reactSteps and self.isAgentExecution:
313
+ self.reactSteps[-1]["observation"] = output_str
314
+ self.reactSteps[-1]["tool_execution_ms"] = duration_ms
315
+
316
+ self.sessionLogger.logToolStep(
317
+ stepName="Tool Execution Completed",
318
+ toolName=self.currentToolName or "unknown",
319
+ input=self.currentToolInput or {"input": ""},
320
+ output=output_str,
321
+ latencyMs=duration_ms,
322
+ status="SUCCESS",
323
+ message="",
324
+ )
325
+
326
+ print(f"[DEBUG] Tool completed: {self.currentToolName} -> {output_str}")
327
+
328
+ except Exception as e:
329
+ print(f"[ERROR] Failed to log tool end: {e}")
330
+
331
+ def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
332
+ """Called when an agent takes an action"""
333
+ self.agentsSteps += 1
334
+ print("ON AGENT ACTION: ", action)
335
+
336
+ try:
337
+ # Dynamically extract information from action
338
+ tool_name = getattr(action, "tool", "unknown")
339
+ tool_input = getattr(action, "tool_input", "")
340
+ log_message = getattr(action, "log", "")
341
+
342
+ # Track tools if not already tracked
343
+ if tool_name not in self.toolsUsed:
344
+ self.toolsUsed.append(tool_name)
345
+
346
+ # Update our ReAct steps tracking with executed action
347
+ if self.reactSteps:
348
+ self.reactSteps[-1].update({
349
+ "executed_action": tool_name,
350
+ "executed_input": tool_input,
351
+ "action_log": log_message
352
+ })
353
+
354
+ # Log the agent action step using logAgentStep
355
+ current_status = "ERROR" if self.hasErrors else "SUCCESS"
356
+ reasoning_text = self.currentThought if self.currentThought else "No reasoning captured"
357
+
358
+ self.sessionLogger.logAgentStep(
359
+ stepName=f"Agent Action Step {self.agentsSteps}",
360
+ agentType="react_agent",
361
+ agentName=self.currentAgentName or "unknown",
362
+ numStepsTaken=self.agentsSteps,
363
+ tools=[tool_name],
364
+ query=self.prompt,
365
+ status=current_status,
366
+ message=f"Executing {tool_name} with input: {tool_input}. Reasoning: {reasoning_text}",
367
+ )
368
+
369
+ except Exception as e:
370
+ print(f"[ERROR] Failed to log agent action: {e}")
371
+
372
+ def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
373
+ """Called when an agent completes execution"""
374
+ print("ON AGENT FINISH:", finish)
375
+ # We don't need to log anything here since the final result is already logged in on_chain_end
376
+ pass
377
+
378
+ def on_agent_error(self, error: Exception, **kwargs: Any) -> None:
379
+ """Called when an agent encounters an error"""
380
+ print("ITS A AGENT ERROR:", error)
381
+ self.hasErrors = True
382
+ self.lastError = str(error)
383
+
384
+ try:
385
+ self.sessionLogger.logAgentStep(
386
+ stepName="Agent Execution Error",
387
+ agentType="react_agent",
388
+ agentName=self.currentAgentName or "unknown",
389
+ numStepsTaken=self.agentsSteps,
390
+ tools=self.toolsUsed,
391
+ query=self.prompt,
392
+ status="ERROR",
393
+ message=str(error),
394
+ )
395
+ except Exception as e:
396
+ print(f"[ERROR] Failed to log agent error: {e}")
397
+
398
+ def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
399
+ """Called when a tool encounters an error"""
400
+ print("ITS A TOOL ERROR:", error)
401
+ self.hasErrors = True
402
+ self.lastError = str(error)
403
+
404
+ try:
405
+ # Update ReAct step with error observation
406
+ if self.reactSteps:
407
+ self.reactSteps[-1]["observation"] = f"ERROR: {str(error)}"
408
+ self.reactSteps[-1]["error"] = True
409
+
410
+ self.sessionLogger.logToolStep(
411
+ stepName="Tool Execution Failed",
412
+ toolName=self.currentToolName or "unknown",
413
+ input=self.currentToolInput or {"input": ""},
414
+ output="",
415
+ latencyMs=0,
416
+ status="ERROR",
417
+ message=str(error),
418
+ )
419
+ except Exception as e:
420
+ print(f"[ERROR] Failed to log tool error: {e}")
421
+
422
+ def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
423
+ """Called when a chain encounters an error"""
424
+ print("ITS A CHAIN ERROR:", error)
425
+ self.hasErrors = True
426
+ self.lastError = str(error)
427
+
428
+ try:
429
+ if self.isAgentExecution:
430
+ # Use logAgentStep for agent-related chain errors
431
+ self.sessionLogger.logAgentStep(
432
+ stepName="Agent Chain Error",
433
+ agentType="react_agent",
434
+ agentName=self.currentAgentName or "unknown",
435
+ numStepsTaken=self.agentsSteps,
436
+ tools=self.toolsUsed,
437
+ query=self.prompt,
438
+ status="ERROR",
439
+ message=str(error),
440
+ )
441
+ else:
442
+ # Use logLlmStep for general chain errors
443
+ self.sessionLogger.logLlmStep(
444
+ stepName="Chain Execution Error",
445
+ model="unknown",
446
+ provider=self.llmProvider,
447
+ inputTokens=0,
448
+ outputTokens=0,
449
+ temperature=0.0,
450
+ promptTruncated=False,
451
+ latencyMs=0,
452
+ query=self.prompt,
453
+ output="",
454
+ status="ERROR",
455
+ message=str(error),
456
+ )
457
+ except Exception as e:
458
+ print(f"[ERROR] Failed to log chain error: {e}")
459
+
460
+ def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
461
+ """Called when LLM generates a new token (for streaming)"""
462
+ pass
463
+
464
+ def on_text(self, text: str, **kwargs: Any) -> None:
465
+ """Called when arbitrary text is logged"""
466
+ # Only log significant text events during agent execution
467
+ if self.isAgentExecution and text.strip():
468
+ print(f"[DEBUG] Additional text: {text}")
469
+
470
+ # Check if this text contains important ReAct information like "Observation:"
471
+ if any(keyword in text.lower() for keyword in ['observation:']):
472
+ try:
473
+ # Update the current ReAct step with additional observation info
474
+ if self.reactSteps:
475
+ existing_obs = self.reactSteps[-1].get("observation", "")
476
+ self.reactSteps[-1][
477
+ "observation"] = f"{existing_obs}\n{text.strip()}" if existing_obs else text.strip()
478
+
479
+ except Exception as e:
480
+ print(f"[ERROR] Failed to process text event: {e}")