llumo 0.2.38__tar.gz → 0.2.40__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {llumo-0.2.38/llumo.egg-info → llumo-0.2.40}/PKG-INFO +1 -1
  2. {llumo-0.2.38 → llumo-0.2.40}/llumo/callback.py +38 -11
  3. {llumo-0.2.38 → llumo-0.2.40}/llumo/client.py +10 -6
  4. {llumo-0.2.38 → llumo-0.2.40}/llumo/helpingFuntions.py +72 -1
  5. {llumo-0.2.38 → llumo-0.2.40}/llumo/llumoLogger.py +1 -0
  6. {llumo-0.2.38 → llumo-0.2.40}/llumo/llumoSessionContext.py +20 -9
  7. {llumo-0.2.38 → llumo-0.2.40/llumo.egg-info}/PKG-INFO +1 -1
  8. {llumo-0.2.38 → llumo-0.2.40}/LICENSE +0 -0
  9. {llumo-0.2.38 → llumo-0.2.40}/MANIFEST.in +0 -0
  10. {llumo-0.2.38 → llumo-0.2.40}/README.md +0 -0
  11. {llumo-0.2.38 → llumo-0.2.40}/llumo/__init__.py +0 -0
  12. {llumo-0.2.38 → llumo-0.2.40}/llumo/callbacks-0.py +0 -0
  13. {llumo-0.2.38 → llumo-0.2.40}/llumo/chains.py +0 -0
  14. {llumo-0.2.38 → llumo-0.2.40}/llumo/exceptions.py +0 -0
  15. {llumo-0.2.38 → llumo-0.2.40}/llumo/execution.py +0 -0
  16. {llumo-0.2.38 → llumo-0.2.40}/llumo/functionCalling.py +0 -0
  17. {llumo-0.2.38 → llumo-0.2.40}/llumo/google.py +0 -0
  18. {llumo-0.2.38 → llumo-0.2.40}/llumo/models.py +0 -0
  19. {llumo-0.2.38 → llumo-0.2.40}/llumo/openai.py +0 -0
  20. {llumo-0.2.38 → llumo-0.2.40}/llumo/sockets.py +0 -0
  21. {llumo-0.2.38 → llumo-0.2.40}/llumo.egg-info/SOURCES.txt +0 -0
  22. {llumo-0.2.38 → llumo-0.2.40}/llumo.egg-info/dependency_links.txt +0 -0
  23. {llumo-0.2.38 → llumo-0.2.40}/llumo.egg-info/requires.txt +0 -0
  24. {llumo-0.2.38 → llumo-0.2.40}/llumo.egg-info/top_level.txt +0 -0
  25. {llumo-0.2.38 → llumo-0.2.40}/setup.cfg +0 -0
  26. {llumo-0.2.38 → llumo-0.2.40}/setup.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.38
+ Version: 0.2.40
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo
llumo/callback.py
@@ -1,3 +1,4 @@
+ from logging import lastResort
  from typing import Any, Dict, List
  from langchain_core.callbacks.base import BaseCallbackHandler
  from langchain_core.messages import BaseMessage
@@ -17,7 +18,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  raise ValueError("LlumoSessionContext is required")

  self.sessionLogger = session
- self.sessionLogger.isLangchain = True
+ self.sessionLogger.logger.isLangchain = True
  self.agentType = agentType

  # Initialize timing and state variables
@@ -30,6 +31,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  # Initialize tracking variables
  self.prompt = ""
  self.searchQuery = ""
+ self.currentInputTokens = 0
  self.currentToolName = None
  self.currentToolInput = None
  self.currentAgentName = None
@@ -48,8 +50,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  self.currentAction = ""
  self.currentObservation = ""
  self.isAgentExecution = False
-
-
+ self.availableTools = None

  def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
  """Called when a chain starts - this includes agent execution"""
@@ -86,8 +87,9 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  elif "id" in serialized and isinstance(serialized["id"], list):
  self.currentAgentName = serialized["id"][-1] if serialized["id"] else "unknown"
  else:
- self.currentAgentName = "unknown"
-
+ self.currentAgentName = kwargs.get("name","unknown")
+ else:
+ self.currentAgentName = kwargs.get("name", "unknown")
  # Check if this is agent execution
  if ("agent" in str(self.currentAgentName).lower() or
  (serialized and serialized.get("_type") == "agent_executor") or
@@ -124,6 +126,9 @@ class LlumoCallbackHandler(BaseCallbackHandler):

  def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
  """Called when a chain ends"""
+ # print("ON CHAIN END: ",outputs)
+ # print("ON CHAIN END: ",kwargs)
+
  try:
  if self.isAgentExecution and isinstance(outputs, dict) and "output" in outputs:
  # Use logAgentStep for final completion
@@ -147,16 +152,27 @@ class LlumoCallbackHandler(BaseCallbackHandler):

  def on_llm_start(self, serialized: Dict[str, Any], prompts: List[Any], **kwargs: Any) -> None:
  """Called when LLM starts"""
+ # print("ON LLM START: ",serialized)
+ # print("ON LLM START: ", prompts)
+ # print("ON LLM START: ", kwargs)
+ try:
+ self.availableTools = kwargs["invocation_params"]["functions"]
+ except:
+ self.availableTools = []
+
+
  self.llmStartTime = time.time()
  self.stepTime = time.time()
-
+ # print(prompts)
  if self.prompt == "":
  match = re.search(r"Human:\s*(.*)",prompts[0], re.DOTALL)
+ # allPromptInstructions = " ".join(prompts)
  if match:
  user_question = match.group(1).strip()
  self.prompt = user_question # 👉 What is LangChain?
  else:
  self.prompt = ""
+ # self.allPrompt = allPromptInstructions

  # Dynamically get model info
  model = "unknown"
@@ -244,9 +260,11 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  # Ensure we have string values
  output = str(output) if output is not None else ""
  model_name = str(model_name) if model_name is not None else "unknown"
+ self.currentInputTokens = input_tokens

  # Parse ReAct reasoning from LLM output if we're in agent execution
  if self.isAgentExecution and output:
+ # print("[AGENT EXECUTOR OUTPUT]")
  self._parse_react_reasoning(output)
  try:
  self.sessionLogger.logLlmStep(
@@ -286,7 +304,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  retrieverSource = str(source),
  topK = len(documents),
  chunkSize = chunkSize,
- context = [doc.page_content for doc in documents],
+ context = " ".join([doc.page_content for doc in documents]),
  searchQuery = self.prompt if self.prompt != "" else self.searchQuery,
  latencyMs = 120, # mock latency, replace with real timing if needed
  status = "SUCCESS"
@@ -385,6 +403,9 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  # print(f"[DEBUG] Tool started: {self.currentToolName} with input: {input_str}")

  def on_tool_end(self, output: Any, **kwargs: Any) -> None:
+ # print("ON TOOL END: ",output)
+ # print("ON TOOL END: ",kwargs)
+
  """Called when a tool completes execution"""
  duration_ms = int((time.time() - self.toolStartTime) * 1000) if self.toolStartTime else 0

@@ -423,7 +444,9 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
  """Called when an agent takes an action"""
  self.agentsSteps += 1
+
  # print("ON AGENT ACTION: ", action)
+ # print("ON AGENT ACTION: ", kwargs)

  try:
  # Dynamically extract information from action
@@ -452,7 +475,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  agentType=self.agentType,
  agentName=self.currentAgentName or "unknown",
  numStepsTaken=self.agentsSteps,
- tools=[tool_name],
+ tools=self.availableTools,
  query=self.prompt,
  status=current_status,
  # message=f"Executing {tool_name} with input: {tool_input}. Reasoning: {reasoning_text}",
@@ -471,7 +494,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):

  def on_agent_error(self, error: Exception, **kwargs: Any) -> None:
  """Called when an agent encounters an error"""
- # print("ITS A AGENT ERROR:", error)
+ # print("ON AGENT ERROR:", error)
  self.hasErrors = True
  self.lastError = str(error)

@@ -491,6 +514,8 @@ class LlumoCallbackHandler(BaseCallbackHandler):

  def on_tool_error(self, error: Exception, **kwargs: Any) -> None:
  """Called when a tool encounters an error"""
+ # print("ON TOOL ERROR: ",error)
+ # print("ON TOOL ERROR: ", kwargs)

  self.hasErrors = True
  self.lastError = str(error)
@@ -517,6 +542,8 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  def on_chain_error(self, error: Exception, **kwargs: Any) -> None:
  """Called when a chain encounters an error"""
  # print("ITS A CHAIN ERROR:", error)
+ # print("ITS A CHAIN ERROR:", kwargs)
+
  self.hasErrors = True
  self.lastError = str(error)

@@ -542,13 +569,13 @@ class LlumoCallbackHandler(BaseCallbackHandler):
  stepName="Chain Execution Error",
  model="unknown",
  provider=self.llmProvider,
- inputTokens=0,
+ inputTokens=self.currentInputTokens,
  outputTokens=0,
  # temperature=0.0,
  # promptTruncated=False,
  latencyMs=0,
  prompt=self.prompt,
- output="",
+ output=self.lastError,
  status="FAILURE",
  # message=str(error),
  )
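
For orientation, a minimal sketch of how this handler can be attached to a LangChain run. The constructor parameter names follow the `__init__` shown in this diff; how `LlumoSessionContext` itself is built and the `agentType` value are assumptions, and only the callbacks-in-config convention comes from LangChain itself.

```python
# Hedged sketch: wiring LlumoCallbackHandler into a LangChain agent call.
# The session construction below is hypothetical; real arguments may differ.
from llumo.llumoSessionContext import LlumoSessionContext
from llumo.callback import LlumoCallbackHandler

session = LlumoSessionContext()                      # hypothetical construction
handler = LlumoCallbackHandler(session=session,      # handler requires a session (see __init__ above)
                               agentType="react")    # illustrative agentType value

# LangChain runnables accept callback handlers via the invoke config, e.g.:
# result = agent_executor.invoke(
#     {"input": "What is LangChain?"},
#     config={"callbacks": [handler]},
# )
```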
llumo/client.py
@@ -779,7 +779,8 @@ class LlumoClient:
  def debugLogs(
  self,
  data,
- prompt_template="",
+ promptTemplate="",
+ systemInstructions = ""

  ):
  if isinstance(data, dict):
@@ -830,15 +831,15 @@ class LlumoClient:
  context = ""

  # Process prompt template if provided
- if prompt_template:
+ if promptTemplate:
  # Extract template variables
- keys = re.findall(r"{{(.*?)}}", prompt_template)
+ keys = re.findall(r"{{(.*?)}}", promptTemplate)

  if not all([key in dataframe.columns for key in keys]):
  raise LlumoAIError.InvalidPromptTemplate()

  # Populate template and separate query/context
- populated_template = prompt_template
+ populated_template = promptTemplate
  for key in keys:
  value = row.get(key, "")
  if isinstance(value, str):
@@ -908,7 +909,10 @@ class LlumoClient:
  "createdAt":createdAt,
  "columnID":rowID,
  "rowID":columnID,
- "latency": random.randint(1000, 1500)
+ "latency": random.randint(1000, 1500),
+ "promptTemplate":promptTemplate,
+ "systemInstructions":systemInstructions
+
  }

  allBatches.append(batch)
@@ -918,7 +922,7 @@ class LlumoClient:

  try:
  # print(batch)
- response = postForListOfSteps(record=batch,workspaceID=workspaceID)
+ response = postForDebugLogs(record=batch,workspaceID=workspaceID)

  # failure case inside response
  if isinstance(response, dict) and str(response.get("status", "")).lower() == "false":
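
Taken together, a hedged call sketch for the renamed `debugLogs` parameters; the client construction and the shape of the rows passed as `data` are placeholders, not documented usage.

```python
# Hedged sketch of debugLogs with the renamed/added keyword arguments in 0.2.40.
# The constructor argument and the row fields below are assumptions for illustration.
from llumo.client import LlumoClient

client = LlumoClient("LLUMO_API_KEY")   # hypothetical construction
client.debugLogs(
    data=[{"query": "What is LangChain?", "context": "LangChain is a framework ..."}],
    promptTemplate="Use {{context}} to answer {{query}}",  # was prompt_template in 0.2.38
    systemInstructions="Answer briefly.",                  # new optional argument in 0.2.40
)
```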
llumo/helpingFuntions.py
@@ -740,7 +740,7 @@ def getCustomAnalytics(workspaceID):



- def postForListOfSteps(record: {},workspaceID):
+ def postForDebugLogs(record: {},workspaceID):
  url = "https://backend-api.llumo.ai/api/v1/get-debug-log-for-upload"
  payload = record
  workspaceID = workspaceID
@@ -763,3 +763,74 @@ def postForListOfSteps(record: {},workspaceID):

  except Exception as e:
  return {"status":"False","exception": str(e)}
+
+
+ def removeLLmStep(run: dict) -> dict:
+ """
+ Remove LLM steps that appear immediately before an AGENT step.
+
+ """
+
+ if not run or "steps" not in run:
+ return run
+
+ steps = run["steps"]
+ indices_to_remove = set()
+ llm_stack = [] # stack of indices where stepType == "LLM"
+
+ for idx, step in enumerate(steps):
+ step_type = step.get("stepType")
+
+ if step_type == "LLM":
+ llm_stack.append(idx)
+
+ elif step_type == "AGENT":
+ if llm_stack:
+ last_llm_idx = llm_stack[-1]
+
+ # ✅ Only remove if LLM is immediately before AGENT
+ if last_llm_idx == idx - 1:
+ indices_to_remove.add(last_llm_idx)
+ llm_stack.pop() # matched, so pop
+
+ # Rebuild steps excluding removed indices
+ cleaned_steps = [
+ step for i, step in enumerate(steps)
+ if i not in indices_to_remove
+ ]
+
+ run["steps"] = cleaned_steps
+ return run
+
+
+ def addSelectedTools(run: dict) -> dict:
+ """
+ Populate metadata.toolSelected in AGENT steps based on TOOL executions.
+ """
+
+ if not run or "steps" not in run:
+ return run
+
+ steps = run["steps"]
+ current_agent_step = None
+
+ for step in steps:
+ step_type = step.get("stepType")
+
+ # Track the most recent AGENT step
+ if step_type == "AGENT":
+ current_agent_step = step
+
+ # Ensure toolSelected exists
+ metadata = current_agent_step.get("metadata", {})
+ metadata.setdefault("toolSelected", [])
+ current_agent_step["metadata"] = metadata
+
+ # When TOOL is executed, attach it to last AGENT
+ elif step_type == "TOOL" and current_agent_step:
+ tool_name = step.get("metadata", {}).get("toolName")
+
+ if tool_name:
+ current_agent_step["metadata"]["toolSelected"].append(tool_name)
+
+ return run
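
Both helpers are pure functions over the run payload, so their combined effect can be shown with a tiny in-memory example; the step dictionaries below are simplified stand-ins for the real payload shape.

```python
# Illustrative only: simplified run payload exercising the two new helpers.
from llumo.helpingFuntions import removeLLmStep, addSelectedTools

run = {
    "steps": [
        {"stepType": "LLM"},                                      # immediately precedes an AGENT step -> removed
        {"stepType": "AGENT", "metadata": {"agentName": "a1"}},
        {"stepType": "TOOL", "metadata": {"toolName": "search"}},
        {"stepType": "LLM"},                                      # not followed by an AGENT step -> kept
    ]
}

run = removeLLmStep(run)
run = addSelectedTools(run)

print([s["stepType"] for s in run["steps"]])        # ['AGENT', 'TOOL', 'LLM']
print(run["steps"][0]["metadata"]["toolSelected"])  # ['search']
```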
llumo/llumoLogger.py
@@ -8,6 +8,7 @@ class LlumoLogger:
  self.workspaceID = None
  self.playgroundID = None
  self.userEmailID = None
+ self.isLangchain = False
  self._authenticate()

  def _authenticate(self):
llumo/llumoSessionContext.py
@@ -1,3 +1,4 @@
+
  import contextvars
  import uuid
  from typing import Optional, List, Dict, Any
@@ -6,7 +7,8 @@ import requests
  from .client import LlumoClient
  import math
  import base64
-
+ from .helpingFuntions import removeLLmStep
+ from .helpingFuntions import addSelectedTools
  import random

  _ctxLogger = contextvars.ContextVar("ctxLogger")
@@ -35,7 +37,7 @@ class LlumoSessionContext(LlumoClient):
  self.threadLogger = None
  self.threadSessionID = None
  self.threadLlumoRun = None
- self.isLangchain = False
+

  def start(self):
  self.threadLogger = _ctxLogger.set(self.logger)
@@ -57,9 +59,9 @@ class LlumoSessionContext(LlumoClient):
  self.end()


- def startLlumoRun(self, runName: str, rowID: str = "", columnID: str = "", runID: str = None):
+ def startLlumoRun(self, runName: str,promptTemplate:str = "",systemInstructions:str = "", rowID: str = "", columnID: str = "", runID: str = None):

- if runID is None:
+ if runID == None:
  LlumoRunID = str(uuid.uuid4().hex[:16])
  else:
  LlumoRunID = runID
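
A brief hedged sketch of the widened `startLlumoRun` signature; how the session context is constructed is an assumption, and the argument values are illustrative.

```python
# Hedged sketch: passing the new promptTemplate/systemInstructions arguments (0.2.40).
from llumo.llumoSessionContext import LlumoSessionContext

ctx = LlumoSessionContext()                                # hypothetical construction
ctx.startLlumoRun(
    runName="support-bot-run",
    promptTemplate="Use {{context}} to answer {{query}}",  # new in 0.2.40
    systemInstructions="Be concise.",                      # new in 0.2.40
)
```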
@@ -82,7 +84,7 @@ class LlumoSessionContext(LlumoClient):
  "sessionID": self.sessionID,
  "playgroundID": self.logger.getPlaygroundID(),
  "workspaceID": self.logger.getWorkspaceID(),
- "source": "SDK_LANGCHAIN" if self.isLangchain else "SDK_OTHERS",
+ "source": "SDK_LANGCHAIN" if self.logger.isLangchain else "SDK_OTHERS",
  "rowID": rowID,
  "columnID": columnID,
  "email": self.logger.getUserEmailID(),
@@ -100,8 +102,9 @@ class LlumoSessionContext(LlumoClient):
  "totalTokens": "",
  "cost": "",
  "modelsUsed": "gpt-4o",
-
- }
+ },
+ "promptTemplate":promptTemplate,
+ "systemInstructions":systemInstructions
  }

  self.threadLlumoRun = _ctxLlumoRun.set(llumoRun)
@@ -169,8 +172,15 @@ class LlumoSessionContext(LlumoClient):

  try:
  # print("[PAYLOAD]: ",run)
- payload = run
+
+ payload = removeLLmStep(run)
+ # print("*******PAYLOAD AFTER removeLLmStep*******: ", payload)
+
+ payload = addSelectedTools(payload)
+ # print("********PAYLOAD AFTER addSelectedTools*********: ", payload)
+
  response = requests.post(url, headers=headers, json=payload, timeout=20)
+
  response.raise_for_status()
  # print("[PAYLOAD]: ",response.json())

@@ -293,7 +303,7 @@ class LlumoSessionContext(LlumoClient):
  "retrieverSource": retrieverSource,
  "topK": topK,
  "chunkSize":chunkSize,
- "context": context,
+ "context": " ".join(context),
  "searchQuery": searchQuery,
  "latencyMs": latencyMs,
  "status": status,
@@ -318,6 +328,7 @@ class LlumoSessionContext(LlumoClient):
  "agentName": agentName,
  "numStepsTaken": numStepsTaken,
  "tools": tools,
+ "toolSelected":[],
  "query": query,
  "status": status,
  # "message": message,
llumo.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.38
+ Version: 0.2.40
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo