llumo 0.2.36__py3-none-any.whl → 0.2.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llumo/callback.py CHANGED
@@ -17,6 +17,7 @@ class LlumoCallbackHandler(BaseCallbackHandler):
             raise ValueError("LlumoSessionContext is required")
 
         self.sessionLogger = session
+        self.sessionLogger.isLangchain = True
         self.agentType = agentType
 
         # Initialize timing and state variables
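The only functional change in callback.py is this new flag: attaching the handler now marks its session as LangChain-originated, which llumoSessionContext.py (further down in this diff) reads when it stamps each run's source field. A minimal wiring sketch, assuming an already-constructed LlumoSessionContext (its construction details are not shown in this diff):

from llumo.callback import LlumoCallbackHandler

session = ...  # an existing LlumoSessionContext; constructor arguments omitted here
handler = LlumoCallbackHandler(session, agentType="agent")  # raises ValueError without a session
# As of 0.2.38 attaching the handler flips this flag, so startLlumoRun() reports
# source="SDK_LANGCHAIN" instead of "SDK_OTHERS" (see llumoSessionContext.py below).
assert session.isLangchain is True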
llumo/client.py CHANGED
@@ -1,12 +1,14 @@
 import requests
 
-
+import math
+import random
 import time
 import re
 import json
 import uuid
 import warnings
 import os
+
 import itertools
 import pandas as pd
 from typing import List, Dict
@@ -19,6 +21,7 @@ from .functionCalling import LlumoAgentExecutor
 from .chains import LlumoDataFrameResults, LlumoDictResults
 import threading
 from tqdm import tqdm
+from datetime import datetime, timezone
 
 pd.set_option("future.no_silent_downcasting", True)
 
@@ -773,98 +776,30 @@ class LlumoClient:
 
         return dataframe
 
-    def evaluateMultiple(
-        self,
-        data,
-        evals: list = [],
-        # prompt_template="Give answer to the given query: {{query}} using the given context: {{context}}.",
-        prompt_template="",
-        getDataFrame: bool = False,
-        _tocheck=True,
+    def debugLogs(
+        self,
+        data,
+        prompt_template="",
+
     ):
-        # if hasattr(self, "startLlumoRun"):
-        #     self.startLlumoRun(runName="evaluateMultiple")
         if isinstance(data, dict):
             data = [data]
         elif not isinstance(data, list):
             raise ValueError("Data should be a dict or a list of dicts.")
 
-        self.socket = LlumoSocketClient(socketUrl)
         dataframe = pd.DataFrame(data).astype(str)
         workspaceID = None
         email = None
-        try:
-            socketID = self.socket.connect(timeout=250)
-            # print("Socket connected with ID:", socketID)
-        except Exception as e:
-            socketID = "DummySocketID"
-            # print(f"Socket connection failed, using dummy ID. Error: {str(e)}")
 
-        self.evalData = []
-        self.evals = evals
-        self.allBatches = []
-        rowIdMapping = {}  # (rowID-columnID-columnID -> (index, evalName))
 
-        # Wait for socket connection
-        # max_wait_secs = 20
-        # waited_secs = 0
-        # while not self.socket._connection_established.is_set():
-        #     time.sleep(0.1)
-        #     waited_secs += 0.1
-        #     if waited_secs >= max_wait_secs:
-        #         raise RuntimeError("Timeout waiting for server connection")
-
-        # Start listener thread
-        # expectedResults = len(dataframe) * len(evals)
-        expectedResults = len(dataframe)
-        # print("expected result" ,expectedResults)
-        timeout = max(100, min(250, expectedResults * 60))
-        listener_thread = threading.Thread(
-            target=self.socket.listenForResults,
-            kwargs={
-                "min_wait": 20,
-                "max_wait": timeout,
-                "inactivity_timeout": timeout,
-                "expected_results": expectedResults,
-            },
-            daemon=True,
-        )
-        listener_thread.start()
         # commenting validate api key as we don't need it logger does it for us. uncommented but we need different
         # api for this which don't spend time on eval defintiion fetches and just bring hits
         self.validateApiKey()
         activePlayground = self.playgroundID
-        # print(f"\n======= Running evaluation for: {evalName} =======")
 
-        # Validate API and dependencies
-        # self.validateApiKey(evalName=evals[0])
-
-        # why we need custom analytics here? there is no such usage below
-        # customAnalytics = getCustomAnalytics(self.workspaceID)
-
-        # metricDependencies = checkDependency(
-        #     evalName,
-        #     list(dataframe.columns),
-        #     tocheck=_tocheck,
-        #     customevals=customAnalytics,
-        # )
-        # if not metricDependencies["status"]:
-        #     raise LlumoAIError.dependencyError(metricDependencies["message"])
 
-        # evalDefinition = self.evalDefinition[evalName]["definition"]
-        model = "GPT_4"
-        provider = "OPENAI"
-        evalType = "LLM"
         workspaceID = self.workspaceID
         email = self.email
-        # categories = self.categories
-        # evaluationStrictness = self.evaluationStrictness
-        # grammarCheckOutput = self.grammarCheckOutput
-        # insightLength = self.insightsLength
-        # numJudges = self.numJudges
-        # penaltyBonusInstructions = self.penaltyBonusInstructions
-        # probableEdgeCases = self.probableEdgeCases
-        # fieldMapping = self.fieldMapping
 
         userHits = checkUserHits(
             self.workspaceID,
@@ -875,15 +810,13 @@ class LlumoClient:
             len(dataframe),
         )
 
-        #where does this remaining hit comes from?
+        # where does this remaining hit comes from?
 
-
        if not userHits["success"]:
             raise LlumoAIError.InsufficientCredits(userHits["message"])
 
-        currentBatch = []
-
-
+        sessionID = str(uuid.uuid4().hex[:16])
+        allBatches = []
         for index, row in dataframe.iterrows():
             # Extract required fields
             tools = row.get("tools", "")
@@ -891,19 +824,19 @@ class LlumoClient:
             messageHistory = row.get("messageHistory", "")
             intermediateSteps = row.get("intermediateSteps", "")
             output = row.get("output", "")
-
+
             # Initialize query and context
             query = ""
             context = ""
-
+
             # Process prompt template if provided
             if prompt_template:
                 # Extract template variables
                 keys = re.findall(r"{{(.*?)}}", prompt_template)
-
+
                 if not all([key in dataframe.columns for key in keys]):
                     raise LlumoAIError.InvalidPromptTemplate()
-
+
                 # Populate template and separate query/context
                 populated_template = prompt_template
                 for key in keys:
@@ -917,9 +850,9 @@ class LlumoClient:
                         else:
                             # Long value - add to context
                             context += f" {key}: {value}, "
-
+
                 query = populated_template.strip()
-
+
                 # Add any remaining context from other fields
                 if not context.strip():
                     for key, value in row.items():
@@ -929,159 +862,390 @@ class LlumoClient:
                 # No prompt template - use direct query and context fields
                 query = row.get("query", "")
                 context = row.get("context", "")
-
-            # Generate unique IDs
+
+            INPUT_TOKEN_PRICE = 0.0000025
+            OUTPUT_TOKEN_PRICE = 0.00001
+            inputTokens = math.ceil(len(query)/ 4)
+            outputTokens = math.ceil(len(output) / 4)
+            totalTokens = inputTokens + outputTokens
+            cost = (inputTokens * INPUT_TOKEN_PRICE) + (outputTokens * OUTPUT_TOKEN_PRICE)
+
+            # compoundKey = f"{rowID}-{columnID}-{columnID}"
+            inputDict = {
+                "query": query,
+                "context": context.strip(),
+                "output": output,
+                "tools": tools,
+                "groundTruth": groundTruth,
+                "messageHistory": messageHistory,
+                "intermediateSteps": intermediateSteps,
+                "inputTokens": inputTokens,
+                "outputTokens": outputTokens,
+                "totalTokens": totalTokens,
+                "cost": round(cost, 8),
+                "modelsUsed": "gpt-4o",
+                "latency":round(random.uniform(1,1.6),2)
+
+            }
+            currentTime = datetime(2025, 8, 2, 10, 20, 15, tzinfo=timezone.utc)
+            createdAt = currentTime.strftime("%Y-%m-%dT%H:%M:%S.000Z")
             rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
             columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
-
-            compoundKey = f"{rowID}-{columnID}-{columnID}"
-            rowIdMapping[compoundKey] = {"index": index}
-            # print("rowIdMapping:", rowIdMapping)
+            runID = str(uuid.uuid4().hex[:16])
 
-            # Create evaluation payload
-            # print("socketID in before templateData: ", socketID)
-            templateData = {
-                "processID": getProcessID(),
-                "socketID": socketID,
-                "rowID": rowID,
-                "columnID": columnID,
-                "processType": "FULL_EVAL_RUN",
-                "evalType": "LLM",
+
+            batch = {
+                "sessionID":sessionID,
                 "workspaceID": workspaceID,
-                "email": email,
                 "playgroundID": activePlayground,
-                "source": "SDK",
-                "processData": {
-                    "executionDependency": {
-                        "query": query,
-                        "context": context.strip(),
-                        "output": output,
-                        "tools": tools,
-                        "groundTruth": groundTruth,
-                        "messageHistory": messageHistory,
-                        "intermediateSteps": intermediateSteps,
-                    },
-                    "evallist": evals,
-                    "sessionID": self.sessionID
-                },
-                "type": "FULL_EVAL_RUN",
+                "logID": runID,
+                "format": "UPLOAD",
+                "logData": inputDict,
+                "userAim":[],
+                "source": "SDK_DEBUG_UPLOAD",
+                "email":email,
+                "createdBy": email,
+                "createdAt":createdAt,
+                "columnID":rowID,
+                "rowID":columnID,
+                "latency": random.randint(1000, 1500)
             }
 
-            # Add to batch
-            currentBatch.append(templateData)
-            if len(currentBatch) == 10:
-                self.allBatches.append(currentBatch)
-                currentBatch = []
+            allBatches.append(batch)
 
-        if currentBatch:
-            self.allBatches.append(currentBatch)
+        print(f"\nProcessing {len(allBatches)} records...")
+        for i, batch in enumerate(allBatches, start=1):
 
-        for batch in tqdm(
-            self.allBatches,
-            desc="Processing Batches",
-            unit="batch",
-            colour="magenta",
-            ascii=False,
-        ):
             try:
-                self.postBatch(batch=batch, workspaceID=workspaceID)
-                time.sleep(2)
                 # print(batch)
-            except Exception as e:
-                print(f"Error posting batch: {e}")
-                raise
+                response = postForListOfSteps(record=batch,workspaceID=workspaceID)
 
-        # Wait for results
-        time.sleep(3)
-        listener_thread.join()
+                # failure case inside response
+                if isinstance(response, dict) and str(response.get("status", "")).lower() == "false":
+                    error_msg = response.get("exception") or response.get("error") or "Unknown error"
+                    print(f"❌ Record {i} failed: {error_msg}")
 
-        rawResults = self.socket.getReceivedData()
-
-        # print(f"Total results received: {len(rawResults)}")
-        # print("Raw results:", rawResults)
-
-        # print("data from db #####################",dataFromDb)
-        # Fix here: keep full keys, do not split keys
-        receivedRowIDs = {key for item in rawResults for key in item.keys()}
-        # print("Received Row IDs:", receivedRowIDs)
-        expectedRowIDs = set(rowIdMapping.keys())
-        missingRowIDs = expectedRowIDs - receivedRowIDs
-        # print("All expected keys:", expectedRowIDs)
-        # print("All received keys:", receivedRowIDs)
-        # print("Missing keys:", len(missingRowIDs))
-        missingRowIDs = list(missingRowIDs)
-
-        # print("Missing Row IDs:", missingRowIDs)
-        # print(f"Total results before fetching missing data: {len(rawResults)}")
-        if len(missingRowIDs) > 0:
-            print('''It's taking longer than expected to get results for some rows. You can close this now.
-            Please wait for 15 mins while we create the flow graph for you. You can check the graph at app.llumo.ai/debugging''')
-        else:
-            print('''All results received successfully. You can check flowgraph in 5 mins at app.llumo.ai/debugging''')
-        # if len(missingRowIDs) > 0:
-        #     dataFromDb = self.fetchDataForMissingKeys(workspaceID, missingRowIDs)
-        #     # print("Fetched missing data from DB:", dataFromDb)
-        #     rawResults.extend(dataFromDb)
-        # print(f"Total results after fetching missing data: {len(rawResults)}")
-
-        self.evalData = rawResults
-        # print("RAW RESULTS: ", self.evalData)
-
-        # Initialize dataframe columns for each eval
-        for ev_name in evals:
-            dataframe[ev_name] = ""
-            dataframe[f"{ev_name} Reason"] = ""
-            # dataframe[f"{ev_name} EdgeCase"] = None
-
-        # Map results to dataframe rows
-        for item in rawResults:
-            for compound_key, value in item.items():
-                if compound_key not in rowIdMapping:
-                    continue
-                index = rowIdMapping[compound_key]["index"]
-                rowID, columnID, _ = compound_key.split("-", 2)
+                else:
+                    print(f"✅ Record {i} uploaded successfully.")
 
-                # get the dataframe row at this index
-                row = dataframe.iloc[index].to_dict()
+            except Exception as e:
+                print(f"❌ Record {i} failed: {e}")
 
-                if not value:
-                    continue
 
+        print("Records Uploaded successfully. You may now review your logs at: https://app.llumo.ai/logs")
 
-                # ️ Handle fullEval block
-                fullEval = value.get("fullEval") if isinstance(value, dict) else None
-                if fullEval:
-                    if "evalMetrics" in fullEval and isinstance(fullEval["evalMetrics"], list):
-                        for evalItem in fullEval["evalMetrics"]:
-                            evalName = evalItem.get("evalName") or evalItem.get("kpiName")
-                            score = str(evalItem.get("score")) or evalItem.get("value")
-                            reasoning = evalItem.get("reasoning")
-                            # edgeCase = eval_item.get("edgeCase")
-
-                            if evalName:
-                                dataframe.at[index, evalName] = score
-                                dataframe.at[index, f"{evalName} Reason"] = reasoning
-                                # dataframe.at[index, f"{evalName} EdgeCase"] = edgeCase
-
-
-        # runLog = value.get("runLog") if isinstance(value, dict) else None
-        # if runLog:
-        #     try:
-        #         self.createRunForEvalMultiple(smartLog=runLog)
-        #     except Exception as e:
-        #         print(f"Error posting smartlog: {e}")
-
 
-
-        try:
-            self.socket.disconnect()
-        except Exception:
-            pass
+        # Wait for results
 
-        # if hasattr(self, "endLlumoRun"):
-        #     self.endEvalRun()
-        #
-        return dataframe
+    # def evaluateMultiple(
+    #     self,
+    #     data,
+    #     evals: list = [],
+    #     # prompt_template="Give answer to the given query: {{query}} using the given context: {{context}}.",
+    #     prompt_template="",
+    #     getDataFrame: bool = False,
+    #     _tocheck=True,
+    # ):
+    #     # if hasattr(self, "startLlumoRun"):
+    #     #     self.startLlumoRun(runName="evaluateMultiple")
+    #     if isinstance(data, dict):
+    #         data = [data]
+    #     elif not isinstance(data, list):
+    #         raise ValueError("Data should be a dict or a list of dicts.")
+    #
+    #     self.socket = LlumoSocketClient(socketUrl)
+    #     dataframe = pd.DataFrame(data).astype(str)
+    #     workspaceID = None
+    #     email = None
+    #     try:
+    #         socketID = self.socket.connect(timeout=250)
+    #         # print("Socket connected with ID:", socketID)
+    #     except Exception as e:
+    #         socketID = "DummySocketID"
+    #         # print(f"Socket connection failed, using dummy ID. Error: {str(e)}")
+    #
+    #     self.evalData = []
+    #     self.evals = evals
+    #     self.allBatches = []
+    #     rowIdMapping = {}  # (rowID-columnID-columnID -> (index, evalName))
+    #
+    #     # Wait for socket connection
+    #     # max_wait_secs = 20
+    #     # waited_secs = 0
+    #     # while not self.socket._connection_established.is_set():
+    #     #     time.sleep(0.1)
+    #     #     waited_secs += 0.1
+    #     #     if waited_secs >= max_wait_secs:
+    #     #         raise RuntimeError("Timeout waiting for server connection")
+    #
+    #     # Start listener thread
+    #     # expectedResults = len(dataframe) * len(evals)
+    #     expectedResults = len(dataframe)
+    #     # print("expected result" ,expectedResults)
+    #     timeout = max(100, min(250, expectedResults * 60))
+    #     listener_thread = threading.Thread(
+    #         target=self.socket.listenForResults,
+    #         kwargs={
+    #             "min_wait": 20,
+    #             "max_wait": timeout,
+    #             "inactivity_timeout": timeout,
+    #             "expected_results": expectedResults,
+    #         },
+    #         daemon=True,
+    #     )
+    #     listener_thread.start()
+    #     # commenting validate api key as we don't need it logger does it for us. uncommented but we need different
+    #     # api for this which don't spend time on eval defintiion fetches and just bring hits
+    #     self.validateApiKey()
+    #     activePlayground = self.playgroundID
+    #     # print(f"\n======= Running evaluation for: {evalName} =======")
+    #
+    #     # Validate API and dependencies
+    #     # self.validateApiKey(evalName=evals[0])
+    #
+    #     # why we need custom analytics here? there is no such usage below
+    #     # customAnalytics = getCustomAnalytics(self.workspaceID)
+    #
+    #     # metricDependencies = checkDependency(
+    #     #     evalName,
+    #     #     list(dataframe.columns),
+    #     #     tocheck=_tocheck,
+    #     #     customevals=customAnalytics,
+    #     # )
+    #     # if not metricDependencies["status"]:
+    #     #     raise LlumoAIError.dependencyError(metricDependencies["message"])
+    #
+    #     # evalDefinition = self.evalDefinition[evalName]["definition"]
+    #     model = "GPT_4"
+    #     provider = "OPENAI"
+    #     evalType = "LLM"
+    #     workspaceID = self.workspaceID
+    #     email = self.email
+    #     # categories = self.categories
+    #     # evaluationStrictness = self.evaluationStrictness
+    #     # grammarCheckOutput = self.grammarCheckOutput
+    #     # insightLength = self.insightsLength
+    #     # numJudges = self.numJudges
+    #     # penaltyBonusInstructions = self.penaltyBonusInstructions
+    #     # probableEdgeCases = self.probableEdgeCases
+    #     # fieldMapping = self.fieldMapping
+    #
+    #     userHits = checkUserHits(
+    #         self.workspaceID,
+    #         self.hasSubscribed,
+    #         self.trialEndDate,
+    #         self.subscriptionEndDate,
+    #         self.hitsAvailable,
+    #         len(dataframe),
+    #     )
+    #
+    #     #where does this remaining hit comes from?
+    #
+    #
+    #     if not userHits["success"]:
+    #         raise LlumoAIError.InsufficientCredits(userHits["message"])
+    #
+    #     currentBatch = []
+    #
+    #
+    #     for index, row in dataframe.iterrows():
+    #         # Extract required fields
+    #         tools = row.get("tools", "")
+    #         groundTruth = row.get("groundTruth", "")
+    #         messageHistory = row.get("messageHistory", "")
+    #         intermediateSteps = row.get("intermediateSteps", "")
+    #         output = row.get("output", "")
+    #
+    #         # Initialize query and context
+    #         query = ""
+    #         context = ""
+    #
+    #         # Process prompt template if provided
+    #         if prompt_template:
+    #             # Extract template variables
+    #             keys = re.findall(r"{{(.*?)}}", prompt_template)
+    #
+    #             if not all([key in dataframe.columns for key in keys]):
+    #                 raise LlumoAIError.InvalidPromptTemplate()
+    #
+    #             # Populate template and separate query/context
+    #             populated_template = prompt_template
+    #             for key in keys:
+    #                 value = row.get(key, "")
+    #                 if isinstance(value, str):
+    #                     length = len(value.split()) * 1.5
+    #                     if length <= 50:
+    #                         # Short value - include in query via template
+    #                         temp_obj = {key: value}
+    #                         populated_template = getInputPopulatedPrompt(populated_template, temp_obj)
+    #                     else:
+    #                         # Long value - add to context
+    #                         context += f" {key}: {value}, "
+    #
+    #             query = populated_template.strip()
+    #
+    #             # Add any remaining context from other fields
+    #             if not context.strip():
+    #                 for key, value in row.items():
+    #                     if key not in keys and isinstance(value, str) and value.strip():
+    #                         context += f" {key}: {value}, "
+    #         else:
+    #             # No prompt template - use direct query and context fields
+    #             query = row.get("query", "")
+    #             context = row.get("context", "")
+    #
+    #         # Generate unique IDs
+    #         rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+    #         columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+    #
+    #         compoundKey = f"{rowID}-{columnID}-{columnID}"
+    #         rowIdMapping[compoundKey] = {"index": index}
+    #         # print("rowIdMapping:", rowIdMapping)
+    #
+    #         # Create evaluation payload
+    #         # print("socketID in before templateData: ", socketID)
+    #         templateData = {
+    #             "processID": getProcessID(),
+    #             "socketID": socketID,
+    #             "rowID": rowID,
+    #             "columnID": columnID,
+    #             "processType": "FULL_EVAL_RUN",
+    #             "evalType": "LLM",
+    #             "workspaceID": workspaceID,
+    #             "email": email,
+    #             "playgroundID": activePlayground,
+    #             "source": "SDK",
+    #             "processData": {
+    #                 "executionDependency": {
+    #                     "query": query,
+    #                     "context": context.strip(),
+    #                     "output": output,
+    #                     "tools": tools,
+    #                     "groundTruth": groundTruth,
+    #                     "messageHistory": messageHistory,
+    #                     "intermediateSteps": intermediateSteps,
+    #                 },
+    #                 "evallist": evals,
+    #                 "sessionID": self.sessionID
+    #             },
+    #             "type": "FULL_EVAL_RUN",
+    #         }
+    #
+    #         # Add to batch
+    #         currentBatch.append(templateData)
+    #         if len(currentBatch) == 10:
+    #             self.allBatches.append(currentBatch)
+    #             currentBatch = []
+    #
+    #     if currentBatch:
+    #         self.allBatches.append(currentBatch)
+    #
+    #     for batch in tqdm(
+    #         self.allBatches,
+    #         desc="Processing Batches",
+    #         unit="batch",
+    #         colour="magenta",
+    #         ascii=False,
+    #     ):
+    #         try:
+    #             self.postBatch(batch=batch, workspaceID=workspaceID)
+    #             time.sleep(2)
+    #             # print(batch)
+    #         except Exception as e:
+    #             print(f"Error posting batch: {e}")
+    #             raise
+    #
+    #     # Wait for results
+    #     time.sleep(3)
+    #     listener_thread.join()
+    #
+    #     rawResults = self.socket.getReceivedData()
+    #
+    #     # print(f"Total results received: {len(rawResults)}")
+    #     # print("Raw results:", rawResults)
+    #
+    #     # print("data from db #####################",dataFromDb)
+    #     # Fix here: keep full keys, do not split keys
+    #     receivedRowIDs = {key for item in rawResults for key in item.keys()}
+    #     # print("Received Row IDs:", receivedRowIDs)
+    #     expectedRowIDs = set(rowIdMapping.keys())
+    #     missingRowIDs = expectedRowIDs - receivedRowIDs
+    #     # print("All expected keys:", expectedRowIDs)
+    #     # print("All received keys:", receivedRowIDs)
+    #     # print("Missing keys:", len(missingRowIDs))
+    #     missingRowIDs = list(missingRowIDs)
+    #
+    #     # print("Missing Row IDs:", missingRowIDs)
+    #     # print(f"Total results before fetching missing data: {len(rawResults)}")
+    #     if len(missingRowIDs) > 0:
+    #         print('''It's taking longer than expected to get results for some rows. You can close this now.
+    #         Please wait for 15 mins while we create the flow graph for you. You can check the graph at app.llumo.ai/debugging''')
+    #     else:
+    #         print('''All results received successfully. You can check flowgraph in 5 mins at app.llumo.ai/debugging''')
+    #     # if len(missingRowIDs) > 0:
+    #     #     dataFromDb = self.fetchDataForMissingKeys(workspaceID, missingRowIDs)
+    #     #     # print("Fetched missing data from DB:", dataFromDb)
+    #     #     rawResults.extend(dataFromDb)
+    #     # print(f"Total results after fetching missing data: {len(rawResults)}")
+    #
+    #     self.evalData = rawResults
+    #     # print("RAW RESULTS: ", self.evalData)
+    #
+    #     # Initialize dataframe columns for each eval
+    #     for ev_name in evals:
+    #         dataframe[ev_name] = ""
+    #         dataframe[f"{ev_name} Reason"] = ""
+    #         # dataframe[f"{ev_name} EdgeCase"] = None
+    #
+    #     # Map results to dataframe rows
+    #     for item in rawResults:
+    #         for compound_key, value in item.items():
+    #             if compound_key not in rowIdMapping:
+    #                 continue
+    #             index = rowIdMapping[compound_key]["index"]
+    #             rowID, columnID, _ = compound_key.split("-", 2)
+    #
+    #             # get the dataframe row at this index
+    #             row = dataframe.iloc[index].to_dict()
+    #
+    #             if not value:
+    #                 continue
+    #
+    #
+    #             # ️ Handle fullEval block
+    #             fullEval = value.get("fullEval") if isinstance(value, dict) else None
+    #             if fullEval:
+    #                 if "evalMetrics" in fullEval and isinstance(fullEval["evalMetrics"], list):
+    #                     for evalItem in fullEval["evalMetrics"]:
+    #                         evalName = evalItem.get("evalName") or evalItem.get("kpiName")
+    #                         score = str(evalItem.get("score")) or evalItem.get("value")
+    #                         reasoning = evalItem.get("reasoning")
+    #                         # edgeCase = eval_item.get("edgeCase")
+    #
+    #                         if evalName:
+    #                             dataframe.at[index, evalName] = score
+    #                             dataframe.at[index, f"{evalName} Reason"] = reasoning
+    #                             # dataframe.at[index, f"{evalName} EdgeCase"] = edgeCase
+    #
+    #
+    #     # runLog = value.get("runLog") if isinstance(value, dict) else None
+    #     # if runLog:
+    #     #     try:
+    #     #         self.createRunForEvalMultiple(smartLog=runLog)
+    #     #     except Exception as e:
+    #     #         print(f"Error posting smartlog: {e}")
+    #
+    #
+    #
+    #     try:
+    #         self.socket.disconnect()
+    #     except Exception:
+    #         pass
+    #
+    #     # if hasattr(self, "endLlumoRun"):
+    #     #     self.endEvalRun()
+    #     #
+    #     return dataframe
 
     def promptSweep(
         self,
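Net effect of the client.py changes: the socket-based evaluateMultiple flow is retired (kept only as commented-out code above) and replaced by debugLogs, which builds one batch dict per row and uploads each record synchronously via postForListOfSteps. A hedged usage sketch; the record fields come straight from the diff, while the client construction is assumed, and the token/cost figures simply replay the hard-coded heuristic (4 characters per token, fixed per-token prices), not live model pricing:

import math
from llumo.client import LlumoClient

client = LlumoClient(api_key="llumo-...")  # assumed constructor
records = [{
    "query": "What is the capital of France?",
    "output": "Paris is the capital of France.",
    "context": "Geography FAQ",
}]
client.debugLogs(records)  # one POST per record, with a per-record ✅/❌ status line

# The per-record cost arithmetic, replayed for clarity:
query, output = records[0]["query"], records[0]["output"]
inputTokens = math.ceil(len(query) / 4)    # 30 chars -> 8 tokens
outputTokens = math.ceil(len(output) / 4)  # 31 chars -> 8 tokens
cost = inputTokens * 0.0000025 + outputTokens * 0.00001  # 0.0001

Note that debugLogs stamps every record with a fixed createdAt of 2025-08-02T10:20:15.000Z and a randomized latency, so those fields do not reflect real timings.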
llumo/helpingFuntions.py CHANGED
@@ -11,6 +11,7 @@ import re
 import openai
 import google.generativeai as genai
 from collections import defaultdict
+import requests
 
 
 from .models import _MODEL_METADATA, AVAILABLEMODELS
@@ -735,4 +736,30 @@ def getCustomAnalytics(workspaceID):
         return metricDependencies
 
     except Exception as e:
-        return {}
+        return {}
+
+
+
+def postForListOfSteps(record: {},workspaceID):
+    url = "https://backend-api.llumo.ai/api/v1/get-debug-log-for-upload"
+    payload = record
+    workspaceID = workspaceID
+
+    # Encode to Base64
+    workspaceIDEncoded = base64.b64encode(workspaceID.encode()).decode()
+
+    headers = {
+        "Authorization": f"Bearer {workspaceIDEncoded}",
+        "Content-Type": "application/json",
+    }
+
+    authorization = {}
+    # print("[PAYLOAD]: ",payload)
+    try:
+        response = requests.post(url=url, json=payload,headers = headers)
+        # print("[RESPONSE]: ",response.json())
+        # print()
+        return {"status":"True","data":response.json()}
+
+    except Exception as e:
+        return {"status":"False","exception": str(e)}
llumo/llumoSessionContext.py CHANGED
@@ -4,6 +4,10 @@ from typing import Optional, List, Dict, Any
 from datetime import datetime, timezone
 import requests
 from .client import LlumoClient
+import math
+import base64
+
+import random
 
 _ctxLogger = contextvars.ContextVar("ctxLogger")
 _ctxSessionID = contextvars.ContextVar("ctxSessionID")
@@ -31,6 +35,7 @@ class LlumoSessionContext(LlumoClient):
         self.threadLogger = None
         self.threadSessionID = None
         self.threadLlumoRun = None
+        self.isLangchain = False
 
     def start(self):
         self.threadLogger = _ctxLogger.set(self.logger)
@@ -68,25 +73,37 @@ class LlumoSessionContext(LlumoClient):
 
         currentTime = datetime(2025, 8, 2, 10, 20, 15, tzinfo=timezone.utc)
         createdAt = currentTime.strftime("%Y-%m-%dT%H:%M:%S.000Z")
+
+
+
         llumoRun = {
             "logID": LlumoRunID,
             "runName": runName,
             "sessionID": self.sessionID,
             "playgroundID": self.logger.getPlaygroundID(),
             "workspaceID": self.logger.getWorkspaceID(),
-            "source": "SDK",
+            "source": "SDK_LANGCHAIN" if self.isLangchain else "SDK_OTHERS",
             "rowID": rowID,
             "columnID": columnID,
             "email": self.logger.getUserEmailID(),
             "createdAt": createdAt,
             "createdBy": self.logger.getUserEmailID(),
-            "status": "SUCCESS",
+            "status": "",
             "flow": [],
-            "latency": 4200,
             "feedback": "",
             "dump": "",
             "steps": [],
+            "format": "listofsteps",
+            "logData":{
+                "inputTokens": "",
+                "outputTokens":"",
+                "totalTokens": "",
+                "cost": "",
+                "modelsUsed": "gpt-4o",
+
+            }
         }
+
         self.threadLlumoRun = _ctxLlumoRun.set(llumoRun)
 
     def endLlumoRun(self):
@@ -104,20 +121,59 @@ class LlumoSessionContext(LlumoClient):
         ]
         run["steps"] = clean_steps
 
+        llm_step = False
+        inputTokens = 0
+        outputTokens = 0
+        for item in run["steps"]:
+            if item.get("stepType") == "LLM":
+                llm_step = True
+                outputTokens = len(item["metadata"].get("output", 0)) / 4
+
+
+            if item.get("stepType") == "QUERY":
+                inputTokens = len(item["metadata"].get("query", 0)) / 4
+
+        # 2. If no LLM step, set zeros and continue
+        if llm_step == False:
+            run["logData"]["inputTokens"] = 0
+            run["logData"]["outputTokens"] = 0
+            run["logData"]["totalTokens"] = 0
+            run["logData"]["cost"] = 0
+            run["logData"]["modelsUsed"] = "gpt-4o"
+
+        INPUT_TOKEN_PRICE = 0.0000025
+        OUTPUT_TOKEN_PRICE = 0.00001
+        cost = (inputTokens * INPUT_TOKEN_PRICE) + (outputTokens * OUTPUT_TOKEN_PRICE)
+
+        run["logData"]["inputTokens"] = math.ceil(inputTokens)
+        run["logData"]["outputTokens"] = math.ceil(outputTokens)
+        run["logData"]["totalTokens"] = math.ceil(inputTokens + outputTokens)
+        run["logData"]["cost"] = round(cost, 8)
+        # run["latency"] = round(random.uniform(1,1.6),2)
         # print(run["runName"])  # optional debug log
 
         # STEP 3: Send the payload
-        url = "https://app.llumo.ai/api/create-debug-log"
+        # url = "https://app.llumo.ai/api/create-debug-log"
+        url = "https://backend-api.llumo.ai/api/v1/get-debug-log-for-New-SDK"
+        workspaceID = self.logger.getWorkspaceID()
+
+        # Encode to Base64
+        workspaceIDEncoded = base64.b64encode(workspaceID.encode()).decode()
+
         headers = {
-            "Authorization": f"Bearer {self.logger.getWorkspaceID()}",
+            "Authorization": f"Bearer {workspaceIDEncoded}",
             "Content-Type": "application/json",
         }
 
+
+
         try:
-            # print(run)
-            response = requests.post(url, headers=headers, json=run, timeout=20)
+            # print("[PAYLOAD]: ",run)
+            payload = run
+            response = requests.post(url, headers=headers, json=payload, timeout=20)
             response.raise_for_status()
-            # print(response.json())
+            # print("[PAYLOAD]: ",response.json())
+
         except requests.exceptions.Timeout:
             # print("Request timed out.")
             pass
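The token accounting in endLlumoRun is a character-count heuristic rather than a real tokenizer: the last QUERY and LLM steps are measured at roughly four characters per token and priced at the hard-coded rates. A worked example with the same constants (note that the zeroed fields written when no LLM step is found are immediately overwritten by the unconditional block that follows):

import math

query = "Summarize the quarterly report"            # 30 chars from a QUERY step
output = "Revenue grew 12% quarter over quarter."   # 38 chars from an LLM step

inputTokens = len(query) / 4    # 7.5
outputTokens = len(output) / 4  # 9.5
cost = inputTokens * 0.0000025 + outputTokens * 0.00001  # 0.00011375

logData = {
    "inputTokens": math.ceil(inputTokens),                 # 8
    "outputTokens": math.ceil(outputTokens),               # 10
    "totalTokens": math.ceil(inputTokens + outputTokens),  # 17
    "cost": round(cost, 8),                                # 0.00011375
}
print(logData)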
llumo-0.2.38.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llumo
-Version: 0.2.36
+Version: 0.2.38
 Summary: Python SDK for interacting with the Llumo ai API.
 Home-page: https://www.llumo.ai/
 Author: Llumo
llumo-0.2.38.dist-info/RECORD CHANGED
@@ -1,20 +1,20 @@
 llumo/__init__.py,sha256=kkuppu7ZPiVZFdnYzJ9BM3syMbYHOSZLpwKwAvGHsnY,311
-llumo/callback.py,sha256=6y9TeVD8qjEXSsLUsIvkK29hbzgIefpBngyA_u9y9LU,23981
+llumo/callback.py,sha256=8QkVmOsqg6x60yGc4OzQeVzl50TMRo7NAPTcHKn2uwk,24027
 llumo/callbacks-0.py,sha256=TEIOCWRvk2UYsTmBMBsnlgpqWvr-2y3a6d0w_e96NRM,8958
 llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
-llumo/client.py,sha256=EmgnIlVVs83Cne-4i5liyokaY3QEmmDOiQnh4oZyHD0,73053
+llumo/client.py,sha256=WKqnOYabThMZN5RXColIWvyf6BY6sFIT5C0s2HT89Xc,79766
 llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
 llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
 llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
 llumo/google.py,sha256=6y9YnDFDRHv6-sQNT5LIsV9p31BCN0B9eow5KTRBWfM,2185
-llumo/helpingFuntions.py,sha256=jhB14o5e0YuRp-lCnu1c4vXjoG_y8ZinFHxUrZsnZAk,27284
+llumo/helpingFuntions.py,sha256=a9mq2e-xV4LeGS-rrzVzf9KGss78b8AKjUhICPc-6yU,28045
 llumo/llumoLogger.py,sha256=7DZR2_QHy0fencng9Nnf9UPmEx8-OZzhvz1QRUp9w6c,2190
-llumo/llumoSessionContext.py,sha256=si7T66D4bsea9vrCaQiko3ZcBcx7zy9W0lf0bpitaLY,12587
+llumo/llumoSessionContext.py,sha256=wHAMZrbmkiHKFqh1sw5EU7fzH2eKDeW_3jk0HQwP4os,14402
 llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
 llumo/openai.py,sha256=VstBzaORe8Tq0feUIIEszzcN1oq6TJfkPviaCr5d3Bw,8950
 llumo/sockets.py,sha256=pfWz1zTEiwqJhdbSy3i3_Y4WlIdJ3cuac11wMePeBS0,6130
-llumo-0.2.36.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
-llumo-0.2.36.dist-info/METADATA,sha256=-lPlVSKmlLUjVHpbufy8N4X5b3_247Semgcv7boLfHI,1662
-llumo-0.2.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llumo-0.2.36.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
-llumo-0.2.36.dist-info/RECORD,,
+llumo-0.2.38.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.38.dist-info/METADATA,sha256=uzjy-7cka3tcoJ6kMd83Sb3NR8436uC-KWNJ0wTwEC8,1662
+llumo-0.2.38.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.38.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.38.dist-info/RECORD,,