llumo 0.2.23__py3-none-any.whl → 0.2.24__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
llumo/client.py CHANGED
@@ -99,11 +99,23 @@ class LlumoClient:
  )
  self.email = data["data"]["data"].get("email", None)

- self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, None)
-
+ self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "")
+ self.categories = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("categories", {})
+ self.evaluationStrictness=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("evaluationStrictness", {})
+ self.grammarCheckOutput=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("grammarCheckOutput", {})
+ self.insightsLength=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("insightsLength", {})
+ self.insightsLevel=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("insightsLevel", {})
+ self.executionDependency=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("executionDependency", {})
+ self.sampleData=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("sampleData", {})
+ self.numJudges=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("numJudges", {})
+ self.penaltyBonusInstructions=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("penaltyBonusInstructions", [])
+ self.probableEdgeCases= data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("probableEdgeCases", [])
+ self.fieldMapping= data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("fieldMapping", [])
+
+
  except Exception as e:
  # print(f"Error extracting data from response: {str(e)}")
- raise LlumoAIError.UnexpectedError(detail=str(e))
+ raise LlumoAIError.UnexpectedError(detail=evalName)

  def postBatch(self, batch, workspaceID):
  payload = {
@@ -679,8 +691,9 @@ class LlumoClient:

  # Validate API and dependencies
  self.validateApiKey(evalName=evalName)
+ customAnalytics=getCustomAnalytics(self.workspaceID)
  metricDependencies = checkDependency(
- evalName, list(dataframe.columns), tocheck=_tocheck
+ evalName, list(dataframe.columns), tocheck=_tocheck,customevals=customAnalytics
  )
  if not metricDependencies["status"]:
  raise LlumoAIError.dependencyError(metricDependencies["message"])
@@ -691,6 +704,15 @@ class LlumoClient:
  evalType = "LLM"
  workspaceID = self.workspaceID
  email = self.email
+ categories=self.categories
+ evaluationStrictness=self.evaluationStrictness
+ grammarCheckOutput=self.grammarCheckOutput
+ insightLength=self.insightsLength
+ numJudges=self.numJudges
+ penaltyBonusInstructions=self.penaltyBonusInstructions
+ probableEdgeCases=self.probableEdgeCases
+ fieldMapping=self.fieldMapping
+

  userHits = checkUserHits(
  self.workspaceID,
@@ -732,31 +754,39 @@ class LlumoClient:
  templateData = {
  "processID": getProcessID(),
  "socketID": socketID,
+ "rowID": rowID,
+ "columnID": columnID,
+ "processType": "EVAL",
+ "evalType": evalType,
+ "workspaceID": workspaceID,
+ "email": email,
+ "playgroundID": activePlayground,
  "source": "SDK",
  "processData": {
+ "analyticsName": evalName,
+ "definition": evalDefinition,
  "executionDependency": {
- "query": "",
- "context": "",
- "output": output,
- "tools": tools,
- "groundTruth": groundTruth,
- "messageHistory": messageHistory,
- "intermediateSteps": intermediateSteps,
+ "Query": "",
+ "Context": "",
+ "Output": output,
+ "Tools": tools,
+ "GroundTruth": groundTruth,
+ "MessageHistory": messageHistory,
+ "IntermediateSteps": intermediateSteps,
  },
- "definition": evalDefinition,
+ "categories":categories,
+ "evaluationStrictness": evaluationStrictness,
+ "grammarCheckOutput": grammarCheckOutput,
+ "insightLength": insightLength,
+ "numJudges": numJudges,
+ "penaltyBonusInstructions": penaltyBonusInstructions,
+ "probableEdgeCases": probableEdgeCases,
  "model": model,
  "provider": provider,
- "analytics": evalName,
  },
- "workspaceID": workspaceID,
  "type": "EVAL",
- "evalType": evalType,
  "kpi": evalName,
- "columnID": columnID,
- "rowID": rowID,
- "playgroundID": activePlayground,
- "processType": "EVAL",
- "email": email,
+ "fieldMappig":fieldMapping,
  }

  query = ""
@@ -828,7 +858,6 @@ class LlumoClient:
  # print("All received keys:", received_rowIDs)
  # print("Missing keys:", len(missingRowIDs))
  missingRowIDs=list(missingRowIDs)
-
  if len(missingRowIDs) > 0:
  dataFromDb=fetchData(workspaceID,activePlayground,missingRowIDs)
  rawResults.extend(dataFromDb)
@@ -868,7 +897,8 @@ class LlumoClient:
  promptText=prompt_template,
  definationMapping=self.definationMapping,
  outputColName=outputColName,
- activePlayground= activePlayground
+ activePlayground= activePlayground,
+ customAnalytics=customAnalytics
  ):
  print(
  "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
llumo/exceptions.py CHANGED
@@ -22,8 +22,8 @@ class LlumoAIError(Exception):
  return LlumoAIError("The API response is not in valid JSON format")

  @staticmethod
- def UnexpectedError(detail="An UnexpectedError error occurred"):
- return LlumoAIError(f"UnexpectedError error: {detail}")
+ def UnexpectedError(detail="Metric"):
+ return LlumoAIError(f"Can you please check if {detail} is written correctly. If you want to run {detail} please create a custom eval with same name of app.llumo.ai/evallm ")

  @staticmethod
  def EvalError(detail="Some error occured while processing"):
llumo/helpingFuntions.py CHANGED
@@ -26,6 +26,7 @@ uploadRowList = (
  )
  createInsightUrl="https://app.llumo.ai/api/external/generate-insight-from-eval-for-sdk"

+ getCustomAnalyticsUrl="https://app.llumo.ai/api/workspace/get-all-analytics"

  def getProcessID():
  return f"{int(time.time() * 1000)}{uuid.uuid4()}"
@@ -217,7 +218,7 @@ def deleteColumnListInPlayground(workspaceID: str, playgroundID: str):
  return None

  def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColName=None,
- outputColName= "output",dataStreamName=None,definationMapping=None,evalOutputMap = None):
+ outputColName= "output",dataStreamName=None,definationMapping=None,evalOutputMap = None,customAnalytics=[]):
  if len(dataframe) > 100:
  dataframe = dataframe.head(100)
  print("⚠️ Dataframe truncated to 100 rows for upload.")
@@ -228,9 +229,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  "columnListToUpload": [],
  }
  allEvals = ['Response Completeness', 'Response Bias', 'Response Harmfulness', 'Input Toxicity', 'Input Harmfulness', 'Context Utilization', 'Relevance Retention', 'Semantic Cohesion', 'Final Task Alignment', 'Tool Reliability', 'Response Correctness', 'Response Toxicity', 'Input Bias', 'Input Relevancy', 'Redundancy Reduction', 'Response Sentiment', 'Tool Selection Accuracy', 'Stepwise Progression', 'Hallucination', 'Faithfulness', 'Answer Relevancy', 'Context Precision', 'Answer Similarity', 'Harmfulness', 'Maliciousness', 'Coherence', 'Answer Correctness', 'Context Recall', 'Context Entity Recall', 'Conciseness', 'customEvalColumn', 'Groundedness', 'Memory Utilization', 'Input Relevancy (Multi-turn)','PII Check','Prompt Injection']
-
- evalDependencies = checkDependency(_returnDepMapping=True)
-
+ try:
+ allEvals.extend(list(customAnalytics.keys()))
+ except Exception as e:
+ pass
+ evalDependencies = checkDependency(_returnDepMapping=True,customevals=customAnalytics)
+ print(allEvals)
  # Create a mapping of column names to unique column IDs
  columnIDMapping = {}

@@ -296,7 +300,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  "order": indx}


- elif any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText is not None:
+ elif any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText is not None :
  if evalOutputMap != None:
  outputColName = evalOutputMap[col]
  else:
@@ -455,14 +459,14 @@ def uploadRowsInDBPlayground(payload):
  return None


- def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",evalOutputMap = None,activePlayground=None):
+ def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",evalOutputMap = None,activePlayground=None,customAnalytics=[]):

  if activePlayground != None:
  playgroundId=activePlayground
  else:
  playgroundId = str(createEvalPlayground(email=email, workspaceID=workspaceID))
  payload1, payload2 = createColumn(
- workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,evalOutputMap=evalOutputMap
+ workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,evalOutputMap=evalOutputMap,customAnalytics=customAnalytics
  )

  # Debugging line to check the payload2 structure
@@ -515,7 +519,7 @@ def getPlaygroundInsights(defination:str,uniqueClassesString: str, reasonList: l
  else:
  print(f"Error generating insight: {responseGenerate.status_code} - {responseGenerate.text}")
  return None
- def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_returnDepMapping = False):
+ def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_returnDepMapping = False,customevals={}):
  """
  Checks if all the required input columns for the selected evaluation metric are present.

@@ -527,6 +531,7 @@ def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_retu
  - LlumoAIError.dependencyError: If any required column is missing.
  """
  # Define required dependencies for each evaluation metric
+
  metricDependencies = {
  'Response Completeness': ['context', 'query', 'output'],
  'Response Bias': ['output'],
@@ -553,6 +558,8 @@ def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_retu
  'PII Check':["query","output"],
  'Prompt Injection':["query"]
  }
+
+ metricDependencies.update(customevals)
  if _returnDepMapping == True:
  return metricDependencies

@@ -652,6 +659,7 @@ def validateGoogleKey(api_key):

  def groupLogsByClass(logs, max_logs=2):
  # Initialize the final result structures (no defaultdict)
+
  groupedLogs = {}
  uniqueEdgeCases = {} # This will store unique edge cases for each eval_name

@@ -685,3 +693,46 @@ def groupLogsByClass(logs, max_logs=2):
  uniqueEdgeCases[eval_name] = list(uniqueEdgeCases[eval_name])

  return groupedLogs, uniqueEdgeCases
+
+
+ def getCustomAnalytics(workspaceID):
+ try:
+ url = getCustomAnalyticsUrl
+ payload = {
+ "workspaceID": workspaceID
+ }
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ response = requests.post(url, json=payload, headers=headers)
+ data=response.json()
+ customData=data.get("data","").get("analyticsCustom","")
+ customMapping = {
+ "QUERY": "query",
+ "CONTEXT": "context",
+ "OUTPUT": "output",
+ "MESSAGEHISTORY": "messageHistory",
+ "TOOLS": "tools",
+ "INTERMEDIATESTEPS": "intermediateSteps",
+ "GROUNDTRUTH": "groundTruth",
+ }
+
+ metricDependencies = {}
+
+ for eval in customData:
+ evalName = eval.get("analyticsName")
+ evalDependencyRaw = list(eval.get("variableMappings").values())
+
+ # Replace each value using the custom mapping
+ evalDependency = [customMapping.get(val.upper(), val.lower()) for val in evalDependencyRaw]
+
+ # Build the dict
+ metricDependencies[evalName] = evalDependency
+ return metricDependencies
+
+ except Exception as e:
+ return {}
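
Taken together, the helpingFuntions.py changes wire workspace-defined custom evals into the existing dependency check: getCustomAnalytics builds a {analyticsName: [required columns]} mapping from each custom eval's variableMappings, and checkDependency merges it over the built-in table via metricDependencies.update(customevals). A sketch of the flow under assumed data (the eval name and columns are invented, and no live API call is made; the return shape with "status" and "message" is the one client.py consumes):

```python
# Assumed shape of getCustomAnalytics(workspaceID)'s return value:
customAnalytics = {"Brand Tone Check": ["query", "output"]}

# Once metricDependencies.update(customevals) has merged the mapping in,
# checkDependency treats the custom eval like any built-in metric.
result = checkDependency(
    selectedEval=["Brand Tone Check"],
    columns=["query", "output", "context"],  # DataFrame columns on hand
    tocheck=True,
    customevals=customAnalytics,
)
assert result["status"]  # all required columns are present in this example

# With a required column missing, client.py instead raises
# LlumoAIError.dependencyError(result["message"]).
```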
llumo-{0.2.23 → 0.2.24}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.23
+ Version: 0.2.24
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo
llumo-{0.2.23 → 0.2.24}.dist-info/RECORD RENAMED
@@ -1,16 +1,16 @@
  llumo/__init__.py,sha256=YVBkF1fiXFBd_zzySi9BDWgX8MJuLBJ-oF8538MrnDU,256
  llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
- llumo/client.py,sha256=iVcdoGncHNrABl3qImBzwwQ_mf48M1Nte3nUEoSbQ2Y,64867
- llumo/exceptions.py,sha256=Vp_MnanHbnd1Yjuoi6WLrKiwwZbJL3znCox2URMmGU4,2032
+ llumo/client.py,sha256=pCocD7v5dDuHKlWE6OqZrrEk4e_LWYuI_LgWaGT3E-g,67498
+ llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
  llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
  llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
  llumo/google.py,sha256=3S_aRtbtlctCXPGR0u4baLlkyFrsjd02vlUCkoRPA5U,2147
- llumo/helpingFuntions.py,sha256=SmQvE3_9aOzKJa1wAmp6UhOfW1eWwkWcNn_Jwhw4TG8,25641
+ llumo/helpingFuntions.py,sha256=B6FwUQ5f1v4FKrWCbYoGWMFdscOV_liuuhTgNQ3cdrk,27275
  llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
  llumo/openai.py,sha256=DGhEwQIJIIycGpw3hYQnyxdj6RFVpZ-gay-fZGqtkhU,3013
  llumo/sockets.py,sha256=pBDo-U65hMIMwKMwZQl3iBkEjISEt-9BkXxZTWfSHF4,6116
- llumo-0.2.23.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
- llumo-0.2.23.dist-info/METADATA,sha256=6FgzHIZZ7ZEWk1q7DDZIBantpqkk5SH-8EuPtNOPoyc,1558
- llumo-0.2.23.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- llumo-0.2.23.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
- llumo-0.2.23.dist-info/RECORD,,
+ llumo-0.2.24.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+ llumo-0.2.24.dist-info/METADATA,sha256=IfYBa5UYiXuF595u13Qyi5MoQorHzxpfpUSPmciS7rM,1558
+ llumo-0.2.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ llumo-0.2.24.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+ llumo-0.2.24.dist-info/RECORD,,
File without changes