llumo 0.2.22__py3-none-any.whl → 0.2.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llumo/client.py +80 -21
- llumo/exceptions.py +2 -2
- llumo/helpingFuntions.py +66 -13
- {llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/METADATA +1 -1
- {llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/RECORD +8 -8
- {llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/WHEEL +0 -0
- {llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/licenses/LICENSE +0 -0
- {llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/top_level.txt +0 -0
llumo/client.py
CHANGED
@@ -99,11 +99,23 @@ class LlumoClient:
             )
             self.email = data["data"]["data"].get("email", None)

-            self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName,
-
+            self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "")
+            self.categories = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("categories", {})
+            self.evaluationStrictness=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("evaluationStrictness", {})
+            self.grammarCheckOutput=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("grammarCheckOutput", {})
+            self.insightsLength=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("insightsLength", {})
+            self.insightsLevel=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("insightsLevel", {})
+            self.executionDependency=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("executionDependency", {})
+            self.sampleData=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("sampleData", {})
+            self.numJudges=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("numJudges", {})
+            self.penaltyBonusInstructions=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("penaltyBonusInstructions", [])
+            self.probableEdgeCases= data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("probableEdgeCases", [])
+            self.fieldMapping= data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("fieldMapping", [])
+
+
         except Exception as e:
             # print(f"Error extracting data from response: {str(e)}")
-            raise LlumoAIError.UnexpectedError(detail=
+            raise LlumoAIError.UnexpectedError(detail=evalName)

     def postBatch(self, batch, workspaceID):
         payload = {
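For context, the chained .get() calls above assume a response of roughly the following shape. This is a minimal sketch with hypothetical values, using only keys visible in this hunk:

    # Hypothetical response payload assumed by the attribute extraction above.
    data = {
        "data": {
            "data": {
                "email": "user@example.com",
                "analyticsMapping": {
                    "Hallucination": {
                        "categories": {},
                        "evaluationStrictness": {},
                        "numJudges": {},
                        "penaltyBonusInstructions": [],
                        "probableEdgeCases": [],
                        "fieldMapping": [],
                    },
                },
            },
        },
    }

    mapping = data.get("data", {}).get("data", {}).get("analyticsMapping", {})
    categories = mapping.get("Hallucination", "").get("categories", {})

Note that the fallback in .get(evalName, "") is a string, so when evalName is absent the chained .get() raises AttributeError, which the except branch below converts into LlumoAIError.UnexpectedError(detail=evalName).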
@@ -679,8 +691,9 @@ class LlumoClient:

         # Validate API and dependencies
         self.validateApiKey(evalName=evalName)
+        customAnalytics=getCustomAnalytics(self.workspaceID)
         metricDependencies = checkDependency(
-            evalName, list(dataframe.columns), tocheck=_tocheck
+            evalName, list(dataframe.columns), tocheck=_tocheck,customevals=customAnalytics
         )
         if not metricDependencies["status"]:
             raise LlumoAIError.dependencyError(metricDependencies["message"])
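The new wiring fetches workspace-level custom evals once and threads them into the dependency check. A minimal sketch, assuming checkDependency returns the {"status": ..., "message": ...} dict the surrounding code relies on, with a hypothetical custom eval name:

    customAnalytics = getCustomAnalytics(workspaceID)   # e.g. {"My Custom Eval": ["query", "output"]}
    check = checkDependency("My Custom Eval", ["query", "output"],
                            tocheck=True, customevals=customAnalytics)
    if not check["status"]:
        raise LlumoAIError.dependencyError(check["message"])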
@@ -691,6 +704,15 @@
         evalType = "LLM"
         workspaceID = self.workspaceID
         email = self.email
+        categories=self.categories
+        evaluationStrictness=self.evaluationStrictness
+        grammarCheckOutput=self.grammarCheckOutput
+        insightLength=self.insightsLength
+        numJudges=self.numJudges
+        penaltyBonusInstructions=self.penaltyBonusInstructions
+        probableEdgeCases=self.probableEdgeCases
+        fieldMapping=self.fieldMapping
+

         userHits = checkUserHits(
             self.workspaceID,
@@ -720,6 +742,7 @@

                 inputDict = {key: row[key] for key in keys if key in row}
                 output = row.get(outputColName, "")
+                intermediateSteps = row.get("intermediateSteps", "")


                 rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
@@ -731,30 +754,39 @@
                 templateData = {
                     "processID": getProcessID(),
                     "socketID": socketID,
+                    "rowID": rowID,
+                    "columnID": columnID,
+                    "processType": "EVAL",
+                    "evalType": evalType,
+                    "workspaceID": workspaceID,
+                    "email": email,
+                    "playgroundID": activePlayground,
                     "source": "SDK",
                     "processData": {
+                        "analyticsName": evalName,
+                        "definition": evalDefinition,
                         "executionDependency": {
-                            "
-                            "
-                            "
-                            "
-                            "
-                            "
+                            "Query": "",
+                            "Context": "",
+                            "Output": output,
+                            "Tools": tools,
+                            "GroundTruth": groundTruth,
+                            "MessageHistory": messageHistory,
+                            "IntermediateSteps": intermediateSteps,
                         },
-                        "
+                        "categories":categories,
+                        "evaluationStrictness": evaluationStrictness,
+                        "grammarCheckOutput": grammarCheckOutput,
+                        "insightLength": insightLength,
+                        "numJudges": numJudges,
+                        "penaltyBonusInstructions": penaltyBonusInstructions,
+                        "probableEdgeCases": probableEdgeCases,
                         "model": model,
                         "provider": provider,
-                        "analytics": evalName,
                     },
-                    "workspaceID": workspaceID,
                     "type": "EVAL",
-                    "evalType": evalType,
                     "kpi": evalName,
-                    "
-                    "rowID": rowID,
-                    "playgroundID": activePlayground,
-                    "processType": "EVAL",
-                    "email": email,
+                    "fieldMappig":fieldMapping,
                 }

                 query = ""
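Because the added and removed keys are interleaved above, here is the assembled templateData after this hunk, as a sketch with hypothetical placeholder values. Identity and routing keys move to the top level, eval configuration moves under processData, "analytics" becomes "analyticsName", and "fieldMappig" (missing "n") is the key name as released:

    templateData = {
        "processID": "...",
        "socketID": "...",
        "rowID": "...",
        "columnID": "...",
        "processType": "EVAL",
        "evalType": "LLM",
        "workspaceID": "...",
        "email": "...",
        "playgroundID": "...",
        "source": "SDK",
        "processData": {
            "analyticsName": "...",
            "definition": "...",
            "executionDependency": {
                "Query": "", "Context": "", "Output": "...", "Tools": "...",
                "GroundTruth": "...", "MessageHistory": "...", "IntermediateSteps": "...",
            },
            "categories": {}, "evaluationStrictness": {}, "grammarCheckOutput": {},
            "insightLength": {}, "numJudges": {}, "penaltyBonusInstructions": [],
            "probableEdgeCases": [],
            "model": "...",
            "provider": "...",
        },
        "type": "EVAL",
        "kpi": "...",
        "fieldMappig": [],
    }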
@@ -826,7 +858,6 @@
             # print("All received keys:", received_rowIDs)
             # print("Missing keys:", len(missingRowIDs))
             missingRowIDs=list(missingRowIDs)
-
             if len(missingRowIDs) > 0:
                 dataFromDb=fetchData(workspaceID,activePlayground,missingRowIDs)
                 rawResults.extend(dataFromDb)
@@ -866,11 +897,18 @@
                 promptText=prompt_template,
                 definationMapping=self.definationMapping,
                 outputColName=outputColName,
-                activePlayground= activePlayground
+                activePlayground= activePlayground,
+                customAnalytics=customAnalytics
             ):
                 print(
                     "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
                 )
+                if getDataFrame:
+                    return LlumoDataFrameResults(dataframe,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+                else:
+                    data=dataframe.to_dict(orient="records")
+                    return LlumoDictResults(data,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+
             else:
                 if getDataFrame:
                     return LlumoDataFrameResults(dataframe,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
@@ -935,6 +973,7 @@
             ):
                 print(
                     "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results.")
+
             else:
                 if getDataFrame == True and toEvaluate == True:
                     return LlumoDataFrameResults(working_df, evals=self.evals, evalData=self.evalData,
@@ -1231,6 +1270,20 @@
             ):
                 print(
                     "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results.")
+                if getDataFrame == True and toEvaluate == True:
+                    return LlumoDataFrameResults(working_df, evals=self.evals, evalData=self.evalData,
+                                                 definationMapping=self.definationMapping)
+
+                elif getDataFrame == False and toEvaluate == True:
+                    data = working_df.to_dict(orient="records")
+                    return LlumoDictResults(data, evals=self.evals, evalData=self.evalData,
+                                            definationMapping=self.definationMapping)
+
+                elif getDataFrame== True and toEvaluate == False:
+                    return working_df
+
+                elif getDataFrame == False and toEvaluate == False :
+                    return working_df.to_dict(orient = "records")
             else:
                 if getDataFrame == True and toEvaluate == True:
                     return LlumoDataFrameResults(working_df, evals=self.evals, evalData=self.evalData,
@@ -1468,6 +1521,12 @@
             ):
                 print(
                     "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results.")
+                if getDataFrame:
+                    return working_df
+
+                else:
+                    data = working_df.to_dict(orient="records")
+                    return data
             else:
                 if getDataFrame:
                     return working_df
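The result-shaping additions above all implement one contract, sketched here with the flag combinations spelled out (class names from this diff); methods that lack a toEvaluate flag use only the getDataFrame branch:

    # getDataFrame=True,  toEvaluate=True  -> LlumoDataFrameResults(working_df, ...)
    # getDataFrame=False, toEvaluate=True  -> LlumoDictResults(working_df.to_dict(orient="records"), ...)
    # getDataFrame=True,  toEvaluate=False -> working_df (plain DataFrame)
    # getDataFrame=False, toEvaluate=False -> working_df.to_dict(orient="records")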
llumo/exceptions.py
CHANGED
@@ -22,8 +22,8 @@ class LlumoAIError(Exception):
         return LlumoAIError("The API response is not in valid JSON format")

     @staticmethod
-    def UnexpectedError(detail="
-        return LlumoAIError(f"
+    def UnexpectedError(detail="Metric"):
+        return LlumoAIError(f"Can you please check if {detail} is written correctly. If you want to run {detail} please create a custom eval with same name of app.llumo.ai/evallm ")

     @staticmethod
     def EvalError(detail="Some error occured while processing"):
llumo/helpingFuntions.py
CHANGED
@@ -26,6 +26,7 @@ uploadRowList = (
 )
 createInsightUrl="https://app.llumo.ai/api/external/generate-insight-from-eval-for-sdk"

+getCustomAnalyticsUrl="https://app.llumo.ai/api/workspace/get-all-analytics"

 def getProcessID():
     return f"{int(time.time() * 1000)}{uuid.uuid4()}"
@@ -217,7 +218,7 @@ def deleteColumnListInPlayground(workspaceID: str, playgroundID: str):
         return None

 def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColName=None,
-                 outputColName= "output",dataStreamName=None,definationMapping=None,evalOutputMap = None):
+                 outputColName= "output",dataStreamName=None,definationMapping=None,evalOutputMap = None,customAnalytics=[]):
     if len(dataframe) > 100:
         dataframe = dataframe.head(100)
         print("⚠️ Dataframe truncated to 100 rows for upload.")
@@ -227,10 +228,13 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
         "playgroundID": playgroundID,
         "columnListToUpload": [],
     }
-    allEvals = ['Response Completeness', 'Response Bias', 'Response Harmfulness', 'Input Toxicity', 'Input Harmfulness', 'Context Utilization', 'Relevance Retention', 'Semantic Cohesion', 'Final Task Alignment', 'Tool Reliability', 'Response Correctness', 'Response Toxicity', 'Input Bias', 'Input Relevancy', 'Redundancy Reduction', 'Response Sentiment', 'Tool Selection Accuracy', 'Stepwise Progression', 'Hallucination', 'Faithfulness', 'Answer Relevancy', 'Context Precision', 'Answer Similarity', 'Harmfulness', 'Maliciousness', 'Coherence', 'Answer Correctness', 'Context Recall', 'Context Entity Recall', 'Conciseness', 'customEvalColumn', 'Groundedness', 'Memory Utilization', 'Input Relevancy (Multi-turn)']
-
-
-
+    allEvals = ['Response Completeness', 'Response Bias', 'Response Harmfulness', 'Input Toxicity', 'Input Harmfulness', 'Context Utilization', 'Relevance Retention', 'Semantic Cohesion', 'Final Task Alignment', 'Tool Reliability', 'Response Correctness', 'Response Toxicity', 'Input Bias', 'Input Relevancy', 'Redundancy Reduction', 'Response Sentiment', 'Tool Selection Accuracy', 'Stepwise Progression', 'Hallucination', 'Faithfulness', 'Answer Relevancy', 'Context Precision', 'Answer Similarity', 'Harmfulness', 'Maliciousness', 'Coherence', 'Answer Correctness', 'Context Recall', 'Context Entity Recall', 'Conciseness', 'customEvalColumn', 'Groundedness', 'Memory Utilization', 'Input Relevancy (Multi-turn)','PII Check','Prompt Injection']
+    try:
+        allEvals.extend(list(customAnalytics.keys()))
+    except Exception as e:
+        pass
+    evalDependencies = checkDependency(_returnDepMapping=True,customevals=customAnalytics)
+    print(allEvals)
     # Create a mapping of column names to unique column IDs
     columnIDMapping = {}

@@ -296,7 +300,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                  "order": indx}


-        elif any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText is not None:
+        elif any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText is not None :
             if evalOutputMap != None:
                 outputColName = evalOutputMap[col]
             else:
@@ -455,14 +459,14 @@ def uploadRowsInDBPlayground(payload):
         return None


-def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",evalOutputMap = None,activePlayground=None):
+def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",evalOutputMap = None,activePlayground=None,customAnalytics=[]):

     if activePlayground != None:
         playgroundId=activePlayground
     else:
         playgroundId = str(createEvalPlayground(email=email, workspaceID=workspaceID))
     payload1, payload2 = createColumn(
-        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,evalOutputMap=evalOutputMap
+        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,evalOutputMap=evalOutputMap,customAnalytics=customAnalytics
     )

     # Debugging line to check the payload2 structure
@@ -515,7 +519,7 @@ def getPlaygroundInsights(defination:str,uniqueClassesString: str, reasonList: l
     else:
         print(f"Error generating insight: {responseGenerate.status_code} - {responseGenerate.text}")
         return None
-def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_returnDepMapping = False):
+def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_returnDepMapping = False,customevals={}):
     """
     Checks if all the required input columns for the selected evaluation metric are present.

@@ -527,6 +531,7 @@ def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_retu
     - LlumoAIError.dependencyError: If any required column is missing.
     """
     # Define required dependencies for each evaluation metric
+
     metricDependencies = {
         'Response Completeness': ['context', 'query', 'output'],
         'Response Bias': ['output'],
@@ -537,20 +542,24 @@ def checkDependency(selectedEval:list = [], columns:list = [],tocheck=True,_retu
         'Relevance Retention': ['context', 'query'],
         'Semantic Cohesion': ['context'],
         'Final Task Alignment': ['query','output'],
-        'Tool Reliability': ['
+        'Tool Reliability': ['intermediateSteps'],
         'Response Correctness': ['output', 'query', 'context'],
         'Response Toxicity': ['output'],
         'Input Bias': ['query'],
         'Input Relevancy': ['context', 'query'],
         'Redundancy Reduction': ['context'],
         'Response Sentiment': ['output'],
-        'Tool Selection Accuracy': ['tools', '
-        'Stepwise Progression': ['tools', '
+        'Tool Selection Accuracy': ['tools', 'intermediateSteps'],
+        'Stepwise Progression': ['tools', 'intermediateSteps'],
         'Hallucination': ['query', 'context', 'output'],
         'Groundedness': ['groundTruth', 'output'],
         'Memory Utilization': ['context', 'messageHistory'],
-        'Input Relevancy (Multi-turn)': ['context', 'query']
+        'Input Relevancy (Multi-turn)': ['context', 'query'],
+        'PII Check':["query","output"],
+        'Prompt Injection':["query"]
     }
+
+    metricDependencies.update(customevals)
     if _returnDepMapping == True:
         return metricDependencies

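The single metricDependencies.update(customevals) line gives custom evals last-write-wins semantics: they extend the built-in mapping, and a custom eval that reuses a built-in name replaces its dependency list. A short sketch with a hypothetical custom eval:

    builtin = {"Hallucination": ["query", "context", "output"]}
    custom = {"My Custom Eval": ["query", "output"],
              "Hallucination": ["query"]}  # hypothetical same-name override
    builtin.update(custom)
    # builtin["Hallucination"] == ["query"]; "My Custom Eval" is now checkable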
@@ -650,6 +659,7 @@ def validateGoogleKey(api_key):

 def groupLogsByClass(logs, max_logs=2):
     # Initialize the final result structures (no defaultdict)
+
     groupedLogs = {}
     uniqueEdgeCases = {}  # This will store unique edge cases for each eval_name

@@ -683,3 +693,46 @@ def groupLogsByClass(logs, max_logs=2):
         uniqueEdgeCases[eval_name] = list(uniqueEdgeCases[eval_name])

     return groupedLogs, uniqueEdgeCases
+
+
+def getCustomAnalytics(workspaceID):
+    try:
+        url = getCustomAnalyticsUrl
+        payload = {
+            "workspaceID": workspaceID
+        }
+
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        response = requests.post(url, json=payload, headers=headers)
+        data=response.json()
+        customData=data.get("data","").get("analyticsCustom","")
+        customMapping = {
+            "QUERY": "query",
+            "CONTEXT": "context",
+            "OUTPUT": "output",
+            "MESSAGEHISTORY": "messageHistory",
+            "TOOLS": "tools",
+            "INTERMEDIATESTEPS": "intermediateSteps",
+            "GROUNDTRUTH": "groundTruth",
+        }
+
+        metricDependencies = {}
+
+
+
+        for eval in customData:
+            evalName = eval.get("analyticsName")
+            evalDependencyRaw = list(eval.get("variableMappings").values())
+
+            # Replace each value using the custom mapping
+            evalDependency = [customMapping.get(val.upper(), val.lower()) for val in evalDependencyRaw]
+
+            # Build the dict
+            metricDependencies[evalName] = evalDependency
+        return metricDependencies
+
+    except Exception as e:
+        return {}
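A usage sketch for the new helper, assuming the endpoint returns analyticsCustom entries shaped as the loop above expects (hypothetical values):

    # Response shape assumed by getCustomAnalytics:
    # {"data": {"analyticsCustom": [
    #     {"analyticsName": "My Custom Eval",
    #      "variableMappings": {"var1": "QUERY", "var2": "OUTPUT"}}]}}
    deps = getCustomAnalytics("workspace-123")
    # -> {"My Custom Eval": ["query", "output"]}
    # checkDependency(..., customevals=deps) then merges these into metricDependencies.
    # On any error (network failure, missing keys) the helper swallows the
    # exception and returns {}.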
{llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/RECORD
CHANGED

@@ -1,16 +1,16 @@
 llumo/__init__.py,sha256=YVBkF1fiXFBd_zzySi9BDWgX8MJuLBJ-oF8538MrnDU,256
 llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
-llumo/client.py,sha256=
-llumo/exceptions.py,sha256=
+llumo/client.py,sha256=pCocD7v5dDuHKlWE6OqZrrEk4e_LWYuI_LgWaGT3E-g,67498
+llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
 llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
 llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
 llumo/google.py,sha256=3S_aRtbtlctCXPGR0u4baLlkyFrsjd02vlUCkoRPA5U,2147
-llumo/helpingFuntions.py,sha256=
+llumo/helpingFuntions.py,sha256=B6FwUQ5f1v4FKrWCbYoGWMFdscOV_liuuhTgNQ3cdrk,27275
 llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
 llumo/openai.py,sha256=DGhEwQIJIIycGpw3hYQnyxdj6RFVpZ-gay-fZGqtkhU,3013
 llumo/sockets.py,sha256=pBDo-U65hMIMwKMwZQl3iBkEjISEt-9BkXxZTWfSHF4,6116
-llumo-0.2.
-llumo-0.2.
-llumo-0.2.
-llumo-0.2.
-llumo-0.2.
+llumo-0.2.24.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.24.dist-info/METADATA,sha256=IfYBa5UYiXuF595u13Qyi5MoQorHzxpfpUSPmciS7rM,1558
+llumo-0.2.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.24.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.24.dist-info/RECORD,,
{llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/WHEEL
File without changes

{llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/licenses/LICENSE
File without changes

{llumo-0.2.22.dist-info → llumo-0.2.24.dist-info}/top_level.txt
File without changes