llumo 0.2.14b6__py3-none-any.whl → 0.2.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llumo/client.py +398 -173
- llumo/exceptions.py +4 -0
- llumo/execution.py +4 -5
- llumo/helpingFuntions.py +116 -29
- llumo/models.py +63 -26
- llumo/sockets.py +3 -3
- {llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/METADATA +1 -1
- llumo-0.2.15.dist-info/RECORD +13 -0
- llumo-0.2.14b6.dist-info/RECORD +0 -13
- {llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/WHEEL +0 -0
- {llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/licenses/LICENSE +0 -0
- {llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/top_level.txt +0 -0
llumo/client.py
CHANGED
@@ -5,12 +5,12 @@ import time
 import re
 import json
 import uuid
-
+import warnings
 import os
 import itertools
 import pandas as pd
 from typing import List, Dict
-from .models import AVAILABLEMODELS, getProviderFromModel
+from .models import AVAILABLEMODELS, getProviderFromModel, Provider
 from .execution import ModelExecutor
 from .exceptions import LlumoAIError
 from .helpingFuntions import *
@@ -19,12 +19,19 @@ from .functionCalling import LlumoAgentExecutor
 import threading
 from tqdm import tqdm

+pd.set_option('future.no_silent_downcasting', True)
+
 postUrl = (
     "https://red-skull-service-392377961931.us-central1.run.app/api/process-playground"
 )
 fetchUrl = (
     "https://red-skull-service-392377961931.us-central1.run.app/api/get-cells-data"
 )
+socketDataUrl = "https://app.llumo.ai/api/eval/get-awaited"
+# {
+#     "workspaceID":"c9191fdf33bdd7838328c1a0",
+#     "playgroundID":"17496117244856b7815ac94004347b1c2e2f7e01600ec"
+# }
 validateUrl = "https://app.llumo.ai/api/workspace-details"
 socketUrl = "https://red-skull-service-392377961931.us-central1.run.app/"

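The new module-level `pd.set_option('future.no_silent_downcasting', True)` opts into the pandas 2.2+ behavior where `fillna`/`replace` no longer downcast object columns behind the caller's back; that is why later hunks pair `fillna` with an explicit `astype(str)` or `infer_objects(copy=False)`. A minimal sketch of the difference (assumes pandas >= 2.2):

```python
import pandas as pd

pd.set_option("future.no_silent_downcasting", True)

s = pd.Series([True, None], dtype=object)
# Previously fillna could silently downcast object -> bool; with the
# option enabled the result stays object and the caller converts explicitly.
as_text = s.fillna(False).astype(str)                 # what the diff does before upload
downcast = s.fillna(False).infer_objects(copy=False)  # opt-in downcast, no FutureWarning
```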
@@ -33,7 +40,7 @@ class LlumoClient:

     def __init__(self, api_key):
         self.apiKey = api_key
-
+
         self.processMapping = {}
         self.definationMapping = {}

@@ -45,6 +52,7 @@ class LlumoClient:
         reqBody = {"analytics": [evalName]}

         try:
+
             response = requests.post(url=validateUrl, json=reqBody, headers=headers)

         except requests.exceptions.RequestException as e:
@@ -89,9 +97,8 @@ class LlumoClient:
             )
             self.email = data["data"]["data"].get("email", None)

-            self.definationMapping[evalName] = data
-
-            ]
+            self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, None)
+
         except Exception as e:
             # print(f"Error extracting data from response: {str(e)}")
             raise LlumoAIError.UnexpectedError(detail=str(e))
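The rewritten assignment walks the response with chained `dict.get` calls, so a missing level yields `None` instead of raising `KeyError`. An illustration of the pattern against a hypothetical response of the same shape (only the key names come from the diff above):

```python
# Hypothetical response shape; the key names match the diff above.
data = {"data": {"data": {"analyticsMapping": {"Response Correctness": {"definition": "..."}}}}}

evalName = "Response Correctness"
# Each .get(..., {}) substitutes an empty dict when a level is absent,
# so the chain never raises; only the final lookup may return None.
mapping = (
    data.get("data", {})
        .get("data", {})
        .get("analyticsMapping", {})
        .get(evalName, None)
)
```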
@@ -577,7 +584,8 @@ class LlumoClient:
         createExperiment: bool = False,
         _tocheck=True,
     ):
-
+        self.socket = LlumoSocketClient(socketUrl)
+        dataframe = pd.DataFrame(data).astype(str)
         workspaceID = None
         email = None
         socketID = self.socket.connect(timeout=250)
@@ -602,13 +610,16 @@ class LlumoClient:
             kwargs={
                 "min_wait": 40,
                 "max_wait": timeout,
-                "inactivity_timeout":
+                "inactivity_timeout": 10,
                 "expected_results": expectedResults,
             },
             daemon=True,
         )
         listener_thread.start()
-
+
+        activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace(
+            "-", ""
+        )
         for evalName in evals:
             # print(f"\n======= Running evaluation for: {evalName} =======")

@@ -656,9 +667,7 @@ class LlumoClient:
                 inputDict = {key: row[key] for key in keys if key in row}
                 output = row.get(outputColName, "")

-
-                    "-", ""
-                )
+
                 rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                 columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")

@@ -751,15 +760,24 @@ class LlumoClient:
             time.sleep(3)
         listener_thread.join()

-
-
+
+        rawResults = self.socket.getReceivedData()
+
+        # print("data from db #####################",dataFromDb)
         # Fix here: keep full keys, do not split keys
-
-
-
+        receivedRowIDs = {key for item in rawResults for key in item.keys()}
+        expectedRowIDs = set(rowIdMapping.keys())
+        missingRowIDs = expectedRowIDs - receivedRowIDs
         # print("All expected keys:", expected_rowIDs)
         # print("All received keys:", received_rowIDs)
-        print("Missing keys:", len(
+        # print("Missing keys:", len(missingRowIDs))
+        missingRowIDs=list(missingRowIDs)
+
+        if len(missingRowIDs) > 0:
+            dataFromDb=fetchData(workspaceID,activePlayground,missingRowIDs)
+            rawResults.extend(dataFromDb)
+
+

         # Initialize dataframe columns for each eval
         for eval in evals:
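This hunk closes a reliability gap: socket results can arrive incomplete, so the expected row IDs are diffed against the ones actually received and the stragglers are re-fetched from the database via `fetchData`. A minimal sketch of the recovery pattern (names follow the diff; `fetch_missing` stands in for the DB call):

```python
def merge_results(raw_results, row_id_mapping, fetch_missing):
    """Backfill rows the socket listener never delivered."""
    received = {key for item in raw_results for key in item.keys()}
    expected = set(row_id_mapping.keys())
    missing = list(expected - received)
    if missing:
        # Fall back to the persisted copy for anything the socket dropped.
        raw_results.extend(fetch_missing(missing))
    return raw_results
```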
@@ -767,7 +785,7 @@ class LlumoClient:
             dataframe[f"{eval} Reason"] = None

         # Map results to dataframe rows
-        for item in
+        for item in rawResults:
             for compound_key, value in item.items():
                 if compound_key in rowIdMapping:
                     index = rowIdMapping[compound_key]["index"]
@@ -776,10 +794,16 @@ class LlumoClient:
                     dataframe.at[index, f"{evalName} Reason"] = value.get("reasoning")

         self.socket.disconnect()
+

         if createExperiment:
             pd.set_option("future.no_silent_downcasting", True)
-            df = dataframe.fillna("Some error occured").astype(object)
+            # df = dataframe.fillna("Some error occured").astype(object)
+            with warnings.catch_warnings():
+                warnings.simplefilter(action='ignore', category=FutureWarning)
+                df = dataframe.fillna("Some error occurred").astype(str)
+
+                df = dataframe.fillna("Some error occured").infer_objects(copy=False)
             if createPlayground(
                 email,
                 workspaceID,
@@ -794,7 +818,7 @@ class LlumoClient:
         else:
             return dataframe

-    def
+    def promptSweep(
         self,
         templates: List[str],
         dataset: Dict[str, List[str]],
@@ -803,9 +827,15 @@ class LlumoClient:
         evals=["Response Correctness"],
         toEvaluate: bool = False,
         createExperiment: bool = False,
+
+
     ) -> pd.DataFrame:

-
+        modelStatus = validateModels(model_aliases=model_aliases)
+        if modelStatus["status"]== False:
+            raise LlumoAIError.providerError(modelStatus["message"])
+
+        self.validateApiKey()
         workspaceID = self.workspaceID
         email = self.email
         executor = ModelExecutor(apiKey)
@@ -910,6 +940,7 @@ class LlumoClient:
         evals=["Final Task Alignment"],
         prompt_template="Give answer for the given query: {{query}}",
         createExperiment: bool = False,
+
     ):
         if model.lower() not in ["openai", "google"]:
             raise ValueError("Model must be 'openai' or 'google'")
@@ -940,7 +971,7 @@ class LlumoClient:
         if createExperiment:
             pd.set_option("future.no_silent_downcasting", True)
             df = toolResponseDf.fillna("Some error occured")
-            if createPlayground(self.email, self.workspaceID, df):
+            if createPlayground(self.email, self.workspaceID, df,promptText=prompt_template,definationMapping=self.definationMapping):
                 print(
                     "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
                 )
@@ -971,183 +1002,380 @@ class LlumoClient:
             # )
             toolResponseDf = self.evaluateMultiple(
                 toolResponseDf.to_dict(orient="records"),
-
+                evals=evals,
                 prompt_template="Give answer for the given query: {{query}}",
                 outputColName=outputColName,
+                createExperiment=createExperiment
             )
-
+            if createExperiment:
+                pass
+            else:
+                return toolResponseDf

         except Exception as e:
             raise e

-    def
-
-
-
-
+    def ragSweep(
+        self,
+        data,
+        streamName: str,
+        queryColName: str = "query",
+        createExperiment: bool = False,
+        modelAliases=[],
+        apiKey="",
+        prompt_template="Give answer to the given: {{query}} using the context:{{context}}",
+        evals=["Context Utilization"],
+        toEvaluate=False,
+        generateOutput=True
     ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
-        # print(f"Connected with socket ID: {socketID}")
-        rowIdMapping = {}
+        # Validate required parameters
+        if generateOutput:
+            if not modelAliases:
+                raise ValueError("Model aliases must be provided when generateOutput is True.")
+            if not apiKey or not isinstance(apiKey, str) or apiKey.strip() == "":
+                raise ValueError("Valid API key must be provided when generateOutput is True.")
+
+        modelStatus = validateModels(model_aliases=modelAliases)
+        if modelStatus["status"]== False:
+            if len(modelAliases) == 0:
+                raise LlumoAIError.providerError("No model selected.")
+            else:
+                raise LlumoAIError.providerError(modelStatus["message"])

-
-
-
+        # Copy the original dataframe
+        original_df = pd.DataFrame(data)
+        working_df = original_df.copy()

-
-
-
-
-
-
-
-
-
+        # Connect to socket
+        self.socket = LlumoSocketClient(socketUrl)
+        socketID = self.socket.connect(timeout=150)
+        waited_secs = 0
+        while not self.socket._connection_established.is_set():
+            time.sleep(0.1)
+            waited_secs += 0.1
+            if waited_secs >= 20:
+                raise RuntimeError("Timeout waiting for server 'connection-established' event.")

-
-        if not userHits["success"]:
-            raise LlumoAIError.InsufficientCredits(userHits["message"])
+        self.validateApiKey()

-
-
-
-
-
-
-
-        currentBatch = []
+        # Check user credits
+        userHits = checkUserHits(
+            self.workspaceID, self.hasSubscribed, self.trialEndDate,
+            self.subscriptionEndDate, self.hitsAvailable, len(working_df)
+        )
+        if not userHits["success"]:
+            raise LlumoAIError.InsufficientCredits(userHits["message"])

-
-
-
-
-        rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
-        columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+        print("====🚀Sit back while we fetch data from the stream 🚀====")
+        workspaceID, email = self.workspaceID, self.email
+        activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+        streamId = getStreamId(workspaceID, self.apiKey, streamName)

-
-
-
-
-            "socketID": socketID,
-            "processData": {
-                "executionDependency": {"query": row[queryColName]},
-                "dataStreamID": streamId,
-            },
-            "workspaceID": workspaceID,
-            "email": email,
-            "type": "DATA_STREAM",
-            "playgroundID": activePlayground,
-            "processType": "DATA_STREAM",
-            "rowID": rowID,
-            "columnID": columnID,
-            "source": "SDK",
-        }
+        # Prepare batches
+        rowIdMapping = {}
+        self.allBatches = []
+        currentBatch = []

-
+        expectedResults = len(working_df)
+        timeout = max(100, min(150, expectedResults * 10))

-
+        listener_thread = threading.Thread(
+            target=self.socket.listenForResults,
+            kwargs={
+                "min_wait": 40,
+                "max_wait": timeout,
+                "inactivity_timeout": 10,
+                "expected_results": expectedResults,
+            },
+            daemon=True
+        )
+        listener_thread.start()
+
+        for index, row in working_df.iterrows():
+            rowID, columnID = uuid.uuid4().hex, uuid.uuid4().hex
+            compoundKey = f"{rowID}-{columnID}-{columnID}"
+            rowIdMapping[compoundKey] = {"index": index}
+            templateData = {
+                "processID": getProcessID(),
+                "socketID": socketID,
+                "processData": {
+                    "executionDependency": {"query": row[queryColName]},
+                    "dataStreamID": streamId,
+                },
+                "workspaceID": workspaceID,
+                "email": email,
+                "type": "DATA_STREAM",
+                "playgroundID": activePlayground,
+                "processType": "DATA_STREAM",
+                "rowID": rowID,
+                "columnID": columnID,
+                "source": "SDK",
+            }
+            currentBatch.append(templateData)
+            if len(currentBatch) == 10 or index == len(working_df) - 1:
                 self.allBatches.append(currentBatch)
                 currentBatch = []

-
-
-
+        for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch", colour="magenta", ncols=80):
+            try:
+                self.postDataStream(batch=batch, workspaceID=workspaceID)
+                time.sleep(3)
+            except Exception as e:
+                print(f"Error posting batch: {e}")
+                raise

-
-
+        time.sleep(3)
+        listener_thread.join()
+
+        rawResults = self.socket.getReceivedData()
+        expectedRowIDs = set(rowIdMapping.keys())
+        receivedRowIDs = {key for item in rawResults for key in item.keys()}
+        missingRowIDs = list(expectedRowIDs - receivedRowIDs)
+
+        if missingRowIDs:
+            dataFromDb = fetchData(workspaceID, activePlayground, missingRowIDs)
+            rawResults.extend(dataFromDb)
+
+        working_df["context"] = None
+        for item in rawResults:
+            for compound_key, value in item.items():
+                if compound_key in rowIdMapping:
+                    idx = rowIdMapping[compound_key]["index"]
+                    working_df.at[idx, "context"] = value.get("value")
+
+        # Output generation
+        if generateOutput == True:
+            working_df = self._outputForStream(working_df, modelAliases, prompt_template, apiKey)
+
+        # Optional evaluation
+        outputEvalMapping = None
+        if toEvaluate:
+            for evalName in evals:
+                # Validate API and dependencies
+                self.validateApiKey(evalName=evalName)
+                metricDependencies = checkDependency(
+                    evalName, list(working_df.columns), tocheck=False
+                )
+                if not metricDependencies["status"]:
+                    raise LlumoAIError.dependencyError(metricDependencies["message"])
+
+            working_df, outputEvalMapping = self._evaluateForStream(working_df, evals, modelAliases, prompt_template,generateOutput)
+
+
+        self.socket.disconnect()
+
+        # Create experiment if required
+        if createExperiment:
+            # df = working_df.fillna("Some error occured").astype(object)
+            with warnings.catch_warnings():
+                warnings.simplefilter(action='ignore', category=FutureWarning)
+                df = working_df.fillna("Some error occurred").astype(str)
+            if createPlayground(
+                    email, workspaceID, df,
+                    queryColName=queryColName,
+                    dataStreamName=streamId,
+                    promptText=prompt_template,
+                    definationMapping=self.definationMapping,
+                    evalOutputMap=outputEvalMapping
+            ):
+                print(
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
+        else:
+            self.latestDataframe = working_df
+            return working_df
+
+    def _outputForStream(self, df, modelAliases, prompt_template, apiKey):
+        executor = ModelExecutor(apiKey)
+
+        for indx, row in df.iterrows():
+            inputVariables = re.findall(r"{{(.*?)}}", prompt_template)
+            if not all([k in df.columns for k in inputVariables]):
+                raise LlumoAIError.InvalidPromptTemplate()
+
+            inputDict = {key: row[key] for key in inputVariables}
+            for i, model in enumerate(modelAliases, 1):
                 try:
-
-
+
+                    provider = getProviderFromModel(model)
+                    if provider == Provider.OPENAI:
+                        validateOpenaiKey(apiKey)
+                    elif provider == Provider.GOOGLE:
+                        validateGoogleKey(apiKey)
+
+                    filled_template = getInputPopulatedPrompt(prompt_template, inputDict)
+                    response = executor.execute(provider, model.value, filled_template, apiKey)
+                    df.at[indx, f"output_{i}"] = response
+
                 except Exception as e:
-
-
+                    # df.at[indx, f"output_{i}"] = str(e)
+                    raise e

-
-        time.sleep(1)
+        return df

-
-
-        # Calculate a reasonable timeout based on the data size
-        timeout = max(60, min(600, total_items * 10))
-        # print(f"All batches posted. Waiting up to {timeout} seconds for results...")
+    def _evaluateForStream(self, df, evals, modelAliases, prompt_template,generateOutput):
+        dfWithEvals = df.copy()

-
-
-
-
-
-
-
+        outputColMapping = {}
+        for i, model in enumerate(modelAliases, 1):
+            if generateOutput:
+                outputColName = f"output_{i}"
+            else:
+                outputColName = "output"
+            try:

-
-
-
+                res = self.evaluateMultiple(
+                    dfWithEvals.to_dict("records"),
+                    evals=evals,
+                    prompt_template=prompt_template,
+                    outputColName=outputColName,
+                    _tocheck=False,
+                )
+                for evalMetric in evals:
+                    scoreCol = f"{evalMetric}"
+                    reasonCol = f"{evalMetric} Reason"

-
-
-
+                    if scoreCol in res.columns:
+                        res = res.rename(columns={scoreCol: f"{scoreCol}_{i}"})
+                    if reasonCol in res.columns:
+                        res = res.rename(columns={reasonCol: f"{evalMetric}_{i} Reason"})

-
+                    outputColMapping[f"{scoreCol}_{i}"] = outputColName

-
-
-
-
-
+                newCols = [col for col in res.columns if col not in dfWithEvals.columns]
+                dfWithEvals = pd.concat([dfWithEvals, res[newCols]], axis=1)
+            except Exception as e:
+                print(f"Evaluation failed for model {model.value}: {str(e)}")
+        return dfWithEvals, outputColMapping
+
+    def runDataStream(
+        self,
+        data,
+        streamName: str,
+        queryColName: str = "query",
+        createExperiment: bool = False,
+    ):
+
+
+        # Copy the original dataframe
+        original_df = pd.DataFrame(data)
+        working_df = original_df.copy()
+
+        # Connect to socket
+        self.socket = LlumoSocketClient(socketUrl)
+        socketID = self.socket.connect(timeout=150)
+        waited_secs = 0
+        while not self.socket._connection_established.is_set():
+            time.sleep(0.1)
+            waited_secs += 0.1
+            if waited_secs >= 20:
+                raise RuntimeError("Timeout waiting for server 'connection-established' event.")
+
+        self.validateApiKey()
+
+        # Check user credits
+        userHits = checkUserHits(
+            self.workspaceID, self.hasSubscribed, self.trialEndDate,
+            self.subscriptionEndDate, self.hitsAvailable, len(working_df)
+        )
+        if not userHits["success"]:
+            raise LlumoAIError.InsufficientCredits(userHits["message"])
+
+        print("====🚀Sit back while we fetch data from the stream 🚀====")
+        workspaceID, email = self.workspaceID, self.email
+        activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+        streamId = getStreamId(workspaceID, self.apiKey, streamName)
+
+        # Prepare batches
+        rowIdMapping = {}
+        self.allBatches = []
+        currentBatch = []
+
+        expectedResults = len(working_df)
+        timeout = max(100, min(150, expectedResults * 10))
+
+        listener_thread = threading.Thread(
+            target=self.socket.listenForResults,
+            kwargs={
+                "min_wait": 40,
+                "max_wait": timeout,
+                "inactivity_timeout": 10,
+                "expected_results": expectedResults,
+            },
+            daemon=True
+        )
+        listener_thread.start()
+
+        for index, row in working_df.iterrows():
+            rowID, columnID = uuid.uuid4().hex, uuid.uuid4().hex
+            compoundKey = f"{rowID}-{columnID}-{columnID}"
+            rowIdMapping[compoundKey] = {"index": index}
+            templateData = {
+                "processID": getProcessID(),
+                "socketID": socketID,
+                "processData": {
+                    "executionDependency": {"query": row[queryColName]},
+                    "dataStreamID": streamId,
+                },
+                "workspaceID": workspaceID,
+                "email": email,
+                "type": "DATA_STREAM",
+                "playgroundID": activePlayground,
+                "processType": "DATA_STREAM",
+                "rowID": rowID,
+                "columnID": columnID,
+                "source": "SDK",
+            }
+            currentBatch.append(templateData)
+            if len(currentBatch) == 10 or index == len(working_df) - 1:
+                self.allBatches.append(currentBatch)
+                currentBatch = []
+
+        for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch", colour="magenta", ncols=80):
             try:
-                self.
-
+                self.postDataStream(batch=batch, workspaceID=workspaceID)
+                time.sleep(3)
             except Exception as e:
-                print(f"Error
+                print(f"Error posting batch: {e}")
+                raise

-
-
-        for item in records:
-            for compound_key, value in item.items():
-            # for compound_key, value in item['data'].items():
+        time.sleep(3)
+        listener_thread.join()

-
-
-
-
-                    # dataframe.at[index, evalName] = value
-                    dataframe.at[index, streamName] = value["value"]
+        rawResults = self.socket.getReceivedData()
+        expectedRowIDs = set(rowIdMapping.keys())
+        receivedRowIDs = {key for item in rawResults for key in item.keys()}
+        missingRowIDs = list(expectedRowIDs - receivedRowIDs)

-
-
-
+        if missingRowIDs:
+            dataFromDb = fetchData(workspaceID, activePlayground, missingRowIDs)
+            rawResults.extend(dataFromDb)

-
-
-
+        working_df["context"] = None
+        for item in rawResults:
+            for compound_key, value in item.items():
+                if compound_key in rowIdMapping:
+                    idx = rowIdMapping[compound_key]["index"]
+                    working_df.at[idx, "context"] = value.get("value")
+
+
+
+        self.socket.disconnect()

+        # Create experiment if required
+        if createExperiment:
+            df = working_df.fillna("Some error occured").astype(object)
         if createPlayground(
-
-
-
-
-            dataStreamName=streamId,
+                email, workspaceID, df,
+                queryColName=queryColName,
+                dataStreamName=streamId,
+                definationMapping=self.definationMapping,
         ):
             print(
-                "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
-            )
+                "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
         else:
-            self.latestDataframe =
-            return
+            self.latestDataframe = working_df
+            return working_df
+

     def createExperiment(self, dataframe):
         try:
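Both new stream methods post work in fixed batches of ten and flush whatever remains on the last row. The chunking rule in isolation (a sketch; the generic `post` callback is assumed):

```python
def post_in_batches(rows, post, batch_size=10):
    batch = []
    for index, row in enumerate(rows):
        batch.append(row)
        # Flush on a full batch, and on the final row even if the batch is short.
        if len(batch) == batch_size or index == len(rows) - 1:
            post(batch)
            batch = []
```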
@@ -1165,18 +1393,8 @@ class LlumoClient:

         workspaceID = None
         email = None
-        socketID = self.socket.connect(timeout=150)
-        self.allBatches = []
-
-        # Wait for socket connection
-        max_wait_secs = 20
-        waited_secs = 0
-        while not self.socket._connection_established.is_set():
-            time.sleep(0.1)
-            waited_secs += 0.1
-            if waited_secs >= max_wait_secs:
-                raise RuntimeError("Timeout waiting for server connection")

+
         try:
             self.validateApiKey()
         except Exception as e:
@@ -1195,14 +1413,21 @@ class LlumoClient:
             elif ext in [".xlsx", ".xls"]:
                 df = pd.read_excel(file_path)
             elif ext == ".json":
-                df = pd.read_json(file_path)
+                df = pd.read_json(file_path, orient="records")
             elif ext == ".parquet":
                 df = pd.read_parquet(file_path)
             else:
                 raise ValueError(f"Unsupported file format: {ext}")

             # If successfully loaded, call createPlayground
-
+            df = df.astype(str)
+            if createPlayground(self.email, self.workspaceID, df):
+
+                print(
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+                )
+
+                return True

         except Exception as e:
             print(f"Error: {e}")
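The switch to `pd.read_json(file_path, orient="records")` pins the expected layout to a JSON array of row objects instead of letting pandas guess the orientation. For example:

```python
import io
import pandas as pd

# orient="records": one object per row, matching the payloads the SDK uploads.
raw = '[{"query": "q1", "output": "a1"}, {"query": "q2", "output": "a2"}]'
df = pd.read_json(io.StringIO(raw), orient="records")
print(df.columns.tolist())  # ['query', 'output']
```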
llumo/exceptions.py
CHANGED
@@ -50,6 +50,10 @@ class LlumoAIError(Exception):
     def dependencyError(details):
         return LlumoAIError(details)

+    @staticmethod
+    def providerError(details):
+        return LlumoAIError(details)
+
     # @staticmethod
     # def dateNotFound():
     #     return LlumoAIError("Trial end date or subscription end date not found for the given user.")
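`providerError` mirrors the existing `dependencyError` factory: a static method wrapping the message in a `LlumoAIError`. Illustrative use, matching the call sites added in client.py:

```python
# Raised when the selected model aliases span more than one provider.
modelStatus = {"status": False, "message": "All selected models should be of same provider."}
if modelStatus["status"] == False:
    raise LlumoAIError.providerError(modelStatus["message"])
```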
llumo/execution.py
CHANGED
@@ -25,15 +25,14 @@ class ModelExecutor:
         return response.choices[0].message.content

     def _executeGoogle(self, modelName: str, prompt: str,api_key) -> str:
-
+
         # Configure GenAI with API Key
         genai.configure(api_key=api_key)
-
+
         # Select Generative Model
         model = genai.GenerativeModel("gemini-2.0-flash-lite")
         # Generate Response
         response = model.generate_content(prompt)
         return response.text
-
-
-
+
+
llumo/helpingFuntions.py
CHANGED
@@ -8,7 +8,11 @@ import json
 import base64
 import os
 import re
+import openai
+import google.generativeai as genai

+
+from .models import _MODEL_METADATA, AVAILABLEMODELS
 subscriptionUrl = "https://app.llumo.ai/api/workspace/record-extra-usage"
 getStreamdataUrl = "https://app.llumo.ai/api/data-stream/all"
 createPlayUrl = "https://app.llumo.ai/api/New-Eval-API/create-new-eval-playground"
@@ -212,8 +216,8 @@ def deleteColumnListInPlayground(workspaceID: str, playgroundID: str):
         print("❌ Error:", response.status_code, response.text)
         return None

-
-
+def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColName=None,
+                 outputColName= "output",dataStreamName=None,definationMapping=None,evalOutputMap = None):
     if len(dataframe) > 100:
         dataframe = dataframe.head(100)
         print("⚠️ Dataframe truncated to 100 rows for upload.")
@@ -233,12 +237,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
     # Iterate over each column in the dataframe
     for indx, col in enumerate(dataframe.columns):
         # Generate a unique column ID using uuid
-        columnID = str(uuid.uuid4().hex[:8])
+        columnID = str(uuid.uuid4().hex[:8])

         columnIDMapping[col] = columnID

-
-        if col.startswith('output') and
+
+        if col.startswith('output') and promptText!=None:
             # For output columns, create the prompt template with promptText
             if promptText:
                 # Extract variables from promptText and set them as dependencies
@@ -249,12 +253,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN

                 # Loop through each variable and check if it exists as a column name
                 for var in variables:
-                    varName = var.strip()
+                    varName = var.strip()
                     if varName in columnIDMapping:  # Check if the variable is a column name
                         dependencies.append(columnIDMapping[varName])  # Add its columnID

                 # Now update the template for the output column
-
+
                 template={
                     "provider": "OPENAI",
                     "model": "GPT_4o",
@@ -276,8 +280,8 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                     "type": "PROMPT",
                     "order": indx,
                 }
-
-        elif col.startswith('
+
+        elif col.startswith('context') and dataStreamName != None :
             if queryColName and dataStreamName:
                 dependencies = []
                 dependencies.append(columnIDMapping[queryColName])
@@ -287,22 +291,27 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                     "dataStreamName": dataStreamName,
                     "query": columnIDMapping[queryColName],
                     "columnID": columnID,  # Use the generated column ID
-                    "label": "
+                    "label": "context",
                     "type": "DATA_STREAM",
                     "order": indx}

-        elif col in allEvals and uploadViaSDK == False:

+        elif any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText is not None:
+            if evalOutputMap != None:
+                outputColName = evalOutputMap[col]
+            else:
+                outputColName = outputColName
             dependencies = []
             variables = re.findall(r'{{(.*?)}}', promptText)

             # Loop through each variable and check if it exists as a column name
             for var in variables:
-                varName = var.strip()
+                varName = var.strip()
                 if varName in columnIDMapping:  # Check if the variable is a column name
                     dependencies.append(columnIDMapping[varName])
-
+
             dependencies.append(columnIDMapping[outputColName])  # Add the output column ID
+
             longDef = definationMapping.get(col, {}).get('definition', "")
             shortDef =definationMapping.get(col, {}).get('briefDefinition', "")
             enum = col.upper().replace(" ","_")
@@ -341,12 +350,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                 "order": indx
             }

-        elif col.endswith(' Reason'):
-            continue
+        elif col.endswith(' Reason') and promptText!=None:
+            continue
+

-
         else:
-
+
             template = {
                 "label": col,  # Label is the column name
                 "type": "VARIABLE",  # Default type for non-output columns
@@ -371,25 +380,27 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
         row_dict = {}

         # For each column, we need to map the column ID to the corresponding value in the row
+
         for col in dataframe.columns:
             columnID = columnIDMapping[col]
-
-            if col in allEvals:
+
+            if any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText!=None:
+
                 row_dict[columnID] = {
-
+
                     "value": row[col],
                     "type": "EVAL",
                     "isValid": True,
                     "reasoning": row[col+" Reason"],
                     "edgeCase": "minorHallucinationDetailNotInContext",
                     "kpi": col
-
-
-            elif col.endswith(' Reason'):
+
+                }
+            elif col.endswith(' Reason') and promptText!=None:
                 continue
             else:# Get the columnID from the mapping
                 row_dict[columnID] = row[col]
-
+
             # row_dict[columnID] = row[col] # Directly map the column ID to the row value
             # Add the row index (if necessary)
             row_dict["pIndex"] = indx
@@ -397,6 +408,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN

     # Return the column template, row template, and the column ID mapping
     return coltemplate, rowTemplate
+
 def uploadColumnListInPlayground(payload):
     url = uploadColList
     headers = {
@@ -440,15 +452,14 @@ def uploadRowsInDBPlayground(payload):
         return None


-def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",
+def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",evalOutputMap = None):

     playgroundId = str(createEvalPlayground(email=email, workspaceID=workspaceID))
     payload1, payload2 = createColumn(
-        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,
+        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,evalOutputMap=evalOutputMap
     )

-
-
+    # Debugging line to check the payload2 structure
     deleteExistingRows = deleteColumnListInPlayground(
         workspaceID=workspaceID, playgroundID=playgroundId
     )
@@ -460,6 +471,7 @@ def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,d



+
 def getPlaygroundInsights(workspaceID: str, activePlayground: str):
     headers = {

@@ -563,4 +575,79 @@ def checkDependency(selectedEval, columns,tocheck=True):
         }
         return {"status":True,"message":"success"}
     else:
-        return {"status":True,"message":"success"}
+        return {"status":True,"message":"success"}
+
+
+def fetchData(workspaceID, playgroundID, missingList: list):
+    # Define the URL and prepare the payload
+    socket_data_url = "https://app.llumo.ai/api/eval/get-awaited"
+    payload = {
+        "workspaceID": workspaceID,
+        "playgroundID": playgroundID,
+        "missingList": missingList
+    }
+
+    try:
+        # Send a POST request to the API
+        response = requests.post(socket_data_url, json=payload)
+
+        # Check if the response is successful
+        if response.status_code == 200:
+            # Parse the JSON data from the response
+            data = response.json().get("data", {})
+
+
+            # Prepare the list of all data values in the desired format
+            result_list = []
+            for key, value in data.items():
+                # Create a dictionary for each item in the response data
+                result_list.append({
+                    key: {
+                        "value": value.get("value"),
+                        "reasoning": value.get("reasoning"),
+                        "edgeCase": value.get("edgeCase"),
+                        "kpi": value.get("kpi")
+                    }
+                })
+
+            return result_list
+        else:
+            print(f"Failed to fetch data. Status Code: {response.status_code}")
+            return []
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return []
+
+def validateModels(model_aliases):
+
+    selectedProviders = []
+    for name in model_aliases:
+        for alias ,(provider , modelName ) in _MODEL_METADATA.items():
+            if modelName == name:
+                selectedProviders.append(provider)
+
+    if len(set(selectedProviders)) > 1:
+        return {"status": False,"message":"All selected models should be of same provider."}
+    else:
+        return {"status": True,"message":"All selected models are of same provider."}
+
+
+
+def validateOpenaiKey(api_key):
+    try:
+        client = openai.OpenAI(api_key=api_key)
+        _ = client.models.list()  # Light call to list models
+    except openai.AuthenticationError:
+        raise ValueError("❌ Invalid OpenAI API key.")
+    except Exception as e:
+        raise RuntimeError(f"⚠️ Error validating OpenAI key: {e}")
+
+def validateGoogleKey(api_key):
+    try:
+        genai.configure(api_key=api_key)
+        _ = genai.GenerativeModel("gemini-2.0-flash-lite").generate_content("test")
+    except Exception as e:
+        if "PERMISSION_DENIED" in str(e) or "API key not valid" in str(e):
+            raise ValueError("❌ Invalid Google API key.")
+        raise RuntimeError(f"⚠️ Error validating Gemini key: {e}")
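`validateModels` enforces that every alias in a sweep resolves to a single provider by collecting providers out of `_MODEL_METADATA` and checking the size of the set. The check in isolation, against toy metadata of the same shape:

```python
# Toy metadata: alias -> (provider, API model name), same shape as _MODEL_METADATA.
metadata = {
    "GPT_4O": ("OPENAI", "GPT_4O"),
    "GPT_4_1": ("OPENAI", "GPT_4_1"),
    "Gemini_Pro": ("GOOGLE", "GEMINI_PRO"),
}

def check_same_provider(names):
    providers = [prov for _, (prov, model) in metadata.items() if model in names]
    if len(set(providers)) > 1:
        return {"status": False, "message": "All selected models should be of same provider."}
    return {"status": True, "message": "All selected models are of same provider."}

print(check_same_provider(["GPT_4O", "GPT_4_1"]))     # {'status': True, ...}
print(check_same_provider(["GPT_4O", "GEMINI_PRO"]))  # {'status': False, ...}
```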
llumo/models.py
CHANGED
@@ -6,35 +6,72 @@ class Provider(str, Enum):

 # Maps model aliases → (provider, actual model name for API)
 _MODEL_METADATA = {
-    "
-    "
-    "
-    "
-    "
-    "
-
-    "
-    "
-    "
-    "
-    "
-    "
+    "GPT_4O": (Provider.OPENAI, "GPT_4O"),
+    "GPT_4_5": (Provider.OPENAI, "GPT_4_5"),
+    "GPT_4": (Provider.OPENAI, "GPT_4"),
+    "GPT_4_32K": (Provider.OPENAI, "GPT_4_32K"),
+    "GPT_3_5_Turbo": (Provider.OPENAI, "GPT_35T"),
+    "GPT_3_5_Turbo_Instruct": (Provider.OPENAI, "GPT_35T_INS"),
+    "GPT_3_5_Turbo_16K": (Provider.OPENAI, "GPT_35T_16K"),
+    "GPT_4_o_Mini": (Provider.OPENAI, "GPT_4O_MINI"),
+    "o4_MINI": (Provider.OPENAI, "O4_MINI"),
+    "o4_MINI_HIGH": (Provider.OPENAI, "O4_MINI_HIGH"),
+    "GPT_4_1": (Provider.OPENAI, "GPT_4_1"),
+    "GPT_4_1_Mini": (Provider.OPENAI, "GPT_4_1_MINI"),
+    "GPT_4_1_nano": (Provider.OPENAI, "GPT_4_1_NANO"),
+    "o3": (Provider.OPENAI, "O3"),
+    "o3_MINI": (Provider.OPENAI, "O3_MINI"),
+    "o1": (Provider.OPENAI, "O1"),
+    "o1_MINI": (Provider.OPENAI, "O1_MINI"),
+
+
+    "Gemini_2_5_Pro": (Provider.GOOGLE, "GEMINI_2_5_PRO"),
+    "Gemini_2_5_Flash": (Provider.GOOGLE, "GEMINI_2_5_FLASH"),
+    "Gemini_2_0": (Provider.GOOGLE, "GEMINI_2_0"),
+    "Gemini_2_0_Flash": (Provider.GOOGLE, "GEMINI_2_0_FLASH"),
+    "Gemini_Pro": (Provider.GOOGLE, "GEMINI_PRO"),
+    "Text_Bison": (Provider.GOOGLE, "TEXT_BISON"),
+    "Chat_Bison": (Provider.GOOGLE, "CHAT_BISON"),
+    "Text_Bison_32k": (Provider.GOOGLE, "TEXT_BISON_32K"),
+    "Text_Unicorn": (Provider.GOOGLE, "TEXT_UNICORN"),
+    "Google_1_5_Flash": (Provider.GOOGLE, "GOOGLE_15_FLASH"),
+    "Gemma_3_9B": (Provider.GOOGLE, "GEMMA_3_9B"),
+    "Gemma_3_27B": (Provider.GOOGLE, "GEMMA_3_27B"),
 }

 class AVAILABLEMODELS(str, Enum):
-
-
-
-
-
-
-
-
-
-
-
-
-
+    GPT_4o= "GPT_4O",
+    GPT_4o_Mini= "GPT_4O_MINI",
+    GPT_4_5= "GPT_4_5",
+    GPT_4= "GPT_4",
+    GPT_4_32K= "GPT_4_32K",
+    GPT_3_5_Turbo= "GPT_35T",
+    GPT_3_5_Turbo_Instruct= "GPT_35T_INS",
+    GPT_3_5_Turbo_16K= "GPT_35T_16K",
+    GPT_4_o_Mini= "GPT_4O_MINI",
+    o4_MINI = "O4_MINI",
+    o4_MINI_HIGH = "O4_MINI_HIGH",
+    GPT_4_1 = "GPT_4_1",
+    GPT_4_1_Mini = "GPT_4_1_MINI",
+    GPT_4_1_nano = "GPT_4_1_NANO",
+    o3 = "O3",
+    o3_MINI = "O3_MINI",
+    o1 = "O1",
+    o1_MINI = "O1_MINI",
+
+    Gemini_2_5_Pro = "GEMINI_2_5_PRO",
+    Gemini_2_5_Flash = "GEMINI_2_5_FLASH",
+    Gemini_2_0 = "GEMINI_2_0",
+    Gemini_2_0_Flash = "GEMINI_2_0_FLASH",
+    Gemini_Pro = "GEMINI_PRO",
+    Text_Bison = "TEXT_BISON",
+    Chat_Bison = "CHAT_BISON",
+    Text_Bison_32k = "TEXT_BISON_32K",
+    Text_Unicorn = "TEXT_UNICORN",
+    Google_1_5_Flash = "GOOGLE_15_FLASH",
+    Gemma_3_9B = "GEMMA_3_9B",
+    Gemma_3_27B = "GEMMA_3_27B",
+

 def getProviderFromModel(model: AVAILABLEMODELS) -> Provider:
     for alias, (provider, apiName) in _MODEL_METADATA.items():
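The expanded `_MODEL_METADATA` table is what `getProviderFromModel` scans: resolving a model is a linear search for a matching API name. A standalone toy version of the lookup (the real function takes an `AVAILABLEMODELS` member):

```python
from enum import Enum

class Provider(str, Enum):
    OPENAI = "OPENAI"
    GOOGLE = "GOOGLE"

# Alias -> (provider, API model name), mirroring the table above.
metadata = {
    "GPT_4O": (Provider.OPENAI, "GPT_4O"),
    "Gemini_Pro": (Provider.GOOGLE, "GEMINI_PRO"),
}

def provider_for(api_name: str) -> Provider:
    for alias, (provider, name) in metadata.items():
        if name == api_name:
            return provider
    raise ValueError(f"Unknown model: {api_name}")

assert provider_for("GEMINI_PRO") is Provider.GOOGLE
```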
llumo/sockets.py
CHANGED
@@ -17,10 +17,10 @@ class LlumoSocketClient:

         # Initialize client
         self.sio = socketio.Client(
-            logger=
-            engineio_logger=
+            logger=False,
+            engineio_logger=False,
             reconnection=True,
-            reconnection_attempts=
+            reconnection_attempts=1,
             reconnection_delay=1,
         )

llumo-0.2.15.dist-info/RECORD
ADDED
@@ -0,0 +1,13 @@
+llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
+llumo/client.py,sha256=zh6fpKpjlYcvzrPZkPviF1hDRzfnA1K0U1gweoKfkwc,54675
+llumo/exceptions.py,sha256=Vp_MnanHbnd1Yjuoi6WLrKiwwZbJL3znCox2URMmGU4,2032
+llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
+llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
+llumo/helpingFuntions.py,sha256=-9GA9X0KBUVZb3_25D8AlninWnVc9ajFp4QkR_mDePY,23545
+llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
+llumo/sockets.py,sha256=I2JO_eNEctRo_ikgvFVp5zDd-m0VDu04IEUhhsa1Tic,5950
+llumo-0.2.15.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.15.dist-info/METADATA,sha256=OQApH-0Gj918OaMbyQasOtE6lAhU5__No3SK9xge-NM,1519
+llumo-0.2.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.15.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.15.dist-info/RECORD,,
llumo-0.2.14b6.dist-info/RECORD
DELETED
@@ -1,13 +0,0 @@
-llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
-llumo/client.py,sha256=YmvbfyWR9YCDOFrKM0nwDMWYLGpu4RSZwbkiUJ3e78M,46162
-llumo/exceptions.py,sha256=i3Qv4_g7XjRuho7-b7ybjw2bwSh_NhvICR6ZAgiLQX8,1944
-llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
-llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
-llumo/helpingFuntions.py,sha256=f2Y-x-DbGk3E29qaJWDOsTkuqqDFl9-VQTRM490amE4,20443
-llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
-llumo/sockets.py,sha256=-zJYRCDRwElIPr5iOFqzQxjecuLJ7mztiyYJz14pGLY,5949
-llumo-0.2.14b6.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
-llumo-0.2.14b6.dist-info/METADATA,sha256=2Yl4gnAXsfpJWLB6mhlza0HUE76uJY3sC1TWK7GlUu4,1521
-llumo-0.2.14b6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llumo-0.2.14b6.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
-llumo-0.2.14b6.dist-info/RECORD,,
{llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/WHEEL
File without changes
{llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/licenses/LICENSE
File without changes
{llumo-0.2.14b6.dist-info → llumo-0.2.15.dist-info}/top_level.txt
File without changes