llumo 0.2.14b6__py3-none-any.whl → 0.2.15b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llumo/client.py +378 -170
- llumo/exceptions.py +4 -0
- llumo/helpingFuntions.py +97 -29
- llumo/models.py +63 -26
- llumo/sockets.py +3 -3
- {llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/METADATA +1 -1
- llumo-0.2.15b1.dist-info/RECORD +13 -0
- llumo-0.2.14b6.dist-info/RECORD +0 -13
- {llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/WHEEL +0 -0
- {llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/licenses/LICENSE +0 -0
- {llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/top_level.txt +0 -0
llumo/client.py
CHANGED
@@ -25,6 +25,11 @@ postUrl = (
 fetchUrl = (
     "https://red-skull-service-392377961931.us-central1.run.app/api/get-cells-data"
 )
+socketDataUrl = "https://app.llumo.ai/api/eval/get-awaited"
+# {
+#     "workspaceID":"c9191fdf33bdd7838328c1a0",
+#     "playgroundID":"17496117244856b7815ac94004347b1c2e2f7e01600ec"
+# }
 validateUrl = "https://app.llumo.ai/api/workspace-details"
 socketUrl = "https://red-skull-service-392377961931.us-central1.run.app/"
 
@@ -33,7 +38,7 @@ class LlumoClient:
 
     def __init__(self, api_key):
         self.apiKey = api_key
-
+
         self.processMapping = {}
         self.definationMapping = {}
 
@@ -45,6 +50,7 @@
         reqBody = {"analytics": [evalName]}
 
         try:
+            print(reqBody)
             response = requests.post(url=validateUrl, json=reqBody, headers=headers)
 
         except requests.exceptions.RequestException as e:
@@ -89,9 +95,8 @@
             )
             self.email = data["data"]["data"].get("email", None)
 
-            self.definationMapping[evalName] = data
-
-            ]
+            self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, None)
+
         except Exception as e:
             # print(f"Error extracting data from response: {str(e)}")
             raise LlumoAIError.UnexpectedError(detail=str(e))
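The rewritten assignment above trades bare indexing for chained `dict.get` lookups, so a missing level in the response payload yields `None` instead of raising `KeyError`. A minimal sketch of the pattern, using a made-up payload shaped like the `validateUrl` response (key contents here are illustrative):

```python
# Hypothetical payload shaped like the workspace-details response above.
payload = {"data": {"data": {"analyticsMapping": {"Clarity": {"definition": "..."}}}}}

# Each .get supplies an empty dict as the fallback, so the chain never raises.
mapping = payload.get("data", {}).get("data", {}).get("analyticsMapping", {}).get("Clarity", None)
print(mapping)  # {'definition': '...'}

# The same chain on an empty payload degrades to None instead of a KeyError.
print({}.get("data", {}).get("data", {}).get("analyticsMapping", {}).get("Clarity", None))  # None
```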
@@ -577,7 +582,8 @@
         createExperiment: bool = False,
         _tocheck=True,
     ):
-
+        self.socket = LlumoSocketClient(socketUrl)
+        dataframe = pd.DataFrame(data).astype(str)
         workspaceID = None
         email = None
         socketID = self.socket.connect(timeout=250)
@@ -602,13 +608,16 @@
             kwargs={
                 "min_wait": 40,
                 "max_wait": timeout,
-                "inactivity_timeout":
+                "inactivity_timeout": 10,
                 "expected_results": expectedResults,
             },
             daemon=True,
         )
         listener_thread.start()
-
+
+        activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace(
+            "-", ""
+        )
         for evalName in evals:
             # print(f"\n======= Running evaluation for: {evalName} =======")
 
@@ -656,9 +665,7 @@
                 inputDict = {key: row[key] for key in keys if key in row}
                 output = row.get(outputColName, "")
 
-
-                    "-", ""
-                )
+
                 rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                 columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
 
@@ -751,15 +758,27 @@
         time.sleep(3)
         listener_thread.join()
 
-
-
+
+        rawResults = self.socket.getReceivedData()
+
+        # print("data from db #####################",dataFromDb)
         # Fix here: keep full keys, do not split keys
-
-
-
+        receivedRowIDs = {key for item in rawResults for key in item.keys()}
+        expectedRowIDs = set(rowIdMapping.keys())
+        missingRowIDs = expectedRowIDs - receivedRowIDs
         # print("All expected keys:", expected_rowIDs)
         # print("All received keys:", received_rowIDs)
-        print("Missing keys:", len(
+        # print("Missing keys:", len(missingRowIDs))
+        missingRowIDs=list(missingRowIDs)
+
+        if len(missingRowIDs) > 0:
+            dataFromDb=fetchData(workspaceID,activePlayground,missingRowIDs)
+            rawResults.extend(dataFromDb)
+
+
+
+
 
         # Initialize dataframe columns for each eval
         for eval in evals:
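This hunk adds a reconciliation pass: compound row keys that were expected but never arrived over the socket are found by set difference and backfilled from the database through the new `fetchData` helper. A self-contained sketch of the same set arithmetic, with toy stand-ins for the socket payloads:

```python
# Toy socket results: each item maps a compound rowID key to an eval result.
rawResults = [
    {"17099-aaaa-aaaa": {"value": "0.9", "reasoning": "grounded"}},
    {"17100-bbbb-bbbb": {"value": "0.4", "reasoning": "partial"}},
]
rowIdMapping = {
    "17099-aaaa-aaaa": {"index": 0},
    "17100-bbbb-bbbb": {"index": 1},
    "17101-cccc-cccc": {"index": 2},  # never delivered over the socket
}

receivedRowIDs = {key for item in rawResults for key in item.keys()}
expectedRowIDs = set(rowIdMapping.keys())
missingRowIDs = list(expectedRowIDs - receivedRowIDs)
print(missingRowIDs)  # ['17101-cccc-cccc'] — these rows are fetched from the DB instead
```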
@@ -767,7 +786,7 @@
             dataframe[f"{eval} Reason"] = None
 
         # Map results to dataframe rows
-        for item in
+        for item in rawResults:
             for compound_key, value in item.items():
                 if compound_key in rowIdMapping:
                     index = rowIdMapping[compound_key]["index"]
@@ -776,6 +795,7 @@
                     dataframe.at[index, f"{evalName} Reason"] = value.get("reasoning")
 
         self.socket.disconnect()
+
 
         if createExperiment:
             pd.set_option("future.no_silent_downcasting", True)
@@ -794,7 +814,7 @@
         else:
             return dataframe
 
-    def
+    def promptSweep(
         self,
         templates: List[str],
         dataset: Dict[str, List[str]],
@@ -803,9 +823,15 @@
         evals=["Response Correctness"],
         toEvaluate: bool = False,
         createExperiment: bool = False,
+
+
     ) -> pd.DataFrame:
 
-
+        modelStatus = validateModels(model_aliases=model_aliases)
+        if modelStatus["status"]== False:
+            raise LlumoAIError.providerError(modelStatus["message"])
+
+        self.validateApiKey()
         workspaceID = self.workspaceID
         email = self.email
         executor = ModelExecutor(apiKey)
@@ -910,6 +936,7 @@
         evals=["Final Task Alignment"],
         prompt_template="Give answer for the given query: {{query}}",
         createExperiment: bool = False,
+
     ):
         if model.lower() not in ["openai", "google"]:
             raise ValueError("Model must be 'openai' or 'google'")
@@ -940,7 +967,7 @@
         if createExperiment:
             pd.set_option("future.no_silent_downcasting", True)
             df = toolResponseDf.fillna("Some error occured")
-            if createPlayground(self.email, self.workspaceID, df):
+            if createPlayground(self.email, self.workspaceID, df,promptText=prompt_template,definationMapping=self.definationMapping):
                 print(
                     "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
                 )
@@ -971,183 +998,367 @@
             # )
             toolResponseDf = self.evaluateMultiple(
                 toolResponseDf.to_dict(orient="records"),
-
+                evals=evals,
                 prompt_template="Give answer for the given query: {{query}}",
                 outputColName=outputColName,
+                createExperiment=createExperiment
             )
-
+            if createExperiment:
+                pass
+            else:
+                return toolResponseDf
 
         except Exception as e:
             raise e
 
-    def
-
-
-
-
-
+    def ragSweep(
+        self,
+        data,
+        streamName: str,
+        queryColName: str = "query",
+        createExperiment: bool = False,
+        modelAliases=[],
+        apiKey="",
+        prompt_template="Give answer to the given: {{query}} using the context:{{context}}",
+        evals=["Context Utilization"],
+        toEvaluate=False,
+        generateOutput=True
     ):
-
-
-
-
-
-
-
-
-
-
-
-
-
-        )
-        # print(f"Connected with socket ID: {socketID}")
-        rowIdMapping = {}
+        # Validate required parameters
+        if generateOutput:
+            if not modelAliases:
+                raise ValueError("Model aliases must be provided when generateOutput is True.")
+            if not apiKey or not isinstance(apiKey, str) or apiKey.strip() == "":
+                raise ValueError("Valid API key must be provided when generateOutput is True.")
+
+        modelStatus = validateModels(model_aliases=modelAliases)
+        if modelStatus["status"]== False:
+            if len(modelAliases) == 0:
+                raise LlumoAIError.providerError("No model selected.")
+            else:
+                raise LlumoAIError.providerError(modelStatus["message"])
 
-
-
-
+        # Copy the original dataframe
+        original_df = pd.DataFrame(data)
+        working_df = original_df.copy()
 
-
-
-
-
-
-
-
-
-
+        # Connect to socket
+        self.socket = LlumoSocketClient(socketUrl)
+        socketID = self.socket.connect(timeout=150)
+        waited_secs = 0
+        while not self.socket._connection_established.is_set():
+            time.sleep(0.1)
+            waited_secs += 0.1
+            if waited_secs >= 20:
+                raise RuntimeError("Timeout waiting for server 'connection-established' event.")
 
-
-        if not userHits["success"]:
-            raise LlumoAIError.InsufficientCredits(userHits["message"])
+        self.validateApiKey()
 
-
-
-
-
-
-
-
-        currentBatch = []
+        # Check user credits
+        userHits = checkUserHits(
+            self.workspaceID, self.hasSubscribed, self.trialEndDate,
+            self.subscriptionEndDate, self.hitsAvailable, len(working_df)
+        )
+        if not userHits["success"]:
+            raise LlumoAIError.InsufficientCredits(userHits["message"])
 
-
-
-
-
-            rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
-            columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+        print("====🚀Sit back while we fetch data from the stream 🚀====")
+        workspaceID, email = self.workspaceID, self.email
+        activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+        streamId = getStreamId(workspaceID, self.apiKey, streamName)
 
-
-
-
-
-                "socketID": socketID,
-                "processData": {
-                    "executionDependency": {"query": row[queryColName]},
-                    "dataStreamID": streamId,
-                },
-                "workspaceID": workspaceID,
-                "email": email,
-                "type": "DATA_STREAM",
-                "playgroundID": activePlayground,
-                "processType": "DATA_STREAM",
-                "rowID": rowID,
-                "columnID": columnID,
-                "source": "SDK",
-            }
+        # Prepare batches
+        rowIdMapping = {}
+        self.allBatches = []
+        currentBatch = []
 
-
+        expectedResults = len(working_df)
+        timeout = max(100, min(150, expectedResults * 10))
+
+        listener_thread = threading.Thread(
+            target=self.socket.listenForResults,
+            kwargs={
+                "min_wait": 40,
+                "max_wait": timeout,
+                "inactivity_timeout": 10,
+                "expected_results": expectedResults,
+            },
+            daemon=True
+        )
+        listener_thread.start()
 
-
+        for index, row in working_df.iterrows():
+            rowID, columnID = uuid.uuid4().hex, uuid.uuid4().hex
+            compoundKey = f"{rowID}-{columnID}-{columnID}"
+            rowIdMapping[compoundKey] = {"index": index}
+            templateData = {
+                "processID": getProcessID(),
+                "socketID": socketID,
+                "processData": {
+                    "executionDependency": {"query": row[queryColName]},
+                    "dataStreamID": streamId,
+                },
+                "workspaceID": workspaceID,
+                "email": email,
+                "type": "DATA_STREAM",
+                "playgroundID": activePlayground,
+                "processType": "DATA_STREAM",
+                "rowID": rowID,
+                "columnID": columnID,
+                "source": "SDK",
+            }
+            currentBatch.append(templateData)
+            if len(currentBatch) == 10 or index == len(working_df) - 1:
                 self.allBatches.append(currentBatch)
                 currentBatch = []
 
-
-
-
+        for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch", colour="magenta", ncols=80):
+            try:
+                self.postDataStream(batch=batch, workspaceID=workspaceID)
+                time.sleep(3)
+            except Exception as e:
+                print(f"Error posting batch: {e}")
+                raise
 
-
-
+        time.sleep(3)
+        listener_thread.join()
+
+        rawResults = self.socket.getReceivedData()
+        expectedRowIDs = set(rowIdMapping.keys())
+        receivedRowIDs = {key for item in rawResults for key in item.keys()}
+        missingRowIDs = list(expectedRowIDs - receivedRowIDs)
+
+        if missingRowIDs:
+            dataFromDb = fetchData(workspaceID, activePlayground, missingRowIDs)
+            rawResults.extend(dataFromDb)
+
+        working_df["context"] = None
+        for item in rawResults:
+            for compound_key, value in item.items():
+                if compound_key in rowIdMapping:
+                    idx = rowIdMapping[compound_key]["index"]
+                    working_df.at[idx, "context"] = value.get("value")
+
+        # Output generation
+        if generateOutput == True:
+            working_df = self._outputForStream(working_df, modelAliases, prompt_template, apiKey)
+
+        # Optional evaluation
+        outputEvalMapping = None
+        if toEvaluate:
+            for evalName in evals:
+
+                # Validate API and dependencies
+                self.validateApiKey(evalName=evalName)
+                metricDependencies = checkDependency(
+                    evalName, list(working_df.columns), tocheck=True
+                )
+                if not metricDependencies["status"]:
+                    raise LlumoAIError.dependencyError(metricDependencies["message"])
+
+            working_df, outputEvalMapping = self._evaluateForStream(working_df, evals, modelAliases, prompt_template)
+
+
+        self.socket.disconnect()
+
+        # Create experiment if required
+        if createExperiment:
+            df = working_df.fillna("Some error occured").astype(object)
+            if createPlayground(
+                email, workspaceID, df,
+                queryColName=queryColName,
+                dataStreamName=streamId,
+                promptText=prompt_template,
+                definationMapping=self.definationMapping,
+                evalOutputMap=outputEvalMapping
+            ):
+                print(
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
+        else:
+            self.latestDataframe = working_df
+            return working_df
+
+    def _outputForStream(self, df, modelAliases, prompt_template, apiKey):
+        executor = ModelExecutor(apiKey)
+
+        for indx, row in df.iterrows():
+            inputVariables = re.findall(r"{{(.*?)}}", prompt_template)
+            if not all([k in df.columns for k in inputVariables]):
+                raise LlumoAIError.InvalidPromptTemplate()
+
+            inputDict = {key: row[key] for key in inputVariables}
+            for i, model in enumerate(modelAliases, 1):
                 try:
-
-
+                    provider = getProviderFromModel(model)
+                    filled_template = getInputPopulatedPrompt(prompt_template, inputDict)
+                    response = executor.execute(provider, model.value, filled_template, apiKey)
+                    df.at[indx, f"output_{i}"] = response
                 except Exception as e:
-
-
+                    df.at[indx, f"output_{i}"] = str(e)
+        return df
 
-
-
+    def _evaluateForStream(self, df, evals, modelAliases, prompt_template):
+        dfWithEvals = df.copy()
 
-
-        self.AllProcessMapping()
-        # Calculate a reasonable timeout based on the data size
-        timeout = max(60, min(600, total_items * 10))
-        # print(f"All batches posted. Waiting up to {timeout} seconds for results...")
+        outputColMapping = {}
 
-
-
-
-            max_wait=timeout,
-            inactivity_timeout=30,
-            expected_results=None,
-        )
+        for i, model in enumerate(modelAliases, 1):
+            outputColName = f"output_{i}"
+            try:
 
-
-
-
+                res = self.evaluateMultiple(
+                    dfWithEvals.to_dict("records"),
+                    evals=evals,
+                    prompt_template=prompt_template,
+                    outputColName=outputColName,
+                    _tocheck=False,
+                )
+                for evalMetric in evals:
+                    scoreCol = f"{evalMetric}"
+                    reasonCol = f"{evalMetric} Reason"
 
-
-
-
+                    if scoreCol in res.columns:
+                        res = res.rename(columns={scoreCol: f"{scoreCol}_{i}"})
+                    if reasonCol in res.columns:
+                        res = res.rename(columns={reasonCol: f"{evalMetric}_{i} Reason"})
 
-
+                    outputColMapping[f"{scoreCol}_{i}"] = outputColName
 
-
-
-
-
-
+                newCols = [col for col in res.columns if col not in dfWithEvals.columns]
+                dfWithEvals = pd.concat([dfWithEvals, res[newCols]], axis=1)
+            except Exception as e:
+                print(f"Evaluation failed for model {model.value}: {str(e)}")
+        return dfWithEvals, outputColMapping
+
+    def runDataStream(
+        self,
+        data,
+        streamName: str,
+        queryColName: str = "query",
+        createExperiment: bool = False,
+    ):
+
+
+        # Copy the original dataframe
+        original_df = pd.DataFrame(data)
+        working_df = original_df.copy()
+
+        # Connect to socket
+        self.socket = LlumoSocketClient(socketUrl)
+        socketID = self.socket.connect(timeout=150)
+        waited_secs = 0
+        while not self.socket._connection_established.is_set():
+            time.sleep(0.1)
+            waited_secs += 0.1
+            if waited_secs >= 20:
+                raise RuntimeError("Timeout waiting for server 'connection-established' event.")
+
+        self.validateApiKey()
+
+        # Check user credits
+        userHits = checkUserHits(
+            self.workspaceID, self.hasSubscribed, self.trialEndDate,
+            self.subscriptionEndDate, self.hitsAvailable, len(working_df)
+        )
+        if not userHits["success"]:
+            raise LlumoAIError.InsufficientCredits(userHits["message"])
+
+        print("====🚀Sit back while we fetch data from the stream 🚀====")
+        workspaceID, email = self.workspaceID, self.email
+        activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+        streamId = getStreamId(workspaceID, self.apiKey, streamName)
+
+        # Prepare batches
+        rowIdMapping = {}
+        self.allBatches = []
+        currentBatch = []
+
+        expectedResults = len(working_df)
+        timeout = max(100, min(150, expectedResults * 10))
+
+        listener_thread = threading.Thread(
+            target=self.socket.listenForResults,
+            kwargs={
+                "min_wait": 40,
+                "max_wait": timeout,
+                "inactivity_timeout": 10,
+                "expected_results": expectedResults,
+            },
+            daemon=True
+        )
+        listener_thread.start()
+
+        for index, row in working_df.iterrows():
+            rowID, columnID = uuid.uuid4().hex, uuid.uuid4().hex
+            compoundKey = f"{rowID}-{columnID}-{columnID}"
+            rowIdMapping[compoundKey] = {"index": index}
+            templateData = {
+                "processID": getProcessID(),
+                "socketID": socketID,
+                "processData": {
+                    "executionDependency": {"query": row[queryColName]},
+                    "dataStreamID": streamId,
+                },
+                "workspaceID": workspaceID,
+                "email": email,
+                "type": "DATA_STREAM",
+                "playgroundID": activePlayground,
+                "processType": "DATA_STREAM",
+                "rowID": rowID,
+                "columnID": columnID,
+                "source": "SDK",
+            }
+            currentBatch.append(templateData)
+            if len(currentBatch) == 10 or index == len(working_df) - 1:
+                self.allBatches.append(currentBatch)
+                currentBatch = []
+
+        for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch", colour="magenta", ncols=80):
             try:
-                self.
-
+                self.postDataStream(batch=batch, workspaceID=workspaceID)
+                time.sleep(3)
             except Exception as e:
-                print(f"Error
+                print(f"Error posting batch: {e}")
+                raise
 
-
-
-        for item in records:
-            for compound_key, value in item.items():
-                # for compound_key, value in item['data'].items():
+        time.sleep(3)
+        listener_thread.join()
 
-
-
-
-
-                # dataframe.at[index, evalName] = value
-                dataframe.at[index, streamName] = value["value"]
+        rawResults = self.socket.getReceivedData()
+        expectedRowIDs = set(rowIdMapping.keys())
+        receivedRowIDs = {key for item in rawResults for key in item.keys()}
+        missingRowIDs = list(expectedRowIDs - receivedRowIDs)
 
-
-
-
+        if missingRowIDs:
+            dataFromDb = fetchData(workspaceID, activePlayground, missingRowIDs)
+            rawResults.extend(dataFromDb)
 
-
-
-
+        working_df["context"] = None
+        for item in rawResults:
+            for compound_key, value in item.items():
+                if compound_key in rowIdMapping:
+                    idx = rowIdMapping[compound_key]["index"]
+                    working_df.at[idx, "context"] = value.get("value")
 
+
+
+        self.socket.disconnect()
+
+        # Create experiment if required
+        if createExperiment:
+            df = working_df.fillna("Some error occured").astype(object)
             if createPlayground(
-
-
-
-
-                dataStreamName=streamId,
+                email, workspaceID, df,
+                queryColName=queryColName,
+                dataStreamName=streamId,
+                definationMapping=self.definationMapping,
             ):
                 print(
-                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
-                )
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
         else:
-            self.latestDataframe =
-            return
+            self.latestDataframe = working_df
+            return working_df
+
 
     def createExperiment(self, dataframe):
         try:
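For orientation, a call to the new `ragSweep` method might look like the sketch below. This is a usage sketch only, assuming `LlumoClient` and `AVAILABLEMODELS` are importable as laid out in this release and that valid Llumo and provider keys are substituted; the stream name and query data are placeholders.

```python
from llumo.client import LlumoClient
from llumo.models import AVAILABLEMODELS

client = LlumoClient(api_key="YOUR_LLUMO_API_KEY")  # placeholder key

# Fetch context for each query from a data stream, then generate one output per model.
results = client.ragSweep(
    data=[{"query": "What is the refund policy?"}],
    streamName="my-knowledge-base",            # hypothetical stream name
    modelAliases=[AVAILABLEMODELS.GPT_4o],     # aliases must share a single provider
    apiKey="YOUR_OPENAI_API_KEY",              # provider key used for generation
    generateOutput=True,
)
print(results[["query", "context", "output_1"]])
```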
@@ -1165,18 +1376,8 @@
 
         workspaceID = None
         email = None
-        socketID = self.socket.connect(timeout=150)
-        self.allBatches = []
-
-        # Wait for socket connection
-        max_wait_secs = 20
-        waited_secs = 0
-        while not self.socket._connection_established.is_set():
-            time.sleep(0.1)
-            waited_secs += 0.1
-            if waited_secs >= max_wait_secs:
-                raise RuntimeError("Timeout waiting for server connection")
 
+
         try:
             self.validateApiKey()
         except Exception as e:
@@ -1195,14 +1396,21 @@
             elif ext in [".xlsx", ".xls"]:
                 df = pd.read_excel(file_path)
             elif ext == ".json":
-                df = pd.read_json(file_path)
+                df = pd.read_json(file_path, orient="records")
             elif ext == ".parquet":
                 df = pd.read_parquet(file_path)
             else:
                 raise ValueError(f"Unsupported file format: {ext}")
 
             # If successfully loaded, call createPlayground
-
+            df = df.astype(str)
+            if createPlayground(self.email, self.workspaceID, df):
+
+                print(
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+                )
+
+            return True
 
         except Exception as e:
             print(f"Error: {e}")
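The JSON branch now passes `orient="records"`, which pins `pd.read_json` to the one layout the playground upload expects: a top-level array of row objects. A quick illustration:

```python
import io
import pandas as pd

# orient="records" expects a JSON array of objects, one object per row.
records_json = '[{"query": "q1", "output": "a1"}, {"query": "q2", "output": "a2"}]'
df = pd.read_json(io.StringIO(records_json), orient="records")
print(df.shape)          # (2, 2)
print(list(df.columns))  # ['query', 'output']
```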
llumo/exceptions.py
CHANGED
@@ -50,6 +50,10 @@ class LlumoAIError(Exception):
     def dependencyError(details):
         return LlumoAIError(details)
 
+    @staticmethod
+    def providerError(details):
+        return LlumoAIError(details)
+
     # @staticmethod
     # def dateNotFound():
     #     return LlumoAIError("Trial end date or subscription end date not found for the given user.")
llumo/helpingFuntions.py
CHANGED
@@ -9,6 +9,8 @@ import base64
 import os
 import re
 
+
+from .models import _MODEL_METADATA, AVAILABLEMODELS
 subscriptionUrl = "https://app.llumo.ai/api/workspace/record-extra-usage"
 getStreamdataUrl = "https://app.llumo.ai/api/data-stream/all"
 createPlayUrl = "https://app.llumo.ai/api/New-Eval-API/create-new-eval-playground"
@@ -212,8 +214,8 @@ def deleteColumnListInPlayground(workspaceID: str, playgroundID: str):
         print("❌ Error:", response.status_code, response.text)
         return None
 
-
-
+def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColName=None,
+                 outputColName= "output",dataStreamName=None,definationMapping=None,evalOutputMap = None):
     if len(dataframe) > 100:
         dataframe = dataframe.head(100)
         print("⚠️ Dataframe truncated to 100 rows for upload.")
@@ -233,12 +235,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
     # Iterate over each column in the dataframe
     for indx, col in enumerate(dataframe.columns):
         # Generate a unique column ID using uuid
-        columnID = str(uuid.uuid4().hex[:8])
+        columnID = str(uuid.uuid4().hex[:8])
 
         columnIDMapping[col] = columnID
 
-
-        if col.startswith('output') and
+
+        if col.startswith('output') and promptText!=None:
             # For output columns, create the prompt template with promptText
             if promptText:
                 # Extract variables from promptText and set them as dependencies
@@ -249,12 +251,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
 
                 # Loop through each variable and check if it exists as a column name
                 for var in variables:
-                    varName = var.strip()
+                    varName = var.strip()
                     if varName in columnIDMapping:  # Check if the variable is a column name
                         dependencies.append(columnIDMapping[varName])  # Add its columnID
 
                 # Now update the template for the output column
-
+
                 template={
                     "provider": "OPENAI",
                     "model": "GPT_4o",
@@ -276,8 +278,8 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                     "type": "PROMPT",
                     "order": indx,
                 }
-
-        elif col.startswith('
+
+        elif col.startswith('context') and dataStreamName != None :
             if queryColName and dataStreamName:
                 dependencies = []
                 dependencies.append(columnIDMapping[queryColName])
@@ -287,22 +289,27 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                     "dataStreamName": dataStreamName,
                     "query": columnIDMapping[queryColName],
                     "columnID": columnID,  # Use the generated column ID
-                    "label": "
+                    "label": "context",
                     "type": "DATA_STREAM",
                     "order": indx}
 
-        elif col in allEvals and uploadViaSDK == False:
 
+        elif any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText is not None:
+            if evalOutputMap != None:
+                outputColName = evalOutputMap[col]
+            else:
+                outputColName = outputColName
             dependencies = []
             variables = re.findall(r'{{(.*?)}}', promptText)
 
             # Loop through each variable and check if it exists as a column name
             for var in variables:
-                varName = var.strip()
+                varName = var.strip()
                 if varName in columnIDMapping:  # Check if the variable is a column name
                     dependencies.append(columnIDMapping[varName])
-
+
             dependencies.append(columnIDMapping[outputColName])  # Add the output column ID
+
             longDef = definationMapping.get(col, {}).get('definition', "")
             shortDef =definationMapping.get(col, {}).get('briefDefinition', "")
             enum = col.upper().replace(" ","_")
@@ -341,12 +348,12 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                 "order": indx
             }
 
-        elif col.endswith(' Reason'):
-            continue
+        elif col.endswith(' Reason') and promptText!=None:
+            continue
+
 
-
         else:
-
+
             template = {
                 "label": col,  # Label is the column name
                 "type": "VARIABLE",  # Default type for non-output columns
@@ -371,25 +378,27 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
         row_dict = {}
 
         # For each column, we need to map the column ID to the corresponding value in the row
+        print(dataframe.columns)
         for col in dataframe.columns:
             columnID = columnIDMapping[col]
-
-            if col in allEvals:
+
+            if any(col.startswith(eval + "_") or col == eval for eval in allEvals) and not " Reason" in col and promptText!=None:
+                print(col)
                 row_dict[columnID] = {
-
+
                     "value": row[col],
                     "type": "EVAL",
                     "isValid": True,
                     "reasoning": row[col+" Reason"],
                     "edgeCase": "minorHallucinationDetailNotInContext",
                     "kpi": col
-
-
-            elif col.endswith(' Reason'):
+
+                }
+            elif col.endswith(' Reason') and promptText!=None:
                 continue
             else:# Get the columnID from the mapping
                 row_dict[columnID] = row[col]
-
+
         # row_dict[columnID] = row[col] # Directly map the column ID to the row value
         # Add the row index (if necessary)
         row_dict["pIndex"] = indx
@@ -397,6 +406,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
 
     # Return the column template, row template, and the column ID mapping
     return coltemplate, rowTemplate
+
 def uploadColumnListInPlayground(payload):
     url = uploadColList
     headers = {
@@ -440,15 +450,14 @@ def uploadRowsInDBPlayground(payload):
         return None
 
 
-def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",
+def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output",evalOutputMap = None):
 
     playgroundId = str(createEvalPlayground(email=email, workspaceID=workspaceID))
     payload1, payload2 = createColumn(
-        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,
+        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName,evalOutputMap=evalOutputMap
     )
 
-
-
+    # Debugging line to check the payload2 structure
     deleteExistingRows = deleteColumnListInPlayground(
         workspaceID=workspaceID, playgroundID=playgroundId
     )
@@ -460,6 +469,7 @@ def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,d
 
 
 
+
 def getPlaygroundInsights(workspaceID: str, activePlayground: str):
     headers = {
 
@@ -563,4 +573,62 @@ def checkDependency(selectedEval, columns,tocheck=True):
         }
         return {"status":True,"message":"success"}
     else:
-        return {"status":True,"message":"success"}
+        return {"status":True,"message":"success"}
+
+
+def fetchData(workspaceID, playgroundID, missingList: list):
+    # Define the URL and prepare the payload
+    socket_data_url = "https://app.llumo.ai/api/eval/get-awaited"
+    payload = {
+        "workspaceID": workspaceID,
+        "playgroundID": playgroundID,
+        "missingList": missingList
+    }
+
+    try:
+        # Send a POST request to the API
+        response = requests.post(socket_data_url, json=payload)
+
+        # Check if the response is successful
+        if response.status_code == 200:
+            # Parse the JSON data from the response
+            data = response.json().get("data", {})
+
+
+            # Prepare the list of all data values in the desired format
+            result_list = []
+            for key, value in data.items():
+                # Create a dictionary for each item in the response data
+                result_list.append({
+                    key: {
+                        "value": value.get("value"),
+                        "reasoning": value.get("reasoning"),
+                        "edgeCase": value.get("edgeCase"),
+                        "kpi": value.get("kpi")
+                    }
+                })
+
+            return result_list
+        else:
+            print(f"Failed to fetch data. Status Code: {response.status_code}")
+            return []
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return []
+
+def validateModels(model_aliases):
+
+    selectedProviders = []
+    for name in model_aliases:
+        for alias ,(provider , modelName ) in _MODEL_METADATA.items():
+            if modelName == name:
+                selectedProviders.append(provider)
+
+    if len(set(selectedProviders)) > 1:
+        return {"status": False,"message":"All selected models should be of same provider."}
+    else:
+        return {"status": True,"message":"All selected models are of same provider."}
+
+
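The new `validateModels` helper scans `_MODEL_METADATA` and rejects a selection that mixes providers. A sketch of its behavior, assuming the imports resolve as laid out in this release:

```python
from llumo.helpingFuntions import validateModels
from llumo.models import AVAILABLEMODELS

# Two OpenAI aliases: accepted.
print(validateModels([AVAILABLEMODELS.GPT_4o, AVAILABLEMODELS.GPT_4_1]))
# -> {'status': True, 'message': 'All selected models are of same provider.'}

# Mixing an OpenAI alias with a Google alias: rejected.
print(validateModels([AVAILABLEMODELS.GPT_4o, AVAILABLEMODELS.Gemini_Pro]))
# -> {'status': False, 'message': 'All selected models should be of same provider.'}
```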
llumo/models.py
CHANGED
@@ -6,35 +6,72 @@ class Provider(str, Enum):
 
 # Maps model aliases → (provider, actual model name for API)
 _MODEL_METADATA = {
-    "
-    "
-    "
-    "
-    "
-    "
-
-    "
-    "
-    "
-    "
-    "
-    "
+    "GPT_4O": (Provider.OPENAI, "GPT_4O"),
+    "GPT_4_5": (Provider.OPENAI, "GPT_4_5"),
+    "GPT_4": (Provider.OPENAI, "GPT_4"),
+    "GPT_4_32K": (Provider.OPENAI, "GPT_4_32K"),
+    "GPT_3_5_Turbo": (Provider.OPENAI, "GPT_35T"),
+    "GPT_3_5_Turbo_Instruct": (Provider.OPENAI, "GPT_35T_INS"),
+    "GPT_3_5_Turbo_16K": (Provider.OPENAI, "GPT_35T_16K"),
+    "GPT_4_o_Mini": (Provider.OPENAI, "GPT_4O_MINI"),
+    "o4_MINI": (Provider.OPENAI, "O4_MINI"),
+    "o4_MINI_HIGH": (Provider.OPENAI, "O4_MINI_HIGH"),
+    "GPT_4_1": (Provider.OPENAI, "GPT_4_1"),
+    "GPT_4_1_Mini": (Provider.OPENAI, "GPT_4_1_MINI"),
+    "GPT_4_1_nano": (Provider.OPENAI, "GPT_4_1_NANO"),
+    "o3": (Provider.OPENAI, "O3"),
+    "o3_MINI": (Provider.OPENAI, "O3_MINI"),
+    "o1": (Provider.OPENAI, "O1"),
+    "o1_MINI": (Provider.OPENAI, "O1_MINI"),
+
+
+    "Gemini_2_5_Pro": (Provider.GOOGLE, "GEMINI_2_5_PRO"),
+    "Gemini_2_5_Flash": (Provider.GOOGLE, "GEMINI_2_5_FLASH"),
+    "Gemini_2_0": (Provider.GOOGLE, "GEMINI_2_0"),
+    "Gemini_2_0_Flash": (Provider.GOOGLE, "GEMINI_2_0_FLASH"),
+    "Gemini_Pro": (Provider.GOOGLE, "GEMINI_PRO"),
+    "Text_Bison": (Provider.GOOGLE, "TEXT_BISON"),
+    "Chat_Bison": (Provider.GOOGLE, "CHAT_BISON"),
+    "Text_Bison_32k": (Provider.GOOGLE, "TEXT_BISON_32K"),
+    "Text_Unicorn": (Provider.GOOGLE, "TEXT_UNICORN"),
+    "Google_1_5_Flash": (Provider.GOOGLE, "GOOGLE_15_FLASH"),
+    "Gemma_3_9B": (Provider.GOOGLE, "GEMMA_3_9B"),
+    "Gemma_3_27B": (Provider.GOOGLE, "GEMMA_3_27B"),
 }
 
 class AVAILABLEMODELS(str, Enum):
-
-
-
-
-
-
-
-
-
-
-
-
-
+    GPT_4o= "GPT_4O",
+    GPT_4o_Mini= "GPT_4O_MINI",
+    GPT_4_5= "GPT_4_5",
+    GPT_4= "GPT_4",
+    GPT_4_32K= "GPT_4_32K",
+    GPT_3_5_Turbo= "GPT_35T",
+    GPT_3_5_Turbo_Instruct= "GPT_35T_INS",
+    GPT_3_5_Turbo_16K= "GPT_35T_16K",
+    GPT_4_o_Mini= "GPT_4O_MINI",
+    o4_MINI = "O4_MINI",
+    o4_MINI_HIGH = "O4_MINI_HIGH",
+    GPT_4_1 = "GPT_4_1",
+    GPT_4_1_Mini = "GPT_4_1_MINI",
+    GPT_4_1_nano = "GPT_4_1_NANO",
+    o3 = "O3",
+    o3_MINI = "O3_MINI",
+    o1 = "O1",
+    o1_MINI = "O1_MINI",
+
+    Gemini_2_5_Pro = "GEMINI_2_5_PRO",
+    Gemini_2_5_Flash = "GEMINI_2_5_FLASH",
+    Gemini_2_0 = "GEMINI_2_0",
+    Gemini_2_0_Flash = "GEMINI_2_0_FLASH",
+    Gemini_Pro = "GEMINI_PRO",
+    Text_Bison = "TEXT_BISON",
+    Chat_Bison = "CHAT_BISON",
+    Text_Bison_32k = "TEXT_BISON_32K",
+    Text_Unicorn = "TEXT_UNICORN",
+    Google_1_5_Flash = "GOOGLE_15_FLASH",
+    Gemma_3_9B = "GEMMA_3_9B",
+    Gemma_3_27B = "GEMMA_3_27B",
+
 
 
 def getProviderFromModel(model: AVAILABLEMODELS) -> Provider:
     for alias, (provider, apiName) in _MODEL_METADATA.items():
llumo/sockets.py
CHANGED
@@ -17,10 +17,10 @@ class LlumoSocketClient:
 
         # Initialize client
         self.sio = socketio.Client(
-            logger=
-            engineio_logger=
+            logger=False,
+            engineio_logger=False,
             reconnection=True,
-            reconnection_attempts=
+            reconnection_attempts=1,
             reconnection_delay=1,
         )
 
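The reconstructed values show the client now runs with both protocol loggers silenced and gives up after a single reconnection attempt. A minimal configuration sketch using the `python-socketio` client (the surrounding `LlumoSocketClient` wiring is omitted):

```python
import socketio

# Quiet Socket.IO client that retries a dropped connection once, then stops.
sio = socketio.Client(
    logger=False,            # no socket.io protocol logging
    engineio_logger=False,   # no transport-level logging
    reconnection=True,
    reconnection_attempts=1,
    reconnection_delay=1,
)
```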
llumo-0.2.15b1.dist-info/RECORD
ADDED
@@ -0,0 +1,13 @@
+llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
+llumo/client.py,sha256=XljwD5mZxjyrXHhu8YhN0cGsd-O_LyKbPzrhS8zbqZo,53778
+llumo/exceptions.py,sha256=Vp_MnanHbnd1Yjuoi6WLrKiwwZbJL3znCox2URMmGU4,2032
+llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
+llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
+llumo/helpingFuntions.py,sha256=0-ZwG0fnbfP4DP1JTMewM8LdXzz_-p1gRqhPsX0Zmpk,22785
+llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
+llumo/sockets.py,sha256=I2JO_eNEctRo_ikgvFVp5zDd-m0VDu04IEUhhsa1Tic,5950
+llumo-0.2.15b1.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.15b1.dist-info/METADATA,sha256=yDLkiD46Qq44PA3ylKK2dzsXZmnuE23yxH0RmoqizOk,1521
+llumo-0.2.15b1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.15b1.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.15b1.dist-info/RECORD,,
llumo-0.2.14b6.dist-info/RECORD
DELETED
@@ -1,13 +0,0 @@
-llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
-llumo/client.py,sha256=YmvbfyWR9YCDOFrKM0nwDMWYLGpu4RSZwbkiUJ3e78M,46162
-llumo/exceptions.py,sha256=i3Qv4_g7XjRuho7-b7ybjw2bwSh_NhvICR6ZAgiLQX8,1944
-llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
-llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
-llumo/helpingFuntions.py,sha256=f2Y-x-DbGk3E29qaJWDOsTkuqqDFl9-VQTRM490amE4,20443
-llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
-llumo/sockets.py,sha256=-zJYRCDRwElIPr5iOFqzQxjecuLJ7mztiyYJz14pGLY,5949
-llumo-0.2.14b6.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
-llumo-0.2.14b6.dist-info/METADATA,sha256=2Yl4gnAXsfpJWLB6mhlza0HUE76uJY3sC1TWK7GlUu4,1521
-llumo-0.2.14b6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llumo-0.2.14b6.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
-llumo-0.2.14b6.dist-info/RECORD,,
{llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/WHEEL
File without changes
{llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/licenses/LICENSE
File without changes
{llumo-0.2.14b6.dist-info → llumo-0.2.15b1.dist-info}/top_level.txt
File without changes