llumo 0.2.25__py3-none-any.whl → 0.2.26__py3-none-any.whl

This diff shows the content of publicly available package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
llumo/__init__.py CHANGED
@@ -1,4 +1,4 @@
- from .client import LlumoClient
+
  from .exceptions import LlumoAIError
  from .helpingFuntions import *
  from .models import AVAILABLEMODELS
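Note on the hunk above: llumo/__init__.py no longer re-exports LlumoClient, so "from llumo import LlumoClient" will raise ImportError on 0.2.26. A minimal sketch of the import path that still works, assuming no replacement re-export was added elsewhere:

from llumo.client import LlumoClient  # import from the submodule directly

client = LlumoClient(api_key="llumo-XXXX")  # placeholder key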
llumo/client.py CHANGED
@@ -16,11 +16,11 @@ from .exceptions import LlumoAIError
  from .helpingFuntions import *
  from .sockets import LlumoSocketClient
  from .functionCalling import LlumoAgentExecutor
- from .chains import LlumoDataFrameResults,LlumoDictResults
+ from .chains import LlumoDataFrameResults, LlumoDictResults
  import threading
  from tqdm import tqdm

- pd.set_option('future.no_silent_downcasting', True)
+ pd.set_option("future.no_silent_downcasting", True)

  postUrl = (
  "https://red-skull-service-392377961931.us-central1.run.app/api/process-playground"
@@ -39,10 +39,11 @@ socketUrl = "https://red-skull-service-392377961931.us-central1.run.app/"

  class LlumoClient:

- def __init__(self, api_key):
+ def __init__(self, api_key, playground_id=None):
  self.apiKey = api_key
- self.evalData=[]
- self.evals=[]
+ self.playgroundID = playground_id
+ self.evalData = []
+ self.evals = []
  self.processMapping = {}
  self.definationMapping = {}
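The hunk above adds an optional playground_id to the constructor; it is stored as self.playgroundID and later used as the active playground in evaluateMultiple. A minimal usage sketch; the key and playground values are placeholders:

from llumo.client import LlumoClient

client = LlumoClient(api_key="llumo-XXXX", playground_id="my-playground-id")
print(client.playgroundID)  # "my-playground-id"; stays None when the argument is omitted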
 
@@ -54,7 +55,7 @@ class LlumoClient:
  reqBody = {"analytics": [evalName]}

  try:
-
+
  response = requests.post(url=validateUrl, json=reqBody, headers=headers)

  except requests.exceptions.RequestException as e:
@@ -99,20 +100,90 @@ class LlumoClient:
  )
  self.email = data["data"]["data"].get("email", None)

- self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "")
- self.categories = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("categories", {})
- self.evaluationStrictness=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("evaluationStrictness", {})
- self.grammarCheckOutput=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("grammarCheckOutput", {})
- self.insightsLength=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("insightsLength", {})
- self.insightsLevel=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("insightsLevel", {})
- self.executionDependency=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("executionDependency", {})
- self.sampleData=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("sampleData", {})
- self.numJudges=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("numJudges", {})
- self.penaltyBonusInstructions=data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("penaltyBonusInstructions", [])
- self.probableEdgeCases= data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("probableEdgeCases", [])
- self.fieldMapping= data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, "").get("fieldMapping", [])
-
-
+ self.definationMapping[evalName] = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ )
+ self.categories = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("categories", {})
+ )
+ self.evaluationStrictness = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("evaluationStrictness", {})
+ )
+ self.grammarCheckOutput = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("grammarCheckOutput", {})
+ )
+ self.insightsLength = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("insightsLength", {})
+ )
+ self.insightsLevel = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("insightsLevel", {})
+ )
+ self.executionDependency = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("executionDependency", {})
+ )
+ self.sampleData = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("sampleData", {})
+ )
+ self.numJudges = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("numJudges", {})
+ )
+ self.penaltyBonusInstructions = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("penaltyBonusInstructions", [])
+ )
+ self.probableEdgeCases = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("probableEdgeCases", [])
+ )
+ self.fieldMapping = (
+ data.get("data", {})
+ .get("data", {})
+ .get("analyticsMapping", {})
+ .get(evalName, "")
+ .get("fieldMapping", [])
+ )
+
  except Exception as e:
  # print(f"Error extracting data from response: {str(e)}")
  raise LlumoAIError.UnexpectedError(detail=evalName)
@@ -548,7 +619,7 @@ class LlumoClient:
  "playgroundID": activePlayground,
  }

- rowIdMapping[f'{rowID}-{columnID}-{columnID}'] = index
+ rowIdMapping[f"{rowID}-{columnID}-{columnID}"] = index
  # print("__________________________TEMPLATE__________________________________")
  # print(templateData)

@@ -628,13 +699,11 @@ class LlumoClient:
  def evaluateMultiple(
  self,
  data,
- evals: list, # list of eval metric names
- session, # Add session parameter
+ evals: list,
  prompt_template="Give answer to the given query: {{query}} using the given context: {{context}}.",
  outputColName="output",
  createExperiment: bool = False,
- getDataFrame:bool =False,
- playgroundID: str = None,
+ getDataFrame: bool = False,
  _tocheck=True,
  ):
  if isinstance(data, dict):
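The hunk above removes the session and playgroundID parameters from evaluateMultiple: the active playground now comes from the client's own playgroundID, and eval-step logging goes through self.logEvalStep when that method exists (see the hasattr hunk further down). A hedged example of the new call shape; the data row and eval name are illustrative:

from llumo.client import LlumoClient

client = LlumoClient(api_key="llumo-XXXX", playground_id="my-playground-id")
results = client.evaluateMultiple(
    data=[{"query": "What is RAG?", "context": "RAG retrieves documents.", "output": "RAG combines retrieval and generation."}],
    evals=["Response Correctness"],
    getDataFrame=False,  # False returns LlumoDictResults; True returns LlumoDataFrameResults
)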
@@ -649,10 +718,10 @@ class LlumoClient:
  try:
  socketID = self.socket.connect(timeout=250)
  except Exception as e:
- socketID="DummySocketID"
-
- self.evalData=[]
- self.evals=evals
+ socketID = "DummySocketID"
+
+ self.evalData = []
+ self.evals = evals
  self.allBatches = []
  rowIdMapping = {} # (rowID-columnID-columnID -> (index, evalName))

@@ -681,21 +750,18 @@ class LlumoClient:
  )
  listener_thread.start()
  self.validateApiKey(evalName=evals[0])
- if createExperiment:
- if playgroundID:
- activePlayground = playgroundID
- else:
- activePlayground = str(createEvalPlayground(email=self.email, workspaceID=self.workspaceID))
- else:
- activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+ activePlayground = self.playgroundID
  for evalName in evals:
  # print(f"\n======= Running evaluation for: {evalName} =======")

  # Validate API and dependencies
  self.validateApiKey(evalName=evalName)
- customAnalytics=getCustomAnalytics(self.workspaceID)
+ customAnalytics = getCustomAnalytics(self.workspaceID)
  metricDependencies = checkDependency(
- evalName, list(dataframe.columns), tocheck=_tocheck,customevals=customAnalytics
+ evalName,
+ list(dataframe.columns),
+ tocheck=_tocheck,
+ customevals=customAnalytics,
  )
  if not metricDependencies["status"]:
  raise LlumoAIError.dependencyError(metricDependencies["message"])
@@ -706,15 +772,14 @@ class LlumoClient:
  evalType = "LLM"
  workspaceID = self.workspaceID
  email = self.email
- categories=self.categories
- evaluationStrictness=self.evaluationStrictness
- grammarCheckOutput=self.grammarCheckOutput
- insightLength=self.insightsLength
- numJudges=self.numJudges
- penaltyBonusInstructions=self.penaltyBonusInstructions
- probableEdgeCases=self.probableEdgeCases
- fieldMapping=self.fieldMapping
-
+ categories = self.categories
+ evaluationStrictness = self.evaluationStrictness
+ grammarCheckOutput = self.grammarCheckOutput
+ insightLength = self.insightsLength
+ numJudges = self.numJudges
+ penaltyBonusInstructions = self.penaltyBonusInstructions
+ probableEdgeCases = self.probableEdgeCases
+ fieldMapping = self.fieldMapping

  userHits = checkUserHits(
  self.workspaceID,
@@ -746,7 +811,6 @@ class LlumoClient:
  output = row.get(outputColName, "")
  intermediateSteps = row.get("intermediateSteps", "")

-
  rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
  columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")

@@ -776,7 +840,7 @@ class LlumoClient:
  "MessageHistory": messageHistory,
  "IntermediateSteps": intermediateSteps,
  },
- "categories":categories,
+ "categories": categories,
  "evaluationStrictness": evaluationStrictness,
  "grammarCheckOutput": grammarCheckOutput,
  "insightLength": insightLength,
@@ -788,7 +852,7 @@ class LlumoClient:
  },
  "type": "EVAL",
  "kpi": evalName,
- "fieldMappig":fieldMapping,
+ "fieldMappig": fieldMapping,
  }

  query = ""
@@ -848,25 +912,23 @@ class LlumoClient:
  time.sleep(3)
  listener_thread.join()

-
  rawResults = self.socket.getReceivedData()
-
+
  # print("data from db #####################",dataFromDb)
  # Fix here: keep full keys, do not split keys
  receivedRowIDs = {key for item in rawResults for key in item.keys()}
  expectedRowIDs = set(rowIdMapping.keys())
- missingRowIDs = expectedRowIDs - receivedRowIDs
+ missingRowIDs = expectedRowIDs - receivedRowIDs
  # print("All expected keys:", expected_rowIDs)
  # print("All received keys:", received_rowIDs)
  # print("Missing keys:", len(missingRowIDs))
- missingRowIDs=list(missingRowIDs)
+ missingRowIDs = list(missingRowIDs)
  if len(missingRowIDs) > 0:
- dataFromDb=fetchData(workspaceID,activePlayground,missingRowIDs)
+ dataFromDb = fetchData(workspaceID, activePlayground, missingRowIDs)
  rawResults.extend(dataFromDb)
-
-
+
  self.evalData = rawResults
-
+
  # Initialize dataframe columns for each eval
  for eval in evals:
  dataframe[eval] = None
@@ -882,10 +944,10 @@ class LlumoClient:
  dataframe.at[index, f"{evalName} Reason"] = value.get("reasoning")

  # Log the evaluation step
- if session:
+ if hasattr(self, "logEvalStep"):
  try:
  start_time = time.time()
- session.logEvalStep(
+ self.logEvalStep(
  stepName=f"EVAL-{evalName}",
  output=value.get("value"),
  context=row.get("context", ""),
@@ -906,13 +968,12 @@ class LlumoClient:
  print(f"Error logging eval step: {e}")

  self.socket.disconnect()
-

  if createExperiment:
  pd.set_option("future.no_silent_downcasting", True)
  # df = dataframe.fillna("Some error occured").astype(object)
  with warnings.catch_warnings():
- warnings.simplefilter(action='ignore', category=FutureWarning)
+ warnings.simplefilter(action="ignore", category=FutureWarning)
  df = dataframe.fillna("Some error occurred").astype(str)

  df = dataframe.fillna("Some error occured").infer_objects(copy=False)
@@ -923,42 +984,60 @@ class LlumoClient:
  promptText=prompt_template,
  definationMapping=self.definationMapping,
  outputColName=outputColName,
- activePlayground= activePlayground,
- customAnalytics=customAnalytics
+ activePlayground=activePlayground,
+ customAnalytics=customAnalytics,
  ):
  print(
  "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
  )
  if getDataFrame:
- return LlumoDataFrameResults(dataframe,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+ return LlumoDataFrameResults(
+ dataframe,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )
  else:
- data=dataframe.to_dict(orient="records")
- return LlumoDictResults(data,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+ data = dataframe.to_dict(orient="records")
+ return LlumoDictResults(
+ data,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

  else:
  if getDataFrame:
- return LlumoDataFrameResults(dataframe,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+ return LlumoDataFrameResults(
+ dataframe,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )
  else:
- data=dataframe.to_dict(orient="records")
- return LlumoDictResults(data,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+ data = dataframe.to_dict(orient="records")
+ return LlumoDictResults(
+ data,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

-
  def promptSweep(
  self,
  templates: List[str],
  data,
  model_aliases: List[AVAILABLEMODELS],
- apiKey: str,
  evals=["Response Correctness"],
  toEvaluate: bool = False,
  createExperiment: bool = False,
- getDataFrame=False
+ getDataFrame=False,
  ) -> pd.DataFrame:
  if isinstance(data, dict):
- data = [data]
- # Check if data is now a list of dictionaries
+ data = [data]
+ # Check if data is now a list of dictionaries
  if isinstance(data, list) and all(isinstance(item, dict) for item in data):
- working_df= pd.DataFrame(data).astype(str)
+ working_df = pd.DataFrame(data).astype(str)
  else:
  raise ValueError("Data must be a dictionary or a list of dictionaries.")
  modelStatus = validateModels(model_aliases=model_aliases)
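promptSweep no longer accepts an apiKey parameter; the next hunk shows ModelExecutor being built from self.apiKey instead. An illustrative call under the new signature, assuming AVAILABLEMODELS exposes a GPT_4 alias (check llumo/models.py for the real member names):

from llumo.client import LlumoClient
from llumo.models import AVAILABLEMODELS

client = LlumoClient(api_key="llumo-XXXX")
results = client.promptSweep(
    templates=["Answer the query: {{query}}"],
    data=[{"query": "What is RAG?"}],
    model_aliases=[AVAILABLEMODELS.GPT_4],  # assumed member name
    toEvaluate=True,
)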
@@ -968,10 +1047,10 @@ class LlumoClient:
  self.validateApiKey()
  workspaceID = self.workspaceID
  email = self.email
- executor = ModelExecutor(apiKey)
+ executor = ModelExecutor(self.apiKey)
  prompt_template = templates[0]
-
- working_df = self._outputForStream(working_df, model_aliases, prompt_template, apiKey)
+
+ working_df = self._outputForStream(working_df, model_aliases, prompt_template)

  # Optional evaluation
  outputEvalMapping = None
@@ -985,40 +1064,49 @@ class LlumoClient:
  if not metricDependencies["status"]:
  raise LlumoAIError.dependencyError(metricDependencies["message"])

- working_df, outputEvalMapping = self._evaluateForStream(working_df, evals, model_aliases, prompt_template,generateOutput=True)
+ working_df, outputEvalMapping = self._evaluateForStream(
+ working_df, evals, model_aliases, prompt_template, generateOutput=True
+ )
  if createExperiment:
  # df = working_df.fillna("Some error occured").astype(object)
  with warnings.catch_warnings():
- warnings.simplefilter(action='ignore', category=FutureWarning)
+ warnings.simplefilter(action="ignore", category=FutureWarning)
  df = working_df.fillna("Some error occurred").astype(str)
  if createPlayground(
- email, workspaceID, df,
- promptText=prompt_template,
- definationMapping=self.definationMapping,
- evalOutputMap=outputEvalMapping
+ email,
+ workspaceID,
+ df,
+ promptText=prompt_template,
+ definationMapping=self.definationMapping,
+ evalOutputMap=outputEvalMapping,
  ):
  print(
- "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results.")
-
+ "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
+ )
+
  else:
  if getDataFrame == True and toEvaluate == True:
- return LlumoDataFrameResults(working_df, evals=self.evals, evalData=self.evalData,
- definationMapping=self.definationMapping)
+ return LlumoDataFrameResults(
+ working_df,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

  elif getDataFrame == False and toEvaluate == True:
  data = working_df.to_dict(orient="records")
- return LlumoDictResults(data, evals=self.evals, evalData=self.evalData,
- definationMapping=self.definationMapping)
+ return LlumoDictResults(
+ data,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

- elif getDataFrame== True and toEvaluate == False:
+ elif getDataFrame == True and toEvaluate == False:
  return working_df

- elif getDataFrame == False and toEvaluate == False :
- return working_df.to_dict(orient = "records")
-
-
-
-
+ elif getDataFrame == False and toEvaluate == False:
+ return working_df.to_dict(orient="records")

  # this function generates an output using llm and tools and evaluate that output
  def evaluateAgents(
@@ -1030,8 +1118,7 @@ class LlumoClient:
  evals=["Final Task Alignment"],
  prompt_template="Give answer for the given query: {{query}}",
  createExperiment: bool = False,
- getDataFrame:bool = False
-
+ getDataFrame: bool = False,
  ):
  if isinstance(data, dict):
  data = [data]
@@ -1062,8 +1149,7 @@ class LlumoClient:
  evals=evals,
  prompt_template=prompt_template,
  createExperiment=createExperiment,
- getDataFrame=getDataFrame
-
+ getDataFrame=getDataFrame,
  )

  return toolResponseDf
@@ -1083,9 +1169,8 @@ class LlumoClient:
  data,
  evals=["Final Task Alignment"],
  createExperiment: bool = False,
- getDataFrame = False,
- outputColName="output"
-
+ getDataFrame=False,
+ outputColName="output",
  ):
  if isinstance(data, dict):
  data = [data]
@@ -1112,8 +1197,7 @@ class LlumoClient:
  prompt_template="Give answer for the given query: {{query}}",
  outputColName=outputColName,
  createExperiment=createExperiment,
- getDataFrame = getDataFrame
-
+ getDataFrame=getDataFrame,
  )
  if createExperiment:
  pass
@@ -1124,18 +1208,17 @@ class LlumoClient:
  raise e

  def ragSweep(
- self,
- data,
- streamName: str,
- queryColName: str = "query",
- createExperiment: bool = False,
- modelAliases=[],
- apiKey="",
- prompt_template="Give answer to the given: {{query}} using the context:{{context}}",
- evals=["Context Utilization"],
- toEvaluate=False,
- generateOutput=True,
- getDataFrame = False
+ self,
+ data,
+ streamName: str,
+ queryColName: str = "query",
+ createExperiment: bool = False,
+ modelAliases=[],
+ prompt_template="Give answer to the given: {{query}} using the context:{{context}}",
+ evals=["Context Utilization"],
+ toEvaluate=False,
+ generateOutput=True,
+ getDataFrame=False,
  ):
  if isinstance(data, dict):
  data = [data]
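ragSweep likewise drops its apiKey parameter; the next hunk shows it validating self.apiKey when generateOutput is True. An illustrative call, with the stream name and data as placeholders:

results = client.ragSweep(
    data=[{"query": "What is RAG?"}],
    streamName="my-data-stream",  # placeholder stream name
    modelAliases=[AVAILABLEMODELS.GPT_4],  # assumed member name
    toEvaluate=True,
    generateOutput=True,
)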
@@ -1145,13 +1228,21 @@ class LlumoClient:
  # Validate required parameters
  if generateOutput:
  if not modelAliases:
- raise ValueError("Model aliases must be provided when generateOutput is True.")
- if not apiKey or not isinstance(apiKey, str) or apiKey.strip() == "":
- raise ValueError("Valid API key must be provided when generateOutput is True.")
+ raise ValueError(
+ "Model aliases must be provided when generateOutput is True."
+ )
+ if (
+ not self.apiKey
+ or not isinstance(self.apiKey, str)
+ or self.apiKey.strip() == ""
+ ):
+ raise ValueError(
+ "Valid API key must be provided when generateOutput is True."
+ )

  modelStatus = validateModels(model_aliases=modelAliases)
- if modelStatus["status"]== False:
- if len(modelAliases) == 0:
+ if modelStatus["status"] == False:
+ if len(modelAliases) == 0:
  raise LlumoAIError.providerError("No model selected.")
  else:
  raise LlumoAIError.providerError(modelStatus["message"])
@@ -1165,7 +1256,7 @@ class LlumoClient:
  try:
  socketID = self.socket.connect(timeout=150)
  except Exception as e:
- socketID="DummySocketID"
+ socketID = "DummySocketID"
  # waited_secs = 0
  # while not self.socket._connection_established.is_set():
  # time.sleep(0.1)
@@ -1177,8 +1268,12 @@ class LlumoClient:

  # Check user credits
  userHits = checkUserHits(
- self.workspaceID, self.hasSubscribed, self.trialEndDate,
- self.subscriptionEndDate, self.hitsAvailable, len(working_df)
+ self.workspaceID,
+ self.hasSubscribed,
+ self.trialEndDate,
+ self.subscriptionEndDate,
+ self.hitsAvailable,
+ len(working_df),
  )
  if not userHits["success"]:
  raise LlumoAIError.InsufficientCredits(userHits["message"])
@@ -1204,7 +1299,7 @@ class LlumoClient:
  "inactivity_timeout": 10,
  "expected_results": expectedResults,
  },
- daemon=True
+ daemon=True,
  )
  listener_thread.start()

@@ -1233,7 +1328,13 @@ class LlumoClient:
  self.allBatches.append(currentBatch)
  currentBatch = []

- for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch", colour="magenta", ncols=80):
+ for batch in tqdm(
+ self.allBatches,
+ desc="Processing Batches",
+ unit="batch",
+ colour="magenta",
+ ncols=80,
+ ):
  try:
  self.postDataStream(batch=batch, workspaceID=workspaceID)
  time.sleep(3)
@@ -1262,7 +1363,9 @@ class LlumoClient:

  # Output generation
  if generateOutput == True:
- working_df = self._outputForStream(working_df, modelAliases, prompt_template, apiKey)
+ working_df = self._outputForStream(
+ working_df, modelAliases, prompt_template
+ )

  # Optional evaluation
  outputEvalMapping = None
@@ -1276,58 +1379,78 @@ class LlumoClient:
  if not metricDependencies["status"]:
  raise LlumoAIError.dependencyError(metricDependencies["message"])

- working_df, outputEvalMapping = self._evaluateForStream(working_df, evals, modelAliases, prompt_template,generateOutput)
+ working_df, outputEvalMapping = self._evaluateForStream(
+ working_df, evals, modelAliases, prompt_template, generateOutput
+ )

-
  self.socket.disconnect()
  # Create experiment if required
  if createExperiment:
  # df = working_df.fillna("Some error occured").astype(object)
  with warnings.catch_warnings():
- warnings.simplefilter(action='ignore', category=FutureWarning)
+ warnings.simplefilter(action="ignore", category=FutureWarning)
  df = working_df.fillna("Some error occurred").astype(str)
  if createPlayground(
- email, workspaceID, df,
- queryColName=queryColName,
- dataStreamName=streamId,
- promptText=prompt_template,
- definationMapping=self.definationMapping,
- evalOutputMap=outputEvalMapping
+ email,
+ workspaceID,
+ df,
+ queryColName=queryColName,
+ dataStreamName=streamId,
+ promptText=prompt_template,
+ definationMapping=self.definationMapping,
+ evalOutputMap=outputEvalMapping,
  ):
  print(
- "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results.")
+ "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
+ )
  if getDataFrame == True and toEvaluate == True:
- return LlumoDataFrameResults(working_df, evals=self.evals, evalData=self.evalData,
- definationMapping=self.definationMapping)
+ return LlumoDataFrameResults(
+ working_df,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

  elif getDataFrame == False and toEvaluate == True:
  data = working_df.to_dict(orient="records")
- return LlumoDictResults(data, evals=self.evals, evalData=self.evalData,
- definationMapping=self.definationMapping)
+ return LlumoDictResults(
+ data,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

- elif getDataFrame== True and toEvaluate == False:
+ elif getDataFrame == True and toEvaluate == False:
  return working_df

- elif getDataFrame == False and toEvaluate == False :
- return working_df.to_dict(orient = "records")
+ elif getDataFrame == False and toEvaluate == False:
+ return working_df.to_dict(orient="records")
  else:
  if getDataFrame == True and toEvaluate == True:
- return LlumoDataFrameResults(working_df, evals=self.evals, evalData=self.evalData,
- definationMapping=self.definationMapping)
+ return LlumoDataFrameResults(
+ working_df,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

  elif getDataFrame == False and toEvaluate == True:
  data = working_df.to_dict(orient="records")
- return LlumoDictResults(data, evals=self.evals, evalData=self.evalData,
- definationMapping=self.definationMapping)
+ return LlumoDictResults(
+ data,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )

- elif getDataFrame== True and toEvaluate == False:
+ elif getDataFrame == True and toEvaluate == False:
  return working_df

- elif getDataFrame == False and toEvaluate == False :
- return working_df.to_dict(orient = "records")
+ elif getDataFrame == False and toEvaluate == False:
+ return working_df.to_dict(orient="records")

- def _outputForStream(self, df, modelAliases, prompt_template, apiKey):
- executor = ModelExecutor(apiKey)
+ def _outputForStream(self, df, modelAliases, prompt_template):
+ executor = ModelExecutor(self.apiKey)

  for indx, row in df.iterrows():
  inputVariables = re.findall(r"{{(.*?)}}", prompt_template)
@@ -1340,21 +1463,25 @@ class LlumoClient:

  provider = getProviderFromModel(model)
  if provider == Provider.OPENAI:
- validateOpenaiKey(apiKey)
+ validateOpenaiKey(self.apiKey)
  elif provider == Provider.GOOGLE:
- validateGoogleKey(apiKey)
+ validateGoogleKey(self.apiKey)

- filled_template = getInputPopulatedPrompt(prompt_template, inputDict)
- response = executor.execute(provider, model.value, filled_template, apiKey)
+ filled_template = getInputPopulatedPrompt(
+ prompt_template, inputDict
+ )
+ response = executor.execute(provider, model.value, filled_template)
  df.at[indx, f"output_{i}"] = response
-
+
  except Exception as e:
  # df.at[indx, f"output_{i}"] = str(e)
  raise e

  return df

- def _evaluateForStream(self, df, evals, modelAliases, prompt_template, generateOutput):
+ def _evaluateForStream(
+ self, df, evals, modelAliases, prompt_template, generateOutput
+ ):
  dfWithEvals = df.copy()
  outputColMapping = {}

@@ -1370,7 +1497,7 @@ class LlumoClient:
  outputColName=outputColName,
  _tocheck=False,
  getDataFrame=True,
- createExperiment=False
+ createExperiment=False,
  )

  for evalMetric in evals:
@@ -1379,11 +1506,15 @@ class LlumoClient:
  if scoreCol in res.columns:
  res = res.rename(columns={scoreCol: f"{scoreCol}_{i}"})
  if reasonCol in res.columns:
- res = res.rename(columns={reasonCol: f"{evalMetric}_{i} Reason"})
+ res = res.rename(
+ columns={reasonCol: f"{evalMetric}_{i} Reason"}
+ )

  outputColMapping[f"{scoreCol}_{i}"] = outputColName

- newCols = [col for col in res.columns if col not in dfWithEvals.columns]
+ newCols = [
+ col for col in res.columns if col not in dfWithEvals.columns
+ ]
  dfWithEvals = pd.concat([dfWithEvals, res[newCols]], axis=1)

  except Exception as e:
@@ -1400,7 +1531,7 @@ class LlumoClient:
  outputColName=outputColName,
  _tocheck=False,
  getDataFrame=True,
- createExperiment=False
+ createExperiment=False,
  )
  for evalMetric in evals:
  scoreCol = f"{evalMetric}"
@@ -1415,13 +1546,13 @@ class LlumoClient:
  return dfWithEvals, outputColMapping

  def runDataStream(
- self,
- data,
- streamName: str,
- queryColName: str = "query",
- createExperiment: bool = False,
- getDataFrame = False
- ):
+ self,
+ data,
+ streamName: str,
+ queryColName: str = "query",
+ createExperiment: bool = False,
+ getDataFrame=False,
+ ):

  if isinstance(data, dict):
  data = [data]
@@ -1437,7 +1568,7 @@ class LlumoClient:
  try:
  socketID = self.socket.connect(timeout=150)
  except Exception as e:
- socketID="DummySocketID"
+ socketID = "DummySocketID"
  # waited_secs = 0
  # while not self.socket._connection_established.is_set():
  # time.sleep(0.1)
@@ -1449,8 +1580,12 @@ class LlumoClient:

  # Check user credits
  userHits = checkUserHits(
- self.workspaceID, self.hasSubscribed, self.trialEndDate,
- self.subscriptionEndDate, self.hitsAvailable, len(working_df)
+ self.workspaceID,
+ self.hasSubscribed,
+ self.trialEndDate,
+ self.subscriptionEndDate,
+ self.hitsAvailable,
+ len(working_df),
  )
  if not userHits["success"]:
  raise LlumoAIError.InsufficientCredits(userHits["message"])
@@ -1476,7 +1611,7 @@ class LlumoClient:
  "inactivity_timeout": 10,
  "expected_results": expectedResults,
  },
- daemon=True
+ daemon=True,
  )
  listener_thread.start()

@@ -1505,7 +1640,13 @@ class LlumoClient:
  self.allBatches.append(currentBatch)
  currentBatch = []

- for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch", colour="magenta", ncols=80):
+ for batch in tqdm(
+ self.allBatches,
+ desc="Processing Batches",
+ unit="batch",
+ colour="magenta",
+ ncols=80,
+ ):
  try:
  self.postDataStream(batch=batch, workspaceID=workspaceID)
  time.sleep(3)
@@ -1532,21 +1673,22 @@ class LlumoClient:
  idx = rowIdMapping[compound_key]["index"]
  working_df.at[idx, "context"] = value.get("value")

-
-
  self.socket.disconnect()

  # Create experiment if required
  if createExperiment:
  df = working_df.fillna("Some error occured").astype(object)
  if createPlayground(
- email, workspaceID, df,
- queryColName=queryColName,
- dataStreamName=streamId,
- definationMapping=self.definationMapping,
+ email,
+ workspaceID,
+ df,
+ queryColName=queryColName,
+ dataStreamName=streamId,
+ definationMapping=self.definationMapping,
  ):
  print(
- "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results.")
+ "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
+ )
  if getDataFrame:
  return working_df

@@ -1563,7 +1705,6 @@ class LlumoClient:
  # self.latestDataframe = working_df
  # return working_df

-
  def createExperiment(self, dataframe):
  try:
  self.validateApiKey()
@@ -1581,7 +1722,6 @@ class LlumoClient:
  workspaceID = None
  email = None

-
  try:
  self.validateApiKey()
  except Exception as e:
@@ -1609,17 +1749,17 @@ class LlumoClient:
  # If successfully loaded, call createPlayground
  df = df.astype(str)
  if createPlayground(self.email, self.workspaceID, df):
-
+
  print(
  "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
  )
-
+
  return True

  except Exception as e:
  print(f"Error: {e}")
-
- def upload(self,data):
+
+ def upload(self, data):
  try:
  if isinstance(data, dict):
  data = [data]
@@ -1639,7 +1779,6 @@ class LlumoClient:
  print(f"Error: {e}")
  return False

-
  def createExperimentWithEvals(
  self,
  data,
@@ -1647,7 +1786,7 @@ class LlumoClient:
  prompt_template="Give answer to the given query: {{query}} using the given context: {{context}}.",
  outputColName="output",
  createExperiment: bool = False,
- getDataFrame:bool =False,
+ getDataFrame: bool = False,
  _tocheck=True,
  ):
  if isinstance(data, dict):
@@ -1657,8 +1796,8 @@ class LlumoClient:
  dataframe = pd.DataFrame(data).astype(str)
  workspaceID = None
  email = None
- self.evalData=[]
- self.evals=evals
+ self.evalData = []
+ self.evals = evals
  self.allBatches = []
  rowIdMapping = {} # (rowID-columnID-columnID -> (index, evalName))
  self.validateApiKey(evalName=evals[0])
@@ -1666,20 +1805,22 @@ class LlumoClient:
  if playgroundID:
  activePlayground = playgroundID
  else:
- activePlayground = str(createEvalPlayground(email=self.email, workspaceID=self.workspaceID))
- else:
+ activePlayground = str(
+ createEvalPlayground(email=self.email, workspaceID=self.workspaceID)
+ )
+ else:
  activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace(
- "-", ""
- )
+ "-", ""
+ )
  for evalName in evals:
  self.validateApiKey(evalName=evalName)
- self.evalData =dataframe.to_dict(orient="records")
+ self.evalData = dataframe.to_dict(orient="records")
  if createExperiment:
  print("heading to upload")
  pd.set_option("future.no_silent_downcasting", True)
  # df = dataframe.fillna("Some error occured").astype(object)
  with warnings.catch_warnings():
- warnings.simplefilter(action='ignore', category=FutureWarning)
+ warnings.simplefilter(action="ignore", category=FutureWarning)
  df = dataframe.fillna("Some error occurred").astype(str)

  df = dataframe.fillna("Some error occured").infer_objects(copy=False)
@@ -1690,20 +1831,30 @@ class LlumoClient:
  promptText=prompt_template,
  definationMapping=self.definationMapping,
  outputColName=outputColName,
- activePlayground= activePlayground
+ activePlayground=activePlayground,
  ):
  print(
  "LLUMO’s intuitive UI is ready—start exploring and experimenting with your logs now. Visit https://app.llumo.ai/evallm to see the results."
  )
-
+
  else:
  if getDataFrame:
- return LlumoDataFrameResults(dataframe,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+ return LlumoDataFrameResults(
+ dataframe,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )
  else:
- data=dataframe.to_dict(orient="records")
- return LlumoDictResults(data,evals=self.evals,evalData=self.evalData,definationMapping=self.definationMapping)
+ data = dataframe.to_dict(orient="records")
+ return LlumoDictResults(
+ data,
+ evals=self.evals,
+ evalData=self.evalData,
+ definationMapping=self.definationMapping,
+ )
+

-
  class SafeDict(dict):
  def __missing__(self, key):
  return ""
llumo/llumoLogger.py CHANGED
@@ -1,7 +1,7 @@
  import requests


- class LLUMOLogger:
+ class LlumoLogger:
  def __init__(self, apiKey: str, playground: str):
  self.apiKey = apiKey
  self.playground = playground
llumo/llumoSessionContext.py CHANGED
@@ -3,6 +3,7 @@ import uuid
  from typing import Optional, List, Dict, Any
  from datetime import datetime, timezone
  import requests
+ from .client import LlumoClient

  _ctxLogger = contextvars.ContextVar("ctxLogger")
  _ctxSessionID = contextvars.ContextVar("ctxSessionID")
@@ -21,8 +22,9 @@ def getLlumoRun():
  return _ctxLlumoRun.get()


- class LlumoSessionContext:
+ class LlumoSessionContext(LlumoClient):
  def __init__(self, logger, sessionID: Optional[str] = None):
+ super().__init__(api_key=logger.apiKey, playground_id=logger.getPlaygroundID())
  self.sessionID = sessionID or str(uuid.uuid4().hex[:14])
  self.logger = logger
  self.apiKey = logger.apiKey
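With this hunk LlumoSessionContext becomes a subclass of LlumoClient, initialised from the logger's API key and playground, so session code can call client methods such as evaluateMultiple (and logEvalStep, which the hasattr check in client.py now looks for) directly on the context. A hedged sketch of the new wiring, using the renamed LlumoLogger; all values are placeholders:

from llumo.llumoLogger import LlumoLogger
from llumo.llumoSessionContext import LlumoSessionContext

logger = LlumoLogger(apiKey="llumo-XXXX", playground="my-playground-id")
ctx = LlumoSessionContext(logger)  # now also a fully initialised LlumoClient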
{llumo-0.2.25.dist-info → llumo-0.2.26.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.25
+ Version: 0.2.26
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo
{llumo-0.2.25.dist-info → llumo-0.2.26.dist-info}/RECORD CHANGED
@@ -1,20 +1,20 @@
- llumo/__init__.py,sha256=ZdFeOT5aDM1iA4VzQ8ryc0rxF3ihjhPO8aCRuw8t0zk,342
+ llumo/__init__.py,sha256=kkuppu7ZPiVZFdnYzJ9BM3syMbYHOSZLpwKwAvGHsnY,311
  llumo/callback.py,sha256=Pzg9Smqsu5G900YZjoFwqMY0TTP4jUizxllaP0TjKgk,20439
  llumo/callbacks-0.py,sha256=TEIOCWRvk2UYsTmBMBsnlgpqWvr-2y3a6d0w_e96NRM,8958
  llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
- llumo/client.py,sha256=RKI8XIIafzMWX42gXBXAcMjtOzZngx1ebgGfXmNDa-w,69064
+ llumo/client.py,sha256=14swva7RlXsoldlMpiveUEG45MViDsXimKrnRJrT4m8,71408
  llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
  llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
  llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
  llumo/google.py,sha256=3S_aRtbtlctCXPGR0u4baLlkyFrsjd02vlUCkoRPA5U,2147
  llumo/helpingFuntions.py,sha256=B6FwUQ5f1v4FKrWCbYoGWMFdscOV_liuuhTgNQ3cdrk,27275
- llumo/llumoLogger.py,sha256=UW3eIQb5txneilx8FQnGf6t4LgP85NoIf5YECFDZShk,1912
- llumo/llumoSessionContext.py,sha256=J4oFCWcpksZ2sWgGJqzq2LjUwIG1OAQva_EAvNy9ACs,10373
+ llumo/llumoLogger.py,sha256=grdjhu6Ngxg7nhnrMOP5Pd5ALR7U2ROws48yhf_N7y0,1912
+ llumo/llumoSessionContext.py,sha256=v1OPJFYWe5-mLLUohX5qY7dlzgwmxpuRZ0rDsXEv6f4,10506
  llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
  llumo/openai.py,sha256=QyNMXiYTppaU_YjU6vU5UB0At3OiNntoDTQ0dszLN0g,8538
  llumo/sockets.py,sha256=pBDo-U65hMIMwKMwZQl3iBkEjISEt-9BkXxZTWfSHF4,6116
- llumo-0.2.25.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
- llumo-0.2.25.dist-info/METADATA,sha256=HkIJH54gwQ-rAgxzgpiCWVvvn6tD0jNPYJqnTAoZOfA,1558
- llumo-0.2.25.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- llumo-0.2.25.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
- llumo-0.2.25.dist-info/RECORD,,
+ llumo-0.2.26.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+ llumo-0.2.26.dist-info/METADATA,sha256=pn5AHWPNuRewlkYfFOjxDtU1o0xBOQlJOwGC3VKCNlk,1558
+ llumo-0.2.26.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ llumo-0.2.26.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+ llumo-0.2.26.dist-info/RECORD,,