llumo 0.2.14b5__py3-none-any.whl → 0.2.14b7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llumo/client.py CHANGED
@@ -19,8 +19,17 @@ from .functionCalling import LlumoAgentExecutor
  import threading
  from tqdm import tqdm
 
- postUrl = "https://red-skull-service-392377961931.us-central1.run.app/api/process-playground"
- fetchUrl = "https://red-skull-service-392377961931.us-central1.run.app/api/get-cells-data"
+ postUrl = (
+ "https://red-skull-service-392377961931.us-central1.run.app/api/process-playground"
+ )
+ fetchUrl = (
+ "https://red-skull-service-392377961931.us-central1.run.app/api/get-cells-data"
+ )
+ socketDataUrl = "https://app.llumo.ai/api/eval/get-awaited"
+ # {
+ # "workspaceID":"c9191fdf33bdd7838328c1a0",
+ # "playgroundID":"17496117244856b7815ac94004347b1c2e2f7e01600ec"
+ # }
  validateUrl = "https://app.llumo.ai/api/workspace-details"
  socketUrl = "https://red-skull-service-392377961931.us-central1.run.app/"
 
@@ -42,9 +51,6 @@ class LlumoClient:
 
  try:
  response = requests.post(url=validateUrl, json=reqBody, headers=headers)
-
-
-
 
  except requests.exceptions.RequestException as e:
  print(f"Request exception: {str(e)}")
@@ -77,16 +83,19 @@ class LlumoClient:
  raise LlumoAIError.InvalidApiResponse()
 
  try:
- self.hitsAvailable = data['data']["data"].get("remainingHits", 0)
+ self.hitsAvailable = data["data"]["data"].get("remainingHits", 0)
  self.workspaceID = data["data"]["data"].get("workspaceID")
  self.evalDefinition = data["data"]["data"]["analyticsMapping"]
  self.socketToken = data["data"]["data"].get("token")
  self.hasSubscribed = data["data"]["data"].get("hasSubscribed", False)
  self.trialEndDate = data["data"]["data"].get("trialEndDate", None)
- self.subscriptionEndDate = data["data"]["data"].get("subscriptionEndDate", None)
+ self.subscriptionEndDate = data["data"]["data"].get(
+ "subscriptionEndDate", None
+ )
  self.email = data["data"]["data"].get("email", None)
-
- self.definationMapping[evalName] = data["data"]["data"]["analyticsMapping"][evalName]
+
+ self.definationMapping[evalName] = data.get("data", {}).get("data", {}).get("analyticsMapping", {}).get(evalName, None)
+
  except Exception as e:
  # print(f"Error extracting data from response: {str(e)}")
  raise LlumoAIError.UnexpectedError(detail=str(e))
@@ -160,14 +169,16 @@ class LlumoClient:
  prompt_template="",
  outputColName="output",
  createExperiment: bool = False,
- _tocheck = True,
+ _tocheck=True,
  ):
-
+
  # converting it into a pandas dataframe object
  dataframe = pd.DataFrame(data)
 
  # check for dependencies for the selected eval metric
- metricDependencies = checkDependency(eval,columns=list(dataframe.columns),tocheck=_tocheck)
+ metricDependencies = checkDependency(
+ eval, columns=list(dataframe.columns), tocheck=_tocheck
+ )
  if metricDependencies["status"] == False:
  raise LlumoAIError.dependencyError(metricDependencies["message"])
 
@@ -323,7 +334,7 @@ class LlumoClient:
 
  for cnt, batch in enumerate(self.allBatches):
  try:
-
+
  self.postBatch(batch=batch, workspaceID=workspaceID)
  print("Betch Posted with item len: ", len(batch))
  except Exception as e:
@@ -372,7 +383,14 @@ class LlumoClient:
  pd.set_option("future.no_silent_downcasting", True)
  df = dataframe.fillna("Some error occured").astype(object)
 
- if createPlayground(email, workspaceID, df,promptText=prompt_template,definationMapping=self.definationMapping,outputColName=outputColName):
+ if createPlayground(
+ email,
+ workspaceID,
+ df,
+ promptText=prompt_template,
+ definationMapping=self.definationMapping,
+ outputColName=outputColName,
+ ):
  print(
  "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.Please rerun the experiment to see the results on playground."
  )
@@ -392,7 +410,9 @@ class LlumoClient:
  time.sleep(0.1)
  waited_secs += 0.1
  if waited_secs >= max_wait_secs:
- raise RuntimeError("Timeout waiting for server 'connection-established' event.")
+ raise RuntimeError(
+ "Timeout waiting for server 'connection-established' event."
+ )
 
  try:
  self.validateApiKey()
@@ -403,8 +423,14 @@ class LlumoClient:
  print(f"Response content: {e.response.text[:500]}...")
  raise
 
- userHits = checkUserHits(self.workspaceID, self.hasSubscribed, self.trialEndDate, self.subscriptionEndDate,
- self.hitsAvailable, len(dataframe))
+ userHits = checkUserHits(
+ self.workspaceID,
+ self.hasSubscribed,
+ self.trialEndDate,
+ self.subscriptionEndDate,
+ self.hitsAvailable,
+ len(dataframe),
+ )
 
  if not userHits["success"]:
  raise LlumoAIError.InsufficientCredits(userHits["message"])
@@ -425,14 +451,22 @@ class LlumoClient:
  if not all([ky in dataframe.columns for ky in keys]):
  raise LlumoAIError.InvalidPromptTemplate()
 
- activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+ activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace(
+ "-", ""
+ )
  rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
  columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
 
- compressed_prompt_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
- compressed_prompt_output_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+ compressed_prompt_id = (
+ f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+ )
+ compressed_prompt_output_id = (
+ f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+ )
  cost_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
- cost_saving_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+ cost_saving_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace(
+ "-", ""
+ )
 
  rowDataDict = {}
  for col in dataframe.columns:
@@ -452,7 +486,7 @@ class LlumoClient:
  "compressed_prompt": compressed_prompt_id,
  "compressed_prompt_output": compressed_prompt_output_id,
  "cost": cost_id,
- "cost_saving": cost_saving_id
+ "cost_saving": cost_saving_id,
  },
  "processData": {
  "rowData": rowDataDict,
@@ -465,12 +499,12 @@ class LlumoClient:
  "compressed_prompt": compressed_prompt_id,
  "compressed_prompt_output": compressed_prompt_output_id,
  "cost": cost_id,
- "cost_saving": cost_saving_id
- }
+ "cost_saving": cost_saving_id,
+ },
  },
  "workspaceID": workspaceID,
  "email": email,
- "playgroundID": activePlayground
+ "playgroundID": activePlayground,
  }
 
  rowIdMapping[rowID] = index
@@ -495,7 +529,12 @@ class LlumoClient:
 
  self.AllProcessMapping()
  timeout = max(60, min(600, total_items * 10))
- self.socket.listenForResults(min_wait=20, max_wait=timeout, inactivity_timeout=30, expected_results=None)
+ self.socket.listenForResults(
+ min_wait=20,
+ max_wait=timeout,
+ inactivity_timeout=30,
+ expected_results=None,
+ )
 
  results = self.socket.getReceivedData()
  # results = self.finalResp(eval_results)
@@ -514,7 +553,7 @@ class LlumoClient:
  for records in results:
  for compound_key, value in records.items():
  # for compound_key, value in item['data'].items():
- rowID = compound_key.split('-')[0]
+ rowID = compound_key.split("-")[0]
  # looking for the index of each rowID , in the original dataframe
  if rowID in rowIdMapping:
  index = rowIdMapping[rowID]
@@ -534,18 +573,18 @@ class LlumoClient:
  return dataframe
 
  def evaluateMultiple(
- self,
- data,
- evals: list, # list of eval metric names
- prompt_template="",
- outputColName="output",
- createExperiment: bool = False,
- _tocheck=True,
+ self,
+ data,
+ evals: list, # list of eval metric names
+ prompt_template="",
+ outputColName="output",
+ createExperiment: bool = False,
+ _tocheck=True,
  ):
  dataframe = pd.DataFrame(data)
  workspaceID = None
  email = None
- socketID = self.socket.connect(timeout=150)
+ socketID = self.socket.connect(timeout=250)
  self.allBatches = []
  rowIdMapping = {} # (rowID-columnID-columnID -> (index, evalName))
 
@@ -567,19 +606,24 @@ class LlumoClient:
  kwargs={
  "min_wait": 40,
  "max_wait": timeout,
- "inactivity_timeout": 200,
+ "inactivity_timeout": 10,
  "expected_results": expectedResults,
  },
- daemon=True
+ daemon=True,
  )
  listener_thread.start()
-
+
+ activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace(
+ "-", ""
+ )
  for evalName in evals:
  # print(f"\n======= Running evaluation for: {evalName} =======")
 
  # Validate API and dependencies
  self.validateApiKey(evalName=evalName)
- metricDependencies = checkDependency(evalName, list(dataframe.columns), tocheck=_tocheck)
+ metricDependencies = checkDependency(
+ evalName, list(dataframe.columns), tocheck=_tocheck
+ )
  if not metricDependencies["status"]:
  raise LlumoAIError.dependencyError(metricDependencies["message"])
 
@@ -605,7 +649,11 @@ class LlumoClient:
  for index, row in dataframe.iterrows():
  tools = [row["tools"]] if "tools" in dataframe.columns else []
  groundTruth = row.get("groundTruth", "")
- messageHistory = [row["messageHistory"]] if "messageHistory" in dataframe.columns else []
+ messageHistory = (
+ [row["messageHistory"]]
+ if "messageHistory" in dataframe.columns
+ else []
+ )
  promptTemplate = prompt_template
  keys = re.findall(r"{{(.*?)}}", promptTemplate)
 
@@ -615,7 +663,7 @@ class LlumoClient:
  inputDict = {key: row[key] for key in keys if key in row}
  output = row.get(outputColName, "")
 
- activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+
  rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
  columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
 
@@ -661,7 +709,9 @@ class LlumoClient:
  else:
  if promptTemplate:
  tempObj = {key: value}
- promptTemplate = getInputPopulatedPrompt(promptTemplate, tempObj)
+ promptTemplate = getInputPopulatedPrompt(
+ promptTemplate, tempObj
+ )
  else:
  query += f" {key}: {value}, "
 
@@ -669,10 +719,16 @@ class LlumoClient:
  for key, value in inputDict.items():
  context += f" {key}: {value}, "
 
- templateData["processData"]["executionDependency"]["context"] = context.strip()
- templateData["processData"]["executionDependency"]["query"] = query.strip()
+ templateData["processData"]["executionDependency"][
+ "context"
+ ] = context.strip()
+ templateData["processData"]["executionDependency"][
+ "query"
+ ] = query.strip()
  if promptTemplate and not query.strip():
- templateData["processData"]["executionDependency"]["query"] = promptTemplate
+ templateData["processData"]["executionDependency"][
+ "query"
+ ] = promptTemplate
 
  currentBatch.append(templateData)
  if len(currentBatch) == 10:
@@ -682,31 +738,45 @@ class LlumoClient:
  if currentBatch:
  self.allBatches.append(currentBatch)
 
-
- for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch",colour="magenta", ascii=False):
+ for batch in tqdm(
+ self.allBatches,
+ desc="Processing Batches",
+ unit="batch",
+ colour="magenta",
+ ascii=False,
+ ):
  try:
  self.postBatch(batch=batch, workspaceID=workspaceID)
-
+ time.sleep(3)
  except Exception as e:
  print(f"Error posting batch: {e}")
  raise
 
-
  # Wait for results
-
+ time.sleep(3)
  listener_thread.join()
 
- raw_results = self.socket.getReceivedData()
-
 
+ rawResults = self.socket.getReceivedData()
 
+ # print("data from db #####################",dataFromDb)
  # Fix here: keep full keys, do not split keys
- # received_rowIDs = {key for item in raw_results for key in item.keys()}
- # expected_rowIDs = set(rowIdMapping.keys())
- # missing_rowIDs = expected_rowIDs - received_rowIDs
+ receivedRowIDs = {key for item in rawResults for key in item.keys()}
+ expectedRowIDs = set(rowIdMapping.keys())
+ missingRowIDs = expectedRowIDs - receivedRowIDs
  # print("All expected keys:", expected_rowIDs)
  # print("All received keys:", received_rowIDs)
- # print("Missing keys:", missing_rowIDs)
+ # print("Missing keys:", len(missingRowIDs))
+ missingRowIDs=list(missingRowIDs)
+
+ if len(missingRowIDs) > 0:
+ dataFromDb=fetchData(workspaceID,activePlayground,missingRowIDs)
+ rawResults.extend(dataFromDb)
+
+
+
+
+
 
  # Initialize dataframe columns for each eval
  for eval in evals:
@@ -714,7 +784,7 @@ class LlumoClient:
  dataframe[f"{eval} Reason"] = None
 
  # Map results to dataframe rows
- for item in raw_results:
+ for item in rawResults:
  for compound_key, value in item.items():
  if compound_key in rowIdMapping:
  index = rowIdMapping[compound_key]["index"]
@@ -723,32 +793,37 @@ class LlumoClient:
  dataframe.at[index, f"{evalName} Reason"] = value.get("reasoning")
 
  self.socket.disconnect()
+
 
  if createExperiment:
  pd.set_option("future.no_silent_downcasting", True)
  df = dataframe.fillna("Some error occured").astype(object)
- if createPlayground(email, workspaceID, df, promptText=prompt_template,
- definationMapping=self.definationMapping, outputColName=outputColName):
+ if createPlayground(
+ email,
+ workspaceID,
+ df,
+ promptText=prompt_template,
+ definationMapping=self.definationMapping,
+ outputColName=outputColName,
+ ):
  print(
- "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
+ "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+ )
  else:
  return dataframe
 
  def run_sweep(
- self,
- templates: List[str],
- dataset: Dict[str, List[str]],
- model_aliases: List[AVAILABLEMODELS],
- apiKey: str,
- evals=["Response Correctness"],
- toEvaluate: bool = False,
- createExperiment: bool = False,
- ) -> pd.DataFrame:
-
-
-
+ self,
+ templates: List[str],
+ dataset: Dict[str, List[str]],
+ model_aliases: List[AVAILABLEMODELS],
+ apiKey: str,
+ evals=["Response Correctness"],
+ toEvaluate: bool = False,
+ createExperiment: bool = False,
+ ) -> pd.DataFrame:
 
- self.validateApiKey(evalName="")
+ self.validateApiKey(evalName=" ")
  workspaceID = self.workspaceID
  email = self.email
  executor = ModelExecutor(apiKey)
@@ -772,7 +847,9 @@ class LlumoClient:
  for i, model in enumerate(model_aliases, 1):
  try:
  provider = getProviderFromModel(model)
- response = executor.execute(provider, model.value, prompt, apiKey)
+ response = executor.execute(
+ provider, model.value, prompt, apiKey
+ )
  outputKey = f"output_{i}"
  row[outputKey] = response
  except Exception as e:
@@ -780,14 +857,11 @@ class LlumoClient:
 
  results.append(row)
 
-
-
  df = pd.DataFrame(results)
 
-
- if toEvaluate==True:
+ if toEvaluate == True:
  dfWithEvals = df.copy()
- for i, model in enumerate(model_aliases,1):
+ for i, model in enumerate(model_aliases, 1):
  outputColName = f"output_{i}"
  try:
  res = self.evaluateMultiple(
@@ -797,7 +871,7 @@ class LlumoClient:
  outputColName=outputColName,
  _tocheck=False,
  )
-
+
  # Rename all new columns with _i+1 (e.g., _1, _2)
  for evalMetric in evals:
  scoreCol = f"{evalMetric}"
@@ -808,10 +882,10 @@ class LlumoClient:
  res = res.rename(columns={reasonCol: f"{reasonCol}_{i}"})
 
  # Drop duplicated columns from df (like prompt, variables, etc.)
- newCols = [col for col in res.columns if col not in dfWithEvals.columns]
+ newCols = [
+ col for col in res.columns if col not in dfWithEvals.columns
+ ]
  dfWithEvals = pd.concat([dfWithEvals, res[newCols]], axis=1)
-
-
 
  except Exception as e:
  print(f"Evaluation failed for model {model.value}: {str(e)}")
@@ -819,22 +893,31 @@ class LlumoClient:
  if createExperiment:
  pd.set_option("future.no_silent_downcasting", True)
  dfWithEvals = dfWithEvals.fillna("Some error occurred")
- if createPlayground(email, workspaceID, dfWithEvals, promptText=templates[0],definationMapping=self.definationMapping):
-
- print("Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
+ if createPlayground(
+ email,
+ workspaceID,
+ dfWithEvals,
+ promptText=templates[0],
+ definationMapping=self.definationMapping,
+ ):
+
+ print(
+ "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+ )
  else:
  return dfWithEvals
  else:
- if createExperiment==True:
+ if createExperiment == True:
  pd.set_option("future.no_silent_downcasting", True)
  df = df.fillna("Some error occurred")
 
  if createPlayground(email, workspaceID, df, promptText=templates[0]):
- print("Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
- else :
+ print(
+ "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+ )
+ else:
  return df
 
-
  # this function generates an output using llm and tools and evaluate that output
  def evaluateAgents(
  self,
@@ -856,27 +939,26 @@ class LlumoClient:
  toolResponseDf = LlumoAgentExecutor.run(
  dataframe, agents, model=model, model_api_key=model_api_key
  )
-
 
  # for eval in evals:
- # Perform evaluation
- # toolResponseDf = self.evaluate(
- # toolResponseDf.to_dict(orient = "records"),
- # eval=eval,
- # prompt_template=prompt_template,
- # createExperiment=False,
- # )
+ # Perform evaluation
+ # toolResponseDf = self.evaluate(
+ # toolResponseDf.to_dict(orient = "records"),
+ # eval=eval,
+ # prompt_template=prompt_template,
+ # createExperiment=False,
+ # )
  toolResponseDf = self.evaluateMultiple(
- toolResponseDf.to_dict(orient = "records"),
+ toolResponseDf.to_dict(orient="records"),
  evals=evals,
  prompt_template=prompt_template,
  createExperiment=False,
- )
+ )
 
  if createExperiment:
  pd.set_option("future.no_silent_downcasting", True)
  df = toolResponseDf.fillna("Some error occured")
- if createPlayground(self.email, self.workspaceID, df):
+ if createPlayground(self.email, self.workspaceID, df,promptText=prompt_template,definationMapping=self.definationMapping):
  print(
  "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
  )
@@ -906,16 +988,20 @@ class LlumoClient:
  # toolResponseDf.to_dict(orient = "records"), eval=eval, prompt_template="Give answer for the given query: {{query}}",outputColName=outputColName
  # )
  toolResponseDf = self.evaluateMultiple(
- toolResponseDf.to_dict(orient = "records"),
- eval=evals, prompt_template="Give answer for the given query: {{query}}",
- outputColName=outputColName
+ toolResponseDf.to_dict(orient="records"),
+ evals=evals,
+ prompt_template="Give answer for the given query: {{query}}",
+ outputColName=outputColName,
+ createExperiment=createExperiment
  )
- return toolResponseDf
+ if createExperiment:
+ pass
+ else:
+ return toolResponseDf
 
  except Exception as e:
  raise e
 
-
  def runDataStream(
  self,
  data,
@@ -939,11 +1025,11 @@ class LlumoClient:
  )
  # print(f"Connected with socket ID: {socketID}")
  rowIdMapping = {}
-
- # print(f"Validating API key...")
+
+ # print(f"Validating API key...")
  self.validateApiKey()
- # print(f"API key validation successful. Hits available: {self.hitsAvailable}")
-
+ # print(f"API key validation successful. Hits available: {self.hitsAvailable}")
+
  # check for available hits and trial limit
  userHits = checkUserHits(
  self.workspaceID,
@@ -1071,7 +1157,13 @@ class LlumoClient:
  pd.set_option("future.no_silent_downcasting", True)
  df = dataframe.fillna("Some error occured").astype(object)
 
- if createPlayground(email, workspaceID, df,queryColName=queryColName, dataStreamName=streamId):
+ if createPlayground(
+ email,
+ workspaceID,
+ df,
+ queryColName=queryColName,
+ dataStreamName=streamId,
+ ):
  print(
  "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
  )
@@ -1091,6 +1183,49 @@ class LlumoClient:
  except Exception as e:
  raise "Some error ocuured please check your API key"
 
+ def upload(self, file_path):
+
+ workspaceID = None
+ email = None
+
+
+ try:
+ self.validateApiKey()
+ except Exception as e:
+ if hasattr(e, "response") and getattr(e, "response", None) is not None:
+ pass
+ raise
+
+ # Get file extension
+ _, ext = os.path.splitext(file_path)
+ ext = ext.lower()
+
+ # Supported formats
+ try:
+ if ext == ".csv":
+ df = pd.read_csv(file_path)
+ elif ext in [".xlsx", ".xls"]:
+ df = pd.read_excel(file_path)
+ elif ext == ".json":
+ df = pd.read_json(file_path, orient="records")
+ elif ext == ".parquet":
+ df = pd.read_parquet(file_path)
+ else:
+ raise ValueError(f"Unsupported file format: {ext}")
+
+ # If successfully loaded, call createPlayground
+ df = df.astype(str)
+ if createPlayground(self.email, self.workspaceID, df):
+
+ print(
+ "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+ )
+
+ return True
+
+ except Exception as e:
+ print(f"Error: {e}")
+
 
  class SafeDict(dict):
  def __missing__(self, key):
llumo/helpingFuntions.py CHANGED
@@ -212,7 +212,6 @@ def deleteColumnListInPlayground(workspaceID: str, playgroundID: str):
  print("❌ Error:", response.status_code, response.text)
  return None
 
-
  def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColName=None,outputColName= "output",dataStreamName=None,definationMapping=None):
  if len(dataframe) > 100:
  dataframe = dataframe.head(100)
@@ -238,7 +237,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  columnIDMapping[col] = columnID
 
 
- if col.startswith('output'):
+ if col.startswith('output') and promptText!=None:
  # For output columns, create the prompt template with promptText
  if promptText:
  # Extract variables from promptText and set them as dependencies
@@ -277,7 +276,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  "order": indx,
  }
 
- elif col.startswith('Data '):
+ elif col.startswith('Data ') :
  if queryColName and dataStreamName:
  dependencies = []
  dependencies.append(columnIDMapping[queryColName])
@@ -291,7 +290,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  "type": "DATA_STREAM",
  "order": indx}
 
- elif col in allEvals:
+ elif col in allEvals and promptText!=None:
 
  dependencies = []
  variables = re.findall(r'{{(.*?)}}', promptText)
@@ -341,7 +340,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  "order": indx
  }
 
- elif col.endswith(' Reason'):
+ elif col.endswith(' Reason') and promptText!=None:
  continue
 
 
@@ -374,7 +373,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  for col in dataframe.columns:
  columnID = columnIDMapping[col]
 
- if col in allEvals:
+ if col in allEvals and promptText!=None:
  row_dict[columnID] = {
 
  "value": row[col],
@@ -385,7 +384,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
  "kpi": col
 
  }
- elif col.endswith(' Reason'):
+ elif col.endswith(' Reason') and promptText!=None:
  continue
  else:# Get the columnID from the mapping
  row_dict[columnID] = row[col]
@@ -397,6 +396,7 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
 
  # Return the column template, row template, and the column ID mapping
  return coltemplate, rowTemplate
+
  def uploadColumnListInPlayground(payload):
  url = uploadColList
  headers = {
@@ -447,8 +447,7 @@ def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,d
  workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName
  )
 
-
-
+ # Debugging line to check the payload2 structure
  deleteExistingRows = deleteColumnListInPlayground(
  workspaceID=workspaceID, playgroundID=playgroundId
  )
@@ -460,6 +459,7 @@ def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,d
 
 
 
+
  def getPlaygroundInsights(workspaceID: str, activePlayground: str):
  headers = {
 
@@ -563,4 +563,46 @@ def checkDependency(selectedEval, columns,tocheck=True):
  }
  return {"status":True,"message":"success"}
  else:
- return {"status":True,"message":"success"}
+ return {"status":True,"message":"success"}
+
+
+ def fetchData(workspaceID, playgroundID, missingList: list):
+ # Define the URL and prepare the payload
+ socket_data_url = "https://app.llumo.ai/api/eval/get-awaited"
+ payload = {
+ "workspaceID": workspaceID,
+ "playgroundID": playgroundID,
+ "missingList": missingList
+ }
+
+ try:
+ # Send a POST request to the API
+ response = requests.post(socket_data_url, json=payload)
+
+ # Check if the response is successful
+ if response.status_code == 200:
+ # Parse the JSON data from the response
+ data = response.json().get("data", {})
+
+
+ # Prepare the list of all data values in the desired format
+ result_list = []
+ for key, value in data.items():
+ # Create a dictionary for each item in the response data
+ result_list.append({
+ key: {
+ "value": value.get("value"),
+ "reasoning": value.get("reasoning"),
+ "edgeCase": value.get("edgeCase"),
+ "kpi": value.get("kpi")
+ }
+ })
+
+ return result_list
+ else:
+ print(f"Failed to fetch data. Status Code: {response.status_code}")
+ return []
+
+ except Exception as e:
+ print(f"An error occurred: {e}")
+ return []
llumo/sockets.py CHANGED
@@ -17,15 +17,16 @@ class LlumoSocketClient:
 
  # Initialize client
  self.sio = socketio.Client(
- logger=True,
- engineio_logger=True,
+ logger=False,
+ engineio_logger=False,
  reconnection=True,
- reconnection_attempts=10,
+ reconnection_attempts=1,
  reconnection_delay=1,
  )
 
  @self.sio.on("connect")
  def on_connect():
+ self.sio.emit("ready")
  # print("Socket connection established")
  self._connected = True
  # Don't set connection_established yet - wait for server confirmation
@@ -37,12 +38,13 @@ class LlumoSocketClient:
  # f"Server acknowledged connection with 'connection-established' event: {data}"
  # )
  if isinstance(data, dict) and "socketId" in data:
+ self.sio.emit("ready")
  self.server_socket_id = data["socketId"]
  # print(f"Received server socket ID: {self.server_socket_id}")
  self._connection_established.set()
 
  @self.sio.on("result-update")
- def on_result_update(data):
+ def on_result_update(data, callback=None):
  with self._lock:
  # print(f"Received result-update event: {data}")
  self._received_data.append(data)
@@ -55,6 +57,8 @@ class LlumoSocketClient:
  ):
  # print("✅ All expected results received.")
  self._listening_done.set()
+ if callback:
+ callback(True)
 
  @self.sio.on("disconnect")
  def on_disconnect():
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.14b5
+ Version: 0.2.14b7
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo
@@ -0,0 +1,13 @@
+ llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
+ llumo/client.py,sha256=HpvUyucrGPbcPQMz_cTRDcEsBFpmNt8jfW1zJU4Nyss,46781
+ llumo/exceptions.py,sha256=i3Qv4_g7XjRuho7-b7ybjw2bwSh_NhvICR6ZAgiLQX8,1944
+ llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
+ llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
+ llumo/helpingFuntions.py,sha256=RgWok8DoE1R-Tc0kJ9B5En6LEUEk5EvQU8iJiGPbUsw,21911
+ llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
+ llumo/sockets.py,sha256=I2JO_eNEctRo_ikgvFVp5zDd-m0VDu04IEUhhsa1Tic,5950
+ llumo-0.2.14b7.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+ llumo-0.2.14b7.dist-info/METADATA,sha256=kdeDmcNgV8uRyH7gXhhAqeb3se5U_Gqo3bA3Cf4SLlM,1521
+ llumo-0.2.14b7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ llumo-0.2.14b7.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+ llumo-0.2.14b7.dist-info/RECORD,,
@@ -1,13 +0,0 @@
- llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
- llumo/client.py,sha256=Zque6TDcEBFO1tQjNMsmUpYVbGdooLiwDKDYPtJ2szY,43465
- llumo/exceptions.py,sha256=i3Qv4_g7XjRuho7-b7ybjw2bwSh_NhvICR6ZAgiLQX8,1944
- llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
- llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
- llumo/helpingFuntions.py,sha256=9aFvBNPB-AyeMs6c8YpdPckNeF95thmTqAAmYC1q4oo,20320
- llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
- llumo/sockets.py,sha256=ox7gfYKPqfeufjJPrEdK4WFrj0hFoMnfrFTPkRYysqg,5804
- llumo-0.2.14b5.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
- llumo-0.2.14b5.dist-info/METADATA,sha256=oB9mvp7SsvdX41Ig2AWqah-7Ym7MxZjRgYHkQCXkYQo,1521
- llumo-0.2.14b5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- llumo-0.2.14b5.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
- llumo-0.2.14b5.dist-info/RECORD,,