llumo 0.2.14b2__py3-none-any.whl → 0.2.14b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llumo/client.py CHANGED
@@ -16,10 +16,11 @@ from .exceptions import LlumoAIError
 from .helpingFuntions import *
 from .sockets import LlumoSocketClient
 from .functionCalling import LlumoAgentExecutor
+import threading
+from tqdm import tqdm
 
-
-postUrl = "https://app.llumo.ai/api/eval/run-multiple-column"
-fetchUrl = "https://app.llumo.ai/api/eval/fetch-rows-data-by-column"
+postUrl = "https://red-skull-service-392377961931.us-central1.run.app/api/process-playground"
+fetchUrl = "https://red-skull-service-392377961931.us-central1.run.app/api/get-cells-data"
 
 validateUrl = "https://app.llumo.ai/api/workspace-details"
 socketUrl = "https://red-skull-service-392377961931.us-central1.run.app/"
 
@@ -32,7 +33,7 @@ class LlumoClient:
         self.processMapping = {}
         self.definationMapping = {}
 
-    def validateApiKey(self, evalName=" "):
+    def validateApiKey(self, evalName="Input Bias"):
         headers = {
             "Authorization": f"Bearer {self.apiKey}",
             "Content-Type": "application/json",
@@ -43,11 +44,7 @@ class LlumoClient:
             response = requests.post(url=validateUrl, json=reqBody, headers=headers)
 
 
-            try:
-                response_preview = response.text[:500]  # First 500 chars
-                # print(f"Response preview: {response_preview}")
-            except Exception as e:
-                print(f"Could not get response preview: {e}")
+
 
         except requests.exceptions.RequestException as e:
             print(f"Request exception: {str(e)}")
@@ -383,69 +380,6 @@ class LlumoClient:
         return dataframe
 
     # this function allows the users to run multiple evals at once
-    def evaluateMultiple(
-        self,
-        data,
-        eval=["Response Completeness"],
-        prompt_template="Give answer to the given query:{{query}} , using the given context: {{context}}",
-        outputColName="output",
-        createExperiment: bool = False,
-        _tocheck=True,
-    ):
-        """
-        Runs multiple evaluation metrics on the same input dataset.
-
-        Parameters:
-            data (list of dict): Input data, where each dict represents a row.
-            eval (list of str): List of evaluation metric names to run.
-            prompt_template (str): Optional prompt template used in evaluation.
-            outputColName (str): Column name in data that holds the model output.
-            createExperiment (bool): Whether to log the results to Llumo playground.
-
-        Returns:
-            pandas.DataFrame: Final dataframe with all evaluation results.
-        """
-
-        # Convert input dict list into a DataFrame
-        dataframe = pd.DataFrame(data)
-
-        # Copy to hold final results
-        resultdf = dataframe.copy()
-
-        # Run each evaluation metric one by one
-        for evalName in eval:
-            # time.sleep(2)  # small delay to avoid overload or rate limits
-
-            # Call evaluate (assumes evaluate takes dict, not dataframe)
-            resultdf = self.evaluate(
-                data=resultdf.to_dict(orient="records"),  # convert df back to dict list
-                eval=evalName,
-                prompt_template=prompt_template,
-                outputColName=outputColName,
-                createExperiment=False,
-                _tocheck=_tocheck,
-            )
-
-        # Save to playground if requested
-        if createExperiment:
-            pd.set_option("future.no_silent_downcasting", True)
-            df = resultdf.fillna("Some error occured").astype(object)
-
-            if createPlayground(
-                self.email,
-                self.workspaceID,
-                df,
-                definationMapping=self.definationMapping,
-                outputColName=outputColName,
-                promptText=prompt_template
-            ):
-                print(
-                    "Your data has been saved in the Llumo Experiment. "
-                    "Visit https://app.llumo.ai/evallm to see the results. "
-                    "Please rerun the experiment to see the results on playground."
-                )
-        else:
-            return resultdf
 
     def evaluateCompressor(self, data, prompt_template):
         results = []
@@ -598,22 +532,223 @@ class LlumoClient:
         # dataframe["cost_saving"] = cost_saving
 
         return dataframe
+
+    def evaluateMultiple(
+        self,
+        data,
+        evals: list,  # list of eval metric names
+        prompt_template="",
+        outputColName="output",
+        createExperiment: bool = False,
+        _tocheck=True,
+    ):
+        dataframe = pd.DataFrame(data)
+        workspaceID = None
+        email = None
+        socketID = self.socket.connect(timeout=150)
+        self.allBatches = []
+        rowIdMapping = {}  # (rowID-columnID-columnID -> (index, evalName))
+
+        # Wait for socket connection
+        max_wait_secs = 20
+        waited_secs = 0
+        while not self.socket._connection_established.is_set():
+            time.sleep(0.1)
+            waited_secs += 0.1
+            if waited_secs >= max_wait_secs:
+                raise RuntimeError("Timeout waiting for server connection")
+
+        # Start listener thread
+        expectedResults = len(dataframe) * len(evals)
+        # print("expected result" ,expectedResults)
+        timeout = max(100, min(150, expectedResults * 10))
+        listener_thread = threading.Thread(
+            target=self.socket.listenForResults,
+            kwargs={
+                "min_wait": 40,
+                "max_wait": timeout,
+                "inactivity_timeout": 200,
+                "expected_results": expectedResults,
+            },
+            daemon=True
+        )
+        listener_thread.start()
+
+        for evalName in evals:
+            # print(f"\n======= Running evaluation for: {evalName} =======")
+
+            # Validate API and dependencies
+            self.validateApiKey(evalName=evalName)
+            metricDependencies = checkDependency(evalName, list(dataframe.columns), tocheck=_tocheck)
+            if not metricDependencies["status"]:
+                raise LlumoAIError.dependencyError(metricDependencies["message"])
+
+            evalDefinition = self.evalDefinition[evalName]["definition"]
+            model = "GPT_4"
+            provider = "OPENAI"
+            evalType = "LLM"
+            workspaceID = self.workspaceID
+            email = self.email
+
+            userHits = checkUserHits(
+                self.workspaceID,
+                self.hasSubscribed,
+                self.trialEndDate,
+                self.subscriptionEndDate,
+                self.hitsAvailable,
+                len(dataframe),
+            )
+            if not userHits["success"]:
+                raise LlumoAIError.InsufficientCredits(userHits["message"])
+
+            currentBatch = []
+            for index, row in dataframe.iterrows():
+                tools = [row["tools"]] if "tools" in dataframe.columns else []
+                groundTruth = row.get("groundTruth", "")
+                messageHistory = [row["messageHistory"]] if "messageHistory" in dataframe.columns else []
+                promptTemplate = prompt_template
+                keys = re.findall(r"{{(.*?)}}", promptTemplate)
+
+                if not all([ky in dataframe.columns for ky in keys]):
+                    raise LlumoAIError.InvalidPromptTemplate()
+
+                inputDict = {key: row[key] for key in keys if key in row}
+                output = row.get(outputColName, "")
+
+                activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+                rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+                columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
+
+                compoundKey = f"{rowID}-{columnID}-{columnID}"
+                rowIdMapping[compoundKey] = {"index": index, "eval": evalName}
+
+                templateData = {
+                    "processID": getProcessID(),
+                    "socketID": socketID,
+                    "source": "SDK",
+                    "processData": {
+                        "executionDependency": {
+                            "query": "",
+                            "context": "",
+                            "output": output,
+                            "tools": tools,
+                            "groundTruth": groundTruth,
+                            "messageHistory": messageHistory,
+                        },
+                        "definition": evalDefinition,
+                        "model": model,
+                        "provider": provider,
+                        "analytics": evalName,
+                    },
+                    "workspaceID": workspaceID,
+                    "type": "EVAL",
+                    "evalType": evalType,
+                    "kpi": evalName,
+                    "columnID": columnID,
+                    "rowID": rowID,
+                    "playgroundID": activePlayground,
+                    "processType": "EVAL",
+                    "email": email,
+                }
+
+                query = ""
+                context = ""
+                for key, value in inputDict.items():
+                    if isinstance(value, str):
+                        length = len(value.split()) * 1.5
+                        if length > 50:
+                            context += f" {key}: {value}, "
+                        else:
+                            if promptTemplate:
+                                tempObj = {key: value}
+                                promptTemplate = getInputPopulatedPrompt(promptTemplate, tempObj)
+                            else:
+                                query += f" {key}: {value}, "
+
+                if not context.strip():
+                    for key, value in inputDict.items():
+                        context += f" {key}: {value}, "
+
+                templateData["processData"]["executionDependency"]["context"] = context.strip()
+                templateData["processData"]["executionDependency"]["query"] = query.strip()
+                if promptTemplate and not query.strip():
+                    templateData["processData"]["executionDependency"]["query"] = promptTemplate
+
+                currentBatch.append(templateData)
+                if len(currentBatch) == 10:
+                    self.allBatches.append(currentBatch)
+                    currentBatch = []
+
+            if currentBatch:
+                self.allBatches.append(currentBatch)
+
+
+        for batch in tqdm(self.allBatches, desc="Processing Batches", unit="batch",colour="magenta", ascii=False):
+            try:
+                self.postBatch(batch=batch, workspaceID=workspaceID)
+
+            except Exception as e:
+                print(f"Error posting batch: {e}")
+                raise
+
+
+        # Wait for results
+
+        listener_thread.join()
+
+        raw_results = self.socket.getReceivedData()
+
+
+
+        # Fix here: keep full keys, do not split keys
+        # received_rowIDs = {key for item in raw_results for key in item.keys()}
+        # expected_rowIDs = set(rowIdMapping.keys())
+        # missing_rowIDs = expected_rowIDs - received_rowIDs
+        # print("All expected keys:", expected_rowIDs)
+        # print("All received keys:", received_rowIDs)
+        # print("Missing keys:", missing_rowIDs)
+
+        # Initialize dataframe columns for each eval
+        for eval in evals:
+            dataframe[eval] = None
+            dataframe[f"{eval} Reason"] = None
+
+        # Map results to dataframe rows
+        for item in raw_results:
+            for compound_key, value in item.items():
+                if compound_key in rowIdMapping:
+                    index = rowIdMapping[compound_key]["index"]
+                    evalName = rowIdMapping[compound_key]["eval"]
+                    dataframe.at[index, evalName] = value.get("value")
+                    dataframe.at[index, f"{evalName} Reason"] = value.get("reasoning")
+
+        self.socket.disconnect()
+
+        if createExperiment:
+            pd.set_option("future.no_silent_downcasting", True)
+            df = dataframe.fillna("Some error occured").astype(object)
+            if createPlayground(email, workspaceID, df, promptText=prompt_template,
+                                definationMapping=self.definationMapping, outputColName=outputColName):
+                print(
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
+        else:
+            return dataframe
+
 
     def run_sweep(
         self,
         templates: List[str],
         dataset: Dict[str, List[str]],
         model_aliases: List[AVAILABLEMODELS],
         apiKey: str,
-        eval=["Response Correctness"],
+        evals=["Response Correctness"],
         toEvaluate: bool = False,
         createExperiment: bool = False,
     ) -> pd.DataFrame:
 
-        try:
-            self.validateApiKey()
-        except Exception as e:
-            raise Exception("Some error occurred, please check your API key")
+
+
 
+        self.validateApiKey(evalName="")
         workspaceID = self.workspaceID
         email = self.email
         executor = ModelExecutor(apiKey)
@@ -657,14 +792,14 @@ class LlumoClient:
                 try:
                     res = self.evaluateMultiple(
                         df.to_dict("records"),
-                        eval=eval,
+                        evals=evals,
                        prompt_template=str(templates[0]),
                        outputColName=outputColName,
                        _tocheck=False,
                    )
-
+
                    # Rename all new columns with _i+1 (e.g., _1, _2)
-                    for evalMetric in eval:
+                    for evalMetric in evals:
                        scoreCol = f"{evalMetric}"
                        reasonCol = f"{evalMetric} Reason"
                        if scoreCol in res.columns:
@@ -675,6 +810,8 @@ class LlumoClient:
                     # Drop duplicated columns from df (like prompt, variables, etc.)
                     newCols = [col for col in res.columns if col not in dfWithEvals.columns]
                     dfWithEvals = pd.concat([dfWithEvals, res[newCols]], axis=1)
+
+
 
                 except Exception as e:
                     print(f"Evaluation failed for model {model.value}: {str(e)}")
@@ -720,22 +857,22 @@ class LlumoClient:
                 dataframe, agents, model=model, model_api_key=model_api_key
             )
 
-
-            # evals = [
-            #     "Tool Reliability",
-            #     "Stepwise Progression",
-            #     "Tool Selection Accuracy",
-            #     "Final Task Alignment",
-            # ]
 
-            for eval in evals:
+            # for eval in evals:
                 # Perform evaluation
-                toolResponseDf = self.evaluate(
-                    toolResponseDf.to_dict(orient = "records"),
-                    eval=eval,
-                    prompt_template=prompt_template,
-                    createExperiment=False,
+            #     toolResponseDf = self.evaluate(
+            #         toolResponseDf.to_dict(orient = "records"),
+            #         eval=eval,
+            #         prompt_template=prompt_template,
+            #         createExperiment=False,
+            #     )
+            toolResponseDf = self.evaluateMultiple(
+                toolResponseDf.to_dict(orient = "records"),
+                evals=evals,
+                prompt_template=prompt_template,
+                createExperiment=False,
             )
+
             if createExperiment:
                 pd.set_option("future.no_silent_downcasting", True)
                 df = toolResponseDf.fillna("Some error occured")
@@ -762,21 +899,17 @@ class LlumoClient:
                     "DataFrame must contain 'query', 'messageHistory','output' ,and 'tools' columns. Make sure the columns names are same as mentioned here."
                 )
 
-
-            # evals = [
-            #     "Tool Reliability",
-            #     "Stepwise Progression",
-            #     "Tool Selection Accuracy",
-            #     "Final Task Alignment",
-            # ]
-
             toolResponseDf = dataframe.copy()
-            for eval in evals:
-                # Perform evaluation
-                toolResponseDf = self.evaluate(
-                    toolResponseDf.to_dict(orient = "records"), eval=eval, prompt_template="Give answer for the given query: {{query}}",outputColName=outputColName
-                )
-
+            # for eval in evals:
+            #     # Perform evaluation
+            #     toolResponseDf = self.evaluate(
+            #         toolResponseDf.to_dict(orient = "records"), eval=eval, prompt_template="Give answer for the given query: {{query}}",outputColName=outputColName
+            #     )
+            toolResponseDf = self.evaluateMultiple(
+                toolResponseDf.to_dict(orient = "records"),
+                eval=evals, prompt_template="Give answer for the given query: {{query}}",
+                outputColName=outputColName
+            )
             return toolResponseDf
 
         except Exception as e:
@@ -785,13 +918,13 @@
 
     def runDataStream(
         self,
-        dataframe,
+        data,
         streamName: str,
         queryColName: str = "query",
         createExperiment: bool = False,
     ):
         results = {}
-
+        dataframe = pd.DataFrame(data)
         try:
             socketID = self.socket.connect(timeout=150)
             # Ensure full connection before proceeding
@@ -806,16 +939,11 @@
             )
             # print(f"Connected with socket ID: {socketID}")
             rowIdMapping = {}
-            try:
+
             # print(f"Validating API key...")
-                self.validateApiKey()
+            self.validateApiKey()
             # print(f"API key validation successful. Hits available: {self.hitsAvailable}")
-            except Exception as e:
-                print(f"Error during API key validation: {str(e)}")
-                if hasattr(e, "response") and getattr(e, "response", None) is not None:
-                    print(f"Status code: {e.response.status_code}")
-                    print(f"Response content: {e.response.text[:500]}...")
-                raise
+
             # check for available hits and trial limit
             userHits = checkUserHits(
                 self.workspaceID,
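
Note on the client.py changes above: the old sequential evaluateMultiple (which looped over self.evaluate) is replaced by a socket-driven version that validates the key per metric, batches rows in groups of 10, posts them with a tqdm progress bar, joins a listener thread, and then maps results back into "<metric>" and "<metric> Reason" columns. A minimal usage sketch of the new signature; the import path and constructor keyword are assumptions based on the package layout, not shown in this diff:

    from llumo import LlumoClient  # import path assumed

    client = LlumoClient(apiKey="YOUR_LLUMO_API_KEY")  # constructor keyword assumed
    rows = [
        {
            "query": "What is RAG?",
            "context": "RAG pairs a retriever with a generator.",
            "output": "RAG retrieves documents and generates an answer from them.",
        },
    ]
    # One result arrives per row per metric; with createExperiment left False,
    # the scored DataFrame is returned directly.
    df = client.evaluateMultiple(
        rows,
        evals=["Response Completeness", "Response Correctness"],
        prompt_template="Give answer to the given query:{{query}} , using the given context: {{context}}",
        outputColName="output",
    )
    print(df[["Response Completeness", "Response Completeness Reason"]])

One caveat visible in the diff itself: run_sweep now forwards evals=evals, but the tool-evaluation path still calls evaluateMultiple with eval=evals; since the new signature only accepts evals, that call would raise a TypeError as published.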
llumo/helpingFuntions.py CHANGED
@@ -341,7 +341,8 @@ def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColN
                 "order": indx
             }
 
-
+        elif col.endswith(' Reason'):
+            continue
 
 
         else:
@@ -373,31 +374,29 @@
         for col in dataframe.columns:
             columnID = columnIDMapping[col]
 
-            # if col in allEvals:
-            #     row_dict[columnID] = {
+            if col in allEvals:
+                row_dict[columnID] = {
 
-            #         "value": row[col],
-            #         "type": "EVAL",
-            #         "isValid": True,
-            #         "reasoning": row[col+" Reason"],
-            #         "edgeCase": "minorHallucinationDetailNotInContext",
-            #         "kpi": col
+                    "value": row[col],
+                    "type": "EVAL",
+                    "isValid": True,
+                    "reasoning": row[col+" Reason"],
+                    "edgeCase": "minorHallucinationDetailNotInContext",
+                    "kpi": col
 
-            #     }
-            # else:# Get the columnID from the mapping
-            #     row_dict[columnID] = {
-            #         "value": row[col],
-            #         "type":"VARIABLE"# Map the columnID to the value in the row
-            #     }
+                }
+            elif col.endswith(' Reason'):
+                continue
+            else:# Get the columnID from the mapping
+                row_dict[columnID] = row[col]
 
-            row_dict[columnID] = row[col]  # Directly map the column ID to the row value
+            # row_dict[columnID] = row[col]  # Directly map the column ID to the row value
             # Add the row index (if necessary)
         row_dict["pIndex"] = indx
         rowTemplate["dataToUploadList"].append(row_dict)
 
     # Return the column template, row template, and the column ID mapping
     return coltemplate, rowTemplate
-
 def uploadColumnListInPlayground(payload):
     url = uploadColList
     headers = {
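
With the commented branch restored in createColumn, eval scores are now uploaded as structured EVAL cells, their companion "<metric> Reason" columns are folded into the cell and skipped as standalone columns, and every other column is uploaded as a plain value. A hypothetical illustration of one such cell for a metric named "Response Completeness" (the score and reasoning values are invented; the edgeCase string is hard-coded in this version):

    row_dict[columnID] = {
        "value": 85,  # score read from the "Response Completeness" column
        "type": "EVAL",
        "isValid": True,
        "reasoning": "Covers every part of the query.",  # from "Response Completeness Reason"
        "edgeCase": "minorHallucinationDetailNotInContext",  # hard-coded
        "kpi": "Response Completeness",
    }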
llumo/sockets.py CHANGED
@@ -17,8 +17,8 @@ class LlumoSocketClient:
 
         # Initialize client
         self.sio = socketio.Client(
-            # logger=True,
-            # engineio_logger=True,
+            logger=False,
+            engineio_logger=False,
             reconnection=True,
             reconnection_attempts=10,
             reconnection_delay=1,
@@ -104,6 +104,7 @@
         # if not self._connected:
         #     raise RuntimeError("WebSocket is not connected. Call connect() first.")
 
+        # total records
         self._expected_results = expected_results  # NEW
         start_time = time.time()
         self._last_update_time = time.time()
@@ -128,7 +129,7 @@
                     self._listening_done.set()
                     break
 
-
+
 
         timeout_thread = threading.Thread(target=timeout_watcher, daemon=True)
         timeout_thread.start()
@@ -136,6 +137,7 @@
 
     def getReceivedData(self):
         with self._lock:
+            # print("Total received:", len(self._received_data))  # DEBUG
             return self._received_data.copy()
 
     def disconnect(self):
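
The sockets.py edits are cosmetic (explicit logger flags, comments), but this class is what the new evaluateMultiple blocks on: listenForResults runs on a daemon thread, a watcher sets a done event once the expected result count or a timeout is hit, and getReceivedData hands back a copy of the buffer under a lock. A toy sketch of that pattern, reusing the diff's names but standing in for the SDK's real implementation:

    import threading
    import time

    received, lock, done = [], threading.Lock(), threading.Event()

    def listenForResults(expected_results, max_wait):
        start = time.time()
        while not done.is_set():
            with lock:
                if len(received) >= expected_results:  # all rows scored
                    done.set()
            if time.time() - start > max_wait:  # hard timeout, like timeout_watcher
                done.set()
            time.sleep(0.1)

    def getReceivedData():
        with lock:
            return received.copy()  # copy so callers never see concurrent mutation

    listener = threading.Thread(
        target=listenForResults,
        kwargs={"expected_results": 4, "max_wait": 150},
        daemon=True,
    )
    listener.start()
    with lock:
        received.extend({"row": i, "value": 90} for i in range(4))  # simulate results arriving
    listener.join()  # same join the client performs before reading results
    print(len(getReceivedData()), "results")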
{llumo-0.2.14b2.dist-info → llumo-0.2.14b3.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llumo
-Version: 0.2.14b2
+Version: 0.2.14b3
 Summary: Python SDK for interacting with the Llumo ai API.
 Home-page: https://www.llumo.ai/
 Author: Llumo
@@ -21,6 +21,7 @@ Requires-Dist: requests>=2.0.0
 Requires-Dist: python-socketio
 Requires-Dist: python-dotenv
 Requires-Dist: openai==1.75.0
+Requires-Dist: tqdm==4.67.1
 Requires-Dist: google-generativeai==0.8.5
 Dynamic: author
 Dynamic: author-email
llumo-0.2.14b3.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
+llumo/client.py,sha256=Zque6TDcEBFO1tQjNMsmUpYVbGdooLiwDKDYPtJ2szY,43465
+llumo/exceptions.py,sha256=i3Qv4_g7XjRuho7-b7ybjw2bwSh_NhvICR6ZAgiLQX8,1944
+llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
+llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
+llumo/helpingFuntions.py,sha256=9aFvBNPB-AyeMs6c8YpdPckNeF95thmTqAAmYC1q4oo,20320
+llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
+llumo/sockets.py,sha256=Rlww5z9w_Ij99Z_VzgpF93K9gSZ5YaS4A8oAY0jS1wA,5650
+llumo-0.2.14b3.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.14b3.dist-info/METADATA,sha256=gDpqIZ7fknvE3_2FgeWMppevOCYzGac3GOUBAU4nPoU,1521
+llumo-0.2.14b3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.14b3.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.14b3.dist-info/RECORD,,
llumo-0.2.14b2.dist-info/RECORD REMOVED
@@ -1,13 +0,0 @@
-llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
-llumo/client.py,sha256=Iy16dr-bPDWQ9iRRaNDnlJvZ_j52qEzLJWK2-CcDpwM,38135
-llumo/exceptions.py,sha256=i3Qv4_g7XjRuho7-b7ybjw2bwSh_NhvICR6ZAgiLQX8,1944
-llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
-llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
-llumo/helpingFuntions.py,sha256=mFoRtxpG4T7enXINTigN7Xztdbj1IKeGNvhrFvwrgSg,20360
-llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
-llumo/sockets.py,sha256=4X1KSdCJX8_sRY5E_m9bv2kd8B8Jymg_QM59de-FqLw,5570
-llumo-0.2.14b2.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
-llumo-0.2.14b2.dist-info/METADATA,sha256=SMGdn7-5oxYWRpz1IN-sJxU8NtaQz_CqQFOe9J8mDws,1493
-llumo-0.2.14b2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llumo-0.2.14b2.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
-llumo-0.2.14b2.dist-info/RECORD,,