llumo 0.2.12__py3-none-any.whl → 0.2.13b1__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only.
llumo/client.py CHANGED
@@ -30,6 +30,7 @@ class LlumoClient:
         self.apiKey = api_key
         self.socket = LlumoSocketClient(socketUrl)
         self.processMapping = {}
+        self.definationMapping = {}
 
     def validateApiKey(self, evalName=" "):
         headers = {
@@ -87,6 +88,8 @@ class LlumoClient:
             self.trialEndDate = data["data"]["data"].get("trialEndDate", None)
             self.subscriptionEndDate = data["data"]["data"].get("subscriptionEndDate", None)
             self.email = data["data"]["data"].get("email", None)
+
+            self.definationMapping[evalName] = self.evalDefinition
 
         except Exception as e:
             # print(f"Error extracting data from response: {str(e)}")
@@ -315,12 +318,13 @@ class LlumoClient:
 
         for cnt, batch in enumerate(self.allBatches):
             try:
+
                 self.postBatch(batch=batch, workspaceID=workspaceID)
                 # print("Betch Posted with item len: ", len(batch))
             except Exception as e:
                 continue
 
-            time.sleep(1)
+            time.sleep(3)
 
         timeout = max(50, min(600, totalItems * 10))
 
@@ -363,9 +367,9 @@ class LlumoClient:
             pd.set_option("future.no_silent_downcasting", True)
             df = dataframe.fillna("Some error occured").astype(object)
 
-            if createPlayground(email, workspaceID, df):
+            if createPlayground(email, workspaceID, df,promptText=prompt_template,definationMapping=self.definationMapping,outputColName=outputColName):
                 print(
-                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.Please rerun the experiment to see the results on playground."
                 )
         else:
             return dataframe
@@ -381,15 +385,16 @@ class LlumoClient:
     ):
         resultdf = dataframe.copy()
         for evalName in eval:
+            time.sleep(2)
             resultdf = self.evaluate(dataframe = resultdf,eval=evalName,prompt_template=prompt_template,outputColName=outputColName,createExperiment = False)
 
         if createExperiment:
             pd.set_option("future.no_silent_downcasting", True)
             df = resultdf.fillna("Some error occured").astype(object)
 
-            if createPlayground(self.email, self.workspaceID, df):
+            if createPlayground(self.email, self.workspaceID, df,definationMapping=self.definationMapping,outputColName=outputColName,promptText=prompt_template):
                 print(
-                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
+                    "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.Please rerun the experiment to see the results on playground."
                 )
         else:
             return resultdf
@@ -546,21 +551,22 @@ class LlumoClient:
         # dataframe["cost_saving"] = cost_saving
 
         return dataframe
-
     def run_sweep(
-        self,
-        templates: List[str],
-        dataset: Dict[str, List[str]],
-        model_aliases: List[AVAILABLEMODELS],
-        apiKey: str,
-        eval=["Response Correctness"],
-        toEvaluate: bool = False,
-        createExperiment: bool = False,
-    ) -> pd.DataFrame:
+            self,
+            templates: List[str],
+            dataset: Dict[str, List[str]],
+            model_aliases: List[AVAILABLEMODELS],
+            apiKey: str,
+            eval=["Response Correctness"],
+            toEvaluate: bool = False,
+            createExperiment: bool = False,
+    ) -> pd.DataFrame:
+
         try:
             self.validateApiKey()
         except Exception as e:
-            raise "Some error ocuured please check your API key"
+            raise Exception("Some error occurred, please check your API key")
+
         workspaceID = self.workspaceID
         email = self.email
         executor = ModelExecutor(apiKey)
@@ -570,57 +576,80 @@ class LlumoClient:
 
         results = []
 
-        # Iterate through combinations
        for combo in combinations:
             for template in templates:
                 prompt = template
                 for k, v in combo.items():
                     prompt = prompt.replace(f"{{{{{k}}}}}", v)
-                # Add a row for each model
-                for model in model_aliases:
-                    row = {
-                        "template": template,
-                        "prompt": prompt,
-                        **combo,
-                        "model": model.value,
-                    }
 
+                row = {
+                    "prompt": prompt,
+                    **combo,
+                }
+
+                for i, model in enumerate(model_aliases, 1):
                     try:
                         provider = getProviderFromModel(model)
-                        response = executor.execute(
-                            provider, model.value, prompt, apiKey
-                        )
-                        row["output"] = response
+                        response = executor.execute(provider, model.value, prompt, apiKey)
+                        outputKey = f"output_{i}"
+                        row[outputKey] = response
                     except Exception as e:
-                        row["output"] = f"Error: {str(e)}"
+                        row[f"output_{i}"] = str(e)
 
-                    results.append(row)
+                results.append(row)
+
+
+
         df = pd.DataFrame(results)
-        if toEvaluate:
 
-            res = self.evaluateMultiple(df, eval=eval, prompt_template=str(templates[0]))
+
+        if toEvaluate==True:
+            dfWithEvals = df.copy()
+            for i, model in enumerate(model_aliases,1):
+                outputColName = f"output_{i}"
+                try:
+                    res = self.evaluateMultiple(
+                        df,
+                        eval=eval,
+                        prompt_template=str(templates[0]),
+                        outputColName=outputColName,
+                    )
+
+                    # Rename all new columns with _i+1 (e.g., _1, _2)
+                    for evalMetric in eval:
+                        scoreCol = f"{evalMetric}"
+                        reasonCol = f"{evalMetric} Reason"
+                        if scoreCol in res.columns:
+                            res = res.rename(columns={scoreCol: f"{scoreCol}_{i}"})
+                        if reasonCol in res.columns:
+                            res = res.rename(columns={reasonCol: f"{reasonCol}_{i}"})
+
+                    # Drop duplicated columns from df (like prompt, variables, etc.)
+                    newCols = [col for col in res.columns if col not in dfWithEvals.columns]
+                    dfWithEvals = pd.concat([dfWithEvals, res[newCols]], axis=1)
+
+                except Exception as e:
+                    print(f"Evaluation failed for model {model.value}: {str(e)}")
 
             if createExperiment:
                 pd.set_option("future.no_silent_downcasting", True)
-                res = res.fillna("Some error occured")
-                if createPlayground(email, workspaceID, res):
-                    print(
-                        "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
-                    )
+                dfWithEvals = dfWithEvals.fillna("Some error occurred")
+                if createPlayground(email, workspaceID, dfWithEvals, promptText=templates[0]):
+
+                    print("Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
                 else:
-                    return res
-
+                    return dfWithEvals
         else:
-            if createExperiment:
+            if createExperiment==True:
                 pd.set_option("future.no_silent_downcasting", True)
-                df = df.fillna("Some error occured")
-                if createPlayground(email, workspaceID, df):
-                    print(
-                        "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
-                    )
-                else:
+                df = df.fillna("Some error occurred")
+
+                if createPlayground(email, workspaceID, df, promptText=templates[0]):
+                    print("Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results.")
+                else :
                     return df
 
+
     # this function generates an output using llm and tools and evaluate that output
     def evaluateAgents(
         self,
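Note: as rewritten above, run_sweep now builds one row per rendered prompt with a column per model (output_1, output_2, …), and when toEvaluate is set, each metric's score and reason columns get the same numeric suffix. A sketch of the resulting layout with made-up values; only the column-naming scheme comes from the code above:

    import pandas as pd

    # Two models, one metric ("Response Correctness"); values are illustrative.
    dfWithEvals = pd.DataFrame([{
        "prompt": "Summarize: some input text",
        "doc": "some input text",
        "output_1": "summary from the first model",
        "output_2": "summary from the second model",
        "Response Correctness_1": "90",
        "Response Correctness Reason_1": "covers all key points",
        "Response Correctness_2": "55",
        "Response Correctness Reason_2": "omits a key detail",
    }])
    print(dfWithEvals.columns.tolist())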
@@ -628,6 +657,7 @@ class LlumoClient:
         model,
         agents,
         model_api_key=None,
+        evals=["Final Task Alignment"],
         prompt_template="Give answer for the given query: {{query}}",
         createExperiment: bool = False,
     ):
@@ -638,12 +668,14 @@ class LlumoClient:
         toolResponseDf = LlumoAgentExecutor.run(
             dataframe, agents, model=model, model_api_key=model_api_key
         )
-        evals = [
-            "Tool Reliability",
-            "Stepwise Progression",
-            "Tool Selection Accuracy",
-            "Final Task Alignment",
-        ]
+
+
+        # evals = [
+        #     "Tool Reliability",
+        #     "Stepwise Progression",
+        #     "Tool Selection Accuracy",
+        #     "Final Task Alignment",
+        # ]
 
         for eval in evals:
             # Perform evaluation
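Note: the agent-evaluation entry points now take the metric list as a parameter, defaulting to ["Final Task Alignment"] instead of always running the four hard-coded agent metrics. A hedged usage sketch; the import path, constructor keyword, and agent objects are assumptions, not confirmed by this diff:

    from llumo import LlumoClient  # import path assumed from llumo/__init__.py

    client = LlumoClient(api_key="llumo-...")  # hypothetical key
    resultDf = client.evaluateAgents(
        df,                      # expected to hold 'query', 'messageHistory', 'tools' columns
        model="GPT_4o",          # illustrative model name
        agents=myAgents,         # your agent definitions
        evals=["Tool Reliability", "Final Task Alignment"],
    )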
@@ -667,31 +699,37 @@ class LlumoClient:
     def evaluateAgentResponses(
         self,
         dataframe,
-        prompt_template="Give answer for the given query: {{query}}",
+        evals=["Final Task Alignment"],
+        outputColName="output",
         createExperiment: bool = False,
     ):
         try:
             if "query" and "messageHistory" and "tools" not in dataframe.columns:
                 raise ValueError(
-                    "DataFrame must contain 'query', 'messageHistory', and 'tools' columns"
+                    "DataFrame must contain 'query', 'messageHistory','output' ,and 'tools' columns. Make sure the columns names are same as mentioned here."
                 )
-            evals = [
-                "Tool Reliability",
-                "Stepwise Progression",
-                "Tool Selection Accuracy",
-                "Final Task Alignment",
-            ]
+            prompt_template="Give answer for the given query: {{query}}"
+
+            # evals = [
+            #     "Tool Reliability",
+            #     "Stepwise Progression",
+            #     "Tool Selection Accuracy",
+            #     "Final Task Alignment",
+            # ]
             toolResponseDf = dataframe.copy()
             for eval in evals:
                 # Perform evaluation
                 toolResponseDf = self.evaluate(
-                    toolResponseDf, eval=eval, prompt_template=prompt_template
+                    toolResponseDf, eval=eval, prompt_template=prompt_template,outputColName=outputColName
                 )
+
+
             return toolResponseDf
 
         except Exception as e:
             raise e
 
+
     def runDataStream(
         self,
         dataframe,
@@ -852,7 +890,7 @@ class LlumoClient:
             pd.set_option("future.no_silent_downcasting", True)
             df = dataframe.fillna("Some error occured").astype(object)
 
-            if createPlayground(email, workspaceID, df):
+            if createPlayground(email, workspaceID, df,queryColName=queryColName, dataStreamName=streamId):
                 print(
                     "Your data has been saved in the Llumo Experiment. Visit https://app.llumo.ai/evallm to see the results."
                 )
llumo/helpingFuntions.py CHANGED
@@ -7,6 +7,7 @@ import requests
 import json
 import base64
 import os
+import re
 
 subscriptionUrl = "https://app.llumo.ai/api/workspace/record-extra-usage"
 getStreamdataUrl = "https://app.llumo.ai/api/data-stream/all"
@@ -18,6 +19,8 @@ uploadColList = (
 uploadRowList = (
     "https://app.llumo.ai/api/New-Eval-API/new-upload-flow/uploadRowsInDBPlayground"
 )
+createInsightUrl = "https://app.llumo.ai/api/New-Eval-API/insights-api/generate-playground-insights"
+getPlaygroundInsightsUrl="https://app.llumo.ai/api/New-Eval-API/insights-api/get-all-playground-insights"
 
 
 def getProcessID():
@@ -210,30 +213,151 @@ def deleteColumnListInPlayground(workspaceID: str, playgroundID: str):
     return None
 
 
-def createColumn(workspaceID, dataframe, playgroundID):
-
+def createColumn(workspaceID, dataframe, playgroundID, promptText=None,queryColName=None,outputColName= "output",dataStreamName=None,definationMapping=None):
     if len(dataframe) > 100:
         dataframe = dataframe.head(100)
         print("⚠️ Dataframe truncated to 100 rows for upload.")
 
-    playgroundID = playgroundID
-
     coltemplate = {
         "workspaceID": workspaceID,
         "playgroundID": playgroundID,
         "columnListToUpload": [],
     }
+    allEvals = ['Response Completeness', 'Response Bias', 'Response Harmfulness', 'Input Toxicity', 'Input Harmfulness', 'Context Utilization', 'Relevance Retention', 'Semantic Cohesion', 'Final Task Alignment', 'Tool Reliability', 'Response Correctness', 'Response Toxicity', 'Input Bias', 'Input Relevancy', 'Redundancy Reduction', 'Response Sentiment', 'Tool Selection Accuracy', 'Stepwise Progression', 'Hallucination', 'Faithfulness', 'Answer Relevancy', 'Context Precision', 'Answer Similarity', 'Harmfulness', 'Maliciousness', 'Coherence', 'Answer Correctness', 'Context Recall', 'Context Entity Recall', 'Conciseness', 'customEvalColumn', 'Groundedness', 'Memory Utilization', 'Input Relevancy (Multi-turn)']
+
+
+
+    # Create a mapping of column names to unique column IDs
+    columnIDMapping = {}
 
+    # Iterate over each column in the dataframe
     for indx, col in enumerate(dataframe.columns):
-        template = {
-            "label": col,
-            "type": "VARIABLE",
-            "variableType": "STRING",
-            "order": indx,
-            "columnID": col,
-        }
+        # Generate a unique column ID using uuid
+        columnID = str(uuid.uuid4().hex[:8])
+
+        columnIDMapping[col] = columnID
+
+
+        if col.startswith('output'):
+            # For output columns, create the prompt template with promptText
+            if promptText:
+                # Extract variables from promptText and set them as dependencies
+                dependencies = []
+
+                # Find variables inside {{variable}}
+                variables = re.findall(r'{{(.*?)}}', promptText)
+
+                # Loop through each variable and check if it exists as a column name
+                for var in variables:
+                    varName = var.strip()
+                    if varName in columnIDMapping:  # Check if the variable is a column name
+                        dependencies.append(columnIDMapping[varName])  # Add its columnID
+
+                # Now update the template for the output column
+
+                template={
+                    "provider": "OPENAI",
+                    "model": "GPT_4o",
+                    "promptText": promptText,
+                    "modelOptions": {
+                        "temperature": 0,
+                        "frequencyPenalty": 0,
+                        "presencePenalty": 0,
+                        "maxToken": 8192
+                    },
+                    "toolConfig": "none",
+                    "concurrency": "",
+                    "outputType": "STRING",
+                    "isPromptSelected": True,
+                    "isSmartPromptSelected": False,
+                    "dependency": dependencies,  # Use the dependencies extracted from promptText
+                    "columnID": columnID,  # Use the generated column ID
+                    "label": col,
+                    "type": "PROMPT",
+                    "order": indx,
+                }
+
+        elif col.startswith('Data '):
+            if queryColName and dataStreamName:
+                dependencies = []
+                dependencies.append(columnIDMapping[queryColName])
+                template = {
+                    "variableType": "STRING",
+                    "dependency": dependencies,
+                    "dataStreamName": dataStreamName,
+                    "query": columnIDMapping[queryColName],
+                    "columnID": columnID,  # Use the generated column ID
+                    "label": "Data stream",
+                    "type": "DATA_STREAM",
+                    "order": indx}
+
+        elif col in allEvals:
+
+            dependencies = []
+            variables = re.findall(r'{{(.*?)}}', promptText)
+
+            # Loop through each variable and check if it exists as a column name
+            for var in variables:
+                varName = var.strip()
+                if varName in columnIDMapping:  # Check if the variable is a column name
+                    dependencies.append(columnIDMapping[varName])
+
+            dependencies.append(columnIDMapping[outputColName])  # Add the output column ID
+            longDef = definationMapping.get(col, {}).get(col, "")
+            shortDef ="You have run this from SDK"
+            enum = col.upper().replace(" ","_")
+
+            template = {
+                "analytics": [
+                    col.lower().replace(" ","_")
+                ],
+                "evaluationMetric": "ALL",
+                "evaluationModel": "GEMINI_PRO",
+                "selectPrompt": columnIDMapping[outputColName],
+                "scoreCondition": "GREATER_THAN",
+                "scoreValue": "50",
+                "scoreResult": "PASS",
+                "llmKpi": col,
+                "setRules": True,
+                "type": "EVAL",
+                "evalType": "LLM",
+                "similarityMetric": None,
+                "embeddingModel": None,
+                "groundTruth": None,
+                "dataStream": None,
+                "context": None,
+                "dependency": [columnIDMapping[outputColName]],
+                "hallucinationFields": {
+                    "query": None,
+                    "context": None,
+                    "output": None
+                },
+                "definition": longDef,
+                "analyticsENUM": enum,
+                "prompt": shortDef,
+                "analyticsName": col,
+                "columnID": str(uuid.uuid4().hex[:8]),
+                "label": col,
+                "order": indx
+            }
+
+
+
+
+        else:
+
+            template = {
+                "label": col,  # Label is the column name
+                "type": "VARIABLE",  # Default type for non-output columns
+                "variableType": "STRING",
+                "order": indx,
+                "columnID": columnID,  # Use the generated column ID
+            }
+
+        # Append the template to the column list
         coltemplate["columnListToUpload"].append(template)
 
+    # Prepare the row template structure
     rowTemplate = {
         "workspaceID": workspaceID,
         "playgroundID": playgroundID,
@@ -241,14 +365,22 @@ def createColumn(workspaceID, dataframe, playgroundID):
         "columnList": coltemplate["columnListToUpload"],
     }
 
+    # Populate dataToUploadList with rows from the dataframe
     for indx, row in dataframe.iterrows():
-        row_dict = row.to_dict()
+        row_dict = {}
+
+        # For each column, we need to map the column ID to the corresponding value in the row
+        for col in dataframe.columns:
+            columnID = columnIDMapping[col]  # Get the columnID from the mapping
+            row_dict[columnID] = row[col]  # Map the columnID to the value in the row
+
+        # Add the row index (if necessary)
         row_dict["pIndex"] = indx
         rowTemplate["dataToUploadList"].append(row_dict)
 
+    # Return the column template, row template, and the column ID mapping
     return coltemplate, rowTemplate
 
-
 def uploadColumnListInPlayground(payload):
     url = uploadColList
     headers = {
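Note: createColumn now keys every uploaded cell by a generated eight-hex-character columnID rather than by the column label, and wires prompt and eval columns to their inputs by scanning promptText for {{variable}} placeholders. A self-contained sketch of that ID mapping and dependency extraction; the column names and prompt here are illustrative:

    import re
    import uuid

    columns = ["query", "context", "output"]
    promptText = "Answer the query: {{query}} using {{context}}"

    # One short random ID per dataframe column, as createColumn does.
    columnIDMapping = {col: uuid.uuid4().hex[:8] for col in columns}

    # Placeholders inside {{...}} that match a column become dependencies.
    dependencies = [
        columnIDMapping[var.strip()]
        for var in re.findall(r"{{(.*?)}}", promptText)
        if var.strip() in columnIDMapping
    ]
    print(dependencies)  # e.g. ['3f2a9c1b', '7d0e44aa']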
@@ -292,10 +424,11 @@ def uploadRowsInDBPlayground(payload):
         return None
 
 
-def createPlayground(email, workspaceID, df):
+def createPlayground(email, workspaceID, df, promptText=None,queryColName=None,dataStreamName=None,definationMapping=None,outputColName="output"):
+
     playgroundId = str(createEvalPlayground(email=email, workspaceID=workspaceID))
     payload1, payload2 = createColumn(
-        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId
+        workspaceID=workspaceID, dataframe=df, playgroundID=playgroundId, promptText=promptText,queryColName=queryColName,dataStreamName=dataStreamName,definationMapping=definationMapping,outputColName=outputColName
     )
     deleteExistingRows = deleteColumnListInPlayground(
         workspaceID=workspaceID, playgroundID=playgroundId
@@ -305,3 +438,56 @@ def createPlayground(email, workspaceID, df):
 
     if rowListUpload:
         return True
+
+
+
+def getPlaygroundInsights(workspaceID: str, activePlayground: str):
+    headers = {
+
+        "Content-Type": "application/json",
+    }
+
+    # Initial request to generate playground insights
+    payload = {
+        "activePlayground": activePlayground,
+        "workspaceID": workspaceID,
+    }
+
+    urlGenerate = createInsightUrl
+
+    responseGenerate = requests.post(urlGenerate, json=payload, headers=headers)
+
+    if responseGenerate.status_code == 200:
+        responseJson = responseGenerate.json()
+
+        insightStatus = responseJson.get("data", {}).get("insight", False)
+
+        if insightStatus:
+            # If insight is true, request to get all playground insights
+            urlGetAll = getPlaygroundInsightsUrl
+
+            responseGetAll = requests.post(urlGetAll, json=payload, headers=headers)
+
+            if responseGetAll.status_code == 200:
+                responseAllJson = responseGetAll.json()
+
+                data = responseAllJson.get("data", {}).get("data", [])
+
+                # Extract insight and solution
+                insights = []
+                for item in data:
+                    insight = item.get("insight", "")
+                    solution = item.get("solution", "")
+                    insights.append({"insight": insight, "solution": solution})
+
+                return insights
+            else:
+                print(f"Error fetching all insights: {responseGetAll.status_code} - {responseGetAll.text}")
+                return None
+        else:
+            print("No insight generated.")
+            return None
+    else:
+        print(f"Error generating insight: {responseGenerate.status_code} - {responseGenerate.text}")
+        return None
+
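Note: the new getPlaygroundInsights helper calls the generate endpoint and, if the response flags an insight, fetches all insights and returns them as a list of {"insight": ..., "solution": ...} dicts (None on any failure). A usage sketch with placeholder IDs; note that, as it appears in this diff, the function sends no authentication header:

    from llumo.helpingFuntions import getPlaygroundInsights  # module name as in the package

    insights = getPlaygroundInsights(
        workspaceID="my-workspace-id",        # placeholder
        activePlayground="my-playground-id",  # placeholder
    )
    if insights:
        for item in insights:
            print(item["insight"], "->", item["solution"])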
llumo/sockets.py CHANGED
@@ -101,8 +101,8 @@ class LlumoSocketClient:
             raise RuntimeError(f"WebSocket connection failed: {e}")
 
     def listenForResults(self, min_wait=30, max_wait=300, inactivity_timeout=50, expected_results=None):
-        if not self._connected:
-            raise RuntimeError("WebSocket is not connected. Call connect() first.")
+        # if not self._connected:
+        #     raise RuntimeError("WebSocket is not connected. Call connect() first.")
 
         self._expected_results = expected_results  # NEW
         start_time = time.time()
@@ -128,7 +128,7 @@ class LlumoSocketClient:
                 self._listening_done.set()
                 break
 
-            time.sleep(1)
+            time.sleep(3)
 
         timeout_thread = threading.Thread(target=timeout_watcher, daemon=True)
         timeout_thread.start()
llumo-0.2.13b1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llumo
-Version: 0.2.12
+Version: 0.2.13b1
 Summary: Python SDK for interacting with the Llumo ai API.
 Home-page: https://www.llumo.ai/
 Author: Llumo
llumo-0.2.13b1.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
+llumo/client.py,sha256=pzmJkz5LRF3h1WgjmezNnJEUAZ9_5nF47eW489F9-y4,36026
+llumo/exceptions.py,sha256=iCj7HhtO_ckC2EaVBdXbAudNpuMDsYmmMEV5lwynZ-E,1854
+llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
+llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
+llumo/helpingFuntions.py,sha256=ah0FUQcRV3gfguvjQQ_aZzq59hpJttqAPJdjJVNYdFc,17110
+llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
+llumo/sockets.py,sha256=0BCcdCaiXDR7LO_9NIYA6urtpgdmyWW2M1US67G9Eus,5583
+llumo-0.2.13b1.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.13b1.dist-info/METADATA,sha256=Kyb0OFYTsOosmZ6Rcok4LNgWqVsUldzjeXmnw2vOnGA,1493
+llumo-0.2.13b1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.13b1.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.13b1.dist-info/RECORD,,
llumo-0.2.12.dist-info/RECORD REMOVED
@@ -1,13 +0,0 @@
-llumo/__init__.py,sha256=O04b4yW1BnOvcHzxWFddAKhtdBEhBNhLdb6xgnpHH_Q,205
-llumo/client.py,sha256=QMasCjzRmL38RuMUHoY39_ge2nIRdSOECjj1daicd-k,34144
-llumo/exceptions.py,sha256=iCj7HhtO_ckC2EaVBdXbAudNpuMDsYmmMEV5lwynZ-E,1854
-llumo/execution.py,sha256=x88wQV8eL99wNN5YtjFaAMCIfN1PdfQVlAZQb4vzgQ0,1413
-llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
-llumo/helpingFuntions.py,sha256=lG_d3lQgJj6pI7v1YdLqdPojrLCNwybKz29zXrGaL5k,9090
-llumo/models.py,sha256=YH-qAMnShmUpmKE2LQAzQdpRsaXkFSlOqMxHwU4zBUI,1560
-llumo/sockets.py,sha256=Qxxqtx3Hg07HLhA4QfcipK1ChiOYhHZBu02iA6MfYlQ,5579
-llumo-0.2.12.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
-llumo-0.2.12.dist-info/METADATA,sha256=CYu8qMtD-XZuoQzC7YPZRlNgetEQPjMT1RiMGnHdZJs,1491
-llumo-0.2.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llumo-0.2.12.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
-llumo-0.2.12.dist-info/RECORD,,