llumo 0.2.42__tar.gz → 0.2.44__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {llumo-0.2.42/llumo.egg-info → llumo-0.2.44}/PKG-INFO +1 -1
  2. {llumo-0.2.42 → llumo-0.2.44}/llumo/client.py +317 -84
  3. {llumo-0.2.42 → llumo-0.2.44}/llumo/exceptions.py +9 -0
  4. {llumo-0.2.42 → llumo-0.2.44}/llumo/helpingFuntions.py +169 -0
  5. {llumo-0.2.42 → llumo-0.2.44}/llumo/llumoLogger.py +3 -2
  6. {llumo-0.2.42 → llumo-0.2.44/llumo.egg-info}/PKG-INFO +1 -1
  7. {llumo-0.2.42 → llumo-0.2.44}/LICENSE +0 -0
  8. {llumo-0.2.42 → llumo-0.2.44}/MANIFEST.in +0 -0
  9. {llumo-0.2.42 → llumo-0.2.44}/README.md +0 -0
  10. {llumo-0.2.42 → llumo-0.2.44}/llumo/__init__.py +0 -0
  11. {llumo-0.2.42 → llumo-0.2.44}/llumo/callback.py +0 -0
  12. {llumo-0.2.42 → llumo-0.2.44}/llumo/callbacks-0.py +0 -0
  13. {llumo-0.2.42 → llumo-0.2.44}/llumo/chains.py +0 -0
  14. {llumo-0.2.42 → llumo-0.2.44}/llumo/execution.py +0 -0
  15. {llumo-0.2.42 → llumo-0.2.44}/llumo/functionCalling.py +0 -0
  16. {llumo-0.2.42 → llumo-0.2.44}/llumo/google.py +0 -0
  17. {llumo-0.2.42 → llumo-0.2.44}/llumo/llumoSessionContext.py +0 -0
  18. {llumo-0.2.42 → llumo-0.2.44}/llumo/models.py +0 -0
  19. {llumo-0.2.42 → llumo-0.2.44}/llumo/openai.py +0 -0
  20. {llumo-0.2.42 → llumo-0.2.44}/llumo/sockets.py +0 -0
  21. {llumo-0.2.42 → llumo-0.2.44}/llumo.egg-info/SOURCES.txt +0 -0
  22. {llumo-0.2.42 → llumo-0.2.44}/llumo.egg-info/dependency_links.txt +0 -0
  23. {llumo-0.2.42 → llumo-0.2.44}/llumo.egg-info/requires.txt +0 -0
  24. {llumo-0.2.42 → llumo-0.2.44}/llumo.egg-info/top_level.txt +0 -0
  25. {llumo-0.2.42 → llumo-0.2.44}/setup.cfg +0 -0
  26. {llumo-0.2.42 → llumo-0.2.44}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.42
+ Version: 0.2.44
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo
@@ -8,10 +8,12 @@ import json
  import uuid
  import warnings
  import os
-
+ from typing import List, Dict, Optional
  import itertools
  import pandas as pd
- from typing import List, Dict
+ from typing import List, Dict, Any
+
+
  from .models import AVAILABLEMODELS, getProviderFromModel, Provider
  from .execution import ModelExecutor
  from .exceptions import LlumoAIError
@@ -50,6 +52,7 @@ class LlumoClient:
  self.evals = []
  self.processMapping = {}
  self.definationMapping = {}
+ self.ALL_USER_AIM = ['incorrectOutput', 'incorrectInput', 'hallucination', 'ragQuality', 'contextMismanagement', 'toolCallIssues', 'agentReasoning', 'stuckAgents', 'jsonErrors', 'highLatency', 'highCost', 'safetyBlocks', 'modelRouting', 'systemErrors', 'promptAdherence']

  def validateApiKey(self, evalName="Input Bias"):
  headers = {
@@ -780,7 +783,9 @@ class LlumoClient:
  self,
  data,
  promptTemplate="",
- systemInstructions = ""
+ systemInstructions = "",
+ multiTurnChat=False,
+ createMultipleLogs = True

  ):
  if isinstance(data, dict):
@@ -788,7 +793,7 @@ class LlumoClient:
  elif not isinstance(data, list):
  raise ValueError("Data should be a dict or a list of dicts.")

- dataframe = pd.DataFrame(data).astype(str)
+ dataframe = pd.DataFrame(data).astype(str).replace(to_replace="nan",value = "")
  workspaceID = None
  email = None

@@ -822,25 +827,45 @@ class LlumoClient:
  # Extract required fields
  query = row.get("query", "")
  context = row.get("context", "")
+
  tools = row.get("tools", "")
  groundTruth = row.get("groundTruth", "")
- messageHistory = row.get("messageHistory", "")
+
+ if multiTurnChat==False:
+ # ---- SINGLE TURN (existing behavior) ----
+ messageHistory = row.get("messageHistory", "")
+
+ else:
+ # ---- MULTI TURN ----
+ multiTurnData = createMessageHistory(data, index)
+
+ if createMultipleLogs==True:
+ # each row will get history till that point
+ messageHistory = multiTurnData
+ else:
+ # only final API call should contain full history
+ if index == len(dataframe) - 1:
+ messageHistory = multiTurnData
+ else:
+ messageHistory = ""
+
+
+
  intermediateSteps = row.get("intermediateSteps", "")
  output = row.get("output", "")

- # Initialize query and context
+ # # Initialize query and context
  # query = ""
  # context = ""
-
+ #
  # # Process prompt template if provided
  # if promptTemplate:
  # # Extract template variables
  # keys = re.findall(r"{{(.*?)}}", promptTemplate)
-
+ #
  # if not all([key in dataframe.columns for key in keys]):
- # # raise LlumoAIError.InvalidPromptTemplate()
- # break
-
+ # raise LlumoAIError.InvalidPromptTemplate()
+ #
  # # Populate template and separate query/context
  # populated_template = promptTemplate
  # for key in keys:
@@ -854,9 +879,9 @@ class LlumoClient:
  # else:
  # # Long value - add to context
  # context += f" {key}: {value}, "
-
+ #
  # query = populated_template.strip()
-
+ #
  # # Add any remaining context from other fields
  # if not context.strip():
  # for key, value in row.items():
@@ -894,6 +919,7 @@ class LlumoClient:


  }
+
  currentTime = datetime(2025, 8, 2, 10, 20, 15, tzinfo=timezone.utc)
  createdAt = currentTime.strftime("%Y-%m-%dT%H:%M:%S.000Z")
  rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
@@ -926,7 +952,7 @@ class LlumoClient:
  for i, batch in enumerate(allBatches, start=1):

  try:
- # print(batch)
+ # print(batch)
  response = postForDebugLogs(record=batch,workspaceID=workspaceID)

  # failure case inside response
@@ -943,6 +969,9 @@ class LlumoClient:

  print("Records Uploaded successfully. You may now review your logs at: https://app.llumo.ai/logs")

+
+ # Wait for results
+
  # def evaluateMultiple(
  # self,
  # data,
@@ -2084,14 +2113,149 @@ class LlumoClient:
  definationMapping=self.definationMapping,
  )

+ # def get_evaluate_multiple(
+ # self,
+ # data,
+ # evals
+ # ) -> List:
+
+ # print("Evaluating multiple data with evals:", data, evals)
+
+ # dataID = uuid.uuid4().hex[:36]
+
+ # self.validateApiKey()
+
+ # if not self.workspaceID:
+ # raise LlumoAIError("Workspace ID not found after validation.")
+
+ # payload = {
+ # "dataID": dataID,
+ # "data": data,
+ # "evals": evals,
+ # "workspaceID": self.workspaceID,
+ # "playgroundID": self.playgroundID,
+ # }
+
+ # print("payload", payload)
+
+ # # Create evaluation
+ # requests.post(
+ # "https://backend-api.llumo.ai/api/v1/sdk/create-evaluation-Multiple",
+ # json=payload,
+ # headers={
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {self.apiKey}",
+ # },
+ # )
+
+ # final_result_data = []
+
+ # cursor = "0-0"
+ # limit = 10
+ # all_data_fetched = False
+
+ # while not all_data_fetched:
+ # try:
+ # response = requests.get(
+ # "https://backend-api.llumo.ai/api/v1/sdk/poll",
+ # params={
+ # "cursor": cursor,
+ # "dataID": dataID,
+ # "limit": limit,
+ # },
+ # )
+
+ # response_data = response.json()
+ # result_data = response_data.get("debugLog", {})
+ # print("resultData", result_data)
+
+ # results = result_data.get("results", [])
+ # final_result_data.extend(results)
+
+ # cursor = result_data.get("nextCursor")
+
+ # if len(final_result_data) == len(data):
+ # all_data_fetched = True
+
+ # time.sleep(10)
+
+ # except Exception as error:
+ # print("error", error)
+ # all_data_fetched = True
+
+ # # Shape results
+ # formatted_results = []
+
+ # for row in final_result_data:
+ # score: Dict[str, float | None] = {}
+ # reasoning: Dict[str, str] = {}
+
+ # for eval_name in evals:
+ # details = row.get(eval_name)
+
+ # if isinstance(details, dict):
+ # if isinstance(details.get("value"), (int, float)):
+ # score[eval_name] = details.get("value")
+ # else:
+ # score[eval_name] = details.get("score")
+ # reasoning[eval_name] = details.get("reasoning", "")
+
+ # elif "score" in row:
+ # score[eval_name] = (
+ # row["score"] if isinstance(row["score"], (int, float)) else None
+ # )
+ # reasoning[eval_name] = row.get("reasoning", "")
+ # else:
+ # score[eval_name] = None
+ # reasoning[eval_name] = ""
+
+ # formatted_row = {
+ # "context": row.get("context", ""),
+ # "query": row.get("query", ""),
+ # "output": row.get("output", ""),
+ # "score": score,
+ # "reasoning": reasoning,
+ # }
+
+ # print(formatted_row)
+ # formatted_results.append(formatted_row)
+
+ # return formatted_results
+
  def getEvaluateMultiple(
- self,
- data,
- evals
- ) -> List:
+ self,
+ data,
+ evals,
+ promptTemplate="",
+ systemInstructions="",
+ multiTurnChat=False,
+ createMultipleLogs=True
+ ):

  # print("Evaluating multiple data with evals:", data, evals)
+ rawData = data.copy()
+ try:
+ self.validateApiKey()
+ except Exception as e:
+ print(f"Error during API key validation: {str(e)}")
+ if hasattr(e, "response") and getattr(e, "response", None) is not None:
+ print(f"Status code: {e.response.status_code}")
+ print(f"Response content: {e.response.text[:500]}...")
+ raise

+ userHits = checkUserHits(
+ self.workspaceID,
+ self.hasSubscribed,
+ self.trialEndDate,
+ self.subscriptionEndDate,
+ self.hitsAvailable,
+ len(data),
+ )
+
+ if not userHits["success"]:
+ raise LlumoAIError.InsufficientCredits(userHits["message"])
+
+ print("✅ SDK integration successful!")
  dataID = uuid.uuid4().hex[:36]

  self.validateApiKey()
@@ -2099,6 +2263,38 @@ class LlumoClient:
  if not self.workspaceID:
  raise LlumoAIError("Workspace ID not found after validation.")

+ if promptTemplate:
+ for row in data:
+ row["promptTemplate"] = promptTemplate
+ if systemInstructions:
+ for row in data:
+ row["systemInstructions"] = systemInstructions
+
+ if multiTurnChat == True:
+ if createMultipleLogs == True:
+ dataWithMessageHistory = []
+ for indx, row in enumerate(data):
+ messageHistory = createMessageHistory(data, currentIndex=indx)
+ rowCopy = row.copy()
+ rowCopy["messageHistory"] = messageHistory
+ dataWithMessageHistory.append(rowCopy)
+ data = dataWithMessageHistory
+ else:
+ dataWithMessageHistory = []
+ for indx, row in enumerate(data):
+ if indx == len(data) - 1:
+ messageHistory = createMessageHistory(data, currentIndex=indx)
+ rowCopy = row.copy()
+ rowCopy["messageHistory"] = messageHistory
+ dataWithMessageHistory.append(rowCopy)
+ else:
+ row["messageHistory"] = ""
+ dataWithMessageHistory.append(row)
+
+ data = dataWithMessageHistory
+
+ # print("DATA:")
+ # print(data)
  payload = {
  "dataID": dataID,
  "data": data,
@@ -2107,91 +2303,128 @@ class LlumoClient:
  "playgroundID": self.playgroundID,
  }

- # print("payload", payload)
+ # print("payload", payload)

- # Create evaluation
- requests.post(
- "https://backend-api.llumo.ai/api/v1/sdk/create-evaluation-Multiple",
- json=payload,
- headers={
- "Content-Type": "application/json",
- "Authorization": f"Bearer {self.apiKey}",
- },
+ # Create evaluation + Poll results (moved to helper)
+ final_result_data = dataPollingFuncForEval(
+ api_key=self.apiKey,
+ payload=payload,
+ data=data,
  )
+ # Shape results
+ formatted_results = []

- final_result_data = []

- cursor = "0-0"
- limit = 10
- all_data_fetched = False
+ for row in final_result_data:
+ # print("ROW: ",row)
+ result = []

- while not all_data_fetched:
- try:
- response = requests.get(
- "https://backend-api.llumo.ai/api/v1/sdk/poll",
- params={
- "cursor": cursor,
- "dataID": dataID,
- "limit": limit,
- },
- )
+ # Extract numeric keys ("0", "1", "2", ...)
+ numeric_keys = sorted(
+ [key for key in row.keys() if str(key).strip() != "" and str(key).isdigit()],
+ key=lambda x: int(x)
+ )

- response_data = response.json()
- result_data = response_data.get("debugLog", {})
- print("resultData", result_data)
+ for key in numeric_keys:
+ result.append(row[key])

- results = result_data.get("results", [])
- final_result_data.extend(results)
+ evalData={}
+ for key in row:
+ if key not in numeric_keys:
+ evalData[key]=row[key]

- cursor = result_data.get("nextCursor")
+ # evalResultDict = {"evaluation": result}
+ evalData = {}
+ for key in row:
+ if key not in numeric_keys:
+ evalData[key] = row[key]

- if len(final_result_data) == len(data):
- all_data_fetched = True
+ # evalResultDict = {"evaluation": result}

- time.sleep(10)
+ evalData["evaluation"] = result
+ formatted_results.append(evalData)

- except Exception as error:
- print("error", error)
- all_data_fetched = True
+ return {"llumoEval": formatted_results}
+ # return formatted_results

- # Shape results
- formatted_results = []
+ def getInsights(self,logs:List,userAim:List[str],promptTemplate:str = ""
+ ,systemInstructions:str="",multiTurnChat=False,createMultipleLogs=True):

- for row in final_result_data:
- score: Dict[str, float | None] = {}
- reasoning: Dict[str, str] = {}
+ try:
+ self.validateApiKey()
+ except Exception as e:
+ print(f"Error during API key validation: {str(e)}")
+ if hasattr(e, "response") and getattr(e, "response", None) is not None:
+ print(f"Status code: {e.response.status_code}")
+ print(f"Response content: {e.response.text[:500]}...")
+ raise
+
+ userHits = checkUserHits(
+ self.workspaceID,
+ self.hasSubscribed,
+ self.trialEndDate,
+ self.subscriptionEndDate,
+ self.hitsAvailable,
+ 1,
+ )
+
+ if not userHits["success"]:
+ raise LlumoAIError.InsufficientCredits(userHits["message"])

- for eval_name in evals:
- details = row.get(eval_name)
+ if len(logs)==0 :
+ raise LlumoAIError.emptyLogList()

- if isinstance(details, dict):
- if isinstance(details.get("value"), (int, float)):
- score[eval_name] = details.get("value")
+ if not isinstance(userAim, list):
+ raise TypeError(f"userAim must be list, got {type(userAim).__name__}")
+
+ if any(aim not in self.ALL_USER_AIM for aim in userAim):
+ errorMessage = f"Please pass a valid user aim. Only acceptable user aims are->{self.ALL_USER_AIM}"
+ raise LlumoAIError.invalidUserAim(details=errorMessage)
+
+
+
+ if multiTurnChat == True:
+ if createMultipleLogs == True:
+ dataWithMessageHistory = []
+ for indx, row in enumerate(logs):
+ messageHistory = createMessageHistory(logs, currentIndex=indx)
+ rowCopy = row.copy()
+ rowCopy["messageHistory"] = messageHistory
+ dataWithMessageHistory.append(rowCopy)
+ logs= dataWithMessageHistory
+ else:
+ dataWithMessageHistory = []
+ for indx, row in enumerate(logs):
+ if indx == len(logs) - 1:
+ messageHistory = createMessageHistory(logs, currentIndex=indx)
+ rowCopy = row.copy()
+ rowCopy["messageHistory"] = messageHistory
+ dataWithMessageHistory.append(rowCopy)
  else:
- score[eval_name] = details.get("score")
- reasoning[eval_name] = details.get("reasoning", "")
+ row["messageHistory"] = ""
+ dataWithMessageHistory.append(row)
+ logs = dataWithMessageHistory

- elif "score" in row:
- score[eval_name] = (
- row["score"] if isinstance(row["score"], (int, float)) else None
- )
- reasoning[eval_name] = row.get("reasoning", "")
- else:
- score[eval_name] = None
- reasoning[eval_name] = ""
-
- formatted_row = {
- "context": row.get("context", ""),
- "query": row.get("query", ""),
- "output": row.get("output", ""),
- "score": score,
- "reasoning": reasoning,
- }
+ if (promptTemplate!="") or systemInstructions!="":
+ logs = addPromptAndInstructionInLogs(logData = logs ,promptTemplate=promptTemplate,systemInstruction=systemInstructions)
+
+ # print("[LOGS: ]")
+ # print(logs)
+
+ # 1. Create Report
+ print("✅ Generating Insights Now....")
+ dataID = uuid.uuid4().hex[:36]
+ payload = {
+ "data":logs,
+ "userAim":userAim,
+ "dataID":dataID
+ }

- # print(formatted_row)
- formatted_results.append(formatted_row)
+ # 2. Poll for Results
+ insight_result = dataPollingFuncForInsight(payload)
+ # llumoInsight = formattedInsightResponse(llmResponse=insight_result)

- return formatted_results
+ return {"llumoInsight": insight_result}


  class SafeDict(dict):
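
For orientation, here is a hedged usage sketch of the client surface introduced in 0.2.44 above. The method names, the multiTurnChat/createMultipleLogs flags, the userAim values, and the returned "llumoEval"/"llumoInsight" keys come from the changed code itself; the import path, the client constructor arguments, and the eval name "Hallucination" are assumptions for illustration only.

from llumo.client import LlumoClient  # module path taken from this diff; package-level re-export not verified

# Constructor arguments are an assumption -- the __init__ signature is not part of this diff.
client = LlumoClient(apiKey="YOUR_LLUMO_API_KEY")

# Two turns of one conversation; the field names match those read by the changed code.
data = [
    {"query": "What is the capital of France?", "output": "Paris.", "context": "geography"},
    {"query": "And of Italy?", "output": "Rome.", "context": "geography"},
]

# Multi-turn evaluation: with createMultipleLogs=True every row carries the
# stringified history of the rows before it (built by createMessageHistory).
evalResult = client.getEvaluateMultiple(
    data,
    evals=["Hallucination"],      # eval name is illustrative
    promptTemplate="",
    systemInstructions="",
    multiTurnChat=True,
    createMultipleLogs=True,
)
print(evalResult["llumoEval"])    # list of rows, each with an "evaluation" list attached

# Insight generation over the same rows; userAim entries must come from ALL_USER_AIM.
insights = client.getInsights(data, userAim=["hallucination"], multiTurnChat=True)
print(insights["llumoInsight"])
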
@@ -1,3 +1,4 @@
+
  class LlumoAIError(Exception):
  """Base class for all Llumo SDK-related errors."""

@@ -54,6 +55,14 @@ class LlumoAIError(Exception):
  def providerError(details):
  return LlumoAIError(details)

+ @staticmethod
+ def emptyLogList(details= "List of log object is empty. Ensure your logs have at least 1 log object."):
+ return LlumoAIError(details)
+
+ @staticmethod
+ def invalidUserAim(details= ""):
+ return LlumoAIError(details)
+
  # @staticmethod
  # def dateNotFound():
  # return LlumoAIError("Trial end date or subscription end date not found for the given user.")
@@ -29,6 +29,9 @@ createInsightUrl="https://app.llumo.ai/api/external/generate-insight-from-eval-f

  getCustomAnalyticsUrl="https://app.llumo.ai/api/workspace/get-all-analytics"

+
+
+
  def getProcessID():
  return f"{int(time.time() * 1000)}{uuid.uuid4()}"

@@ -739,6 +742,8 @@ def getCustomAnalytics(workspaceID):
  return {}


+ def normalize_md(s: str) -> str:
+ return "\n".join(line.lstrip() for line in s.splitlines())

  def postForDebugLogs(record: {},workspaceID):
  url = "https://backend-api.llumo.ai/api/v1/get-debug-log-for-upload"
@@ -834,3 +839,167 @@ def addSelectedTools(run: dict) -> dict:
  current_agent_step["metadata"]["toolSelected"].append(tool_name)

  return run
+
+ def createMessageHistory(data, currentIndex = 0):
+ conversationHistory = []
+ for dataObj in data[:currentIndex]:
+ conversationHistory.append(dataObj)
+ return f"{conversationHistory}"
+
+
+ def addPromptAndInstructionInLogs(logData=None,promptTemplate= "",systemInstruction=""):
+ logDataWithPrompt = []
+ for data in logData:
+ if isinstance(data,str):
+ logDataWithPrompt.append(data + f"**promptTemplate**={promptTemplate}\n **systemInstruction**={systemInstruction}")
+ elif isinstance(data,dict):
+ data["promptTemplate"]= promptTemplate
+ data["systemInstruction"]= systemInstruction
+ logDataWithPrompt.append(data)
+
+ else:
+ return logData
+ return logDataWithPrompt
+
+
+ def dataPollingFuncForEval(api_key, payload, data, poll_interval=10, limit=10):
+ # Create evaluation (POST)
+ requests.post(
+ "https://backend-api.llumo.ai/api/v1/sdk/create-evaluation-Multiple",
+ json=payload,
+ headers={
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {api_key}",
+ },
+ )
+
+ final_result_data = []
+
+ cursor = "0-0"
+ all_data_fetched = False
+
+ print("✅ Evaluation Started...")
+ while not all_data_fetched:
+ try:
+ response = requests.get(
+ "https://backend-api.llumo.ai/api/v1/sdk/poll",
+ params={
+ "cursor": cursor,
+ "dataID": payload["dataID"],
+ "limit": limit,
+ },
+ )
+
+ response_data = response.json()
+ result_data = response_data.get("debugLog", {})
+
+ results = result_data.get("results", [])
+ final_result_data.extend(results)
+
+ cursor = result_data.get("nextCursor")
+
+ if len(final_result_data) == len(data):
+ all_data_fetched = True
+
+ time.sleep(poll_interval)
+
+ except Exception as error:
+ print("error", error)
+ all_data_fetched = True
+
+ return final_result_data
+
+ def dataPollingFuncForInsight(payload, poll_interval=1, limit=10, max_polls=20):
+ dataID = payload["dataID"]
+
+ # -------------------------------
+ # 1. Create Insight Report (POST)
+ # -------------------------------
+ try:
+ create_url = "https://backend-api.llumo.ai/api/v1/sdk/create-insight-report"
+ response = requests.post(create_url, json=payload)
+ response.raise_for_status()
+ except requests.exceptions.RequestException as e:
+ error_data = e.response.json() if e.response else str(e)
+ print(f"Error in create request: {error_data}")
+ return None
+
+ # -------------------------------
+ # 2. Poll Insight Results (GET)
+ # -------------------------------
+ cursor = "0-0"
+ poll_count = 0
+ insight_result = []
+
+ while poll_count < max_polls:
+ poll_count += 1
+
+ try:
+ poll_params = {
+ "dataID": dataID,
+ "cursor": cursor,
+ "limit": limit,
+ }
+
+ poll_url = "https://backend-api.llumo.ai/api/v1/sdk/poll-insight-report"
+ poll_response = requests.get(poll_url, params=poll_params)
+ poll_response.raise_for_status()
+
+ data = poll_response.json()
+ debug_log = data.get("debugLog", {})
+ results = debug_log.get("results", [])
+ next_cursor = debug_log.get("nextCursor")
+
+ if results:
+ insight_result.extend(results)
+ break # same heuristic as original
+
+ if next_cursor != cursor:
+ cursor = next_cursor
+
+ except requests.exceptions.RequestException as e:
+ if e.response is not None and e.response.status_code == 404:
+ pass
+ else:
+ break
+
+ time.sleep(poll_interval)
+
+ return insight_result
+
+ def formattedInsightResponse(llmResponse):
+ try:
+ response = llmResponse
+ jsonResponse = response.json().get("response", {})
+ reasons = "\n- ".join(jsonResponse['reason'])
+ solutions = "\n- ".join(jsonResponse['solution'])
+ examples = "\n- ".join(jsonResponse['examples'])
+
+ formattedResponse = f"""
+ # 🔍 **{jsonResponse['insightTitle'].strip()}**
+ ---
+ ## 🧠 **Insight**
+ > {jsonResponse['insight'].strip()}
+ ---
+ ## 📝 **Description**
+ {jsonResponse['shortDescription'].strip()}
+ ---
+ ## 🔎 **Root Causes**
+ - {reasons.strip()}
+ ---
+ ## 🛠 **Solutions**
+ - {solutions.strip()}
+ ---
+ ## 📌 **Examples**
+ - {examples.strip()}
+ ---
+ """
+ formattedResponse = normalize_md(formattedResponse)
+ llumoInsight = formattedResponse
+
+
+
+ except Exception as e:
+ print("An error occurred. Please try again.")
+ llumoInsight = e
+ return llumoInsight
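
To make the new message-history behavior concrete, the following self-contained sketch copies createMessageHistory from the hunk above (re-indented) and runs it on two illustrative rows; the sample rows are not from the package.

def createMessageHistory(data, currentIndex=0):
    # Stringify every row that precedes the current one (as added in this release).
    conversationHistory = []
    for dataObj in data[:currentIndex]:
        conversationHistory.append(dataObj)
    return f"{conversationHistory}"

rows = [
    {"query": "What is RAG?", "output": "Retrieval-augmented generation."},
    {"query": "Give an example.", "output": "Searching docs before answering."},
]

print(createMessageHistory(rows, currentIndex=0))  # "[]" -- the first turn has no history
print(createMessageHistory(rows, currentIndex=1))  # stringified list containing only the first row
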
@@ -2,9 +2,10 @@ import requests


  class LlumoLogger:
- def __init__(self, apiKey: str, playground: str):
+ def __init__(self, apiKey: str, project: str):
  self.apiKey = apiKey
- self.playground = playground
+ # self.playground = playground
+ self.playground = project
  self.workspaceID = None
  self.playgroundID = None
  self.userEmailID = None
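
A hedged sketch of what this rename means for callers: the constructor keyword is now project (still stored on self.playground internally). The API key and project name below are placeholders, and any package-level re-export of LlumoLogger is not verified by this diff.

from llumo.llumoLogger import LlumoLogger  # module path taken from this diff

# 0.2.42: LlumoLogger(apiKey="...", playground="my-playground")
# 0.2.44: the same argument is now named `project`
logger = LlumoLogger(apiKey="YOUR_LLUMO_API_KEY", project="my-project")
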
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.42
+ Version: 0.2.44
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo