llumo 0.2.43__tar.gz → 0.2.45__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llumo-0.2.43/llumo.egg-info → llumo-0.2.45}/PKG-INFO +1 -1
- {llumo-0.2.43 → llumo-0.2.45}/llumo/client.py +38 -146
- llumo-0.2.45/llumo/exceptions.py +105 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/helpingFuntions.py +77 -14
- {llumo-0.2.43 → llumo-0.2.45}/llumo/llumoSessionContext.py +2 -1
- {llumo-0.2.43 → llumo-0.2.45/llumo.egg-info}/PKG-INFO +1 -1
- llumo-0.2.43/llumo/exceptions.py +0 -68
- {llumo-0.2.43 → llumo-0.2.45}/LICENSE +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/MANIFEST.in +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/README.md +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/__init__.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/callback.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/callbacks-0.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/chains.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/execution.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/functionCalling.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/google.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/llumoLogger.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/models.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/openai.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo/sockets.py +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo.egg-info/SOURCES.txt +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo.egg-info/dependency_links.txt +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo.egg-info/requires.txt +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/llumo.egg-info/top_level.txt +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/setup.cfg +0 -0
- {llumo-0.2.43 → llumo-0.2.45}/setup.py +0 -0
{llumo-0.2.43 → llumo-0.2.45}/llumo/client.py

@@ -2231,7 +2231,9 @@ class LlumoClient:
         multiTurnChat=False,
         createMultipleLogs=True
     ):
+
         # print("Evaluating multiple data with evals:", data, evals)
+        rawData = data.copy()
         try:
             self.validateApiKey()
         except Exception as e:
@@ -2303,91 +2305,47 @@ class LlumoClient:

            # print("payload", payload)

-            # Create evaluation
-
-
-
-
-                    "Content-Type": "application/json",
-                    "Authorization": f"Bearer {self.apiKey}",
-                },
+            # Create evaluation + Poll results (moved to helper)
+            final_result_data = dataPollingFuncForEval(
+                api_key=self.apiKey,
+                payload=payload,
+                data=data,
             )
-
-            final_result_data = []
-
-            cursor = "0-0"
-            limit = 10
-            all_data_fetched = False
-
-            print("✅ Evaluation Started...")
-            while not all_data_fetched:
-                try:
-                    response = requests.get(
-                        "https://backend-api.llumo.ai/api/v1/sdk/poll",
-                        params={
-                            "cursor": cursor,
-                            "dataID": dataID,
-                            "limit": limit,
-                        },
-                    )
-
-                    response_data = response.json()
-                    result_data = response_data.get("debugLog", {})
-                    # print("resultData", result_data)
-
-                    results = result_data.get("results", [])
-                    final_result_data.extend(results)
-
-                    cursor = result_data.get("nextCursor")
-
-                    if len(final_result_data) == len(data):
-                        all_data_fetched = True
-
-                    time.sleep(10)
-
-                except Exception as error:
-                    print("error", error)
-                    all_data_fetched = True
-
            # Shape results
            formatted_results = []

+
            for row in final_result_data:
-
-
+                # print("ROW: ",row)
+                result = []

-
-
+                # Extract numeric keys ("0", "1", "2", ...)
+                numeric_keys = sorted(
+                    [key for key in row.keys() if str(key).strip() != "" and str(key).isdigit()],
+                    key=lambda x: int(x)
+                )

-
-
-                        score[eval_name] = details.get("value")
-                    else:
-                        score[eval_name] = details.get("score")
-                    reasoning[eval_name] = details.get("reasoning", "")
+                for key in numeric_keys:
+                    result.append(row[key])

-
-
-
-
-                    reasoning[eval_name] = row.get("reasoning", "")
-                else:
-                    score[eval_name] = None
-                    reasoning[eval_name] = ""
-
-                formatted_row = {
-                    "query": row.get("query", ""),
-                    "context": row.get("context", ""),
-                    "output": row.get("output", ""),
-                    "score": score,
-                    "reasoning": reasoning,
-                }
+                evalData={}
+                for key in row:
+                    if key not in numeric_keys:
+                        evalData[key]=row[key]

-                #
-
+                # evalResultDict = {"evaluation": result}
+                evalData = {}
+                for key in row:
+                    if key not in numeric_keys:
+                        evalData[key] = row[key]

-
+                # evalResultDict = {"evaluation": result}

+                evalData["evaluation"] = result
+                formatted_results.append(evalData)
+
+            return {"llumoEval": formatted_results}
+            # return formatted_results

    def getInsights(self,logs:List,userAim:List[str],promptTemplate:str = ""
                    ,systemInstructions:str="",multiTurnChat=False,createMultipleLogs=True):

@@ -2456,80 +2414,14 @@ class LlumoClient:

        # 1. Create Report
        print("✅ Generating Insights Now....")
        dataID = uuid.uuid4().hex[:36]
-
-
-
-
-
-            }
-            create_url = "https://backend-api.llumo.ai/api/v1/sdk/create-insight-report"
-            response = requests.post(create_url, json=payload)
-
-            # Check if request was successful
-            response.raise_for_status()
-
-            # print(f"Create Response: {response.json()}")
-        except requests.exceptions.RequestException as e:
-            # Check for response data in the error
-            error_data = e.response.json() if e.response else str(e)
-            print(f"Error in create request: {error_data}")
-            return
+        payload = {
+            "data":logs,
+            "userAim":userAim,
+            "dataID":dataID
+        }

        # 2. Poll for Results
-
-        cursor = '0-0'
-        is_complete = False
-        max_polls = 20
-        poll_count = 0
-
-        insight_result = []
-        while not is_complete and poll_count < max_polls:
-            poll_count += 1
-
-            try:
-                poll_params = {
-                    'dataID': dataID,
-                    'cursor': cursor,
-                    'limit': 10
-                }
-                poll_url = f"https://backend-api.llumo.ai/api/v1/sdk/poll-insight-report"
-                poll_response = requests.get(poll_url, params=poll_params)
-                poll_response.raise_for_status()
-
-                data = poll_response.json()
-                # Accessing nested data: pollResponse.data.debugLog
-                debug_log = data.get('debugLog', {})
-                results = debug_log.get('results', [])
-                next_cursor = debug_log.get('nextCursor')
-
-                if results:
-                    insight_result.extend(results)
-
-                # Logic to handle cursor movement
-                if next_cursor == cursor and not results:
-                    # print("Long poll returned empty (timeout). Continuing...")
-                    pass
-                else:
-                    cursor = next_cursor
-
-                # Break condition for test (heuristic)
-                if len(insight_result):
-                    break
-
-            except requests.exceptions.RequestException as e:
-                error_msg = e.response.json() if e.response else str(e)
-                # print(f"Error in poll request: {error_msg}")
-
-                if e.response is not None and e.response.status_code == 404:
-                    # print("Resource not ready yet...")
-                    pass
-                else:
-                    # Fatal error, break the loop
-                    break
-
-            # Small delay to prevent tight loop (Equivalent to await sleep(1000))
-            time.sleep(1)
-
+        insight_result = dataPollingFuncForInsight(payload)
        # llumoInsight = formattedInsightResponse(llmResponse=insight_result)

        return {"llumoInsight": insight_result}

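Net effect of the client.py changes: evaluateMultiple and getInsights no longer poll the backend inline; both the create request and the cursor-based polling now live in the helpingFuntions helpers, and evaluateMultiple reshapes each polled row into a dict whose numeric keys ("0", "1", ...) become an "evaluation" list. A minimal sketch of that reshaping step, using a hypothetical polled row (real rows come from the /api/v1/sdk/poll endpoint):

    # Sketch of the row-shaping logic added in 0.2.45; the sample row is hypothetical.
    rows = [
        {"query": "What is RAG?", "output": "...", "0": {"Confidence": 90}, "1": {"Clarity": 80}},
    ]

    formatted_results = []
    for row in rows:
        numeric_keys = sorted((k for k in row if str(k).isdigit()), key=int)
        evalData = {k: v for k, v in row.items() if k not in numeric_keys}
        evalData["evaluation"] = [row[k] for k in numeric_keys]
        formatted_results.append(evalData)

    print({"llumoEval": formatted_results})
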
llumo-0.2.45/llumo/exceptions.py
ADDED

@@ -0,0 +1,105 @@
+
+class LlumoAIError(Exception):
+    """Base class for all Llumo SDK-related errors."""
+
+    def __init__(self, message):
+        self.message = message
+        super().__init__(self.message)
+
+    @staticmethod
+    def InvalidApiKey():
+        return LlumoAIError("The provided API key is invalid or unauthorized"
+                            "To fix this:\n"
+                            "1. Go to https://app.llumo.ai/getting-started\n"
+                            "2. Look at the top navigation bar (right side)\n"
+                            "3. Copy the API key shown under “API Key”\n"
+                            "4. Paste that key into your SDK configuration"
+        )
+
+    @staticmethod
+    def InvalidApiResponse():
+        return LlumoAIError("Invalid or UnexpectedError response from the API"
+                            "We received a response from the API, but it wasn’t in the expected format…”")
+
+    @staticmethod
+    def RequestFailed(detail="The request could not be completed."):
+        return LlumoAIError(
+            f"We were unable to complete the request to the Llumo API. "
+            f"{detail} "
+            "Please check your network connection or try again later."
+        )
+
+
+    @staticmethod
+    def InvalidJsonResponse():
+        return LlumoAIError("The API response is not in valid JSON format")
+
+    @staticmethod
+    def UnexpectedError(detail="Metric"):
+        return LlumoAIError(
+            f"We couldn’t find an evaluation named '{detail}'. "
+            f"Please check that the name is correct. "
+            f"If you’d like to run '{detail}', you can create a custom evaluation "
+            f"with the same name at https://app.llumo.ai/evallm."
+        )
+
+    @staticmethod
+    def EvalError(detail="Some error occured while processing"):
+        return LlumoAIError(f"error: {detail}")
+
+    @staticmethod
+    def InsufficientCredits(details="Your available credits have been exhausted."):
+        return LlumoAIError(
+            f"{details} "
+            "To continue running evaluations, please upgrade your plan or "
+            "increase your usage limits in the LLUMO AI dashboard at: "
+            "https://app.llumo.ai/settings."
+        )
+
+        # return LlumoAIError("LLumo hits exhausted")
+
+    @staticmethod
+    def InvalidPromptTemplate():
+        return LlumoAIError('''Make sure the prompt template fulfills the following criteria:
+                1. All the variables should be inside double curly braces. Example: Give answer for the {{query}}, based on given {{context}}.
+                2. The variables used in the prompt template must be present in the dataframe columns with the same name..
+                ''')
+
+    @staticmethod
+    def modelHitsExhausted(details = "Your credits for the selected model exhausted."):
+        return LlumoAIError(details)
+
+    @staticmethod
+    def dependencyError(details):
+        return LlumoAIError(details)
+
+    @staticmethod
+    def providerError(details):
+        return LlumoAIError(details)
+
+    @staticmethod
+    def emptyLogList(details="No logs were provided for analysis."):
+        return LlumoAIError(
+            f"{details} "
+            "Please pass at least one log entry. "
+            "You can find the correct log format at "
+            "https://app.llumo.ai/getting-started "
+            "under the “Run SDK with zero data egress” section."
+        )
+
+    @staticmethod
+    def invalidUserAim(details=""):
+        return LlumoAIError(
+            "Invalid userAim detected. "
+            "Each userAim must match one of the supported categories used for analysis. "
+            "Valid options include:\n"
+            "[incorrectOutput, incorrectInput, hallucination, ragQuality, "
+            "contextMismanagement, toolCallIssues, agentReasoning, stuckAgents, "
+            "jsonErrors, highLatency, highCost, safetyBlocks, modelRouting, "
+            "systemErrors, promptAdherence]."
+        )
+
+
+    # @staticmethod
+    # def dateNotFound():
+    #     return LlumoAIError("Trial end date or subscription end date not found for the given user.")

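The rewritten exceptions module keeps the 0.2.43 pattern: LlumoAIError subclasses Exception, and each error condition is a static factory that returns a ready-to-raise instance, now carrying longer user-facing remediation text. A hedged usage sketch (the calling code below is illustrative, not taken from the SDK):

    # Illustrative only: how the factory-style errors above are intended to be used.
    from llumo.exceptions import LlumoAIError

    def checkKey(apiKey):
        if not apiKey:
            # Factories return an exception instance; the caller raises it.
            raise LlumoAIError.InvalidApiKey()

    try:
        checkKey("")
    except LlumoAIError as e:
        print(e.message)  # full multi-line guidance string
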
{llumo-0.2.43 → llumo-0.2.45}/llumo/helpingFuntions.py

@@ -862,48 +862,111 @@ def addPromptAndInstructionInLogs(logData=None,promptTemplate= "",systemInstruct
     return logDataWithPrompt


-def dataPollingFuncForEval(
-
-    pollUrl = "https://backend-api.llumo.ai/api/v1/sdk/poll"
-
-    # Create evaluation
+def dataPollingFuncForEval(api_key, payload, data, poll_interval=10, limit=10):
+    # Create evaluation (POST)
     requests.post(
-
-        json=
-        headers=
+        "https://backend-api.llumo.ai/api/v1/sdk/create-evaluation-Multiple",
+        json=payload,
+        headers={
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {api_key}",
+        },
     )
-    # print("payload", payload)

     final_result_data = []
+
+    cursor = "0-0"
     all_data_fetched = False

     print("✅ Evaluation Started...")
     while not all_data_fetched:
         try:
             response = requests.get(
-
-                params=
+                "https://backend-api.llumo.ai/api/v1/sdk/poll",
+                params={
+                    "cursor": cursor,
+                    "dataID": payload["dataID"],
+                    "limit": limit,
+                },
             )

             response_data = response.json()
             result_data = response_data.get("debugLog", {})
-            # print("resultData", result_data)

             results = result_data.get("results", [])
             final_result_data.extend(results)

             cursor = result_data.get("nextCursor")

-            if len(final_result_data) ==
+            if len(final_result_data) == len(data):
                 all_data_fetched = True

-            time.sleep(
+            time.sleep(poll_interval)

         except Exception as error:
             print("error", error)
             all_data_fetched = True
+
     return final_result_data

+def dataPollingFuncForInsight(payload, poll_interval=1, limit=10, max_polls=20):
+    dataID = payload["dataID"]
+
+    # -------------------------------
+    # 1. Create Insight Report (POST)
+    # -------------------------------
+    try:
+        create_url = "https://backend-api.llumo.ai/api/v1/sdk/create-insight-report"
+        response = requests.post(create_url, json=payload)
+        response.raise_for_status()
+    except requests.exceptions.RequestException as e:
+        error_data = e.response.json() if e.response else str(e)
+        print(f"Error in create request: {error_data}")
+        return None
+
+    # -------------------------------
+    # 2. Poll Insight Results (GET)
+    # -------------------------------
+    cursor = "0-0"
+    poll_count = 0
+    insight_result = []
+
+    while poll_count < max_polls:
+        poll_count += 1
+
+        try:
+            poll_params = {
+                "dataID": dataID,
+                "cursor": cursor,
+                "limit": limit,
+            }
+
+            poll_url = "https://backend-api.llumo.ai/api/v1/sdk/poll-insight-report"
+            poll_response = requests.get(poll_url, params=poll_params)
+            poll_response.raise_for_status()
+
+            data = poll_response.json()
+            debug_log = data.get("debugLog", {})
+            results = debug_log.get("results", [])
+            next_cursor = debug_log.get("nextCursor")
+
+            if results:
+                insight_result.extend(results)
+                break  # same heuristic as original
+
+            if next_cursor != cursor:
+                cursor = next_cursor
+
+        except requests.exceptions.RequestException as e:
+            if e.response is not None and e.response.status_code == 404:
+                pass
+            else:
+                break
+
+        time.sleep(poll_interval)
+
+    return insight_result
+
 def formattedInsightResponse(llmResponse):
     try:
         response = llmResponse

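Both new helpers follow the same pattern: POST to a create endpoint, then GET the matching poll endpoint with a cursor until enough results arrive (all rows for evaluations, any results for insights). A generic sketch of that loop; fetch_page is a hypothetical stand-in for the requests.get call against the poll endpoints:

    import time

    def pollUntilDone(fetch_page, expected_count, poll_interval=10):
        # Generic form of the cursor loop used by dataPollingFuncForEval above.
        # fetch_page(cursor) is assumed to return (results, next_cursor).
        cursor, collected = "0-0", []
        while len(collected) < expected_count:
            try:
                results, next_cursor = fetch_page(cursor)
                collected.extend(results)
                if next_cursor:
                    cursor = next_cursor
                time.sleep(poll_interval)
            except Exception as error:
                print("error", error)
                break
        return collected
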
{llumo-0.2.43 → llumo-0.2.45}/llumo/llumoSessionContext.py

@@ -11,6 +11,7 @@ from .helpingFuntions import removeLLmStep
 from .helpingFuntions import addSelectedTools
 import random

+
 _ctxLogger = contextvars.ContextVar("ctxLogger")
 _ctxSessionID = contextvars.ContextVar("ctxSessionID")
 _ctxLlumoRun = contextvars.ContextVar("ctxLlumoRun")

@@ -179,7 +180,7 @@ class LlumoSessionContext(LlumoClient):

         payload = addSelectedTools(payload)
         # print("********PAYLOAD AFTER addSelectedTools*********: ", payload)
-
+
         response = requests.post(url, headers=headers, json=payload, timeout=20)

         response.raise_for_status()

llumo-0.2.43/llumo/exceptions.py
DELETED

@@ -1,68 +0,0 @@
-
-class LlumoAIError(Exception):
-    """Base class for all Llumo SDK-related errors."""
-
-    def __init__(self, message):
-        self.message = message
-        super().__init__(self.message)
-
-    @staticmethod
-    def InvalidApiKey():
-        return LlumoAIError("The provided API key is invalid or unauthorized")
-
-    @staticmethod
-    def InvalidApiResponse():
-        return LlumoAIError("Invalid or UnexpectedError response from the API")
-
-    @staticmethod
-    def RequestFailed(detail="The request to the API failed"):
-        return LlumoAIError(f"Request to the API failed: {detail}")
-
-    @staticmethod
-    def InvalidJsonResponse():
-        return LlumoAIError("The API response is not in valid JSON format")
-
-    @staticmethod
-    def UnexpectedError(detail="Metric"):
-        return LlumoAIError(f"Can you please check if {detail} is written correctly. If you want to run {detail} please create a custom eval with same name of app.llumo.ai/evallm ")
-
-    @staticmethod
-    def EvalError(detail="Some error occured while processing"):
-        return LlumoAIError(f"error: {detail}")
-
-    @staticmethod
-    def InsufficientCredits(details):
-        return LlumoAIError(details)
-
-        # return LlumoAIError("LLumo hits exhausted")
-
-    @staticmethod
-    def InvalidPromptTemplate():
-        return LlumoAIError('''Make sure the prompt template fulfills the following criteria:
-                1. All the variables should be inside double curly braces. Example: Give answer for the {{query}}, based on given {{context}}.
-                2. The variables used in the prompt template must be present in the dataframe columns with the same name..
-                ''')
-
-    @staticmethod
-    def modelHitsExhausted(details = "Your credits for the selected model exhausted."):
-        return LlumoAIError(details)
-
-    @staticmethod
-    def dependencyError(details):
-        return LlumoAIError(details)
-
-    @staticmethod
-    def providerError(details):
-        return LlumoAIError(details)
-
-    @staticmethod
-    def emptyLogList(details= "List of log object is empty. Ensure your logs have at least 1 log object."):
-        return LlumoAIError(details)
-
-    @staticmethod
-    def invalidUserAim(details= ""):
-        return LlumoAIError(details)
-
-    # @staticmethod
-    # def dateNotFound():
-    #     return LlumoAIError("Trial end date or subscription end date not found for the given user.")