llumo 0.2.26__py3-none-any.whl → 0.2.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llumo/callback.py CHANGED
@@ -4,7 +4,7 @@ from langchain_core.messages import BaseMessage
  from langchain_core.outputs import LLMResult
  from langchain_core.agents import AgentAction, AgentFinish
  import json
- from llumo.llumoLogger import LLUMOLogger
+ from llumo.llumoLogger import LlumoLogger
  from llumo.llumoSessionContext import LlumoSessionContext
  import time
  import re
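The only change in callback.py is the spelling of the logger import. Since llumo/llumoLogger.py itself is unchanged in this release (its hash is identical in the RECORD below), the class was presumably always named LlumoLogger and the 0.2.26 import could not have resolved. A minimal sketch of the corrected import, under that assumption:

# Assumes llumo/llumoLogger.py (unchanged in this diff) defines LlumoLogger.
# The 0.2.26 spelling would fail at import time:
#     from llumo.llumoLogger import LLUMOLogger   # ImportError
from llumo.llumoLogger import LlumoLogger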
llumo/client.py CHANGED
@@ -706,6 +706,8 @@ class LlumoClient:
  getDataFrame: bool = False,
  _tocheck=True,
  ):
+ if hasattr(self, "startLlumoRun"):
+     self.startLlumoRun(runName="evaluateMultiple")
  if isinstance(data, dict):
  data = [data]
  elif not isinstance(data, list):
@@ -968,6 +970,8 @@ class LlumoClient:
  print(f"Error logging eval step: {e}")

  self.socket.disconnect()
+ if hasattr(self, "endLlumoRun"):
+     self.endLlumoRun()

  if createExperiment:
  pd.set_option("future.no_silent_downcasting", True)
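The client.py change brackets evaluateMultiple with an optional run lifecycle: startLlumoRun fires before the input is normalised, and endLlumoRun after the socket disconnects, but only when those methods exist on the instance (presumably when the client is a session-aware subclass such as LlumoSessionContext). A minimal sketch of the guard pattern; the real method does not use try/finally, and _runEvals is a hypothetical stand-in for the evaluation body:

class LlumoClientSketch:
    """Illustrative only; not the SDK's actual implementation."""

    def evaluateMultiple(self, data, **kwargs):
        # Session-aware instances expose the run lifecycle; plain clients skip it.
        if hasattr(self, "startLlumoRun"):
            self.startLlumoRun(runName="evaluateMultiple")
        try:
            return self._runEvals(data, **kwargs)  # hypothetical helper
        finally:
            if hasattr(self, "endLlumoRun"):
                self.endLlumoRun()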
llumo/llumoSessionContext.py CHANGED
@@ -91,7 +91,7 @@ class LlumoSessionContext(LlumoClient):
  ]
  run["steps"] = clean_steps

- print(run["runName"]) # optional debug log
+ # print(run["runName"]) # optional debug log

  # STEP 3: Send the payload
  url = "https://app.llumo.ai/api/create-debug-log"
@@ -107,7 +107,7 @@ class LlumoSessionContext(LlumoClient):
  except requests.exceptions.Timeout:
  print("Request timed out.")
  except requests.exceptions.RequestException as e:
- print(f"Request failed: {e}")
+ pass

  # Cleanup
  if self.threadLlumoRun:
@@ -120,7 +120,7 @@ class LlumoSessionContext(LlumoClient):
  stepName: str,
  metadata: Optional[dict] = None,
  ):
- print(f"logged: {stepType}")
+ # print(f"logged: {stepType}")
  run = getLlumoRun()
  if run is None:
  raise RuntimeError("No active run to log steps.")
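Both llumoSessionContext.py changes quiet the console: the debug prints are commented out, and a failed POST to the create-debug-log endpoint is now swallowed rather than printed, while timeouts are still reported. A small sketch of the resulting error handling; the helper function and the timeout value are illustrative assumptions, not the SDK's code:

import requests

def post_debug_log(url: str, payload: dict) -> None:
    # Illustrative helper; the SDK performs this request inline.
    try:
        requests.post(url, json=payload, timeout=10)  # timeout value assumed
    except requests.exceptions.Timeout:
        print("Request timed out.")  # still reported in 0.2.28
    except requests.exceptions.RequestException:
        pass  # 0.2.26 printed the error; 0.2.28 ignores it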
llumo/openai.py CHANGED
@@ -2,7 +2,7 @@ import time
  from openai import OpenAI as OpenAIClient
  from .client import LlumoClient
  from .llumoSessionContext import LlumoSessionContext
- from .llumoLogger import LLUMOLogger
+ from .llumoLogger import LlumoLogger


  # evaluation function that uses LlumoClient
@@ -90,26 +90,26 @@ class OpenAI(OpenAIClient):
  latency = int((time.time() - start_time) * 1000)
  # Access the first result object
  bias_evaluation = bias_evaluation_result[0]
- # message = "-".join(
- #     getattr(bias_evaluation, "edgeCases", {}).get("value", [])
- # )
- # self.session.logEvalStep(
- #     stepName=f"EVAL-Input Bias",
- #     output="",
- #     context=context,
- #     query=user_message,
- #     messageHistory="",
- #     tools="",
- #     intermediateSteps="",
- #     groundTruth="",
- #     analyticsScore=getattr(bias_evaluation, "analyticsScore", {}),
- #     reasoning=getattr(bias_evaluation, "reasoning", {}),
- #     classification=getattr(bias_evaluation, "classification", {}),
- #     evalLabel=getattr(bias_evaluation, "evalLabel", {}),
- #     latencyMs=latency,
- #     status="SUCCESS",
- #     message=message,
- # )
+ message = "-".join(
+     getattr(bias_evaluation, "edgeCases", {}).get("value", [])
+ )
+ self.session.logEvalStep(
+     stepName=f"EVAL-Input Bias",
+     output="",
+     context=context,
+     query=user_message,
+     messageHistory="",
+     tools="",
+     intermediateSteps="",
+     groundTruth="",
+     analyticsScore=getattr(bias_evaluation, "analyticsScore", {}),
+     reasoning=getattr(bias_evaluation, "reasoning", {}),
+     classification=getattr(bias_evaluation, "classification", {}),
+     evalLabel=getattr(bias_evaluation, "evalLabel", {}),
+     latencyMs=latency,
+     status="SUCCESS",
+     message=message,
+ )
  except Exception as e:
  print(f"Input Bias evaluation failed: {e}")
  self.session.logEvalStep(
@@ -171,30 +171,30 @@
  latency = int((time.time() - start_time) * 1000)
  # Access the first result object
  correctness_evaluation = correctness_evaluation_result[0]
- # message = "-".join(
- #     getattr(correctness_evaluation, "edgeCases", {}).get("value", [])
- # )
- # self.session.logEvalStep(
- #     stepName=f"EVAL-Response Correctness",
- #     output=output_text,
- #     context=context,
- #     query=user_message,
- #     messageHistory="",
- #     tools="",
- #     intermediateSteps="",
- #     groundTruth="",
- #     analyticsScore=getattr(
- #         correctness_evaluation, "analyticsScore", {}
- #     ),
- #     reasoning=getattr(correctness_evaluation, "reasoning", {}),
- #     classification=getattr(
- #         correctness_evaluation, "classification", {}
- #     ),
- #     evalLabel=getattr(correctness_evaluation, "evalLabel", {}),
- #     latencyMs=latency,
- #     status="SUCCESS",
- #     message=message,
- # )
+ message = "-".join(
+     getattr(correctness_evaluation, "edgeCases", {}).get("value", [])
+ )
+ self.session.logEvalStep(
+     stepName=f"EVAL-Response Correctness",
+     output=output_text,
+     context=context,
+     query=user_message,
+     messageHistory="",
+     tools="",
+     intermediateSteps="",
+     groundTruth="",
+     analyticsScore=getattr(
+         correctness_evaluation, "analyticsScore", {}
+     ),
+     reasoning=getattr(correctness_evaluation, "reasoning", {}),
+     classification=getattr(
+         correctness_evaluation, "classification", {}
+     ),
+     evalLabel=getattr(correctness_evaluation, "evalLabel", {}),
+     latencyMs=latency,
+     status="SUCCESS",
+     message=message,
+ )
  except Exception as e:
  print(f"Response Correctness evaluation failed: {e}")
  correctness_evaluation = None
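The openai.py hunks fix the same import spelling and re-enable the previously commented-out logEvalStep calls, so each Input Bias and Response Correctness evaluation is logged against the session along with a message joined from the evaluation's edge cases. A small sketch of how that message string is built, using a stand-in object because the real result type is not shown in this diff:

from types import SimpleNamespace

# Stand-in for one evaluation result; the real object comes from the Llumo evaluator.
bias_evaluation = SimpleNamespace(edgeCases={"value": ["stereotyping", "loaded language"]})

# Mirrors the re-enabled code: a missing attribute yields {}, a missing "value" yields [].
message = "-".join(getattr(bias_evaluation, "edgeCases", {}).get("value", []))
print(message)  # stereotyping-loaded language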
llumo-0.2.26.dist-info/METADATA → llumo-0.2.28.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llumo
- Version: 0.2.26
+ Version: 0.2.28
  Summary: Python SDK for interacting with the Llumo ai API.
  Home-page: https://www.llumo.ai/
  Author: Llumo
llumo-0.2.26.dist-info/RECORD → llumo-0.2.28.dist-info/RECORD CHANGED
@@ -1,20 +1,20 @@
  llumo/__init__.py,sha256=kkuppu7ZPiVZFdnYzJ9BM3syMbYHOSZLpwKwAvGHsnY,311
- llumo/callback.py,sha256=Pzg9Smqsu5G900YZjoFwqMY0TTP4jUizxllaP0TjKgk,20439
+ llumo/callback.py,sha256=dOsQ35Ro6IVec3TiJfkPx3H9PQtk8oWfJA1skFENTIM,20439
  llumo/callbacks-0.py,sha256=TEIOCWRvk2UYsTmBMBsnlgpqWvr-2y3a6d0w_e96NRM,8958
  llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
- llumo/client.py,sha256=14swva7RlXsoldlMpiveUEG45MViDsXimKrnRJrT4m8,71408
+ llumo/client.py,sha256=keYx0GToNB-FXmGncXd0MOwwCGjxsIoDbOhTx2rCcMQ,71582
  llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
  llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
  llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
  llumo/google.py,sha256=3S_aRtbtlctCXPGR0u4baLlkyFrsjd02vlUCkoRPA5U,2147
  llumo/helpingFuntions.py,sha256=B6FwUQ5f1v4FKrWCbYoGWMFdscOV_liuuhTgNQ3cdrk,27275
  llumo/llumoLogger.py,sha256=grdjhu6Ngxg7nhnrMOP5Pd5ALR7U2ROws48yhf_N7y0,1912
- llumo/llumoSessionContext.py,sha256=v1OPJFYWe5-mLLUohX5qY7dlzgwmxpuRZ0rDsXEv6f4,10506
+ llumo/llumoSessionContext.py,sha256=iJDeC3FG6dID9jdaDK5d6hIlXWuhWx4KQy-Nnty4wYg,10485
  llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
- llumo/openai.py,sha256=QyNMXiYTppaU_YjU6vU5UB0At3OiNntoDTQ0dszLN0g,8538
+ llumo/openai.py,sha256=c0pZ-yzm6LfUAbfVmOiVpY9pS5sAWZRb8_jAj0ir910,8450
  llumo/sockets.py,sha256=pBDo-U65hMIMwKMwZQl3iBkEjISEt-9BkXxZTWfSHF4,6116
- llumo-0.2.26.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
- llumo-0.2.26.dist-info/METADATA,sha256=pn5AHWPNuRewlkYfFOjxDtU1o0xBOQlJOwGC3VKCNlk,1558
- llumo-0.2.26.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- llumo-0.2.26.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
- llumo-0.2.26.dist-info/RECORD,,
+ llumo-0.2.28.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+ llumo-0.2.28.dist-info/METADATA,sha256=WYFBghjzGVN4xRdMjqsg3QSKTw5np1qzBoy5tIbcTvk,1558
+ llumo-0.2.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ llumo-0.2.28.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+ llumo-0.2.28.dist-info/RECORD,,