llumo 0.2.28__py3-none-any.whl → 0.2.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llumo/llumoSessionContext.py CHANGED
@@ -51,8 +51,14 @@ class LlumoSessionContext(LlumoClient):
     def __exit__(self, excType, excVal, excTb):
         self.end()
 
-    def startLlumoRun(self, runName: str):
+    def startLlumoRun(self, runName: str,rowID= "",columnID = ""):
         LlumoRunID = str(uuid.uuid4().hex[:16])
+
+        # if rowID =="":
+        #     rowID = str(uuid.uuid4().hex[:16])
+        # if columnID == "":
+        #     columnID = str(uuid.uuid4().hex[:16])
+
         currentTime = datetime(2025, 8, 2, 10, 20, 15, tzinfo=timezone.utc)
         createdAt = currentTime.strftime("%Y-%m-%dT%H:%M:%S.000Z")
         llumoRun = {
@@ -62,8 +68,8 @@ class LlumoSessionContext(LlumoClient):
             "playgroundID": self.logger.getPlaygroundID(),
             "workspaceID": self.logger.getWorkspaceID(),
             "source": "SDK",
-            "rowID": "",
-            "columnID": "",
+            "rowID": rowID,
+            "columnID": columnID,
             "email": self.logger.getUserEmailID(),
             "createdAt": createdAt,
             "createdBy": self.logger.getUserEmailID(),
@@ -96,12 +102,52 @@ class LlumoSessionContext(LlumoClient):
         # STEP 3: Send the payload
         url = "https://app.llumo.ai/api/create-debug-log"
         headers = {
-            "Authorization": f"Bearer {self.apiKey}",
+            "Authorization": f"Bearer {self.logger.getWorkspaceID()}",
             "Content-Type": "application/json",
         }
 
         try:
-            response = requests.post(url, headers=headers, json=run, timeout=10)
+            # print(run)
+            response = requests.post(url, headers=headers, json=run, timeout=20)
+            response.raise_for_status()
+            # print(response.json())
+        except requests.exceptions.Timeout:
+            # print("Request timed out.")
+            pass
+        except requests.exceptions.RequestException as e:
+            pass
+
+        # Cleanup
+        if self.threadLlumoRun:
+            _ctxLlumoRun.reset(self.threadLlumoRun)
+            self.threadLlumoRun = None
+
+    def endEvalRun(self):
+        run = getLlumoRun()
+        if run is None:
+            return
+
+        # STEP 1: Sort steps by timestamp
+        steps = run.get("steps", [])
+        # sorted_steps = sorted(steps, key=lambda s: s.get("timestamp", 0))
+
+        # # STEP 2: Remove timestamp from each step before sending
+        # clean_steps = [
+        #     {k: v for k, v in step.items() if k != "timestamp"} for step in sorted_steps
+        # ]
+        # run["steps"] = clean_steps
+
+        # print(run["runName"]) # optional debug log
+
+        # STEP 3: Send the payload
+        url = "https://backend-api.llumo.ai/api/v1/create-debug-log-for-sdk"
+        headers = {
+            "Authorization": f"Bearer {self.logger.getWorkspaceID()}",
+            "Content-Type": "application/json",
+        }
+        # print(run)
+        try:
+            response = requests.post(url, headers=headers, json={"log":run}, timeout=20)
             response.raise_for_status()
             # print(response.json())
         except requests.exceptions.Timeout:
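The transport shape used here can be read directly off the diff: the Authorization header now carries the workspace ID rather than the API key, and endEvalRun posts the run wrapped under a "log" key. A minimal sketch of the equivalent request, with placeholder values where the SDK would read from its logger:

    import requests

    workspace_id = "WORKSPACE_ID"        # placeholder; the SDK takes this from self.logger.getWorkspaceID()
    run = {"runName": "example-run"}     # placeholder; the SDK assembles the full run payload internally
    requests.post(
        "https://backend-api.llumo.ai/api/v1/create-debug-log-for-sdk",
        headers={
            "Authorization": f"Bearer {workspace_id}",
            "Content-Type": "application/json",
        },
        json={"log": run},
        timeout=20,
    )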
@@ -145,51 +191,52 @@ class LlumoSessionContext(LlumoClient):
         provider: str,
         inputTokens: int,
         outputTokens: int,
-        temperature: float,
-        promptTruncated: bool,
+        # temperature: float,
+        # promptTruncated: bool,
         latencyMs: int,
-        query: str,
+        prompt: str,
         output: str,
         status: str,
-        message: str,
+        # message: str,
     ):
         metadata = {
             "model": model,
             "provider": provider,
             "inputTokens": inputTokens,
             "outputTokens": outputTokens,
-            "temperature": temperature,
-            "promptTruncated": promptTruncated,
+            # "temperature": temperature,
+            # "promptTruncated": promptTruncated,
             "latencyMs": latencyMs,
-            "query": query,
+            "prompt": prompt,
             "output": output,
             "status": status,
-            "message": message,
+            # "message": message,
         }
+
         self.logStep("LLM", stepName, metadata)
 
     def logRetrieverStep(
         self,
         stepName: str,
         retrieverSource: str,
-        queryVectorType: str,
         topK: int,
-        matchedIDs: List[str],
-        query: str,
+        chunkSize,
+        context : str,
+        searchQuery: str,
         latencyMs: int,
-        status: str,
-        message: str,
+        status: str
     ):
         metadata = {
             "retrieverSource": retrieverSource,
-            "queryVectorType": queryVectorType,
             "topK": topK,
-            "matchedIDs": matchedIDs,
-            "query": query,
+            "chunkSize":chunkSize,
+            "context": context,
+            "searchQuery": searchQuery,
             "latencyMs": latencyMs,
             "status": status,
-            "message": message,
+            # "message": message,
         }
+
         self.logStep("RETRIEVER", stepName, metadata)
 
     def logAgentStep(
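A minimal sketch of the updated retriever logging call, using the parameter names from the new signature; `session` again stands in for an active LlumoSessionContext and all values are illustrative:

    # Sketch only: `session` is assumed to be an active LlumoSessionContext.
    session.logRetrieverStep(
        stepName="Vector search",
        retrieverSource="pinecone",             # illustrative value
        topK=5,
        chunkSize=512,                          # illustrative value
        context="Retrieved passage text...",    # illustrative value
        searchQuery="What is the refund policy?",
        latencyMs=120,
        status="SUCCESS",
    )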
@@ -201,7 +248,7 @@ class LlumoSessionContext(LlumoClient):
         tools: List[str],
         query: str,
         status: str,
-        message: str,
+        # message: str,
     ):
         metadata = {
             "agentType": agentType,
@@ -210,8 +257,8 @@ class LlumoSessionContext(LlumoClient):
             "tools": tools,
             "query": query,
             "status": status,
-            "message": message,
-        }
+            # "message": message,
+        }
         self.logStep("AGENT", stepName, metadata)
 
     def logToolSelectorStep(
@@ -222,7 +269,7 @@ class LlumoSessionContext(LlumoClient):
         selectedTool: str,
         reasoning: str,
         status: str,
-        message: str,
+        # message: str,
     ):
         metadata = {
             "selectorType": selectorType,
@@ -230,7 +277,7 @@ class LlumoSessionContext(LlumoClient):
             "selectedTool": selectedTool,
             "reasoning": reasoning,
             "status": status,
-            "message": message,
+            # "message": message,
         }
         self.logStep("TOOL_SELECTOR", stepName, metadata)
 
@@ -238,19 +285,21 @@ class LlumoSessionContext(LlumoClient):
         self,
         stepName: str,
         toolName: str,
+        description: str,
         input: Dict[str, Any],
         output: str,
         latencyMs: int,
         status: str,
-        message: str,
+        # message: str,
     ):
         metadata = {
             "toolName": toolName,
+            "description":description,
             "input": input,
             "output": output,
             "latencyMs": latencyMs,
             "status": status,
-            "message": message,
+            # "message": message,
         }
         self.logStep("TOOL", stepName, metadata)
 
@@ -364,3 +413,16 @@ class LlumoSessionContext(LlumoClient):
             "message": message,
         }
         self.logStep("CUSTOM_SCRIPT", stepName, metadata)
+
+
+    def logQueryStep(self,stepName,model,provider,inputTokens,query,status):
+        metadata = {
+            "model": model,
+            "provider": provider,
+            "inputTokens": inputTokens,
+            "query": query,
+            "status":status
+        }
+        self.logStep("QUERY", stepName, metadata)
+
+
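A minimal usage sketch of the new QUERY step next to the reworked LLM step, using only the parameters present in the new signatures; `session` stands in for an active LlumoSessionContext and every value is illustrative:

    # Sketch only: `session` is assumed to be an active LlumoSessionContext.
    session.logQueryStep(
        stepName="Query Invocation",
        model="gpt-4o-mini",                   # illustrative value
        provider="openai",
        inputTokens=42,
        query="What is the refund policy?",
        status="SUCCESS",
    )
    session.logLlmStep(
        stepName="LLM-What is the refund policy?",
        model="gpt-4o-mini",
        provider="openai",
        inputTokens=42,
        outputTokens=128,
        latencyMs=950,
        prompt="What is the refund policy?",   # renamed from `query` in this version
        output="Refunds are issued within 30 days.",
        status="SUCCESS",
    )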
llumo/openai.py CHANGED
@@ -12,8 +12,6 @@ def performEvaluation(data, api_key=None, evals=["Response Correctness"], **kwar
     results = client.evaluateMultiple(
         data,
         evals=evals,
-        createExperiment=kwargs.get("createExperiment", False),
-        playgroundID=kwargs.get("playgroundID"),
         prompt_template="Give answer to the query: {{query}}, using context: {{context}}",
         getDataFrame=False,
     )
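With createExperiment and playgroundID no longer forwarded, calling performEvaluation only needs the data rows and, optionally, an API key and evals list. A minimal sketch (the field names in the data row follow the prompt template above; all values are illustrative):

    from llumo.openai import performEvaluation

    data = [{
        "query": "What is the refund policy?",
        "context": "Refunds are issued within 30 days of purchase.",
    }]
    results = performEvaluation(data, api_key="YOUR_LLUMO_API_KEY", evals=["Response Correctness"])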
@@ -25,9 +23,13 @@
 
 # Wrapper around ChatCompletion to allow custom fields like `.evaluation`
 class ChatCompletionWithEval:
-    def __init__(self, response, evaluation):
+    def __init__(
+        self,
+        response,
+        # , evaluation
+    ):
         self._response = response
-        self.evaluation = evaluation
+        # self.evaluation = evaluation
 
     def __getattr__(self, name):
         return getattr(self._response, name)
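The wrapper now takes only the response and forwards every attribute lookup to it through __getattr__; the .evaluation field is gone in this version. A small, self-contained illustration of that delegation (FakeResponse is a stand-in for an OpenAI ChatCompletion object):

    from llumo.openai import ChatCompletionWithEval

    class FakeResponse:                        # stand-in for an OpenAI ChatCompletion
        id = "chatcmpl-123"

    wrapped = ChatCompletionWithEval(FakeResponse())
    print(wrapped.id)                          # "chatcmpl-123", resolved via __getattr__
    print(hasattr(wrapped, "evaluation"))      # False in 0.2.30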
@@ -70,70 +72,77 @@ class OpenAI(OpenAIClient):
             workspace_id = self.session.logger.getWorkspaceID()
 
             # Input Bias Evaluation
73
- eval_input_bias = [
74
- {
75
- "query": user_message,
76
- "context": context,
77
- "output": "", # No output yet
78
- }
79
- ]
80
- try:
81
- start_time = time.time()
82
- bias_evaluation_result = performEvaluation(
83
- eval_input_bias,
84
- api_key=self.llumo_key,
85
- evals=["Input Bias"],
86
- playgroundID=playground_id,
87
- workspaceID=workspace_id,
88
- createExperiment=create_experiment,
89
- )
90
- latency = int((time.time() - start_time) * 1000)
91
- # Access the first result object
92
- bias_evaluation = bias_evaluation_result[0]
93
- message = "-".join(
94
- getattr(bias_evaluation, "edgeCases", {}).get("value", [])
95
- )
96
- self.session.logEvalStep(
97
- stepName=f"EVAL-Input Bias",
98
- output="",
99
- context=context,
100
- query=user_message,
101
- messageHistory="",
102
- tools="",
103
- intermediateSteps="",
104
- groundTruth="",
105
- analyticsScore=getattr(bias_evaluation, "analyticsScore", {}),
106
- reasoning=getattr(bias_evaluation, "reasoning", {}),
107
- classification=getattr(bias_evaluation, "classification", {}),
108
- evalLabel=getattr(bias_evaluation, "evalLabel", {}),
109
- latencyMs=latency,
110
- status="SUCCESS",
111
- message=message,
112
- )
113
- except Exception as e:
114
- print(f"Input Bias evaluation failed: {e}")
115
- self.session.logEvalStep(
116
- stepName=f"EVAL-FAILURE",
117
- output="",
118
- context=context,
119
- query=user_message,
120
- messageHistory="",
121
- tools="",
122
- intermediateSteps="",
123
- groundTruth="",
124
- analyticsScore={},
125
- reasoning={},
126
- classification={},
127
- evalLabel={},
128
- latencyMs=0,
129
- status="FAILURE",
130
- message="EVAL_ERROR",
131
- )
+            # eval_input_bias = [
+            #     {
+            #         "query": user_message,
+            #         "context": context,
+            #         "output": "", # No output yet
+            #     }
+            # ]
+            # try:
+            #     start_time = time.time()
+            #     bias_evaluation_result = performEvaluation(
+            #         eval_input_bias,
+            #         api_key=self.llumo_key,
+            #         evals=["Input Bias"],
+            #         playgroundID=playground_id,
+            #         workspaceID=workspace_id,
+            #         createExperiment=create_experiment,
+            #     )
+            #     latency = int((time.time() - start_time) * 1000)
+            #     # Access the first result object
+            #     bias_evaluation = bias_evaluation_result[0]
+            #     message = "-".join(
+            #         getattr(bias_evaluation, "edgeCases", {}).get("value", [])
+            #     )
+            #     self.session.logEvalStep(
+            #         stepName=f"EVAL-Input Bias",
+            #         output="",
+            #         context=context,
+            #         query=user_message,
+            #         messageHistory="",
+            #         tools="",
+            #         intermediateSteps="",
+            #         groundTruth="",
+            #         analyticsScore=getattr(bias_evaluation, "analyticsScore", {}),
+            #         reasoning=getattr(bias_evaluation, "reasoning", {}),
+            #         classification=getattr(bias_evaluation, "classification", {}),
+            #         evalLabel=getattr(bias_evaluation, "evalLabel", {}),
+            #         latencyMs=latency,
+            #         status="SUCCESS",
+            #         message=message,
+            #     )
+            # except Exception as e:
+            #     print(f"Input Bias evaluation failed: {e}")
+            #     self.session.logEvalStep(
+            #         stepName=f"EVAL-FAILURE",
+            #         output="",
+            #         context=context,
+            #         query=user_message,
+            #         messageHistory="",
+            #         tools="",
+            #         intermediateSteps="",
+            #         groundTruth="",
+            #         analyticsScore={},
+            #         reasoning={},
+            #         classification={},
+            #         evalLabel={},
+            #         latencyMs=0,
+            #         status="FAILURE",
+            #         message="EVAL_ERROR",
+            #     )
 
             start_time = time.time()
             response = original_create(*args, **kwargs)
             latency = int((time.time() - start_time) * 1000)
             output_text = response.choices[0].message.content
+            self.session.logQueryStep(
+                stepName="Query Invocation",
+                model=model,
+                provider="openai",
+                inputTokens=response.usage.prompt_tokens,
+                query=user_message,
+                status = "SUCCESS")
 
             self.session.logLlmStep(
                 stepName=f"LLM-{user_message[:30]}",
@@ -141,84 +150,87 @@ class OpenAI(OpenAIClient):
                 provider="openai",
                 inputTokens=response.usage.prompt_tokens,
                 outputTokens=response.usage.completion_tokens,
-                temperature=kwargs.get("temperature", 0.0),
-                promptTruncated=False,
+                # temperature=kwargs.get("temperature", 0.0),
+                # promptTruncated=False,
                 latencyMs=latency,
-                query=user_message,
+                prompt=user_message,
                 output=output_text,
                 status="SUCCESS",
-                message="",
+                # message="",
             )
 
             # Response Correctness Evaluation
-            eval_input_correctness = [
-                {
-                    "query": user_message,
-                    "context": context,
-                    "output": output_text,
-                }
-            ]
-            try:
-                start_time = time.time()
-                correctness_evaluation_result = performEvaluation(
-                    eval_input_correctness,
-                    api_key=self.llumo_key,
-                    evals=["Response Correctness"],
-                    playgroundID=playground_id,
-                    workspaceID=workspace_id,
-                    createExperiment=create_experiment,
-                )
-                latency = int((time.time() - start_time) * 1000)
-                # Access the first result object
-                correctness_evaluation = correctness_evaluation_result[0]
-                message = "-".join(
-                    getattr(correctness_evaluation, "edgeCases", {}).get("value", [])
-                )
-                self.session.logEvalStep(
-                    stepName=f"EVAL-Response Correctness",
-                    output=output_text,
-                    context=context,
-                    query=user_message,
-                    messageHistory="",
-                    tools="",
-                    intermediateSteps="",
-                    groundTruth="",
-                    analyticsScore=getattr(
-                        correctness_evaluation, "analyticsScore", {}
-                    ),
-                    reasoning=getattr(correctness_evaluation, "reasoning", {}),
-                    classification=getattr(
-                        correctness_evaluation, "classification", {}
-                    ),
-                    evalLabel=getattr(correctness_evaluation, "evalLabel", {}),
-                    latencyMs=latency,
-                    status="SUCCESS",
-                    message=message,
-                )
-            except Exception as e:
-                print(f"Response Correctness evaluation failed: {e}")
-                correctness_evaluation = None
-                self.session.logEvalStep(
-                    stepName=f"EVAL-FAILURE",
-                    output=output_text,
-                    context=context,
-                    query=user_message,
-                    messageHistory="",
-                    tools="",
-                    intermediateSteps="",
-                    groundTruth="",
-                    analyticsScore={},
-                    reasoning={},
-                    classification={},
-                    evalLabel={},
-                    latencyMs=0,
-                    status="FAILURE",
-                    message="EVAL_ERROR",
-                )
-
-            if correctness_evaluation is None:
-                return response
-
-            return ChatCompletionWithEval(response, correctness_evaluation)
+            # eval_input_correctness = [
+            #     {
+            #         "query": user_message,
+            #         "context": context,
+            #         "output": output_text,
+            #     }
+            # ]
+            # try:
+            #     start_time = time.time()
+            #     correctness_evaluation_result = performEvaluation(
+            #         eval_input_correctness,
+            #         api_key=self.llumo_key,
+            #         evals=["Response Correctness"],
+            #         playgroundID=playground_id,
+            #         workspaceID=workspace_id,
+            #         createExperiment=create_experiment,
+            #     )
+            #     latency = int((time.time() - start_time) * 1000)
+            #     # Access the first result object
+            #     correctness_evaluation = correctness_evaluation_result[0]
+            #     message = "-".join(
+            #         getattr(correctness_evaluation, "edgeCases", {}).get("value", [])
+            #     )
+            #     self.session.logEvalStep(
+            #         stepName=f"EVAL-Response Correctness",
+            #         output=output_text,
+            #         context=context,
+            #         query=user_message,
+            #         messageHistory="",
+            #         tools="",
+            #         intermediateSteps="",
+            #         groundTruth="",
+            #         analyticsScore=getattr(
+            #             correctness_evaluation, "analyticsScore", {}
+            #         ),
+            #         reasoning=getattr(correctness_evaluation, "reasoning", {}),
+            #         classification=getattr(
+            #             correctness_evaluation, "classification", {}
+            #         ),
+            #         evalLabel=getattr(correctness_evaluation, "evalLabel", {}),
+            #         latencyMs=latency,
+            #         status="SUCCESS",
+            #         message=message,
+            #     )
+            # except Exception as e:
+            #     print(f"Response Correctness evaluation failed: {e}")
+            #     correctness_evaluation = None
+            #     self.session.logEvalStep(
+            #         stepName=f"EVAL-FAILURE",
+            #         output=output_text,
+            #         context=context,
+            #         query=user_message,
+            #         messageHistory="",
+            #         tools="",
+            #         intermediateSteps="",
+            #         groundTruth="",
+            #         analyticsScore={},
+            #         reasoning={},
+            #         classification={},
+            #         evalLabel={},
+            #         latencyMs=0,
+            #         status="FAILURE",
+            #         message="EVAL_ERROR",
+            #     )
+
+            # if correctness_evaluation is None:
+            #     return response
+
+            return ChatCompletionWithEval(
+                response
+                # , correctness_evaluation
+            )
 
         self.chat.completions.create = create_wrapper
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llumo
-Version: 0.2.28
+Version: 0.2.30
 Summary: Python SDK for interacting with the Llumo ai API.
 Home-page: https://www.llumo.ai/
 Author: Llumo
@@ -0,0 +1,20 @@
+llumo/__init__.py,sha256=kkuppu7ZPiVZFdnYzJ9BM3syMbYHOSZLpwKwAvGHsnY,311
+llumo/callback.py,sha256=K8O_bXgeIOXr5gougWF2Y7wnXTf7c3gSEvJZFiVoCmA,23829
+llumo/callbacks-0.py,sha256=TEIOCWRvk2UYsTmBMBsnlgpqWvr-2y3a6d0w_e96NRM,8958
+llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
+llumo/client.py,sha256=_icK1unxsPdKiLboLlRcrpvfO6IuaZeoS-n3j5eZIX8,70772
+llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
+llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
+llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
+llumo/google.py,sha256=6y9YnDFDRHv6-sQNT5LIsV9p31BCN0B9eow5KTRBWfM,2185
+llumo/helpingFuntions.py,sha256=RSXMA5XLP6qiMsJW7axp1PZj8RFwY9HkkbG7nwJkbIM,27279
+llumo/llumoLogger.py,sha256=grdjhu6Ngxg7nhnrMOP5Pd5ALR7U2ROws48yhf_N7y0,1912
+llumo/llumoSessionContext.py,sha256=eBlVRDeRmJ8BzfrHNofEiy8DO_TZCwzLo06LlOBcyho,12417
+llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
+llumo/openai.py,sha256=VstBzaORe8Tq0feUIIEszzcN1oq6TJfkPviaCr5d3Bw,8950
+llumo/sockets.py,sha256=pBDo-U65hMIMwKMwZQl3iBkEjISEt-9BkXxZTWfSHF4,6116
+llumo-0.2.30.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.30.dist-info/METADATA,sha256=VstCu1Qq9l_1qrerrHmTt1KFTByH62V1RK2c1XzB78g,1558
+llumo-0.2.30.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.30.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.30.dist-info/RECORD,,
@@ -1,20 +0,0 @@
-llumo/__init__.py,sha256=kkuppu7ZPiVZFdnYzJ9BM3syMbYHOSZLpwKwAvGHsnY,311
-llumo/callback.py,sha256=dOsQ35Ro6IVec3TiJfkPx3H9PQtk8oWfJA1skFENTIM,20439
-llumo/callbacks-0.py,sha256=TEIOCWRvk2UYsTmBMBsnlgpqWvr-2y3a6d0w_e96NRM,8958
-llumo/chains.py,sha256=6lCgLseh04RUgc6SahhmvQj82quay2Mi1j8gPUlx8Es,2923
-llumo/client.py,sha256=keYx0GToNB-FXmGncXd0MOwwCGjxsIoDbOhTx2rCcMQ,71582
-llumo/exceptions.py,sha256=1OyhN9YL9LcyUPUsqYHq6Rret0udATZAwMVJaio2_Ec,2123
-llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
-llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
-llumo/google.py,sha256=3S_aRtbtlctCXPGR0u4baLlkyFrsjd02vlUCkoRPA5U,2147
-llumo/helpingFuntions.py,sha256=B6FwUQ5f1v4FKrWCbYoGWMFdscOV_liuuhTgNQ3cdrk,27275
-llumo/llumoLogger.py,sha256=grdjhu6Ngxg7nhnrMOP5Pd5ALR7U2ROws48yhf_N7y0,1912
-llumo/llumoSessionContext.py,sha256=iJDeC3FG6dID9jdaDK5d6hIlXWuhWx4KQy-Nnty4wYg,10485
-llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
-llumo/openai.py,sha256=c0pZ-yzm6LfUAbfVmOiVpY9pS5sAWZRb8_jAj0ir910,8450
-llumo/sockets.py,sha256=pBDo-U65hMIMwKMwZQl3iBkEjISEt-9BkXxZTWfSHF4,6116
-llumo-0.2.28.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
-llumo-0.2.28.dist-info/METADATA,sha256=WYFBghjzGVN4xRdMjqsg3QSKTw5np1qzBoy5tIbcTvk,1558
-llumo-0.2.28.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-llumo-0.2.28.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
-llumo-0.2.28.dist-info/RECORD,,
File without changes