llumo 0.1.9__py3-none-any.whl → 0.1.9b10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llumo/__init__.py +7 -7
- llumo/client.py +782 -561
- llumo/exceptions.py +51 -45
- llumo/execution.py +38 -38
- llumo/functionCalling.py +190 -190
- llumo/helpingFuntions.py +137 -50
- llumo/models.py +42 -42
- llumo/sockets.py +148 -148
- llumo-0.1.9b10.dist-info/METADATA +17 -0
- llumo-0.1.9b10.dist-info/RECORD +13 -0
- {llumo-0.1.9.dist-info → llumo-0.1.9b10.dist-info}/WHEEL +1 -1
- {llumo-0.1.9.dist-info → llumo-0.1.9b10.dist-info}/licenses/LICENSE +4 -4
- llumo/.env +0 -6
- llumo-0.1.9.dist-info/METADATA +0 -26
- llumo-0.1.9.dist-info/RECORD +0 -14
- {llumo-0.1.9.dist-info → llumo-0.1.9b10.dist-info}/top_level.txt +0 -0
llumo/client.py
CHANGED
@@ -1,561 +1,782 @@
(previous version: lines 1-561 of llumo/client.py, removed; the old file's contents were not captured in this diff view)
New version (llumo/client.py, lines 1-782):

import requests
from docutils.nodes import subscript

from .exceptions import LlumoAIError
import time
import re
import json
import uuid
import threading
from dotenv import load_dotenv
import os
import itertools
import pandas as pd
from typing import List, Dict
from .models import AVAILABLEMODELS,getProviderFromModel
from .execution import ModelExecutor
from .helpingFuntions import *
from .sockets import LlumoSocketClient
from .functionCalling import LlumoAgentExecutor


# 👇 NEW: Explicitly load .env from the package folder
envPath = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path=envPath, override=False)# Automatically looks for .env in current directory

postUrl = os.getenv("POST_URL")
fetchUrl = os.getenv("FETCH_URL")
validateUrl = os.getenv("VALIDATE_URL")
socketUrl = os.getenv("SOCKET_URL")


class LlumoClient:

    def __init__(self, api_key):
        self.apiKey = api_key
        self.socket = LlumoSocketClient(socketUrl)
        self.processMapping = {}


    def validateApiKey(self, evalName = ""):
        headers = {
            "Authorization": f"Bearer {self.apiKey}",
            "Content-Type": "application/json",
        }
        reqBody = {"analytics": [evalName]}

        # print(f"Making API key validation request to: {validateUrl}")
        # print(f"Request body: {reqBody}")

        try:
            response = requests.post(url=validateUrl, json=reqBody, headers=headers)
            # print(response.text)
            # Print response info for debugging
            # print(f"Response status code: {response.status_code}")
            # print(f"Response headers: {response.headers}")

            # Try to get at least some of the response content
            try:
                response_preview = response.text[:500] # First 500 chars
                # print(f"Response preview: {response_preview}")
            except Exception as e:
                print(f"Could not get response preview: {e}")

        except requests.exceptions.RequestException as e:
            print(f"Request exception: {str(e)}")
            raise LlumoAIError.RequestFailed(detail=str(e))

        if response.status_code == 401:
            raise LlumoAIError.InvalidApiKey()

        # Handle other common status codes
        if response.status_code == 404:
            raise LlumoAIError.RequestFailed(
                detail=f"Endpoint not found (404): {validateUrl}"
            )

        # if response.status_code >= 500:
        #     raise LlumoAIError.ServerError(
        #         detail=f"Server error ({response.status_code})"
        #     )

        if response.status_code != 200:
            raise LlumoAIError.RequestFailed(
                detail=f"Unexpected status code: {response.status_code}"
            )

        # Try to parse JSON
        try:
            data = response.json()
        except ValueError as e:
            print(f"JSON parsing error: {str(e)}")
            # print(f"Response content that could not be parsed: {response.text[:1000]}...")
            raise LlumoAIError.InvalidJsonResponse()

        if "data" not in data or not data["data"]:
            # print(f"Invalid API response structure: {data}")
            raise LlumoAIError.InvalidApiResponse()

        try:
            self.hitsAvailable = data["data"].get("remainingHits", 0)
            self.workspaceID = data["data"].get("workspaceID")
            self.evalDefinition = data["data"].get("analyticsMapping")
            self.socketToken = data["data"].get("token")
            self.hasSubscribed = data["data"].get("hasSubscribed",False)
            self.trialEndDate = data["data"].get("trialEndDate",None)
            self.subscriptionEndDate = data["data"].get("subscriptionEndDate", None)

            # print(f"API key validation successful:")
            # print(f"- Remaining hits: {self.hitsAvailable}")
            # print(f"- Workspace ID: {self.workspaceID}")
            # print(f"- Token received: {'Yes' if self.socketToken else 'No'}")

        except Exception as e:
            # print(f"Error extracting data from response: {str(e)}")
            raise LlumoAIError.UnexpectedError(detail=str(e))

    def postBatch(self, batch, workspaceID):
        payload = {
            "batch": json.dumps(batch),
            "runType": "EVAL",
            "workspaceID": workspaceID,
        }
        # socketToken here if the "JWD" token
        headers = {
            "Authorization": f"Bearer {self.socketToken}",
            "Content-Type": "application/json",
        }
        try:
            # print(postUrl)
            response = requests.post(postUrl, json=payload, headers=headers)
            # print(f"Post API Status Code: {response.status_code}")
            # print(response.text)

        except Exception as e:
            print(f"Error in posting batch: {e}")


    def postDataStream(self, batch, workspaceID):
        payload = {
            "batch": json.dumps(batch),
            "runType": "DATA_STREAM",
            "workspaceID": workspaceID,
        }
        # socketToken here if the "JWD" token
        headers = {
            "Authorization": f"Bearer {self.socketToken}",
            "Content-Type": "application/json",
        }
        try:
            # print(postUrl)
            response = requests.post(postUrl, json=payload, headers=headers)
            # print(f"Post API Status Code: {response.status_code}")
            # print(response.text)

        except Exception as e:
            print(f"Error in posting batch: {e}")


    def AllProcessMapping(self):
        for batch in self.allBatches:
            for record in batch:
                rowId = record['rowID']
                colId = record['columnID']
                pid = f'{rowId}-{colId}-{colId}'
                self.processMapping[pid] = record


    def finalResp(self,results):
        seen = set()
        uniqueResults = []

        for item in results:
            for rowID in item: # Each item has only one key
                # for rowID in item["data"]:
                if rowID not in seen:
                    seen.add(rowID)
                    uniqueResults.append(item)

        return uniqueResults

    def evaluate(self, dataframe, eval ="Response Completeness", prompt_template="", outputColName="output"):

        results = {}
        try:
            socketID = self.socket.connect(timeout=150)

            # Ensure full connection before proceeding
            max_wait_secs = 20
            waited_secs = 0
            while not self.socket._connection_established.is_set():
                time.sleep(0.1)
                waited_secs += 0.1
                if waited_secs >= max_wait_secs:
                    raise RuntimeError("Timeout waiting for server 'connection-established' event.")

            rowIdMapping = {}

            print(f"\n======= Running evaluation for: {eval} =======")

            try:
                self.validateApiKey(evalName=eval)
            except Exception as e:
                if hasattr(e, "response") and getattr(e, "response", None) is not None:
                    pass
                raise
            userHits = checkUserHits(self.workspaceID,self.hasSubscribed,self.trialEndDate,self.subscriptionEndDate,self.hitsAvailable,len(dataframe))

            if not userHits["success"]:
                raise LlumoAIError.InsufficientCredits(userHits["message"])

            # if self.hitsAvailable == 0 or len(dataframe) > self.hitsAvailable:
            #     raise LlumoAIError.InsufficientCredits()

            evalDefinition = self.evalDefinition[eval]
            model = "GPT_4"
            provider = "OPENAI"
            evalType = "LLM"
            workspaceID = self.workspaceID

            self.allBatches = []
            currentBatch = []

            for index, row in dataframe.iterrows():
                tools = [row["tools"]] if "tools" in dataframe.columns else []
                groundTruth = row["groundTruth"] if "groundTruth" in dataframe.columns else ""
                messageHistory = [row["messageHistory"]] if "messageHistory" in dataframe.columns else []
                promptTemplate = prompt_template

                keys = re.findall(r"{{(.*?)}}", promptTemplate)

                if not all([ky in dataframe.columns for ky in keys]):
                    raise LlumoAIError.InvalidPromptTemplate()

                inputDict = {key: row[key] for key in keys if key in row}
                output = row[outputColName] if outputColName in dataframe.columns else ""

                activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")

                # storing the generated rowID and the row index (dataframe) for later lookkup
                rowIdMapping[rowID] = index

                templateData = {
                    "processID": getProcessID(),
                    "socketID": socketID,
                    "source": "SDK",
                    "processData": {
                        "executionDependency": {
                            "query": "",
                            "context": "",
                            "output": output,
                            "tools": tools,
                            "groundTruth": groundTruth,
                            "messageHistory": messageHistory,
                        },
                        "definition": evalDefinition,
                        "model": model,
                        "provider": provider,
                        "analytics": eval,
                    },
                    "workspaceID": workspaceID,
                    "type": "EVAL",
                    "evalType": evalType,
                    "kpi": eval,
                    "columnID": columnID,
                    "rowID": rowID,
                    "playgroundID": activePlayground,
                    "processType": "EVAL",
                }

                query = ""
                context = ""
                for key, value in inputDict.items():
                    if isinstance(value, str):
                        length = len(value.split()) * 1.5
                        if length > 50:
                            context += f" {key}: {value}, "
                        else:
                            if promptTemplate:
                                tempObj = {key: value}
                                promptTemplate = getInputPopulatedPrompt(promptTemplate, tempObj)
                            else:
                                query += f" {key}: {value}, "

                if not context.strip():
                    for key, value in inputDict.items():
                        context += f" {key}: {value}, "

                templateData["processData"]["executionDependency"]["context"] = context.strip()
                templateData["processData"]["executionDependency"]["query"] = query.strip()

                if promptTemplate and not query.strip():
                    templateData["processData"]["executionDependency"]["query"] = promptTemplate

                currentBatch.append(templateData)

                if len(currentBatch) == 10 or index == len(dataframe) - 1:
                    self.allBatches.append(currentBatch)
                    currentBatch = []

            totalItems = sum(len(batch) for batch in self.allBatches)

            for cnt, batch in enumerate(self.allBatches):
                try:
                    self.postBatch(batch=batch, workspaceID=workspaceID)
                    # print("Betch Posted with item len: ", len(batch))
                except Exception as e:
                    continue

                time.sleep(1)

            timeout = max(50, min(600, totalItems * 10))

            self.socket.listenForResults(
                min_wait=40, max_wait=timeout, inactivity_timeout=150, expected_results=totalItems
            )

            eval_results = self.socket.getReceivedData()
            results[eval] = self.finalResp(eval_results)

        except Exception as e:
            raise
        finally:
            try:
                self.socket.disconnect()
            except Exception as e:
                pass

        for evalName, records in results.items():
            dataframe[evalName] = None
            for item in records:
                for compound_key, value in item.items():
                    # for compound_key, value in item['data'].items():

                    rowID = compound_key.split('-')[0]
                    # looking for the index of each rowID , in the original dataframe
                    if rowID in rowIdMapping:
                        index = rowIdMapping[rowID]
                        # dataframe.at[index, evalName] = value
                        dataframe.at[index, evalName] = value["value"]
                        dataframe.at[index, f'{evalName} Reason'] = value["reasoning"]

                    else:
                        pass
                        # print(f"⚠️ Warning: Could not find rowID {rowID} in mapping")

        return dataframe

    def evaluateCompressor(self, dataframe, prompt_template):
        results = []

        try:
            # Connect to socket first
            # print("Connecting to socket server...")
            socketID = self.socket.connect(timeout=150)

            # Ensure full connection before proceeding
            max_wait_secs = 20
            waited_secs = 0
            while not self.socket._connection_established.is_set():
                time.sleep(0.1)
                waited_secs += 0.1
                if waited_secs >= max_wait_secs:
                    raise RuntimeError("Timeout waiting for server 'connection-established' event.")

            # print(f"Connected with socket ID: {socketID}")

            try:
                # print(f"Validating API key...")
                self.validateApiKey()
                # print(f"API key validation successful. Hits available: {self.hitsAvailable}")
            except Exception as e:
                print(f"Error during API key validation: {str(e)}")
                if hasattr(e, "response") and getattr(e, "response", None) is not None:
                    print(f"Status code: {e.response.status_code}")
                    print(f"Response content: {e.response.text[:500]}...")
                raise

            # check for available hits and trial limit
            userHits = checkUserHits(self.workspaceID, self.hasSubscribed, self.trialEndDate, self.subscriptionEndDate,
                                     self.hitsAvailable, len(dataframe))

            # do not proceed if subscription or trial limit has exhausted
            if not userHits["success"]:
                raise LlumoAIError.InsufficientCredits(userHits["message"])

            # if self.hitsAvailable == 0 or len(dataframe) > self.hitsAvailable:
            #     raise LlumoAIError.InsufficientCredits()

            model = "GPT_4"
            provider = "OPENAI"
            evalType = "LLUMO"
            workspaceID = self.workspaceID

            # Prepare all batches before sending
            # print("Preparing batches...")
            self.allBatches = []
            currentBatch = []

            for index, row in dataframe.iterrows():
                promptTemplate = prompt_template

                # extracting the placeholders from the prompt template
                keys = re.findall(r"{{(.*?)}}", promptTemplate)
                inputDict = {key: row[key] for key in keys if key in row}

                if not all([ky in dataframe.columns for ky in keys]):
                    raise LlumoAIError.InvalidPromptTemplate()

                activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")

                compressed_prompt_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                compressed_prompt_output_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                cost_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                cost_saving_id = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")

                # Use the server-provided socket ID here
                templateData = {
                    "processID": getProcessID(),
                    "socketID": socketID,
                    "source": "SDK",
                    "rowID": rowID,
                    "columnID": columnID,
                    "processType": "COST_SAVING",
                    "evalType": evalType,
                    "dependency": list(inputDict.keys()),
                    "costColumnMapping": {
                        "compressed_prompt": compressed_prompt_id,
                        "compressed_prompt_output": compressed_prompt_output_id,
                        "cost": cost_id,
                        "cost_saving": cost_saving_id
                    },
                    "processData": {
                        "rowData": {
                            "query": {"type": "VARIABLE", "value": ""},
                            "context": {"type": "VARIABLE", "value": ""},
                        },
                        "dependency": list(inputDict.keys()),
                        "dependencyMapping": {ky: ky for ky in list(inputDict.keys())},
                        "provider": provider,
                        "model": model,
                        "promptText": promptTemplate,
                        "costColumnMapping": {
                            "compressed_prompt": compressed_prompt_id,
                            "compressed_prompt_output": compressed_prompt_output_id,
                            "cost": cost_id,
                            "cost_saving": cost_saving_id
                        }
                    },
                    "workspaceID": workspaceID,
                    "email": "",
                    "playgroundID": activePlayground
                }

                # Build query/context from input
                query = ""
                context = ""

                for key, value in inputDict.items():
                    if isinstance(value, str):
                        length = len(value.split()) * 1.5
                        if length > 50:
                            context += f" {key}: {value}, "
                        else:
                            if promptTemplate:
                                populatedPrompt = getInputPopulatedPrompt(promptTemplate, {key: value})
                                query += f"{populatedPrompt} "
                            else:
                                query += f" {key}: {value}, "

                if not context.strip():
                    for key, value in inputDict.items():
                        context += f" {key}: {value}, "

                templateData["processData"]["rowData"]["context"]["value"] = context.strip()
                templateData["processData"]["rowData"]["query"]["value"] = query.strip()

                if promptTemplate and not query.strip():
                    templateData["processData"]["rowData"]["query"]["value"] = promptTemplate

                # print(templateData)
                currentBatch.append(templateData)

                if len(currentBatch) == 10 or index == len(dataframe) - 1:
                    self.allBatches.append(currentBatch)
                    currentBatch = []

            # Post all batches
            total_items = sum(len(batch) for batch in self.allBatches)
            # print(f"Posting {len(self.allBatches)} batches ({total_items} items total)")

            for cnt, batch in enumerate(self.allBatches):
                # print(f"Posting batch {cnt + 1}/{len(self.allBatches)} for eval '{eval}'")
                try:
                    self.postBatch(batch=batch, workspaceID=workspaceID)
                    # print(f"Batch {cnt + 1} posted successfully")
                except Exception as e:
                    print(f"Error posting batch {cnt + 1}: {str(e)}")
                    continue

                # Small delay between batches to prevent overwhelming the server
                time.sleep(1)

            # updating the dict for row column mapping
            self.AllProcessMapping()
            # Calculate a reasonable timeout based on the data size
            timeout = max(60, min(600, total_items * 10))
            # print(f"All batches posted. Waiting up to {timeout} seconds for results...")

            # Listen for results
            self.socket.listenForResults(min_wait=20, max_wait=timeout, inactivity_timeout=30,expected_results=None)

            # Get results for this evaluation
            eval_results = self.socket.getReceivedData()
            # print(f"Received {len(eval_results)} results for evaluation '{eval}'")

            # Add these results to our overall results
            results = self.finalResp(eval_results)
            print(f"======= Completed evaluation: {eval} =======\n")

            # print("All evaluations completed successfully")

        except Exception as e:
            print(f"Error during evaluation: {e}")
            raise
        finally:
            # Always disconnect the socket when done
            try:
                self.socket.disconnect()
                # print("Socket disconnected")
            except Exception as e:
                print(f"Error disconnecting socket: {e}")

        compressed_prompt , compressed_prompt_output , cost , cost_saving = costColumnMapping(results,self.processMapping)
        dataframe["compressed_prompt"] = compressed_prompt
        dataframe["compressed_prompt_output"] = compressed_prompt_output
        dataframe["cost"] = cost
        dataframe["cost_saving"] = cost_saving
        return dataframe


    def run_sweep(self,templates: List[str], dataset: Dict[str, List[str]], model_aliases: List[AVAILABLEMODELS], apiKey: str, eval = ["Response Correctness"],toEvaluate:bool =False ) -> pd.DataFrame:
        executor = ModelExecutor(apiKey)

        keys = list(dataset.keys())
        value_combinations = list(itertools.product(*dataset.values()))
        combinations = [dict(zip(keys, values)) for values in value_combinations]

        results = []

        # Iterate through combinations
        for combo in combinations:
            for template in templates:
                prompt = template
                for k, v in combo.items():
                    prompt = prompt.replace(f"{{{{{k}}}}}", v)
                # Add a row for each model
                for model in model_aliases:
                    row = {
                        "template": template,
                        "prompt": prompt,
                        **combo,
                        "model": model.value
                    }

                    try:
                        provider = getProviderFromModel(model)
                        response = executor.execute(provider, model.value, prompt, apiKey)
                        row["output"] = response
                    except Exception as e:
                        row["output"] = f"Error: {str(e)}"

                    results.append(row)
        df=pd.DataFrame(results)
        if toEvaluate:

            res = self.evaluate(df,eval =eval ,prompt_template=str(templates[0]))
            return res

        return df

    def evaluateAgents(self, dataframe, model, agents, model_api_key=None,
                       prompt_template="Give answer for the given query: {{query}}"):
        if model.lower() not in ["openai", "google"]:
            raise ValueError("Model must be 'openai' or 'google'")

        # Run unified agent execution
        toolResponseDf = LlumoAgentExecutor.run(dataframe, agents, model=model, model_api_key=model_api_key)
        evals = ["Tool Reliability", "Stepwise Progression", "Tool Selection Accuracy", "Final Task Alignment"]

        for eval in evals:
            # Perform evaluation
            toolResponseDf = self.evaluate(
                toolResponseDf,
                eval = eval,
                prompt_template=prompt_template
            )
        return toolResponseDf

    def evaluateAgentResponses(self, dataframe, prompt_template="Give answer for the given query: {{query}}"):
        try:
            if "query" and "messageHistory" and "tools" not in dataframe.columns:
                raise ValueError("DataFrame must contain 'query', 'messageHistory', and 'tools' columns")
            evals = ["Tool Reliability", "Stepwise Progression", "Tool Selection Accuracy", "Final Task Alignment"]
            toolResponseDf = dataframe.copy()
            for eval in evals:
                # Perform evaluation
                toolResponseDf = self.evaluate(
                    toolResponseDf,
                    eval = eval,
                    prompt_template=prompt_template
                )
            return toolResponseDf

        except Exception as e:
            raise e

    def runDataStream(self, dataframe, streamName:str,queryColName:str="query"):
        results = {}

        try:
            socketID = self.socket.connect(timeout=150)
            # Ensure full connection before proceeding
            max_wait_secs = 20
            waited_secs = 0
            while not self.socket._connection_established.is_set():
                time.sleep(0.1)
                waited_secs += 0.1
                if waited_secs >= max_wait_secs:
                    raise RuntimeError("Timeout waiting for server 'connection-established' event.")
            # print(f"Connected with socket ID: {socketID}")
            rowIdMapping = {}
            try:
                # print(f"Validating API key...")
                self.validateApiKey()
                # print(f"API key validation successful. Hits available: {self.hitsAvailable}")
            except Exception as e:
                print(f"Error during API key validation: {str(e)}")
                if hasattr(e, "response") and getattr(e, "response", None) is not None:
                    print(f"Status code: {e.response.status_code}")
                    print(f"Response content: {e.response.text[:500]}...")
                raise
            # check for available hits and trial limit
            userHits = checkUserHits(self.workspaceID, self.hasSubscribed, self.trialEndDate, self.subscriptionEndDate,
                                     self.hitsAvailable, len(dataframe))

            # do not proceed if subscription or trial limit has exhausted
            if not userHits["success"]:
                raise LlumoAIError.InsufficientCredits(userHits["message"])


            print("====🚀Sit back while we fetch data from the stream 🚀====")
            workspaceID = self.workspaceID
            streamId=getStreamId(workspaceID,self.apiKey,streamName)
            # Prepare all batches before sending
            # print("Preparing batches...")
            self.allBatches = []
            currentBatch = []

            for index, row in dataframe.iterrows():
                activePlayground = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                rowID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")
                columnID = f"{int(time.time() * 1000)}{uuid.uuid4()}".replace("-", "")

                rowIdMapping[rowID] = index
                # Use the server-provided socket ID here
                templateData = {
                    "processID": getProcessID(),
                    "socketID": socketID,
                    "processData": {
                        "executionDependency": {
                            "query": row[queryColName]
                        },
                        "dataStreamID": streamId
                    },
                    "workspaceID": workspaceID,
                    "email": "",
                    "type": "DATA_STREAM",
                    "playgroundID": activePlayground,
                    "processType": "DATA_STREAM",
                    "rowID": rowID,
                    "columnID": columnID,
                    "source": "SDK"
                }

                currentBatch.append(templateData)

                if len(currentBatch) == 10 or index == len(dataframe) - 1:
                    self.allBatches.append(currentBatch)
                    currentBatch = []

            # Post all batches
            total_items = sum(len(batch) for batch in self.allBatches)
            # print(f"Posting {len(self.allBatches)} batches ({total_items} items total)")

            for cnt, batch in enumerate(self.allBatches):
                # print(f"Posting batch {cnt + 1}/{len(self.allBatches)} for eval '{eval}'")
                try:
                    self.postDataStream(batch=batch, workspaceID=workspaceID)
                    # print(f"Batch {cnt + 1} posted successfully")
                except Exception as e:
                    print(f"Error posting batch {cnt + 1}: {str(e)}")
                    continue

                # Small delay between batches to prevent overwhelming the server
                time.sleep(1)

            # updating the dict for row column mapping
            self.AllProcessMapping()
            # Calculate a reasonable timeout based on the data size
            timeout = max(60, min(600, total_items * 10))
            # print(f"All batches posted. Waiting up to {timeout} seconds for results...")

            # Listen for results
            self.socket.listenForResults(min_wait=20, max_wait=timeout, inactivity_timeout=30,expected_results=None)

            # Get results for this evaluation
            eval_results = self.socket.getReceivedData()
            # print(f"Received {len(eval_results)} results for evaluation '{eval}'")

            # Add these results to our overall results
            results["Data Stream"] = self.finalResp(eval_results)
            print(f"=======You are all set! continue your expectations 🚀======\n")

            # print("All evaluations completed successfully")

        except Exception as e:
            print(f"Error during evaluation: {e}")
            raise
        finally:
            # Always disconnect the socket when done
            try:
                self.socket.disconnect()
                # print("Socket disconnected")
            except Exception as e:
                print(f"Error disconnecting socket: {e}")

        for streamName, records in results.items():
            dataframe[streamName] = None
            for item in records:
                for compound_key, value in item.items():
                    # for compound_key, value in item['data'].items():

                    rowID = compound_key.split('-')[0]
                    # looking for the index of each rowID , in the original dataframe
                    if rowID in rowIdMapping:
                        index = rowIdMapping[rowID]
                        # dataframe.at[index, evalName] = value
                        dataframe.at[index, streamName] = value["value"]

                    else:
                        pass
                        # print(f"⚠️ Warning: Could not find rowID {rowID} in mapping")

        return dataframe


    def getId(self,workspaceID,streamName):
        streamId=getStreamId(workspaceID,self.apiKey,streamName)
        return streamId


class SafeDict(dict):
    def __missing__(self, key):
        return ""