lexsi-sdk 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. lexsi_sdk/__init__.py +5 -0
  2. lexsi_sdk/client/__init__.py +0 -0
  3. lexsi_sdk/client/client.py +176 -0
  4. lexsi_sdk/common/__init__.py +0 -0
  5. lexsi_sdk/common/config/.env.prod +3 -0
  6. lexsi_sdk/common/constants.py +143 -0
  7. lexsi_sdk/common/enums.py +8 -0
  8. lexsi_sdk/common/environment.py +49 -0
  9. lexsi_sdk/common/monitoring.py +81 -0
  10. lexsi_sdk/common/trigger.py +75 -0
  11. lexsi_sdk/common/types.py +122 -0
  12. lexsi_sdk/common/utils.py +93 -0
  13. lexsi_sdk/common/validation.py +110 -0
  14. lexsi_sdk/common/xai_uris.py +197 -0
  15. lexsi_sdk/core/__init__.py +0 -0
  16. lexsi_sdk/core/agent.py +62 -0
  17. lexsi_sdk/core/alert.py +56 -0
  18. lexsi_sdk/core/case.py +618 -0
  19. lexsi_sdk/core/dashboard.py +131 -0
  20. lexsi_sdk/core/guardrails/__init__.py +0 -0
  21. lexsi_sdk/core/guardrails/guard_template.py +299 -0
  22. lexsi_sdk/core/guardrails/guardrail_autogen.py +554 -0
  23. lexsi_sdk/core/guardrails/guardrails_langgraph.py +525 -0
  24. lexsi_sdk/core/guardrails/guardrails_openai.py +541 -0
  25. lexsi_sdk/core/guardrails/openai_runner.py +1328 -0
  26. lexsi_sdk/core/model_summary.py +110 -0
  27. lexsi_sdk/core/organization.py +549 -0
  28. lexsi_sdk/core/project.py +5131 -0
  29. lexsi_sdk/core/synthetic.py +387 -0
  30. lexsi_sdk/core/text.py +595 -0
  31. lexsi_sdk/core/tracer.py +208 -0
  32. lexsi_sdk/core/utils.py +36 -0
  33. lexsi_sdk/core/workspace.py +325 -0
  34. lexsi_sdk/core/wrapper.py +766 -0
  35. lexsi_sdk/core/xai.py +306 -0
  36. lexsi_sdk/version.py +34 -0
  37. lexsi_sdk-0.1.16.dist-info/METADATA +100 -0
  38. lexsi_sdk-0.1.16.dist-info/RECORD +40 -0
  39. lexsi_sdk-0.1.16.dist-info/WHEEL +5 -0
  40. lexsi_sdk-0.1.16.dist-info/top_level.txt +1 -0
lexsi_sdk/core/case.py ADDED
@@ -0,0 +1,618 @@
+ from __future__ import annotations
+ from io import BytesIO
+ from typing import Dict, List, Optional
+ from pydantic import BaseModel, ConfigDict
+ import plotly.graph_objects as go
+ import pandas as pd
+ from IPython.display import SVG, display
+ from lexsi_sdk.client.client import APIClient
+ from lexsi_sdk.common.xai_uris import EXPLAINABILITY_SUMMARY, GET_TRIGGERS_DAYS_URI
+ import base64
+ from PIL import Image
+
+
+ class Case(BaseModel):
+     """Represents a single explainability case with plotting helpers."""
+     status: str
+     true_value: str | int
+     pred_value: str | int
+     pred_category: str | int
+     observations: List
+     shap_feature_importance: Optional[Dict] = {}
+     lime_feature_importance: Optional[Dict] = {}
+     ig_features_importance: Optional[Dict] = {}
+     dlb_feature_importance: Optional[Dict] = {}
+     similar_cases: List
+     is_automl_prediction: Optional[bool] = False
+     model_name: str
+     case_prediction_path: Optional[str] = ""
+     case_prediction_svg: Optional[str] = ""
+     observation_checklist: Optional[List] = []
+     policy_checklist: Optional[List] = []
+     final_decision: Optional[str] = ""
+     unique_identifier: Optional[str] = ""
+     tag: Optional[str] = ""
+     created_at: Optional[str] = ""
+     data: Optional[Dict] = {}
+     similar_cases_data: Optional[List] = []
+     audit_trail: Optional[Dict] = {}
+     project_name: Optional[str] = ""
+     image_data: Optional[Dict] = {}
+     data_id: Optional[str] = ""
+     summary: Optional[str] = ""
+     model_config = ConfigDict(protected_namespaces=())
+
+     api_client: APIClient
+
+     def __init__(self, **kwargs):
+         """Capture API client used to fetch additional explainability data."""
+         super().__init__(**kwargs)
+         self.api_client = kwargs.get("api_client")
+
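For orientation, here is a minimal sketch of constructing a Case by hand. In practice instances are produced by other SDK calls (e.g., in project.py), so the construction shown, including the APIClient arguments and the example field values, is an assumption rather than the documented entry point.

    # Hypothetical construction for illustration; real Case objects are
    # normally built by the SDK from API responses.
    from lexsi_sdk.client.client import APIClient
    from lexsi_sdk.core.case import Case

    client = APIClient()  # assumed default construction; real arguments may differ
    case = Case(
        api_client=client,
        status="processed",
        true_value="approved",
        pred_value="approved",
        pred_category="approved",
        observations=[],
        similar_cases=[],
        model_name="credit_risk_v1",  # hypothetical model name
        shap_feature_importance={"income": 0.42, "age": -0.13},
    )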
+     def explainability_shap_feature_importance(self):
+         """Plots Shap Feature Importance chart"""
+         fig = go.Figure()
+
+         if len(list(self.shap_feature_importance.values())) < 1:
+             return "No Shap Feature Importance for the case"
+
+         if isinstance(list(self.shap_feature_importance.values())[0], dict):
+             for col in self.shap_feature_importance.keys():
+                 fig.add_trace(
+                     go.Bar(
+                         x=list(self.shap_feature_importance[col].values()),
+                         y=list(self.shap_feature_importance[col].keys()),
+                         orientation="h",
+                         name=col,
+                     )
+                 )
+         else:
+             fig.add_trace(
+                 go.Bar(
+                     x=list(self.shap_feature_importance.values()),
+                     y=list(self.shap_feature_importance.keys()),
+                     orientation="h",
+                 )
+             )
+         fig.update_layout(
+             barmode="relative",
+             height=800,
+             width=800,
+             yaxis_autorange="reversed",
+             bargap=0.01,
+             legend_orientation="h",
+             legend_x=0.1,
+             legend_y=1.1,
+         )
+         fig.show(config={"displaylogo": False})
+
+     def explainability_ig_feature_importance(self):
+         """Plots IG Feature Importance chart"""
+         fig = go.Figure()
+
+         if len(list(self.ig_features_importance.values())) < 1:
+             return "No IG Feature Importance for the case"
+
+         if isinstance(list(self.ig_features_importance.values())[0], dict):
+             for col in self.ig_features_importance.keys():
+                 fig.add_trace(
+                     go.Bar(
+                         x=list(self.ig_features_importance[col].values()),
+                         y=list(self.ig_features_importance[col].keys()),
+                         orientation="h",
+                         name=col,
+                     )
+                 )
+         else:
+             fig.add_trace(
+                 go.Bar(
+                     x=list(self.ig_features_importance.values()),
+                     y=list(self.ig_features_importance.keys()),
+                     orientation="h",
+                 )
+             )
+         fig.update_layout(
+             barmode="relative",
+             height=800,
+             width=800,
+             yaxis_autorange="reversed",
+             bargap=0.01,
+             legend_orientation="h",
+             legend_x=0.1,
+             legend_y=1.1,
+         )
+         fig.show(config={"displaylogo": False})
+
+     def explainability_lime_feature_importance(self):
+         """Plots Lime Feature Importance chart"""
+         fig = go.Figure()
+
+         if len(list(self.lime_feature_importance.values())) < 1:
+             return "No Lime Feature Importance for the case"
+
+         if isinstance(list(self.lime_feature_importance.values())[0], dict):
+             for col in self.lime_feature_importance.keys():
+                 fig.add_trace(
+                     go.Bar(
+                         x=list(self.lime_feature_importance[col].values()),
+                         y=list(self.lime_feature_importance[col].keys()),
+                         orientation="h",
+                         name=col,
+                     )
+                 )
+         else:
+             fig.add_trace(
+                 go.Bar(
+                     x=list(self.lime_feature_importance.values()),
+                     y=list(self.lime_feature_importance.keys()),
+                     orientation="h",
+                 )
+             )
+         fig.update_layout(
+             barmode="relative",
+             height=800,
+             width=800,
+             yaxis_autorange="reversed",
+             bargap=0.01,
+             legend_orientation="h",
+             legend_x=0.1,
+             legend_y=1.1,
+         )
+         fig.show(config={"displaylogo": False})
+
+     def explainability_dlb_feature_importance(self):
+         """Plots DLB Feature Importance chart"""
+         fig = go.Figure()
+         if len(list(self.dlb_feature_importance.values())) < 1:
+             return "No DLB Feature Importance for the case"
+
+         if isinstance(list(self.dlb_feature_importance.values())[0], dict):
+             for col in self.dlb_feature_importance.keys():
+                 fig.add_trace(
+                     go.Bar(
+                         x=list(self.dlb_feature_importance[col].values()),
+                         y=list(self.dlb_feature_importance[col].keys()),
+                         orientation="h",
+                         name=col,
+                     )
+                 )
+         else:
+             fig.add_trace(
+                 go.Bar(
+                     x=list(self.dlb_feature_importance.values()),
+                     y=list(self.dlb_feature_importance.keys()),
+                     orientation="h",
+                 )
+             )
+         fig.update_layout(
+             barmode="relative",
+             height=800,
+             width=800,
+             yaxis_autorange="reversed",
+             bargap=0.01,
+             legend_orientation="h",
+             legend_x=0.1,
+             legend_y=1.1,
+         )
+         fig.show(config={"displaylogo": False})
+
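The four plotting helpers above share one shape: when the corresponding importance dict is empty they return a message string instead of rendering, and when the first value is itself a dict they draw one grouped bar trace per column. A usage sketch, assuming `case` is a populated Case:

    # Each helper either renders a horizontal bar chart or returns a message.
    for plot in (
        case.explainability_shap_feature_importance,
        case.explainability_lime_feature_importance,
        case.explainability_ig_feature_importance,
        case.explainability_dlb_feature_importance,
    ):
        msg = plot()
        if msg:  # a non-None result is the "no data" message
            print(msg)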
+     def explainability_prediction_path(self):
+         """Explainability Prediction Path"""
+         svg = SVG(self.case_prediction_svg)
+         display(svg)
+
+     def explainability_raw_data(self) -> pd.DataFrame:
+         """Explainability Raw Data
+
+         :return: raw data dataframe
+         """
+         raw_data_df = (
+             pd.DataFrame([self.data])
+             .transpose()
+             .reset_index()
+             .rename(columns={"index": "Feature", 0: "Value"})
+         )
+         return raw_data_df
+
+     def explainability_observations(self) -> pd.DataFrame:
+         """Explainability Observations
+
+         :return: observations dataframe
+         """
+         observations_df = pd.DataFrame(self.observation_checklist)
+
+         return observations_df
+
+     def explainability_policies(self) -> pd.DataFrame:
+         """Explainability Policies
+
+         :return: policies dataframe
+         """
+         policy_df = pd.DataFrame(self.policy_checklist)
+
+         return policy_df
+
+     def explainability_decision(self) -> pd.DataFrame:
+         """Explainability Decision
+
+         :return: decision dataframe
+         """
+         data = {
+             "True Value": self.true_value,
+             "Prediction Value": self.pred_value,
+             "Prediction Category": self.pred_category,
+             "Final Prediction": self.final_decision,
+         }
+         decision_df = pd.DataFrame([data])
+
+         return decision_df
+
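The tabular accessors return plain pandas DataFrames, so they compose with ordinary pandas operations. A sketch, assuming `case` as above:

    raw_df = case.explainability_raw_data()       # columns: Feature, Value
    decision_df = case.explainability_decision()  # one row: true vs predicted vs final
    obs_df = case.explainability_observations()
    print(decision_df[["True Value", "Final Prediction"]])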
+     def explainability_similar_cases(self) -> pd.DataFrame | str:
+         """Similar Cases
+
+         :return: similar cases dataframe
+         """
+         if not self.similar_cases_data:
+             return "No similar cases found; ensure 'similar_cases' is included in the case_info() components"
+
+         similar_cases_df = pd.DataFrame(self.similar_cases_data)
+         return similar_cases_df
+
+     def explainability_gradcam(self):
+         """Visualize Grad-CAM results for image explanations."""
+         if not self.image_data.get("gradcam", None):
+             return "No Gradcam method found for this case"
+         fig = go.Figure()
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("gradcam", {}).get("heatmap"),
+                 xref="x",
+                 yref="y",
+                 x=0,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("gradcam", {}).get("superimposed"),
+                 xref="x",
+                 yref="y",
+                 x=1.2,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.add_annotation(
+             x=0.5,
+             y=0.1,
+             text="Attributions",
+             showarrow=False,
+             font=dict(size=16),
+             xref="x",
+             yref="y",
+         )
+         fig.add_annotation(
+             x=1.7,
+             y=0.1,
+             text="Superimposed",
+             showarrow=False,
+             font=dict(size=16),
+             xref="x",
+             yref="y",
+         )
+         fig.update_layout(
+             xaxis=dict(visible=False, range=[0, 2.5]),
+             yaxis=dict(visible=False, range=[0, 1]),
+             margin=dict(l=30, r=30, t=30, b=30),
+         )
+
+         fig.show(config={"displaylogo": False})
+
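Judging from the keys this method reads, `image_data["gradcam"]` is expected to hold two renderable image sources (anything Plotly accepts as a layout image, such as a data URI or a PIL image). The payload below is a hypothetical shape for illustration, not a documented schema:

    case.image_data = {
        "gradcam": {
            "heatmap": "data:image/png;base64,...",       # attribution heatmap
            "superimposed": "data:image/png;base64,...",  # heatmap over the input
        }
    }
    case.explainability_gradcam()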
+     def explainability_shap(self):
+         """Render SHAP attribution plot for image cases."""
+         if not self.image_data.get("shap", None):
+             return "No Shap method found for this case"
+         fig = go.Figure()
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("shap", {}).get("plot"),
+                 xref="x",
+                 yref="y",
+                 x=0,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.update_layout(
+             xaxis=dict(visible=False, range=[0, 2.5]),
+             yaxis=dict(visible=False, range=[0, 1]),
+             margin=dict(l=30, r=30, t=30, b=30),
+         )
+
+         fig.show(config={"displaylogo": False})
+
+     def explainability_lime(self):
+         """Render LIME attribution plot for image cases."""
+         if not self.image_data.get("lime", None):
+             return "No Lime method found for this case"
+         fig = go.Figure()
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("lime", {}).get("plot"),
+                 xref="x",
+                 yref="y",
+                 x=0,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.update_layout(
+             xaxis=dict(visible=False, range=[0, 2.5]),
+             yaxis=dict(visible=False, range=[0, 1]),
+             margin=dict(l=30, r=30, t=30, b=30),
+         )
+
+         fig.show(config={"displaylogo": False})
+
+     def explainability_integrated_gradients(self):
+         """Render Integrated Gradients attribution plots."""
+         if not self.image_data.get("integrated_gradients", None):
+             return "No Integrated Gradients method found for this case"
+         fig = go.Figure()
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("integrated_gradients", {}).get(
+                     "attributions"
+                 ),
+                 xref="x",
+                 yref="y",
+                 x=0,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("integrated_gradients", {}).get(
+                     "positive_attributions"
+                 ),
+                 xref="x",
+                 yref="y",
+                 x=1.2,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.add_layout_image(
+             dict(
+                 source=self.image_data.get("integrated_gradients", {}).get(
+                     "negative_attributions"
+                 ),
+                 xref="x",
+                 yref="y",
+                 x=2.4,
+                 y=1,
+                 sizex=1,
+                 sizey=1,
+                 xanchor="left",
+                 yanchor="top",
+                 layer="below",
+             )
+         )
+
+         fig.add_annotation(
+             x=0.5,
+             y=0.1,
+             text="Attributions",
+             showarrow=False,
+             font=dict(size=16),
+             xref="x",
+             yref="y",
+         )
+         fig.add_annotation(
+             x=1.7,
+             y=0.1,
+             text="Positive Attributions",
+             showarrow=False,
+             font=dict(size=16),
+             xref="x",
+             yref="y",
+         )
+         fig.add_annotation(
+             x=2.9,
+             y=0.1,
+             text="Negative Attributions",
+             showarrow=False,
+             font=dict(size=16),
+             xref="x",
+             yref="y",
+         )
+         fig.update_layout(
+             # Widened from [0, 2.5] so the third panel and its annotation are visible.
+             xaxis=dict(visible=False, range=[0, 3.5]),
+             yaxis=dict(visible=False, range=[0, 1]),
+             margin=dict(l=30, r=30, t=30, b=30),
+         )
+
+         fig.show(config={"displaylogo": False})
+
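The three image explainers follow one naming convention, so they can be dispatched generically. A sketch, with the per-method keys inferred from the code above ("plot" for SHAP and LIME; "attributions", "positive_attributions", and "negative_attributions" for Integrated Gradients):

    for method in ("shap", "lime", "integrated_gradients"):
        if case.image_data.get(method):
            getattr(case, f"explainability_{method}")()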
+     def alerts_trail(self, page_num: Optional[int] = 1, days: Optional[int] = 7):
+         """Fetch alerts for this case over the given window."""
+         if days == 7:
+             return pd.DataFrame(self.audit_trail.get("alerts", {}))
+         resp = self.api_client.post(
+             f"{GET_TRIGGERS_DAYS_URI}?project_name={self.project_name}"
+             f"&page_num={page_num}&days={days}"
+         )
+         if resp.get("details"):
+             return pd.DataFrame(resp.get("details"))
+         else:
+             return "No alerts found."
+
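Note the split in alerts_trail: the default seven-day window is served from the cached audit trail, while any other window triggers a POST to GET_TRIGGERS_DAYS_URI. A sketch:

    recent = case.alerts_trail()                    # days=7: cached audit trail
    older = case.alerts_trail(page_num=1, days=30)  # other windows call the API
    if isinstance(older, str):                      # a string means no alerts found
        print(older)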
+     def audit(self):
+         """Return stored audit trail."""
+         return self.audit_trail
+
+     def feature_importance(self, feature: str):
+         """Return feature importance values for a specific feature."""
+         if self.shap_feature_importance:
+             return self.shap_feature_importance.get(feature, {})
+         elif self.lime_feature_importance:
+             return self.lime_feature_importance.get(feature, {})
+         elif self.ig_features_importance:
+             return self.ig_features_importance.get(feature, {})
+         else:
+             return "No Feature Importance found for the case"
+
+     def explainability_summary(self):
+         """Request or return cached explainability summary text."""
+         if self.data_id and not self.summary:
+             payload = {
+                 "project_name": self.project_name,
+                 "viewed_case_id": self.data_id,
+             }
+             res = self.api_client.post(EXPLAINABILITY_SUMMARY, payload)
+             if not res.get("success"):
+                 raise Exception(res.get("details", "Failed to summarize"))
+
+             self.summary = res.get("details")
+             return res.get("details")
+
+         return self.summary
+
+
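explainability_summary memoizes: the first call (when data_id is set and no summary is cached) POSTs to EXPLAINABILITY_SUMMARY and stores the result on self.summary; later calls return the cached text. feature_importance falls back through SHAP, then LIME, then IG, in that order. A sketch:

    summary = case.explainability_summary()        # first call hits the API
    summary = case.explainability_summary()        # subsequent calls use the cache
    income_fi = case.feature_importance("income")  # SHAP -> LIME -> IG fallback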
+ class CaseText(BaseModel):
+     """Represents text explainability output for a generated case."""
+     model_name: str
+     status: str
+     prompt: str
+     output: str
+     explainability: Optional[Dict] = {}
+     audit_trail: Optional[Dict] = {}
+
+     # Renamed from prompt()/output(): methods sharing a name with the pydantic
+     # fields above would shadow them and break attribute access.
+     def get_prompt(self):
+         """Get prompt"""
+         return self.prompt
+
+     def get_output(self):
+         """Get output"""
+         return self.output
+
+     def explainability_raw_data(self) -> pd.DataFrame:
+         """Explainability Raw Data
+
+         :return: raw data dataframe
+         """
+         raw_data_df = (
+             pd.DataFrame([self.explainability.get("feature_importance", {})])
+             .transpose()
+             .reset_index()
+             .rename(columns={"index": "Feature", 0: "Value"})
+             .sort_values(by="Value", ascending=False)
+         )
+         return raw_data_df
+
+     def explainability_feature_importance(self):
+         """Plots Feature Importance chart"""
+         fig = go.Figure()
+         feature_importance = self.explainability.get("feature_importance", {})
+
+         if not feature_importance:
+             return "No Feature Importance for the case"
+         raw_data_df = (
+             pd.DataFrame([feature_importance])
+             .transpose()
+             .reset_index()
+             .rename(columns={"index": "Feature", 0: "Value"})
+             .sort_values(by="Value", ascending=False)
+         )
+         fig.add_trace(
+             go.Bar(
+                 x=raw_data_df["Value"],
+                 y=raw_data_df["Feature"],
+                 orientation="h",
+             )
+         )
+         fig.update_layout(
+             barmode="relative",
+             height=max(400, len(raw_data_df) * 20),
+             width=800,
+             yaxis=dict(
+                 autorange="reversed",
+                 tickmode="array",
+                 tickvals=list(raw_data_df["Feature"]),
+                 ticktext=list(raw_data_df["Feature"]),
+                 tickfont=dict(size=10),
+             ),
+             bargap=0.01,
+             margin=dict(l=150, r=20, t=30, b=30),
+             legend_orientation="h",
+             legend_x=0.1,
+             legend_y=0.5,
+         )
+
+         fig.show(config={"displaylogo": False})
+
+     def network_graph(self):
+         """Decode and return a base64-encoded network graph image."""
+         network_graph_data = self.explainability.get("network_graph", {})
+         if not network_graph_data:
+             return "No Network graph found for this case"
+         try:
+             img_bytes = base64.b64decode(network_graph_data)
+             image = Image.open(BytesIO(img_bytes))
+             return image
+         except Exception as e:
+             print(f"Error decoding base64 image: {e}")
+             return None
+
+     def token_attribution_graph(self):
+         """Decode and return a base64-encoded token attribution graph."""
+         relevance_data = self.explainability.get("relevance", {})
+         if not relevance_data:
+             return "No Token Attribution graph found for this case"
+         try:
+             img_bytes = base64.b64decode(relevance_data)
+             image = Image.open(BytesIO(img_bytes))
+             return image
+         except Exception as e:
+             print(f"Error decoding base64 image: {e}")
+             return None
+
+     def audit(self):
+         """Return audit details for the text case."""
+         return self.audit_trail
+
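Finally, a usage sketch for CaseText; how a `text_case` instance is obtained (presumably via the SDK's text APIs in text.py) is an assumption here:

    from PIL import Image

    df = text_case.explainability_raw_data()  # feature importances, sorted descending
    img = text_case.network_graph()           # PIL image, a message string, or None
    if isinstance(img, Image.Image):          # decode succeeded
        img.save("network_graph.png")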