openlit 1.34.29__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (168) hide show
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +111 -24
  53. openlit/instrumentation/crewai/async_crewai.py +114 -0
  54. openlit/instrumentation/crewai/crewai.py +104 -131
  55. openlit/instrumentation/crewai/utils.py +615 -0
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +312 -101
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +660 -186
  121. openlit/instrumentation/openai_agents/__init__.py +6 -2
  122. openlit/instrumentation/openai_agents/processor.py +409 -537
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +101 -7
  161. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.29.dist-info/RECORD +0 -166
  167. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
openlit/evals/utils.py CHANGED
@@ -15,6 +15,7 @@ from openlit.semcov import SemanticConvention
15
15
  # Initialize logger for logging potential issues and operations
16
16
  logger = logging.getLogger(__name__)
17
17
 
18
+
18
19
  class JsonOutput(BaseModel):
19
20
  """
20
21
  A model representing the structure of JSON output for prompt injection detection.
@@ -32,9 +33,13 @@ class JsonOutput(BaseModel):
32
33
  classification: str
33
34
  explanation: str
34
35
 
35
- def setup_provider(provider: Optional[str], api_key: Optional[str],
36
- model: Optional[str],
37
- base_url: Optional[str]) -> Tuple[Optional[str], Optional[str], Optional[str]]:
36
+
37
+ def setup_provider(
38
+ provider: Optional[str],
39
+ api_key: Optional[str],
40
+ model: Optional[str],
41
+ base_url: Optional[str],
42
+ ) -> Tuple[Optional[str], Optional[str], Optional[str]]:
38
43
  """
39
44
  Sets up the provider, API key, model, and base URL.
40
45
 
@@ -52,7 +57,7 @@ def setup_provider(provider: Optional[str], api_key: Optional[str],
52
57
  """
53
58
  provider_configs = {
54
59
  "openai": {"env_var": "OPENAI_API_KEY"},
55
- "anthropic": {"env_var": "ANTHROPIC_API_KEY"}
60
+ "anthropic": {"env_var": "ANTHROPIC_API_KEY"},
56
61
  }
57
62
 
58
63
  if provider is None:
@@ -72,12 +77,16 @@ def setup_provider(provider: Optional[str], api_key: Optional[str],
72
77
 
73
78
  if not api_key:
74
79
  # pylint: disable=line-too-long
75
- raise ValueError(f"API key required via 'api_key' parameter or '{env_var}' environment variable")
80
+ raise ValueError(
81
+ f"API key required via 'api_key' parameter or '{env_var}' environment variable"
82
+ )
76
83
 
77
84
  return api_key, model, base_url
78
85
 
79
86
 
80
- def format_prompt(system_prompt: str, prompt: str, contexts: List[str], text: str) -> str:
87
+ def format_prompt(
88
+ system_prompt: str, prompt: str, contexts: List[str], text: str
89
+ ) -> str:
81
90
  """
82
91
  Format the prompt.
83
92
 
@@ -98,6 +107,7 @@ def format_prompt(system_prompt: str, prompt: str, contexts: List[str], text: st
98
107
 
99
108
  return formatted_prompt
100
109
 
110
+
101
111
  def llm_response(provider: str, prompt: str, model: str, base_url: str) -> str:
102
112
  """
103
113
  Generates an LLM response using the configured provider.
@@ -117,6 +127,7 @@ def llm_response(provider: str, prompt: str, model: str, base_url: str) -> str:
117
127
  else:
118
128
  raise ValueError(f"Unsupported provider: {provider}")
119
129
 
130
+
120
131
  def llm_response_openai(prompt: str, model: str, base_url: str) -> str:
121
132
  """
122
133
  Interacts with the OpenAI API to get a LLM response.
@@ -142,10 +153,11 @@ def llm_response_openai(prompt: str, model: str, base_url: str) -> str:
142
153
  {"role": "user", "content": prompt},
143
154
  ],
144
155
  temperature=0.0,
145
- response_format=JsonOutput
156
+ response_format=JsonOutput,
146
157
  )
147
158
  return response.choices[0].message.content
148
159
 
160
+
149
161
  def llm_response_anthropic(prompt: str, model: str) -> str:
150
162
  """
151
163
  Interacts with the Anthropic API to get a LLM response.
@@ -172,23 +184,33 @@ def llm_response_anthropic(prompt: str, model: str) -> str:
172
184
  "verdict": {"type": "string", "description": "Evaluation verdict"},
173
185
  "evaluation": {"type": "string", "description": "Evaluation type"},
174
186
  "score": {"type": "number", "description": "Evaluation score"},
175
- "classification": {"type": "string", "description": "Evaluation category"},
176
- "explanation": {"type": "string", "description": "Evaluation reason"}
187
+ "classification": {
188
+ "type": "string",
189
+ "description": "Evaluation category",
190
+ },
191
+ "explanation": {
192
+ "type": "string",
193
+ "description": "Evaluation reason",
194
+ },
177
195
  },
178
- "required": ["verdict", "evaluation", "score", "classification", "explanation"]
179
- }
196
+ "required": [
197
+ "verdict",
198
+ "evaluation",
199
+ "score",
200
+ "classification",
201
+ "explanation",
202
+ ],
203
+ },
180
204
  }
181
205
  ]
182
206
 
183
207
  response = client.messages.create(
184
208
  model=model,
185
- messages=[
186
- {"role": "user", "content": prompt}
187
- ],
209
+ messages=[{"role": "user", "content": prompt}],
188
210
  max_tokens=2000,
189
211
  temperature=0.0,
190
212
  tools=tools,
191
- stream=False
213
+ stream=False,
192
214
  )
193
215
 
194
216
  for content in response.content:
@@ -198,6 +220,7 @@ def llm_response_anthropic(prompt: str, model: str) -> str:
198
220
 
199
221
  return response
200
222
 
223
+
201
224
  def parse_llm_response(response) -> JsonOutput:
202
225
  """
203
226
  Parses the LLM response into a JsonOutput object.
@@ -220,8 +243,14 @@ def parse_llm_response(response) -> JsonOutput:
220
243
  return JsonOutput(**data)
221
244
  except (json.JSONDecodeError, TypeError) as e:
222
245
  logger.error("Error parsing LLM response: '%s'", e)
223
- return JsonOutput(score=0, classification="none", explanation="none",
224
- verdict="no", evaluation="none")
246
+ return JsonOutput(
247
+ score=0,
248
+ classification="none",
249
+ explanation="none",
250
+ verdict="no",
251
+ evaluation="none",
252
+ )
253
+
225
254
 
226
255
  def eval_metrics():
227
256
  """
@@ -240,11 +269,12 @@ def eval_metrics():
240
269
  guard_requests = meter.create_counter(
241
270
  name=SemanticConvention.EVAL_REQUESTS,
242
271
  description="Counter for evaluation requests",
243
- unit="1"
272
+ unit="1",
244
273
  )
245
274
 
246
275
  return guard_requests
247
276
 
277
+
248
278
  def eval_metric_attributes(verdict, score, validator, classification, explanation):
249
279
  """
250
280
  Initializes OpenTelemetry attributes for metrics.
@@ -260,16 +290,10 @@ def eval_metric_attributes(verdict, score, validator, classification, explanatio
260
290
  """
261
291
 
262
292
  return {
263
- TELEMETRY_SDK_NAME:
264
- "openlit",
265
- SemanticConvention.EVAL_VERDICT:
266
- verdict,
267
- SemanticConvention.EVAL_SCORE:
268
- score,
269
- SemanticConvention.EVAL_VALIDATOR:
270
- validator,
271
- SemanticConvention.EVAL_CLASSIFICATION:
272
- classification,
273
- SemanticConvention.EVAL_EXPLANATION:
274
- explanation,
293
+ TELEMETRY_SDK_NAME: "openlit",
294
+ SemanticConvention.EVAL_VERDICT: verdict,
295
+ SemanticConvention.EVAL_SCORE: score,
296
+ SemanticConvention.EVAL_VALIDATOR: validator,
297
+ SemanticConvention.EVAL_CLASSIFICATION: classification,
298
+ SemanticConvention.EVAL_EXPLANATION: explanation,
275
299
  }
openlit/guard/all.py CHANGED
@@ -12,10 +12,15 @@ from openlit.guard.utils import (
12
12
  parse_llm_response,
13
13
  custom_rule_detection,
14
14
  guard_metrics,
15
- guard_metric_attributes
15
+ guard_metric_attributes,
16
16
  )
17
17
 
18
- def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topics: Optional[List[str]] = None, custom_categories: Optional[Dict[str, str]] = None) -> str:
18
+
19
+ def get_all_system_prompt(
20
+ valid_topics: Optional[List[str]] = None,
21
+ invalid_topics: Optional[List[str]] = None,
22
+ custom_categories: Optional[Dict[str, str]] = None,
23
+ ) -> str:
19
24
  """
20
25
  Returns the system prompt used for LLM analysis to capture prompt injections, valid topics, and sensitive topics.
21
26
 
@@ -68,8 +73,15 @@ def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topi
68
73
 
69
74
  # Append custom categories for prompt injection if provided
70
75
  if custom_categories:
71
- custom_categories_str = "\n".join([f"- {key}: {description}" for key, description in custom_categories.items()])
72
- base_prompt += f"\n Additional Prompt Injection Categories:\n{custom_categories_str}"
76
+ custom_categories_str = "\n".join(
77
+ [
78
+ f"- {key}: {description}"
79
+ for key, description in custom_categories.items()
80
+ ]
81
+ )
82
+ base_prompt += (
83
+ f"\n Additional Prompt Injection Categories:\n{custom_categories_str}"
84
+ )
73
85
 
74
86
  base_prompt += """
75
87
 
@@ -116,8 +128,15 @@ def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topi
116
128
 
117
129
  # Append custom categories for sensitive topics if provided
118
130
  if custom_categories:
119
- custom_categories_str = "\n".join([f"- {key}: {description}" for key, description in custom_categories.items()])
120
- base_prompt += f"\n Additional Sensitive Topics Categories:\n{custom_categories_str}"
131
+ custom_categories_str = "\n".join(
132
+ [
133
+ f"- {key}: {description}"
134
+ for key, description in custom_categories.items()
135
+ ]
136
+ )
137
+ base_prompt += (
138
+ f"\n Additional Sensitive Topics Categories:\n{custom_categories_str}"
139
+ )
121
140
 
122
141
  base_prompt += """
123
142
 
@@ -126,6 +145,7 @@ def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topi
126
145
  """
127
146
  return base_prompt
128
147
 
148
+
129
149
  class All:
130
150
  """
131
151
  A comprehensive class to detect prompt injections, valid/invalid topics, and sensitive topics using LLM or custom rules.
@@ -141,13 +161,18 @@ class All:
141
161
  invalid_topics (Optional[List[str]]): List of invalid topics.
142
162
  """
143
163
 
144
- def __init__(self, provider: Optional[str] = None, api_key: Optional[str] = None,
145
- model: Optional[str] = None, base_url: Optional[str] = None,
146
- custom_rules: Optional[List[dict]] = None,
147
- custom_categories: Optional[Dict[str, str]] = None,
148
- valid_topics: Optional[List[str]] = None,
149
- invalid_topics: Optional[List[str]] = None,
150
- collect_metrics: Optional[bool] = False):
164
+ def __init__(
165
+ self,
166
+ provider: Optional[str] = None,
167
+ api_key: Optional[str] = None,
168
+ model: Optional[str] = None,
169
+ base_url: Optional[str] = None,
170
+ custom_rules: Optional[List[dict]] = None,
171
+ custom_categories: Optional[Dict[str, str]] = None,
172
+ valid_topics: Optional[List[str]] = None,
173
+ invalid_topics: Optional[List[str]] = None,
174
+ collect_metrics: Optional[bool] = False,
175
+ ):
151
176
  """
152
177
  Initializes the All class with specified LLM settings, custom rules, and categories.
153
178
 
@@ -165,8 +190,12 @@ class All:
165
190
  ValueError: If provider is not specified.
166
191
  """
167
192
  self.provider = provider
168
- self.api_key, self.model, self.base_url = setup_provider(provider, api_key, model, base_url)
169
- self.system_prompt = get_all_system_prompt(valid_topics, invalid_topics, custom_categories)
193
+ self.api_key, self.model, self.base_url = setup_provider(
194
+ provider, api_key, model, base_url
195
+ )
196
+ self.system_prompt = get_all_system_prompt(
197
+ valid_topics, invalid_topics, custom_categories
198
+ )
170
199
  self.custom_rules = custom_rules or []
171
200
  self.valid_topics = valid_topics or []
172
201
  self.invalid_topics = invalid_topics or []
@@ -183,18 +212,31 @@ class All:
183
212
  JsonOutput: The structured result of the detection.
184
213
  """
185
214
  custom_rule_result = custom_rule_detection(text, self.custom_rules)
186
- llm_result = JsonOutput(score=0.0, verdict="no", guard="none", classification="none", explanation="none")
215
+ llm_result = JsonOutput(
216
+ score=0.0,
217
+ verdict="no",
218
+ guard="none",
219
+ classification="none",
220
+ explanation="none",
221
+ )
187
222
 
188
223
  if self.provider:
189
224
  prompt = format_prompt(self.system_prompt, text)
190
- llm_result = parse_llm_response(llm_response(self.provider, prompt, self.model, self.base_url))
225
+ llm_result = parse_llm_response(
226
+ llm_response(self.provider, prompt, self.model, self.base_url)
227
+ )
191
228
 
192
229
  result = max(custom_rule_result, llm_result, key=lambda x: x.score)
193
230
 
194
231
  if self.collect_metrics:
195
232
  guard_counter = guard_metrics()
196
- attributes = guard_metric_attributes(result.verdict, result.score, result.guard,
197
- result.classification, result.explanation)
233
+ attributes = guard_metric_attributes(
234
+ result.verdict,
235
+ result.score,
236
+ result.guard,
237
+ result.classification,
238
+ result.explanation,
239
+ )
198
240
  guard_counter.add(1, attributes)
199
241
 
200
242
  return result
@@ -12,9 +12,10 @@ from openlit.guard.utils import (
12
12
  parse_llm_response,
13
13
  custom_rule_detection,
14
14
  guard_metrics,
15
- guard_metric_attributes
15
+ guard_metric_attributes,
16
16
  )
17
17
 
18
+
18
19
  def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str:
19
20
  """
20
21
  Returns the system prompt used for LLM analysis, including custom categories if provided.
@@ -64,7 +65,9 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
64
65
  """
65
66
 
66
67
  if custom_categories:
67
- custom_categories_str = "\n".join([f"- {key}: {value}" for key, value in custom_categories.items()])
68
+ custom_categories_str = "\n".join(
69
+ [f"- {key}: {value}" for key, value in custom_categories.items()]
70
+ )
68
71
  base_prompt += f"\n Additional Categories:\n{custom_categories_str}"
69
72
 
70
73
  base_prompt += """
@@ -73,17 +76,25 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
73
76
  """
74
77
  return base_prompt
75
78
 
79
+
76
80
  class PromptInjection:
77
81
  """Class to intialize Prompt Injection"""
78
82
 
79
- def __init__(self, provider: Optional[str] = None, api_key: Optional[str] = None,
80
- model: Optional[str] = None, base_url: Optional[str] = None,
81
- custom_rules: Optional[List[dict]] = None,
82
- custom_categories: Optional[Dict[str, str]] = None,
83
- threshold_score: float = 0.25,
84
- collect_metrics: Optional[bool] = False):
83
+ def __init__(
84
+ self,
85
+ provider: Optional[str] = None,
86
+ api_key: Optional[str] = None,
87
+ model: Optional[str] = None,
88
+ base_url: Optional[str] = None,
89
+ custom_rules: Optional[List[dict]] = None,
90
+ custom_categories: Optional[Dict[str, str]] = None,
91
+ threshold_score: float = 0.25,
92
+ collect_metrics: Optional[bool] = False,
93
+ ):
85
94
  self.provider = provider
86
- self.api_key, self.model, self.base_url = setup_provider(provider, api_key, model, base_url)
95
+ self.api_key, self.model, self.base_url = setup_provider(
96
+ provider, api_key, model, base_url
97
+ )
87
98
  self.system_prompt = get_system_prompt(custom_categories)
88
99
  self.custom_rules = custom_rules or []
89
100
  self.threshold_score = threshold_score
@@ -93,11 +104,19 @@ class PromptInjection:
93
104
  """Functon to detect Prompt Injection and jailbreak attempts in input"""
94
105
 
95
106
  custom_rule_result = custom_rule_detection(text, self.custom_rules)
96
- llm_result = JsonOutput(score=0, classification="none", explanation="none", verdict="none", guard="none")
107
+ llm_result = JsonOutput(
108
+ score=0,
109
+ classification="none",
110
+ explanation="none",
111
+ verdict="none",
112
+ guard="none",
113
+ )
97
114
 
98
115
  if self.provider:
99
116
  prompt = format_prompt(self.system_prompt, text)
100
- llm_result = parse_llm_response(llm_response(self.provider, prompt, self.model, self.base_url))
117
+ llm_result = parse_llm_response(
118
+ llm_response(self.provider, prompt, self.model, self.base_url)
119
+ )
101
120
 
102
121
  result = max(custom_rule_result, llm_result, key=lambda x: x.score)
103
122
  score = 0 if result.classification == "none" else result.score
@@ -105,8 +124,9 @@ class PromptInjection:
105
124
 
106
125
  if self.collect_metrics is True:
107
126
  guard_counter = guard_metrics()
108
- attributes = guard_metric_attributes(verdict, score, result.guard,
109
- result.classification, result.explanation)
127
+ attributes = guard_metric_attributes(
128
+ verdict, score, result.guard, result.classification, result.explanation
129
+ )
110
130
  guard_counter.add(1, attributes)
111
131
 
112
132
  return JsonOutput(
@@ -114,5 +134,5 @@ class PromptInjection:
114
134
  guard=result.guard,
115
135
  verdict=verdict,
116
136
  classification=result.classification,
117
- explanation=result.explanation
137
+ explanation=result.explanation,
118
138
  )
@@ -11,10 +11,13 @@ from openlit.guard.utils import (
11
11
  llm_response,
12
12
  parse_llm_response,
13
13
  guard_metrics,
14
- guard_metric_attributes
14
+ guard_metric_attributes,
15
15
  )
16
16
 
17
- def get_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topics: Optional[List[str]] = None) -> str:
17
+
18
+ def get_system_prompt(
19
+ valid_topics: Optional[List[str]] = None, invalid_topics: Optional[List[str]] = None
20
+ ) -> str:
18
21
  """
19
22
  Returns the system prompt used for LLM analysis, including valid and invalid topics if provided.
20
23
 
@@ -79,6 +82,7 @@ def get_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topics:
79
82
 
80
83
  return base_prompt
81
84
 
85
+
82
86
  class TopicRestriction:
83
87
  """
84
88
  A class to validate if text belongs to valid or invalid topics using LLM.
@@ -92,12 +96,16 @@ class TopicRestriction:
92
96
  invalid_topics (Optional[List[str]]): List of invalid topics.
93
97
  """
94
98
 
95
- def __init__(self, provider: Optional[str], valid_topics: Optional[List[str]] = None,
96
- api_key: Optional[str] = None, model: Optional[str] = None,
97
- base_url: Optional[str] = None,
98
- invalid_topics: Optional[List[str]] = None,
99
- collect_metrics: Optional[bool] = False,
100
- ):
99
+ def __init__(
100
+ self,
101
+ provider: Optional[str],
102
+ valid_topics: Optional[List[str]] = None,
103
+ api_key: Optional[str] = None,
104
+ model: Optional[str] = None,
105
+ base_url: Optional[str] = None,
106
+ invalid_topics: Optional[List[str]] = None,
107
+ collect_metrics: Optional[bool] = False,
108
+ ):
101
109
  """
102
110
  Initializes the TopicRestriction with specified LLM settings and topics.
103
111
 
@@ -114,12 +122,18 @@ class TopicRestriction:
114
122
  """
115
123
  self.provider = provider
116
124
  if self.provider is None:
117
- raise ValueError("An LLM provider must be specified for TopicRestriction Validator")
118
- self.api_key, self.model, self.base_url = setup_provider(provider, api_key, model, base_url)
125
+ raise ValueError(
126
+ "An LLM provider must be specified for TopicRestriction Validator"
127
+ )
128
+ self.api_key, self.model, self.base_url = setup_provider(
129
+ provider, api_key, model, base_url
130
+ )
119
131
  self.system_prompt = get_system_prompt(valid_topics, invalid_topics)
120
132
  self.valid_topics = valid_topics
121
133
  if self.valid_topics is None:
122
- raise ValueError("Valid Topics must be specified for TopicRestriction Validator")
134
+ raise ValueError(
135
+ "Valid Topics must be specified for TopicRestriction Validator"
136
+ )
123
137
  self.invalid_topics = invalid_topics or []
124
138
  self.collect_metrics = collect_metrics
125
139
 
@@ -139,14 +153,31 @@ class TopicRestriction:
139
153
 
140
154
  # Adjusted logic for consistency with updated JSON structure
141
155
  if llm_result.classification == "valid_topic":
142
- result = JsonOutput(score=0, verdict="no", guard="topic_restriction", classification="valid_topic", explanation="Text fits into a valid topic.")
156
+ result = JsonOutput(
157
+ score=0,
158
+ verdict="no",
159
+ guard="topic_restriction",
160
+ classification="valid_topic",
161
+ explanation="Text fits into a valid topic.",
162
+ )
143
163
  else:
144
- result = JsonOutput(score=1.0, verdict="yes", guard="topic_restriction", classification="invalid_topic", explanation="Text does not match any valid categories.")
164
+ result = JsonOutput(
165
+ score=1.0,
166
+ verdict="yes",
167
+ guard="topic_restriction",
168
+ classification="invalid_topic",
169
+ explanation="Text does not match any valid categories.",
170
+ )
145
171
 
146
172
  if self.collect_metrics:
147
173
  guard_counter = guard_metrics()
148
- attributes = guard_metric_attributes(result.verdict, result.score, result.guard,
149
- result.classification, result.explanation)
174
+ attributes = guard_metric_attributes(
175
+ result.verdict,
176
+ result.score,
177
+ result.guard,
178
+ result.classification,
179
+ result.explanation,
180
+ )
150
181
  guard_counter.add(1, attributes)
151
182
 
152
183
  return result
@@ -12,9 +12,10 @@ from openlit.guard.utils import (
12
12
  parse_llm_response,
13
13
  custom_rule_detection,
14
14
  guard_metrics,
15
- guard_metric_attributes
15
+ guard_metric_attributes,
16
16
  )
17
17
 
18
+
18
19
  def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str:
19
20
  """
20
21
  Returns the system prompt used for LLM analysis, including custom categories if provided.
@@ -61,7 +62,9 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
61
62
  """
62
63
 
63
64
  if custom_categories:
64
- custom_categories_str = "\n".join([f"- {key}: {value}" for key, value in custom_categories.items()])
65
+ custom_categories_str = "\n".join(
66
+ [f"- {key}: {value}" for key, value in custom_categories.items()]
67
+ )
65
68
  base_prompt += f"\n Additional Categories:\n{custom_categories_str}"
66
69
 
67
70
  base_prompt += """
@@ -71,6 +74,7 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
71
74
 
72
75
  return base_prompt
73
76
 
77
+
74
78
  class SensitiveTopic:
75
79
  """
76
80
  A class to detect sensitive topics using LLM or custom rules.
@@ -84,14 +88,21 @@ class SensitiveTopic:
84
88
  custom_categories (Optional[Dict[str, str]]): Additional categories for sensitive topics.
85
89
  """
86
90
 
87
- def __init__(self, provider: Optional[str] = None, api_key: Optional[str] = None,
88
- model: Optional[str] = None, base_url: Optional[str] = None,
89
- custom_rules: Optional[List[dict]] = None,
90
- custom_categories: Optional[Dict[str, str]] = None,
91
- threshold_score: float = 0.25,
92
- collect_metrics: Optional[bool] = False):
91
+ def __init__(
92
+ self,
93
+ provider: Optional[str] = None,
94
+ api_key: Optional[str] = None,
95
+ model: Optional[str] = None,
96
+ base_url: Optional[str] = None,
97
+ custom_rules: Optional[List[dict]] = None,
98
+ custom_categories: Optional[Dict[str, str]] = None,
99
+ threshold_score: float = 0.25,
100
+ collect_metrics: Optional[bool] = False,
101
+ ):
93
102
  self.provider = provider
94
- self.api_key, self.model, self.base_url = setup_provider(provider, api_key, model, base_url)
103
+ self.api_key, self.model, self.base_url = setup_provider(
104
+ provider, api_key, model, base_url
105
+ )
95
106
  self.system_prompt = get_system_prompt(custom_categories)
96
107
  self.custom_rules = custom_rules or []
97
108
  self.threshold_score = threshold_score
@@ -101,11 +112,19 @@ class SensitiveTopic:
101
112
  """Function to detect sensitive topic in AI response"""
102
113
 
103
114
  custom_rule_result = custom_rule_detection(text, self.custom_rules)
104
- llm_result = JsonOutput(score=0, classification="none", explanation="none", verdict="no", guard="none")
115
+ llm_result = JsonOutput(
116
+ score=0,
117
+ classification="none",
118
+ explanation="none",
119
+ verdict="no",
120
+ guard="none",
121
+ )
105
122
 
106
123
  if self.provider:
107
124
  prompt = format_prompt(self.system_prompt, text)
108
- llm_result = parse_llm_response(llm_response(self.provider, prompt, self.model, self.base_url))
125
+ llm_result = parse_llm_response(
126
+ llm_response(self.provider, prompt, self.model, self.base_url)
127
+ )
109
128
 
110
129
  result = max(custom_rule_result, llm_result, key=lambda x: x.score)
111
130
  score = 0 if result.classification == "none" else result.score
@@ -113,8 +132,9 @@ class SensitiveTopic:
113
132
 
114
133
  if self.collect_metrics:
115
134
  guard_counter = guard_metrics()
116
- attributes = guard_metric_attributes(verdict, score, result.guard,
117
- result.classification, result.explanation)
135
+ attributes = guard_metric_attributes(
136
+ verdict, score, result.guard, result.classification, result.explanation
137
+ )
118
138
  guard_counter.add(1, attributes)
119
139
 
120
140
  return JsonOutput(
@@ -122,5 +142,5 @@ class SensitiveTopic:
122
142
  guard=result.guard,
123
143
  verdict=verdict,
124
144
  classification=result.classification,
125
- explanation=result.explanation
145
+ explanation=result.explanation,
126
146
  )