openlit 1.34.29__py3-none-any.whl → 1.34.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +235 -86
- openlit/__init__.py +16 -13
- openlit/_instrumentors.py +2 -1
- openlit/evals/all.py +50 -21
- openlit/evals/bias_detection.py +47 -20
- openlit/evals/hallucination.py +53 -22
- openlit/evals/toxicity.py +50 -21
- openlit/evals/utils.py +54 -30
- openlit/guard/all.py +61 -19
- openlit/guard/prompt_injection.py +34 -14
- openlit/guard/restrict_topic.py +46 -15
- openlit/guard/sensitive_topic.py +34 -14
- openlit/guard/utils.py +58 -22
- openlit/instrumentation/ag2/__init__.py +24 -8
- openlit/instrumentation/ag2/ag2.py +34 -13
- openlit/instrumentation/ag2/async_ag2.py +34 -13
- openlit/instrumentation/ag2/utils.py +133 -30
- openlit/instrumentation/ai21/__init__.py +43 -14
- openlit/instrumentation/ai21/ai21.py +47 -21
- openlit/instrumentation/ai21/async_ai21.py +47 -21
- openlit/instrumentation/ai21/utils.py +299 -78
- openlit/instrumentation/anthropic/__init__.py +21 -4
- openlit/instrumentation/anthropic/anthropic.py +28 -17
- openlit/instrumentation/anthropic/async_anthropic.py +28 -17
- openlit/instrumentation/anthropic/utils.py +145 -35
- openlit/instrumentation/assemblyai/__init__.py +11 -2
- openlit/instrumentation/assemblyai/assemblyai.py +15 -4
- openlit/instrumentation/assemblyai/utils.py +120 -25
- openlit/instrumentation/astra/__init__.py +43 -10
- openlit/instrumentation/astra/astra.py +28 -5
- openlit/instrumentation/astra/async_astra.py +28 -5
- openlit/instrumentation/astra/utils.py +151 -55
- openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/utils.py +307 -83
- openlit/instrumentation/bedrock/__init__.py +21 -4
- openlit/instrumentation/bedrock/bedrock.py +63 -25
- openlit/instrumentation/bedrock/utils.py +139 -30
- openlit/instrumentation/chroma/__init__.py +89 -16
- openlit/instrumentation/chroma/chroma.py +28 -6
- openlit/instrumentation/chroma/utils.py +167 -51
- openlit/instrumentation/cohere/__init__.py +63 -18
- openlit/instrumentation/cohere/async_cohere.py +63 -24
- openlit/instrumentation/cohere/cohere.py +63 -24
- openlit/instrumentation/cohere/utils.py +286 -73
- openlit/instrumentation/controlflow/__init__.py +35 -9
- openlit/instrumentation/controlflow/controlflow.py +66 -33
- openlit/instrumentation/crawl4ai/__init__.py +25 -10
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
- openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
- openlit/instrumentation/crewai/__init__.py +111 -24
- openlit/instrumentation/crewai/async_crewai.py +114 -0
- openlit/instrumentation/crewai/crewai.py +104 -131
- openlit/instrumentation/crewai/utils.py +615 -0
- openlit/instrumentation/dynamiq/__init__.py +46 -12
- openlit/instrumentation/dynamiq/dynamiq.py +74 -33
- openlit/instrumentation/elevenlabs/__init__.py +23 -4
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/utils.py +128 -25
- openlit/instrumentation/embedchain/__init__.py +11 -2
- openlit/instrumentation/embedchain/embedchain.py +68 -35
- openlit/instrumentation/firecrawl/__init__.py +24 -7
- openlit/instrumentation/firecrawl/firecrawl.py +46 -20
- openlit/instrumentation/google_ai_studio/__init__.py +45 -10
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/utils.py +180 -67
- openlit/instrumentation/gpt4all/__init__.py +22 -7
- openlit/instrumentation/gpt4all/gpt4all.py +67 -29
- openlit/instrumentation/gpt4all/utils.py +285 -61
- openlit/instrumentation/gpu/__init__.py +128 -47
- openlit/instrumentation/groq/__init__.py +21 -4
- openlit/instrumentation/groq/async_groq.py +33 -21
- openlit/instrumentation/groq/groq.py +33 -21
- openlit/instrumentation/groq/utils.py +192 -55
- openlit/instrumentation/haystack/__init__.py +70 -24
- openlit/instrumentation/haystack/async_haystack.py +28 -6
- openlit/instrumentation/haystack/haystack.py +28 -6
- openlit/instrumentation/haystack/utils.py +196 -74
- openlit/instrumentation/julep/__init__.py +69 -19
- openlit/instrumentation/julep/async_julep.py +53 -27
- openlit/instrumentation/julep/julep.py +53 -28
- openlit/instrumentation/langchain/__init__.py +74 -63
- openlit/instrumentation/langchain/callback_handler.py +1100 -0
- openlit/instrumentation/langchain_community/__init__.py +13 -2
- openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/utils.py +35 -9
- openlit/instrumentation/letta/__init__.py +68 -15
- openlit/instrumentation/letta/letta.py +99 -54
- openlit/instrumentation/litellm/__init__.py +43 -14
- openlit/instrumentation/litellm/async_litellm.py +51 -26
- openlit/instrumentation/litellm/litellm.py +51 -26
- openlit/instrumentation/litellm/utils.py +312 -101
- openlit/instrumentation/llamaindex/__init__.py +267 -90
- openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/utils.py +204 -91
- openlit/instrumentation/mem0/__init__.py +11 -2
- openlit/instrumentation/mem0/mem0.py +50 -29
- openlit/instrumentation/milvus/__init__.py +10 -2
- openlit/instrumentation/milvus/milvus.py +31 -6
- openlit/instrumentation/milvus/utils.py +166 -67
- openlit/instrumentation/mistral/__init__.py +63 -18
- openlit/instrumentation/mistral/async_mistral.py +63 -24
- openlit/instrumentation/mistral/mistral.py +63 -24
- openlit/instrumentation/mistral/utils.py +277 -69
- openlit/instrumentation/multion/__init__.py +69 -19
- openlit/instrumentation/multion/async_multion.py +57 -26
- openlit/instrumentation/multion/multion.py +57 -26
- openlit/instrumentation/ollama/__init__.py +39 -18
- openlit/instrumentation/ollama/async_ollama.py +57 -26
- openlit/instrumentation/ollama/ollama.py +57 -26
- openlit/instrumentation/ollama/utils.py +226 -50
- openlit/instrumentation/openai/__init__.py +156 -32
- openlit/instrumentation/openai/async_openai.py +147 -67
- openlit/instrumentation/openai/openai.py +150 -67
- openlit/instrumentation/openai/utils.py +660 -186
- openlit/instrumentation/openai_agents/__init__.py +6 -2
- openlit/instrumentation/openai_agents/processor.py +409 -537
- openlit/instrumentation/phidata/__init__.py +13 -5
- openlit/instrumentation/phidata/phidata.py +67 -32
- openlit/instrumentation/pinecone/__init__.py +48 -9
- openlit/instrumentation/pinecone/async_pinecone.py +27 -5
- openlit/instrumentation/pinecone/pinecone.py +27 -5
- openlit/instrumentation/pinecone/utils.py +153 -47
- openlit/instrumentation/premai/__init__.py +22 -7
- openlit/instrumentation/premai/premai.py +51 -26
- openlit/instrumentation/premai/utils.py +246 -59
- openlit/instrumentation/pydantic_ai/__init__.py +49 -22
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
- openlit/instrumentation/pydantic_ai/utils.py +89 -24
- openlit/instrumentation/qdrant/__init__.py +19 -4
- openlit/instrumentation/qdrant/async_qdrant.py +33 -7
- openlit/instrumentation/qdrant/qdrant.py +33 -7
- openlit/instrumentation/qdrant/utils.py +228 -93
- openlit/instrumentation/reka/__init__.py +23 -10
- openlit/instrumentation/reka/async_reka.py +17 -11
- openlit/instrumentation/reka/reka.py +17 -11
- openlit/instrumentation/reka/utils.py +138 -36
- openlit/instrumentation/together/__init__.py +44 -12
- openlit/instrumentation/together/async_together.py +50 -27
- openlit/instrumentation/together/together.py +50 -27
- openlit/instrumentation/together/utils.py +301 -71
- openlit/instrumentation/transformers/__init__.py +2 -1
- openlit/instrumentation/transformers/transformers.py +13 -3
- openlit/instrumentation/transformers/utils.py +139 -36
- openlit/instrumentation/vertexai/__init__.py +81 -16
- openlit/instrumentation/vertexai/async_vertexai.py +33 -15
- openlit/instrumentation/vertexai/utils.py +123 -27
- openlit/instrumentation/vertexai/vertexai.py +33 -15
- openlit/instrumentation/vllm/__init__.py +12 -5
- openlit/instrumentation/vllm/utils.py +121 -31
- openlit/instrumentation/vllm/vllm.py +16 -10
- openlit/otel/events.py +35 -10
- openlit/otel/metrics.py +32 -24
- openlit/otel/tracing.py +24 -9
- openlit/semcov/__init__.py +101 -7
- {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
- openlit-1.34.31.dist-info/RECORD +166 -0
- openlit/instrumentation/langchain/async_langchain.py +0 -102
- openlit/instrumentation/langchain/langchain.py +0 -102
- openlit/instrumentation/langchain/utils.py +0 -252
- openlit-1.34.29.dist-info/RECORD +0 -166
- {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
- {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
openlit/evals/utils.py
CHANGED
@@ -15,6 +15,7 @@ from openlit.semcov import SemanticConvention
|
|
15
15
|
# Initialize logger for logging potential issues and operations
|
16
16
|
logger = logging.getLogger(__name__)
|
17
17
|
|
18
|
+
|
18
19
|
class JsonOutput(BaseModel):
|
19
20
|
"""
|
20
21
|
A model representing the structure of JSON output for prompt injection detection.
|
@@ -32,9 +33,13 @@ class JsonOutput(BaseModel):
|
|
32
33
|
classification: str
|
33
34
|
explanation: str
|
34
35
|
|
35
|
-
|
36
|
-
|
37
|
-
|
36
|
+
|
37
|
+
def setup_provider(
|
38
|
+
provider: Optional[str],
|
39
|
+
api_key: Optional[str],
|
40
|
+
model: Optional[str],
|
41
|
+
base_url: Optional[str],
|
42
|
+
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
|
38
43
|
"""
|
39
44
|
Sets up the provider, API key, model, and base URL.
|
40
45
|
|
@@ -52,7 +57,7 @@ def setup_provider(provider: Optional[str], api_key: Optional[str],
|
|
52
57
|
"""
|
53
58
|
provider_configs = {
|
54
59
|
"openai": {"env_var": "OPENAI_API_KEY"},
|
55
|
-
"anthropic": {"env_var": "ANTHROPIC_API_KEY"}
|
60
|
+
"anthropic": {"env_var": "ANTHROPIC_API_KEY"},
|
56
61
|
}
|
57
62
|
|
58
63
|
if provider is None:
|
@@ -72,12 +77,16 @@ def setup_provider(provider: Optional[str], api_key: Optional[str],
|
|
72
77
|
|
73
78
|
if not api_key:
|
74
79
|
# pylint: disable=line-too-long
|
75
|
-
raise ValueError(
|
80
|
+
raise ValueError(
|
81
|
+
f"API key required via 'api_key' parameter or '{env_var}' environment variable"
|
82
|
+
)
|
76
83
|
|
77
84
|
return api_key, model, base_url
|
78
85
|
|
79
86
|
|
80
|
-
def format_prompt(
|
87
|
+
def format_prompt(
|
88
|
+
system_prompt: str, prompt: str, contexts: List[str], text: str
|
89
|
+
) -> str:
|
81
90
|
"""
|
82
91
|
Format the prompt.
|
83
92
|
|
@@ -98,6 +107,7 @@ def format_prompt(system_prompt: str, prompt: str, contexts: List[str], text: st
|
|
98
107
|
|
99
108
|
return formatted_prompt
|
100
109
|
|
110
|
+
|
101
111
|
def llm_response(provider: str, prompt: str, model: str, base_url: str) -> str:
|
102
112
|
"""
|
103
113
|
Generates an LLM response using the configured provider.
|
@@ -117,6 +127,7 @@ def llm_response(provider: str, prompt: str, model: str, base_url: str) -> str:
|
|
117
127
|
else:
|
118
128
|
raise ValueError(f"Unsupported provider: {provider}")
|
119
129
|
|
130
|
+
|
120
131
|
def llm_response_openai(prompt: str, model: str, base_url: str) -> str:
|
121
132
|
"""
|
122
133
|
Interacts with the OpenAI API to get a LLM response.
|
@@ -142,10 +153,11 @@ def llm_response_openai(prompt: str, model: str, base_url: str) -> str:
|
|
142
153
|
{"role": "user", "content": prompt},
|
143
154
|
],
|
144
155
|
temperature=0.0,
|
145
|
-
response_format=JsonOutput
|
156
|
+
response_format=JsonOutput,
|
146
157
|
)
|
147
158
|
return response.choices[0].message.content
|
148
159
|
|
160
|
+
|
149
161
|
def llm_response_anthropic(prompt: str, model: str) -> str:
|
150
162
|
"""
|
151
163
|
Interacts with the Anthropic API to get a LLM response.
|
@@ -172,23 +184,33 @@ def llm_response_anthropic(prompt: str, model: str) -> str:
|
|
172
184
|
"verdict": {"type": "string", "description": "Evaluation verdict"},
|
173
185
|
"evaluation": {"type": "string", "description": "Evaluation type"},
|
174
186
|
"score": {"type": "number", "description": "Evaluation score"},
|
175
|
-
"classification": {
|
176
|
-
|
187
|
+
"classification": {
|
188
|
+
"type": "string",
|
189
|
+
"description": "Evaluation category",
|
190
|
+
},
|
191
|
+
"explanation": {
|
192
|
+
"type": "string",
|
193
|
+
"description": "Evaluation reason",
|
194
|
+
},
|
177
195
|
},
|
178
|
-
"required": [
|
179
|
-
|
196
|
+
"required": [
|
197
|
+
"verdict",
|
198
|
+
"evaluation",
|
199
|
+
"score",
|
200
|
+
"classification",
|
201
|
+
"explanation",
|
202
|
+
],
|
203
|
+
},
|
180
204
|
}
|
181
205
|
]
|
182
206
|
|
183
207
|
response = client.messages.create(
|
184
208
|
model=model,
|
185
|
-
messages=[
|
186
|
-
{"role": "user", "content": prompt}
|
187
|
-
],
|
209
|
+
messages=[{"role": "user", "content": prompt}],
|
188
210
|
max_tokens=2000,
|
189
211
|
temperature=0.0,
|
190
212
|
tools=tools,
|
191
|
-
stream=False
|
213
|
+
stream=False,
|
192
214
|
)
|
193
215
|
|
194
216
|
for content in response.content:
|
@@ -198,6 +220,7 @@ def llm_response_anthropic(prompt: str, model: str) -> str:
|
|
198
220
|
|
199
221
|
return response
|
200
222
|
|
223
|
+
|
201
224
|
def parse_llm_response(response) -> JsonOutput:
|
202
225
|
"""
|
203
226
|
Parses the LLM response into a JsonOutput object.
|
@@ -220,8 +243,14 @@ def parse_llm_response(response) -> JsonOutput:
|
|
220
243
|
return JsonOutput(**data)
|
221
244
|
except (json.JSONDecodeError, TypeError) as e:
|
222
245
|
logger.error("Error parsing LLM response: '%s'", e)
|
223
|
-
return JsonOutput(
|
224
|
-
|
246
|
+
return JsonOutput(
|
247
|
+
score=0,
|
248
|
+
classification="none",
|
249
|
+
explanation="none",
|
250
|
+
verdict="no",
|
251
|
+
evaluation="none",
|
252
|
+
)
|
253
|
+
|
225
254
|
|
226
255
|
def eval_metrics():
|
227
256
|
"""
|
@@ -240,11 +269,12 @@ def eval_metrics():
|
|
240
269
|
guard_requests = meter.create_counter(
|
241
270
|
name=SemanticConvention.EVAL_REQUESTS,
|
242
271
|
description="Counter for evaluation requests",
|
243
|
-
unit="1"
|
272
|
+
unit="1",
|
244
273
|
)
|
245
274
|
|
246
275
|
return guard_requests
|
247
276
|
|
277
|
+
|
248
278
|
def eval_metric_attributes(verdict, score, validator, classification, explanation):
|
249
279
|
"""
|
250
280
|
Initializes OpenTelemetry attributes for metrics.
|
@@ -260,16 +290,10 @@ def eval_metric_attributes(verdict, score, validator, classification, explanatio
|
|
260
290
|
"""
|
261
291
|
|
262
292
|
return {
|
263
|
-
|
264
|
-
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
SemanticConvention.EVAL_VALIDATOR:
|
270
|
-
validator,
|
271
|
-
SemanticConvention.EVAL_CLASSIFICATION:
|
272
|
-
classification,
|
273
|
-
SemanticConvention.EVAL_EXPLANATION:
|
274
|
-
explanation,
|
293
|
+
TELEMETRY_SDK_NAME: "openlit",
|
294
|
+
SemanticConvention.EVAL_VERDICT: verdict,
|
295
|
+
SemanticConvention.EVAL_SCORE: score,
|
296
|
+
SemanticConvention.EVAL_VALIDATOR: validator,
|
297
|
+
SemanticConvention.EVAL_CLASSIFICATION: classification,
|
298
|
+
SemanticConvention.EVAL_EXPLANATION: explanation,
|
275
299
|
}
|
openlit/guard/all.py
CHANGED
@@ -12,10 +12,15 @@ from openlit.guard.utils import (
|
|
12
12
|
parse_llm_response,
|
13
13
|
custom_rule_detection,
|
14
14
|
guard_metrics,
|
15
|
-
guard_metric_attributes
|
15
|
+
guard_metric_attributes,
|
16
16
|
)
|
17
17
|
|
18
|
-
|
18
|
+
|
19
|
+
def get_all_system_prompt(
|
20
|
+
valid_topics: Optional[List[str]] = None,
|
21
|
+
invalid_topics: Optional[List[str]] = None,
|
22
|
+
custom_categories: Optional[Dict[str, str]] = None,
|
23
|
+
) -> str:
|
19
24
|
"""
|
20
25
|
Returns the system prompt used for LLM analysis to capture prompt injections, valid topics, and sensitive topics.
|
21
26
|
|
@@ -68,8 +73,15 @@ def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topi
|
|
68
73
|
|
69
74
|
# Append custom categories for prompt injection if provided
|
70
75
|
if custom_categories:
|
71
|
-
custom_categories_str = "\n".join(
|
72
|
-
|
76
|
+
custom_categories_str = "\n".join(
|
77
|
+
[
|
78
|
+
f"- {key}: {description}"
|
79
|
+
for key, description in custom_categories.items()
|
80
|
+
]
|
81
|
+
)
|
82
|
+
base_prompt += (
|
83
|
+
f"\n Additional Prompt Injection Categories:\n{custom_categories_str}"
|
84
|
+
)
|
73
85
|
|
74
86
|
base_prompt += """
|
75
87
|
|
@@ -116,8 +128,15 @@ def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topi
|
|
116
128
|
|
117
129
|
# Append custom categories for sensitive topics if provided
|
118
130
|
if custom_categories:
|
119
|
-
custom_categories_str = "\n".join(
|
120
|
-
|
131
|
+
custom_categories_str = "\n".join(
|
132
|
+
[
|
133
|
+
f"- {key}: {description}"
|
134
|
+
for key, description in custom_categories.items()
|
135
|
+
]
|
136
|
+
)
|
137
|
+
base_prompt += (
|
138
|
+
f"\n Additional Sensitive Topics Categories:\n{custom_categories_str}"
|
139
|
+
)
|
121
140
|
|
122
141
|
base_prompt += """
|
123
142
|
|
@@ -126,6 +145,7 @@ def get_all_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topi
|
|
126
145
|
"""
|
127
146
|
return base_prompt
|
128
147
|
|
148
|
+
|
129
149
|
class All:
|
130
150
|
"""
|
131
151
|
A comprehensive class to detect prompt injections, valid/invalid topics, and sensitive topics using LLM or custom rules.
|
@@ -141,13 +161,18 @@ class All:
|
|
141
161
|
invalid_topics (Optional[List[str]]): List of invalid topics.
|
142
162
|
"""
|
143
163
|
|
144
|
-
def __init__(
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
164
|
+
def __init__(
|
165
|
+
self,
|
166
|
+
provider: Optional[str] = None,
|
167
|
+
api_key: Optional[str] = None,
|
168
|
+
model: Optional[str] = None,
|
169
|
+
base_url: Optional[str] = None,
|
170
|
+
custom_rules: Optional[List[dict]] = None,
|
171
|
+
custom_categories: Optional[Dict[str, str]] = None,
|
172
|
+
valid_topics: Optional[List[str]] = None,
|
173
|
+
invalid_topics: Optional[List[str]] = None,
|
174
|
+
collect_metrics: Optional[bool] = False,
|
175
|
+
):
|
151
176
|
"""
|
152
177
|
Initializes the All class with specified LLM settings, custom rules, and categories.
|
153
178
|
|
@@ -165,8 +190,12 @@ class All:
|
|
165
190
|
ValueError: If provider is not specified.
|
166
191
|
"""
|
167
192
|
self.provider = provider
|
168
|
-
self.api_key, self.model, self.base_url = setup_provider(
|
169
|
-
|
193
|
+
self.api_key, self.model, self.base_url = setup_provider(
|
194
|
+
provider, api_key, model, base_url
|
195
|
+
)
|
196
|
+
self.system_prompt = get_all_system_prompt(
|
197
|
+
valid_topics, invalid_topics, custom_categories
|
198
|
+
)
|
170
199
|
self.custom_rules = custom_rules or []
|
171
200
|
self.valid_topics = valid_topics or []
|
172
201
|
self.invalid_topics = invalid_topics or []
|
@@ -183,18 +212,31 @@ class All:
|
|
183
212
|
JsonOutput: The structured result of the detection.
|
184
213
|
"""
|
185
214
|
custom_rule_result = custom_rule_detection(text, self.custom_rules)
|
186
|
-
llm_result = JsonOutput(
|
215
|
+
llm_result = JsonOutput(
|
216
|
+
score=0.0,
|
217
|
+
verdict="no",
|
218
|
+
guard="none",
|
219
|
+
classification="none",
|
220
|
+
explanation="none",
|
221
|
+
)
|
187
222
|
|
188
223
|
if self.provider:
|
189
224
|
prompt = format_prompt(self.system_prompt, text)
|
190
|
-
llm_result = parse_llm_response(
|
225
|
+
llm_result = parse_llm_response(
|
226
|
+
llm_response(self.provider, prompt, self.model, self.base_url)
|
227
|
+
)
|
191
228
|
|
192
229
|
result = max(custom_rule_result, llm_result, key=lambda x: x.score)
|
193
230
|
|
194
231
|
if self.collect_metrics:
|
195
232
|
guard_counter = guard_metrics()
|
196
|
-
attributes = guard_metric_attributes(
|
197
|
-
|
233
|
+
attributes = guard_metric_attributes(
|
234
|
+
result.verdict,
|
235
|
+
result.score,
|
236
|
+
result.guard,
|
237
|
+
result.classification,
|
238
|
+
result.explanation,
|
239
|
+
)
|
198
240
|
guard_counter.add(1, attributes)
|
199
241
|
|
200
242
|
return result
|
@@ -12,9 +12,10 @@ from openlit.guard.utils import (
|
|
12
12
|
parse_llm_response,
|
13
13
|
custom_rule_detection,
|
14
14
|
guard_metrics,
|
15
|
-
guard_metric_attributes
|
15
|
+
guard_metric_attributes,
|
16
16
|
)
|
17
17
|
|
18
|
+
|
18
19
|
def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str:
|
19
20
|
"""
|
20
21
|
Returns the system prompt used for LLM analysis, including custom categories if provided.
|
@@ -64,7 +65,9 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
|
|
64
65
|
"""
|
65
66
|
|
66
67
|
if custom_categories:
|
67
|
-
custom_categories_str = "\n".join(
|
68
|
+
custom_categories_str = "\n".join(
|
69
|
+
[f"- {key}: {value}" for key, value in custom_categories.items()]
|
70
|
+
)
|
68
71
|
base_prompt += f"\n Additional Categories:\n{custom_categories_str}"
|
69
72
|
|
70
73
|
base_prompt += """
|
@@ -73,17 +76,25 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
|
|
73
76
|
"""
|
74
77
|
return base_prompt
|
75
78
|
|
79
|
+
|
76
80
|
class PromptInjection:
|
77
81
|
"""Class to intialize Prompt Injection"""
|
78
82
|
|
79
|
-
def __init__(
|
80
|
-
|
81
|
-
|
82
|
-
|
83
|
-
|
84
|
-
|
83
|
+
def __init__(
|
84
|
+
self,
|
85
|
+
provider: Optional[str] = None,
|
86
|
+
api_key: Optional[str] = None,
|
87
|
+
model: Optional[str] = None,
|
88
|
+
base_url: Optional[str] = None,
|
89
|
+
custom_rules: Optional[List[dict]] = None,
|
90
|
+
custom_categories: Optional[Dict[str, str]] = None,
|
91
|
+
threshold_score: float = 0.25,
|
92
|
+
collect_metrics: Optional[bool] = False,
|
93
|
+
):
|
85
94
|
self.provider = provider
|
86
|
-
self.api_key, self.model, self.base_url = setup_provider(
|
95
|
+
self.api_key, self.model, self.base_url = setup_provider(
|
96
|
+
provider, api_key, model, base_url
|
97
|
+
)
|
87
98
|
self.system_prompt = get_system_prompt(custom_categories)
|
88
99
|
self.custom_rules = custom_rules or []
|
89
100
|
self.threshold_score = threshold_score
|
@@ -93,11 +104,19 @@ class PromptInjection:
|
|
93
104
|
"""Functon to detect Prompt Injection and jailbreak attempts in input"""
|
94
105
|
|
95
106
|
custom_rule_result = custom_rule_detection(text, self.custom_rules)
|
96
|
-
llm_result = JsonOutput(
|
107
|
+
llm_result = JsonOutput(
|
108
|
+
score=0,
|
109
|
+
classification="none",
|
110
|
+
explanation="none",
|
111
|
+
verdict="none",
|
112
|
+
guard="none",
|
113
|
+
)
|
97
114
|
|
98
115
|
if self.provider:
|
99
116
|
prompt = format_prompt(self.system_prompt, text)
|
100
|
-
llm_result = parse_llm_response(
|
117
|
+
llm_result = parse_llm_response(
|
118
|
+
llm_response(self.provider, prompt, self.model, self.base_url)
|
119
|
+
)
|
101
120
|
|
102
121
|
result = max(custom_rule_result, llm_result, key=lambda x: x.score)
|
103
122
|
score = 0 if result.classification == "none" else result.score
|
@@ -105,8 +124,9 @@ class PromptInjection:
|
|
105
124
|
|
106
125
|
if self.collect_metrics is True:
|
107
126
|
guard_counter = guard_metrics()
|
108
|
-
attributes = guard_metric_attributes(
|
109
|
-
|
127
|
+
attributes = guard_metric_attributes(
|
128
|
+
verdict, score, result.guard, result.classification, result.explanation
|
129
|
+
)
|
110
130
|
guard_counter.add(1, attributes)
|
111
131
|
|
112
132
|
return JsonOutput(
|
@@ -114,5 +134,5 @@ class PromptInjection:
|
|
114
134
|
guard=result.guard,
|
115
135
|
verdict=verdict,
|
116
136
|
classification=result.classification,
|
117
|
-
explanation=result.explanation
|
137
|
+
explanation=result.explanation,
|
118
138
|
)
|
openlit/guard/restrict_topic.py
CHANGED
@@ -11,10 +11,13 @@ from openlit.guard.utils import (
|
|
11
11
|
llm_response,
|
12
12
|
parse_llm_response,
|
13
13
|
guard_metrics,
|
14
|
-
guard_metric_attributes
|
14
|
+
guard_metric_attributes,
|
15
15
|
)
|
16
16
|
|
17
|
-
|
17
|
+
|
18
|
+
def get_system_prompt(
|
19
|
+
valid_topics: Optional[List[str]] = None, invalid_topics: Optional[List[str]] = None
|
20
|
+
) -> str:
|
18
21
|
"""
|
19
22
|
Returns the system prompt used for LLM analysis, including valid and invalid topics if provided.
|
20
23
|
|
@@ -79,6 +82,7 @@ def get_system_prompt(valid_topics: Optional[List[str]] = None, invalid_topics:
|
|
79
82
|
|
80
83
|
return base_prompt
|
81
84
|
|
85
|
+
|
82
86
|
class TopicRestriction:
|
83
87
|
"""
|
84
88
|
A class to validate if text belongs to valid or invalid topics using LLM.
|
@@ -92,12 +96,16 @@ class TopicRestriction:
|
|
92
96
|
invalid_topics (Optional[List[str]]): List of invalid topics.
|
93
97
|
"""
|
94
98
|
|
95
|
-
def __init__(
|
96
|
-
|
97
|
-
|
98
|
-
|
99
|
-
|
100
|
-
|
99
|
+
def __init__(
|
100
|
+
self,
|
101
|
+
provider: Optional[str],
|
102
|
+
valid_topics: Optional[List[str]] = None,
|
103
|
+
api_key: Optional[str] = None,
|
104
|
+
model: Optional[str] = None,
|
105
|
+
base_url: Optional[str] = None,
|
106
|
+
invalid_topics: Optional[List[str]] = None,
|
107
|
+
collect_metrics: Optional[bool] = False,
|
108
|
+
):
|
101
109
|
"""
|
102
110
|
Initializes the TopicRestriction with specified LLM settings and topics.
|
103
111
|
|
@@ -114,12 +122,18 @@ class TopicRestriction:
|
|
114
122
|
"""
|
115
123
|
self.provider = provider
|
116
124
|
if self.provider is None:
|
117
|
-
raise ValueError(
|
118
|
-
|
125
|
+
raise ValueError(
|
126
|
+
"An LLM provider must be specified for TopicRestriction Validator"
|
127
|
+
)
|
128
|
+
self.api_key, self.model, self.base_url = setup_provider(
|
129
|
+
provider, api_key, model, base_url
|
130
|
+
)
|
119
131
|
self.system_prompt = get_system_prompt(valid_topics, invalid_topics)
|
120
132
|
self.valid_topics = valid_topics
|
121
133
|
if self.valid_topics is None:
|
122
|
-
raise ValueError(
|
134
|
+
raise ValueError(
|
135
|
+
"Valid Topics must be specified for TopicRestriction Validator"
|
136
|
+
)
|
123
137
|
self.invalid_topics = invalid_topics or []
|
124
138
|
self.collect_metrics = collect_metrics
|
125
139
|
|
@@ -139,14 +153,31 @@ class TopicRestriction:
|
|
139
153
|
|
140
154
|
# Adjusted logic for consistency with updated JSON structure
|
141
155
|
if llm_result.classification == "valid_topic":
|
142
|
-
result = JsonOutput(
|
156
|
+
result = JsonOutput(
|
157
|
+
score=0,
|
158
|
+
verdict="no",
|
159
|
+
guard="topic_restriction",
|
160
|
+
classification="valid_topic",
|
161
|
+
explanation="Text fits into a valid topic.",
|
162
|
+
)
|
143
163
|
else:
|
144
|
-
result = JsonOutput(
|
164
|
+
result = JsonOutput(
|
165
|
+
score=1.0,
|
166
|
+
verdict="yes",
|
167
|
+
guard="topic_restriction",
|
168
|
+
classification="invalid_topic",
|
169
|
+
explanation="Text does not match any valid categories.",
|
170
|
+
)
|
145
171
|
|
146
172
|
if self.collect_metrics:
|
147
173
|
guard_counter = guard_metrics()
|
148
|
-
attributes = guard_metric_attributes(
|
149
|
-
|
174
|
+
attributes = guard_metric_attributes(
|
175
|
+
result.verdict,
|
176
|
+
result.score,
|
177
|
+
result.guard,
|
178
|
+
result.classification,
|
179
|
+
result.explanation,
|
180
|
+
)
|
150
181
|
guard_counter.add(1, attributes)
|
151
182
|
|
152
183
|
return result
|
openlit/guard/sensitive_topic.py
CHANGED
@@ -12,9 +12,10 @@ from openlit.guard.utils import (
|
|
12
12
|
parse_llm_response,
|
13
13
|
custom_rule_detection,
|
14
14
|
guard_metrics,
|
15
|
-
guard_metric_attributes
|
15
|
+
guard_metric_attributes,
|
16
16
|
)
|
17
17
|
|
18
|
+
|
18
19
|
def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str:
|
19
20
|
"""
|
20
21
|
Returns the system prompt used for LLM analysis, including custom categories if provided.
|
@@ -61,7 +62,9 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
|
|
61
62
|
"""
|
62
63
|
|
63
64
|
if custom_categories:
|
64
|
-
custom_categories_str = "\n".join(
|
65
|
+
custom_categories_str = "\n".join(
|
66
|
+
[f"- {key}: {value}" for key, value in custom_categories.items()]
|
67
|
+
)
|
65
68
|
base_prompt += f"\n Additional Categories:\n{custom_categories_str}"
|
66
69
|
|
67
70
|
base_prompt += """
|
@@ -71,6 +74,7 @@ def get_system_prompt(custom_categories: Optional[Dict[str, str]] = None) -> str
|
|
71
74
|
|
72
75
|
return base_prompt
|
73
76
|
|
77
|
+
|
74
78
|
class SensitiveTopic:
|
75
79
|
"""
|
76
80
|
A class to detect sensitive topics using LLM or custom rules.
|
@@ -84,14 +88,21 @@ class SensitiveTopic:
|
|
84
88
|
custom_categories (Optional[Dict[str, str]]): Additional categories for sensitive topics.
|
85
89
|
"""
|
86
90
|
|
87
|
-
def __init__(
|
88
|
-
|
89
|
-
|
90
|
-
|
91
|
-
|
92
|
-
|
91
|
+
def __init__(
|
92
|
+
self,
|
93
|
+
provider: Optional[str] = None,
|
94
|
+
api_key: Optional[str] = None,
|
95
|
+
model: Optional[str] = None,
|
96
|
+
base_url: Optional[str] = None,
|
97
|
+
custom_rules: Optional[List[dict]] = None,
|
98
|
+
custom_categories: Optional[Dict[str, str]] = None,
|
99
|
+
threshold_score: float = 0.25,
|
100
|
+
collect_metrics: Optional[bool] = False,
|
101
|
+
):
|
93
102
|
self.provider = provider
|
94
|
-
self.api_key, self.model, self.base_url = setup_provider(
|
103
|
+
self.api_key, self.model, self.base_url = setup_provider(
|
104
|
+
provider, api_key, model, base_url
|
105
|
+
)
|
95
106
|
self.system_prompt = get_system_prompt(custom_categories)
|
96
107
|
self.custom_rules = custom_rules or []
|
97
108
|
self.threshold_score = threshold_score
|
@@ -101,11 +112,19 @@ class SensitiveTopic:
|
|
101
112
|
"""Function to detect sensitive topic in AI response"""
|
102
113
|
|
103
114
|
custom_rule_result = custom_rule_detection(text, self.custom_rules)
|
104
|
-
llm_result = JsonOutput(
|
115
|
+
llm_result = JsonOutput(
|
116
|
+
score=0,
|
117
|
+
classification="none",
|
118
|
+
explanation="none",
|
119
|
+
verdict="no",
|
120
|
+
guard="none",
|
121
|
+
)
|
105
122
|
|
106
123
|
if self.provider:
|
107
124
|
prompt = format_prompt(self.system_prompt, text)
|
108
|
-
llm_result = parse_llm_response(
|
125
|
+
llm_result = parse_llm_response(
|
126
|
+
llm_response(self.provider, prompt, self.model, self.base_url)
|
127
|
+
)
|
109
128
|
|
110
129
|
result = max(custom_rule_result, llm_result, key=lambda x: x.score)
|
111
130
|
score = 0 if result.classification == "none" else result.score
|
@@ -113,8 +132,9 @@ class SensitiveTopic:
|
|
113
132
|
|
114
133
|
if self.collect_metrics:
|
115
134
|
guard_counter = guard_metrics()
|
116
|
-
attributes = guard_metric_attributes(
|
117
|
-
|
135
|
+
attributes = guard_metric_attributes(
|
136
|
+
verdict, score, result.guard, result.classification, result.explanation
|
137
|
+
)
|
118
138
|
guard_counter.add(1, attributes)
|
119
139
|
|
120
140
|
return JsonOutput(
|
@@ -122,5 +142,5 @@ class SensitiveTopic:
|
|
122
142
|
guard=result.guard,
|
123
143
|
verdict=verdict,
|
124
144
|
classification=result.classification,
|
125
|
-
explanation=result.explanation
|
145
|
+
explanation=result.explanation,
|
126
146
|
)
|