posthoganalytics 6.7.0__py3-none-any.whl → 6.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- posthoganalytics/ai/anthropic/anthropic.py +2 -1
- posthoganalytics/ai/anthropic/anthropic_async.py +2 -1
- posthoganalytics/ai/gemini/gemini.py +2 -1
- posthoganalytics/ai/langchain/callbacks.py +3 -2
- posthoganalytics/ai/openai/openai.py +10 -3
- posthoganalytics/ai/openai/openai_async.py +10 -3
- posthoganalytics/ai/sanitization.py +226 -0
- posthoganalytics/ai/utils.py +27 -2
- posthoganalytics/version.py +1 -1
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/METADATA +1 -1
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/RECORD +14 -13
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/WHEEL +0 -0
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/licenses/LICENSE +0 -0
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/top_level.txt +0 -0
posthoganalytics/ai/anthropic/anthropic.py
CHANGED

@@ -16,6 +16,7 @@ from posthoganalytics.ai.utils import (
     merge_system_prompt,
     with_privacy_mode,
 )
+from posthoganalytics.ai.sanitization import sanitize_anthropic
 from posthoganalytics.client import Client as PostHogClient
 from posthoganalytics import setup

@@ -184,7 +185,7 @@ class WrappedMessages(Messages):
             "$ai_input": with_privacy_mode(
                 self._client._ph_client,
                 posthog_privacy_mode,
-                merge_system_prompt(kwargs, "anthropic"),
+                sanitize_anthropic(merge_system_prompt(kwargs, "anthropic")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,
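For context, a minimal sketch (not part of the diff; the message payload is illustrative) of what the new `sanitize_anthropic` call does to the captured `$ai_input`:

```python
# Illustrative only: assumes the sanitization module added in this release.
from posthoganalytics.ai.sanitization import sanitize_anthropic

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this picture?"},
            {
                "type": "image",
                "source": {"type": "base64", "media_type": "image/png", "data": "iVBORw0KGgo..."},
            },
        ],
    }
]

sanitized = sanitize_anthropic(messages)
# The base64 payload is replaced; text blocks and other keys are untouched.
assert sanitized[0]["content"][1]["source"]["data"] == "[base64 image redacted]"
assert sanitized[0]["content"][0]["text"] == "What is in this picture?"
```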
posthoganalytics/ai/anthropic/anthropic_async.py
CHANGED

@@ -17,6 +17,7 @@ from posthoganalytics.ai.utils import (
     merge_system_prompt,
     with_privacy_mode,
 )
+from posthoganalytics.ai.sanitization import sanitize_anthropic
 from posthoganalytics.client import Client as PostHogClient


@@ -184,7 +185,7 @@ class AsyncWrappedMessages(AsyncMessages):
             "$ai_input": with_privacy_mode(
                 self._client._ph_client,
                 posthog_privacy_mode,
-                merge_system_prompt(kwargs, "anthropic"),
+                sanitize_anthropic(merge_system_prompt(kwargs, "anthropic")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,
posthoganalytics/ai/gemini/gemini.py
CHANGED

@@ -16,6 +16,7 @@ from posthoganalytics.ai.utils import (
     get_model_params,
     with_privacy_mode,
 )
+from posthoganalytics.ai.sanitization import sanitize_gemini
 from posthoganalytics.client import Client as PostHogClient


@@ -347,7 +348,7 @@ class Models:
             "$ai_input": with_privacy_mode(
                 self._ph_client,
                 privacy_mode,
-                self._format_input(contents),
+                sanitize_gemini(self._format_input(contents)),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._ph_client,
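A minimal sketch (illustrative payload, not from the diff) of the `sanitize_gemini` call on formatted contents:

```python
# Illustrative only: assumes the sanitization module added in this release.
from posthoganalytics.ai.sanitization import sanitize_gemini

contents = [
    {
        "role": "user",
        "parts": [
            {"text": "Describe this image"},
            {"inline_data": {"mime_type": "image/jpeg", "data": "/9j/4AAQSkZJRg=="}},
        ],
    }
]

sanitized = sanitize_gemini(contents)
# inline_data payloads are redacted; plain text parts pass through.
assert sanitized[0]["parts"][1]["inline_data"]["data"] == "[base64 image redacted]"
assert sanitized[0]["parts"][0]["text"] == "Describe this image"
```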
posthoganalytics/ai/langchain/callbacks.py
CHANGED

@@ -37,6 +37,7 @@ from pydantic import BaseModel

 from posthoganalytics import setup
 from posthoganalytics.ai.utils import get_model_params, with_privacy_mode
+from posthoganalytics.ai.sanitization import sanitize_langchain
 from posthoganalytics.client import Client

 log = logging.getLogger("posthog")

@@ -480,7 +481,7 @@ class CallbackHandler(BaseCallbackHandler):
         event_properties = {
             "$ai_trace_id": trace_id,
             "$ai_input_state": with_privacy_mode(
-                self._ph_client, self._privacy_mode, run.input
+                self._ph_client, self._privacy_mode, sanitize_langchain(run.input)
             ),
             "$ai_latency": run.latency,
             "$ai_span_name": run.name,

@@ -550,7 +551,7 @@ class CallbackHandler(BaseCallbackHandler):
             "$ai_model": run.model,
             "$ai_model_parameters": run.model_params,
             "$ai_input": with_privacy_mode(
-                self._ph_client, self._privacy_mode, run.input
+                self._ph_client, self._privacy_mode, sanitize_langchain(run.input)
             ),
             "$ai_http_status": 200,
             "$ai_latency": run.latency,
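A minimal sketch (illustrative `run.input`, not from the diff) of the `sanitize_langchain(run.input)` calls above:

```python
# Illustrative only: assumes the sanitization module added in this release.
from posthoganalytics.ai.sanitization import sanitize_langchain

run_input = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What does this chart show?"},
            {
                "type": "image_url",
                "image_url": {"url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg=="},
            },
        ],
    }
]

sanitized = sanitize_langchain(run_input)
# data: URLs are collapsed to the placeholder; ordinary https:// URLs are kept.
assert sanitized[0]["content"][1]["image_url"]["url"] == "[base64 image redacted]"
```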
posthoganalytics/ai/openai/openai.py
CHANGED

@@ -15,6 +15,7 @@ from posthoganalytics.ai.utils import (
     get_model_params,
     with_privacy_mode,
 )
+from posthoganalytics.ai.sanitization import sanitize_openai, sanitize_openai_response
 from posthoganalytics.client import Client as PostHogClient
 from posthoganalytics import setup

@@ -194,7 +195,9 @@ class WrappedResponses:
             "$ai_model": kwargs.get("model"),
             "$ai_model_parameters": get_model_params(kwargs),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,

@@ -427,7 +430,9 @@ class WrappedCompletions:
             "$ai_model": kwargs.get("model"),
             "$ai_model_parameters": get_model_params(kwargs),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai(kwargs.get("messages")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,

@@ -518,7 +523,9 @@ class WrappedEmbeddings:
             "$ai_provider": "openai",
             "$ai_model": kwargs.get("model"),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_http_status": 200,
             "$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
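A minimal sketch (illustrative chat payload, not from the diff) of the `sanitize_openai(kwargs.get("messages"))` call used for chat completions:

```python
# Illustrative only: assumes the sanitization module added in this release.
from posthoganalytics.ai.sanitization import sanitize_openai

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Summarize this screenshot"},
            {
                "type": "image_url",
                "image_url": {"url": "data:image/png;base64,iVBORw0KGgo...", "detail": "low"},
            },
        ],
    }
]

sanitized = sanitize_openai(messages)
# Only the url value is rewritten; sibling keys such as "detail" are preserved.
assert sanitized[0]["content"][1]["image_url"]["url"] == "[base64 image redacted]"
assert sanitized[0]["content"][1]["image_url"]["detail"] == "low"
```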
posthoganalytics/ai/openai/openai_async.py
CHANGED

@@ -16,6 +16,7 @@ from posthoganalytics.ai.utils import (
     get_model_params,
     with_privacy_mode,
 )
+from posthoganalytics.ai.sanitization import sanitize_openai, sanitize_openai_response
 from posthoganalytics.client import Client as PostHogClient


@@ -195,7 +196,9 @@ class WrappedResponses:
             "$ai_model": kwargs.get("model"),
             "$ai_model_parameters": get_model_params(kwargs),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,

@@ -431,7 +434,9 @@ class WrappedCompletions:
             "$ai_model": kwargs.get("model"),
             "$ai_model_parameters": get_model_params(kwargs),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai(kwargs.get("messages")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,

@@ -522,7 +527,9 @@ class WrappedEmbeddings:
             "$ai_provider": "openai",
             "$ai_model": kwargs.get("model"),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_http_status": 200,
             "$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
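The async wrappers are wired identically to the sync ones; a minimal sketch (illustrative payload, not from the diff) of the `sanitize_openai_response(kwargs.get("input"))` path used for the Responses and Embeddings APIs:

```python
# Illustrative only: assumes the sanitization module added in this release.
from posthoganalytics.ai.sanitization import sanitize_openai_response

response_input = [
    {
        "role": "user",
        "content": [
            {"type": "input_text", "text": "What is on this receipt?"},
            {"type": "input_image", "image_url": "data:image/jpeg;base64,/9j/4AAQSkZJRg=="},
        ],
    }
]

sanitized = sanitize_openai_response(response_input)
assert sanitized[0]["content"][1]["image_url"] == "[base64 image redacted]"

# Plain string inputs (e.g. embeddings text) are returned unchanged.
assert sanitize_openai_response("hello world") == "hello world"
```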
posthoganalytics/ai/sanitization.py
ADDED

@@ -0,0 +1,226 @@
+import re
+from typing import Any
+from urllib.parse import urlparse
+
+REDACTED_IMAGE_PLACEHOLDER = "[base64 image redacted]"
+
+
+def is_base64_data_url(text: str) -> bool:
+    return re.match(r"^data:([^;]+);base64,", text) is not None
+
+
+def is_valid_url(text: str) -> bool:
+    try:
+        result = urlparse(text)
+        return bool(result.scheme and result.netloc)
+    except Exception:
+        pass
+
+    return text.startswith(("/", "./", "../"))
+
+
+def is_raw_base64(text: str) -> bool:
+    if is_valid_url(text):
+        return False
+
+    return len(text) > 20 and re.match(r"^[A-Za-z0-9+/]+=*$", text) is not None
+
+
+def redact_base64_data_url(value: Any) -> Any:
+    if not isinstance(value, str):
+        return value
+
+    if is_base64_data_url(value):
+        return REDACTED_IMAGE_PLACEHOLDER
+
+    if is_raw_base64(value):
+        return REDACTED_IMAGE_PLACEHOLDER
+
+    return value
+
+
+def process_messages(messages: Any, transform_content_func) -> Any:
+    if not messages:
+        return messages
+
+    def process_content(content: Any) -> Any:
+        if isinstance(content, str):
+            return content
+
+        if not content:
+            return content
+
+        if isinstance(content, list):
+            return [transform_content_func(item) for item in content]
+
+        return transform_content_func(content)
+
+    def process_message(msg: Any) -> Any:
+        if not isinstance(msg, dict) or "content" not in msg:
+            return msg
+        return {**msg, "content": process_content(msg["content"])}
+
+    if isinstance(messages, list):
+        return [process_message(msg) for msg in messages]
+
+    return process_message(messages)
+
+
+def sanitize_openai_image(item: Any) -> Any:
+    if not isinstance(item, dict):
+        return item
+
+    if (
+        item.get("type") == "image_url"
+        and isinstance(item.get("image_url"), dict)
+        and "url" in item["image_url"]
+    ):
+        return {
+            **item,
+            "image_url": {
+                **item["image_url"],
+                "url": redact_base64_data_url(item["image_url"]["url"]),
+            },
+        }
+
+    return item
+
+
+def sanitize_openai_response_image(item: Any) -> Any:
+    if not isinstance(item, dict):
+        return item
+
+    if item.get("type") == "input_image" and "image_url" in item:
+        return {
+            **item,
+            "image_url": redact_base64_data_url(item["image_url"]),
+        }
+
+    return item
+
+
+def sanitize_anthropic_image(item: Any) -> Any:
+    if not isinstance(item, dict):
+        return item
+
+    if (
+        item.get("type") == "image"
+        and isinstance(item.get("source"), dict)
+        and item["source"].get("type") == "base64"
+        and "data" in item["source"]
+    ):
+        # For Anthropic, if the source type is "base64", we should always redact the data
+        # The provider is explicitly telling us this is base64 data
+        return {
+            **item,
+            "source": {
+                **item["source"],
+                "data": REDACTED_IMAGE_PLACEHOLDER,
+            },
+        }
+
+    return item
+
+
+def sanitize_gemini_part(part: Any) -> Any:
+    if not isinstance(part, dict):
+        return part
+
+    if (
+        "inline_data" in part
+        and isinstance(part["inline_data"], dict)
+        and "data" in part["inline_data"]
+    ):
+        # For Gemini, the inline_data structure indicates base64 data
+        # We should redact any string data in this context
+        return {
+            **part,
+            "inline_data": {
+                **part["inline_data"],
+                "data": REDACTED_IMAGE_PLACEHOLDER,
+            },
+        }
+
+    return part
+
+
+def process_gemini_item(item: Any) -> Any:
+    if not isinstance(item, dict):
+        return item
+
+    if "parts" in item and item["parts"]:
+        parts = item["parts"]
+        if isinstance(parts, list):
+            parts = [sanitize_gemini_part(part) for part in parts]
+        else:
+            parts = sanitize_gemini_part(parts)
+
+        return {**item, "parts": parts}
+
+    return item
+
+
+def sanitize_langchain_image(item: Any) -> Any:
+    if not isinstance(item, dict):
+        return item
+
+    if (
+        item.get("type") == "image_url"
+        and isinstance(item.get("image_url"), dict)
+        and "url" in item["image_url"]
+    ):
+        return {
+            **item,
+            "image_url": {
+                **item["image_url"],
+                "url": redact_base64_data_url(item["image_url"]["url"]),
+            },
+        }
+
+    if item.get("type") == "image" and "data" in item:
+        return {**item, "data": redact_base64_data_url(item["data"])}
+
+    if (
+        item.get("type") == "image"
+        and isinstance(item.get("source"), dict)
+        and "data" in item["source"]
+    ):
+        # Anthropic style - raw base64 in structured format, always redact
+        return {
+            **item,
+            "source": {
+                **item["source"],
+                "data": REDACTED_IMAGE_PLACEHOLDER,
+            },
+        }
+
+    if item.get("type") == "media" and "data" in item:
+        return {**item, "data": redact_base64_data_url(item["data"])}
+
+    return item
+
+
+def sanitize_openai(data: Any) -> Any:
+    return process_messages(data, sanitize_openai_image)
+
+
+def sanitize_openai_response(data: Any) -> Any:
+    return process_messages(data, sanitize_openai_response_image)
+
+
+def sanitize_anthropic(data: Any) -> Any:
+    return process_messages(data, sanitize_anthropic_image)
+
+
+def sanitize_gemini(data: Any) -> Any:
+    if not data:
+        return data
+
+    if isinstance(data, list):
+        return [process_gemini_item(item) for item in data]
+
+    return process_gemini_item(data)
+
+
+def sanitize_langchain(data: Any) -> Any:
+    return process_messages(data, sanitize_langchain_image)
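A short sketch (not part of the diff) of how the redaction heuristics above behave on different string inputs:

```python
# Illustrative only: exercises the helpers defined in sanitization.py.
from posthoganalytics.ai.sanitization import redact_base64_data_url

# data: URLs are always redacted.
assert redact_base64_data_url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUg==") == "[base64 image redacted]"

# Raw base64-looking strings longer than 20 characters are redacted too.
assert redact_base64_data_url("iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB") == "[base64 image redacted]"

# Ordinary URLs are kept as-is.
assert redact_base64_data_url("https://example.com/cat.png") == "https://example.com/cat.png"

# Short and non-base64-looking strings are also kept.
assert redact_base64_data_url("./images/cat.png") == "./images/cat.png"
```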
posthoganalytics/ai/utils.py
CHANGED

@@ -5,6 +5,12 @@ from typing import Any, Callable, Dict, List, Optional
 from httpx import URL

 from posthoganalytics.client import Client as PostHogClient
+from posthoganalytics.ai.sanitization import (
+    sanitize_openai,
+    sanitize_anthropic,
+    sanitize_gemini,
+    sanitize_langchain,
+)


 def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:

@@ -422,12 +428,15 @@ def call_llm_and_track_usage(
     usage = get_usage(response, provider)

     messages = merge_system_prompt(kwargs, provider)
+    sanitized_messages = sanitize_messages(messages, provider)

     event_properties = {
         "$ai_provider": provider,
         "$ai_model": kwargs.get("model"),
         "$ai_model_parameters": get_model_params(kwargs),
-        "$ai_input": with_privacy_mode(
+        "$ai_input": with_privacy_mode(
+            ph_client, posthog_privacy_mode, sanitized_messages
+        ),
         "$ai_output_choices": with_privacy_mode(
             ph_client, posthog_privacy_mode, format_response(response, provider)
         ),

@@ -536,12 +545,15 @@ async def call_llm_and_track_usage_async(
     usage = get_usage(response, provider)

     messages = merge_system_prompt(kwargs, provider)
+    sanitized_messages = sanitize_messages(messages, provider)

     event_properties = {
         "$ai_provider": provider,
         "$ai_model": kwargs.get("model"),
         "$ai_model_parameters": get_model_params(kwargs),
-        "$ai_input": with_privacy_mode(
+        "$ai_input": with_privacy_mode(
+            ph_client, posthog_privacy_mode, sanitized_messages
+        ),
         "$ai_output_choices": with_privacy_mode(
             ph_client, posthog_privacy_mode, format_response(response, provider)
         ),

@@ -600,6 +612,19 @@
     return response


+def sanitize_messages(data: Any, provider: str) -> Any:
+    """Sanitize messages using provider-specific sanitization functions."""
+    if provider == "anthropic":
+        return sanitize_anthropic(data)
+    elif provider == "openai":
+        return sanitize_openai(data)
+    elif provider == "gemini":
+        return sanitize_gemini(data)
+    elif provider == "langchain":
+        return sanitize_langchain(data)
+    return data
+
+
 def with_privacy_mode(ph_client: PostHogClient, privacy_mode: bool, value: Any):
     if ph_client.privacy_mode or privacy_mode:
         return None
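A minimal sketch (illustrative payload, not from the diff) of the new `sanitize_messages` dispatch:

```python
# Illustrative only: assumes utils.py as changed in this release.
from posthoganalytics.ai.utils import sanitize_messages

anthropic_messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": "iVBORw0KGgo..."}},
        ],
    }
]

# A known provider routes to its sanitizer; anything else falls through untouched.
redacted = sanitize_messages(anthropic_messages, "anthropic")
untouched = sanitize_messages(anthropic_messages, "unknown-provider")

assert redacted[0]["content"][0]["source"]["data"] == "[base64 image redacted]"
assert untouched is anthropic_messages
```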
posthoganalytics/version.py
CHANGED

{posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/RECORD
CHANGED

@@ -11,20 +11,21 @@ posthoganalytics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 posthoganalytics/request.py,sha256=Bsl2c5WwONKPQzwWMmKPX5VgOlwSiIcSNfhXgoz62Y8,6186
 posthoganalytics/types.py,sha256=Dl3aFGX9XUR0wMmK12r2s5Hjan9jL4HpQ9GHpVcEq5U,10207
 posthoganalytics/utils.py,sha256=-0w-OLcCaoldkbBebPzQyBzLJSo9G9yBOg8NDVz7La8,16088
-posthoganalytics/version.py,sha256=
+posthoganalytics/version.py,sha256=pmATCTXxNwGuui3XSa7FxwrKYFujvZoE2W5rzfeS8KU,87
 posthoganalytics/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-posthoganalytics/ai/
+posthoganalytics/ai/sanitization.py,sha256=owipZ4eJYtd4JTI-CM_klatclXaeaIec3XJBOUfsOnQ,5770
+posthoganalytics/ai/utils.py,sha256=oYXRb304uAVeaMij5Caxcy1T--wdQ4Aq74ioXvPZwdU,21891
 posthoganalytics/ai/anthropic/__init__.py,sha256=fFhDOiRzTXzGQlgnrRDL-4yKC8EYIl8NW4a2QNR6xRU,368
-posthoganalytics/ai/anthropic/anthropic.py,sha256=
-posthoganalytics/ai/anthropic/anthropic_async.py,sha256=
+posthoganalytics/ai/anthropic/anthropic.py,sha256=GSrJBE56b68X13u6NAgWLHLLjDg3tDq0EIuvU5Zl_tk,7475
+posthoganalytics/ai/anthropic/anthropic_async.py,sha256=IdH2FPPyTFycsYX1Uu9dfgA9VEGcRBNAyeKTlKo9pWc,7595
 posthoganalytics/ai/anthropic/anthropic_providers.py,sha256=y1_qc8Lbip-YDmpimPGg3DfTm5g-WZk5FrRCXzwF_Ow,2139
 posthoganalytics/ai/gemini/__init__.py,sha256=bMNBnJ6NO_PCQCwmxKIiw4adFuEQ06hFFBALt-aDW-0,174
-posthoganalytics/ai/gemini/gemini.py,sha256=
+posthoganalytics/ai/gemini/gemini.py,sha256=yW4hMGwPlIvEptjJfMvpU_BGSSaLNWgH7FoenbAmgKI,15713
 posthoganalytics/ai/langchain/__init__.py,sha256=9CqAwLynTGj3ASAR80C3PmdTdrYGmu99tz0JL-HPFgI,70
-posthoganalytics/ai/langchain/callbacks.py,sha256=
+posthoganalytics/ai/langchain/callbacks.py,sha256=Otha0a6YLBwETfKjDDbdLzNi-RHRgKFJB69GwWCv9lg,29527
 posthoganalytics/ai/openai/__init__.py,sha256=_flZxkyaDZme9hxJsY31sMlq4nP1dtc75HmNgj-21Kg,197
-posthoganalytics/ai/openai/openai.py,sha256=
-posthoganalytics/ai/openai/openai_async.py,sha256=
+posthoganalytics/ai/openai/openai.py,sha256=qq-O6aNuN-TYXHBRASWz0KmR8XY1IHsSyPNuLlt-27Y,22431
+posthoganalytics/ai/openai/openai_async.py,sha256=Alxpqm6hnNHXJduyOaNxCxCtbOrG4CXrSlUIUgN1Ndg,22832
 posthoganalytics/ai/openai/openai_providers.py,sha256=RPVmj2V0_lAdno_ax5Ul2kwhBA9_rRgAdl_sCqrQc6M,4004
 posthoganalytics/integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 posthoganalytics/integrations/django.py,sha256=KYtBr7CkiZQynRc2TCWWYHe-J3ie8iSUa42WPshYZdc,6795

@@ -42,8 +43,8 @@ posthoganalytics/test/test_request.py,sha256=Zc0VbkjpVmj8mKokQm9rzdgTr0b1U44vvMY
 posthoganalytics/test/test_size_limited_dict.py,sha256=-5IQjIEr_-Dql24M0HusdR_XroOMrtgiT0v6ZQCRvzo,774
 posthoganalytics/test/test_types.py,sha256=bRPHdwVpP7hu7emsplU8UVyzSQptv6PaG5lAoOD_BtM,7595
 posthoganalytics/test/test_utils.py,sha256=sqUTbfweVcxxFRd3WDMFXqPMyU6DvzOBeAOc68Py9aw,9620
-posthoganalytics-6.7.
-posthoganalytics-6.7.
-posthoganalytics-6.7.
-posthoganalytics-6.7.
-posthoganalytics-6.7.
+posthoganalytics-6.7.1.dist-info/licenses/LICENSE,sha256=wGf9JBotDkSygFj43m49oiKlFnpMnn97keiZKF-40vE,2450
+posthoganalytics-6.7.1.dist-info/METADATA,sha256=192EARPdA80rxKZUOP30obmzvU8fFa37jkc92yXgVOE,6024
+posthoganalytics-6.7.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+posthoganalytics-6.7.1.dist-info/top_level.txt,sha256=8QsNIqIkBh1p2TXvKp0Em9ZLZKwe3uIqCETyW4s1GOE,17
+posthoganalytics-6.7.1.dist-info/RECORD,,
{posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/WHEEL
File without changes

{posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/licenses/LICENSE
File without changes

{posthoganalytics-6.7.0.dist-info → posthoganalytics-6.7.1.dist-info}/top_level.txt
File without changes