posthoganalytics 6.7.0__py3-none-any.whl → 7.4.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- posthoganalytics/__init__.py +84 -7
- posthoganalytics/ai/anthropic/__init__.py +10 -0
- posthoganalytics/ai/anthropic/anthropic.py +95 -65
- posthoganalytics/ai/anthropic/anthropic_async.py +95 -65
- posthoganalytics/ai/anthropic/anthropic_converter.py +443 -0
- posthoganalytics/ai/gemini/__init__.py +15 -1
- posthoganalytics/ai/gemini/gemini.py +66 -71
- posthoganalytics/ai/gemini/gemini_async.py +423 -0
- posthoganalytics/ai/gemini/gemini_converter.py +652 -0
- posthoganalytics/ai/langchain/callbacks.py +58 -13
- posthoganalytics/ai/openai/__init__.py +16 -1
- posthoganalytics/ai/openai/openai.py +140 -149
- posthoganalytics/ai/openai/openai_async.py +127 -82
- posthoganalytics/ai/openai/openai_converter.py +741 -0
- posthoganalytics/ai/sanitization.py +248 -0
- posthoganalytics/ai/types.py +125 -0
- posthoganalytics/ai/utils.py +339 -356
- posthoganalytics/client.py +345 -97
- posthoganalytics/contexts.py +81 -0
- posthoganalytics/exception_utils.py +250 -2
- posthoganalytics/feature_flags.py +26 -10
- posthoganalytics/flag_definition_cache.py +127 -0
- posthoganalytics/integrations/django.py +157 -19
- posthoganalytics/request.py +203 -23
- posthoganalytics/test/test_client.py +250 -22
- posthoganalytics/test/test_exception_capture.py +418 -0
- posthoganalytics/test/test_feature_flag_result.py +441 -2
- posthoganalytics/test/test_feature_flags.py +308 -104
- posthoganalytics/test/test_flag_definition_cache.py +612 -0
- posthoganalytics/test/test_module.py +0 -8
- posthoganalytics/test/test_request.py +536 -0
- posthoganalytics/test/test_utils.py +4 -1
- posthoganalytics/types.py +40 -0
- posthoganalytics/version.py +1 -1
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/METADATA +12 -12
- posthoganalytics-7.4.3.dist-info/RECORD +57 -0
- posthoganalytics-6.7.0.dist-info/RECORD +0 -49
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/WHEEL +0 -0
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/licenses/LICENSE +0 -0
- {posthoganalytics-6.7.0.dist-info → posthoganalytics-7.4.3.dist-info}/top_level.txt +0 -0
posthoganalytics/ai/openai/openai_async.py

@@ -2,6 +2,8 @@ import time
 import uuid
 from typing import Any, Dict, List, Optional
 
+from posthoganalytics.ai.types import TokenUsage
+
 try:
     import openai
 except ImportError:
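Note: TokenUsage comes from the new posthoganalytics/ai/types.py (+125 lines), whose definition is not part of this diff. Judging from how it is used below (constructed empty, constructed with keyword arguments, read with dict-style .get(), and merged in place by merge_usage_stats), it behaves like a TypedDict with all-optional integer fields. A minimal sketch under that assumption:

    from typing import TypedDict

    class TokenUsage(TypedDict, total=False):
        # Keys inferred from the usage_stats.get(...) calls in this file;
        # the real class in posthoganalytics/ai/types.py may define more.
        input_tokens: int
        output_tokens: int
        total_tokens: int
        reasoning_tokens: int
        cache_read_input_tokens: int
        web_search_count: int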
@@ -14,8 +16,17 @@ from posthoganalytics.ai.utils import (
     call_llm_and_track_usage_async,
     extract_available_tool_calls,
     get_model_params,
+    merge_usage_stats,
     with_privacy_mode,
 )
+from posthoganalytics.ai.openai.openai_converter import (
+    extract_openai_usage_from_chunk,
+    extract_openai_content_from_chunk,
+    extract_openai_tool_calls_from_chunk,
+    accumulate_openai_tool_calls,
+    format_openai_streaming_output,
+)
+from posthoganalytics.ai.sanitization import sanitize_openai, sanitize_openai_response
 from posthoganalytics.client import Client as PostHogClient
 
 
@@ -34,6 +45,7 @@ class AsyncOpenAI(openai.AsyncOpenAI):
             of the global posthog.
         **openai_config: Any additional keyword args to set on openai (e.g. organization="xxx").
         """
+
         super().__init__(**kwargs)
         self._ph_client = posthog_client or setup()
 
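Note: for orientation, a hypothetical call through the wrapped client. The posthog_client argument and its setup() fallback are what this hunk shows; the extra posthog_* keyword arguments (posthog_distinct_id, posthog_trace_id, posthog_privacy_mode, ...) appear in the wrapped create() signatures later in this file. The import path assumes ai/openai/__init__.py re-exports AsyncOpenAI, and the model name is a placeholder:

    import asyncio

    from posthoganalytics.ai.openai import AsyncOpenAI

    async def main() -> None:
        # posthog_client is optional; per this hunk, omitting it falls back
        # to the global PostHog client via setup().
        client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

        response = await client.chat.completions.create(
            model="gpt-4.1-mini",  # placeholder model name
            messages=[{"role": "user", "content": "Hello"}],
            posthog_distinct_id="user_123",  # consumed by the wrapper, not sent to OpenAI
        )
        print(response.choices[0].message.content)

    asyncio.run(main())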
@@ -66,6 +78,7 @@ class WrappedResponses:
 
     def __getattr__(self, name):
         """Fallback to original responses object for any methods we don't explicitly handle."""
+
         return getattr(self._original, name)
 
     async def create(
@@ -113,45 +126,36 @@ class WrappedResponses:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats:
+        usage_stats: TokenUsage = TokenUsage()
         final_content = []
+        model_from_response: Optional[str] = None
         response = await self._original.create(**kwargs)
 
         async def async_generator():
             nonlocal usage_stats
             nonlocal final_content  # noqa: F824
+            nonlocal model_from_response
 
             try:
                 async for chunk in response:
-
-
-                    if
-
-
-                    if hasattr(chunk, "usage") and chunk.usage:
-                        usage_stats = {
-                            k: getattr(chunk.usage, k, 0)
-                            for k in [
-                                "input_tokens",
-                                "output_tokens",
-                                "total_tokens",
-                            ]
-                        }
-
-                        # Add support for cached tokens
-                        if hasattr(chunk.usage, "output_tokens_details") and hasattr(
-                            chunk.usage.output_tokens_details, "reasoning_tokens"
+                    # Extract model from response object in chunk (for stored prompts)
+                    if hasattr(chunk, "response") and chunk.response:
+                        if model_from_response is None and hasattr(
+                            chunk.response, "model"
                         ):
-                            usage_stats["reasoning_tokens"] = (
-                                chunk.usage.output_tokens_details.reasoning_tokens
-                            )
+                            model_from_response = chunk.response.model
 
-
-
-
-
-
-
+                    # Extract usage stats from chunk
+                    chunk_usage = extract_openai_usage_from_chunk(chunk, "responses")
+
+                    if chunk_usage:
+                        merge_usage_stats(usage_stats, chunk_usage)
+
+                    # Extract content from chunk
+                    content = extract_openai_content_from_chunk(chunk, "responses")
+
+                    if content is not None:
+                        final_content.append(content)
 
                     yield chunk
 
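Note: the rewritten loop accumulates usage across chunks instead of overwriting it on whichever chunk happens to carry a usage object. The bodies of extract_openai_usage_from_chunk and merge_usage_stats are not in this diff (they live in the new openai_converter.py and in posthoganalytics/ai/utils.py respectively); a plausible sketch of the merge, assuming TokenUsage is dict-like and summing is the intended semantics (the real helper could equally overwrite):

    def merge_usage_stats(target, source) -> None:
        # Fold one chunk's token counts into the running totals, in place.
        if not source:
            return
        for key, value in source.items():
            if isinstance(value, int):
                target[key] = target.get(key, 0) + value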
@@ -159,6 +163,7 @@ class WrappedResponses:
                 end_time = time.time()
                 latency = end_time - start_time
                 output = final_content
+
                 await self._capture_streaming_event(
                     posthog_distinct_id,
                     posthog_trace_id,
@@ -170,6 +175,7 @@
                     latency,
                     output,
                     extract_available_tool_calls("openai", kwargs),
+                    model_from_response,
                 )
 
         return async_generator()
@@ -182,25 +188,31 @@ class WrappedResponses:
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats:
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
         available_tool_calls: Optional[List[Dict[str, Any]]] = None,
+        model_from_response: Optional[str] = None,
     ):
         if posthog_trace_id is None:
             posthog_trace_id = str(uuid.uuid4())
 
+        # Use model from kwargs, fallback to model from response
+        model = kwargs.get("model") or model_from_response or "unknown"
+
         event_properties = {
             "$ai_provider": "openai",
-            "$ai_model":
+            "$ai_model": model,
             "$ai_model_parameters": get_model_params(kwargs),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,
                 posthog_privacy_mode,
-                output,
+                format_openai_streaming_output(output, "responses"),
             ),
             "$ai_http_status": 200,
             "$ai_input_tokens": usage_stats.get("input_tokens", 0),
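Note: every captured input/output property is routed through with_privacy_mode, and raw inputs now additionally pass through the new sanitize_openai_response helper before capture. with_privacy_mode is imported from posthoganalytics/ai/utils.py and is not shown in this diff; a sketch of its likely behavior, assuming the PostHog client exposes a privacy_mode flag:

    def with_privacy_mode(ph_client, privacy_mode: bool, value):
        # Redact the payload when privacy mode is enabled for this call
        # or client-wide.
        if privacy_mode or getattr(ph_client, "privacy_mode", False):
            return None
        return value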
@@ -215,6 +227,15 @@ class WrappedResponses:
             **(posthog_properties or {}),
         }
 
+        # Add web search count if present
+        web_search_count = usage_stats.get("web_search_count")
+        if (
+            web_search_count is not None
+            and isinstance(web_search_count, int)
+            and web_search_count > 0
+        ):
+            event_properties["$ai_web_search_count"] = web_search_count
+
         if available_tool_calls:
             event_properties["$ai_tools"] = available_tool_calls
 
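Note: the guard above is defensive rather than redundant: TokenUsage is read with .get(), so web_search_count may be absent (None), and $ai_web_search_count is only attached when a strictly positive integer count was reported. The same guard is repeated in the Chat Completions path below.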
@@ -340,8 +361,10 @@ class WrappedCompletions:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats:
+        usage_stats: TokenUsage = TokenUsage()
         accumulated_content = []
+        accumulated_tool_calls: Dict[int, Dict[str, Any]] = {}
+        model_from_response: Optional[str] = None
 
         if "stream_options" not in kwargs:
             kwargs["stream_options"] = {}
@@ -351,50 +374,45 @@ class WrappedCompletions:
         async def async_generator():
             nonlocal usage_stats
             nonlocal accumulated_content  # noqa: F824
+            nonlocal accumulated_tool_calls
+            nonlocal model_from_response
 
             try:
                 async for chunk in response:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    ):
-                        usage_stats["reasoning_tokens"] = (
-                            chunk.usage.output_tokens_details.reasoning_tokens
-                        )
-
-                    if (
-                        hasattr(chunk, "choices")
-                        and chunk.choices
-                        and len(chunk.choices) > 0
-                    ):
-                        if chunk.choices[0].delta and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
-                            if content:
-                                accumulated_content.append(content)
+                    # Extract model from chunk (Chat Completions chunks have model field)
+                    if model_from_response is None and hasattr(chunk, "model"):
+                        model_from_response = chunk.model
+
+                    # Extract usage stats from chunk
+                    chunk_usage = extract_openai_usage_from_chunk(chunk, "chat")
+                    if chunk_usage:
+                        merge_usage_stats(usage_stats, chunk_usage)
+
+                    # Extract content from chunk
+                    content = extract_openai_content_from_chunk(chunk, "chat")
+                    if content is not None:
+                        accumulated_content.append(content)
+
+                    # Extract and accumulate tool calls from chunk
+                    chunk_tool_calls = extract_openai_tool_calls_from_chunk(chunk)
+                    if chunk_tool_calls:
+                        accumulate_openai_tool_calls(
+                            accumulated_tool_calls, chunk_tool_calls
+                        )
 
                     yield chunk
 
             finally:
                 end_time = time.time()
                 latency = end_time - start_time
-
+
+                # Convert accumulated tool calls dict to list
+                tool_calls_list = (
+                    list(accumulated_tool_calls.values())
+                    if accumulated_tool_calls
+                    else None
+                )
+
                 await self._capture_streaming_event(
                     posthog_distinct_id,
                     posthog_trace_id,
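Note: Chat Completions streams tool calls as deltas (each delta carries an index plus an optional id, function name, and argument fragment), so a complete call only exists once the fragments are stitched together. That is why the wrapper keeps accumulated_tool_calls keyed by index (Dict[int, Dict[str, Any]]) and converts it to a list in the finally block. The real accumulate_openai_tool_calls lives in the new openai_converter.py; a sketch of the standard accumulation pattern, with the stored dict shape being an assumption:

    from typing import Any, Dict, List

    def accumulate_openai_tool_calls(
        accumulated: Dict[int, Dict[str, Any]], deltas: List[Any]
    ) -> None:
        # Merge streamed tool-call deltas into complete calls, keyed by index.
        for delta in deltas:
            entry = accumulated.setdefault(
                delta.index, {"id": None, "name": None, "arguments": ""}
            )
            if delta.id:
                entry["id"] = delta.id
            if delta.function and delta.function.name:
                entry["name"] = delta.function.name
            if delta.function and delta.function.arguments:
                entry["arguments"] += delta.function.arguments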
@@ -404,8 +422,10 @@ class WrappedCompletions:
                     kwargs,
                     usage_stats,
                     latency,
-
+                    accumulated_content,
+                    tool_calls_list,
                     extract_available_tool_calls("openai", kwargs),
+                    model_from_response,
                 )
 
         return async_generator()
@@ -418,29 +438,36 @@ class WrappedCompletions:
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats:
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
+        tool_calls: Optional[List[Dict[str, Any]]] = None,
         available_tool_calls: Optional[List[Dict[str, Any]]] = None,
+        model_from_response: Optional[str] = None,
     ):
         if posthog_trace_id is None:
             posthog_trace_id = str(uuid.uuid4())
 
+        # Use model from kwargs, fallback to model from response
+        model = kwargs.get("model") or model_from_response or "unknown"
+
         event_properties = {
             "$ai_provider": "openai",
-            "$ai_model":
+            "$ai_model": model,
             "$ai_model_parameters": get_model_params(kwargs),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai(kwargs.get("messages")),
             ),
             "$ai_output_choices": with_privacy_mode(
                 self._client._ph_client,
                 posthog_privacy_mode,
-
+                format_openai_streaming_output(output, "chat", tool_calls),
             ),
             "$ai_http_status": 200,
-            "$ai_input_tokens": usage_stats.get("
-            "$ai_output_tokens": usage_stats.get("
+            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
+            "$ai_output_tokens": usage_stats.get("output_tokens", 0),
             "$ai_cache_read_input_tokens": usage_stats.get(
                 "cache_read_input_tokens", 0
             ),
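Note: the two capture paths sanitize their inputs with different helpers from the new posthoganalytics/ai/sanitization.py: sanitize_openai for Chat Completions message lists (kwargs.get("messages")) and sanitize_openai_response for Responses-API and embeddings inputs (kwargs.get("input")). The model fallback mirrors the Responses path: kwargs wins, then the model name echoed in the stream, then "unknown"; per the comment in the Responses hunk, this matters when a stored prompt omits model from the request kwargs.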
@@ -451,6 +478,16 @@ class WrappedCompletions:
             **(posthog_properties or {}),
         }
 
+        # Add web search count if present
+        web_search_count = usage_stats.get("web_search_count")
+
+        if (
+            web_search_count is not None
+            and isinstance(web_search_count, int)
+            and web_search_count > 0
+        ):
+            event_properties["$ai_web_search_count"] = web_search_count
+
         if available_tool_calls:
             event_properties["$ai_tools"] = available_tool_calls
 
@@ -475,6 +512,7 @@ class WrappedEmbeddings:
 
     def __getattr__(self, name):
         """Fallback to original embeddings object for any methods we don't explicitly handle."""
+
         return getattr(self._original, name)
 
     async def create(
@@ -500,6 +538,7 @@ class WrappedEmbeddings:
         Returns:
             The response from OpenAI's embeddings.create call.
         """
+
         if posthog_trace_id is None:
             posthog_trace_id = str(uuid.uuid4())
 
@@ -508,12 +547,13 @@
         end_time = time.time()
 
         # Extract usage statistics if available
-        usage_stats =
+        usage_stats: TokenUsage = TokenUsage()
+
         if hasattr(response, "usage") and response.usage:
-            usage_stats =
-
-
-
+            usage_stats = TokenUsage(
+                input_tokens=getattr(response.usage, "prompt_tokens", 0),
+                output_tokens=getattr(response.usage, "completion_tokens", 0),
+            )
 
         latency = end_time - start_time
 
@@ -522,10 +562,12 @@
             "$ai_provider": "openai",
             "$ai_model": kwargs.get("model"),
             "$ai_input": with_privacy_mode(
-                self._client._ph_client,
+                self._client._ph_client,
+                posthog_privacy_mode,
+                sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_http_status": 200,
-            "$ai_input_tokens": usage_stats.get("
+            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
             "$ai_latency": latency,
             "$ai_trace_id": posthog_trace_id,
             "$ai_base_url": str(self._client.base_url),
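Note: embeddings responses report usage as prompt_tokens; the getattr probe for completion_tokens is defensive, since embeddings responses do not normally carry one. Mapping both onto the shared TokenUsage shape keeps the property-building above uniform with the chat and responses paths. A hypothetical call through the wrapped embeddings API:

    import asyncio

    from posthoganalytics.ai.openai import AsyncOpenAI

    async def embed() -> None:
        client = AsyncOpenAI()
        # Per the hunks above, prompt_tokens from the response is captured
        # as $ai_input_tokens; posthog_distinct_id is consumed by the wrapper.
        await client.embeddings.create(
            model="text-embedding-3-small",  # placeholder model name
            input="hello world",
            posthog_distinct_id="user_123",
        )

    asyncio.run(embed())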
@@ -556,6 +598,7 @@ class WrappedBeta:
 
     def __getattr__(self, name):
         """Fallback to original beta object for any methods we don't explicitly handle."""
+
         return getattr(self._original, name)
 
     @property
@@ -572,6 +615,7 @@ class WrappedBetaChat:
 
     def __getattr__(self, name):
         """Fallback to original beta chat object for any methods we don't explicitly handle."""
+
         return getattr(self._original, name)
 
     @property
@@ -588,6 +632,7 @@ class WrappedBetaCompletions:
 
     def __getattr__(self, name):
         """Fallback to original beta completions object for any methods we don't explicitly handle."""
+
         return getattr(self._original, name)
 
     async def parse(