posthoganalytics 6.7.2__py3-none-any.whl → 6.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
posthoganalytics/ai/anthropic/anthropic.py

@@ -10,7 +10,7 @@ import time
 import uuid
 from typing import Any, Dict, List, Optional
 
-from posthoganalytics.ai.types import StreamingContentBlock, ToolInProgress
+from posthoganalytics.ai.types import StreamingContentBlock, TokenUsage, ToolInProgress
 from posthoganalytics.ai.utils import (
     call_llm_and_track_usage,
     merge_usage_stats,
@@ -126,7 +126,7 @@ class WrappedMessages(Messages):
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {"input_tokens": 0, "output_tokens": 0}
+        usage_stats: TokenUsage = TokenUsage(input_tokens=0, output_tokens=0)
         accumulated_content = ""
         content_blocks: List[StreamingContentBlock] = []
         tools_in_progress: Dict[str, ToolInProgress] = {}
@@ -210,14 +210,13 @@ class WrappedMessages(Messages):
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         content_blocks: List[StreamingContentBlock],
         accumulated_content: str,
     ):
         from posthoganalytics.ai.types import StreamingEventData
         from posthoganalytics.ai.anthropic.anthropic_converter import (
-            standardize_anthropic_usage,
            format_anthropic_streaming_input,
            format_anthropic_streaming_output_complete,
         )
@@ -236,7 +235,7 @@ class WrappedMessages(Messages):
             formatted_output=format_anthropic_streaming_output_complete(
                 content_blocks, accumulated_content
             ),
-            usage_stats=standardize_anthropic_usage(usage_stats),
+            usage_stats=usage_stats,
             latency=latency,
             distinct_id=posthog_distinct_id,
             trace_id=posthog_trace_id,
posthoganalytics/ai/anthropic/anthropic_async.py

@@ -11,7 +11,7 @@ import uuid
 from typing import Any, Dict, List, Optional
 
 from posthoganalytics import setup
-from posthoganalytics.ai.types import StreamingContentBlock, ToolInProgress
+from posthoganalytics.ai.types import StreamingContentBlock, TokenUsage, ToolInProgress
 from posthoganalytics.ai.utils import (
     call_llm_and_track_usage_async,
     extract_available_tool_calls,
@@ -131,7 +131,7 @@ class AsyncWrappedMessages(AsyncMessages):
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {"input_tokens": 0, "output_tokens": 0}
+        usage_stats: TokenUsage = TokenUsage(input_tokens=0, output_tokens=0)
         accumulated_content = ""
         content_blocks: List[StreamingContentBlock] = []
         tools_in_progress: Dict[str, ToolInProgress] = {}
@@ -215,7 +215,7 @@ class AsyncWrappedMessages(AsyncMessages):
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         content_blocks: List[StreamingContentBlock],
         accumulated_content: str,
posthoganalytics/ai/anthropic/anthropic_converter.py

@@ -14,7 +14,6 @@ from posthoganalytics.ai.types import (
     FormattedMessage,
     FormattedTextContent,
     StreamingContentBlock,
-    StreamingUsageStats,
     TokenUsage,
     ToolInProgress,
 )
@@ -164,7 +163,38 @@ def format_anthropic_streaming_content(
     return formatted
 
 
-def extract_anthropic_usage_from_event(event: Any) -> StreamingUsageStats:
+def extract_anthropic_usage_from_response(response: Any) -> TokenUsage:
+    """
+    Extract usage from a full Anthropic response (non-streaming).
+
+    Args:
+        response: The complete response from Anthropic API
+
+    Returns:
+        TokenUsage with standardized usage
+    """
+    if not hasattr(response, "usage"):
+        return TokenUsage(input_tokens=0, output_tokens=0)
+
+    result = TokenUsage(
+        input_tokens=getattr(response.usage, "input_tokens", 0),
+        output_tokens=getattr(response.usage, "output_tokens", 0),
+    )
+
+    if hasattr(response.usage, "cache_read_input_tokens"):
+        cache_read = response.usage.cache_read_input_tokens
+        if cache_read and cache_read > 0:
+            result["cache_read_input_tokens"] = cache_read
+
+    if hasattr(response.usage, "cache_creation_input_tokens"):
+        cache_creation = response.usage.cache_creation_input_tokens
+        if cache_creation and cache_creation > 0:
+            result["cache_creation_input_tokens"] = cache_creation
+
+    return result
+
+
+def extract_anthropic_usage_from_event(event: Any) -> TokenUsage:
     """
     Extract usage statistics from an Anthropic streaming event.
 
@@ -175,7 +205,7 @@ def extract_anthropic_usage_from_event(event: Any) -> StreamingUsageStats:
         Dictionary of usage statistics
     """
 
-    usage: StreamingUsageStats = {}
+    usage: TokenUsage = TokenUsage()
 
     # Handle usage stats from message_start event
     if hasattr(event, "type") and event.type == "message_start":
@@ -329,26 +359,6 @@ def finalize_anthropic_tool_input(
         del tools_in_progress[block["id"]]
 
 
-def standardize_anthropic_usage(usage: Dict[str, Any]) -> TokenUsage:
-    """
-    Standardize Anthropic usage statistics to common TokenUsage format.
-
-    Anthropic already uses standard field names, so this mainly structures the data.
-
-    Args:
-        usage: Raw usage statistics from Anthropic
-
-    Returns:
-        Standardized TokenUsage dict
-    """
-    return TokenUsage(
-        input_tokens=usage.get("input_tokens", 0),
-        output_tokens=usage.get("output_tokens", 0),
-        cache_read_input_tokens=usage.get("cache_read_input_tokens"),
-        cache_creation_input_tokens=usage.get("cache_creation_input_tokens"),
-    )
-
-
 def format_anthropic_streaming_input(kwargs: Dict[str, Any]) -> Any:
     """
     Format Anthropic streaming input using system prompt merging.
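
The thread running through these hunks is the replacement of ad-hoc `Dict[str, int]` accumulators (and the old `StreamingUsageStats` shape) with a single `TokenUsage` dict. `TokenUsage` is a TypedDict defined in `posthoganalytics/ai/types.py` (its definition is not part of this diff), and TypedDict calls construct plain dicts at runtime, so `TokenUsage(input_tokens=0, output_tokens=0)` behaves exactly like the literal it replaces while giving type checkers key-level guarantees. A minimal sketch of the pattern, assuming a `total=False` TypedDict with the fields these hunks read and write:

```python
from typing import TypedDict


class TokenUsage(TypedDict, total=False):
    # Assumed sketch; the real definition lives in posthoganalytics/ai/types.py.
    input_tokens: int
    output_tokens: int
    cache_read_input_tokens: int
    cache_creation_input_tokens: int
    reasoning_tokens: int


# TypedDict "constructor" calls are plain dicts at runtime:
usage: TokenUsage = TokenUsage(input_tokens=0, output_tokens=0)
usage["cache_read_input_tokens"] = 128  # optional keys only appear when set
assert usage == {
    "input_tokens": 0,
    "output_tokens": 0,
    "cache_read_input_tokens": 128,
}
```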
posthoganalytics/ai/gemini/gemini.py

@@ -3,6 +3,9 @@ import time
 import uuid
 from typing import Any, Dict, Optional
 
+from posthoganalytics.ai.types import TokenUsage, StreamingEventData
+from posthoganalytics.ai.utils import merge_system_prompt
+
 try:
     from google import genai
 except ImportError:
@@ -17,7 +20,6 @@ from posthoganalytics.ai.utils import (
     merge_usage_stats,
 )
 from posthoganalytics.ai.gemini.gemini_converter import (
-    format_gemini_input,
     extract_gemini_usage_from_chunk,
     extract_gemini_content_from_chunk,
     format_gemini_streaming_output,
@@ -294,7 +296,7 @@ class Models:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {"input_tokens": 0, "output_tokens": 0}
+        usage_stats: TokenUsage = TokenUsage(input_tokens=0, output_tokens=0)
         accumulated_content = []
 
         kwargs_without_stream = {"model": model, "contents": contents, **kwargs}
@@ -350,15 +352,12 @@ class Models:
         privacy_mode: bool,
         groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
     ):
-        from posthoganalytics.ai.types import StreamingEventData
-        from posthoganalytics.ai.gemini.gemini_converter import standardize_gemini_usage
-
         # Prepare standardized event data
-        formatted_input = self._format_input(contents)
+        formatted_input = self._format_input(contents, **kwargs)
         sanitized_input = sanitize_gemini(formatted_input)
 
         event_data = StreamingEventData(
@@ -368,7 +367,7 @@ class Models:
             kwargs=kwargs,
             formatted_input=sanitized_input,
             formatted_output=format_gemini_streaming_output(output),
-            usage_stats=standardize_gemini_usage(usage_stats),
+            usage_stats=usage_stats,
             latency=latency,
             distinct_id=distinct_id,
             trace_id=trace_id,
@@ -380,10 +379,12 @@ class Models:
         # Use the common capture function
         capture_streaming_event(self._ph_client, event_data)
 
-    def _format_input(self, contents):
+    def _format_input(self, contents, **kwargs):
        """Format input contents for PostHog tracking"""
 
-        return format_gemini_input(contents)
+        # Create kwargs dict with contents for merge_system_prompt
+        input_kwargs = {"contents": contents, **kwargs}
+        return merge_system_prompt(input_kwargs, "gemini")
 
     def generate_content_stream(
         self,
posthoganalytics/ai/gemini/gemini_converter.py

@@ -10,7 +10,6 @@ from typing import Any, Dict, List, Optional, TypedDict, Union
 from posthoganalytics.ai.types import (
     FormattedContentItem,
     FormattedMessage,
-    StreamingUsageStats,
     TokenUsage,
 )
 
@@ -221,6 +220,30 @@ def format_gemini_response(response: Any) -> List[FormattedMessage]:
     return output
 
 
+def extract_gemini_system_instruction(config: Any) -> Optional[str]:
+    """
+    Extract system instruction from Gemini config parameter.
+
+    Args:
+        config: Config object or dict that may contain system instruction
+
+    Returns:
+        System instruction string if present, None otherwise
+    """
+    if config is None:
+        return None
+
+    # Handle different config formats
+    if hasattr(config, "system_instruction"):
+        return config.system_instruction
+    elif isinstance(config, dict) and "system_instruction" in config:
+        return config["system_instruction"]
+    elif isinstance(config, dict) and "systemInstruction" in config:
+        return config["systemInstruction"]
+
+    return None
+
+
 def extract_gemini_tools(kwargs: Dict[str, Any]) -> Optional[Any]:
     """
     Extract tool definitions from Gemini API kwargs.
@@ -238,6 +261,38 @@ def extract_gemini_tools(kwargs: Dict[str, Any]) -> Optional[Any]:
     return None
 
 
+def format_gemini_input_with_system(
+    contents: Any, config: Any = None
+) -> List[FormattedMessage]:
+    """
+    Format Gemini input contents into standardized message format, including system instruction handling.
+
+    Args:
+        contents: Input contents in various possible formats
+        config: Config object or dict that may contain system instruction
+
+    Returns:
+        List of formatted messages with role and content fields, with system message prepended if needed
+    """
+    formatted_messages = format_gemini_input(contents)
+
+    # Check if system instruction is provided in config parameter
+    system_instruction = extract_gemini_system_instruction(config)
+
+    if system_instruction is not None:
+        has_system = any(msg.get("role") == "system" for msg in formatted_messages)
+        if not has_system:
+            from posthoganalytics.ai.types import FormattedMessage
+
+            system_message: FormattedMessage = {
+                "role": "system",
+                "content": system_instruction,
+            }
+            formatted_messages = [system_message] + list(formatted_messages)
+
+    return formatted_messages
+
+
 def format_gemini_input(contents: Any) -> List[FormattedMessage]:
     """
     Format Gemini input contents into standardized message format for PostHog tracking.
@@ -283,7 +338,54 @@ def format_gemini_input(contents: Any) -> List[FormattedMessage]:
     return [_format_object_message(contents)]
 
 
-def extract_gemini_usage_from_chunk(chunk: Any) -> StreamingUsageStats:
+def _extract_usage_from_metadata(metadata: Any) -> TokenUsage:
+    """
+    Common logic to extract usage from Gemini metadata.
+    Used by both streaming and non-streaming paths.
+
+    Args:
+        metadata: usage_metadata from Gemini response or chunk
+
+    Returns:
+        TokenUsage with standardized usage
+    """
+    usage = TokenUsage(
+        input_tokens=getattr(metadata, "prompt_token_count", 0),
+        output_tokens=getattr(metadata, "candidates_token_count", 0),
+    )
+
+    # Add cache tokens if present (don't add if 0)
+    if hasattr(metadata, "cached_content_token_count"):
+        cache_tokens = metadata.cached_content_token_count
+        if cache_tokens and cache_tokens > 0:
+            usage["cache_read_input_tokens"] = cache_tokens
+
+    # Add reasoning tokens if present (don't add if 0)
+    if hasattr(metadata, "thoughts_token_count"):
+        reasoning_tokens = metadata.thoughts_token_count
+        if reasoning_tokens and reasoning_tokens > 0:
+            usage["reasoning_tokens"] = reasoning_tokens
+
+    return usage
+
+
+def extract_gemini_usage_from_response(response: Any) -> TokenUsage:
+    """
+    Extract usage statistics from a full Gemini response (non-streaming).
+
+    Args:
+        response: The complete response from Gemini API
+
+    Returns:
+        TokenUsage with standardized usage statistics
+    """
+    if not hasattr(response, "usage_metadata") or not response.usage_metadata:
+        return TokenUsage(input_tokens=0, output_tokens=0)
+
+    return _extract_usage_from_metadata(response.usage_metadata)
+
+
+def extract_gemini_usage_from_chunk(chunk: Any) -> TokenUsage:
     """
     Extract usage statistics from a Gemini streaming chunk.
 
@@ -291,21 +393,16 @@ def extract_gemini_usage_from_chunk(chunk: Any) -> StreamingUsageStats:
         chunk: Streaming chunk from Gemini API
 
     Returns:
-        Dictionary of usage statistics
+        TokenUsage with standardized usage statistics
     """
 
-    usage: StreamingUsageStats = {}
+    usage: TokenUsage = TokenUsage()
 
     if not hasattr(chunk, "usage_metadata") or not chunk.usage_metadata:
         return usage
 
-    # Gemini uses prompt_token_count and candidates_token_count
-    usage["input_tokens"] = getattr(chunk.usage_metadata, "prompt_token_count", 0)
-    usage["output_tokens"] = getattr(chunk.usage_metadata, "candidates_token_count", 0)
-
-    # Calculate total if both values are defined (including 0)
-    if "input_tokens" in usage and "output_tokens" in usage:
-        usage["total_tokens"] = usage["input_tokens"] + usage["output_tokens"]
+    # Use the shared helper to extract usage
+    usage = _extract_usage_from_metadata(chunk.usage_metadata)
 
     return usage
 
@@ -417,22 +514,3 @@ def format_gemini_streaming_output(
 
     # Fallback for empty or unexpected input
     return [{"role": "assistant", "content": [{"type": "text", "text": ""}]}]
-
-
-def standardize_gemini_usage(usage: Dict[str, Any]) -> TokenUsage:
-    """
-    Standardize Gemini usage statistics to common TokenUsage format.
-
-    Gemini already uses standard field names (input_tokens/output_tokens).
-
-    Args:
-        usage: Raw usage statistics from Gemini
-
-    Returns:
-        Standardized TokenUsage dict
-    """
-    return TokenUsage(
-        input_tokens=usage.get("input_tokens", 0),
-        output_tokens=usage.get("output_tokens", 0),
-        # Gemini doesn't currently support cache or reasoning tokens
-    )
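
In the Gemini converter, both the streaming and non-streaming paths now funnel through the shared `_extract_usage_from_metadata` helper, which also picks up cache and reasoning token counts the old chunk extractor ignored (and drops the derived `total_tokens`). A quick sketch of the helper's behavior, using `SimpleNamespace` as a stand-in for the SDK's `usage_metadata` object:

```python
from types import SimpleNamespace

# Stand-in for a google-genai usage_metadata object; attribute names match
# the ones the helper above reads via getattr/hasattr.
metadata = SimpleNamespace(
    prompt_token_count=1200,
    candidates_token_count=350,
    cached_content_token_count=1024,
    thoughts_token_count=0,  # zero counts are deliberately not recorded
)

usage = _extract_usage_from_metadata(metadata)
assert usage == {
    "input_tokens": 1200,
    "output_tokens": 350,
    "cache_read_input_tokens": 1024,
    # no "reasoning_tokens" key, because thoughts_token_count was 0
}
```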
posthoganalytics/ai/openai/openai.py

@@ -2,6 +2,8 @@ import time
 import uuid
 from typing import Any, Dict, List, Optional
 
+from posthoganalytics.ai.types import TokenUsage
+
 try:
     import openai
 except ImportError:
@@ -120,7 +122,7 @@ class WrappedResponses:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {}
+        usage_stats: TokenUsage = TokenUsage()
         final_content = []
         response = self._original.create(**kwargs)
 
@@ -171,14 +173,13 @@ class WrappedResponses:
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
         available_tool_calls: Optional[List[Dict[str, Any]]] = None,
     ):
         from posthoganalytics.ai.types import StreamingEventData
         from posthoganalytics.ai.openai.openai_converter import (
-            standardize_openai_usage,
             format_openai_streaming_input,
             format_openai_streaming_output,
         )
@@ -195,7 +196,7 @@ class WrappedResponses:
             kwargs=kwargs,
             formatted_input=sanitized_input,
             formatted_output=format_openai_streaming_output(output, "responses"),
-            usage_stats=standardize_openai_usage(usage_stats, "responses"),
+            usage_stats=usage_stats,
             latency=latency,
             distinct_id=posthog_distinct_id,
             trace_id=posthog_trace_id,
@@ -316,7 +317,7 @@ class WrappedCompletions:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {}
+        usage_stats: TokenUsage = TokenUsage()
         accumulated_content = []
         accumulated_tool_calls: Dict[int, Dict[str, Any]] = {}
         if "stream_options" not in kwargs:
@@ -387,7 +388,7 @@ class WrappedCompletions:
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
         tool_calls: Optional[List[Dict[str, Any]]] = None,
@@ -395,7 +396,6 @@ class WrappedCompletions:
     ):
         from posthoganalytics.ai.types import StreamingEventData
         from posthoganalytics.ai.openai.openai_converter import (
-            standardize_openai_usage,
             format_openai_streaming_input,
             format_openai_streaming_output,
         )
@@ -412,7 +412,7 @@ class WrappedCompletions:
             kwargs=kwargs,
             formatted_input=sanitized_input,
             formatted_output=format_openai_streaming_output(output, "chat", tool_calls),
-            usage_stats=standardize_openai_usage(usage_stats, "chat"),
+            usage_stats=usage_stats,
             latency=latency,
             distinct_id=posthog_distinct_id,
             trace_id=posthog_trace_id,

posthoganalytics/ai/openai/openai_async.py

@@ -2,6 +2,8 @@ import time
 import uuid
 from typing import Any, Dict, List, Optional
 
+from posthoganalytics.ai.types import TokenUsage
+
 try:
     import openai
 except ImportError:
@@ -124,7 +126,7 @@ class WrappedResponses:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {}
+        usage_stats: TokenUsage = TokenUsage()
         final_content = []
         response = self._original.create(**kwargs)
 
@@ -176,7 +178,7 @@ class WrappedResponses:
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
         available_tool_calls: Optional[List[Dict[str, Any]]] = None,
@@ -336,7 +338,7 @@ class WrappedCompletions:
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {}
+        usage_stats: TokenUsage = TokenUsage()
         accumulated_content = []
         accumulated_tool_calls: Dict[int, Dict[str, Any]] = {}
 
@@ -406,7 +408,7 @@ class WrappedCompletions:
         posthog_privacy_mode: bool,
         posthog_groups: Optional[Dict[str, Any]],
         kwargs: Dict[str, Any],
-        usage_stats: Dict[str, int],
+        usage_stats: TokenUsage,
         latency: float,
         output: Any,
         tool_calls: Optional[List[Dict[str, Any]]] = None,
@@ -430,8 +432,8 @@ class WrappedCompletions:
                 format_openai_streaming_output(output, "chat", tool_calls),
             ),
             "$ai_http_status": 200,
-            "$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
-            "$ai_output_tokens": usage_stats.get("completion_tokens", 0),
+            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
+            "$ai_output_tokens": usage_stats.get("output_tokens", 0),
             "$ai_cache_read_input_tokens": usage_stats.get(
                 "cache_read_input_tokens", 0
             ),
@@ -501,13 +503,13 @@ class WrappedEmbeddings:
         end_time = time.time()
 
         # Extract usage statistics if available
-        usage_stats = {}
+        usage_stats: TokenUsage = TokenUsage()
 
         if hasattr(response, "usage") and response.usage:
-            usage_stats = {
-                "prompt_tokens": getattr(response.usage, "prompt_tokens", 0),
-                "total_tokens": getattr(response.usage, "total_tokens", 0),
-            }
+            usage_stats = TokenUsage(
+                input_tokens=getattr(response.usage, "prompt_tokens", 0),
+                output_tokens=getattr(response.usage, "completion_tokens", 0),
+            )
 
         latency = end_time - start_time
 
@@ -521,7 +523,7 @@ class WrappedEmbeddings:
                 sanitize_openai_response(kwargs.get("input")),
             ),
             "$ai_http_status": 200,
-            "$ai_input_tokens": usage_stats.get("prompt_tokens", 0),
+            "$ai_input_tokens": usage_stats.get("input_tokens", 0),
             "$ai_latency": latency,
             "$ai_trace_id": posthog_trace_id,
             "$ai_base_url": str(self._client.base_url),
posthoganalytics/ai/openai/openai_converter.py

@@ -14,7 +14,6 @@ from posthoganalytics.ai.types import (
     FormattedImageContent,
     FormattedMessage,
     FormattedTextContent,
-    StreamingUsageStats,
     TokenUsage,
 )
 
@@ -256,9 +255,69 @@ def format_openai_streaming_content(
     return formatted
 
 
+def extract_openai_usage_from_response(response: Any) -> TokenUsage:
+    """
+    Extract usage statistics from a full OpenAI response (non-streaming).
+    Handles both Chat Completions and Responses API.
+
+    Args:
+        response: The complete response from OpenAI API
+
+    Returns:
+        TokenUsage with standardized usage statistics
+    """
+    if not hasattr(response, "usage"):
+        return TokenUsage(input_tokens=0, output_tokens=0)
+
+    cached_tokens = 0
+    input_tokens = 0
+    output_tokens = 0
+    reasoning_tokens = 0
+
+    # Responses API format
+    if hasattr(response.usage, "input_tokens"):
+        input_tokens = response.usage.input_tokens
+    if hasattr(response.usage, "output_tokens"):
+        output_tokens = response.usage.output_tokens
+    if hasattr(response.usage, "input_tokens_details") and hasattr(
+        response.usage.input_tokens_details, "cached_tokens"
+    ):
+        cached_tokens = response.usage.input_tokens_details.cached_tokens
+    if hasattr(response.usage, "output_tokens_details") and hasattr(
+        response.usage.output_tokens_details, "reasoning_tokens"
+    ):
+        reasoning_tokens = response.usage.output_tokens_details.reasoning_tokens
+
+    # Chat Completions format
+    if hasattr(response.usage, "prompt_tokens"):
+        input_tokens = response.usage.prompt_tokens
+    if hasattr(response.usage, "completion_tokens"):
+        output_tokens = response.usage.completion_tokens
+    if hasattr(response.usage, "prompt_tokens_details") and hasattr(
+        response.usage.prompt_tokens_details, "cached_tokens"
+    ):
+        cached_tokens = response.usage.prompt_tokens_details.cached_tokens
+    if hasattr(response.usage, "completion_tokens_details") and hasattr(
+        response.usage.completion_tokens_details, "reasoning_tokens"
+    ):
+        reasoning_tokens = response.usage.completion_tokens_details.reasoning_tokens
+
+    result = TokenUsage(
+        input_tokens=input_tokens,
+        output_tokens=output_tokens,
+    )
+
+    if cached_tokens > 0:
+        result["cache_read_input_tokens"] = cached_tokens
+    if reasoning_tokens > 0:
+        result["reasoning_tokens"] = reasoning_tokens
+
+    return result
+
+
 def extract_openai_usage_from_chunk(
     chunk: Any, provider_type: str = "chat"
-) -> StreamingUsageStats:
+) -> TokenUsage:
     """
     Extract usage statistics from an OpenAI streaming chunk.
 
@@ -272,16 +331,16 @@ def extract_openai_usage_from_chunk(
         Dictionary of usage statistics
     """
 
-    usage: StreamingUsageStats = {}
+    usage: TokenUsage = TokenUsage()
 
     if provider_type == "chat":
         if not hasattr(chunk, "usage") or not chunk.usage:
             return usage
 
         # Chat Completions API uses prompt_tokens and completion_tokens
-        usage["prompt_tokens"] = getattr(chunk.usage, "prompt_tokens", 0)
-        usage["completion_tokens"] = getattr(chunk.usage, "completion_tokens", 0)
-        usage["total_tokens"] = getattr(chunk.usage, "total_tokens", 0)
+        # Standardize to input_tokens and output_tokens
+        usage["input_tokens"] = getattr(chunk.usage, "prompt_tokens", 0)
+        usage["output_tokens"] = getattr(chunk.usage, "completion_tokens", 0)
 
         # Handle cached tokens
         if hasattr(chunk.usage, "prompt_tokens_details") and hasattr(
@@ -310,7 +369,6 @@ def extract_openai_usage_from_chunk(
             response_usage = chunk.response.usage
             usage["input_tokens"] = getattr(response_usage, "input_tokens", 0)
             usage["output_tokens"] = getattr(response_usage, "output_tokens", 0)
-            usage["total_tokens"] = getattr(response_usage, "total_tokens", 0)
 
             # Handle cached tokens
             if hasattr(response_usage, "input_tokens_details") and hasattr(
@@ -535,37 +593,6 @@ def format_openai_streaming_output(
     ]
 
 
-def standardize_openai_usage(
-    usage: Dict[str, Any], api_type: str = "chat"
-) -> TokenUsage:
-    """
-    Standardize OpenAI usage statistics to common TokenUsage format.
-
-    Args:
-        usage: Raw usage statistics from OpenAI
-        api_type: Either "chat" or "responses" to handle different field names
-
-    Returns:
-        Standardized TokenUsage dict
-    """
-    if api_type == "chat":
-        # Chat API uses prompt_tokens/completion_tokens
-        return TokenUsage(
-            input_tokens=usage.get("prompt_tokens", 0),
-            output_tokens=usage.get("completion_tokens", 0),
-            cache_read_input_tokens=usage.get("cache_read_input_tokens"),
-            reasoning_tokens=usage.get("reasoning_tokens"),
-        )
-    else:  # responses API
-        # Responses API uses input_tokens/output_tokens
-        return TokenUsage(
-            input_tokens=usage.get("input_tokens", 0),
-            output_tokens=usage.get("output_tokens", 0),
-            cache_read_input_tokens=usage.get("cache_read_input_tokens"),
-            reasoning_tokens=usage.get("reasoning_tokens"),
-        )
-
-
 def format_openai_streaming_input(
     kwargs: Dict[str, Any], api_type: str = "chat"
 ) -> Any:
@@ -579,7 +606,6 @@ def format_openai_streaming_input(
     Returns:
         Formatted input ready for PostHog tracking
     """
-    if api_type == "chat":
-        return kwargs.get("messages")
-    else:  # responses API
-        return kwargs.get("input")
+    from posthoganalytics.ai.utils import merge_system_prompt
+
+    return merge_system_prompt(kwargs, "openai")
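
`extract_openai_usage_from_response` moves the usage-normalization logic that previously lived inline in `utils.get_usage` into the converter, collapsing both OpenAI response shapes into one `TokenUsage`. Because the Chat Completions checks run after the Responses API checks, `prompt_tokens`/`completion_tokens` take precedence if a usage object somehow carries both naming schemes. A sketch with `SimpleNamespace` standing in for SDK response objects:

```python
from types import SimpleNamespace

# Chat Completions shape: prompt_tokens/completion_tokens, no detail objects.
chat = SimpleNamespace(usage=SimpleNamespace(prompt_tokens=90, completion_tokens=40))
assert extract_openai_usage_from_response(chat) == {
    "input_tokens": 90,
    "output_tokens": 40,
}

# Responses API shape: input_tokens/output_tokens plus *_details objects.
responses = SimpleNamespace(
    usage=SimpleNamespace(
        input_tokens=90,
        output_tokens=40,
        input_tokens_details=SimpleNamespace(cached_tokens=64),
        output_tokens_details=SimpleNamespace(reasoning_tokens=12),
    )
)
assert extract_openai_usage_from_response(responses) == {
    "input_tokens": 90,
    "output_tokens": 40,
    "cache_read_input_tokens": 64,
    "reasoning_tokens": 12,
}
```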
posthoganalytics/ai/types.py

@@ -77,24 +77,6 @@ class ProviderResponse(TypedDict, total=False):
     error: Optional[str]
 
 
-class StreamingUsageStats(TypedDict, total=False):
-    """
-    Usage statistics collected during streaming.
-
-    Different providers populate different fields during streaming.
-    """
-
-    input_tokens: int
-    output_tokens: int
-    cache_read_input_tokens: Optional[int]
-    cache_creation_input_tokens: Optional[int]
-    reasoning_tokens: Optional[int]
-    # OpenAI-specific names
-    prompt_tokens: Optional[int]
-    completion_tokens: Optional[int]
-    total_tokens: Optional[int]
-
-
 class StreamingContentBlock(TypedDict, total=False):
     """
     Content block used during streaming to accumulate content.
@@ -133,7 +115,7 @@ class StreamingEventData(TypedDict):
     kwargs: Dict[str, Any]  # Original kwargs for tool extraction and special handling
     formatted_input: Any  # Provider-formatted input ready for tracking
     formatted_output: Any  # Provider-formatted output ready for tracking
-    usage_stats: TokenUsage  # Standardized token counts
+    usage_stats: TokenUsage
     latency: float
     distinct_id: Optional[str]
     trace_id: Optional[str]
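
Deleting `StreamingUsageStats` removes the dual vocabulary (standardized keys alongside OpenAI's `prompt_tokens`/`completion_tokens`/`total_tokens`) that let the two spellings mix silently. With `TokenUsage` as the only shape, provider-specific spellings now fail type checking; a small illustration of what a checker such as mypy rejects after this change:

```python
usage: TokenUsage = TokenUsage(input_tokens=10, output_tokens=2)

# Fine: a standardized key declared on TokenUsage.
usage["cache_read_input_tokens"] = 5

# Previously legal on StreamingUsageStats, now a type error:
#   TypedDict "TokenUsage" has no key "prompt_tokens"
usage["prompt_tokens"] = 10  # type: ignore[typeddict-unknown-key]
```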
posthoganalytics/ai/utils.py

@@ -1,10 +1,9 @@
 import time
 import uuid
-from typing import Any, Callable, Dict, Optional
-
+from typing import Any, Callable, Dict, List, Optional, cast
 
 from posthoganalytics.client import Client as PostHogClient
-from posthoganalytics.ai.types import StreamingEventData, StreamingUsageStats
+from posthoganalytics.ai.types import FormattedMessage, StreamingEventData, TokenUsage
 from posthoganalytics.ai.sanitization import (
     sanitize_openai,
     sanitize_anthropic,
@@ -14,7 +13,7 @@ from posthoganalytics.ai.sanitization import (
 
 
 def merge_usage_stats(
-    target: Dict[str, int], source: StreamingUsageStats, mode: str = "incremental"
+    target: TokenUsage, source: TokenUsage, mode: str = "incremental"
 ) -> None:
     """
     Merge streaming usage statistics into target dict, handling None values.
@@ -25,19 +24,49 @@ def merge_usage_stats(
 
     Args:
         target: Dictionary to update with usage stats
-        source: StreamingUsageStats that may contain None values
+        source: TokenUsage that may contain None values
         mode: Either "incremental" or "cumulative"
     """
     if mode == "incremental":
         # Add new values to existing totals
-        for key, value in source.items():
-            if value is not None and isinstance(value, int):
-                target[key] = target.get(key, 0) + value
+        source_input = source.get("input_tokens")
+        if source_input is not None:
+            current = target.get("input_tokens") or 0
+            target["input_tokens"] = current + source_input
+
+        source_output = source.get("output_tokens")
+        if source_output is not None:
+            current = target.get("output_tokens") or 0
+            target["output_tokens"] = current + source_output
+
+        source_cache_read = source.get("cache_read_input_tokens")
+        if source_cache_read is not None:
+            current = target.get("cache_read_input_tokens") or 0
+            target["cache_read_input_tokens"] = current + source_cache_read
+
+        source_cache_creation = source.get("cache_creation_input_tokens")
+        if source_cache_creation is not None:
+            current = target.get("cache_creation_input_tokens") or 0
+            target["cache_creation_input_tokens"] = current + source_cache_creation
+
+        source_reasoning = source.get("reasoning_tokens")
+        if source_reasoning is not None:
+            current = target.get("reasoning_tokens") or 0
+            target["reasoning_tokens"] = current + source_reasoning
     elif mode == "cumulative":
         # Replace with latest values (already cumulative)
-        for key, value in source.items():
-            if value is not None and isinstance(value, int):
-                target[key] = value
+        if source.get("input_tokens") is not None:
+            target["input_tokens"] = source["input_tokens"]
+        if source.get("output_tokens") is not None:
+            target["output_tokens"] = source["output_tokens"]
+        if source.get("cache_read_input_tokens") is not None:
+            target["cache_read_input_tokens"] = source["cache_read_input_tokens"]
+        if source.get("cache_creation_input_tokens") is not None:
+            target["cache_creation_input_tokens"] = source[
+                "cache_creation_input_tokens"
+            ]
+        if source.get("reasoning_tokens") is not None:
+            target["reasoning_tokens"] = source["reasoning_tokens"]
     else:
         raise ValueError(f"Invalid mode: {mode}. Must be 'incremental' or 'cumulative'")
 
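
The rewritten `merge_usage_stats` enumerates each field explicitly instead of looping over arbitrary keys, which both satisfies the `TokenUsage` typing and keeps stray provider-specific keys out of the totals. The two modes match the two ways providers report usage mid-stream: deltas to be summed, or running totals to be replaced. A short sketch of both:

```python
total: TokenUsage = TokenUsage(input_tokens=100, output_tokens=10)

# Incremental: per-event deltas are added onto the running totals.
merge_usage_stats(total, TokenUsage(output_tokens=5), mode="incremental")
assert total == {"input_tokens": 100, "output_tokens": 15}

# Cumulative: each event already carries full totals, so values replace.
merge_usage_stats(
    total, TokenUsage(input_tokens=100, output_tokens=42), mode="cumulative"
)
assert total == {"input_tokens": 100, "output_tokens": 42}
```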
@@ -64,74 +93,31 @@ def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:
     return model_params
 
 
-def get_usage(response, provider: str) -> Dict[str, Any]:
+def get_usage(response, provider: str) -> TokenUsage:
+    """
+    Extract usage statistics from response based on provider.
+    Delegates to provider-specific converter functions.
+    """
     if provider == "anthropic":
-        return {
-            "input_tokens": response.usage.input_tokens,
-            "output_tokens": response.usage.output_tokens,
-            "cache_read_input_tokens": response.usage.cache_read_input_tokens,
-            "cache_creation_input_tokens": response.usage.cache_creation_input_tokens,
-        }
+        from posthoganalytics.ai.anthropic.anthropic_converter import (
+            extract_anthropic_usage_from_response,
+        )
+
+        return extract_anthropic_usage_from_response(response)
     elif provider == "openai":
-        cached_tokens = 0
-        input_tokens = 0
-        output_tokens = 0
-        reasoning_tokens = 0
-
-        # responses api
-        if hasattr(response.usage, "input_tokens"):
-            input_tokens = response.usage.input_tokens
-        if hasattr(response.usage, "output_tokens"):
-            output_tokens = response.usage.output_tokens
-        if hasattr(response.usage, "input_tokens_details") and hasattr(
-            response.usage.input_tokens_details, "cached_tokens"
-        ):
-            cached_tokens = response.usage.input_tokens_details.cached_tokens
-        if hasattr(response.usage, "output_tokens_details") and hasattr(
-            response.usage.output_tokens_details, "reasoning_tokens"
-        ):
-            reasoning_tokens = response.usage.output_tokens_details.reasoning_tokens
-
-        # chat completions
-        if hasattr(response.usage, "prompt_tokens"):
-            input_tokens = response.usage.prompt_tokens
-        if hasattr(response.usage, "completion_tokens"):
-            output_tokens = response.usage.completion_tokens
-        if hasattr(response.usage, "prompt_tokens_details") and hasattr(
-            response.usage.prompt_tokens_details, "cached_tokens"
-        ):
-            cached_tokens = response.usage.prompt_tokens_details.cached_tokens
+        from posthoganalytics.ai.openai.openai_converter import (
+            extract_openai_usage_from_response,
+        )
 
-        return {
-            "input_tokens": input_tokens,
-            "output_tokens": output_tokens,
-            "cache_read_input_tokens": cached_tokens,
-            "reasoning_tokens": reasoning_tokens,
-        }
+        return extract_openai_usage_from_response(response)
     elif provider == "gemini":
-        input_tokens = 0
-        output_tokens = 0
+        from posthoganalytics.ai.gemini.gemini_converter import (
+            extract_gemini_usage_from_response,
+        )
 
-        if hasattr(response, "usage_metadata") and response.usage_metadata:
-            input_tokens = getattr(response.usage_metadata, "prompt_token_count", 0)
-            output_tokens = getattr(
-                response.usage_metadata, "candidates_token_count", 0
-            )
+        return extract_gemini_usage_from_response(response)
 
-        return {
-            "input_tokens": input_tokens,
-            "output_tokens": output_tokens,
-            "cache_read_input_tokens": 0,
-            "cache_creation_input_tokens": 0,
-            "reasoning_tokens": 0,
-        }
-    return {
-        "input_tokens": 0,
-        "output_tokens": 0,
-        "cache_read_input_tokens": 0,
-        "cache_creation_input_tokens": 0,
-        "reasoning_tokens": 0,
-    }
+    return TokenUsage(input_tokens=0, output_tokens=0)
 
 
 def format_response(response, provider: str):
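
`get_usage` is now a thin dispatcher over the three converter functions, and the imports stay inside each branch so no provider SDK becomes a hard dependency of `utils`. One behavioral nuance worth noting: unknown providers fall back to a two-key zeroed `TokenUsage` rather than the old five-key dict:

```python
# No branch touches `response` for an unknown provider, so None is safe here.
assert get_usage(response=None, provider="unknown") == {
    "input_tokens": 0,
    "output_tokens": 0,
}
```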
@@ -169,9 +155,12 @@ def extract_available_tool_calls(provider: str, kwargs: Dict[str, Any]):
         from posthoganalytics.ai.openai.openai_converter import extract_openai_tools
 
         return extract_openai_tools(kwargs)
+    return None
 
 
-def merge_system_prompt(kwargs: Dict[str, Any], provider: str):
+def merge_system_prompt(
+    kwargs: Dict[str, Any], provider: str
+) -> List[FormattedMessage]:
     """
     Merge system prompts and format messages for the given provider.
     """
@@ -182,14 +171,15 @@ def merge_system_prompt(
         system = kwargs.get("system")
         return format_anthropic_input(messages, system)
     elif provider == "gemini":
-        from posthoganalytics.ai.gemini.gemini_converter import format_gemini_input
+        from posthoganalytics.ai.gemini.gemini_converter import format_gemini_input_with_system
 
         contents = kwargs.get("contents", [])
-        return format_gemini_input(contents)
+        config = kwargs.get("config")
+        return format_gemini_input_with_system(contents, config)
     elif provider == "openai":
-        # For OpenAI, handle both Chat Completions and Responses API
         from posthoganalytics.ai.openai.openai_converter import format_openai_input
 
+        # For OpenAI, handle both Chat Completions and Responses API
         messages_param = kwargs.get("messages")
         input_param = kwargs.get("input")
 
@@ -200,9 +190,11 @@ def merge_system_prompt(
         if kwargs.get("system") is not None:
             has_system = any(msg.get("role") == "system" for msg in messages)
             if not has_system:
-                messages = [
-                    {"role": "system", "content": kwargs.get("system")}
-                ] + messages
+                system_msg = cast(
+                    FormattedMessage,
+                    {"role": "system", "content": kwargs.get("system")},
+                )
+                messages = [system_msg] + messages
 
         # For Responses API, add instructions to the system prompt if provided
         if kwargs.get("instructions") is not None:
@@ -220,9 +212,11 @@ def merge_system_prompt(
                 )
             else:
                 # Create a new system message with instructions
-                messages = [
-                    {"role": "system", "content": kwargs.get("instructions")}
-                ] + messages
+                instruction_msg = cast(
+                    FormattedMessage,
+                    {"role": "system", "content": kwargs.get("instructions")},
+                )
+                messages = [instruction_msg] + messages
 
         return messages
 
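
With the Gemini branch of `merge_system_prompt` now forwarding `config`, a `system_instruction` supplied through the SDK config surfaces as a leading system message in the tracked input. A sketch of the intended round trip, assuming `format_gemini_input` renders a plain user turn as usual:

```python
kwargs = {
    "contents": [{"role": "user", "parts": [{"text": "Hi"}]}],
    "config": {"system_instruction": "You are terse."},
}
messages = merge_system_prompt(kwargs, "gemini")
# Prepended by format_gemini_input_with_system above:
assert messages[0] == {"role": "system", "content": "You are terse."}
```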
@@ -250,7 +244,7 @@ def call_llm_and_track_usage(
     response = None
     error = None
     http_status = 200
-    usage: Dict[str, Any] = {}
+    usage: TokenUsage = TokenUsage()
     error_params: Dict[str, Any] = {}
 
     try:
@@ -305,27 +299,17 @@ def call_llm_and_track_usage(
     if available_tool_calls:
         event_properties["$ai_tools"] = available_tool_calls
 
-    if (
-        usage.get("cache_read_input_tokens") is not None
-        and usage.get("cache_read_input_tokens", 0) > 0
-    ):
-        event_properties["$ai_cache_read_input_tokens"] = usage.get(
-            "cache_read_input_tokens", 0
-        )
+    cache_read = usage.get("cache_read_input_tokens")
+    if cache_read is not None and cache_read > 0:
+        event_properties["$ai_cache_read_input_tokens"] = cache_read
 
-    if (
-        usage.get("cache_creation_input_tokens") is not None
-        and usage.get("cache_creation_input_tokens", 0) > 0
-    ):
-        event_properties["$ai_cache_creation_input_tokens"] = usage.get(
-            "cache_creation_input_tokens", 0
-        )
+    cache_creation = usage.get("cache_creation_input_tokens")
+    if cache_creation is not None and cache_creation > 0:
+        event_properties["$ai_cache_creation_input_tokens"] = cache_creation
 
-    if (
-        usage.get("reasoning_tokens") is not None
-        and usage.get("reasoning_tokens", 0) > 0
-    ):
-        event_properties["$ai_reasoning_tokens"] = usage.get("reasoning_tokens", 0)
+    reasoning = usage.get("reasoning_tokens")
+    if reasoning is not None and reasoning > 0:
+        event_properties["$ai_reasoning_tokens"] = reasoning
 
     if posthog_distinct_id is None:
         event_properties["$process_person_profile"] = False
@@ -367,7 +351,7 @@ async def call_llm_and_track_usage_async(
     response = None
     error = None
     http_status = 200
-    usage: Dict[str, Any] = {}
+    usage: TokenUsage = TokenUsage()
     error_params: Dict[str, Any] = {}
 
     try:
@@ -422,21 +406,13 @@ async def call_llm_and_track_usage_async(
     if available_tool_calls:
         event_properties["$ai_tools"] = available_tool_calls
 
-    if (
-        usage.get("cache_read_input_tokens") is not None
-        and usage.get("cache_read_input_tokens", 0) > 0
-    ):
-        event_properties["$ai_cache_read_input_tokens"] = usage.get(
-            "cache_read_input_tokens", 0
-        )
+    cache_read = usage.get("cache_read_input_tokens")
+    if cache_read is not None and cache_read > 0:
+        event_properties["$ai_cache_read_input_tokens"] = cache_read
 
-    if (
-        usage.get("cache_creation_input_tokens") is not None
-        and usage.get("cache_creation_input_tokens", 0) > 0
-    ):
-        event_properties["$ai_cache_creation_input_tokens"] = usage.get(
-            "cache_creation_input_tokens", 0
-        )
+    cache_creation = usage.get("cache_creation_input_tokens")
+    if cache_creation is not None and cache_creation > 0:
+        event_properties["$ai_cache_creation_input_tokens"] = cache_creation
 
     if posthog_distinct_id is None:
         event_properties["$process_person_profile"] = False
posthoganalytics/version.py

@@ -1,4 +1,4 @@
-VERSION = "6.7.2"
+VERSION = "6.7.4"
 
 if __name__ == "__main__":
     print(VERSION, end="")  # noqa: T201
METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: posthoganalytics
-Version: 6.7.2
+Version: 6.7.4
 Summary: Integrate PostHog into any python application.
 Home-page: https://github.com/posthog/posthog-python
 Author: Posthog
RECORD

@@ -11,25 +11,25 @@
 posthoganalytics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 posthoganalytics/request.py,sha256=Bsl2c5WwONKPQzwWMmKPX5VgOlwSiIcSNfhXgoz62Y8,6186
 posthoganalytics/types.py,sha256=Dl3aFGX9XUR0wMmK12r2s5Hjan9jL4HpQ9GHpVcEq5U,10207
 posthoganalytics/utils.py,sha256=-0w-OLcCaoldkbBebPzQyBzLJSo9G9yBOg8NDVz7La8,16088
-posthoganalytics/version.py,sha256=5_SqKJ01JbRPG9x4t8JTHffIB0KktqfXyvL6EK2L4Vg,87
+posthoganalytics/version.py,sha256=RJbegcgNmUJvUzvz3PhJ7kXrYNUcMXmEqH9X-ctDffs,87
 posthoganalytics/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 posthoganalytics/ai/sanitization.py,sha256=owipZ4eJYtd4JTI-CM_klatclXaeaIec3XJBOUfsOnQ,5770
-posthoganalytics/ai/types.py,sha256=OsB6u855BdZNl5TyVx6Bxm80fT0dfbfnL9Yr5GsIbOQ,3755
-posthoganalytics/ai/utils.py,sha256=nkqVR8Gpum9cllx8Z-ylhYM7tMy1BENfB58lOlrjIkw,20747
+posthoganalytics/ai/types.py,sha256=ceubs4K9xf8vQx7wokq1NL9hPtxyS7D7sUOuT7Lx1lM,3237
+posthoganalytics/ai/utils.py,sha256=WPeb-K_c4wwLJ2fr6s6OAq9YuxwvFhyGqQdIwaRSUQw,20364
 posthoganalytics/ai/anthropic/__init__.py,sha256=8nTvETZzkfW-P3zBMmp06GOHs0N-xyOGu7Oa4di_lno,669
-posthoganalytics/ai/anthropic/anthropic.py,sha256=YA-oZyqnWX1cxZvzMRBq1qwO0_r46Q1NYj2RBNuNAYI,8812
-posthoganalytics/ai/anthropic/anthropic_async.py,sha256=M-LIWqkMRF2LSjIwYU_l9D0S1ZjWh4HOoPPBRuKrY4o,10161
-posthoganalytics/ai/anthropic/anthropic_converter.py,sha256=U310RnrXN_JCjJIabr_zJKgNq14cs_atEI4ENsN-LK0,11415
+posthoganalytics/ai/anthropic/anthropic.py,sha256=njOoVb9vkCdnPWAQuVF0XB0BnT2y1ScIryrCGyt5ur8,8750
+posthoganalytics/ai/anthropic/anthropic_async.py,sha256=nM3oFcNLw6meEtV6RfrvhFcuxD4aS-CXDuepRHycUjM,10169
+posthoganalytics/ai/anthropic/anthropic_converter.py,sha256=LWIQ1kyK3vV3rLBmQIcd-98fet7isK3uhTRmBqBN0lk,11776
 posthoganalytics/ai/anthropic/anthropic_providers.py,sha256=y1_qc8Lbip-YDmpimPGg3DfTm5g-WZk5FrRCXzwF_Ow,2139
 posthoganalytics/ai/gemini/__init__.py,sha256=JV_9-gBR87leHgZW4XAYZP7LSl4YaXeuhqDUpA8HygA,383
-posthoganalytics/ai/gemini/gemini.py,sha256=V_ZHKYIJuRzUIQ-BKX8DzFp4eyXdZbzWUg8WHyPZfOw,14960
-posthoganalytics/ai/gemini/gemini_converter.py,sha256=nKiwtHIKFw3g2KVR47GqFzNMxHUnSF4kqi8BYJIJPPQ,13495
+posthoganalytics/ai/gemini/gemini.py,sha256=A2acjT_m8ru2YwgIk15aN21CRVEl2jh8pbqjmHplMC8,15035
+posthoganalytics/ai/gemini/gemini_converter.py,sha256=WzRsid-FjXRyhAI5wQ9-tjTapYVCTRKuMPcZFYKUdIo,16027
 posthoganalytics/ai/langchain/__init__.py,sha256=9CqAwLynTGj3ASAR80C3PmdTdrYGmu99tz0JL-HPFgI,70
 posthoganalytics/ai/langchain/callbacks.py,sha256=Otha0a6YLBwETfKjDDbdLzNi-RHRgKFJB69GwWCv9lg,29527
 posthoganalytics/ai/openai/__init__.py,sha256=u4OuUT7k1NgFj0TrxjuyegOg7a_UA8nAU6a-Hszr0OM,490
-posthoganalytics/ai/openai/openai.py,sha256=OQxey512e0PfPEDpy5nFQ4fkK2N-aw9kn2YGJaJFHqU,20330
-posthoganalytics/ai/openai/openai_async.py,sha256=jT_C-DgI5eUMhJDtA5NtTJy07vQmIkNSYk9bAO12ZPY,21732
-posthoganalytics/ai/openai/openai_converter.py,sha256=G-VC3JO8yudOduXVoykoL44KI9o4h1CxH13w2uDlRSw,19552
+posthoganalytics/ai/openai/openai.py,sha256=I05NruE9grWezM_EgOZBiG5Ej_gABsDcYKN0pRQWvzU,20235
+posthoganalytics/ai/openai/openai_async.py,sha256=k6bo3LfJ_CAPBZCxAzyM2uLz4BpW2YWEFhNuzVcpJlM,21811
+posthoganalytics/ai/openai/openai_converter.py,sha256=VBaAGdXPSVNgfvCnSAojslWkTRO2luUxpjafR-WMEbs,20469
 posthoganalytics/ai/openai/openai_providers.py,sha256=RPVmj2V0_lAdno_ax5Ul2kwhBA9_rRgAdl_sCqrQc6M,4004
 posthoganalytics/integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 posthoganalytics/integrations/django.py,sha256=KYtBr7CkiZQynRc2TCWWYHe-J3ie8iSUa42WPshYZdc,6795
@@ -47,8 +47,8 @@ posthoganalytics/test/test_request.py,sha256=Zc0VbkjpVmj8mKokQm9rzdgTr0b1U44vvMY
 posthoganalytics/test/test_size_limited_dict.py,sha256=-5IQjIEr_-Dql24M0HusdR_XroOMrtgiT0v6ZQCRvzo,774
 posthoganalytics/test/test_types.py,sha256=bRPHdwVpP7hu7emsplU8UVyzSQptv6PaG5lAoOD_BtM,7595
 posthoganalytics/test/test_utils.py,sha256=sqUTbfweVcxxFRd3WDMFXqPMyU6DvzOBeAOc68Py9aw,9620
-posthoganalytics-6.7.2.dist-info/licenses/LICENSE,sha256=wGf9JBotDkSygFj43m49oiKlFnpMnn97keiZKF-40vE,2450
-posthoganalytics-6.7.2.dist-info/METADATA,sha256=nGtssI8-JFCLJJgoGrwrX7KbGjyaRLigawFm2l7_VSY,6024
-posthoganalytics-6.7.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-posthoganalytics-6.7.2.dist-info/top_level.txt,sha256=8QsNIqIkBh1p2TXvKp0Em9ZLZKwe3uIqCETyW4s1GOE,17
-posthoganalytics-6.7.2.dist-info/RECORD,,
+posthoganalytics-6.7.4.dist-info/licenses/LICENSE,sha256=wGf9JBotDkSygFj43m49oiKlFnpMnn97keiZKF-40vE,2450
+posthoganalytics-6.7.4.dist-info/METADATA,sha256=krO8eciyydNJ2LzkNv59_maYsYxnnT4XpvZ_n6FCi44,6024
+posthoganalytics-6.7.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+posthoganalytics-6.7.4.dist-info/top_level.txt,sha256=8QsNIqIkBh1p2TXvKp0Em9ZLZKwe3uIqCETyW4s1GOE,17
+posthoganalytics-6.7.4.dist-info/RECORD,,