posthoganalytics 6.7.0__py3-none-any.whl → 6.7.2__py3-none-any.whl

This diff compares the contents of two package versions as they were publicly released to their registry. It is provided for informational purposes only.
@@ -1,10 +1,45 @@
  import time
  import uuid
- from typing import Any, Callable, Dict, List, Optional
+ from typing import Any, Callable, Dict, Optional
  
- from httpx import URL
  
  from posthoganalytics.client import Client as PostHogClient
+ from posthoganalytics.ai.types import StreamingEventData, StreamingUsageStats
+ from posthoganalytics.ai.sanitization import (
+     sanitize_openai,
+     sanitize_anthropic,
+     sanitize_gemini,
+     sanitize_langchain,
+ )
+
+
+ def merge_usage_stats(
+     target: Dict[str, int], source: StreamingUsageStats, mode: str = "incremental"
+ ) -> None:
+     """
+     Merge streaming usage statistics into target dict, handling None values.
+
+     Supports two modes:
+     - "incremental": Add source values to target (for APIs that report new tokens)
+     - "cumulative": Replace target with source values (for APIs that report totals)
+
+     Args:
+         target: Dictionary to update with usage stats
+         source: StreamingUsageStats that may contain None values
+         mode: Either "incremental" or "cumulative"
+     """
+     if mode == "incremental":
+         # Add new values to existing totals
+         for key, value in source.items():
+             if value is not None and isinstance(value, int):
+                 target[key] = target.get(key, 0) + value
+     elif mode == "cumulative":
+         # Replace with latest values (already cumulative)
+         for key, value in source.items():
+             if value is not None and isinstance(value, int):
+                 target[key] = value
+     else:
+         raise ValueError(f"Invalid mode: {mode}. Must be 'incremental' or 'cumulative'")
  
  
  def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:
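
For reference, a minimal sketch of the two merge modes of the new merge_usage_stats helper (the import path is assumed from this file's location in the wheel; the sample values are invented):

    from posthoganalytics.ai.utils import merge_usage_stats

    totals = {"input_tokens": 10, "output_tokens": 4}

    # Incremental: the source reports only new tokens, so values are added.
    merge_usage_stats(totals, {"input_tokens": 5, "output_tokens": None}, mode="incremental")
    assert totals == {"input_tokens": 15, "output_tokens": 4}  # None entries are skipped

    # Cumulative: the source reports running totals, so values are replaced.
    merge_usage_stats(totals, {"input_tokens": 40, "output_tokens": 12}, mode="cumulative")
    assert totals == {"input_tokens": 40, "output_tokens": 12}
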
@@ -103,275 +138,96 @@ def format_response(response, provider: str):
      """
      Format a regular (non-streaming) response.
      """
-     output = []
-     if response is None:
-         return output
      if provider == "anthropic":
-         return format_response_anthropic(response)
-     elif provider == "openai":
-         return format_response_openai(response)
-     elif provider == "gemini":
-         return format_response_gemini(response)
-     return output
-
+         from posthoganalytics.ai.anthropic.anthropic_converter import format_anthropic_response
  
- def format_response_anthropic(response):
-     output = []
-     content = []
-
-     for choice in response.content:
-         if (
-             hasattr(choice, "type")
-             and choice.type == "text"
-             and hasattr(choice, "text")
-             and choice.text
-         ):
-             content.append({"type": "text", "text": choice.text})
-         elif (
-             hasattr(choice, "type")
-             and choice.type == "tool_use"
-             and hasattr(choice, "name")
-             and hasattr(choice, "id")
-         ):
-             tool_call = {
-                 "type": "function",
-                 "id": choice.id,
-                 "function": {
-                     "name": choice.name,
-                     "arguments": getattr(choice, "input", {}),
-                 },
-             }
-             content.append(tool_call)
-
-     if content:
-         message = {
-             "role": "assistant",
-             "content": content,
-         }
-         output.append(message)
-
-     return output
-
-
- def format_response_openai(response):
-     output = []
-
-     if hasattr(response, "choices"):
-         content = []
-         role = "assistant"
-
-         for choice in response.choices:
-             # Handle Chat Completions response format
-             if hasattr(choice, "message") and choice.message:
-                 if choice.message.role:
-                     role = choice.message.role
-
-                 if choice.message.content:
-                     content.append({"type": "text", "text": choice.message.content})
-
-                 if hasattr(choice.message, "tool_calls") and choice.message.tool_calls:
-                     for tool_call in choice.message.tool_calls:
-                         content.append(
-                             {
-                                 "type": "function",
-                                 "id": tool_call.id,
-                                 "function": {
-                                     "name": tool_call.function.name,
-                                     "arguments": tool_call.function.arguments,
-                                 },
-                             }
-                         )
-
-         if content:
-             message = {
-                 "role": role,
-                 "content": content,
-             }
-             output.append(message)
-
-     # Handle Responses API format
-     if hasattr(response, "output"):
-         content = []
-         role = "assistant"
-
-         for item in response.output:
-             if item.type == "message":
-                 role = item.role
-
-                 if hasattr(item, "content") and isinstance(item.content, list):
-                     for content_item in item.content:
-                         if (
-                             hasattr(content_item, "type")
-                             and content_item.type == "output_text"
-                             and hasattr(content_item, "text")
-                         ):
-                             content.append({"type": "text", "text": content_item.text})
-                         elif hasattr(content_item, "text"):
-                             content.append({"type": "text", "text": content_item.text})
-                         elif (
-                             hasattr(content_item, "type")
-                             and content_item.type == "input_image"
-                             and hasattr(content_item, "image_url")
-                         ):
-                             content.append(
-                                 {
-                                     "type": "image",
-                                     "image": content_item.image_url,
-                                 }
-                             )
-                 elif hasattr(item, "content"):
-                     content.append({"type": "text", "text": str(item.content)})
-
-             elif hasattr(item, "type") and item.type == "function_call":
-                 content.append(
-                     {
-                         "type": "function",
-                         "id": getattr(item, "call_id", getattr(item, "id", "")),
-                         "function": {
-                             "name": item.name,
-                             "arguments": getattr(item, "arguments", {}),
-                         },
-                     }
-                 )
+         return format_anthropic_response(response)
+     elif provider == "openai":
+         from posthoganalytics.ai.openai.openai_converter import format_openai_response
  
-         if content:
-             message = {
-                 "role": role,
-                 "content": content,
-             }
-             output.append(message)
-
-     return output
-
-
- def format_response_gemini(response):
-     output = []
-
-     if hasattr(response, "candidates") and response.candidates:
-         for candidate in response.candidates:
-             if hasattr(candidate, "content") and candidate.content:
-                 content = []
-
-                 if hasattr(candidate.content, "parts") and candidate.content.parts:
-                     for part in candidate.content.parts:
-                         if hasattr(part, "text") and part.text:
-                             content.append({"type": "text", "text": part.text})
-                         elif hasattr(part, "function_call") and part.function_call:
-                             function_call = part.function_call
-                             content.append(
-                                 {
-                                     "type": "function",
-                                     "function": {
-                                         "name": function_call.name,
-                                         "arguments": function_call.args,
-                                     },
-                                 }
-                             )
-
-                 if content:
-                     message = {
-                         "role": "assistant",
-                         "content": content,
-                     }
-                     output.append(message)
-
-             elif hasattr(candidate, "text") and candidate.text:
-                 output.append(
-                     {
-                         "role": "assistant",
-                         "content": [{"type": "text", "text": candidate.text}],
-                     }
-                 )
-     elif hasattr(response, "text") and response.text:
-         output.append(
-             {
-                 "role": "assistant",
-                 "content": [{"type": "text", "text": response.text}],
-             }
-         )
+         return format_openai_response(response)
+     elif provider == "gemini":
+         from posthoganalytics.ai.gemini.gemini_converter import format_gemini_response
  
-     return output
+         return format_gemini_response(response)
+     return []
  
  
  def extract_available_tool_calls(provider: str, kwargs: Dict[str, Any]):
+     """
+     Extract available tool calls for the given provider.
+     """
      if provider == "anthropic":
-         if "tools" in kwargs:
-             return kwargs["tools"]
+         from posthoganalytics.ai.anthropic.anthropic_converter import extract_anthropic_tools
  
-         return None
+         return extract_anthropic_tools(kwargs)
      elif provider == "gemini":
-         if "config" in kwargs and hasattr(kwargs["config"], "tools"):
-             return kwargs["config"].tools
+         from posthoganalytics.ai.gemini.gemini_converter import extract_gemini_tools
  
-         return None
+         return extract_gemini_tools(kwargs)
      elif provider == "openai":
-         if "tools" in kwargs:
-             return kwargs["tools"]
+         from posthoganalytics.ai.openai.openai_converter import extract_openai_tools
  
-         return None
+         return extract_openai_tools(kwargs)
  
  
  def merge_system_prompt(kwargs: Dict[str, Any], provider: str):
-     messages: List[Dict[str, Any]] = []
+     """
+     Merge system prompts and format messages for the given provider.
+     """
      if provider == "anthropic":
+         from posthoganalytics.ai.anthropic.anthropic_converter import format_anthropic_input
+
          messages = kwargs.get("messages") or []
-         if kwargs.get("system") is None:
-             return messages
-         return [{"role": "system", "content": kwargs.get("system")}] + messages
+         system = kwargs.get("system")
+         return format_anthropic_input(messages, system)
      elif provider == "gemini":
-         contents = kwargs.get("contents", [])
-         if isinstance(contents, str):
-             return [{"role": "user", "content": contents}]
-         elif isinstance(contents, list):
-             formatted = []
-             for item in contents:
-                 if isinstance(item, str):
-                     formatted.append({"role": "user", "content": item})
-                 elif hasattr(item, "text"):
-                     formatted.append({"role": "user", "content": item.text})
-                 else:
-                     formatted.append({"role": "user", "content": str(item)})
-             return formatted
-         else:
-             return [{"role": "user", "content": str(contents)}]
-
-     # For OpenAI, handle both Chat Completions and Responses API
-     if kwargs.get("messages") is not None:
-         messages = list(kwargs.get("messages", []))
-
-     if kwargs.get("input") is not None:
-         input_data = kwargs.get("input")
-         if isinstance(input_data, list):
-             messages.extend(input_data)
-         else:
-             messages.append({"role": "user", "content": input_data})
-
-     # Check if system prompt is provided as a separate parameter
-     if kwargs.get("system") is not None:
-         has_system = any(msg.get("role") == "system" for msg in messages)
-         if not has_system:
-             messages = [{"role": "system", "content": kwargs.get("system")}] + messages
-
-     # For Responses API, add instructions to the system prompt if provided
-     if kwargs.get("instructions") is not None:
-         # Find the system message if it exists
-         system_idx = next(
-             (i for i, msg in enumerate(messages) if msg.get("role") == "system"), None
-         )
+         from posthoganalytics.ai.gemini.gemini_converter import format_gemini_input
  
-         if system_idx is not None:
-             # Append instructions to existing system message
-             system_content = messages[system_idx].get("content", "")
-             messages[system_idx]["content"] = (
-                 f"{system_content}\n\n{kwargs.get('instructions')}"
+         contents = kwargs.get("contents", [])
+         return format_gemini_input(contents)
+     elif provider == "openai":
+         # For OpenAI, handle both Chat Completions and Responses API
+         from posthoganalytics.ai.openai.openai_converter import format_openai_input
+
+         messages_param = kwargs.get("messages")
+         input_param = kwargs.get("input")
+
+         # Get base formatted messages
+         messages = format_openai_input(messages_param, input_param)
+
+         # Check if system prompt is provided as a separate parameter
+         if kwargs.get("system") is not None:
+             has_system = any(msg.get("role") == "system" for msg in messages)
+             if not has_system:
+                 messages = [
+                     {"role": "system", "content": kwargs.get("system")}
+                 ] + messages
+
+         # For Responses API, add instructions to the system prompt if provided
+         if kwargs.get("instructions") is not None:
+             # Find the system message if it exists
+             system_idx = next(
+                 (i for i, msg in enumerate(messages) if msg.get("role") == "system"),
+                 None,
              )
-         else:
-             # Create a new system message with instructions
-             messages = [
-                 {"role": "system", "content": kwargs.get("instructions")}
-             ] + messages
  
-     return messages
+             if system_idx is not None:
+                 # Append instructions to existing system message
+                 system_content = messages[system_idx].get("content", "")
+                 messages[system_idx]["content"] = (
+                     f"{system_content}\n\n{kwargs.get('instructions')}"
+                 )
+             else:
+                 # Create a new system message with instructions
+                 messages = [
+                     {"role": "system", "content": kwargs.get("instructions")}
+                 ] + messages
+
+         return messages
+
+     # Default case - return empty list
+     return []
  
  
  def call_llm_and_track_usage(
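
As a rough illustration of the reworked OpenAI branch of merge_system_prompt above, assuming format_openai_input wraps a string input in a single user message the way the removed inline code did (the kwargs here are invented):

    kwargs = {
        "model": "gpt-4.1",  # hypothetical model name
        "input": "What is the capital of France?",
        "instructions": "Answer in one word.",
    }

    messages = merge_system_prompt(kwargs, "openai")
    # No system message exists yet, so `instructions` is prepended as one:
    # [{"role": "system", "content": "Answer in one word."},
    #  {"role": "user", "content": "What is the capital of France?"}]
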
@@ -382,7 +238,7 @@ def call_llm_and_track_usage(
      posthog_properties: Optional[Dict[str, Any]],
      posthog_privacy_mode: bool,
      posthog_groups: Optional[Dict[str, Any]],
-     base_url: URL,
+     base_url: str,
      call_method: Callable[..., Any],
      **kwargs: Any,
  ) -> Any:
@@ -395,7 +251,7 @@ def call_llm_and_track_usage(
      error = None
      http_status = 200
      usage: Dict[str, Any] = {}
-     error_params: Dict[str, any] = {}
+     error_params: Dict[str, Any] = {}
  
      try:
          response = call_method(**kwargs)
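
The error_params change above fixes an annotation bug rather than a rename: `any` is the builtin function, not a type. A short illustration (not from the package):

    from typing import Any, Dict

    # `any` is the builtin any(iterable) -> bool. CPython's typing module
    # tolerates Dict[str, any] at runtime because `any` is callable, but
    # static type checkers reject it; Dict[str, Any] is the valid annotation.
    error_params: Dict[str, Any] = {}
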
@@ -422,12 +278,15 @@ def call_llm_and_track_usage(
      usage = get_usage(response, provider)
  
      messages = merge_system_prompt(kwargs, provider)
+     sanitized_messages = sanitize_messages(messages, provider)
  
      event_properties = {
          "$ai_provider": provider,
          "$ai_model": kwargs.get("model"),
          "$ai_model_parameters": get_model_params(kwargs),
-         "$ai_input": with_privacy_mode(ph_client, posthog_privacy_mode, messages),
+         "$ai_input": with_privacy_mode(
+             ph_client, posthog_privacy_mode, sanitized_messages
+         ),
          "$ai_output_choices": with_privacy_mode(
              ph_client, posthog_privacy_mode, format_response(response, provider)
          ),
@@ -500,7 +359,7 @@ async def call_llm_and_track_usage_async(
      posthog_properties: Optional[Dict[str, Any]],
      posthog_privacy_mode: bool,
      posthog_groups: Optional[Dict[str, Any]],
-     base_url: URL,
+     base_url: str,
      call_async_method: Callable[..., Any],
      **kwargs: Any,
  ) -> Any:
@@ -509,7 +368,7 @@ async def call_llm_and_track_usage_async(
      error = None
      http_status = 200
      usage: Dict[str, Any] = {}
-     error_params: Dict[str, any] = {}
+     error_params: Dict[str, Any] = {}
  
      try:
          response = await call_async_method(**kwargs)
@@ -536,12 +395,15 @@ async def call_llm_and_track_usage_async(
      usage = get_usage(response, provider)
  
      messages = merge_system_prompt(kwargs, provider)
+     sanitized_messages = sanitize_messages(messages, provider)
  
      event_properties = {
          "$ai_provider": provider,
          "$ai_model": kwargs.get("model"),
          "$ai_model_parameters": get_model_params(kwargs),
-         "$ai_input": with_privacy_mode(ph_client, posthog_privacy_mode, messages),
+         "$ai_input": with_privacy_mode(
+             ph_client, posthog_privacy_mode, sanitized_messages
+         ),
          "$ai_output_choices": with_privacy_mode(
              ph_client, posthog_privacy_mode, format_response(response, provider)
          ),
@@ -600,7 +462,122 @@ async def call_llm_and_track_usage_async(
      return response
  
  
+ def sanitize_messages(data: Any, provider: str) -> Any:
+     """Sanitize messages using provider-specific sanitization functions."""
+     if provider == "anthropic":
+         return sanitize_anthropic(data)
+     elif provider == "openai":
+         return sanitize_openai(data)
+     elif provider == "gemini":
+         return sanitize_gemini(data)
+     elif provider == "langchain":
+         return sanitize_langchain(data)
+     return data
+
+
  def with_privacy_mode(ph_client: PostHogClient, privacy_mode: bool, value: Any):
      if ph_client.privacy_mode or privacy_mode:
          return None
      return value
+
+
+ def capture_streaming_event(
+     ph_client: PostHogClient,
+     event_data: StreamingEventData,
+ ):
+     """
+     Unified streaming event capture for all LLM providers.
+
+     This function handles the common logic for capturing streaming events across all providers.
+     All provider-specific formatting should be done BEFORE calling this function.
+
+     The function handles:
+     - Building PostHog event properties
+     - Extracting and adding tools based on provider
+     - Applying privacy mode
+     - Adding special token fields (cache, reasoning)
+     - Provider-specific fields (e.g., OpenAI instructions)
+     - Sending the event to PostHog
+
+     Args:
+         ph_client: PostHog client instance
+         event_data: Standardized streaming event data containing all necessary information
+     """
+     trace_id = event_data.get("trace_id") or str(uuid.uuid4())
+
+     # Build base event properties
+     event_properties = {
+         "$ai_provider": event_data["provider"],
+         "$ai_model": event_data["model"],
+         "$ai_model_parameters": get_model_params(event_data["kwargs"]),
+         "$ai_input": with_privacy_mode(
+             ph_client,
+             event_data["privacy_mode"],
+             event_data["formatted_input"],
+         ),
+         "$ai_output_choices": with_privacy_mode(
+             ph_client,
+             event_data["privacy_mode"],
+             event_data["formatted_output"],
+         ),
+         "$ai_http_status": 200,
+         "$ai_input_tokens": event_data["usage_stats"].get("input_tokens", 0),
+         "$ai_output_tokens": event_data["usage_stats"].get("output_tokens", 0),
+         "$ai_latency": event_data["latency"],
+         "$ai_trace_id": trace_id,
+         "$ai_base_url": str(event_data["base_url"]),
+         **(event_data.get("properties") or {}),
+     }
+
+     # Extract and add tools based on provider
+     available_tools = extract_available_tool_calls(
+         event_data["provider"],
+         event_data["kwargs"],
+     )
+     if available_tools:
+         event_properties["$ai_tools"] = available_tools
+
+     # Add optional token fields
+     # For Anthropic, always include cache fields even if 0 (backward compatibility)
+     # For others, only include if present and non-zero
+     if event_data["provider"] == "anthropic":
+         # Anthropic always includes cache fields
+         cache_read = event_data["usage_stats"].get("cache_read_input_tokens", 0)
+         cache_creation = event_data["usage_stats"].get("cache_creation_input_tokens", 0)
+         event_properties["$ai_cache_read_input_tokens"] = cache_read
+         event_properties["$ai_cache_creation_input_tokens"] = cache_creation
+     else:
+         # Other providers only include if non-zero
+         optional_token_fields = [
+             "cache_read_input_tokens",
+             "cache_creation_input_tokens",
+             "reasoning_tokens",
+         ]
+
+         for field in optional_token_fields:
+             value = event_data["usage_stats"].get(field)
+             if value is not None and isinstance(value, int) and value > 0:
+                 event_properties[f"$ai_{field}"] = value
+
+     # Handle provider-specific fields
+     if (
+         event_data["provider"] == "openai"
+         and event_data["kwargs"].get("instructions") is not None
+     ):
+         event_properties["$ai_instructions"] = with_privacy_mode(
+             ph_client,
+             event_data["privacy_mode"],
+             event_data["kwargs"]["instructions"],
+         )
+
+     if event_data.get("distinct_id") is None:
+         event_properties["$process_person_profile"] = False
+
+     # Send event to PostHog
+     if hasattr(ph_client, "capture"):
+         ph_client.capture(
+             distinct_id=event_data.get("distinct_id") or trace_id,
+             event="$ai_generation",
+             properties=event_properties,
+             groups=event_data.get("groups"),
+         )
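
To show how the new streaming pieces fit together, here is a hedged sketch of a caller building a StreamingEventData and passing it to capture_streaming_event. The dict shape is inferred from the keys the function reads above (the actual TypedDict lives in posthoganalytics.ai.types, which is not part of this diff); the client setup, model name, and values are placeholders:

    import time

    from posthoganalytics import Posthog
    from posthoganalytics.ai.utils import (  # assumed module path
        capture_streaming_event,
        merge_system_prompt,
        sanitize_messages,
    )

    ph_client = Posthog("phc_your_key", host="https://us.i.posthog.com")
    kwargs = {"model": "gpt-4.1", "messages": [{"role": "user", "content": "Hi"}]}

    start = time.time()
    full_text = "Hello!"  # accumulated from the stream in real code
    usage = {"input_tokens": 3, "output_tokens": 2}  # built up via merge_usage_stats

    event_data = {  # assumed StreamingEventData shape
        "provider": "openai",
        "model": kwargs["model"],
        "base_url": "https://api.openai.com/v1",
        "kwargs": kwargs,
        "formatted_input": sanitize_messages(
            merge_system_prompt(kwargs, "openai"), "openai"
        ),
        "formatted_output": [
            {"role": "assistant", "content": [{"type": "text", "text": full_text}]}
        ],
        "usage_stats": usage,
        "latency": time.time() - start,
        "trace_id": None,  # None falls back to a random UUID
        "distinct_id": "user-123",
        "privacy_mode": False,
        "properties": {"conversation_id": "conv-1"},
        "groups": None,
    }
    capture_streaming_event(ph_client, event_data)
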
@@ -1814,7 +1814,7 @@ class Client(object):
              )
          )
  
-         response, fallback_to_decide = self._get_all_flags_and_payloads_locally(
+         response, fallback_to_flags = self._get_all_flags_and_payloads_locally(
              distinct_id,
              groups=groups,
              person_properties=person_properties,
@@ -1822,7 +1822,7 @@ class Client(object):
              flag_keys_to_evaluate=flag_keys_to_evaluate,
          )
  
-         if fallback_to_decide and not only_evaluate_locally:
+         if fallback_to_flags and not only_evaluate_locally:
              try:
                  decide_response = self.get_flags_decision(
                      distinct_id,
@@ -1858,7 +1858,7 @@ class Client(object):
  
          flags: dict[str, FlagValue] = {}
          payloads: dict[str, str] = {}
-         fallback_to_decide = False
+         fallback_to_flags = False
          # If loading in previous line failed
          if self.feature_flags:
              # Filter flags based on flag_keys_to_evaluate if provided
@@ -1886,19 +1886,19 @@ class Client(object):
                          payloads[flag["key"]] = matched_payload
                  except InconclusiveMatchError:
                      # No need to log this, since it's just telling us to fall back to `/flags`
-                     fallback_to_decide = True
+                     fallback_to_flags = True
                  except Exception as e:
                      self.log.exception(
                          f"[FEATURE FLAGS] Error while computing variant and payload: {e}"
                      )
-                     fallback_to_decide = True
+                     fallback_to_flags = True
          else:
-             fallback_to_decide = True
+             fallback_to_flags = True
  
          return {
              "featureFlags": flags,
              "featureFlagPayloads": payloads,
-         }, fallback_to_decide
+         }, fallback_to_flags
  
      def _initialize_flag_cache(self, cache_url):
          """Initialize feature flag cache for graceful degradation during service outages.
@@ -365,7 +365,7 @@ class TestLocalEvaluation(unittest.TestCase):
  
      @mock.patch("posthog.client.flags")
      @mock.patch("posthog.client.get")
-     def test_feature_flags_fallback_to_decide(self, patch_get, patch_flags):
+     def test_feature_flags_fallback_to_flags(self, patch_get, patch_flags):
          patch_flags.return_value = {
              "featureFlags": {"beta-feature": "alakazam", "beta-feature2": "alakazam2"}
          }
@@ -431,7 +431,7 @@ class TestLocalEvaluation(unittest.TestCase):
  
      @mock.patch("posthog.client.flags")
      @mock.patch("posthog.client.get")
-     def test_feature_flags_dont_fallback_to_decide_when_only_local_evaluation_is_true(
+     def test_feature_flags_dont_fallback_to_flags_when_only_local_evaluation_is_true(
          self, patch_get, patch_flags
      ):
          patch_flags.return_value = {
@@ -1,4 +1,4 @@
- VERSION = "6.7.0"
+ VERSION = "6.7.2"
  
  if __name__ == "__main__":
      print(VERSION, end="")  # noqa: T201
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: posthoganalytics
- Version: 6.7.0
+ Version: 6.7.2
  Summary: Integrate PostHog into any python application.
  Home-page: https://github.com/posthog/posthog-python
  Author: Posthog