posthoganalytics 7.7.0__py3-none-any.whl → 7.8.1__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

posthoganalytics/ai/__init__.py
@@ -0,0 +1,3 @@
+ from posthoganalytics.ai.prompts import Prompts
+
+ __all__ = ["Prompts"]

posthoganalytics/ai/anthropic/anthropic_converter.py
@@ -17,6 +17,7 @@ from posthoganalytics.ai.types import (
  TokenUsage,
  ToolInProgress,
  )
+ from posthoganalytics.ai.utils import serialize_raw_usage


  def format_anthropic_response(response: Any) -> List[FormattedMessage]:
@@ -221,6 +222,12 @@ def extract_anthropic_usage_from_response(response: Any) -> TokenUsage:
  if web_search_count > 0:
  result["web_search_count"] = web_search_count

+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(response.usage)
+ if serialized:
+ result["raw_usage"] = serialized
+
  return result


@@ -247,6 +254,11 @@ def extract_anthropic_usage_from_event(event: Any) -> TokenUsage:
  usage["cache_read_input_tokens"] = getattr(
  event.message.usage, "cache_read_input_tokens", 0
  )
+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(event.message.usage)
+ if serialized:
+ usage["raw_usage"] = serialized

  # Handle usage stats from message_delta event
  if hasattr(event, "usage") and event.usage:
@@ -262,6 +274,12 @@ def extract_anthropic_usage_from_event(event: Any) -> TokenUsage:
  if web_search_count > 0:
  usage["web_search_count"] = web_search_count

+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(event.usage)
+ if serialized:
+ usage["raw_usage"] = serialized
+
  return usage


posthoganalytics/ai/gemini/gemini_converter.py
@@ -12,6 +12,7 @@ from posthoganalytics.ai.types import (
  FormattedMessage,
  TokenUsage,
  )
+ from posthoganalytics.ai.utils import serialize_raw_usage


  class GeminiPart(TypedDict, total=False):
@@ -487,6 +488,12 @@ def _extract_usage_from_metadata(metadata: Any) -> TokenUsage:
  if reasoning_tokens and reasoning_tokens > 0:
  usage["reasoning_tokens"] = reasoning_tokens

+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(metadata)
+ if serialized:
+ usage["raw_usage"] = serialized
+
  return usage


posthoganalytics/ai/openai/openai_converter.py
@@ -16,6 +16,7 @@ from posthoganalytics.ai.types import (
  FormattedTextContent,
  TokenUsage,
  )
+ from posthoganalytics.ai.utils import serialize_raw_usage


  def format_openai_response(response: Any) -> List[FormattedMessage]:
@@ -429,6 +430,12 @@ def extract_openai_usage_from_response(response: Any) -> TokenUsage:
  if web_search_count > 0:
  result["web_search_count"] = web_search_count

+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(response.usage)
+ if serialized:
+ result["raw_usage"] = serialized
+
  return result


@@ -482,6 +489,12 @@ def extract_openai_usage_from_chunk(
  chunk.usage.completion_tokens_details.reasoning_tokens
  )

+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(chunk.usage)
+ if serialized:
+ usage["raw_usage"] = serialized
+
  elif provider_type == "responses":
  # For Responses API, usage is only in chunk.response.usage for completed events
  if hasattr(chunk, "type") and chunk.type == "response.completed":
@@ -516,6 +529,12 @@ def extract_openai_usage_from_chunk(
  if web_search_count > 0:
  usage["web_search_count"] = web_search_count

+ # Capture raw usage metadata for backend processing
+ # Serialize to dict here in the converter (not in utils)
+ serialized = serialize_raw_usage(response_usage)
+ if serialized:
+ usage["raw_usage"] = serialized
+
  return usage


posthoganalytics/ai/prompts.py
@@ -0,0 +1,272 @@
+ """
+ Prompt management for PostHog AI SDK.
+
+ Fetch and compile LLM prompts from PostHog with caching and fallback support.
+ """
+
+ import logging
+ import re
+ import time
+ import urllib.parse
+ from typing import Any, Dict, Optional, Union
+
+ from posthoganalytics.request import USER_AGENT, _get_session
+ from posthoganalytics.utils import remove_trailing_slash
+
+ log = logging.getLogger("posthog")
+
+ APP_ENDPOINT = "https://app.posthog.com"
+ DEFAULT_CACHE_TTL_SECONDS = 300 # 5 minutes
+
+ PromptVariables = Dict[str, Union[str, int, float, bool]]
+
+
+ class CachedPrompt:
+ """Cached prompt with metadata."""
+
+ def __init__(self, prompt: str, fetched_at: float):
+ self.prompt = prompt
+ self.fetched_at = fetched_at
+
+
+ def _is_prompt_api_response(data: Any) -> bool:
+ """Check if the response is a valid prompt API response."""
+ return (
+ isinstance(data, dict)
+ and "prompt" in data
+ and isinstance(data.get("prompt"), str)
+ )
+
+
+ class Prompts:
+ """
+ Fetch and compile LLM prompts from PostHog.
+
+ Can be initialized with a PostHog client or with direct options.
+
+ Examples:
+ ```python
+ from posthoganalytics import Posthog
+ from posthoganalytics.ai.prompts import Prompts
+
+ # With PostHog client
+ posthog = Posthog('phc_xxx', host='https://app.posthog.com', personal_api_key='phx_xxx')
+ prompts = Prompts(posthog)
+
+ # Or with direct options (no PostHog client needed)
+ prompts = Prompts(personal_api_key='phx_xxx', host='https://app.posthog.com')
+
+ # Fetch with caching and fallback
+ template = prompts.get('support-system-prompt', fallback='You are a helpful assistant.')
+
+ # Compile with variables
+ system_prompt = prompts.compile(template, {
+ 'company': 'Acme Corp',
+ 'tier': 'premium',
+ })
+ ```
+ """
+
+ def __init__(
+ self,
+ posthog: Optional[Any] = None,
+ *,
+ personal_api_key: Optional[str] = None,
+ host: Optional[str] = None,
+ default_cache_ttl_seconds: Optional[int] = None,
+ ):
+ """
+ Initialize Prompts.
+
+ Args:
+ posthog: PostHog client instance (optional if personal_api_key provided)
+ personal_api_key: Direct API key (optional if posthog provided)
+ host: PostHog host (defaults to app endpoint)
+ default_cache_ttl_seconds: Default cache TTL (defaults to 300)
+ """
+ self._default_cache_ttl_seconds = (
+ default_cache_ttl_seconds or DEFAULT_CACHE_TTL_SECONDS
+ )
+ self._cache: Dict[str, CachedPrompt] = {}
+
+ if posthog is not None:
+ self._personal_api_key = getattr(posthog, "personal_api_key", None) or ""
+ self._host = remove_trailing_slash(
+ getattr(posthog, "raw_host", None) or APP_ENDPOINT
+ )
+ else:
+ self._personal_api_key = personal_api_key or ""
+ self._host = remove_trailing_slash(host or APP_ENDPOINT)
+
+ def get(
+ self,
+ name: str,
+ *,
+ cache_ttl_seconds: Optional[int] = None,
+ fallback: Optional[str] = None,
+ ) -> str:
+ """
+ Fetch a prompt by name from the PostHog API.
+
+ Caching behavior:
+ 1. If cache is fresh, return cached value
+ 2. If fetch fails and cache exists (stale), return stale cache with warning
+ 3. If fetch fails and fallback provided, return fallback with warning
+ 4. If fetch fails with no cache/fallback, raise exception
+
+ Args:
+ name: The name of the prompt to fetch
+ cache_ttl_seconds: Cache TTL in seconds (defaults to instance default)
+ fallback: Fallback prompt to use if fetch fails and no cache available
+
+ Returns:
+ The prompt string
+
+ Raises:
+ Exception: If the prompt cannot be fetched and no fallback is available
+ """
+ ttl = (
+ cache_ttl_seconds
+ if cache_ttl_seconds is not None
+ else self._default_cache_ttl_seconds
+ )
+
+ # Check cache first
+ cached = self._cache.get(name)
+ now = time.time()
+
+ if cached is not None:
+ is_fresh = (now - cached.fetched_at) < ttl
+
+ if is_fresh:
+ return cached.prompt
+
+ # Try to fetch from API
+ try:
+ prompt = self._fetch_prompt_from_api(name)
+ fetched_at = time.time()
+
+ # Update cache
+ self._cache[name] = CachedPrompt(prompt=prompt, fetched_at=fetched_at)
+
+ return prompt
+
+ except Exception as error:
+ # Fallback order:
+ # 1. Return stale cache (with warning)
+ if cached is not None:
+ log.warning(
+ '[PostHog Prompts] Failed to fetch prompt "%s", using stale cache: %s',
+ name,
+ error,
+ )
+ return cached.prompt
+
+ # 2. Return fallback (with warning)
+ if fallback is not None:
+ log.warning(
+ '[PostHog Prompts] Failed to fetch prompt "%s", using fallback: %s',
+ name,
+ error,
+ )
+ return fallback
+
+ # 3. Raise error
+ raise
+
+ def compile(self, prompt: str, variables: PromptVariables) -> str:
+ """
+ Replace {{variableName}} placeholders with values.
+
+ Unmatched variables are left unchanged.
+ Supports variable names with hyphens and dots (e.g., user-id, company.name).
+
+ Args:
+ prompt: The prompt template string
+ variables: Object containing variable values
+
+ Returns:
+ The compiled prompt string
+ """
+
+ def replace_variable(match: re.Match) -> str:
+ variable_name = match.group(1)
+
+ if variable_name in variables:
+ return str(variables[variable_name])
+
+ return match.group(0)
+
+ return re.sub(r"\{\{([\w.-]+)\}\}", replace_variable, prompt)
+
+ def clear_cache(self, name: Optional[str] = None) -> None:
+ """
+ Clear cached prompts.
+
+ Args:
+ name: Specific prompt to clear. If None, clears all cached prompts.
+ """
+ if name is not None:
+ self._cache.pop(name, None)
+ else:
+ self._cache.clear()
+
+ def _fetch_prompt_from_api(self, name: str) -> str:
+ """
+ Fetch prompt from PostHog API.
+
+ Endpoint: {host}/api/environments/@current/llm_prompts/name/{encoded_name}/
+ Auth: Bearer {personal_api_key}
+
+ Args:
+ name: The name of the prompt to fetch
+
+ Returns:
+ The prompt string
+
+ Raises:
+ Exception: If the prompt cannot be fetched
+ """
+ if not self._personal_api_key:
+ raise Exception(
+ "[PostHog Prompts] personal_api_key is required to fetch prompts. "
+ "Please provide it when initializing the Prompts instance."
+ )
+
+ encoded_name = urllib.parse.quote(name, safe="")
+ url = f"{self._host}/api/environments/@current/llm_prompts/name/{encoded_name}/"
+
+ headers = {
+ "Authorization": f"Bearer {self._personal_api_key}",
+ "User-Agent": USER_AGENT,
+ }
+
+ response = _get_session().get(url, headers=headers, timeout=10)
+
+ if not response.ok:
+ if response.status_code == 404:
+ raise Exception(f'[PostHog Prompts] Prompt "{name}" not found')
+
+ if response.status_code == 403:
+ raise Exception(
+ f'[PostHog Prompts] Access denied for prompt "{name}". '
+ "Check that your personal_api_key has the correct permissions and the LLM prompts feature is enabled."
+ )
+
+ raise Exception(
+ f'[PostHog Prompts] Failed to fetch prompt "{name}": HTTP {response.status_code}'
+ )
+
+ try:
+ data = response.json()
+ except Exception:
+ raise Exception(
+ f'[PostHog Prompts] Invalid response format for prompt "{name}"'
+ )
+
+ if not _is_prompt_api_response(data):
+ raise Exception(
+ f'[PostHog Prompts] Invalid response format for prompt "{name}"'
+ )
+
+ return data["prompt"]

posthoganalytics/ai/types.py
@@ -64,6 +64,7 @@ class TokenUsage(TypedDict, total=False):
  cache_creation_input_tokens: Optional[int]
  reasoning_tokens: Optional[int]
  web_search_count: Optional[int]
+ raw_usage: Optional[Any] # Raw provider usage metadata for backend processing


  class ProviderResponse(TypedDict, total=False):
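
For context, a hypothetical `TokenUsage` value produced by the converters after this change could look like the sketch below; the keys inside `raw_usage` depend entirely on the provider SDK and are illustrative only:

```python
from posthoganalytics.ai.types import TokenUsage

# Illustrative values; raw_usage carries whatever the provider reported,
# already serialized to a plain dict by the converter.
usage: TokenUsage = {
    "input_tokens": 1200,
    "output_tokens": 350,
    "cache_read_input_tokens": 1024,
    "raw_usage": {
        "input_tokens": 1200,
        "output_tokens": 350,
        "cache_read_input_tokens": 1024,
        "cache_creation_input_tokens": 0,
    },
}
```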

posthoganalytics/ai/utils.py
@@ -13,6 +13,54 @@ from posthoganalytics.ai.types import FormattedMessage, StreamingEventData, Toke
  from posthoganalytics.client import Client as PostHogClient


+ def serialize_raw_usage(raw_usage: Any) -> Optional[Dict[str, Any]]:
+ """
+ Convert raw provider usage objects to JSON-serializable dicts.
+
+ Handles Pydantic models (OpenAI/Anthropic) and protobuf-like objects (Gemini)
+ with a fallback chain to ensure we never pass unserializable objects to PostHog.
+
+ Args:
+ raw_usage: Raw usage object from provider SDK
+
+ Returns:
+ Plain dict or None if conversion fails
+ """
+ if raw_usage is None:
+ return None
+
+ # Already a dict
+ if isinstance(raw_usage, dict):
+ return raw_usage
+
+ # Try Pydantic model_dump() (OpenAI/Anthropic)
+ if hasattr(raw_usage, "model_dump") and callable(raw_usage.model_dump):
+ try:
+ return raw_usage.model_dump()
+ except Exception:
+ pass
+
+ # Try to_dict() (some protobuf objects)
+ if hasattr(raw_usage, "to_dict") and callable(raw_usage.to_dict):
+ try:
+ return raw_usage.to_dict()
+ except Exception:
+ pass
+
+ # Try __dict__ / vars() for simple objects
+ try:
+ return vars(raw_usage)
+ except Exception:
+ pass
+
+ # Last resort: convert to string representation
+ # This ensures we always return something rather than failing
+ try:
+ return {"_raw": str(raw_usage)}
+ except Exception:
+ return None
+
+
  def merge_usage_stats(
  target: TokenUsage, source: TokenUsage, mode: str = "incremental"
  ) -> None:
@@ -60,6 +108,17 @@ def merge_usage_stats(
  current = target.get("web_search_count") or 0
  target["web_search_count"] = max(current, source_web_search)

+ # Merge raw_usage to avoid losing data from earlier events
+ # For Anthropic streaming: message_start has input tokens, message_delta has output
+ # Note: raw_usage is already serialized by converters, so it's a dict
+ source_raw_usage = source.get("raw_usage")
+ if source_raw_usage is not None and isinstance(source_raw_usage, dict):
+ current_raw_value = target.get("raw_usage")
+ current_raw: Dict[str, Any] = (
+ current_raw_value if isinstance(current_raw_value, dict) else {}
+ )
+ target["raw_usage"] = {**current_raw, **source_raw_usage}
+
  elif mode == "cumulative":
  # Replace with latest values (already cumulative)
  if source.get("input_tokens") is not None:
@@ -76,6 +135,9 @@
  target["reasoning_tokens"] = source["reasoning_tokens"]
  if source.get("web_search_count") is not None:
  target["web_search_count"] = source["web_search_count"]
+ # Note: raw_usage is already serialized by converters, so it's a dict
+ if source.get("raw_usage") is not None:
+ target["raw_usage"] = source["raw_usage"]

  else:
  raise ValueError(f"Invalid mode: {mode}. Must be 'incremental' or 'cumulative'")
@@ -332,6 +394,11 @@ def call_llm_and_track_usage(
  if web_search_count is not None and web_search_count > 0:
  tag("$ai_web_search_count", web_search_count)

+ raw_usage = usage.get("raw_usage")
+ if raw_usage is not None:
+ # Already serialized by converters
+ tag("$ai_usage", raw_usage)
+
  if posthog_distinct_id is None:
  tag("$process_person_profile", False)

@@ -457,6 +524,11 @@ async def call_llm_and_track_usage_async(
  if web_search_count is not None and web_search_count > 0:
  tag("$ai_web_search_count", web_search_count)

+ raw_usage = usage.get("raw_usage")
+ if raw_usage is not None:
+ # Already serialized by converters
+ tag("$ai_usage", raw_usage)
+
  if posthog_distinct_id is None:
  tag("$process_person_profile", False)

@@ -594,6 +666,12 @@ def capture_streaming_event(
  ):
  event_properties["$ai_web_search_count"] = web_search_count

+ # Add raw usage metadata if present (all providers)
+ raw_usage = event_data["usage_stats"].get("raw_usage")
+ if raw_usage is not None:
+ # Already serialized by converters
+ event_properties["$ai_usage"] = raw_usage
+
  # Handle provider-specific fields
  if (
  event_data["provider"] == "openai"

posthoganalytics/test/ai/test_prompts.py
@@ -0,0 +1,577 @@
+ import unittest
+ from unittest.mock import MagicMock, patch
+
+ from posthoganalytics.ai.prompts import Prompts
+
+
+ class MockResponse:
+ """Mock HTTP response for testing."""
+
+ def __init__(self, json_data=None, status_code=200, ok=True):
+ self._json_data = json_data
+ self.status_code = status_code
+ self.ok = ok
+
+ def json(self):
+ if self._json_data is None:
+ raise ValueError("No JSON data")
+ return self._json_data
+
+
+ class TestPrompts(unittest.TestCase):
+ """Tests for the Prompts class."""
+
+ mock_prompt_response = {
+ "id": 1,
+ "name": "test-prompt",
+ "prompt": "Hello, {{name}}! You are a helpful assistant for {{company}}.",
+ "version": 1,
+ "created_by": "user@example.com",
+ "created_at": "2024-01-01T00:00:00Z",
+ "updated_at": "2024-01-01T00:00:00Z",
+ "deleted": False,
+ }
+
+ def create_mock_posthog(
+ self, personal_api_key="phx_test_key", host="https://app.posthog.com"
+ ):
+ """Create a mock PostHog client."""
+ mock = MagicMock()
+ mock.personal_api_key = personal_api_key
+ mock.raw_host = host
+ return mock
+
+
+ class TestPromptsGet(TestPrompts):
+ """Tests for the Prompts.get() method."""
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_successfully_fetch_a_prompt(self, mock_get_session):
+ """Should successfully fetch a prompt."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.get("test-prompt")
+
+ self.assertEqual(result, self.mock_prompt_response["prompt"])
+ mock_get.assert_called_once()
+ call_args = mock_get.call_args
+ self.assertEqual(
+ call_args[0][0],
+ "https://app.posthog.com/api/environments/@current/llm_prompts/name/test-prompt/",
+ )
+ self.assertIn("Authorization", call_args[1]["headers"])
+ self.assertEqual(
+ call_args[1]["headers"]["Authorization"], "Bearer phx_test_key"
+ )
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.time.time")
+ def test_return_cached_prompt_when_fresh(self, mock_time, mock_get_session):
+ """Should return cached prompt when fresh (no API call)."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+ mock_time.return_value = 1000.0
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ # First call - fetches from API
+ result1 = prompts.get("test-prompt", cache_ttl_seconds=300)
+ self.assertEqual(result1, self.mock_prompt_response["prompt"])
+ self.assertEqual(mock_get.call_count, 1)
+
+ # Advance time by 60 seconds (still within TTL)
+ mock_time.return_value = 1060.0
+
+ # Second call - should use cache
+ result2 = prompts.get("test-prompt", cache_ttl_seconds=300)
+ self.assertEqual(result2, self.mock_prompt_response["prompt"])
+ self.assertEqual(mock_get.call_count, 1) # No additional fetch
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.time.time")
+ def test_refetch_when_cache_is_stale(self, mock_time, mock_get_session):
+ """Should refetch when cache is stale."""
+ mock_get = mock_get_session.return_value.get
+ updated_prompt_response = {
+ **self.mock_prompt_response,
+ "prompt": "Updated prompt: Hello, {{name}}!",
+ }
+
+ mock_get.side_effect = [
+ MockResponse(json_data=self.mock_prompt_response),
+ MockResponse(json_data=updated_prompt_response),
+ ]
+ mock_time.return_value = 1000.0
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ # First call - fetches from API
+ result1 = prompts.get("test-prompt", cache_ttl_seconds=60)
+ self.assertEqual(result1, self.mock_prompt_response["prompt"])
+ self.assertEqual(mock_get.call_count, 1)
+
+ # Advance time past TTL
+ mock_time.return_value = 1061.0
+
+ # Second call - should refetch
+ result2 = prompts.get("test-prompt", cache_ttl_seconds=60)
+ self.assertEqual(result2, updated_prompt_response["prompt"])
+ self.assertEqual(mock_get.call_count, 2)
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.time.time")
+ @patch("posthog.ai.prompts.log")
+ def test_use_stale_cache_on_fetch_failure_with_warning(
+ self, mock_log, mock_time, mock_get_session
+ ):
+ """Should use stale cache on fetch failure with warning."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.side_effect = [
+ MockResponse(json_data=self.mock_prompt_response),
+ Exception("Network error"),
+ ]
+ mock_time.return_value = 1000.0
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ # First call - populates cache
+ result1 = prompts.get("test-prompt", cache_ttl_seconds=60)
+ self.assertEqual(result1, self.mock_prompt_response["prompt"])
+
+ # Advance time past TTL
+ mock_time.return_value = 1061.0
+
+ # Second call - should use stale cache
+ result2 = prompts.get("test-prompt", cache_ttl_seconds=60)
+ self.assertEqual(result2, self.mock_prompt_response["prompt"])
+
+ # Check warning was logged
+ mock_log.warning.assert_called()
+ warning_call = mock_log.warning.call_args
+ self.assertIn("using stale cache", warning_call[0][0])
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.log")
+ def test_use_fallback_when_no_cache_and_fetch_fails_with_warning(
+ self, mock_log, mock_get_session
+ ):
+ """Should use fallback when no cache and fetch fails with warning."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.side_effect = Exception("Network error")
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ fallback = "Default system prompt."
+ result = prompts.get("test-prompt", fallback=fallback)
+
+ self.assertEqual(result, fallback)
+
+ # Check warning was logged
+ mock_log.warning.assert_called()
+ warning_call = mock_log.warning.call_args
+ self.assertIn("using fallback", warning_call[0][0])
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_throw_when_no_cache_no_fallback_and_fetch_fails(self, mock_get_session):
+ """Should throw when no cache, no fallback, and fetch fails."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.side_effect = Exception("Network error")
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ with self.assertRaises(Exception) as context:
+ prompts.get("test-prompt")
+
+ self.assertIn("Network error", str(context.exception))
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_handle_404_response(self, mock_get_session):
+ """Should handle 404 response."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(status_code=404, ok=False)
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ with self.assertRaises(Exception) as context:
+ prompts.get("nonexistent-prompt")
+
+ self.assertIn('Prompt "nonexistent-prompt" not found', str(context.exception))
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_handle_403_response(self, mock_get_session):
+ """Should handle 403 response."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(status_code=403, ok=False)
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ with self.assertRaises(Exception) as context:
+ prompts.get("restricted-prompt")
+
+ self.assertIn(
+ 'Access denied for prompt "restricted-prompt"', str(context.exception)
+ )
+
+ def test_throw_when_no_personal_api_key_configured(self):
+ """Should throw when no personal_api_key is configured."""
+ posthog = self.create_mock_posthog(personal_api_key=None)
+ prompts = Prompts(posthog)
+
+ with self.assertRaises(Exception) as context:
+ prompts.get("test-prompt")
+
+ self.assertIn(
+ "personal_api_key is required to fetch prompts", str(context.exception)
+ )
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_throw_when_api_returns_invalid_response_format(self, mock_get_session):
+ """Should throw when API returns invalid response format."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data={"invalid": "response"})
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ with self.assertRaises(Exception) as context:
+ prompts.get("test-prompt")
+
+ self.assertIn("Invalid response format", str(context.exception))
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_use_custom_host_from_posthog_options(self, mock_get_session):
+ """Should use custom host from PostHog options."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+
+ posthog = self.create_mock_posthog(host="https://eu.i.posthog.com")
+ prompts = Prompts(posthog)
+
+ prompts.get("test-prompt")
+
+ call_args = mock_get.call_args
+ self.assertTrue(
+ call_args[0][0].startswith("https://eu.i.posthog.com/"),
+ f"Expected URL to start with 'https://eu.i.posthog.com/', got {call_args[0][0]}",
+ )
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.time.time")
+ def test_use_default_cache_ttl_5_minutes(self, mock_time, mock_get_session):
+ """Should use default cache TTL (5 minutes) when not specified."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+ mock_time.return_value = 1000.0
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ # First call
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 1)
+
+ # Advance time by 4 minutes (within default 5-minute TTL)
+ mock_time.return_value = 1000.0 + (4 * 60)
+
+ # Second call - should use cache
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 1)
+
+ # Advance time past 5-minute TTL
+ mock_time.return_value = 1000.0 + (6 * 60)
+
+ # Third call - should refetch
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 2)
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.time.time")
+ def test_use_custom_default_cache_ttl_from_constructor(
+ self, mock_time, mock_get_session
+ ):
+ """Should use custom default cache TTL from constructor."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+ mock_time.return_value = 1000.0
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog, default_cache_ttl_seconds=60)
+
+ # First call
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 1)
+
+ # Advance time past custom TTL
+ mock_time.return_value = 1061.0
+
+ # Second call - should refetch
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 2)
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_url_encode_prompt_names_with_special_characters(self, mock_get_session):
+ """Should URL-encode prompt names with special characters."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ prompts.get("prompt with spaces/and/slashes")
+
+ call_args = mock_get.call_args
+ self.assertEqual(
+ call_args[0][0],
+ "https://app.posthog.com/api/environments/@current/llm_prompts/name/prompt%20with%20spaces%2Fand%2Fslashes/",
+ )
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_work_with_direct_options_no_posthog_client(self, mock_get_session):
+ """Should work with direct options (no PostHog client)."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+
+ prompts = Prompts(personal_api_key="phx_direct_key")
+
+ result = prompts.get("test-prompt")
+
+ self.assertEqual(result, self.mock_prompt_response["prompt"])
+ call_args = mock_get.call_args
+ self.assertEqual(
+ call_args[0][0],
+ "https://app.posthog.com/api/environments/@current/llm_prompts/name/test-prompt/",
+ )
+ self.assertEqual(
+ call_args[1]["headers"]["Authorization"], "Bearer phx_direct_key"
+ )
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_use_custom_host_from_direct_options(self, mock_get_session):
+ """Should use custom host from direct options."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+
+ prompts = Prompts(
+ personal_api_key="phx_direct_key", host="https://eu.posthog.com"
+ )
+
+ prompts.get("test-prompt")
+
+ call_args = mock_get.call_args
+ self.assertEqual(
+ call_args[0][0],
+ "https://eu.posthog.com/api/environments/@current/llm_prompts/name/test-prompt/",
+ )
+
+ @patch("posthog.ai.prompts._get_session")
+ @patch("posthog.ai.prompts.time.time")
+ def test_use_custom_default_cache_ttl_from_direct_options(
+ self, mock_time, mock_get_session
+ ):
+ """Should use custom default cache TTL from direct options."""
+ mock_get = mock_get_session.return_value.get
+ mock_get.return_value = MockResponse(json_data=self.mock_prompt_response)
+ mock_time.return_value = 1000.0
+
+ prompts = Prompts(
+ personal_api_key="phx_direct_key", default_cache_ttl_seconds=60
+ )
+
+ # First call
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 1)
+
+ # Advance time past custom TTL
+ mock_time.return_value = 1061.0
+
+ # Second call - should refetch
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 2)
+
+
+ class TestPromptsCompile(TestPrompts):
+ """Tests for the Prompts.compile() method."""
+
+ def test_replace_a_single_variable(self):
+ """Should replace a single variable."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile("Hello, {{name}}!", {"name": "World"})
+
+ self.assertEqual(result, "Hello, World!")
+
+ def test_replace_multiple_variables(self):
+ """Should replace multiple variables."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile(
+ "Hello, {{name}}! Welcome to {{company}}. Your tier is {{tier}}.",
+ {"name": "John", "company": "Acme Corp", "tier": "premium"},
+ )
+
+ self.assertEqual(
+ result, "Hello, John! Welcome to Acme Corp. Your tier is premium."
+ )
+
+ def test_handle_numbers(self):
+ """Should handle numbers."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile("You have {{count}} items.", {"count": 42})
+
+ self.assertEqual(result, "You have 42 items.")
+
+ def test_handle_booleans(self):
+ """Should handle booleans."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile("Feature enabled: {{enabled}}", {"enabled": True})
+
+ self.assertEqual(result, "Feature enabled: True")
+
+ def test_leave_unmatched_variables_unchanged(self):
+ """Should leave unmatched variables unchanged."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile(
+ "Hello, {{name}}! Your {{unknown}} is ready.", {"name": "World"}
+ )
+
+ self.assertEqual(result, "Hello, World! Your {{unknown}} is ready.")
+
+ def test_handle_prompts_with_no_variables(self):
+ """Should handle prompts with no variables."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile("You are a helpful assistant.", {})
+
+ self.assertEqual(result, "You are a helpful assistant.")
+
+ def test_handle_empty_variables_dict(self):
+ """Should handle empty variables dict."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile("Hello, {{name}}!", {})
+
+ self.assertEqual(result, "Hello, {{name}}!")
+
+ def test_handle_multiple_occurrences_of_same_variable(self):
+ """Should handle multiple occurrences of the same variable."""
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ result = prompts.compile(
+ "Hello, {{name}}! Goodbye, {{name}}!", {"name": "World"}
+ )
+
+ self.assertEqual(result, "Hello, World! Goodbye, World!")
+
+ def test_work_with_direct_options_initialization(self):
+ """Should work with direct options initialization."""
+ prompts = Prompts(personal_api_key="phx_test_key")
+
+ result = prompts.compile("Hello, {{name}}!", {"name": "World"})
+
+ self.assertEqual(result, "Hello, World!")
+
+ def test_handle_variables_with_hyphens(self):
+ """Should handle variables with hyphens."""
+ prompts = Prompts(personal_api_key="phx_test_key")
+
+ result = prompts.compile("User ID: {{user-id}}", {"user-id": "12345"})
+
+ self.assertEqual(result, "User ID: 12345")
+
+ def test_handle_variables_with_dots(self):
+ """Should handle variables with dots."""
+ prompts = Prompts(personal_api_key="phx_test_key")
+
+ result = prompts.compile("Company: {{company.name}}", {"company.name": "Acme"})
+
+ self.assertEqual(result, "Company: Acme")
+
+
+ class TestPromptsClearCache(TestPrompts):
+ """Tests for the Prompts.clear_cache() method."""
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_clear_a_specific_prompt_from_cache(self, mock_get_session):
+ """Should clear a specific prompt from cache."""
+ mock_get = mock_get_session.return_value.get
+ other_prompt_response = {**self.mock_prompt_response, "name": "other-prompt"}
+
+ mock_get.side_effect = [
+ MockResponse(json_data=self.mock_prompt_response),
+ MockResponse(json_data=other_prompt_response),
+ MockResponse(json_data=self.mock_prompt_response),
+ ]
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ # Populate cache with two prompts
+ prompts.get("test-prompt")
+ prompts.get("other-prompt")
+ self.assertEqual(mock_get.call_count, 2)
+
+ # Clear only test-prompt
+ prompts.clear_cache("test-prompt")
+
+ # test-prompt should be refetched
+ prompts.get("test-prompt")
+ self.assertEqual(mock_get.call_count, 3)
+
+ # other-prompt should still be cached
+ prompts.get("other-prompt")
+ self.assertEqual(mock_get.call_count, 3)
+
+ @patch("posthog.ai.prompts._get_session")
+ def test_clear_all_prompts_from_cache(self, mock_get_session):
+ """Should clear all prompts from cache when no name is provided."""
+ mock_get = mock_get_session.return_value.get
+ other_prompt_response = {**self.mock_prompt_response, "name": "other-prompt"}
+
+ mock_get.side_effect = [
+ MockResponse(json_data=self.mock_prompt_response),
+ MockResponse(json_data=other_prompt_response),
+ MockResponse(json_data=self.mock_prompt_response),
+ MockResponse(json_data=other_prompt_response),
+ ]
+
+ posthog = self.create_mock_posthog()
+ prompts = Prompts(posthog)
+
+ # Populate cache with two prompts
+ prompts.get("test-prompt")
+ prompts.get("other-prompt")
+ self.assertEqual(mock_get.call_count, 2)
+
+ # Clear all cache
+ prompts.clear_cache()
+
+ # Both prompts should be refetched
+ prompts.get("test-prompt")
+ prompts.get("other-prompt")
+ self.assertEqual(mock_get.call_count, 4)
+
+
+ if __name__ == "__main__":
+ unittest.main()

posthoganalytics/version.py
@@ -1,4 +1,4 @@
- VERSION = "7.7.0"
+ VERSION = "7.8.1"

  if __name__ == "__main__":
  print(VERSION, end="") # noqa: T201

posthoganalytics-7.7.0.dist-info/METADATA → posthoganalytics-7.8.1.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: posthoganalytics
- Version: 7.7.0
+ Version: 7.8.1
  Summary: Integrate PostHog into any python application.
  Home-page: https://github.com/posthog/posthog-python
  Author: Posthog

posthoganalytics-7.7.0.dist-info/RECORD → posthoganalytics-7.8.1.dist-info/RECORD
@@ -12,26 +12,27 @@ posthoganalytics/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  posthoganalytics/request.py,sha256=sv5dVU4jg4nFI4BpGCGvyoiz6yZZBLMqoBjhmzoomYo,11844
  posthoganalytics/types.py,sha256=OxGHSmmhVYwA7ecmJXUznDCZ1c4gAGtERzSLSYlyQFM,11540
  posthoganalytics/utils.py,sha256=-0w-OLcCaoldkbBebPzQyBzLJSo9G9yBOg8NDVz7La8,16088
- posthoganalytics/version.py,sha256=N71Rzf8IDjCmTh3rPlaceoLNhCzH4fhLpEfObPtcmmM,87
- posthoganalytics/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ posthoganalytics/version.py,sha256=sl041ey3-9y2JhJLC6u2e9H2z1sJUwKy_cOh50bQlg0,87
+ posthoganalytics/ai/__init__.py,sha256=Aj7D5QzrxqqrYXJHizNWe6YtYmI80N-EEZDOUzwOF3Q,71
+ posthoganalytics/ai/prompts.py,sha256=P8PPr5PLM3gPrI6tzGU61I_ymBJ3yumyGRyzSqtzK5M,8462
  posthoganalytics/ai/sanitization.py,sha256=Dpx_5gKZfDS38KjmK1C0lvvjm9N8Pp_oIxusac888-g,6057
- posthoganalytics/ai/types.py,sha256=arX98hR1PIPeJ3vFikxTlACIh1xPp6aEUw1gBLcKoB0,3273
- posthoganalytics/ai/utils.py,sha256=FDgf542pk7Hd_6EYg66h-YhxfQycOmmcb9V8ILgKUjI,23230
+ posthoganalytics/ai/types.py,sha256=65sj_ZFUx75FiLM4ESdRPyZhrT6rO_gqJ2-zfcUot40,3356
+ posthoganalytics/ai/utils.py,sha256=f-vzFLuUDNkKqxfp82PvlhoLaEWBLYTGSlmR63yveC4,25991
  posthoganalytics/ai/anthropic/__init__.py,sha256=8nTvETZzkfW-P3zBMmp06GOHs0N-xyOGu7Oa4di_lno,669
  posthoganalytics/ai/anthropic/anthropic.py,sha256=njOoVb9vkCdnPWAQuVF0XB0BnT2y1ScIryrCGyt5ur8,8750
  posthoganalytics/ai/anthropic/anthropic_async.py,sha256=EKqDjxoiiGNV2VsLhmMoi_1yKoMSTTUrthTkJlttV8A,8870
- posthoganalytics/ai/anthropic/anthropic_converter.py,sha256=0IrXWWGpvE6IIbpczl0osrf4R4XqYDQMBMsKKB_NinY,13071
+ posthoganalytics/ai/anthropic/anthropic_converter.py,sha256=M9Q04IVRzkLd_t_HdTL5Iz0MnThGF3X97OWyYW7scQc,13881
  posthoganalytics/ai/anthropic/anthropic_providers.py,sha256=y1_qc8Lbip-YDmpimPGg3DfTm5g-WZk5FrRCXzwF_Ow,2139
  posthoganalytics/ai/gemini/__init__.py,sha256=W1c2YcMah5wi4lTk7w8l9Yabw4l7jashBaelYirLadQ,470
  posthoganalytics/ai/gemini/gemini.py,sha256=pD9VOhaY42Tz4oZktErMkewzEYZ7gLaX21FUN-G7puc,15021
  posthoganalytics/ai/gemini/gemini_async.py,sha256=Tvt7rPa4cOWKupgk9hJvMTTqEzLWOI3ts9mZn9zt_60,15171
- posthoganalytics/ai/gemini/gemini_converter.py,sha256=Rg3RW3vW3aBzowloOlC3mcwyOJANcJ8OQB6q3UkDN84,21819
+ posthoganalytics/ai/gemini/gemini_converter.py,sha256=WoHoTs3qlomGuRLmP5jA-50XoUtOLqjVT-_Q77Jtn_Y,22101
  posthoganalytics/ai/langchain/__init__.py,sha256=9CqAwLynTGj3ASAR80C3PmdTdrYGmu99tz0JL-HPFgI,70
  posthoganalytics/ai/langchain/callbacks.py,sha256=Uaqok7GsXWz9dV9FLfcHxLZ_Gc8v_Wpi4kU9h8YHQ78,32711
  posthoganalytics/ai/openai/__init__.py,sha256=u4OuUT7k1NgFj0TrxjuyegOg7a_UA8nAU6a-Hszr0OM,490
  posthoganalytics/ai/openai/openai.py,sha256=UTHmlOyy2yOKP3MDbQLV25BH0xe2miF_9z8lgPE3Okg,21432
  posthoganalytics/ai/openai/openai_async.py,sha256=R1vbRrLDQuvZ3v9TOZAEuioeh21XJ_69zevhatIyVto,23709
- posthoganalytics/ai/openai/openai_converter.py,sha256=2xl1ZkCGiM5BCTu4RPwtRVYZg5lVQNJsxUzFlHfkuIk,25846
+ posthoganalytics/ai/openai/openai_converter.py,sha256=-8M4LkFzsAat0X8TkubtIyI5ytxvMknkNP6xPq7RLCU,26672
  posthoganalytics/ai/openai/openai_providers.py,sha256=RPVmj2V0_lAdno_ax5Ul2kwhBA9_rRgAdl_sCqrQc6M,4004
  posthoganalytics/ai/openai_agents/__init__.py,sha256=i12Gy9SlB_7Oqwk8lp2-WFjXiy_NlTr5swvE_mCkMRc,2520
  posthoganalytics/ai/openai_agents/processor.py,sha256=gL_PHj6foOi5wbAvW2B6oTQibVGg66a6k8nKVEXlf2o,31497
@@ -53,12 +54,13 @@ posthoganalytics/test/test_size_limited_dict.py,sha256=-5IQjIEr_-Dql24M0HusdR_Xr
  posthoganalytics/test/test_types.py,sha256=bRPHdwVpP7hu7emsplU8UVyzSQptv6PaG5lAoOD_BtM,7595
  posthoganalytics/test/test_utils.py,sha256=MTz7-Fvffz2a9IRwyKsVy_TnrvIihs-Ap3hhtqGSSAs,9732
  posthoganalytics/test/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ posthoganalytics/test/ai/test_prompts.py,sha256=h_SB_KZ1LccssMFqBdIrT--LbhxlIZTvvuSENVEugYE,21056
  posthoganalytics/test/ai/test_sanitization.py,sha256=Om1f22Z0q5Y4VSaDwPHgW3IYUZPoMj05ZYcTeCmOBlE,18155
  posthoganalytics/test/ai/test_system_prompts.py,sha256=1IQVvcs-YMxPxXOmkbNCFOhvu_NIx4eAanz1XT-QJ3Y,14371
  posthoganalytics/test/ai/openai_agents/__init__.py,sha256=VGLVcRkGkmj0d4MhjcwQ5IYxoaaMPlw0oR7eXSCcGXI,42
  posthoganalytics/test/ai/openai_agents/test_processor.py,sha256=p65z82yiVjMQUR5coaBMMhzV6xB1CezzsvQD1GIi4o0,30800
- posthoganalytics-7.7.0.dist-info/licenses/LICENSE,sha256=wGf9JBotDkSygFj43m49oiKlFnpMnn97keiZKF-40vE,2450
- posthoganalytics-7.7.0.dist-info/METADATA,sha256=gcHpMd9BOdead_UNaQDqa_RrOMz6MTLBkQWr1i0bNZo,6368
- posthoganalytics-7.7.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- posthoganalytics-7.7.0.dist-info/top_level.txt,sha256=8QsNIqIkBh1p2TXvKp0Em9ZLZKwe3uIqCETyW4s1GOE,17
- posthoganalytics-7.7.0.dist-info/RECORD,,
+ posthoganalytics-7.8.1.dist-info/licenses/LICENSE,sha256=wGf9JBotDkSygFj43m49oiKlFnpMnn97keiZKF-40vE,2450
+ posthoganalytics-7.8.1.dist-info/METADATA,sha256=DSRTl6OIWoxCrtJOYzVAbzW2gOuyRow6icgzjX66UU8,6368
+ posthoganalytics-7.8.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ posthoganalytics-7.8.1.dist-info/top_level.txt,sha256=8QsNIqIkBh1p2TXvKp0Em9ZLZKwe3uIqCETyW4s1GOE,17
+ posthoganalytics-7.8.1.dist-info/RECORD,,