shotgun-sh 0.2.11.dev3__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic.

Files changed (39)
  1. shotgun/agents/agent_manager.py +66 -12
  2. shotgun/agents/config/README.md +89 -0
  3. shotgun/agents/config/__init__.py +10 -1
  4. shotgun/agents/config/manager.py +287 -32
  5. shotgun/agents/config/models.py +21 -1
  6. shotgun/agents/config/provider.py +27 -0
  7. shotgun/agents/config/streaming_test.py +119 -0
  8. shotgun/agents/conversation_manager.py +14 -7
  9. shotgun/agents/history/history_processors.py +99 -3
  10. shotgun/agents/history/token_counting/openai.py +3 -1
  11. shotgun/build_constants.py +3 -3
  12. shotgun/exceptions.py +32 -0
  13. shotgun/logging_config.py +42 -0
  14. shotgun/main.py +2 -0
  15. shotgun/posthog_telemetry.py +18 -25
  16. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +3 -2
  17. shotgun/sentry_telemetry.py +157 -1
  18. shotgun/settings.py +5 -0
  19. shotgun/tui/app.py +16 -15
  20. shotgun/tui/screens/chat/chat_screen.py +156 -61
  21. shotgun/tui/screens/chat_screen/command_providers.py +13 -2
  22. shotgun/tui/screens/chat_screen/history/chat_history.py +1 -2
  23. shotgun/tui/screens/directory_setup.py +14 -5
  24. shotgun/tui/screens/feedback.py +10 -3
  25. shotgun/tui/screens/github_issue.py +111 -0
  26. shotgun/tui/screens/model_picker.py +8 -1
  27. shotgun/tui/screens/onboarding.py +431 -0
  28. shotgun/tui/screens/pipx_migration.py +12 -6
  29. shotgun/tui/screens/provider_config.py +25 -8
  30. shotgun/tui/screens/shotgun_auth.py +0 -10
  31. shotgun/tui/screens/welcome.py +32 -0
  32. shotgun/tui/services/conversation_service.py +8 -6
  33. shotgun/tui/widgets/widget_coordinator.py +3 -2
  34. shotgun_sh-0.2.19.dist-info/METADATA +465 -0
  35. {shotgun_sh-0.2.11.dev3.dist-info → shotgun_sh-0.2.19.dist-info}/RECORD +38 -33
  36. shotgun_sh-0.2.11.dev3.dist-info/METADATA +0 -130
  37. {shotgun_sh-0.2.11.dev3.dist-info → shotgun_sh-0.2.19.dist-info}/WHEEL +0 -0
  38. {shotgun_sh-0.2.11.dev3.dist-info → shotgun_sh-0.2.19.dist-info}/entry_points.txt +0 -0
  39. {shotgun_sh-0.2.11.dev3.dist-info → shotgun_sh-0.2.19.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/config/provider.py CHANGED
@@ -25,6 +25,7 @@ from .models import (
     ProviderType,
     ShotgunConfig,
 )
+from .streaming_test import check_streaming_capability
 
 logger = get_logger(__name__)
 
@@ -207,6 +208,7 @@ async def get_provider_model(
         spec = MODEL_SPECS[model_name]
 
         # Use Shotgun Account with determined model (provider = actual LLM provider)
+        # Shotgun accounts always support streaming (via LiteLLM proxy)
         return ModelConfig(
             name=spec.name,
             provider=spec.provider,  # Actual LLM provider (OPENAI/ANTHROPIC/GOOGLE)
@@ -214,6 +216,7 @@ async def get_provider_model(
             max_input_tokens=spec.max_input_tokens,
             max_output_tokens=spec.max_output_tokens,
             api_key=shotgun_api_key,
+            supports_streaming=True,  # Shotgun accounts always support streaming
         )
 
     # Priority 2: Fall back to individual provider keys
@@ -260,6 +263,29 @@ async def get_provider_model(
             raise ValueError(f"Model '{model_name.value}' not found")
         spec = MODEL_SPECS[model_name]
 
+        # Check and test streaming capability for GPT-5 family models
+        supports_streaming = True  # Default to True for all models
+        if model_name in (ModelName.GPT_5, ModelName.GPT_5_MINI):
+            # Check if streaming capability has been tested
+            streaming_capability = config.openai.supports_streaming
+
+            if streaming_capability is None:
+                # Not tested yet - run streaming test (test once for all GPT-5 models)
+                logger.info("Testing streaming capability for OpenAI GPT-5 family...")
+                streaming_capability = await check_streaming_capability(
+                    api_key, model_name.value
+                )
+
+                # Save result to config (applies to all OpenAI models)
+                config.openai.supports_streaming = streaming_capability
+                await config_manager.save(config)
+                logger.info(
+                    f"Streaming test result: "
+                    f"{'enabled' if streaming_capability else 'disabled'}"
+                )
+
+            supports_streaming = streaming_capability
+
         # Create fully configured ModelConfig
         return ModelConfig(
             name=spec.name,
@@ -268,6 +294,7 @@ async def get_provider_model(
             max_input_tokens=spec.max_input_tokens,
             max_output_tokens=spec.max_output_tokens,
             api_key=api_key,
+            supports_streaming=supports_streaming,
         )
 
     elif provider_enum == ProviderType.ANTHROPIC:
shotgun/agents/config/streaming_test.py ADDED
@@ -0,0 +1,119 @@
+"""Utility for testing streaming capability of OpenAI models."""
+
+import logging
+
+import httpx
+
+logger = logging.getLogger(__name__)
+
+# Maximum number of attempts to test streaming capability
+MAX_STREAMING_TEST_ATTEMPTS = 3
+
+# Timeout for each streaming test attempt (in seconds)
+STREAMING_TEST_TIMEOUT = 10.0
+
+
+async def check_streaming_capability(
+    api_key: str, model_name: str, max_attempts: int = MAX_STREAMING_TEST_ATTEMPTS
+) -> bool:
+    """Check if the given OpenAI model supports streaming with this API key.
+
+    Retries multiple times to handle transient network issues. Only returns False
+    if streaming definitively fails after all retry attempts.
+
+    Args:
+        api_key: The OpenAI API key to test
+        model_name: The model name (e.g., "gpt-5", "gpt-5-mini")
+        max_attempts: Maximum number of attempts (default: 3)
+
+    Returns:
+        True if streaming is supported, False if it definitively fails
+    """
+    url = "https://api.openai.com/v1/chat/completions"
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json",
+    }
+    # GPT-5 family uses max_completion_tokens instead of max_tokens
+    payload = {
+        "model": model_name,
+        "messages": [{"role": "user", "content": "test"}],
+        "stream": True,
+        "max_completion_tokens": 10,
+    }
+
+    last_error = None
+
+    for attempt in range(1, max_attempts + 1):
+        logger.debug(
+            f"Streaming test attempt {attempt}/{max_attempts} for {model_name}"
+        )
+
+        try:
+            async with httpx.AsyncClient(timeout=STREAMING_TEST_TIMEOUT) as client:
+                async with client.stream(
+                    "POST", url, json=payload, headers=headers
+                ) as response:
+                    # Check if we get a successful response
+                    if response.status_code != 200:
+                        last_error = f"HTTP {response.status_code}"
+                        logger.warning(
+                            f"Streaming test attempt {attempt} failed for {model_name}: {last_error}"
+                        )
+
+                        # For definitive errors (403 Forbidden, 404 Not Found), don't retry
+                        if response.status_code in (403, 404):
+                            logger.info(
+                                f"Streaming definitively unsupported for {model_name} (HTTP {response.status_code})"
+                            )
+                            return False
+
+                        # For other errors, retry
+                        continue
+
+                    # Try to read at least one chunk from the stream
+                    try:
+                        async for _ in response.aiter_bytes():
+                            # Successfully received streaming data
+                            logger.info(
+                                f"Streaming test passed for {model_name} (attempt {attempt})"
+                            )
+                            return True
+                    except Exception as e:
+                        last_error = str(e)
+                        logger.warning(
+                            f"Streaming test attempt {attempt} failed for {model_name} while reading stream: {e}"
+                        )
+                        continue
+
+        except httpx.TimeoutException:
+            last_error = "timeout"
+            logger.warning(
+                f"Streaming test attempt {attempt} timed out for {model_name}"
+            )
+            continue
+        except httpx.HTTPStatusError as e:
+            last_error = str(e)
+            logger.warning(
+                f"Streaming test attempt {attempt} failed for {model_name}: {e}"
+            )
+            continue
+        except Exception as e:
+            last_error = str(e)
+            logger.warning(
+                f"Streaming test attempt {attempt} failed for {model_name} with unexpected error: {e}"
+            )
+            continue
+
+        # If we got here without reading any chunks, streaming didn't work
+        last_error = "no data received"
+        logger.warning(
+            f"Streaming test attempt {attempt} failed for {model_name}: no data received"
+        )
+
+    # All attempts exhausted
+    logger.error(
+        f"Streaming test failed for {model_name} after {max_attempts} attempts. "
+        f"Last error: {last_error}. Assuming streaming is NOT supported."
+    )
+    return False
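
For orientation, a minimal usage sketch of the new helper. The API key and model name below are placeholders; per the provider.py hunks above, the real caller passes the configured OpenAI key and caches the result in config.

    import asyncio

    from shotgun.agents.config.streaming_test import check_streaming_capability

    async def main() -> None:
        # Placeholder key/model; production code reads these from the Shotgun config.
        supported = await check_streaming_capability("sk-placeholder", "gpt-5-mini")
        print(f"streaming supported: {supported}")

    asyncio.run(main())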
shotgun/agents/conversation_manager.py CHANGED
@@ -46,9 +46,12 @@ class ConversationManager:
 
         conversation.updated_at = datetime.now()
 
-        # Serialize to JSON using Pydantic's model_dump
-        data = conversation.model_dump(mode="json")
-        json_content = json.dumps(data, indent=2, ensure_ascii=False)
+        # Serialize to JSON in background thread to avoid blocking event loop
+        # This is crucial for large conversations (5k+ tokens)
+        data = await asyncio.to_thread(conversation.model_dump, mode="json")
+        json_content = await asyncio.to_thread(
+            json.dumps, data, indent=2, ensure_ascii=False
+        )
 
         async with aiofiles.open(
             self.conversation_path, "w", encoding="utf-8"
@@ -76,9 +79,13 @@ class ConversationManager:
         try:
             async with aiofiles.open(self.conversation_path, encoding="utf-8") as f:
                 content = await f.read()
-                data = json.loads(content)
+                # Deserialize JSON in background thread to avoid blocking
+                data = await asyncio.to_thread(json.loads, content)
 
-            conversation = ConversationHistory.model_validate(data)
+            # Validate model in background thread for large conversations
+            conversation = await asyncio.to_thread(
+                ConversationHistory.model_validate, data
+            )
             logger.debug(
                 "Conversation loaded from %s with %d agent messages",
                 self.conversation_path,
@@ -127,10 +134,10 @@ class ConversationManager:
                 "Failed to clear conversation at %s: %s", self.conversation_path, e
             )
 
-    def exists(self) -> bool:
+    async def exists(self) -> bool:
         """Check if a conversation history file exists.
 
         Returns:
             True if conversation file exists, False otherwise
         """
-        return self.conversation_path.exists()
+        return await aiofiles.os.path.exists(str(self.conversation_path))
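
The changes above follow one pattern: push synchronous, CPU-bound (de)serialization onto a worker thread so the event loop stays responsive. A self-contained sketch of that pattern, independent of ConversationManager:

    import asyncio
    import json
    from typing import Any

    async def dump_json_off_loop(data: dict[str, Any]) -> str:
        # json.dumps is synchronous; asyncio.to_thread runs it in a worker thread
        # so other tasks on the event loop keep making progress.
        return await asyncio.to_thread(json.dumps, data, indent=2, ensure_ascii=False)

    async def load_json_off_loop(content: str) -> dict[str, Any]:
        # Same idea for parsing large payloads on load.
        return await asyncio.to_thread(json.loads, content)

    print(asyncio.run(dump_json_off_loop({"messages": ["hello", "world"]})))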
shotgun/agents/history/history_processors.py CHANGED
@@ -1,7 +1,9 @@
 """History processors for managing conversation history in Shotgun agents."""
 
+from collections.abc import Awaitable, Callable
 from typing import TYPE_CHECKING, Any, Protocol
 
+from anthropic import APIStatusError
 from pydantic_ai import ModelSettings
 from pydantic_ai.messages import (
     ModelMessage,
@@ -14,6 +16,7 @@ from pydantic_ai.messages import (
 from shotgun.agents.llm import shotgun_model_request
 from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
 from shotgun.agents.models import AgentDeps
+from shotgun.exceptions import ContextSizeLimitExceeded
 from shotgun.logging_config import get_logger
 from shotgun.posthog_telemetry import track_event
 from shotgun.prompts import PromptLoader
@@ -51,6 +54,86 @@ logger = get_logger(__name__)
 prompt_loader = PromptLoader()
 
 
+async def _safe_token_estimation(
+    estimation_func: Callable[..., Awaitable[int]],
+    model_name: str,
+    max_tokens: int,
+    *args: Any,
+    **kwargs: Any,
+) -> int:
+    """Safely estimate tokens with proper error handling.
+
+    Wraps token estimation functions to handle failures gracefully.
+    Only RuntimeError (from token counters) is wrapped in ContextSizeLimitExceeded.
+    Other errors (network, auth) are allowed to bubble up.
+
+    Args:
+        estimation_func: Async function that estimates tokens
+        model_name: Name of the model for error messages
+        max_tokens: Maximum tokens for the model
+        *args: Arguments to pass to estimation_func
+        **kwargs: Keyword arguments to pass to estimation_func
+
+    Returns:
+        Token count from estimation_func
+
+    Raises:
+        ContextSizeLimitExceeded: If token counting fails with RuntimeError
+        Exception: Any other exceptions from estimation_func
+    """
+    try:
+        return await estimation_func(*args, **kwargs)
+    except Exception as e:
+        # Log the error with full context
+        logger.warning(
+            f"Token counting failed for {model_name}",
+            extra={
+                "error_type": type(e).__name__,
+                "error_message": str(e),
+                "model": model_name,
+            },
+        )
+
+        # Token counting behavior with oversized context (verified via testing):
+        #
+        # 1. OpenAI/tiktoken:
+        #    - Successfully counts any size (tested with 752K tokens, no error)
+        #    - Library errors: ValueError, KeyError, AttributeError, SSLError (file/cache issues)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # 2. Gemini/SentencePiece:
+        #    - Successfully counts any size (tested with 752K tokens, no error)
+        #    - Library errors: RuntimeError, IOError, TypeError (file/model loading issues)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # 3. Anthropic API:
+        #    - Successfully counts large token counts (tested with 752K tokens, no error)
+        #    - Only enforces 32 MB request size limit (not token count)
+        #    - Raises: APIStatusError(413) with error type 'request_too_large' for 32MB+ requests
+        #    - Other API errors: APIConnectionError, RateLimitError, APIStatusError (4xx/5xx)
+        #    - Wrapped as: RuntimeError by our counter
+        #
+        # IMPORTANT: No provider raises errors for "too many tokens" during counting.
+        # Token count validation happens separately by comparing count to max_input_tokens.
+        #
+        # We wrap RuntimeError (library-level failures from tiktoken/sentencepiece).
+        # We also wrap Anthropic's 413 error (request exceeds 32 MB) as it indicates
+        # context is effectively too large and needs user action to reduce it.
+        if isinstance(e, RuntimeError):
+            raise ContextSizeLimitExceeded(
+                model_name=model_name, max_tokens=max_tokens
+            ) from e
+
+        # Check for Anthropic's 32 MB request size limit (APIStatusError with status 413)
+        if isinstance(e, APIStatusError) and e.status_code == 413:
+            raise ContextSizeLimitExceeded(
+                model_name=model_name, max_tokens=max_tokens
+            ) from e
+
+        # Re-raise other exceptions (network errors, auth failures, etc.)
+        raise
+
+
 def is_summary_part(part: Any) -> bool:
     """Check if a message part is a compacted summary."""
     return isinstance(part, TextPart) and part.content.startswith(SUMMARY_MARKER)
@@ -157,9 +240,15 @@ async def token_limit_compactor(
 
     if last_summary_index is not None:
         # Check if post-summary conversation exceeds threshold for incremental compaction
-        post_summary_tokens = await estimate_post_summary_tokens(
-            messages, last_summary_index, deps.llm_model
+        post_summary_tokens = await _safe_token_estimation(
+            estimate_post_summary_tokens,
+            deps.llm_model.name,
+            model_max_tokens,
+            messages,
+            last_summary_index,
+            deps.llm_model,
         )
+
         post_summary_percentage = (
             (post_summary_tokens / max_tokens) * 100 if max_tokens > 0 else 0
         )
@@ -366,7 +455,14 @@ async def token_limit_compactor(
 
     else:
         # Check if total conversation exceeds threshold for full compaction
-        total_tokens = await estimate_tokens_from_messages(messages, deps.llm_model)
+        total_tokens = await _safe_token_estimation(
+            estimate_tokens_from_messages,
+            deps.llm_model.name,
+            model_max_tokens,
+            messages,
+            deps.llm_model,
+        )
+
         total_percentage = (total_tokens / max_tokens) * 100 if max_tokens > 0 else 0
 
         logger.debug(
shotgun/agents/history/token_counting/openai.py CHANGED
@@ -63,7 +63,9 @@ class OpenAITokenCounter(TokenCounter):
 
         try:
             return len(self.encoding.encode(text))
-        except Exception as e:
+        except BaseException as e:
+            # Must catch BaseException to handle PanicException from tiktoken's Rust layer
+            # which can occur with extremely long texts. Regular Exception won't catch it.
             raise RuntimeError(
                 f"Failed to count tokens for OpenAI model {self.model_name}"
             ) from e
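
A self-contained illustration of why the wider catch matters. The stand-in exception below mimics a native-extension panic (pyo3 surfaces panics as a BaseException subclass); it is not tiktoken's actual exception type.

    class FakePanic(BaseException):
        """Stand-in for a panic raised from a native extension."""

    def count_tokens(text: str) -> int:
        try:
            raise FakePanic("simulated tokenizer panic")
        except BaseException as e:  # `except Exception` would let FakePanic escape
            raise RuntimeError("Failed to count tokens") from e

    try:
        count_tokens("x" * 10)
    except RuntimeError as e:
        print(f"wrapped cause: {e.__cause__!r}")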
shotgun/build_constants.py CHANGED
@@ -8,12 +8,12 @@ DO NOT EDIT MANUALLY.
 SENTRY_DSN = 'https://2818a6d165c64eccc94cfd51ce05d6aa@o4506813296738304.ingest.us.sentry.io/4510045952409600'
 
 # PostHog configuration embedded at build time (empty strings if not provided)
-POSTHOG_API_KEY = ''
+POSTHOG_API_KEY = 'phc_KKnChzZUKeNqZDOTJ6soCBWNQSx3vjiULdwTR9H5Mcr'
 POSTHOG_PROJECT_ID = '191396'
 
 # Logfire configuration embedded at build time (only for dev builds)
-LOGFIRE_ENABLED = 'true'
-LOGFIRE_TOKEN = 'pylf_v1_us_RwZMlJm1tX6j0PL5RWWbmZpzK2hLBNtFWStNKlySfjh8'
+LOGFIRE_ENABLED = ''
+LOGFIRE_TOKEN = ''
 
 # Build metadata
 BUILD_TIME_ENV = "production" if SENTRY_DSN else "development"
shotgun/exceptions.py ADDED
@@ -0,0 +1,32 @@
+"""General exceptions for Shotgun application."""
+
+
+class ErrorNotPickedUpBySentry(Exception):  # noqa: N818
+    """Base for user-actionable errors that shouldn't be sent to Sentry.
+
+    These errors represent expected user conditions requiring action
+    rather than bugs that need tracking.
+    """
+
+
+class ContextSizeLimitExceeded(ErrorNotPickedUpBySentry):
+    """Raised when conversation context exceeds the model's limits.
+
+    This is a user-actionable error - they need to either:
+    1. Switch to a larger context model
+    2. Switch to a larger model, compact their conversation, then switch back
+    3. Clear the conversation and start fresh
+    """
+
+    def __init__(self, model_name: str, max_tokens: int):
+        """Initialize the exception.
+
+        Args:
+            model_name: Name of the model whose limit was exceeded
+            max_tokens: Maximum tokens allowed by the model
+        """
+        self.model_name = model_name
+        self.max_tokens = max_tokens
+        super().__init__(
+            f"Context too large for {model_name} (limit: {max_tokens:,} tokens)"
+        )
shotgun/logging_config.py CHANGED
@@ -27,6 +27,44 @@ def get_log_directory() -> Path:
     return log_dir
 
 
+def cleanup_old_log_files(log_dir: Path, max_files: int) -> None:
+    """Remove old log files, keeping only the most recent ones.
+
+    Also removes the legacy shotgun.log file if it exists.
+
+    Args:
+        log_dir: Directory containing log files
+        max_files: Maximum number of log files to keep
+    """
+    try:
+        # Remove legacy non-timestamped log file if it exists
+        legacy_log = log_dir / "shotgun.log"
+        if legacy_log.exists():
+            try:
+                legacy_log.unlink()
+            except OSError:
+                pass  # noqa: S110
+
+        # Find all shotgun log files
+        log_files = sorted(
+            log_dir.glob("shotgun-*.log"),
+            key=lambda p: p.stat().st_mtime,
+            reverse=True,  # Newest first
+        )
+
+        # Remove files beyond the limit
+        files_to_delete = log_files[max_files:]
+        for log_file in files_to_delete:
+            try:
+                log_file.unlink()
+            except OSError:
+                # Ignore errors when deleting individual files
+                pass  # noqa: S110
+    except Exception:  # noqa: S110
+        # Silently fail - log cleanup shouldn't break the application
+        pass
+
+
 class ColoredFormatter(logging.Formatter):
     """Custom formatter with colors for different log levels."""
 
@@ -123,6 +161,10 @@ def setup_logger(
     try:
         # Create file handler with ISO8601 timestamp for each run
         log_dir = get_log_directory()
+
+        # Clean up old log files before creating a new one
+        cleanup_old_log_files(log_dir, settings.logging.max_log_files)
+
         log_file = log_dir / f"shotgun-{_RUN_TIMESTAMP}.log"
 
         # Use regular FileHandler - each run gets its own isolated log file
shotgun/main.py CHANGED
@@ -55,6 +55,8 @@ logger = get_logger(__name__)
 logger.debug("Logfire observability enabled: %s", _logfire_enabled)
 
 # Initialize configuration
+# Note: If config migration fails, ConfigManager will auto-create fresh config
+# and set migration_failed flag for user notification
 try:
     import asyncio
 
shotgun/posthog_telemetry.py CHANGED
@@ -18,6 +18,9 @@ logger = get_early_logger(__name__)
 # Global PostHog client instance
 _posthog_client = None
 
+# Cache the shotgun instance ID to avoid async calls during event tracking
+_shotgun_instance_id: str | None = None
+
 
 def setup_posthog_observability() -> bool:
     """Set up PostHog analytics for usage tracking.
@@ -25,7 +28,7 @@ def setup_posthog_observability() -> bool:
     Returns:
         True if PostHog was successfully set up, False otherwise
     """
-    global _posthog_client
+    global _posthog_client, _shotgun_instance_id
 
     try:
         # Check if PostHog is already initialized
@@ -57,31 +60,20 @@ def setup_posthog_observability() -> bool:
         # Store the client for later use
         _posthog_client = posthog
 
-        # Set user context with anonymous shotgun instance ID from config
+        # Cache the shotgun instance ID for later use (avoids async issues)
         try:
             import asyncio
 
             config_manager = get_config_manager()
-            shotgun_instance_id = asyncio.run(config_manager.get_shotgun_instance_id())
-
-            # Identify the user in PostHog
-            posthog.identify(  # type: ignore[attr-defined]
-                distinct_id=shotgun_instance_id,
-                properties={
-                    "version": __version__,
-                    "environment": environment,
-                },
-            )
-
-            # Set default properties for all events
-            posthog.disabled = False
-            posthog.personal_api_key = None  # Not needed for event tracking
+            _shotgun_instance_id = asyncio.run(config_manager.get_shotgun_instance_id())
 
             logger.debug(
-                "PostHog user identified with anonymous ID: %s", shotgun_instance_id
+                "PostHog initialized with shotgun instance ID: %s",
+                _shotgun_instance_id,
             )
         except Exception as e:
-            logger.warning("Failed to set user context: %s", e)
+            logger.warning("Failed to load shotgun instance ID: %s", e)
+            # Continue anyway - we'll try to get it during event tracking
 
         logger.debug(
             "PostHog analytics configured successfully (environment: %s, version: %s)",
@@ -102,18 +94,19 @@ def track_event(event_name: str, properties: dict[str, Any] | None = None) -> No
         event_name: Name of the event to track
         properties: Optional properties to include with the event
     """
-    global _posthog_client
+    global _posthog_client, _shotgun_instance_id
 
     if _posthog_client is None:
         logger.debug("PostHog not initialized, skipping event: %s", event_name)
         return
 
     try:
-        import asyncio
-
-        # Get shotgun instance ID for tracking
-        config_manager = get_config_manager()
-        shotgun_instance_id = asyncio.run(config_manager.get_shotgun_instance_id())
+        # Use cached instance ID (loaded during setup)
+        if _shotgun_instance_id is None:
+            logger.warning(
+                "Shotgun instance ID not available, skipping event: %s", event_name
+            )
+            return
 
         # Add version and environment to properties
         if properties is None:
@@ -128,7 +121,7 @@ def track_event(event_name: str, properties: dict[str, Any] | None = None) -> No
 
         # Track the event using PostHog's capture method
         _posthog_client.capture(
-            distinct_id=shotgun_instance_id, event=event_name, properties=properties
+            distinct_id=_shotgun_instance_id, event=event_name, properties=properties
         )
         logger.debug("Tracked PostHog event: %s", event_name)
     except Exception as e:
shotgun/prompts/agents/partials/common_agent_system_prompt.j2 CHANGED
@@ -7,10 +7,11 @@ Your extensive expertise spans, among other things:
 ## KEY RULES
 
 {% if interactive_mode %}
-0. Always ask CLARIFYING QUESTIONS using structured output if the user's request is ambiguous or lacks sufficient detail.
+0. Always ask CLARIFYING QUESTIONS using structured output before doing work.
   - Return your response with the clarifying_questions field populated
-  - Do not make assumptions about what the user wants
+  - Do not make assumptions about what the user wants, get a clear understanding first.
   - Questions should be clear, specific, and answerable
+  - Do not ask too many questions that might overwhelm the user; prioritize the most important ones.
 {% endif %}
 1. Above all, prefer using tools to do the work and NEVER respond with text.
 2. IMPORTANT: Always ask for review and go ahead to move forward after using write_file().