anvil-dev-framework 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143) hide show
  1. package/README.md +71 -22
  2. package/VERSION +1 -1
  3. package/docs/ANV-263-hook-logging-investigation.md +116 -0
  4. package/docs/command-reference.md +398 -17
  5. package/docs/session-workflow.md +62 -9
  6. package/docs/system-architecture.md +584 -0
  7. package/global/api/__pycache__/ralph_api.cpython-314.pyc +0 -0
  8. package/global/api/openapi.yaml +357 -0
  9. package/global/api/ralph_api.py +528 -0
  10. package/global/commands/anvil-settings.md +47 -19
  11. package/global/commands/audit.md +163 -0
  12. package/global/commands/checklist.md +180 -0
  13. package/global/commands/coderabbit-fix.md +282 -0
  14. package/global/commands/efficiency.md +356 -0
  15. package/global/commands/evidence.md +117 -33
  16. package/global/commands/hud.md +24 -0
  17. package/global/commands/insights.md +101 -3
  18. package/global/commands/orient.md +22 -21
  19. package/global/commands/patterns.md +115 -0
  20. package/global/commands/ralph.md +47 -1
  21. package/global/commands/token-budget.md +214 -0
  22. package/global/commands/weekly-review.md +21 -1
  23. package/global/config/notifications.yaml.template +50 -0
  24. package/global/hooks/ralph_stop.sh +33 -1
  25. package/global/hooks/statusline.sh +67 -2
  26. package/global/lib/__pycache__/coderabbit_metrics.cpython-314.pyc +0 -0
  27. package/global/lib/__pycache__/command_tracker.cpython-314.pyc +0 -0
  28. package/global/lib/__pycache__/context_optimizer.cpython-314.pyc +0 -0
  29. package/global/lib/__pycache__/git_utils.cpython-314.pyc +0 -0
  30. package/global/lib/__pycache__/issue_models.cpython-314.pyc +0 -0
  31. package/global/lib/__pycache__/linear_provider.cpython-314.pyc +0 -0
  32. package/global/lib/__pycache__/optimization_applier.cpython-314.pyc +0 -0
  33. package/global/lib/__pycache__/ralph_state.cpython-314.pyc +0 -0
  34. package/global/lib/__pycache__/ralph_webhooks.cpython-314.pyc +0 -0
  35. package/global/lib/__pycache__/state_manager.cpython-314.pyc +0 -0
  36. package/global/lib/__pycache__/token_analyzer.cpython-314.pyc +0 -0
  37. package/global/lib/__pycache__/token_metrics.cpython-314.pyc +0 -0
  38. package/global/lib/coderabbit_metrics.py +647 -0
  39. package/global/lib/command_tracker.py +147 -0
  40. package/global/lib/context_optimizer.py +323 -0
  41. package/global/lib/linear_provider.py +210 -16
  42. package/global/lib/log_rotation.py +287 -0
  43. package/global/lib/optimization_applier.py +582 -0
  44. package/global/lib/ralph_events.py +398 -0
  45. package/global/lib/ralph_notifier.py +366 -0
  46. package/global/lib/ralph_state.py +264 -24
  47. package/global/lib/ralph_webhooks.py +470 -0
  48. package/global/lib/state_manager.py +121 -0
  49. package/global/lib/token_analyzer.py +1383 -0
  50. package/global/lib/token_metrics.py +919 -0
  51. package/global/tests/__pycache__/test_command_tracker.cpython-314-pytest-9.0.2.pyc +0 -0
  52. package/global/tests/__pycache__/test_context_optimizer.cpython-314-pytest-9.0.2.pyc +0 -0
  53. package/global/tests/__pycache__/test_doc_coverage.cpython-314-pytest-9.0.2.pyc +0 -0
  54. package/global/tests/__pycache__/test_git_utils.cpython-314-pytest-9.0.2.pyc +0 -0
  55. package/global/tests/__pycache__/test_issue_models.cpython-314-pytest-9.0.2.pyc +0 -0
  56. package/global/tests/__pycache__/test_linear_filtering.cpython-314-pytest-9.0.2.pyc +0 -0
  57. package/global/tests/__pycache__/test_linear_provider.cpython-314-pytest-9.0.2.pyc +0 -0
  58. package/global/tests/__pycache__/test_local_provider.cpython-314-pytest-9.0.2.pyc +0 -0
  59. package/global/tests/__pycache__/test_optimization_applier.cpython-314-pytest-9.0.2.pyc +0 -0
  60. package/global/tests/__pycache__/test_token_analyzer.cpython-314-pytest-9.0.2.pyc +0 -0
  61. package/global/tests/__pycache__/test_token_analyzer_phase6.cpython-314-pytest-9.0.2.pyc +0 -0
  62. package/global/tests/__pycache__/test_token_metrics.cpython-314-pytest-9.0.2.pyc +0 -0
  63. package/global/tests/test_command_tracker.py +172 -0
  64. package/global/tests/test_context_optimizer.py +321 -0
  65. package/global/tests/test_linear_filtering.py +319 -0
  66. package/global/tests/test_linear_provider.py +40 -1
  67. package/global/tests/test_optimization_applier.py +508 -0
  68. package/global/tests/test_token_analyzer.py +735 -0
  69. package/global/tests/test_token_analyzer_phase6.py +537 -0
  70. package/global/tests/test_token_metrics.py +829 -0
  71. package/global/tools/README.md +153 -0
  72. package/global/tools/__pycache__/anvil-hud.cpython-314.pyc +0 -0
  73. package/global/tools/__pycache__/orient_linear.cpython-314.pyc +0 -0
  74. package/global/tools/__pycache__/ralph-watch.cpython-314.pyc +0 -0
  75. package/global/tools/anvil-hud.py +86 -1
  76. package/global/tools/anvil-memory/src/__tests__/ccs/context-monitor.test.ts +472 -0
  77. package/global/tools/anvil-memory/src/__tests__/ccs/fixtures.ts +405 -0
  78. package/global/tools/anvil-memory/src/__tests__/ccs/index.ts +36 -0
  79. package/global/tools/anvil-memory/src/__tests__/ccs/prompt-generator.test.ts +653 -0
  80. package/global/tools/anvil-memory/src/__tests__/ccs/ralph-stop.test.ts +727 -0
  81. package/global/tools/anvil-memory/src/__tests__/ccs/test-utils.ts +340 -0
  82. package/global/tools/anvil-memory/src/__tests__/commands.test.ts +218 -0
  83. package/global/tools/anvil-memory/src/commands/context.ts +322 -0
  84. package/global/tools/anvil-memory/src/db.ts +108 -0
  85. package/global/tools/anvil-memory/src/index.ts +2 -8
  86. package/global/tools/orient_linear.py +159 -0
  87. package/global/tools/ralph-watch +423 -0
  88. package/package.json +2 -1
  89. package/project/.anvil-project.yaml.template +93 -0
  90. package/project/CLAUDE.md.template +343 -0
  91. package/project/agents/README.md +119 -0
  92. package/project/agents/cross-layer-debugger.md +217 -0
  93. package/project/agents/security-code-reviewer.md +162 -0
  94. package/project/constitution.md.template +235 -0
  95. package/project/coordination.md +103 -0
  96. package/project/docs/background-tasks.md +258 -0
  97. package/project/docs/skills-frontmatter.md +243 -0
  98. package/project/examples/README.md +106 -0
  99. package/project/examples/api-route-template.ts +171 -0
  100. package/project/examples/component-template.tsx +110 -0
  101. package/project/examples/hook-template.ts +152 -0
  102. package/project/examples/service-template.ts +207 -0
  103. package/project/examples/test-template.test.tsx +249 -0
  104. package/project/hooks/README.md +491 -0
  105. package/project/hooks/__pycache__/notification.cpython-314.pyc +0 -0
  106. package/project/hooks/__pycache__/post_tool_use.cpython-314.pyc +0 -0
  107. package/project/hooks/__pycache__/pre_tool_use.cpython-314.pyc +0 -0
  108. package/project/hooks/__pycache__/session_start.cpython-314.pyc +0 -0
  109. package/project/hooks/__pycache__/stop.cpython-314.pyc +0 -0
  110. package/project/hooks/notification.py +183 -0
  111. package/project/hooks/permission_request.py +438 -0
  112. package/project/hooks/post_tool_use.py +397 -0
  113. package/project/hooks/pre_compact.py +126 -0
  114. package/project/hooks/pre_tool_use.py +454 -0
  115. package/project/hooks/session_start.py +656 -0
  116. package/project/hooks/stop.py +356 -0
  117. package/project/hooks/subagent_start.py +223 -0
  118. package/project/hooks/subagent_stop.py +215 -0
  119. package/project/hooks/user_prompt_submit.py +110 -0
  120. package/project/hooks/utils/llm/anth.py +114 -0
  121. package/project/hooks/utils/llm/oai.py +114 -0
  122. package/project/hooks/utils/tts/elevenlabs_tts.py +63 -0
  123. package/project/hooks/utils/tts/mlx_audio_tts.py +86 -0
  124. package/project/hooks/utils/tts/openai_tts.py +92 -0
  125. package/project/hooks/utils/tts/pyttsx3_tts.py +75 -0
  126. package/project/linear.yaml.template +23 -0
  127. package/project/product.md.template +238 -0
  128. package/project/retros/README.md +126 -0
  129. package/project/rules/README.md +90 -0
  130. package/project/rules/debugging.md +139 -0
  131. package/project/rules/security-review.md +115 -0
  132. package/project/settings.yaml.template +185 -0
  133. package/project/specs/SPEC-ANV-72-hud-kanban.md +525 -0
  134. package/project/templates/api-python/CLAUDE.md +547 -0
  135. package/project/templates/generic/CLAUDE.md +260 -0
  136. package/project/templates/saas/CLAUDE.md +478 -0
  137. package/project/tests/README.md +140 -0
  138. package/project/tests/__pycache__/test_transcript_parser.cpython-314-pytest-9.0.2.pyc +0 -0
  139. package/project/tests/fixtures/sample-transcript.jsonl +21 -0
  140. package/project/tests/test-hooks.sh +259 -0
  141. package/project/tests/test-lib.sh +248 -0
  142. package/project/tests/test-statusline.sh +165 -0
  143. package/project/tests/test_transcript_parser.py +323 -0
@@ -0,0 +1,215 @@
1
+ #!/usr/bin/env -S uv run --script
2
+ # /// script
3
+ # requires-python = ">=3.11"
4
+ # dependencies = [
5
+ # "python-dotenv",
6
+ # ]
7
+ # ///
8
+
9
+ import argparse
10
+ import json
11
+ import os
12
+ import sys
13
+ import subprocess
14
+ from pathlib import Path
15
+
16
+ try:
17
+ from dotenv import load_dotenv
18
+ load_dotenv()
19
+ except ImportError:
20
+ pass # dotenv is optional
21
+
22
+
23
def get_tts_script_path():
    """Pick the TTS helper script to run.

    Priority order: MLX Audio (local) > ElevenLabs > OpenAI > pyttsx3.
    API-key-gated backends are skipped when their key is not set, and a
    backend is only chosen if its script file actually exists.

    Returns:
        str | None: path of the chosen script, or None if none is available.
    """
    tts_dir = Path(__file__).parent / "utils" / "tts"

    # (script filename, required environment variable or None)
    candidates = [
        ("mlx_audio_tts.py", None),                    # fast, free, local
        ("elevenlabs_tts.py", "ELEVENLABS_API_KEY"),
        ("openai_tts.py", "OPENAI_API_KEY"),
        ("pyttsx3_tts.py", None),                      # no API key required
    ]

    for filename, env_var in candidates:
        if env_var and not os.getenv(env_var):
            continue  # backend needs a key that is not configured
        candidate = tts_dir / filename
        if candidate.exists():
            return str(candidate)

    return None
55
+
56
+
57
def is_legitimate_completion(input_data):
    """Decide whether a SubagentStop event is a real completion.

    When Claude starts, it cleans up orphaned subagents from previous
    sessions, firing SubagentStop for each; those must not trigger TTS.
    A per-session marker file in the temp directory records when the
    session's first event arrived: the first event and anything within
    10 seconds of it are treated as startup cleanup.

    Returns:
        bool: True only for a completion that happened well after session
        start AND left a non-trivial (>500 byte) agent transcript.
    """
    import time
    import tempfile

    # Both identifiers are required for a real subagent event.
    agent_id = input_data.get('agent_id', '')
    session_id = input_data.get('session_id', '')
    if not agent_id or not session_id:
        return False

    marker = Path(tempfile.gettempdir()) / f".claude_session_{session_id}"

    if not marker.exists():
        # First event for this session: record the start time and treat
        # the event itself as startup cleanup -- no announcement.
        marker.write_text(str(time.time()))
        return False

    try:
        session_start = float(marker.read_text().strip())
        if time.time() - session_start < 10:
            # Session just started; still in the cleanup window.
            return False

        # Mature session: require a transcript with real content.
        transcript = input_data.get('agent_transcript_path', '')
        return bool(
            transcript
            and os.path.exists(transcript)
            and os.path.getsize(transcript) > 500
        )
    except (ValueError, OSError):
        # Unreadable/corrupt marker or transcript stat failure.
        return False
110
+
111
+
112
def announce_subagent_completion():
    """Speak a fixed "Subagent Complete" message via the best TTS backend.

    Failures of any kind (no backend, subprocess errors, timeouts) are
    swallowed: audio feedback is a convenience and must never break the
    hook that calls this.
    """
    try:
        script = get_tts_script_path()
        if not script:
            return  # no TTS backend available

        subprocess.run(
            ["uv", "run", script, "Subagent Complete"],
            capture_output=True,  # keep hook output clean
            timeout=10,           # never hang the hook on TTS
        )
    except Exception:
        pass  # best-effort only
136
+
137
+
138
def main():
    """Entry point for the SubagentStop hook.

    Reads the hook payload (one JSON object) from stdin, appends it to
    logs/subagent_stop.json, optionally exports the session transcript
    (--chat) and announces completion via TTS (--announce).  Always
    exits 0 -- a hook must never block Claude's stop sequence.
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('--chat', action='store_true', help='Copy transcript to chat.json')
        parser.add_argument('--announce', action='store_true', help='Announce completion via TTS')
        args = parser.parse_args()

        # Hook payload arrives as a single JSON object on stdin.
        input_data = json.load(sys.stdin)

        # Ensure the log directory exists and append the raw payload.
        log_dir = os.path.join(os.getcwd(), "logs")
        os.makedirs(log_dir, exist_ok=True)
        log_path = os.path.join(log_dir, "subagent_stop.json")

        if os.path.exists(log_path):
            with open(log_path, 'r') as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []  # corrupt log: start fresh rather than fail
        else:
            log_data = []

        log_data.append(input_data)

        with open(log_path, 'w') as f:
            json.dump(log_data, f, indent=2)

        # --chat: convert the .jsonl transcript into logs/chat.json
        # (same behavior as stop.py).
        if args.chat and 'transcript_path' in input_data:
            transcript_path = input_data['transcript_path']
            if os.path.exists(transcript_path):
                chat_data = []
                try:
                    with open(transcript_path, 'r') as f:
                        for line in f:
                            line = line.strip()
                            if line:
                                try:
                                    chat_data.append(json.loads(line))
                                except json.JSONDecodeError:
                                    pass  # skip malformed lines

                    chat_file = os.path.join(log_dir, 'chat.json')
                    with open(chat_file, 'w') as f:
                        json.dump(chat_data, f, indent=2)
                except Exception:
                    pass  # transcript export is best-effort

        # --announce only: post_tool_use.py already handles Task tool
        # completion announcements, so TTS here is strictly opt-in.
        if args.announce:
            announce_subagent_completion()

        sys.exit(0)

    except json.JSONDecodeError:
        # Malformed stdin payload: swallow and exit cleanly.
        sys.exit(0)
    except Exception:
        # Never let the hook fail Claude's stop sequence.
        sys.exit(0)


if __name__ == "__main__":
    main()
@@ -0,0 +1,110 @@
1
+ #!/usr/bin/env -S uv run --script
2
+ # /// script
3
+ # requires-python = ">=3.11"
4
+ # dependencies = [
5
+ # "python-dotenv",
6
+ # ]
7
+ # ///
8
+
9
+ import argparse
10
+ import json
11
+ import sys
12
+ from pathlib import Path
13
+
14
+ try:
15
+ from dotenv import load_dotenv
16
+ load_dotenv()
17
+ except ImportError:
18
+ pass # dotenv is optional
19
+
20
+
21
def log_user_prompt(session_id, input_data):
    """Append the full hook payload to logs/user_prompt_submit.json.

    NOTE(review): session_id is accepted for symmetry with the other
    hooks but is not used here -- the entire payload is logged as-is.
    """
    log_dir = Path("logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / 'user_prompt_submit.json'

    # Load the existing entries; a missing or corrupt file starts fresh.
    entries = []
    if log_file.exists():
        with open(log_file, 'r') as f:
            try:
                entries = json.load(f)
            except (json.JSONDecodeError, ValueError):
                entries = []

    entries.append(input_data)

    with open(log_file, 'w') as f:
        json.dump(entries, f, indent=2)
44
+
45
+
46
def validate_prompt(prompt):
    """Screen *prompt* against the (case-insensitive) blocklist.

    Returns:
        tuple: (True, None) when the prompt is allowed, otherwise
        (False, reason) for the first blocked pattern found.
    """
    # Populate with (substring, reason) pairs to enable blocking, e.g.
    # ('rm -rf /', 'Dangerous command detected').  Empty by default.
    blocked_patterns = []

    lowered = prompt.lower()
    for pattern, reason in blocked_patterns:
        if pattern.lower() in lowered:
            return False, reason

    return True, None
64
+
65
+
66
def main():
    """UserPromptSubmit hook entry point.

    Logs every prompt; with --validate (and without --log-only) it also
    screens the prompt, exiting 2 to block it with an error message.
    Every other path exits 0 so the hook never disrupts the session.
    """
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('--validate', action='store_true',
                            help='Enable prompt validation')
        parser.add_argument('--log-only', action='store_true',
                            help='Only log prompts, no validation or blocking')
        args = parser.parse_args()

        payload = json.loads(sys.stdin.read())

        session_id = payload.get('session_id', 'unknown')
        prompt = payload.get('prompt', '')

        log_user_prompt(session_id, payload)

        if args.validate and not args.log_only:
            ok, reason = validate_prompt(prompt)
            if not ok:
                # Exit code 2 tells Claude to reject the prompt.
                print(f"Prompt blocked: {reason}", file=sys.stderr)
                sys.exit(2)

        # Anything printed to stdout here would be appended to the prompt
        # as extra context (intentionally unused for now).

        sys.exit(0)

    except json.JSONDecodeError:
        sys.exit(0)  # malformed payload: let the prompt through
    except Exception:
        sys.exit(0)  # hooks must never break the session


if __name__ == '__main__':
    main()
@@ -0,0 +1,114 @@
1
+ #!/usr/bin/env -S uv run --script
2
+ # /// script
3
+ # requires-python = ">=3.8"
4
+ # dependencies = [
5
+ # "anthropic",
6
+ # "python-dotenv",
7
+ # ]
8
+ # ///
9
+
10
+ import os
11
+ import sys
12
+ from dotenv import load_dotenv
13
+
14
+
15
def prompt_llm(prompt_text):
    """Send *prompt_text* to Anthropic's fastest model and return the reply.

    Args:
        prompt_text (str): the prompt to send.

    Returns:
        str | None: the model's response text, or None when the API key
        is missing or any error occurs.
    """
    load_dotenv()

    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        return None

    try:
        import anthropic

        reply = anthropic.Anthropic(api_key=api_key).messages.create(
            model="claude-3-5-haiku-20241022",  # Fastest Anthropic model
            max_tokens=100,
            temperature=0.7,
            messages=[{"role": "user", "content": prompt_text}],
        )
        return reply.content[0].text.strip()

    except Exception:
        return None
47
+
48
+
49
def generate_completion_message():
    """Build a short, friendly task-completion message via the Anthropic API.

    When ENGINEER_NAME is set, the prompt asks the model to personalize
    some messages with that name.

    Returns:
        str | None: a cleaned one-line message, or None when the call fails.
    """
    engineer_name = os.getenv("ENGINEER_NAME", "").strip()

    if engineer_name:
        name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
        examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
    else:
        name_instruction = ""
        examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """

    prompt = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.

Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}

{examples}

Generate ONE completion message:"""

    message = prompt_llm(prompt)

    if message:
        # Strip wrapping quotes, then keep only the first line.
        message = message.strip().strip('"').strip("'").strip()
        message = message.split("\n")[0].strip()

    return message
91
+
92
+
93
def main():
    """Tiny CLI for manually exercising the helpers above."""
    if len(sys.argv) <= 1:
        print("Usage: ./anth.py 'your prompt here' or ./anth.py --completion")
        return

    if sys.argv[1] == "--completion":
        message = generate_completion_message()
        print(message if message else "Error generating completion message")
    else:
        response = prompt_llm(" ".join(sys.argv[1:]))
        print(response if response else "Error calling Anthropic API")


if __name__ == "__main__":
    main()
@@ -0,0 +1,114 @@
1
+ #!/usr/bin/env -S uv run --script
2
+ # /// script
3
+ # requires-python = ">=3.8"
4
+ # dependencies = [
5
+ # "openai",
6
+ # "python-dotenv",
7
+ # ]
8
+ # ///
9
+
10
+ import os
11
+ import sys
12
+ from dotenv import load_dotenv
13
+
14
+
15
def prompt_llm(prompt_text):
    """Send *prompt_text* to OpenAI's fastest model and return the reply.

    Args:
        prompt_text (str): the prompt to send.

    Returns:
        str | None: the model's response text, or None when the API key
        is missing or any error occurs.
    """
    load_dotenv()

    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        return None

    try:
        from openai import OpenAI

        completion = OpenAI(api_key=api_key).chat.completions.create(
            model="gpt-4.1-nano",  # Fastest OpenAI model
            messages=[{"role": "user", "content": prompt_text}],
            max_tokens=100,
            temperature=0.7,
        )
        return completion.choices[0].message.content.strip()

    except Exception:
        return None
47
+
48
+
49
def generate_completion_message():
    """Build a short, friendly task-completion message via the OpenAI API.

    When ENGINEER_NAME is set, the prompt asks the model to personalize
    some messages with that name.

    Returns:
        str | None: a cleaned one-line message, or None when the call fails.
    """
    engineer_name = os.getenv("ENGINEER_NAME", "").strip()

    if engineer_name:
        name_instruction = f"Sometimes (about 30% of the time) include the engineer's name '{engineer_name}' in a natural way."
        examples = f"""Examples of the style:
- Standard: "Work complete!", "All done!", "Task finished!", "Ready for your next move!"
- Personalized: "{engineer_name}, all set!", "Ready for you, {engineer_name}!", "Complete, {engineer_name}!", "{engineer_name}, we're done!" """
    else:
        name_instruction = ""
        examples = """Examples of the style: "Work complete!", "All done!", "Task finished!", "Ready for your next move!" """

    prompt = f"""Generate a short, friendly completion message for when an AI coding assistant finishes a task.

Requirements:
- Keep it under 10 words
- Make it positive and future focused
- Use natural, conversational language
- Focus on completion/readiness
- Do NOT include quotes, formatting, or explanations
- Return ONLY the completion message text
{name_instruction}

{examples}

Generate ONE completion message:"""

    message = prompt_llm(prompt)

    if message:
        # Strip wrapping quotes, then keep only the first line.
        message = message.strip().strip('"').strip("'").strip()
        message = message.split("\n")[0].strip()

    return message
91
+
92
+
93
def main():
    """Tiny CLI for manually exercising the helpers above."""
    if len(sys.argv) <= 1:
        print("Usage: ./oai.py 'your prompt here' or ./oai.py --completion")
        return

    if sys.argv[1] == "--completion":
        message = generate_completion_message()
        print(message if message else "Error generating completion message")
    else:
        response = prompt_llm(" ".join(sys.argv[1:]))
        print(response if response else "Error calling OpenAI API")


if __name__ == "__main__":
    main()
@@ -0,0 +1,63 @@
1
+ #!/usr/bin/env -S uv run --script --python 3.12
2
+ # /// script
3
+ # requires-python = ">=3.8,<3.14"
4
+ # dependencies = [
5
+ # "elevenlabs",
6
+ # "python-dotenv",
7
+ # ]
8
+ # ///
9
+
10
+ """
11
+ ElevenLabs TTS Script - Fast, high-quality text-to-speech.
12
+
13
+ Usage:
14
+ uv run elevenlabs_tts.py "Your text here"
15
+ """
16
+
17
+ import os
18
+ import sys
19
+ import tempfile
20
+ import subprocess
21
+ from dotenv import load_dotenv
22
+
23
+
24
def main():
    """Speak the command-line text via the ElevenLabs API.

    Reads ELEVENLABS_API_KEY from the environment (.env in the current
    and home directories is honoured).  Plays the generated MP3 with
    afplay (macOS) and exits 1 on any failure.
    """
    # Load from current dir and home dir
    load_dotenv()
    load_dotenv(os.path.expanduser("~/.env"))

    api_key = os.getenv('ELEVENLABS_API_KEY')
    if not api_key:
        sys.exit(1)

    try:
        from elevenlabs.client import ElevenLabs

        client = ElevenLabs(api_key=api_key)

        # Get text from command line or use default
        text = " ".join(sys.argv[1:]) if len(sys.argv) > 1 else "Task complete"

        # Generate audio
        audio = client.text_to_speech.convert(
            text=text,
            voice_id="CwhRBWXzGAHq8TQ4Fs17",  # Roger - Laid-Back, Casual
            model_id="eleven_turbo_v2_5",
            output_format="mp3_44100_128",
        )

        # Save to temp file and play with afplay (macOS)
        with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
            for chunk in audio:
                f.write(chunk)
            temp_path = f.name

        try:
            subprocess.run(['afplay', temp_path], check=True, capture_output=True)
        finally:
            # Remove the temp file even when playback fails; previously a
            # failed afplay (check=True raises) leaked the .mp3 on disk.
            os.unlink(temp_path)

    except Exception:
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -0,0 +1,86 @@
1
+ #!/usr/bin/env -S uv run --script --python 3.12
2
+ # /// script
3
+ # requires-python = ">=3.11,<3.14"
4
+ # dependencies = [
5
+ # "mlx-audio",
6
+ # "soundfile",
7
+ # "scipy",
8
+ # "numpy",
9
+ # "sounddevice",
10
+ # "loguru",
11
+ # "misaki[en]",
12
+ # "spacy",
13
+ # "en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.8.0/en_core_web_sm-3.8.0-py3-none-any.whl",
14
+ # ]
15
+ # ///
16
+
17
+ """
18
+ MLX Audio TTS - Fast local text-to-speech for Apple Silicon.
19
+
20
+ Uses Kokoro-82M model via MLX framework for native Metal acceleration.
21
+ First run downloads ~330MB model to ~/.cache/huggingface/
22
+
23
+ Usage:
24
+ uv run mlx_audio_tts.py "Task complete"
25
+ uv run mlx_audio_tts.py --voice af_heart "Hello world"
26
+ uv run mlx_audio_tts.py --speed 1.2 "Subagent complete"
27
+ """
28
+
29
+ import sys
30
+ import os
31
+ import subprocess
32
+ import tempfile
33
+ import argparse
34
+ import glob
35
+
36
+
37
def speak(text: str, voice: str = "af_heart", speed: float = 1.1):
    """Generate and play speech using MLX Audio with Kokoro.

    Args:
        text: the text to speak.
        voice: Kokoro voice name (default "af_heart").
        speed: playback speed multiplier.

    Exits 1 when MLX Audio is unavailable, no audio was generated, or any
    other error occurs.  Generated wav segments are always cleaned up in
    a finally block -- previously a playback timeout or an empty output
    file left the segments behind on disk.
    """
    audio_files = []
    try:
        from mlx_audio.tts.generate import generate_audio

        # Create a unique prefix for the output (the library appends
        # segment suffixes such as _000.wav, _001.wav, ...).
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=True) as f:
            output_prefix = f.name.replace('.wav', '')

        generate_audio(
            text=text,
            model_path="prince-canuma/Kokoro-82M",
            voice=voice,
            speed=speed,
            audio_format="wav",
            file_prefix=output_prefix
        )

        # Locate the generated segment files.
        audio_files = glob.glob(f"{output_prefix}*.wav")
        if not audio_files:
            sys.exit(1)

        # Play the first segment if it has content.
        if os.path.getsize(audio_files[0]) > 0:
            subprocess.run(['afplay', audio_files[0]], capture_output=True, timeout=10)

    except ImportError:
        sys.exit(1)
    except Exception:
        sys.exit(1)
    finally:
        # Best-effort removal of every generated segment, on all paths.
        for path in audio_files:
            try:
                os.unlink(path)
            except OSError:
                pass
73
+
74
+
75
def main():
    """Parse CLI arguments and speak the requested text."""
    parser = argparse.ArgumentParser(description="MLX Audio TTS for Apple Silicon")
    parser.add_argument("text", nargs="?", default="Task complete", help="Text to speak")
    parser.add_argument("--voice", default="af_heart", help="Voice to use (default: af_heart)")
    parser.add_argument("--speed", type=float, default=1.1, help="Speech speed (default: 1.1)")

    opts = parser.parse_args()
    speak(opts.text, opts.voice, opts.speed)


if __name__ == "__main__":
    main()