hypergolic 0.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Robert Townley
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,10 @@
1
+ Metadata-Version: 2.4
2
+ Name: hypergolic
3
+ Version: 0.1.2
4
+ Summary: Add your description here
5
+ Requires-Python: >=3.13
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: pydantic>=2.12.5
9
+ Requires-Dist: requests>=2.32.5
10
+ Dynamic: license-file
File without changes
File without changes
File without changes
@@ -0,0 +1,55 @@
1
+ import os
2
+ from pathlib import Path
3
+ import requests
4
+
5
+ from .schemas import APIResponse, Message
6
+ from ..tools.tool_list import ALL_TOOLS_FORMATTED
7
+
8
# Base URL of the Anthropic-compatible API; overridable via env for proxies/testing.
BASE_API_URL = os.environ.get("HYPERGOLIC_API_URL", "https://api.anthropic.com")
# API key read from the environment; call_messages_api raises if this is unset.
HYPERGOLIC_API_KEY = os.environ.get("HYPERGOLIC_API_KEY")
# Maximum number of tokens the model may generate per response.
MAX_TOKENS = 8192
11
+
12
+
13
def load_system_prompt() -> str:
    """Load the system prompt from the prompts directory.

    Returns:
        The prompt text with surrounding whitespace stripped, or a minimal
        built-in fallback prompt when the file is missing or unreadable.
    """
    prompt_path = Path(__file__).parent.parent / "prompts" / "system_prompt.txt"
    try:
        # Read explicitly as UTF-8 so behavior does not depend on the
        # platform's default locale encoding.
        return prompt_path.read_text(encoding="utf-8").strip()
    except (OSError, UnicodeDecodeError):
        # Fall back to a minimal system prompt if the file is missing,
        # unreadable, or not valid UTF-8 (FileNotFoundError is an OSError,
        # so the original fallback behavior is preserved and broadened).
        return "You are a helpful AI coding assistant with access to command-line tools."
21
+
22
+
23
def call_messages_api(messages: list[Message]) -> APIResponse:
    """Send the conversation to the Anthropic Messages API and parse the reply.

    Args:
        messages: Full conversation history, oldest first.

    Returns:
        The parsed APIResponse.

    Raises:
        Exception: If HYPERGOLIC_API_KEY or HYPERGOLIC_API_URL is not set.
        requests.HTTPError: If the API returns a non-2xx status.
        requests.Timeout: If the request exceeds the timeout.
    """
    if not HYPERGOLIC_API_KEY:
        raise Exception("HYPERGOLIC_API_KEY not set")
    elif not BASE_API_URL:
        raise Exception("HYPERGOLIC_API_URL not set")

    url = f"{BASE_API_URL}/v1/messages"

    headers = {
        "Content-Type": "application/json",
        "x-api-key": HYPERGOLIC_API_KEY,
        "anthropic-version": "2023-06-01",
    }

    # Prepare tools with cache control on the last tool so the (large, stable)
    # tool definitions can be prompt-cached server-side across calls.
    tools = ALL_TOOLS_FORMATTED.copy()
    if tools:
        # Replace the last entry with an annotated copy instead of mutating
        # the shared module-level dict in place.
        tools[-1] = {**tools[-1], "cache_control": {"type": "ephemeral"}}

    # Load system prompt
    system_prompt = load_system_prompt()

    data = {
        "model": "claude-sonnet-4-5",
        "max_tokens": MAX_TOKENS,
        "messages": [m.model_dump(exclude_none=True) for m in messages],
        "tools": tools,
        "system": system_prompt,
    }

    # A finite timeout keeps the client from hanging forever on a stalled
    # connection; raise_for_status surfaces HTTP errors clearly instead of
    # letting them fail later inside APIResponse validation.
    response = requests.post(url, headers=headers, json=data, timeout=120)
    response.raise_for_status()
    return APIResponse(**response.json())
@@ -0,0 +1,6 @@
1
+ from enum import Enum
2
+
3
+
4
class StopReason(str, Enum):
    """Reason the model stopped generating, as reported by the API."""

    END_TURN = "end_turn"
    TOOL_USE = "tool_use"
@@ -0,0 +1,69 @@
1
+ from typing import Literal, Optional, Union
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+ from .enums import StopReason
6
+ from ..tools.enums import ToolName
7
+
8
+
9
class CacheControl(BaseModel):
    """Marks a content block for Anthropic prompt caching."""

    # "ephemeral" is the only cache type this client sends.
    type: Literal["ephemeral"] = "ephemeral"
11
+
12
+
13
class UserToolResult(BaseModel):
    """Result of a tool invocation, sent back to the model in a user turn."""

    type: Literal["tool_result"] = Field(default="tool_result")
    # ID of the tool_use content block this result responds to.
    tool_use_id: str
    # Tool output: a single string or a list of strings.
    content: Union[str, list[str]]
    # Optional prompt-cache marker for this block.
    cache_control: Optional[CacheControl] = None
18
+
19
+
20
# A user turn carries either plain text or a list of tool results.
UserMessageContent = Union[str, list[UserToolResult]]
21
+
22
+
23
class UserMessage(BaseModel):
    """A message authored by the user (or carrying tool results back)."""

    role: Literal["user"] = Field(default="user")
    content: UserMessageContent
26
+
27
+
28
class AssistantMessageTextContent(BaseModel):
    """Plain-text content block within an assistant message."""

    type: Literal["text"]
    text: str
    # Optional prompt-cache marker for this block.
    cache_control: Optional[CacheControl] = None
32
+
33
+
34
class AssistantMessageToolUseContent(BaseModel):
    """Tool-invocation content block emitted by the assistant."""

    type: Literal["tool_use"]
    # Unique ID; echoed back in UserToolResult.tool_use_id.
    id: str
    name: ToolName
    # Tool arguments; shape is defined by the tool's input_schema.
    input: dict
39
+
40
+
41
# An assistant content block is either text or a tool invocation; the
# Literal "type" fields let pydantic discriminate between the two.
AssistantMessageContent = Union[
    AssistantMessageTextContent, AssistantMessageToolUseContent
]
44
+
45
+
46
class AssistantMessage(BaseModel):
    """A message authored by the assistant."""

    role: Literal["assistant"] = Field(default="assistant")
    content: list[AssistantMessageContent]
49
+
50
+
51
# Any message in the conversation history.
Message = Union[UserMessage, AssistantMessage]
52
+
53
+
54
class Usage(BaseModel):
    """Token accounting reported by the API for a single request."""

    input_tokens: int
    output_tokens: int
    # Cache fields are optional; present only when prompt caching applies.
    cache_creation_input_tokens: Optional[int] = None
    cache_read_input_tokens: Optional[int] = None
    service_tier: Optional[str] = None
60
+
61
+
62
class APIResponse(BaseModel):
    """Top-level response object returned by the /v1/messages endpoint."""

    id: str
    content: list[AssistantMessageContent]
    model: str
    role: str
    stop_reason: StopReason
    # Expected to be "message" for this endpoint.
    type: str = Field(examples=["message"])
    usage: Usage
@@ -0,0 +1,28 @@
1
+ # System Prompts
2
+
3
+ This directory contains system prompts used by the AI coding assistant.
4
+
5
+ ## Files
6
+
7
+ - **system_prompt.txt**: The main system prompt that defines the AI assistant's behavior, capabilities, and guidelines.
8
+
9
+ ## Usage
10
+
11
+ The system prompt is automatically loaded by `llm/api.py` when making API calls to the language model. It sets the context and behavior for how the assistant should respond to user requests.
12
+
13
+ ## Modifying the System Prompt
14
+
15
+ To modify the assistant's behavior:
16
+
17
+ 1. Edit `system_prompt.txt` with your desired changes
18
+ 2. The changes will take effect immediately on the next API call
19
+ 3. No code changes are needed - the prompt is loaded dynamically
20
+
21
+ ## Structure
22
+
23
+ The system prompt includes:
24
+ - **Core Capabilities**: Description of available tools
25
+ - **Guidelines**: Behavioral principles for the assistant
26
+ - **Best Practices**: Specific recommendations for effective assistance
27
+
28
+ Keep the prompt clear, focused, and actionable to ensure the best assistant behavior.
@@ -0,0 +1,45 @@
1
+ You are an AI coding assistant with access to command-line tools. Your purpose is to help users with programming tasks, code analysis, file operations, and system interactions.
2
+
3
+ ## Core Capabilities
4
+
5
+ You have access to a command_line tool that allows you to:
6
+ - Execute shell commands on the user's macOS system
7
+ - Read and write files
8
+ - Navigate directories
9
+ - Run programs and scripts
10
+ - Inspect system state
11
+
12
+ ## Guidelines
13
+
14
+ 1. **Be proactive and helpful**: When a user asks for help, take initiative to gather information and solve problems completely.
15
+
16
+ 2. **Use tools effectively**: Don't hesitate to use the command_line tool to:
17
+ - Explore project structure and understand context
18
+ - Read relevant files before making suggestions
19
+ - Make changes directly when appropriate
20
+ - Test your changes to ensure they work
21
+
22
+ 3. **Explain your actions**: When using tools, briefly explain what you're doing and why. Show command outputs when relevant to help the user understand.
23
+
24
+ 4. **Be safe and thoughtful**:
25
+ - Ask for confirmation before destructive operations
26
+ - Be careful with file modifications
27
+ - Validate inputs and check results
28
+ - Handle errors gracefully
29
+
30
+ 5. **Provide complete solutions**:
31
+ - Don't just suggest changes—implement them when possible
32
+ - Test your implementations
33
+ - Consider edge cases and potential issues
34
+
35
+ 6. **Maintain context**: Remember the conversation history and build on previous interactions to provide coherent, contextual assistance.
36
+
37
+ ## Best Practices
38
+
39
+ - Read before writing: Examine existing code/files before making changes
40
+ - Test your changes: Run commands to verify functionality
41
+ - Be efficient: Combine operations when sensible, but keep them clear
42
+ - Stay focused: Address the user's request directly while being thorough
43
+ - Communicate clearly: Use formatting to make responses readable
44
+
45
+ Your goal is to be a capable, trustworthy assistant that can effectively help with real-world coding and system tasks.
File without changes
@@ -0,0 +1,44 @@
1
+ import shlex
2
+ import subprocess
3
+
4
+ from pydantic import BaseModel, Field
5
+
6
+ from .enums import ToolName
7
+ from .schemas import Tool, ToolOutput
8
+
9
+
10
class CommandLineToolInput(BaseModel):
    """Validated input for the command_line tool."""

    cmd: str = Field(description="The command to run")
12
+
13
+
14
class CommandLineToolOutput(BaseModel):
    """Captured result of a completed subprocess."""

    returncode: int
    stderr: str
    stdout: str
18
+
19
+
20
def issue_cmd(input: CommandLineToolInput) -> ToolOutput:
    """Run the requested shell command and return its result as a JSON string.

    NOTE: shell=True is intentional — executing arbitrary shell commands on
    the user's machine is this tool's entire purpose.
    """
    completed = subprocess.run(
        input.cmd,
        shell=True,
        capture_output=True,
        text=True,
    )
    return CommandLineToolOutput(
        returncode=completed.returncode,
        stderr=completed.stderr,
        stdout=completed.stdout,
    ).model_dump_json()
28
+
29
+
30
# Tool definition handed to the API; input_schema mirrors CommandLineToolInput.
CommandLineTool = Tool(
    name=ToolName.COMMAND_LINE,
    description="Issue a subprocess to a user's MacOS local machine",
    input_schema={
        "type": "object",
        "properties": {
            "cmd": {
                "type": "string",
                "description": "The command to run",
            }
        },
    },
    # Local implementation invoked when the model requests this tool.
    callable=issue_cmd,
    input_model=CommandLineToolInput,
)
@@ -0,0 +1,5 @@
1
+ from enum import Enum
2
+
3
+
4
class ToolName(str, Enum):
    """Canonical names for the tools exposed to the model."""

    COMMAND_LINE = "command_line"
@@ -0,0 +1,13 @@
1
+ from typing import Callable, Union
2
+ from pydantic import BaseModel, Field
3
+
4
+
5
class Tool(BaseModel):
    """A tool definition plus its local implementation.

    Only name, description, and input_schema are serialized for the API;
    callable and input_model are excluded from dumps and used locally to
    execute the tool.
    """

    name: str = Field(examples=["get_weather"])
    description: str = Field(examples=["Get the current weather in a given location"])
    input_schema: dict
    # Local implementation; excluded from API payloads.
    callable: Callable = Field(exclude=True)
    # Pydantic model used to validate tool input; excluded from payloads.
    input_model: type[BaseModel] = Field(exclude=True)
11
+
12
+
13
# A tool returns either a single string or a list of strings.
ToolOutput = Union[str, list[str]]
@@ -0,0 +1,7 @@
1
+ from .schemas import Tool
2
+
3
+ from .command_line import CommandLineTool
4
+
5
# Registry of every tool available to the assistant.
ALL_TOOLS: list[Tool] = [CommandLineTool]
# API-ready dicts (callable/input_model are excluded via Field(exclude=True)).
ALL_TOOLS_FORMATTED = [t.model_dump() for t in ALL_TOOLS]
# Lookup from tool name to Tool, for dispatching tool_use requests.
TOOL_MAP = {t.name: t for t in ALL_TOOLS}
@@ -0,0 +1,10 @@
1
+ Metadata-Version: 2.4
2
+ Name: hypergolic
3
+ Version: 0.1.2
4
+ Summary: Add your description here
5
+ Requires-Python: >=3.13
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: pydantic>=2.12.5
9
+ Requires-Dist: requests>=2.32.5
10
+ Dynamic: license-file
@@ -0,0 +1,20 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ hypergolic/__init__.py
5
+ hypergolic.egg-info/PKG-INFO
6
+ hypergolic.egg-info/SOURCES.txt
7
+ hypergolic.egg-info/dependency_links.txt
8
+ hypergolic.egg-info/requires.txt
9
+ hypergolic.egg-info/top_level.txt
10
+ hypergolic/llm/__init__.py
11
+ hypergolic/llm/api.py
12
+ hypergolic/llm/enums.py
13
+ hypergolic/llm/schemas.py
14
+ hypergolic/prompts/README.md
15
+ hypergolic/prompts/system_prompt.txt
16
+ hypergolic/tools/__init__.py
17
+ hypergolic/tools/command_line.py
18
+ hypergolic/tools/enums.py
19
+ hypergolic/tools/schemas.py
20
+ hypergolic/tools/tool_list.py
@@ -0,0 +1,2 @@
1
+ pydantic>=2.12.5
2
+ requests>=2.32.5
@@ -0,0 +1 @@
1
+ hypergolic
@@ -0,0 +1,17 @@
1
+ [project]
2
+ name = "hypergolic"
3
+ version = "0.1.2"
4
+ description = "AI coding assistant CLI with command-line tool access"
5
+ readme = "README.md"
6
+ requires-python = ">=3.13"
7
+ dependencies = [
8
+ "pydantic>=2.12.5",
9
+ "requests>=2.32.5",
10
+ ]
11
+
12
+ [build-system]
13
+ requires = ["setuptools>=61.0"]
14
+ build-backend = "setuptools.build_meta"
15
+
16
+ [tool.setuptools.package-data]
17
+ hypergolic = ["prompts/*"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+