starnose-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. starnose-0.1.0/.github/workflows/publish.yml +65 -0
  2. starnose-0.1.0/.github/workflows/test.yml +31 -0
  3. starnose-0.1.0/.gitignore +8 -0
  4. starnose-0.1.0/Formula/starnose.rb +19 -0
  5. starnose-0.1.0/LICENSE +21 -0
  6. starnose-0.1.0/PKG-INFO +194 -0
  7. starnose-0.1.0/README.md +156 -0
  8. starnose-0.1.0/examples/test_agent.py +127 -0
  9. starnose-0.1.0/examples/test_mitm.py +62 -0
  10. starnose-0.1.0/pyproject.toml +56 -0
  11. starnose-0.1.0/scripts/install.sh +68 -0
  12. starnose-0.1.0/scripts/update-homebrew.sh +30 -0
  13. starnose-0.1.0/starnose/__init__.py +7 -0
  14. starnose-0.1.0/starnose/certs.py +150 -0
  15. starnose-0.1.0/starnose/cli.py +999 -0
  16. starnose-0.1.0/starnose/db.py +553 -0
  17. starnose-0.1.0/starnose/hypotheses.py +208 -0
  18. starnose-0.1.0/starnose/integrations/__init__.py +7 -0
  19. starnose-0.1.0/starnose/integrations/anthropic_patch.py +120 -0
  20. starnose-0.1.0/starnose/integrations/langchain.py +169 -0
  21. starnose-0.1.0/starnose/integrations/openai_patch.py +90 -0
  22. starnose-0.1.0/starnose/mitm.py +463 -0
  23. starnose-0.1.0/starnose/proxy.py +479 -0
  24. starnose-0.1.0/starnose/sdk.py +90 -0
  25. starnose-0.1.0/starnose/tokens.py +158 -0
  26. starnose-0.1.0/starnose/tui/__init__.py +0 -0
  27. starnose-0.1.0/starnose/tui/diff.py +141 -0
  28. starnose-0.1.0/starnose/tui/inspect.py +236 -0
  29. starnose-0.1.0/starnose/tui/stats.py +200 -0
  30. starnose-0.1.0/starnose/tui/watch.py +173 -0
  31. starnose-0.1.0/tests/__init__.py +0 -0
  32. starnose-0.1.0/tests/test_db.py +175 -0
  33. starnose-0.1.0/tests/test_hypotheses.py +176 -0
  34. starnose-0.1.0/tests/test_proxy.py +152 -0
  35. starnose-0.1.0/tests/test_tokens.py +107 -0
@@ -0,0 +1,65 @@
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ release:
5
+ types: [published]
6
+ workflow_dispatch:
7
+
8
+ permissions:
9
+ id-token: write
10
+
11
+ jobs:
12
+ build:
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - uses: actions/checkout@v4
16
+
17
+ - uses: actions/setup-python@v5
18
+ with:
19
+ python-version: "3.12"
20
+
21
+ - name: Install build tools
22
+ run: pip install build
23
+
24
+ - name: Build package
25
+ run: python -m build
26
+
27
+ - name: Upload artifacts
28
+ uses: actions/upload-artifact@v4
29
+ with:
30
+ name: dist
31
+ path: dist/
32
+
33
+ publish-pypi:
34
+ needs: build
35
+ runs-on: ubuntu-latest
36
+ environment: pypi
37
+ permissions:
38
+ id-token: write
39
+ steps:
40
+ - name: Download artifacts
41
+ uses: actions/download-artifact@v4
42
+ with:
43
+ name: dist
44
+ path: dist/
45
+
46
+ - name: Publish to PyPI
47
+ uses: pypa/gh-action-pypi-publish@release/v1
48
+
49
+ update-homebrew:
50
+ needs: publish-pypi
51
+ runs-on: ubuntu-latest
52
+ steps:
53
+ - uses: actions/checkout@v4
54
+
55
+ - name: Get version
56
+ id: version
57
+ run: echo "version=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
58
+
59
+ - name: Trigger Homebrew tap update
60
+ run: |
61
+ echo "Update your Homebrew formula with:"
62
+ echo " version: ${{ steps.version.outputs.version }}"
63
+ echo " url: https://pypi.org/packages/source/s/starnose/starnose-${{ steps.version.outputs.version }}.tar.gz"
64
+ echo ""
65
+ echo "Run: scripts/update-homebrew.sh ${{ steps.version.outputs.version }}"
@@ -0,0 +1,31 @@
1
+ name: Tests
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ jobs:
10
+ test:
11
+ runs-on: ${{ matrix.os }}
12
+ strategy:
13
+ matrix:
14
+ os: [ubuntu-latest, macos-latest]
15
+ python-version: ["3.10", "3.11", "3.12", "3.13"]
16
+
17
+ steps:
18
+ - uses: actions/checkout@v4
19
+
20
+ - uses: actions/setup-python@v5
21
+ with:
22
+ python-version: ${{ matrix.python-version }}
23
+
24
+ - name: Install dependencies
25
+ run: pip install -e ".[dev]" 2>/dev/null || pip install -e .
26
+
27
+ - name: Install test dependencies
28
+ run: pip install pytest
29
+
30
+ - name: Run tests
31
+ run: python -m pytest tests/ -v
@@ -0,0 +1,8 @@
1
+ __pycache__/
2
+ *.pyc
3
+ *.egg-info/
4
+ dist/
5
+ build/
6
+ .pytest_cache/
7
+ *.db
8
+ .starnose/
@@ -0,0 +1,19 @@
1
+ class Starnose < Formula
2
+ include Language::Python::Virtualenv
3
+
4
+ desc "Context window observability for LLM agents"
5
+ homepage "https://github.com/eitanlebras/starnose"
6
+ url "https://files.pythonhosted.org/packages/source/s/starnose/starnose-0.1.0.tar.gz"
7
+ sha256 "UPDATE_WITH_REAL_SHA256"
8
+ license "MIT"
9
+
10
+ depends_on "python@3.12"
11
+
12
+ def install
13
+ virtualenv_install_with_resources
14
+ end
15
+
16
+ test do
17
+ assert_match "Context window observability", shell_output("#{bin}/snose --help")
18
+ end
19
+ end
starnose-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 starnose contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,194 @@
1
+ Metadata-Version: 2.4
2
+ Name: starnose
3
+ Version: 0.1.0
4
+ Summary: See exactly what your agent sees. Context window observability for LLM agents.
5
+ Project-URL: Homepage, https://github.com/eitanlebras/starnose
6
+ Project-URL: Repository, https://github.com/eitanlebras/starnose
7
+ Project-URL: Issues, https://github.com/eitanlebras/starnose/issues
8
+ Author: Eitan Lebras
9
+ License-Expression: MIT
10
+ License-File: LICENSE
11
+ Keywords: agents,anthropic,claude,context-window,llm,observability,openai,tokens
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Environment :: Console
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
21
+ Classifier: Topic :: Software Development :: Debuggers
22
+ Classifier: Topic :: Software Development :: Testing
23
+ Requires-Python: >=3.10
24
+ Requires-Dist: aiosqlite>=0.20
25
+ Requires-Dist: anthropic>=0.28
26
+ Requires-Dist: cryptography>=42.0
27
+ Requires-Dist: fastapi>=0.111
28
+ Requires-Dist: httpx>=0.27
29
+ Requires-Dist: openai>=1.30
30
+ Requires-Dist: python-dotenv>=1.0
31
+ Requires-Dist: rich>=13
32
+ Requires-Dist: sqlalchemy>=2.0
33
+ Requires-Dist: textual>=0.60
34
+ Requires-Dist: tiktoken>=0.7
35
+ Requires-Dist: typer>=0.12
36
+ Requires-Dist: uvicorn>=0.30
37
+ Description-Content-Type: text/markdown
38
+
39
+ # starnose
40
+
41
+ Your agent is running in the dark. starnose turns the lights on.
42
+
43
+ `htop` for your agent's context window. Intercept, record, visualize, and compare the context sent to LLMs across agent runs — zero code changes required.
44
+
45
+ ![demo](demo.gif)
46
+
47
+ ## Install
48
+
49
+ ```bash
50
+ pip install starnose
51
+ ```
52
+
53
+ ## Quick Start
54
+
55
+ ```bash
56
+ # 1. Run your agent with context recording
57
+ snose run python my_agent.py
58
+
59
+ # 2. Inspect what your agent sent to the LLM
60
+ snose inspect
61
+
62
+ # 3. Compare two runs
63
+ snose diff
64
+ ```
65
+
66
+ That's it. No code changes. No config files. No API keys.
67
+
68
+ ## How It Works
69
+
70
+ starnose starts a local proxy that intercepts all OpenAI and Anthropic API calls from your agent. It records every message, token count, and response to a local SQLite database. You then use the TUI tools to inspect, compare, and optimize your agent's context window usage.
71
+
72
+ ## Works With
73
+
74
+ - **Claude Code** — `snose run -- claude`
75
+ - **Codex CLI** — `snose run -- codex`
76
+ - **LangChain** — callback handler included
77
+ - **OpenAI SDK** — monkeypatch or proxy
78
+ - **Anthropic SDK** — monkeypatch or proxy
79
+ - **Any OpenAI-compatible agent** — `snose run -- <command>`
80
+
81
+ ## Commands
82
+
83
+ | Command | Description |
84
+ |---------|-------------|
85
+ | `snose run <cmd>` | Run a command with context interception |
86
+ | `snose inspect [id]` | Inspect a run's context window (TUI) |
87
+ | `snose diff [a] [b]` | Compare two runs side-by-side |
88
+ | `snose watch` | Live-monitor a running agent |
89
+ | `snose stats` | Aggregate stats across runs |
90
+ | `snose optimize [id]` | Get optimization suggestions |
91
+ | `snose ls` | List recorded runs |
92
+ | `snose export [id]` | Export run data to JSON |
93
+ | `snose config` | View/modify configuration |
94
+
95
+ ### snose run
96
+
97
+ ```bash
98
+ snose run python my_agent.py
99
+ snose run --name "gpt4-chunked" --tag prod python agent.py
100
+ snose run --compare python agent.py # auto-diff against last run
101
+ ```
102
+
103
+ Flags:
104
+ - `--name` — human-readable name
105
+ - `--tag` — repeatable tags
106
+ - `--model` — override model for token counting
107
+ - `--compare` — auto-diff when complete
108
+ - `--no-proxy` — disable interception
109
+
110
+ ### snose inspect
111
+
112
+ ```bash
113
+ snose inspect # most recent run
114
+ snose inspect run_a2f3 # specific run
115
+ snose inspect --last # pick from recent runs
116
+ ```
117
+
118
+ Two-panel TUI showing run metadata, context budget bar, segment breakdown, and full message details.
119
+
120
+ ### snose diff
121
+
122
+ ```bash
123
+ snose diff # last two runs
124
+ snose diff run_a2f3 run_9c81 # specific runs
125
+ snose diff --last # pick two runs
126
+ ```
127
+
128
+ Shows segment-level delta table and auto-generated hypotheses explaining performance differences.
129
+
130
+ ### snose watch
131
+
132
+ ```bash
133
+ snose watch # attach to running agent
134
+ ```
135
+
136
+ Live-streaming view of context changes as your agent runs.
137
+
138
+ ## Python SDK
139
+
140
+ ```python
141
+ from starnose import trace, snapshot
142
+
143
+ @trace(name="my-run", tags=["prod"])
144
+ def my_agent(query: str) -> str:
145
+ ...
146
+
147
+ # Or as context manager
148
+ with trace("experiment-a") as run:
149
+ result = agent.run(query)
150
+ run.snapshot("pre-retrieval", messages)
151
+ ```
152
+
153
+ ## Integrations
154
+
155
+ ```python
156
+ # OpenAI monkeypatch
157
+ from starnose.integrations import patch_openai
158
+ patch_openai()
159
+
160
+ # Anthropic monkeypatch
161
+ from starnose.integrations import patch_anthropic
162
+ patch_anthropic()
163
+
164
+ # LangChain callback
165
+ from starnose.integrations import LangChainTracer
166
+ agent = AgentExecutor(..., callbacks=[LangChainTracer()])
167
+ ```
168
+
169
+ ## Proxy Chaining
170
+
171
+ If your agent already uses a proxy (e.g. LiteLLM):
172
+
173
+ ```bash
174
+ STARNOSE_UPSTREAM=http://localhost:4000 snose run python agent.py
175
+ ```
176
+
177
+ ## Philosophy
178
+
179
+ - **Local-first** — all data stays in `~/.starnose/runs.db`
180
+ - **Zero code changes** — proxy-based interception, just wrap your command
181
+ - **Pipe-friendly** — every command supports `--json` for scripting
182
+ - **Never breaks your agent** — proxy errors fail open, always
183
+
184
+ ## Configuration
185
+
186
+ ```bash
187
+ snose config # show current config
188
+ snose config set model gpt-4o # set default model
189
+ snose config set context_limit 128000 # override context limit
190
+ snose config reset # restore defaults
191
+ ```
192
+
193
+ Config stored in `~/.starnose/config.json`.
194
+ DB stored in `~/.starnose/runs.db` (override with `STARNOSE_DB` env var).
@@ -0,0 +1,156 @@
1
+ # starnose
2
+
3
+ Your agent is running in the dark. starnose turns the lights on.
4
+
5
+ `htop` for your agent's context window. Intercept, record, visualize, and compare the context sent to LLMs across agent runs — zero code changes required.
6
+
7
+ ![demo](demo.gif)
8
+
9
+ ## Install
10
+
11
+ ```bash
12
+ pip install starnose
13
+ ```
14
+
15
+ ## Quick Start
16
+
17
+ ```bash
18
+ # 1. Run your agent with context recording
19
+ snose run python my_agent.py
20
+
21
+ # 2. Inspect what your agent sent to the LLM
22
+ snose inspect
23
+
24
+ # 3. Compare two runs
25
+ snose diff
26
+ ```
27
+
28
+ That's it. No code changes. No config files. No API keys.
29
+
30
+ ## How It Works
31
+
32
+ starnose starts a local proxy that intercepts all OpenAI and Anthropic API calls from your agent. It records every message, token count, and response to a local SQLite database. You then use the TUI tools to inspect, compare, and optimize your agent's context window usage.
33
+
34
+ ## Works With
35
+
36
+ - **Claude Code** — `snose run -- claude`
37
+ - **Codex CLI** — `snose run -- codex`
38
+ - **LangChain** — callback handler included
39
+ - **OpenAI SDK** — monkeypatch or proxy
40
+ - **Anthropic SDK** — monkeypatch or proxy
41
+ - **Any OpenAI-compatible agent** — `snose run -- <command>`
42
+
43
+ ## Commands
44
+
45
+ | Command | Description |
46
+ |---------|-------------|
47
+ | `snose run <cmd>` | Run a command with context interception |
48
+ | `snose inspect [id]` | Inspect a run's context window (TUI) |
49
+ | `snose diff [a] [b]` | Compare two runs side-by-side |
50
+ | `snose watch` | Live-monitor a running agent |
51
+ | `snose stats` | Aggregate stats across runs |
52
+ | `snose optimize [id]` | Get optimization suggestions |
53
+ | `snose ls` | List recorded runs |
54
+ | `snose export [id]` | Export run data to JSON |
55
+ | `snose config` | View/modify configuration |
56
+
57
+ ### snose run
58
+
59
+ ```bash
60
+ snose run python my_agent.py
61
+ snose run --name "gpt4-chunked" --tag prod python agent.py
62
+ snose run --compare python agent.py # auto-diff against last run
63
+ ```
64
+
65
+ Flags:
66
+ - `--name` — human-readable name
67
+ - `--tag` — repeatable tags
68
+ - `--model` — override model for token counting
69
+ - `--compare` — auto-diff when complete
70
+ - `--no-proxy` — disable interception
71
+
72
+ ### snose inspect
73
+
74
+ ```bash
75
+ snose inspect # most recent run
76
+ snose inspect run_a2f3 # specific run
77
+ snose inspect --last # pick from recent runs
78
+ ```
79
+
80
+ Two-panel TUI showing run metadata, context budget bar, segment breakdown, and full message details.
81
+
82
+ ### snose diff
83
+
84
+ ```bash
85
+ snose diff # last two runs
86
+ snose diff run_a2f3 run_9c81 # specific runs
87
+ snose diff --last # pick two runs
88
+ ```
89
+
90
+ Shows segment-level delta table and auto-generated hypotheses explaining performance differences.
91
+
92
+ ### snose watch
93
+
94
+ ```bash
95
+ snose watch # attach to running agent
96
+ ```
97
+
98
+ Live-streaming view of context changes as your agent runs.
99
+
100
+ ## Python SDK
101
+
102
+ ```python
103
+ from starnose import trace, snapshot
104
+
105
+ @trace(name="my-run", tags=["prod"])
106
+ def my_agent(query: str) -> str:
107
+ ...
108
+
109
+ # Or as context manager
110
+ with trace("experiment-a") as run:
111
+ result = agent.run(query)
112
+ run.snapshot("pre-retrieval", messages)
113
+ ```
114
+
115
+ ## Integrations
116
+
117
+ ```python
118
+ # OpenAI monkeypatch
119
+ from starnose.integrations import patch_openai
120
+ patch_openai()
121
+
122
+ # Anthropic monkeypatch
123
+ from starnose.integrations import patch_anthropic
124
+ patch_anthropic()
125
+
126
+ # LangChain callback
127
+ from starnose.integrations import LangChainTracer
128
+ agent = AgentExecutor(..., callbacks=[LangChainTracer()])
129
+ ```
130
+
131
+ ## Proxy Chaining
132
+
133
+ If your agent already uses a proxy (e.g. LiteLLM):
134
+
135
+ ```bash
136
+ STARNOSE_UPSTREAM=http://localhost:4000 snose run python agent.py
137
+ ```
138
+
139
+ ## Philosophy
140
+
141
+ - **Local-first** — all data stays in `~/.starnose/runs.db`
142
+ - **Zero code changes** — proxy-based interception, just wrap your command
143
+ - **Pipe-friendly** — every command supports `--json` for scripting
144
+ - **Never breaks your agent** — proxy errors fail open, always
145
+
146
+ ## Configuration
147
+
148
+ ```bash
149
+ snose config # show current config
150
+ snose config set model gpt-4o # set default model
151
+ snose config set context_limit 128000 # override context limit
152
+ snose config reset # restore defaults
153
+ ```
154
+
155
+ Config stored in `~/.starnose/config.json`.
156
+ DB stored in `~/.starnose/runs.db` (override with `STARNOSE_DB` env var).
@@ -0,0 +1,127 @@
1
+ """Simple test agent that makes LLM calls through a built-in mock.
2
+
3
+ No API key needed. Run with:
4
+ STARNOSE_OPENAI_UPSTREAM=http://127.0.0.1:19876 snose run python3 examples/test_agent.py
5
+ """
6
+
7
+ import os
8
+ import json
9
+ import socket
10
+ import http.server
11
+ import threading
12
+ import time
13
+
14
+ import httpx
15
+
16
+
17
+ # ── Start mock LLM server on a fixed port ────────────────────────────────────
18
+
19
+ MOCK_PORT = 19876
20
+
21
+
22
+ class MockLLMHandler(http.server.BaseHTTPRequestHandler):
23
+ call_count = 0
24
+
25
+ def do_POST(self):
26
+ length = int(self.headers.get("Content-Length", 0))
27
+ body = json.loads(self.rfile.read(length)) if length else {}
28
+
29
+ MockLLMHandler.call_count += 1
30
+ messages = body.get("messages", [])
31
+ user_msg = next((m["content"] for m in reversed(messages) if m["role"] == "user"), "")
32
+
33
+ response = {
34
+ "id": f"chatcmpl-mock-{MockLLMHandler.call_count}",
35
+ "object": "chat.completion",
36
+ "model": body.get("model", "gpt-4o"),
37
+ "choices": [{
38
+ "index": 0,
39
+ "message": {
40
+ "role": "assistant",
41
+ "content": f"Mock response #{MockLLMHandler.call_count}: {user_msg[:80]}",
42
+ },
43
+ "finish_reason": "stop",
44
+ }],
45
+ "usage": {
46
+ "prompt_tokens": sum(len(m.get("content", "")) // 4 for m in messages),
47
+ "completion_tokens": 20,
48
+ },
49
+ }
50
+
51
+ data = json.dumps(response).encode()
52
+ self.send_response(200)
53
+ self.send_header("Content-Type", "application/json")
54
+ self.send_header("Content-Length", str(len(data)))
55
+ self.end_headers()
56
+ self.wfile.write(data)
57
+
58
+ def log_message(self, *args):
59
+ pass
60
+
61
+
62
+ mock_server = http.server.HTTPServer(("127.0.0.1", MOCK_PORT), MockLLMHandler)
63
+ mock_thread = threading.Thread(target=mock_server.serve_forever, daemon=True)
64
+ mock_thread.start()
65
+ time.sleep(0.1)
66
+
67
+ # ── Make calls through the starnose proxy ────────────────────────────────────
68
+
69
+ base_url = os.environ.get("OPENAI_BASE_URL", f"http://127.0.0.1:{MOCK_PORT}/v1")
70
+ api_key = os.environ.get("OPENAI_API_KEY", "test-key")
71
+
72
+ print(f"proxy: {base_url}")
73
+ print(f"run: {os.environ.get('STARNOSE_RUN_ID', 'none')}")
74
+ print()
75
+
76
+ scenarios = [
77
+ {
78
+ "messages": [
79
+ {"role": "system", "content": "You are a research assistant. Use tools to find information. Always cite your sources. Be thorough."},
80
+ {"role": "user", "content": "What are the latest developments in quantum computing?"},
81
+ ],
82
+ },
83
+ {
84
+ "messages": [
85
+ {"role": "system", "content": "You are a research assistant. Use tools to find information. Always cite your sources. Be thorough."},
86
+ {"role": "user", "content": "What are the latest developments in quantum computing?"},
87
+ {"role": "assistant", "content": "I found several key developments in quantum computing..."},
88
+ {"role": "user", "content": "Tell me more about error correction breakthroughs."},
89
+ {"role": "tool", "content": "Search results for 'quantum error correction 2024':\n" + "Result entry with details about quantum research. " * 50},
90
+ ],
91
+ },
92
+ {
93
+ "messages": [
94
+ {"role": "system", "content": "You are a research assistant. Use tools to find information. Always cite your sources. Be thorough."},
95
+ {"role": "user", "content": "What are the latest developments in quantum computing?"},
96
+ {"role": "assistant", "content": "I found several key developments in quantum computing..."},
97
+ {"role": "user", "content": "Tell me more about error correction breakthroughs."},
98
+ {"role": "tool", "content": "Search results for 'quantum error correction 2024':\n" + "Result entry with details about quantum research. " * 50},
99
+ {"role": "assistant", "content": "Based on my research, here are the key error correction breakthroughs..."},
100
+ {"role": "user", "content": "Now write a comprehensive report. Include all raw data."},
101
+ {"role": "tool", "content": "Full database export:\n" + "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " * 400},
102
+ ],
103
+ },
104
+ ]
105
+
106
+ print(f"Making {len(scenarios)} LLM calls...\n")
107
+
108
+ for i, scenario in enumerate(scenarios, 1):
109
+ resp = httpx.post(
110
+ f"{base_url}/chat/completions",
111
+ json={"model": "gpt-4o", "messages": scenario["messages"], "temperature": 0.7},
112
+ headers={"Authorization": f"Bearer {api_key}"},
113
+ timeout=10,
114
+ )
115
+
116
+ data = resp.json()
117
+ reply = data["choices"][0]["message"]["content"]
118
+ usage = data.get("usage", {})
119
+ print(f" Call {i}: {usage.get('prompt_tokens', 0):>5} in / {usage.get('completion_tokens', 0):>3} out")
120
+ print(f" → {reply[:70]}")
121
+ print()
122
+
123
+ mock_server.shutdown()
124
+ print("Done! Now try:")
125
+ print(" snose inspect")
126
+ print(" snose stats")
127
+ print(" snose optimize")
@@ -0,0 +1,62 @@
1
+ """Test the MITM proxy by making HTTPS requests through it.
2
+
3
+ Run with: snose run --mitm python3 examples/test_mitm.py
4
+ """
5
+
6
+ import os
7
+ import ssl
8
+
9
+ import httpx
10
+
11
+ print(f"HTTPS_PROXY: {os.environ.get('HTTPS_PROXY', 'not set')}")
12
+ print(f"Run ID: {os.environ.get('STARNOSE_RUN_ID', 'not set')}")
13
+ print()
14
+
15
+ # Build SSL context that trusts our CA
16
+ ca_path = os.environ.get("NODE_EXTRA_CA_CERTS")
17
+ if ca_path:
18
+ ssl_ctx = ssl.create_default_context()
19
+ ssl_ctx.load_verify_locations(ca_path)
20
+ # Also load default system CAs for non-intercepted hosts
21
+ ssl_ctx.load_default_certs()
22
+ verify = ssl_ctx
23
+ print(f"Using CA: {ca_path}")
24
+ else:
25
+ verify = True
26
+ print("No custom CA set, using defaults")
27
+
28
+ print()
29
+
30
+ # Test 1: Anthropic API interception
31
+ # The request will be intercepted by the MITM proxy, forwarded to the real
32
+ # api.anthropic.com, and the response recorded. Auth will fail (no real key)
33
+ # but starnose still records the request.
34
+ print("Test: Anthropic API call through MITM proxy...")
35
+ try:
36
+ resp = httpx.post(
37
+ "https://api.anthropic.com/v1/messages",
38
+ headers={
39
+ "x-api-key": "test-key-not-real",
40
+ "anthropic-version": "2023-06-01",
41
+ "content-type": "application/json",
42
+ },
43
+ json={
44
+ "model": "claude-3-haiku-20240307",
45
+ "max_tokens": 100,
46
+ "messages": [
47
+ {"role": "user", "content": "Hello, this is a test of starnose MITM!"}
48
+ ],
49
+ },
50
+ verify=verify,
51
+ timeout=15,
52
+ )
53
+ print(f" Status: {resp.status_code}")
54
+ print(f" (401 expected — no real API key, but request was intercepted)")
55
+ except httpx.ConnectError as e:
56
+ print(f" ConnectError: {e}")
57
+ print(" (If SSL error: run 'snose setup' to trust the CA)")
58
+ except Exception as e:
59
+ print(f" {type(e).__name__}: {e}")
60
+
61
+ print()
62
+ print("Done! Run 'snose inspect' to see recorded calls.")