stubllm 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. stubllm-0.1.0/.coverage +0 -0
  2. stubllm-0.1.0/.github/workflows/ci.yml +70 -0
  3. stubllm-0.1.0/LICENSE +21 -0
  4. stubllm-0.1.0/PKG-INFO +437 -0
  5. stubllm-0.1.0/README.md +400 -0
  6. stubllm-0.1.0/examples/advanced/fixtures/streaming.yaml +28 -0
  7. stubllm-0.1.0/examples/advanced/fixtures/tools.yaml +33 -0
  8. stubllm-0.1.0/examples/advanced/test_advanced.py +93 -0
  9. stubllm-0.1.0/examples/basic/fixtures/chat.yaml +38 -0
  10. stubllm-0.1.0/examples/basic/test_example.py +79 -0
  11. stubllm-0.1.0/pyproject.toml +86 -0
  12. stubllm-0.1.0/src/mockllm/__init__.py +5 -0
  13. stubllm-0.1.0/src/mockllm/cli.py +103 -0
  14. stubllm-0.1.0/src/mockllm/fixtures/__init__.py +7 -0
  15. stubllm-0.1.0/src/mockllm/fixtures/loader.py +65 -0
  16. stubllm-0.1.0/src/mockllm/fixtures/matcher.py +170 -0
  17. stubllm-0.1.0/src/mockllm/fixtures/models.py +145 -0
  18. stubllm-0.1.0/src/mockllm/providers/__init__.py +8 -0
  19. stubllm-0.1.0/src/mockllm/providers/anthropic.py +165 -0
  20. stubllm-0.1.0/src/mockllm/providers/base.py +67 -0
  21. stubllm-0.1.0/src/mockllm/providers/gemini.py +163 -0
  22. stubllm-0.1.0/src/mockllm/providers/openai.py +203 -0
  23. stubllm-0.1.0/src/mockllm/pytest_plugin/__init__.py +5 -0
  24. stubllm-0.1.0/src/mockllm/pytest_plugin/plugin.py +139 -0
  25. stubllm-0.1.0/src/mockllm/recorder/__init__.py +5 -0
  26. stubllm-0.1.0/src/mockllm/recorder/proxy.py +191 -0
  27. stubllm-0.1.0/src/mockllm/server.py +238 -0
  28. stubllm-0.1.0/src/mockllm/streaming/__init__.py +5 -0
  29. stubllm-0.1.0/src/mockllm/streaming/sse.py +79 -0
  30. stubllm-0.1.0/src/stubllm/__init__.py +5 -0
  31. stubllm-0.1.0/src/stubllm/cli.py +103 -0
  32. stubllm-0.1.0/src/stubllm/fixtures/__init__.py +7 -0
  33. stubllm-0.1.0/src/stubllm/fixtures/loader.py +65 -0
  34. stubllm-0.1.0/src/stubllm/fixtures/matcher.py +170 -0
  35. stubllm-0.1.0/src/stubllm/fixtures/models.py +145 -0
  36. stubllm-0.1.0/src/stubllm/providers/__init__.py +8 -0
  37. stubllm-0.1.0/src/stubllm/providers/anthropic.py +165 -0
  38. stubllm-0.1.0/src/stubllm/providers/base.py +67 -0
  39. stubllm-0.1.0/src/stubllm/providers/gemini.py +163 -0
  40. stubllm-0.1.0/src/stubllm/providers/openai.py +203 -0
  41. stubllm-0.1.0/src/stubllm/pytest_plugin/__init__.py +5 -0
  42. stubllm-0.1.0/src/stubllm/pytest_plugin/plugin.py +139 -0
  43. stubllm-0.1.0/src/stubllm/recorder/__init__.py +5 -0
  44. stubllm-0.1.0/src/stubllm/recorder/proxy.py +191 -0
  45. stubllm-0.1.0/src/stubllm/server.py +238 -0
  46. stubllm-0.1.0/src/stubllm/streaming/__init__.py +5 -0
  47. stubllm-0.1.0/src/stubllm/streaming/sse.py +79 -0
  48. stubllm-0.1.0/tests/__init__.py +0 -0
  49. stubllm-0.1.0/tests/test_cli.py +79 -0
  50. stubllm-0.1.0/tests/test_fixtures.py +150 -0
  51. stubllm-0.1.0/tests/test_matching.py +171 -0
  52. stubllm-0.1.0/tests/test_providers/__init__.py +0 -0
  53. stubllm-0.1.0/tests/test_providers/test_anthropic.py +127 -0
  54. stubllm-0.1.0/tests/test_providers/test_gemini.py +101 -0
  55. stubllm-0.1.0/tests/test_providers/test_openai.py +85 -0
  56. stubllm-0.1.0/tests/test_pytest_plugin.py +138 -0
  57. stubllm-0.1.0/tests/test_recorder.py +96 -0
  58. stubllm-0.1.0/tests/test_server.py +162 -0
  59. stubllm-0.1.0/tests/test_server_live.py +82 -0
  60. stubllm-0.1.0/tests/test_streaming.py +93 -0
stubllm-0.1.0/.coverage — Binary file (diff not shown)
stubllm-0.1.0/.github/workflows/ci.yml ADDED
@@ -0,0 +1,70 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main, develop]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ jobs:
10
+ test:
11
+ runs-on: ubuntu-latest
12
+ strategy:
13
+ matrix:
14
+ python-version: ["3.10", "3.11", "3.12"]
15
+
16
+ steps:
17
+ - uses: actions/checkout@v4
18
+
19
+ - name: Set up Python ${{ matrix.python-version }}
20
+ uses: actions/setup-python@v5
21
+ with:
22
+ python-version: ${{ matrix.python-version }}
23
+
24
+ - name: Install dependencies
25
+ run: |
26
+ python -m pip install --upgrade pip
27
+ pip install -e ".[dev]"
28
+
29
+ - name: Lint with ruff
30
+ run: ruff check src/ tests/
31
+
32
+ - name: Type check with mypy
33
+ run: mypy src/mockllm --ignore-missing-imports
34
+ continue-on-error: true
35
+
36
+ - name: Run tests with coverage
37
+ run: |
38
+ pytest tests/ \
39
+ --cov=mockllm \
40
+ --cov-report=term-missing \
41
+ --cov-report=xml \
42
+ --cov-fail-under=80 \
43
+ -v
44
+
45
+ - name: Upload coverage to Codecov
46
+ uses: codecov/codecov-action@v4
47
+ with:
48
+ file: ./coverage.xml
49
+ fail_ci_if_error: false
50
+
51
+ build:
52
+ runs-on: ubuntu-latest
53
+ steps:
54
+ - uses: actions/checkout@v4
55
+
56
+ - name: Set up Python
57
+ uses: actions/setup-python@v5
58
+ with:
59
+ python-version: "3.11"
60
+
61
+ - name: Install build tools
62
+ run: pip install build
63
+
64
+ - name: Build package
65
+ run: python -m build
66
+
67
+ - name: Check package
68
+ run: |
69
+ pip install twine
70
+ twine check dist/*
stubllm-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 mockllm contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
stubllm-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,437 @@
1
+ Metadata-Version: 2.4
2
+ Name: stubllm
3
+ Version: 0.1.0
4
+ Summary: Deterministic mock server for LLM APIs. Test your AI code without spending tokens.
5
+ Author: mockllm contributors
6
+ License: MIT
7
+ License-File: LICENSE
8
+ Keywords: anthropic,gemini,llm,mock,openai,stub,testing
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Topic :: Software Development :: Testing
17
+ Requires-Python: >=3.10
18
+ Requires-Dist: anyio>=4.0.0
19
+ Requires-Dist: click>=8.1.0
20
+ Requires-Dist: fastapi>=0.110.0
21
+ Requires-Dist: httpx>=0.26.0
22
+ Requires-Dist: pydantic>=2.5.0
23
+ Requires-Dist: pyyaml>=6.0
24
+ Requires-Dist: uvicorn[standard]>=0.27.0
25
+ Provides-Extra: dev
26
+ Requires-Dist: httpx>=0.26.0; extra == 'dev'
27
+ Requires-Dist: mypy>=1.8.0; extra == 'dev'
28
+ Requires-Dist: openai>=1.0.0; extra == 'dev'
29
+ Requires-Dist: pytest-asyncio>=0.23.0; extra == 'dev'
30
+ Requires-Dist: pytest-cov>=4.0; extra == 'dev'
31
+ Requires-Dist: pytest>=7.0; extra == 'dev'
32
+ Requires-Dist: ruff>=0.3.0; extra == 'dev'
33
+ Provides-Extra: pytest
34
+ Requires-Dist: pytest-asyncio>=0.23.0; extra == 'pytest'
35
+ Requires-Dist: pytest>=7.0; extra == 'pytest'
36
+ Description-Content-Type: text/markdown
37
+
38
+ # stubllm
39
+
40
+ **Deterministic mock server for LLM APIs. Test your AI code without spending tokens.**
41
+
42
+ [![CI](https://github.com/your-org/stubllm/actions/workflows/ci.yml/badge.svg)](https://github.com/your-org/stubllm/actions/workflows/ci.yml)
43
+ [![PyPI](https://img.shields.io/pypi/v/stubllm)](https://pypi.org/project/stubllm/)
44
+ [![Python](https://img.shields.io/pypi/pyversions/stubllm)](https://pypi.org/project/stubllm/)
45
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
46
+
47
+ Works with: **OpenAI** · **Anthropic** · **Google Gemini**
48
+
49
+ ---
50
+
51
+ ## 30-second quickstart
52
+
53
+ ```bash
54
+ # 1. Install
55
+ pip install stubllm
56
+
57
+ # 2. Create a fixture
58
+ mkdir fixtures
59
+ cat > fixtures/chat.yaml << 'EOF'
60
+ fixtures:
61
+ - name: "greeting"
62
+ match:
63
+ provider: openai
64
+ messages:
65
+ - role: user
66
+ content:
67
+ contains: "hello"
68
+ response:
69
+ content: "Hello! How can I help you today?"
70
+ EOF
71
+
72
+ # 3. Start the server
73
+ stubllm serve --port 8765
74
+
75
+ # 4. Point your code at it
76
+ export OPENAI_BASE_URL=http://localhost:8765/v1/
77
+ python your_app.py # no real API calls, no tokens spent
78
+ ```
79
+
80
+ ---
81
+
82
+ ## Why stubllm?
83
+
84
+ | | stubllm | Real API | Ollama |
85
+ |---|---|---|---|
86
+ | Cost | Free | Paid | Free |
87
+ | Speed | <1ms | 1-30s | 5-30s |
88
+ | Deterministic | ✅ | ❌ | ❌ |
89
+ | Works offline | ✅ | ❌ | ✅ |
90
+ | No GPU needed | ✅ | ✅ | ❌ |
91
+ | Pytest integration | ✅ | ❌ | ❌ |
92
+ | Fixtures / record-replay | ✅ | ❌ | ❌ |
93
+ | CI-friendly | ✅ | Slow/expensive | Heavy |
94
+
95
+ ---
96
+
97
+ ## Installation
98
+
99
+ ```bash
100
+ pip install stubllm
101
+
102
+ # With pytest support
103
+ pip install "stubllm[pytest]"
104
+ ```
105
+
106
+ ---
107
+
108
+ ## Fixture format
109
+
110
+ Fixtures are YAML (or JSON) files that map request patterns to responses.
111
+
112
+ ### Basic text response
113
+
114
+ ```yaml
115
+ fixtures:
116
+ - name: "greeting"
117
+ match:
118
+ provider: openai # openai | anthropic | gemini | any
119
+ endpoint: /v1/chat/completions # optional
120
+ model: "gpt-4o" # optional
121
+ messages:
122
+ - role: user
123
+ content:
124
+ contains: "hello" # exact | contains | regex
125
+ response:
126
+ content: "Hello! How can I help you today?"
127
+ usage:
128
+ prompt_tokens: 10
129
+ completion_tokens: 12
130
+ total_tokens: 22
131
+ ```
132
+
133
+ ### Tool call response
134
+
135
+ ```yaml
136
+ fixtures:
137
+ - name: "weather_tool"
138
+ match:
139
+ provider: openai
140
+ messages:
141
+ - role: user
142
+ content:
143
+ contains: "weather"
144
+ tools_present: true # only match when tools are provided
145
+ response:
146
+ tool_calls:
147
+ - id: "call_abc123"
148
+ type: function
149
+ function:
150
+ name: "get_weather"
151
+ arguments: '{"location": "Amsterdam"}'
152
+ ```
153
+
154
+ ### Streaming with delay
155
+
156
+ ```yaml
157
+ fixtures:
158
+ - name: "slow_story"
159
+ match:
160
+ messages:
161
+ - role: user
162
+ content:
163
+ contains: "story"
164
+ response:
165
+ content: "Once upon a time..."
166
+ stream_chunk_delay_ms: 50 # simulate realistic streaming speed
167
+ ```
168
+
169
+ ### Error response
170
+
171
+ ```yaml
172
+ fixtures:
173
+ - name: "rate_limit"
174
+ match:
175
+ messages:
176
+ - role: user
177
+ content:
178
+ contains: "trigger_error"
179
+ response:
180
+ content: '{"error": {"message": "Rate limit exceeded", "type": "rate_limit_error"}}'
181
+ http_status: 429
182
+ ```
183
+
184
+ ### Content match strategies
185
+
186
+ ```yaml
187
+ # Exact match (highest priority)
188
+ content:
189
+ exact: "Hello, world!"
190
+
191
+ # Substring match (case-insensitive)
192
+ content:
193
+ contains: "weather"
194
+
195
+ # Regular expression
196
+ content:
197
+ regex: "tell me.*joke"
198
+ ```
199
+
200
+ ### Matching priority
201
+
202
+ Higher specificity = higher priority. When multiple fixtures match, the most specific wins:
203
+
204
+ 1. `exact` message content (score: +10)
205
+ 2. `contains` message content (score: +5)
206
+ 3. `regex` message content (score: +4)
207
+ 4. `model` specified (score: +2)
208
+ 5. `tools_present` specified (score: +2)
209
+ 6. `provider` specified (score: +1)
210
+ 7. Fallback (no match criteria)
211
+
212
+ ---
213
+
214
+ ## Pytest plugin
215
+
216
+ ### Basic setup
217
+
218
+ ```python
219
+ # conftest.py — nothing needed, stubllm auto-registers as a pytest plugin
220
+ # The `stubllm_server` fixture is available automatically after installing stubllm
221
+ ```
222
+
223
+ ```python
224
+ # test_my_app.py
225
+ import openai
226
+ from stubllm.pytest_plugin import use_fixtures
227
+
228
+ @use_fixtures("fixtures/chat.yaml")
229
+ def test_greeting(stubllm_server):
230
+ client = openai.OpenAI(
231
+ base_url=stubllm_server.openai_url, # includes /v1/ — openai SDK needs this
232
+ api_key="test-key"
233
+ )
234
+ response = client.chat.completions.create(
235
+ model="gpt-4o",
236
+ messages=[{"role": "user", "content": "hello world"}]
237
+ )
238
+ assert "Hello" in response.choices[0].message.content
239
+ assert stubllm_server.call_count == 1
240
+ ```
241
+
242
+ ### Assertion helpers
243
+
244
+ ```python
245
+ def test_with_assertions(stubllm_server):
246
+ # ... make some calls ...
247
+
248
+ # Assert specific prompt was sent
249
+ stubllm_server.assert_called_with_prompt("hello")
250
+
251
+ # Assert number of calls
252
+ assert stubllm_server.call_count == 2
253
+
254
+ # Inspect all calls
255
+ for call in stubllm_server.calls:
256
+ print(call["path"], call["body"])
257
+
258
+ # Assert last call path
259
+ stubllm_server.assert_last_call_path("/v1/chat/completions")
260
+ ```
261
+
262
+ ### Multiple fixture files
263
+
264
+ ```python
265
+ @use_fixtures("fixtures/chat.yaml", "fixtures/tools.yaml")
266
+ def test_combined(stubllm_server):
267
+ ... # both fixture files are active for this test
268
+ ```
269
+
270
+ ---
271
+
272
+ ## Multi-provider support
273
+
274
+ ### OpenAI
275
+
276
+ ```python
277
+ import openai
278
+
279
+ client = openai.OpenAI(
280
+ base_url="http://localhost:8765/v1/", # note: /v1/ required — the OpenAI SDK does not add it
281
+ api_key="test-key"
282
+ )
283
+ response = client.chat.completions.create(
284
+ model="gpt-4o",
285
+ messages=[{"role": "user", "content": "hello"}]
286
+ )
287
+ ```
288
+
289
+ ### Anthropic
290
+
291
+ ```python
292
+ import anthropic
293
+
294
+ client = anthropic.Anthropic(
295
+ base_url="http://localhost:8765", # Anthropic SDK adds /v1/ itself
296
+ api_key="test-key"
297
+ )
298
+ message = client.messages.create(
299
+ model="claude-opus-4-6",
300
+ max_tokens=1024,
301
+ messages=[{"role": "user", "content": "hello"}]
302
+ )
303
+ ```
304
+
305
+ ### Google Gemini
306
+
307
+ ```python
308
+ import google.generativeai as genai
309
+
310
+ genai.configure(
311
+ api_key="test-key",
312
+ client_options={"api_endpoint": "localhost:8765"}
313
+ )
314
+ model = genai.GenerativeModel("gemini-pro")
315
+ response = model.generate_content("hello")
316
+ ```
317
+
318
+ ---
319
+
320
+ ## Streaming
321
+
322
+ All providers support streaming. Fixtures work identically — streaming is controlled by the `stream: true` parameter in the request, not the fixture.
323
+
324
+ ```python
325
+ # OpenAI streaming
326
+ stream = client.chat.completions.create(
327
+ model="gpt-4o",
328
+ messages=[{"role": "user", "content": "hello"}],
329
+ stream=True,
330
+ )
331
+ for chunk in stream:
332
+ print(chunk.choices[0].delta.content, end="", flush=True)
333
+ ```
334
+
335
+ Control streaming speed in fixtures:
336
+ ```yaml
337
+ response:
338
+ content: "A long streaming response..."
339
+ stream_chunk_delay_ms: 20 # default: 20ms between chunks
340
+ ```
341
+
342
+ ---
343
+
344
+ ## Record and replay
345
+
346
+ Record real API interactions for later replay:
347
+
348
+ ```bash
349
+ # Start in record mode (proxies to real OpenAI, saves fixtures)
350
+ stubllm record \
351
+ --target https://api.openai.com \
352
+ --fixture-dir ./recorded_fixtures
353
+
354
+ # Run your app against the recording proxy
355
+ OPENAI_BASE_URL=http://localhost:8765/v1/ python your_app.py
356
+
357
+ # Fixtures are saved to ./recorded_fixtures/
358
+ ls recorded_fixtures/
359
+ # recorded_hello_world_1706000000.yaml
360
+ # recorded_weather_query_1706000001.yaml
361
+ ```
362
+
363
+ Recorded fixtures are sanitized (API keys removed) and can be committed to your repo.
364
+
365
+ ---
366
+
367
+ ## CLI reference
368
+
369
+ ```bash
370
+ # Start server (auto-loads ./fixtures/ if it exists)
371
+ stubllm serve
372
+
373
+ # Custom port and fixture directory
374
+ stubllm serve --port 9000 --fixture-dir ./my-fixtures
375
+
376
+ # Multiple fixture directories
377
+ stubllm serve --fixture-dir ./fixtures/openai --fixture-dir ./fixtures/anthropic
378
+
379
+ # Individual fixture files
380
+ stubllm serve --fixture-file chat.yaml --fixture-file tools.yaml
381
+
382
+ # Record mode
383
+ stubllm record --target https://api.openai.com --fixture-dir ./recorded
384
+
385
+ # Version
386
+ stubllm --version
387
+ ```
388
+
389
+ ---
390
+
391
+ ## Structured output (JSON schema)
392
+
393
+ When `response_format: { type: "json_schema" }` is set, stubllm validates that the fixture response is valid JSON. If it's not, it wraps the content automatically.
394
+
395
+ ```yaml
396
+ fixtures:
397
+ - name: "structured"
398
+ match:
399
+ provider: openai
400
+ response:
401
+ content: '{"name": "Alice", "age": 30}' # must be valid JSON
402
+ ```
403
+
404
+ ---
405
+
406
+ ## Project structure
407
+
408
+ ```
409
+ stubllm/
410
+ ├── src/stubllm/
411
+ │ ├── fixtures/ # YAML/JSON loading, Pydantic models, matching engine
412
+ │ ├── providers/ # OpenAI, Anthropic, Gemini endpoint handlers
413
+ │ ├── streaming/ # SSE streaming simulation
414
+ │ ├── recorder/ # Record-and-replay proxy
415
+ │ ├── pytest_plugin/ # pytest fixtures and @use_fixtures decorator
416
+ │ ├── server.py # FastAPI app factory
417
+ │ └── cli.py # click CLI
418
+ ├── tests/ # >80% coverage
419
+ └── examples/ # Working examples (basic + advanced)
420
+ ```
421
+
422
+ ---
423
+
424
+ ## Contributing
425
+
426
+ ```bash
427
+ git clone https://github.com/your-org/stubllm
428
+ cd stubllm
429
+ pip install -e ".[dev]"
430
+ pytest tests/ -v
431
+ ```
432
+
433
+ ---
434
+
435
+ ## License
436
+
437
+ MIT