tokenary 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,28 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ jobs:
10
+ test:
11
+ runs-on: ubuntu-latest
12
+
13
+ steps:
14
+ - uses: actions/checkout@v4
15
+
16
+ - name: Install uv
17
+ uses: astral-sh/setup-uv@v4
18
+ with:
19
+ enable-cache: true
20
+
21
+ - name: Set up Python
22
+ run: uv python install 3.14
23
+
24
+ - name: Install dependencies
25
+ run: uv sync --group dev
26
+
27
+ - name: Run tests
28
+ run: uv run pytest
@@ -0,0 +1,96 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ temp/
7
+
8
+ # Virtual environments
9
+ venv/
10
+ env/
11
+ .venv/
12
+ .env/
13
+
14
+ # Mkdocs
15
+ site/
16
+
17
+ # Distribution / packaging
18
+ build/
19
+ develop-eggs/
20
+ dist/
21
+ downloads/
22
+ eggs/
23
+ .eggs/
24
+ lib/
25
+ lib64/
26
+ parts/
27
+ sdist/
28
+ var/
29
+ *.egg-info/
30
+ .installed.cfg
31
+ *.egg
32
+
33
+ # PyInstaller
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Jupyter Notebook checkpoints
55
+ .ipynb_checkpoints
56
+
57
+ # pyenv
58
+ .python-version
59
+
60
+ # mypy
61
+ .mypy_cache/
62
+ .dmypy.json
63
+ dmypy.json
64
+
65
+ # Pyre
66
+ .pyre/
67
+
68
+ # Pytype
69
+ .pytype/
70
+
71
+ # Cython debug symbols
72
+ cython_debug/
73
+
74
+ # VS Code
75
+ .vscode/
76
+
77
+ # JetBrains IDEs (PyCharm, etc.)
78
+ .idea/
79
+
80
+ # MacOS
81
+ .DS_Store
82
+
83
+ # Windows
84
+ Thumbs.db
85
+ ehthumbs.db
86
+ desktop.ini
87
+
88
+ # dotenv files
89
+ .env
90
+
91
+ docs/node_modules/
92
+
93
+ .ruff_cache/
94
+ bandit-report.json
95
+
96
+ .claude/
@@ -0,0 +1,27 @@
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.6.0
4
+ hooks:
5
+ - id: trailing-whitespace
6
+ - id: end-of-file-fixer
7
+ - id: check-yaml
8
+ exclude: ^mkdocs\.yml$
9
+ - id: check-toml
10
+ - id: check-merge-conflict
11
+ - id: check-added-large-files
12
+ args: ["--maxkb=1000"]
13
+ exclude: ^tokenary/_generated\.py$
14
+ - id: check-case-conflict
15
+
16
+ - repo: https://github.com/astral-sh/ruff-pre-commit
17
+ rev: v0.13.1
18
+ hooks:
19
+ - id: ruff
20
+ args: [--fix, --exit-non-zero-on-fix]
21
+ - id: ruff-format
22
+
23
+ - repo: https://github.com/asottile/pyupgrade
24
+ rev: v3.15.2
25
+ hooks:
26
+ - id: pyupgrade
27
+ args: [--py39-plus]
@@ -0,0 +1,95 @@
1
+ Metadata-Version: 2.4
2
+ Name: tokenary
3
+ Version: 0.1.0
4
+ Summary: Minimal Python library to calculate LLM API costs based on the LiteLLM model catalog
5
+ Requires-Python: >=3.14
6
+ Requires-Dist: pydantic>=2.13.4
7
+ Provides-Extra: test
8
+ Requires-Dist: pytest-cov>=7.0.0; extra == 'test'
9
+ Requires-Dist: pytest>=9.0.0; extra == 'test'
10
+ Description-Content-Type: text/markdown
11
+
12
+ # tokenary
13
+
14
+ Minimal Python library to calculate LLM API costs based on the LiteLLM model catalog.
15
+
16
+ ## Installation
17
+
18
+ ```bash
19
+ pip install tokenary
20
+ ```
21
+
22
+ ## Usage
23
+
24
+ ### Functional API
25
+
26
+ ```python
27
+ import tokenary
28
+ from tokenary import ModelName
29
+
30
+ result = tokenary.calculate(
31
+ model=ModelName.AZURE_GPT_3_5_TURBO,
32
+ input_tokens=1000,
33
+ output_tokens=500,
34
+ )
35
+
36
+ print(result.total_cost)
37
+ print(result.model_dump())
38
+ ```
39
+
40
+ ### Request object
41
+
42
+ ```python
43
+ from tokenary import ModelName, UsageCostRequest, calculate
44
+
45
+ request = UsageCostRequest(
46
+ model=ModelName.AZURE_GPT_3_5_TURBO,
47
+ input_tokens=2000,
48
+ output_tokens=800,
49
+ reasoning_tokens=200,
50
+ )
51
+
52
+ result = calculate(request)
53
+ print(result.model_dump())
54
+ ```
55
+
56
+ ### Reasoning tokens (e.g. o1)
57
+
58
+ ```python
59
+ from tokenary import ModelName, calculate
60
+
61
+ result = calculate(
62
+ model=ModelName.O1,
63
+ input_tokens=500,
64
+ output_tokens=200,
65
+ reasoning_tokens=300,
66
+ )
67
+
68
+ print(f"Total: ${result.total_cost:.6f}")
69
+ print(f" Reasoning: ${result.reasoning_cost:.6f}")
70
+ ```
71
+
72
+ ### All supported parameters
73
+
74
+ | Parameter | Type | Description |
75
+ | --------------------------- | ----------- | ------------------------------ |
76
+ | `model` | `ModelName` | Model identifier |
77
+ | `input_tokens` | `int` | Number of input tokens |
78
+ | `output_tokens` | `int` | Number of output tokens |
79
+ | `reasoning_tokens` | `int` | Reasoning tokens (e.g. o1) |
80
+ | `audio_input_tokens` | `int` | Audio input tokens |
81
+ | `generated_images` | `int` | Number of generated images |
82
+ | `code_interpreter_sessions` | `int` | Code interpreter sessions |
83
+ | `file_search_calls` | `int` | File search API calls |
84
+ | `file_search_gb_days` | `float` | File search storage (GB-days) |
85
+ | `vector_store_gb_days` | `float` | Vector store storage (GB-days) |
86
+
87
+ The returned `CostBreakdown` object contains per-category costs (`input_cost`, `output_cost`, `reasoning_cost`, …) and a `total_cost`, all in USD.
88
+
89
+ ## Generate pricing artifacts
90
+
91
+ Re-generate the bundled pricing data from the LiteLLM catalog:
92
+
93
+ ```bash
94
+ python -m tokenary.generator
95
+ ```
@@ -0,0 +1,84 @@
1
+ # tokenary
2
+
3
+ Minimal Python library to calculate LLM API costs based on the LiteLLM model catalog.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install tokenary
9
+ ```
10
+
11
+ ## Usage
12
+
13
+ ### Functional API
14
+
15
+ ```python
16
+ import tokenary
17
+ from tokenary import ModelName
18
+
19
+ result = tokenary.calculate(
20
+ model=ModelName.AZURE_GPT_3_5_TURBO,
21
+ input_tokens=1000,
22
+ output_tokens=500,
23
+ )
24
+
25
+ print(result.total_cost)
26
+ print(result.model_dump())
27
+ ```
28
+
29
+ ### Request object
30
+
31
+ ```python
32
+ from tokenary import ModelName, UsageCostRequest, calculate
33
+
34
+ request = UsageCostRequest(
35
+ model=ModelName.AZURE_GPT_3_5_TURBO,
36
+ input_tokens=2000,
37
+ output_tokens=800,
38
+ reasoning_tokens=200,
39
+ )
40
+
41
+ result = calculate(request)
42
+ print(result.model_dump())
43
+ ```
44
+
45
+ ### Reasoning tokens (e.g. o1)
46
+
47
+ ```python
48
+ from tokenary import ModelName, calculate
49
+
50
+ result = calculate(
51
+ model=ModelName.O1,
52
+ input_tokens=500,
53
+ output_tokens=200,
54
+ reasoning_tokens=300,
55
+ )
56
+
57
+ print(f"Total: ${result.total_cost:.6f}")
58
+ print(f" Reasoning: ${result.reasoning_cost:.6f}")
59
+ ```
60
+
61
+ ### All supported parameters
62
+
63
+ | Parameter | Type | Description |
64
+ | --------------------------- | ----------- | ------------------------------ |
65
+ | `model` | `ModelName` | Model identifier |
66
+ | `input_tokens` | `int` | Number of input tokens |
67
+ | `output_tokens` | `int` | Number of output tokens |
68
+ | `reasoning_tokens` | `int` | Reasoning tokens (e.g. o1) |
69
+ | `audio_input_tokens` | `int` | Audio input tokens |
70
+ | `generated_images` | `int` | Number of generated images |
71
+ | `code_interpreter_sessions` | `int` | Code interpreter sessions |
72
+ | `file_search_calls` | `int` | File search API calls |
73
+ | `file_search_gb_days` | `float` | File search storage (GB-days) |
74
+ | `vector_store_gb_days` | `float` | Vector store storage (GB-days) |
75
+
76
+ The returned `CostBreakdown` object contains per-category costs (`input_cost`, `output_cost`, `reasoning_cost`, …) and a `total_cost`, all in USD.
77
+
78
+ ## Generate pricing artifacts
79
+
80
+ Re-generate the bundled pricing data from the LiteLLM catalog:
81
+
82
+ ```bash
83
+ python -m tokenary.generator
84
+ ```