pyg-hyper-bench 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. pyg_hyper_bench-0.1.0/.github/ISSUE_TEMPLATE/bug_report.yml +108 -0
  2. pyg_hyper_bench-0.1.0/.github/ISSUE_TEMPLATE/config.yml +8 -0
  3. pyg_hyper_bench-0.1.0/.github/ISSUE_TEMPLATE/documentation.yml +99 -0
  4. pyg_hyper_bench-0.1.0/.github/ISSUE_TEMPLATE/feature_request.yml +92 -0
  5. pyg_hyper_bench-0.1.0/.github/pull_request_template.md +173 -0
  6. pyg_hyper_bench-0.1.0/.github/workflows/ci.yml +195 -0
  7. pyg_hyper_bench-0.1.0/.github/workflows/publish.yml +120 -0
  8. pyg_hyper_bench-0.1.0/.github/workflows/release.yml +78 -0
  9. pyg_hyper_bench-0.1.0/.gitignore +59 -0
  10. pyg_hyper_bench-0.1.0/.pre-commit-config.yaml +37 -0
  11. pyg_hyper_bench-0.1.0/.python-version +1 -0
  12. pyg_hyper_bench-0.1.0/CHANGELOG.md +47 -0
  13. pyg_hyper_bench-0.1.0/LICENSE +21 -0
  14. pyg_hyper_bench-0.1.0/PKG-INFO +598 -0
  15. pyg_hyper_bench-0.1.0/README.md +550 -0
  16. pyg_hyper_bench-0.1.0/pyproject.toml +203 -0
  17. pyg_hyper_bench-0.1.0/ruff.toml +118 -0
  18. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/__init__.py +57 -0
  19. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/evaluators/__init__.py +10 -0
  20. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/evaluators/multi_run.py +305 -0
  21. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/evaluators/single_run.py +215 -0
  22. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/protocols/__init__.py +17 -0
  23. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/protocols/base.py +56 -0
  24. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/protocols/clustering.py +128 -0
  25. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/protocols/link_prediction.py +285 -0
  26. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/protocols/node_classification.py +219 -0
  27. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/protocols/ssl_linear_evaluation.py +577 -0
  28. pyg_hyper_bench-0.1.0/src/pyg_hyper_bench/py.typed +0 -0
  29. pyg_hyper_bench-0.1.0/tests/__init__.py +1 -0
  30. pyg_hyper_bench-0.1.0/tests/test_clustering.py +232 -0
  31. pyg_hyper_bench-0.1.0/tests/test_integration.py +418 -0
  32. pyg_hyper_bench-0.1.0/tests/test_link_prediction.py +262 -0
  33. pyg_hyper_bench-0.1.0/tests/test_multi_run_evaluator.py +330 -0
  34. pyg_hyper_bench-0.1.0/tests/test_ssl_linear_evaluation.py +452 -0
  35. pyg_hyper_bench-0.1.0/uv.lock +2695 -0
@@ -0,0 +1,108 @@
1
+ name: Bug Report
2
+ description: Report a bug or issue with pyg-hyper-bench
3
+ title: "[Bug]: "
4
+ labels: ["bug", "needs-triage"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for reporting a bug! Please provide the following information to help us resolve the issue quickly.
10
+
11
+ - type: dropdown
12
+ id: bug-area
13
+ attributes:
14
+ label: Bug Area
15
+ description: Where did the issue occur?
16
+ options:
17
+ - Evaluation Protocols (NodeClassificationProtocol, etc.)
18
+ - Single-Run Evaluator (SingleRunEvaluator)
19
+ - Multi-Run Evaluator (MultiRunEvaluator)
20
+ - Statistical Aggregation (mean, std, CI)
21
+ - Data Splitting (transductive/inductive)
22
+ - Reproducibility/Seeds
23
+ - Type Annotations/Type Checking
24
+ - Documentation
25
+ - Installation/Dependencies
26
+ - Tests
27
+ - Other
28
+ validations:
29
+ required: true
30
+
31
+ - type: textarea
32
+ id: description
33
+ attributes:
34
+ label: Description
35
+ description: What happened? What did you expect to happen?
36
+ placeholder: |
37
+ Example:
38
+ - MultiRunEvaluator produces inconsistent results
39
+ - NodeClassificationProtocol split ratios are incorrect
40
+ - Type checking fails with ty
41
+ validations:
42
+ required: true
43
+
44
+ - type: textarea
45
+ id: reproduction
46
+ attributes:
47
+ label: Steps to Reproduce
48
+ description: Provide minimal code to reproduce the issue
49
+ placeholder: |
50
+ ```python
51
+ from pyg_hyper_bench import MultiRunEvaluator, NodeClassificationProtocol
52
+ from pyg_hyper_data.datasets import CoraCocitation
53
+
54
+ dataset = CoraCocitation()
55
+ protocol = NodeClassificationProtocol()
56
+ evaluator = MultiRunEvaluator(dataset, protocol, n_runs=5)
57
+ # Error occurs here
58
+ ```
59
+ value: |
60
+ ```python
61
+ # Your code here
62
+ ```
63
+ validations:
64
+ required: true
65
+
66
+ - type: textarea
67
+ id: environment
68
+ attributes:
69
+ label: Environment
70
+ description: Please provide your environment information
71
+ value: |
72
+ - OS: [e.g. Ubuntu 22.04, macOS 14.0, Windows 11]
73
+ - Python version: [e.g. 3.12.0]
74
+ - PyTorch version: [e.g. 2.9.0]
75
+ - PyTorch Geometric version: [e.g. 2.6.0]
76
+ - pyg-hyper-bench version: [e.g. 0.1.0]
77
+ - pyg-hyper-data version: [e.g. 0.1.0]
78
+ - Installation method: [uv / pip]
79
+ validations:
80
+ required: true
81
+
82
+ - type: textarea
83
+ id: error-output
84
+ attributes:
85
+ label: Error Output
86
+ description: Paste the full error message or traceback
87
+ render: shell
88
+ placeholder: |
89
+ Traceback (most recent call last):
90
+ ...
91
+
92
+ - type: textarea
93
+ id: additional-context
94
+ attributes:
95
+ label: Additional Context
96
+ description: Any additional information, screenshots, or related issues
97
+
98
+ - type: checkboxes
99
+ id: checklist
100
+ attributes:
101
+ label: Checklist
102
+ options:
103
+ - label: I have searched existing issues and this is not a duplicate
104
+ required: true
105
+ - label: I am using the latest version of pyg-hyper-bench
106
+ required: true
107
+ - label: I have verified this is a bug in pyg-hyper-bench (not PyTorch/PyG/pyg-hyper-data)
108
+ required: false
@@ -0,0 +1,8 @@
1
+ blank_issues_enabled: false
2
+ contact_links:
3
+ - name: Question or Discussion
4
+ url: https://github.com/nishide-dev/pyg-hyper-bench/discussions
5
+ about: Ask questions or discuss ideas with the community
6
+ - name: pyg-hyper-data Issues
7
+ url: https://github.com/nishide-dev/pyg-hyper-data/issues
8
+ about: Report issues with datasets or data utilities
@@ -0,0 +1,99 @@
1
+ name: Documentation Issue
2
+ description: Report an issue with documentation or suggest improvements
3
+ title: "[Docs]: "
4
+ labels: ["documentation", "needs-triage"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for helping improve our documentation!
10
+
11
+ - type: dropdown
12
+ id: doc-area
13
+ attributes:
14
+ label: Documentation Area
15
+ description: Which part of the documentation needs improvement?
16
+ options:
17
+ - README.md
18
+ - API Documentation (docstrings)
19
+ - Usage Examples
20
+ - DESIGN.md
21
+ - Installation Instructions
22
+ - Quick Start Guide
23
+ - Integration Tests/Examples
24
+ - Type Annotations
25
+ - Other
26
+ validations:
27
+ required: true
28
+
29
+ - type: dropdown
30
+ id: issue-type
31
+ attributes:
32
+ label: Issue Type
33
+ description: What type of documentation issue is this?
34
+ options:
35
+ - Incorrect Information
36
+ - Missing Information
37
+ - Unclear/Confusing
38
+ - Outdated
39
+ - Typo/Grammar
40
+ - Code Example Doesn't Work
41
+ - Needs More Examples
42
+ - Other
43
+ validations:
44
+ required: true
45
+
46
+ - type: textarea
47
+ id: description
48
+ attributes:
49
+ label: Description
50
+ description: Describe the documentation issue
51
+ placeholder: |
52
+ Example:
53
+ - The example in README.md for MultiRunEvaluator is outdated
54
+ - Missing documentation for custom metrics
55
+ - Unclear explanation of statistical measures
56
+ validations:
57
+ required: true
58
+
59
+ - type: textarea
60
+ id: current
61
+ attributes:
62
+ label: Current Documentation
63
+ description: Quote the current documentation (if applicable)
64
+ placeholder: |
65
+ > Current text from docs...
66
+
67
+ - type: textarea
68
+ id: suggested
69
+ attributes:
70
+ label: Suggested Improvement
71
+ description: How should the documentation be improved?
72
+ placeholder: |
73
+ Suggested changes or additions...
74
+
75
+ - type: textarea
76
+ id: location
77
+ attributes:
78
+ label: Location
79
+ description: Where in the documentation is this issue?
80
+ placeholder: |
81
+ - File: README.md, line 123
82
+ - Section: "Quick Start > Multi-Run Evaluation"
83
+ - Function: MultiRunEvaluator.run_evaluation()
84
+
85
+ - type: textarea
86
+ id: additional-context
87
+ attributes:
88
+ label: Additional Context
89
+ description: Any additional information or screenshots
90
+
91
+ - type: checkboxes
92
+ id: checklist
93
+ attributes:
94
+ label: Checklist
95
+ options:
96
+ - label: I have checked the latest documentation
97
+ required: true
98
+ - label: I am willing to submit a PR to fix this
99
+ required: false
@@ -0,0 +1,92 @@
1
+ name: Feature Request
2
+ description: Suggest a new feature or enhancement for pyg-hyper-bench
3
+ title: "[Feature]: "
4
+ labels: ["enhancement", "needs-triage"]
5
+ body:
6
+ - type: markdown
7
+ attributes:
8
+ value: |
9
+ Thank you for suggesting a feature! Please provide details to help us understand your request.
10
+
11
+ - type: dropdown
12
+ id: feature-area
13
+ attributes:
14
+ label: Feature Area
15
+ description: Which area would this feature affect?
16
+ options:
17
+ - New Evaluation Protocol (link prediction, clustering, etc.)
18
+ - Evaluator Enhancement (SingleRunEvaluator, MultiRunEvaluator)
19
+ - Statistical Methods (new metrics, aggregation methods)
20
+ - Data Splitting (new split strategies)
21
+ - PyTorch Lightning Integration
22
+ - Visualization/Reporting
23
+ - Performance Optimization
24
+ - API/Usability
25
+ - Documentation
26
+ - Tests
27
+ - Other
28
+ validations:
29
+ required: true
30
+
31
+ - type: textarea
32
+ id: problem
33
+ attributes:
34
+ label: Problem Statement
35
+ description: What problem would this feature solve?
36
+ placeholder: |
37
+ Example:
38
+ - There is no protocol for hyperedge prediction tasks
39
+ - Multi-run results are difficult to visualize
40
+ - Need support for custom metrics
41
+ validations:
42
+ required: true
43
+
44
+ - type: textarea
45
+ id: solution
46
+ attributes:
47
+ label: Proposed Solution
48
+ description: How would you like to see this feature implemented?
49
+ placeholder: |
50
+ Example:
51
+ - Add LinkPredictionProtocol with AUC/MRR metrics
52
+ - Add plot() method to MultiRunResult for visualization
53
+ - Allow custom metric functions in protocols
54
+
55
+ - type: textarea
56
+ id: alternatives
57
+ attributes:
58
+ label: Alternatives Considered
59
+ description: Have you considered any alternative solutions or workarounds?
60
+
61
+ - type: textarea
62
+ id: example
63
+ attributes:
64
+ label: Example Usage
65
+ description: Show how you would use this feature (if applicable)
66
+ render: python
67
+ placeholder: |
68
+ # Example of how the feature would be used
69
+ from pyg_hyper_bench import LinkPredictionProtocol
70
+
71
+ protocol = LinkPredictionProtocol(
72
+ split_type="hyperedge_split",
73
+ metrics=["auc", "mrr", "hits@10"]
74
+ )
75
+
76
+ - type: textarea
77
+ id: additional-context
78
+ attributes:
79
+ label: Additional Context
80
+ description: Any additional information, references, or related work
81
+
82
+ - type: checkboxes
83
+ id: checklist
84
+ attributes:
85
+ label: Checklist
86
+ options:
87
+ - label: I have searched existing issues and this is not a duplicate
88
+ required: true
89
+ - label: This feature aligns with pyg-hyper-bench's purpose (benchmarking)
90
+ required: true
91
+ - label: I am willing to contribute a PR for this feature
92
+ required: false
@@ -0,0 +1,173 @@
1
+ ## Summary
2
+
3
+ <!-- Briefly describe what this PR does -->
4
+
5
+ ## Type of Change
6
+
7
+ <!-- Check all that apply -->
8
+
9
+ - [ ] 🐛 Bug fix (fixes an issue with existing functionality)
10
+ - [ ] ✨ New feature (adds new functionality)
11
+ - [ ] 📝 Documentation (documentation only changes)
12
+ - [ ] 🎨 Style (formatting, whitespace, no functional changes)
13
+ - [ ] ♻️ Refactoring (code restructuring without changing behavior)
14
+ - [ ] ⚡ Performance improvement
15
+ - [ ] 🔒 Type safety (type annotations, type checking fixes)
16
+ - [ ] 🧪 Tests (adding or updating tests)
17
+ - [ ] 🔧 Configuration (pyproject.toml, CI/CD, etc.)
18
+ - [ ] 📊 New Protocol (new evaluation protocol)
19
+
20
+ ## Related Issue
21
+
22
+ <!-- Link to related issues -->
23
+
24
+ Closes #<!-- Issue number -->
25
+
26
+ ## Changes Made
27
+
28
+ <!-- Describe your changes in detail -->
29
+
30
+ ### Modified Files
31
+
32
+ - `file/path.py`: Description of changes
33
+
34
+ ### Impact on Existing Code
35
+
36
+ <!-- How does this change affect existing functionality? -->
37
+
38
+ - [ ] New protocol added
39
+ - [ ] API changes (breaking changes)
40
+ - [ ] Evaluation logic changes
41
+ - [ ] Performance improvements
42
+ - [ ] Bug fixes
43
+ - [ ] Documentation only
44
+ - [ ] No impact on existing functionality
45
+
46
+ ### Breaking Changes
47
+
48
+ <!-- Check if this introduces breaking changes -->
49
+
50
+ - [ ] Yes (describe below)
51
+ - [ ] No
52
+
53
+ <!-- Details of breaking changes and migration guide -->
54
+ ```
55
+
56
+ ```
57
+
58
+ ## Testing
59
+
60
+ <!-- Describe how you tested your changes -->
61
+
62
+ ### Test Commands
63
+
64
+ ```bash
65
+ # Run all tests
66
+ uv run pytest tests/ -v
67
+
68
+ # Run specific test file
69
+ uv run pytest tests/test_integration.py -v
70
+
71
+ # Check code quality
72
+ uv run ruff check
73
+ uv run ruff format .
74
+ uv run ty check
75
+
76
+ # Run pre-commit hooks
77
+ uv run pre-commit run --all-files
78
+ ```
79
+
80
+ ### Test Results
81
+
82
+ ```bash
83
+ # Paste test output here
84
+ ```
85
+
86
+ ### Test Environment
87
+
88
+ - OS: <!-- e.g. Ubuntu 22.04, macOS 14.0 -->
89
+ - Python version: <!-- e.g. 3.12.0 -->
90
+ - PyTorch version: <!-- e.g. 2.9.0 -->
91
+ - PyTorch Geometric version: <!-- e.g. 2.6.0 -->
92
+ - CUDA version (if applicable): <!-- e.g. 12.6 -->
93
+ - GPU (if applicable): <!-- e.g. RTX 4090 -->
94
+
95
+ ## Screenshots (if applicable)
96
+
97
+ <!-- Add screenshots for visual changes or output examples -->
98
+
99
+ ## Checklist
100
+
101
+ <!-- Please check all items before requesting review -->
102
+
103
+ ### Code Quality
104
+
105
+ - [ ] Code follows the project's style guidelines
106
+ - [ ] Code is well-commented and self-documenting
107
+ - [ ] All tests pass (`uv run pytest tests/`)
108
+ - [ ] Code linting passes (`uv run ruff check`)
109
+ - [ ] Code formatting is correct (`uv run ruff format .`)
110
+ - [ ] Type checking passes (`uv run ty check`)
111
+ - [ ] Pre-commit hooks pass (`uv run pre-commit run --all-files`)
112
+
113
+ ### Testing
114
+
115
+ - [ ] Added tests for new functionality
116
+ - [ ] Updated existing tests (if needed)
117
+ - [ ] All edge cases are covered
118
+ - [ ] Tests are reproducible with fixed seeds
119
+ - [ ] Integration tests pass (if applicable)
120
+
121
+ ### Documentation
122
+
123
+ - [ ] Updated README.md (if needed)
124
+ - [ ] Added/updated docstrings for new/modified functions
125
+ - [ ] Added usage examples (if new feature)
126
+ - [ ] Updated type annotations
127
+ - [ ] Added comments for complex logic
128
+
129
+ ### Protocols (if applicable)
130
+
131
+ - [ ] Protocol implements BenchmarkProtocol interface
132
+ - [ ] Data splitting works correctly
133
+ - [ ] Evaluation metrics are accurate
134
+ - [ ] Reproducible with seeds
135
+ - [ ] Example usage is provided
136
+
137
+ ### Evaluators (if applicable)
138
+
139
+ - [ ] Evaluator works with all protocols
140
+ - [ ] Statistical aggregation is correct (if multi-run)
141
+ - [ ] Confidence intervals are calculated properly
142
+ - [ ] Seed management works correctly
143
+ - [ ] Example usage is provided
144
+
145
+ ### Performance (if applicable)
146
+
147
+ - [ ] Tested with large datasets
148
+ - [ ] Memory usage is reasonable
149
+ - [ ] No significant performance regression
150
+ - [ ] Benchmarked against baseline (if optimization)
151
+
152
+ ### Backward Compatibility
153
+
154
+ - [ ] Changes are backward compatible
155
+ - [ ] If breaking changes exist, migration guide is provided
156
+ - [ ] Deprecation warnings added (if removing features)
157
+
158
+ ## Reviewer Notes
159
+
160
+ <!-- Any specific areas you'd like reviewers to focus on, or questions you have -->
161
+
162
+ ---
163
+
164
+ **Thank you for your contribution!** 🙏
165
+
166
+ <!--
167
+ Review Guidelines:
168
+ - Check code quality and style
169
+ - Verify tests pass and cover edge cases
170
+ - Review documentation completeness
171
+ - Test the changes locally if possible
172
+ - Provide constructive feedback
173
+ -->
@@ -0,0 +1,195 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main, dev]
6
+ pull_request:
7
+ branches: [main, dev]
8
+
9
+ jobs:
10
+ code-quality:
11
+ name: Code Quality Checks
12
+ runs-on: ubuntu-latest
13
+
14
+ steps:
15
+ - name: Checkout code
16
+ uses: actions/checkout@v4
17
+
18
+ - name: Install uv
19
+ uses: astral-sh/setup-uv@v4
20
+ with:
21
+ enable-cache: true
22
+ cache-dependency-glob: "uv.lock"
23
+
24
+ - name: Set up Python
25
+ run: uv python install 3.12
26
+
27
+ - name: Install dependencies
28
+ run: uv sync --all-extras
29
+
30
+ - name: Check code formatting with Ruff
31
+ run: uv run ruff format --check .
32
+
33
+ - name: Lint code with Ruff
34
+ run: uv run ruff check .
35
+
36
+ - name: Type check with ty
37
+ run: uv run ty check
38
+
39
+ tests:
40
+ name: Tests (Python ${{ matrix.python-version }})
41
+ runs-on: ubuntu-latest
42
+ strategy:
43
+ fail-fast: false
44
+ matrix:
45
+ python-version: ["3.12", "3.13"]
46
+
47
+ steps:
48
+ - name: Checkout code
49
+ uses: actions/checkout@v4
50
+
51
+ - name: Install uv
52
+ uses: astral-sh/setup-uv@v4
53
+ with:
54
+ enable-cache: true
55
+ cache-dependency-glob: "uv.lock"
56
+
57
+ - name: Set up Python ${{ matrix.python-version }}
58
+ run: uv python install ${{ matrix.python-version }}
59
+
60
+ - name: Install dependencies
61
+ run: uv sync --all-extras
62
+
63
+ - name: Run unit tests
64
+ run: uv run pytest tests/test_multi_run_evaluator.py -v --cov=pyg_hyper_bench --cov-report=xml --cov-report=term
65
+
66
+ - name: Run integration tests
67
+ run: uv run pytest tests/test_integration.py -v
68
+
69
+ - name: Upload coverage to Codecov
70
+ uses: codecov/codecov-action@v4
71
+ if: matrix.python-version == '3.12'
72
+ with:
73
+ file: ./coverage.xml
74
+ fail_ci_if_error: false
75
+
76
+ pre-commit:
77
+ name: Pre-commit Hooks
78
+ runs-on: ubuntu-latest
79
+
80
+ steps:
81
+ - name: Checkout code
82
+ uses: actions/checkout@v4
83
+
84
+ - name: Install uv
85
+ uses: astral-sh/setup-uv@v4
86
+ with:
87
+ enable-cache: true
88
+ cache-dependency-glob: "uv.lock"
89
+
90
+ - name: Set up Python
91
+ run: uv python install 3.12
92
+
93
+ - name: Install dependencies
94
+ run: uv sync --all-extras
95
+
96
+ - name: Run pre-commit hooks
97
+ run: uv run pre-commit run --all-files
98
+
99
+ build:
100
+ name: Build Package
101
+ runs-on: ubuntu-latest
102
+
103
+ steps:
104
+ - name: Checkout code
105
+ uses: actions/checkout@v4
106
+
107
+ - name: Install uv
108
+ uses: astral-sh/setup-uv@v4
109
+ with:
110
+ enable-cache: true
111
+
112
+ - name: Set up Python
113
+ run: uv python install 3.12
114
+
115
+ - name: Install dependencies for build test
116
+ run: uv sync --all-extras
117
+
118
+ - name: Build package
119
+ run: uv build --no-sources
120
+
121
+ - name: Check package installation
122
+ run: |
123
+ # Test that the built wheel is valid by checking its contents
124
+ python -m zipfile -l dist/*.whl | grep "pyg_hyper_bench/__init__.py"
125
+
126
+ # Test import in the existing environment (already has all deps)
127
+ uv run python -c "from pyg_hyper_bench import SingleRunEvaluator, MultiRunEvaluator, NodeClassificationProtocol; print('✅ Package imports successfully!')"
128
+
129
+ - name: Upload build artifacts
130
+ uses: actions/upload-artifact@v4
131
+ with:
132
+ name: dist
133
+ path: dist/
134
+
135
+ publish-testpypi:
136
+ name: Publish to TestPyPI
137
+ runs-on: ubuntu-latest
138
+ needs: [code-quality, tests, pre-commit, build]
139
+ if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev')
140
+
141
+ steps:
142
+ - name: Checkout code
143
+ uses: actions/checkout@v4
144
+
145
+ - name: Install uv
146
+ uses: astral-sh/setup-uv@v4
147
+
148
+ - name: Set up Python
149
+ run: uv python install 3.12
150
+
151
+ - name: Install dependencies
152
+ run: uv sync --all-extras
153
+
154
+ - name: Get current version
155
+ id: version
156
+ run: |
157
+ VERSION=$(grep '^version = ' pyproject.toml | sed 's/version = "\(.*\)"/\1/')
158
+ BRANCH_NAME=${GITHUB_REF#refs/heads/}
159
+ COMMIT_SHORT=${GITHUB_SHA:0:7}
160
+ DEV_VERSION="${VERSION}.dev$(date +%Y%m%d%H%M%S)"
161
+
162
+ echo "version=$VERSION" >> $GITHUB_OUTPUT
163
+ echo "dev_version=$DEV_VERSION" >> $GITHUB_OUTPUT
164
+ echo "branch=$BRANCH_NAME" >> $GITHUB_OUTPUT
165
+ echo "commit=$COMMIT_SHORT" >> $GITHUB_OUTPUT
166
+ echo "📦 Base version: $VERSION"
167
+ echo "🚀 TestPyPI version: $DEV_VERSION"
168
+ echo "🌿 Branch: $BRANCH_NAME"
169
+ echo "📝 Commit: $COMMIT_SHORT"
170
+
171
+ - name: Update version for TestPyPI
172
+ run: |
173
+ echo "Updating version to ${{ steps.version.outputs.dev_version }}"
174
+ sed -i 's/^version = ".*"/version = "${{ steps.version.outputs.dev_version }}"/' pyproject.toml
175
+ cat pyproject.toml | grep "^version"
176
+
177
+ - name: Build package
178
+ run: uv build --no-sources
179
+
180
+ - name: Publish to TestPyPI
181
+ env:
182
+ UV_PUBLISH_TOKEN: ${{ secrets.TESTPYPI_API_TOKEN }}
183
+ run: |
184
+ echo "🚀 Publishing pyg-hyper-bench ${{ steps.version.outputs.dev_version }} to TestPyPI..."
185
+ uv publish --index testpypi
186
+
187
+ - name: Create comment with TestPyPI link
188
+ if: github.event_name == 'push'
189
+ run: |
190
+ echo "✅ Published to TestPyPI: https://test.pypi.org/project/pyg-hyper-bench/${{ steps.version.outputs.dev_version }}/"
191
+ echo "🌿 Branch: ${{ steps.version.outputs.branch }}"
192
+ echo "📝 Commit: ${{ steps.version.outputs.commit }}"
193
+ echo ""
194
+ echo "Install with:"
195
+ echo "pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ pyg-hyper-bench==${{ steps.version.outputs.dev_version }}"