copilot-proxy 0.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- copilot_proxy-0.1.2/LICENSE +21 -0
- copilot_proxy-0.1.2/PKG-INFO +188 -0
- copilot_proxy-0.1.2/README.md +159 -0
- copilot_proxy-0.1.2/copilot_proxy/__init__.py +13 -0
- copilot_proxy-0.1.2/copilot_proxy/__main__.py +8 -0
- copilot_proxy-0.1.2/copilot_proxy/app.py +226 -0
- copilot_proxy-0.1.2/copilot_proxy/cli.py +61 -0
- copilot_proxy-0.1.2/copilot_proxy.egg-info/PKG-INFO +188 -0
- copilot_proxy-0.1.2/copilot_proxy.egg-info/SOURCES.txt +13 -0
- copilot_proxy-0.1.2/copilot_proxy.egg-info/dependency_links.txt +1 -0
- copilot_proxy-0.1.2/copilot_proxy.egg-info/entry_points.txt +2 -0
- copilot_proxy-0.1.2/copilot_proxy.egg-info/requires.txt +4 -0
- copilot_proxy-0.1.2/copilot_proxy.egg-info/top_level.txt +1 -0
- copilot_proxy-0.1.2/pyproject.toml +45 -0
- copilot_proxy-0.1.2/setup.cfg +4 -0
copilot_proxy-0.1.2/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 modpotato

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
copilot_proxy-0.1.2/PKG-INFO
@@ -0,0 +1,188 @@
Metadata-Version: 2.4
Name: copilot-proxy
Version: 0.1.2
Summary: GitHub Copilot-compatible proxy for Z.AI GLM coding models
Author: modpotato
License-Expression: MIT
Project-URL: Homepage, https://github.com/modpotato/copilot-proxy
Project-URL: Repository, https://github.com/modpotato/copilot-proxy
Project-URL: Issues, https://github.com/modpotato/copilot-proxy/issues
Keywords: copilot,proxy,fastapi,glm,ollama
Classifier: Development Status :: 4 - Beta
Classifier: Framework :: FastAPI
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Internet :: Proxy Servers
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: fastapi>=0.110
Requires-Dist: httpx>=0.27
Requires-Dist: uvicorn[standard]>=0.27
Requires-Dist: openai>=1.0.0
Dynamic: license-file

# Copilot-Proxy

A proxy server that bridges GitHub Copilot Chat with GLM coding models by mimicking the Ollama API interface.

## What it does

This proxy server intercepts requests from GitHub Copilot's Ollama provider and forwards them to a GLM coding plan backend. By implementing the Ollama API interface, it allows the GitHub Copilot VS Code extension to communicate with alternative language models seamlessly.

```mermaid
flowchart TD
    A[GitHub Copilot Chat] -- Ollama API (localhost:11434) --> B[Copilot-Proxy Server]
    B --> C[GLM Coding Plan Backend]
```

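To make the bridge concrete, here is a minimal sketch (not part of the package) of the kind of discovery calls an Ollama-style client makes against the proxy; it assumes the proxy is already running on the default port and uses the `/api/version` and `/api/tags` routes implemented in `copilot_proxy/app.py`:

```python
# Sketch: probe the proxy the same way an Ollama-compatible client would.
import httpx

base = "http://localhost:11434"
print(httpx.get(f"{base}/api/version").json())    # e.g. {"version": "0.6.4"}
for model in httpx.get(f"{base}/api/tags").json()["models"]:
    print(model["name"])                          # GLM-4.6, GLM-4.5, GLM-4.5-Air
```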
## Quick Start

### Prerequisites

1. **Python 3.10+**
2. **UV** for dependency management and packaging ([install instructions](https://docs.astral.sh/uv/getting-started/installation/))
3. **Z.AI Coding Plan access** with a valid API key

### Install from PyPI (recommended)

```powershell
# Ensure uv is installed first
uv pip install copilot-proxy

# Or run without installing globally
uvx copilot-proxy --help
```

### Run the proxy locally

```powershell
# Quick one-liner using uvx
uvx copilot-proxy --host 127.0.0.1 --port 11434

# Or inside a synced project environment
uv sync
uv run copilot-proxy
```

The server listens on `http://localhost:11434` by default (same port Ollama uses). Make sure Ollama itself is stopped to avoid port conflicts.

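If you prefer launching from Python rather than the console script, a minimal sketch (this mirrors what the CLI itself does, since `copilot_proxy/cli.py` hands the same application path to Uvicorn):

```python
# Sketch: start the proxy programmatically instead of running `copilot-proxy`.
import uvicorn

uvicorn.run("copilot_proxy.app:app", host="127.0.0.1", port=11434, log_level="info")
```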
### Configure credentials

Provide your Z.AI API key before launching the proxy:
```powershell
# PowerShell (current session only)
$env:ZAI_API_KEY = "your-zai-api-key"
```

```bash
# bash/zsh
export ZAI_API_KEY="your-zai-api-key"
```

You can optionally set a custom endpoint with `ZAI_API_BASE_URL`, though the default already targets the Coding Plan URL `https://api.z.ai/api/coding/paas/v4`.

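The app also accepts the key from a couple of alternative variables; a small pre-flight sketch, mirroring the lookup order in `copilot_proxy/app.py`, can confirm that one of them is set before you start the server:

```python
# Sketch: verify that one of the API key variables the proxy reads is present.
import os

for name in ("ZAI_API_KEY", "ZAI_CODING_API_KEY", "GLM_API_KEY"):
    if os.getenv(name):
        print(f"Using API key from {name}")
        break
else:
    raise SystemExit("No Z.AI API key found; set ZAI_API_KEY before starting the proxy.")
```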
### Configure GitHub Copilot in VS Code

- Open the GitHub Copilot Chat panel in VS Code
- Click on the current model name to view available models
- Click **'Manage Models...'**
- Select **'Ollama'** from the list of providers
- Choose your preferred model from the available GLM models

### Available Models

The proxy advertises the GLM Coding Plan lineup so Copilot (or any Ollama-compatible client) can switch between them seamlessly:

| Model         | Description                                    | Use Case Highlights                            |
|---------------|------------------------------------------------|------------------------------------------------|
| `GLM-4.6`     | Flagship coding model with top-tier reasoning  | Complex refactors, multi-file tasks, tool use  |
| `GLM-4.5`     | Balanced performance for everyday coding       | General coding, debugging, architecture input  |
| `GLM-4.5-Air` | Lightweight, faster response variant           | Quick iterations, drafting, lower-latency use  |

> **Tip:** These identifiers match the GLM Coding Plan catalog, so any OpenAI-compatible tool can use them by pointing to `https://api.z.ai/api/coding/paas/v4` with your Coding Plan API key.

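As an illustration of that tip, here is a hedged sketch using the `openai` Python client (already a declared dependency of this package) pointed directly at the Coding Plan endpoint; it assumes the backend accepts standard OpenAI-style chat requests, as described above:

```python
# Sketch: talk to the GLM Coding Plan backend with any OpenAI-compatible client.
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://api.z.ai/api/coding/paas/v4",
    api_key=os.environ["ZAI_API_KEY"],
)
response = client.chat.completions.create(
    model="GLM-4.6",
    messages=[{"role": "user", "content": "Write a Python function that reverses a string."}],
)
print(response.choices[0].message.content)
```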
## How it Works

The proxy server implements the Ollama API specification, allowing GitHub Copilot's Ollama provider to communicate with it. When Copilot sends requests to `localhost:11434`, the proxy intercepts these requests and forwards them to the GLM coding plan backend, then returns the responses in Ollama-compatible format.

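The same forwarding path can be exercised by any OpenAI-compatible client pointed at the proxy instead of the backend; a minimal sketch, assuming the proxy is running locally with a key configured (the proxy injects its own key, so the client-side key is just a placeholder):

```python
# Sketch: send a chat completion through the proxy's /v1/chat/completions endpoint.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="placeholder")
response = client.chat.completions.create(
    model="GLM-4.5",
    messages=[{"role": "user", "content": "Explain what this proxy does in one sentence."}],
)
print(response.choices[0].message.content)
```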
## Troubleshooting

**Common Issues:**

1. **Port conflict errors**
   - Ensure Ollama is not running (both services use port 11434)
   - Check that no other service is using port 11434
   - On Windows, use: `netstat -ano | findstr :11434`
   - On Unix/Linux/Mac, use: `lsof -i :11434`

2. **Ollama provider not responding in Copilot Chat**
   - Verify the proxy server is running
   - Check the terminal for any error messages
   - Ensure the GLM backend is accessible

3. **Models not appearing in VS Code**
   - Restart VS Code after starting the proxy server
   - Make sure you've selected 'Ollama' as the provider in Copilot settings
   - Check that the proxy server is responding at `http://localhost:11434` (see the quick check after this list)

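A quick way to tell whether the proxy, rather than a stray Ollama instance or nothing at all, is answering on port 11434 is to hit its root route; a minimal sketch:

```python
# Sketch: quick check of whatever is listening on port 11434.
import httpx

try:
    r = httpx.get("http://localhost:11434/", timeout=5)
    print(r.status_code, r.text)  # the proxy answers with "GLM Coding Plan proxy is running"
except httpx.ConnectError:
    print("Nothing is listening on port 11434; start the proxy first.")
```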
## Developing locally

```powershell
uv sync
uv run uvicorn copilot_proxy.app:app --reload --port 11434
```

Use `uv run pytest` (once tests are added) or `uvx ruff check .` for linting.

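When tests are added, a starting point could look like the following sketch (a hypothetical `tests/test_app.py`, not shipped in this release) using FastAPI's `TestClient` against `create_app()`:

```python
# Sketch: hypothetical test exercising the Ollama-compatible model listing.
from fastapi.testclient import TestClient

from copilot_proxy.app import create_app


def test_model_catalog_is_advertised():
    with TestClient(create_app()) as client:
        names = {m["name"] for m in client.get("/api/tags").json()["models"]}
        assert {"GLM-4.6", "GLM-4.5", "GLM-4.5-Air"} <= names
```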
## Releasing to PyPI with UV

1. Bump the version in `pyproject.toml`.
2. Build the distributions:

```powershell
uv build
```

3. Check the metadata:

```powershell
uvx twine check dist/*
```

4. Publish to TestPyPI (recommended before production):

```powershell
uv publish --publish-url https://test.pypi.org/legacy/
```

5. Publish to PyPI:

```powershell
uv publish
```

Both `uv publish` commands expect the relevant API token to be available in the `UV_PUBLISH_TOKEN` environment variable.

### GitHub Actions trusted publisher

This repository includes `.github/workflows/publish.yml`, which automatically builds and uploads a release whenever a version tag is pushed to GitHub. To enable it:

1. Create a PyPI trusted publisher (pending or project-specific) pointing at:
   - **Project**: `copilot-proxy`
   - **Owner**: `modpotato`
   - **Repository**: `copilot-proxy`
   - **Workflow**: `publish.yml`
   - **Environment**: `release`
2. In GitHub, create the matching repository environment (`Settings → Environments → New environment → release`).
3. Push a tag (e.g. `v0.1.0`) to GitHub (`git push origin v0.1.0`). The workflow will build with `uv`, publish to PyPI via OIDC, and create the GitHub release automatically.
4. For dry runs, use the **Run workflow** button; the manual dispatch builds and validates without publishing or creating a release.

## License

This project is licensed under the MIT License - see the LICENSE file for details.

copilot_proxy-0.1.2/README.md
@@ -0,0 +1,159 @@
(Content identical to the Markdown long description embedded in copilot_proxy-0.1.2/PKG-INFO above, from "# Copilot-Proxy" onward.)

copilot_proxy-0.1.2/copilot_proxy/__init__.py
@@ -0,0 +1,13 @@
"""Copilot proxy package surfaces the FastAPI application and CLI utilities."""
from __future__ import annotations

from importlib import metadata

from .app import app, create_app

__all__ = ["app", "create_app", "__version__"]

try:  # pragma: no cover - fallback when package metadata missing
    __version__ = metadata.version("copilot-proxy")
except metadata.PackageNotFoundError:  # type: ignore[attr-defined]
    __version__ = "0.0.0"

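For reference, the package can also be used as a library rather than through the console script; a minimal sketch built only on the names exported above:

```python
# Sketch: use copilot_proxy as a library instead of the `copilot-proxy` CLI.
import uvicorn

from copilot_proxy import __version__, create_app

print(f"copilot-proxy {__version__}")
uvicorn.run(create_app(), host="127.0.0.1", port=11434)
```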
copilot_proxy-0.1.2/copilot_proxy/app.py
@@ -0,0 +1,226 @@
"""FastAPI application exposing the Copilot proxy endpoints."""
from __future__ import annotations

import os
from contextlib import asynccontextmanager
from typing import AsyncGenerator

import httpx
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse

DEFAULT_BASE_URL = "https://api.z.ai/api/coding/paas/v4"
DEFAULT_MODEL = "GLM-4.6"
API_KEY_ENV_VARS = ("ZAI_API_KEY", "ZAI_CODING_API_KEY", "GLM_API_KEY")
BASE_URL_ENV_VAR = "ZAI_API_BASE_URL"
CHAT_COMPLETION_PATH = "/chat/completions"

MODEL_CATALOG = [
    {
        "name": "GLM-4.6",
        "model": "GLM-4.6",
        "modified_at": "2024-01-01T00:00:00Z",
        "size": 0,
        "digest": "GLM-4.6",
        "details": {
            "format": "glm",
            "family": "glm",
            "families": ["glm"],
            "parameter_size": "cloud",
            "quantization_level": "cloud",
        },
    },
    {
        "name": "GLM-4.5",
        "model": "GLM-4.5",
        "modified_at": "2024-01-01T00:00:00Z",
        "size": 0,
        "digest": "GLM-4.5",
        "details": {
            "format": "glm",
            "family": "glm",
            "families": ["glm"],
            "parameter_size": "cloud",
            "quantization_level": "cloud",
        },
    },
    {
        "name": "GLM-4.5-Air",
        "model": "GLM-4.5-Air",
        "modified_at": "2024-01-01T00:00:00Z",
        "size": 0,
        "digest": "GLM-4.5-Air",
        "details": {
            "format": "glm",
            "family": "glm",
            "families": ["glm"],
            "parameter_size": "cloud",
            "quantization_level": "cloud",
        },
    },
]


def _get_api_key() -> str:
    for env_var in API_KEY_ENV_VARS:
        api_key = os.getenv(env_var)
        if api_key:
            return api_key.strip()
    raise RuntimeError(
        "Missing Z.AI API key. Please set one of the following environment variables: "
        + ", ".join(API_KEY_ENV_VARS)
    )


def _get_base_url() -> str:
    base_url = os.getenv(BASE_URL_ENV_VAR, DEFAULT_BASE_URL).strip()
    if not base_url:
        base_url = DEFAULT_BASE_URL
    if not base_url.startswith("http://") and not base_url.startswith("https://"):
        base_url = f"https://{base_url}"
    return base_url.rstrip("/")


def _get_chat_completion_url() -> str:
    base_url = _get_base_url()
    if base_url.endswith(CHAT_COMPLETION_PATH):
        return base_url
    return f"{base_url}{CHAT_COMPLETION_PATH}"


@asynccontextmanager
async def _lifespan(app: FastAPI):  # noqa: D401 - FastAPI lifespan signature
    """Ensure configuration is ready before serving requests."""

    try:
        _ = _get_api_key()
        print("GLM Coding Plan proxy is ready.")
    except Exception as exc:  # pragma: no cover - startup logging
        print(f"Failed to initialise GLM Coding Plan proxy: {exc}")
    yield


def create_app() -> FastAPI:
    """Create and return a configured FastAPI application."""

    app = FastAPI(lifespan=_lifespan)

    @app.get("/")
    async def root():  # noqa: D401 - FastAPI route
        """Return a simple health message."""

        return {"message": "GLM Coding Plan proxy is running"}

    @app.get("/api/ps")
    async def list_running_models():  # noqa: D401 - FastAPI route
        """Return an empty list as we do not host local models."""

        return {"models": []}

    @app.get("/api/version")
    async def get_version():  # noqa: D401 - FastAPI route
        """Expose a version compatible with the Ollama API expectations."""

        return {"version": "0.6.4"}

    @app.get("/api/tags")
    @app.get("/api/list")
    async def list_models():  # noqa: D401 - FastAPI route
        """Return the static catalog of GLM models."""

        return {"models": MODEL_CATALOG}

    @app.post("/api/show")
    async def show_model(request: Request):  # noqa: D401 - FastAPI route
        """Handle Ollama-compatible model detail queries."""

        try:
            body = await request.json()
            model_name = body.get("model")
        except Exception:
            model_name = DEFAULT_MODEL

        if not model_name:
            model_name = DEFAULT_MODEL

        return {
            "template": "{{ .System }}\n{{ .Prompt }}",
            "capabilities": ["tools"],
            "details": {
                "family": "glm",
                "families": ["glm"],
                "format": "glm",
                "parameter_size": "cloud",
                "quantization_level": "cloud",
            },
            "model_info": {
                "general.basename": model_name,
                "general.architecture": "glm",
                "glm.context_length": 32768,
            },
        }

    @app.post("/v1/chat/completions")
    async def chat_completions(request: Request):  # noqa: D401 - FastAPI route
        """Forward chat completion calls to the Z.AI backend."""

        body = await request.json()

        if not body.get("model"):
            body["model"] = DEFAULT_MODEL

        stream = body.get("stream", False)

        api_key = _get_api_key()
        chat_completion_url = _get_chat_completion_url()

        async def generate_chunks() -> AsyncGenerator[bytes, None]:
            async with httpx.AsyncClient(timeout=300.0) as client:
                try:
                    headers = {
                        "Content-Type": "application/json",
                        "Authorization": f"Bearer {api_key}",
                    }
                    response = await client.post(
                        chat_completion_url,
                        headers=headers,
                        json=body,
                        timeout=None,
                    )
                    response.raise_for_status()

                    async for chunk in response.aiter_bytes():
                        yield chunk

                except httpx.HTTPStatusError as exc:
                    if exc.response.status_code == 401:
                        raise RuntimeError("Unauthorized. Check your Z.AI API key.") from exc
                    raise

        if stream:
            return StreamingResponse(generate_chunks(), media_type="text/event-stream")

        async with httpx.AsyncClient(timeout=300.0) as client:
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}",
            }
            response = await client.post(chat_completion_url, headers=headers, json=body)
            response.raise_for_status()
            return response.json()

    return app


app = create_app()

__all__ = [
    "API_KEY_ENV_VARS",
    "BASE_URL_ENV_VAR",
    "CHAT_COMPLETION_PATH",
    "DEFAULT_BASE_URL",
    "DEFAULT_MODEL",
    "MODEL_CATALOG",
    "app",
    "create_app",
]

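When `stream` is true, `chat_completions` relays the backend's response bytes as-is. A hedged sketch of consuming that stream from a client with `httpx`, assuming the proxy is running locally and the backend emits OpenAI-style `data:` lines:

```python
# Sketch: read the streamed response from the proxy's /v1/chat/completions endpoint.
import json

import httpx

payload = {
    "model": "GLM-4.6",
    "stream": True,
    "messages": [{"role": "user", "content": "Say hello"}],
}
with httpx.stream(
    "POST", "http://localhost:11434/v1/chat/completions", json=payload, timeout=None
) as response:
    for line in response.iter_lines():
        if line.startswith("data: ") and line != "data: [DONE]":
            chunk = json.loads(line[len("data: "):])
            if chunk.get("choices"):
                print(chunk["choices"][0]["delta"].get("content", ""), end="")
```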
copilot_proxy-0.1.2/copilot_proxy/cli.py
@@ -0,0 +1,61 @@
"""Command-line interface for running the Copilot proxy."""
from __future__ import annotations

import argparse
from typing import Optional

import uvicorn

DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 11434


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(description="Run the Copilot GLM proxy server.")
    parser.add_argument(
        "--host",
        default=DEFAULT_HOST,
        help=f"Host interface to bind (default: {DEFAULT_HOST}).",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=DEFAULT_PORT,
        help=f"Port to bind (default: {DEFAULT_PORT}).",
    )
    parser.add_argument(
        "--reload",
        action="store_true",
        help="Enable auto-reload (useful for development).",
    )
    parser.add_argument(
        "--log-level",
        default="info",
        help="Log level passed to Uvicorn (default: info).",
    )
    parser.add_argument(
        "--proxy-app",
        default="copilot_proxy.app:app",
        help=(
            "Dotted path to the FastAPI application passed to Uvicorn "
            "(default: copilot_proxy.app:app)."
        ),
    )
    return parser


def main(argv: Optional[list[str]] = None) -> None:
    parser = build_parser()
    args = parser.parse_args(argv)

    uvicorn.run(
        args.proxy_app,
        host=args.host,
        port=args.port,
        reload=args.reload,
        log_level=args.log_level,
    )


if __name__ == "__main__":
    main()

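Because `main()` accepts an explicit argument list, the CLI can also be driven from Python; a minimal sketch:

```python
# Sketch: invoke the CLI entry point programmatically with custom arguments.
from copilot_proxy.cli import build_parser, main

print(build_parser().format_help())  # inspect the available flags
main(["--host", "0.0.0.0", "--port", "8080", "--log-level", "debug"])  # blocks while serving
```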
copilot_proxy-0.1.2/copilot_proxy.egg-info/PKG-INFO
@@ -0,0 +1,188 @@
(Content identical to copilot_proxy-0.1.2/PKG-INFO above.)

copilot_proxy-0.1.2/copilot_proxy.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
LICENSE
README.md
pyproject.toml
copilot_proxy/__init__.py
copilot_proxy/__main__.py
copilot_proxy/app.py
copilot_proxy/cli.py
copilot_proxy.egg-info/PKG-INFO
copilot_proxy.egg-info/SOURCES.txt
copilot_proxy.egg-info/dependency_links.txt
copilot_proxy.egg-info/entry_points.txt
copilot_proxy.egg-info/requires.txt
copilot_proxy.egg-info/top_level.txt

copilot_proxy-0.1.2/copilot_proxy.egg-info/dependency_links.txt
@@ -0,0 +1 @@
(single blank line)

copilot_proxy-0.1.2/copilot_proxy.egg-info/top_level.txt
@@ -0,0 +1 @@
copilot_proxy

copilot_proxy-0.1.2/pyproject.toml
@@ -0,0 +1,45 @@
[project]
name = "copilot-proxy"
version = "0.1.2"
description = "GitHub Copilot-compatible proxy for Z.AI GLM coding models"
readme = "README.md"
requires-python = ">=3.10"
license = "MIT"
authors = [{ name = "modpotato" }]
keywords = ["copilot", "proxy", "fastapi", "glm", "ollama"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Framework :: FastAPI",
    "Intended Audience :: Developers",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Internet :: Proxy Servers",
]
dependencies = [
    "fastapi>=0.110",
    "httpx>=0.27",
    "uvicorn[standard]>=0.27",
    "openai>=1.0.0",
]

[project.scripts]
copilot-proxy = "copilot_proxy.cli:main"

[project.urls]
Homepage = "https://github.com/modpotato/copilot-proxy"
Repository = "https://github.com/modpotato/copilot-proxy"
Issues = "https://github.com/modpotato/copilot-proxy/issues"

[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
include = ["copilot_proxy"]

[tool.uv]
managed = true