argo-proxy 2.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Peng Ding
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,313 @@
1
+ Metadata-Version: 2.4
2
+ Name: argo-proxy
3
+ Version: 2.5.0
4
+ Summary: Proxy server to Argo API, OpenAI format compatible
5
+ Author-email: Peng Ding <oaklight@gmx.com>
6
+ License-Expression: MIT
7
+ Project-URL: Documentation, https://github.com/Oaklight/argo-openai-proxy#readme
8
+ Project-URL: Repository, https://github.com/Oaklight/argo-openai-proxy
9
+ Project-URL: Issues, https://github.com/Oaklight/argo-openai-proxy/issues
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Topic :: Software Development
13
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
14
+ Requires-Python: >=3.10
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: aiohttp>=3.12.2
18
+ Requires-Dist: loguru>=0.7.3
19
+ Requires-Dist: PyYAML>=6.0.2
20
+ Requires-Dist: sanic>=25.3.0
21
+ Requires-Dist: tiktoken>=0.9.0
22
+ Requires-Dist: setuptools<75
23
+ Provides-Extra: dev
24
+ Requires-Dist: dotenv>=0.9.9; extra == "dev"
25
+ Requires-Dist: openai>=1.79.0; extra == "dev"
26
+ Requires-Dist: mypy>=1.14.1; extra == "dev"
27
+ Requires-Dist: build>=1.2.2.post1; extra == "dev"
28
+ Requires-Dist: twine>=6.1.0; extra == "dev"
29
+ Requires-Dist: httpx>=0.28.1; extra == "dev"
30
+ Requires-Dist: requests>=2.25.1; extra == "dev"
31
+ Dynamic: license-file
32
+
33
+ # argo-openai-proxy
34
+
35
+ This project is a proxy application that forwards requests to an ARGO API and optionally converts the responses to be compatible with OpenAI's API format. It can be used in conjunction with [autossh-tunnel-dockerized](https://github.com/Oaklight/autossh-tunnel-dockerized) or other secure connection tools.
36
+
37
+ ## NOTICE OF USAGE
38
+
39
+ The machine or server making API calls to Argo must be connected to the Argonne internal network or through a VPN on an Argonne-managed computer if you are working off-site. Your instance of the argo proxy should always be on-premise at an Argonne machine. The software is provided "as is," without any warranties. By using this software, you accept that the authors, contributors, and affiliated organizations will not be liable for any damages or issues arising from its use. You are solely responsible for ensuring the software meets your requirements.
40
+
41
+ - [Notice of Usage](#notice-of-usage)
42
+ - [Deployment](#deployment)
43
+ - [Prerequisites](#prerequisites)
44
+ - [Configuration File](#configuration-file)
45
+ - [Running the Application](#running-the-application)
46
+ - [First-Time Setup](#first-time-setup)
47
+ - [Configuration Options Reference](#configuration-options-reference)
48
+ - [Usage](#usage)
49
+ - [Endpoints](#endpoints)
50
+ - [OpenAI Compatible](#openai-compatible)
51
+ - [Not OpenAI Compatible](#not-openai-compatible)
52
+ - [Timeout Override](#timeout-override)
53
+ - [Models](#models)
54
+ - [Chat Models](#chat-models)
55
+ - [Embedding Models](#embedding-models)
56
+ - [Examples](#examples)
57
+ - [Chat Completion Example](#chat-completion-example)
58
+ - [Embedding Example](#embedding-example)
59
+ - [o1 Chat Example](#o1-chat-example)
60
+ - [OpenAI Client Example](#openai-client-example)
61
+ - [Folder Structure](#folder-structure)
62
+ - [Bug Reports and Contributions](#bug-reports-and-contributions)
63
+
64
+ ## Deployment
65
+
66
+ ### Prerequisites
67
+
68
+ - **Python 3.10+** is required \
69
+ we recommend using conda/mamba, pipx, etc. to manage an exclusive environment \
70
+ **Conda/Mamba** Download and install from: <https://conda-forge.org/download/>
71
+
72
+ - Install dependencies:
73
+
74
+ ```bash
75
+ pip install .
76
+ ```
77
+
78
+ ### Configuration File
79
+
80
+ If you don't want to bother manually configuring it, the [First-Time Setup](#first-time-setup) will automatically create it for you.
81
+
82
+ The application uses `config.yaml` for configuration. Here's an example:
83
+
84
+ ```yaml
85
+ port: 44497
86
+ host: 0.0.0.0
87
+ argo_url: "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/chat/"
88
+ argo_stream_url: "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/streamchat/"
89
+ argo_embedding_url: "https://apps.inside.anl.gov/argoapi/api/v1/resource/embed/"
90
+ user: "your_username" # set during first-time setup
91
+ verbose: true # can be changed during setup
92
+ num_workers: 5
93
+ timeout: 600 # in seconds
94
+ ```
95
+
96
+ ### Running the Application
97
+
98
+ To start the application:
99
+
100
+ ```bash
101
+ argo-proxy [config_path]
102
+ ```
103
+
104
+ - Without arguments: search for `config.yaml` under `~/.config/argoproxy/`, `~/.argoproxy/`, or current directory
105
+ - With path: uses specified config file
106
+
107
+ ```bash
108
+ argo-proxy /path/to/config.yaml
109
+ ```
110
+
111
+ ### First-Time Setup
112
+
113
+ When running without an existing config file:
114
+
115
+ 1. The script offers to create `config.yaml` from `config.sample.yaml`
116
+ 2. Automatically selects a random available port (can be overridden)
117
+ 3. Prompts for:
118
+ - Your username (sets `user` field)
119
+ - Verbose mode preference (sets `verbose` field)
120
+ 4. Validates connectivity to configured URLs
121
+ 5. Shows the generated config in a formatted display for review before proceeding
122
+
123
+ Example session:
124
+
125
+ ```bash
126
+ $ argo-proxy
127
+ No valid configuration found.
128
+ Would you like to create it from config.sample.yaml? [Y/n]:
129
+ Creating new configuration...
130
+ Use port [52226]? [Y/n/<port>]:
131
+ Enter your username: your_username
132
+ Enable verbose mode? [Y/n]
133
+ Set timeout to [600] seconds? [Y/n/<timeout>]
134
+ Created new configuration at: /home/your_username/.config/argoproxy/config.yaml
135
+ Using port 52226...
136
+ Validating URL connectivity...
137
+ Current configuration:
138
+ --------------------------------------
139
+ {
140
+ "host": "0.0.0.0",
141
+ "port": 52226,
142
+ "user": "your_username",
143
+ "argo_url": "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/chat/",
144
+ "argo_stream_url": "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/streamchat/",
145
+ "argo_embedding_url": "https://apps.inside.anl.gov/argoapi/api/v1/resource/embed/",
146
+ "verbose": true,
147
+ "num_workers": 5,
148
+ "timeout": 600
149
+ }
150
+ --------------------------------------
151
+ # ... proxy server starting info display ...
152
+ ```
153
+
154
+ ### Configuration Options Reference
155
+
156
+ | Option | Description | Default |
157
+ | -------------------- | ------------------------------------------------------------ | ------------------ |
158
+ | `host` | Host address to bind the server to | `0.0.0.0` |
159
+ | `port` | Application port (random available port selected by default) | randomly assigned |
160
+ | `argo_url` | ARGO chat API URL | Dev URL (for now) |
161
+ | `argo_stream_url` | ARGO stream API URL | Dev URL (for now) |
162
+ | `argo_embedding_url` | ARGO embedding API URL | Prod URL |
163
+ | `user` | Your username | (Set during setup) |
164
+ | `verbose` | Debug logging | `true` |
165
+ | `num_workers` | Worker processes | `5` |
166
+ | `timeout` | Request timeout (seconds) | `600` |
167
+
168
+ ### `argo-proxy` CLI Available Options
169
+
170
+ ```bash
171
+ $ argo-proxy -h
172
+ usage: argo-proxy [-h] [--show] [--host HOST] [--port PORT] [--num-worker NUM_WORKER]
173
+ [--verbose | --quiet] [--version]
174
+ [config]
175
+
176
+ Argo Proxy CLI
177
+
178
+ positional arguments:
179
+ config Path to the configuration file
180
+
181
+ options:
182
+ -h, --help show this help message and exit
183
+ --show, -s Show the current configuration during launch
184
+ --host HOST, -H HOST Host address to bind the server to
185
+ --port PORT, -p PORT Port number to bind the server to
186
+ --num-worker NUM_WORKER, -n NUM_WORKER
187
+ Number of worker processes to run
188
+ --verbose, -v Enable verbose logging, override if `verbose` set False in config
189
+ --quiet, -q Disable verbose logging, override if `verbose` set True in config
190
+ --version, -V Show the version and exit.
191
+ ```
192
+
193
+ ## Usage
194
+
195
+ ### Endpoints
196
+
197
+ #### OpenAI Compatible
198
+
199
+ These endpoints convert responses from the ARGO API to be compatible with OpenAI's format:
200
+
201
+ - **`/v1/chat/completions`**: Converts ARGO chat/completions responses to OpenAI-compatible format.
202
+ - **`/v1/completions`**: Legacy API for conversions to OpenAI format.
203
+ - **`/v1/embeddings`**: Accesses ARGO Embedding API with response conversion.
204
+ - **`/v1/models`**: Lists available models in OpenAI-compatible format.
205
+
206
+ #### Not OpenAI Compatible
207
+
208
+ These endpoints interact directly with the ARGO API and do not convert responses to OpenAI's format:
209
+
210
+ - **`/v1/chat`**: Proxies requests to the ARGO API without conversion.
211
+ - **`/v1/status`**: Responds with a simple "hello" from GPT-4o, letting you know the proxy is alive.
212
+
213
+ #### Timeout Override
214
+
215
+ You can override the default timeout with a `timeout` parameter in your request.
216
+
217
+ Details on how to make such an override in different query flavors: [Timeout Override Examples](timeout_examples.md)
218
+
219
+ ### Models
220
+
221
+ #### Chat Models
222
+
223
+ | Original ARGO Model Name | Argo Proxy Name |
224
+ | ------------------------ | ---------------------------------------- |
225
+ | `gpt35` | `argo:gpt-3.5-turbo` |
226
+ | `gpt35large` | `argo:gpt-3.5-turbo-16k` |
227
+ | `gpt4` | `argo:gpt-4` |
228
+ | `gpt4large` | `argo:gpt-4-32k` |
229
+ | `gpt4turbo` | `argo:gpt-4-turbo-preview` |
230
+ | `gpt4o` | `argo:gpt-4o` |
231
+ | `gpt4olatest` | `argo:gpt-4o-latest` |
232
+ | `gpto1preview` | `argo:gpt-o1-preview`, `argo:o1-preview` |
233
+ | `gpto1mini` | `argo:gpt-o1-mini` , `argo:o1-mini` |
234
+ | `gpto3mini` | `argo:gpt-o3-mini` , `argo:o3-mini` |
235
+ | `gpto1` | `argo:gpt-o1` , `argo:o1` |
236
+
237
+ #### Embedding Models
238
+
239
+ | Original ARGO Model Name | Argo Proxy Name |
240
+ | ------------------------ | ----------------------------- |
241
+ | `ada002` | `argo:text-embedding-ada-002` |
242
+ | `v3small` | `argo:text-embedding-3-small` |
243
+ | `v3large` | `argo:text-embedding-3-large` |
244
+
245
+ ### Examples
246
+
247
+ #### Chat Completion Example
248
+
249
+ For examples of how to use the `/v1/chat/completions`, `/v1/completions`, and `/v1/chat` endpoints, see the following:
250
+
251
+ - [chat_completions_example.py](examples/chat_completions_example.py)
252
+ - [chat_completions_example_stream.py](examples/chat_completions_example_stream.py)
253
+ - [completions_example.py](examples/completions_example.py)
254
+ - [completions_example_stream.py](examples/completions_example_stream.py)
255
+ - [chat_example.py](examples/chat_example.py)
256
+ - [chat_example_stream.py](examples/chat_example_stream.py)
257
+
258
+ #### Embedding Example
259
+
260
+ - [embedding_example.py](examples/embedding_example.py)
261
+
262
+ #### o1 Chat Example
263
+
264
+ - [o1_chat_example.py](examples/o1_chat_example.py)
265
+
266
+ #### OpenAI Client Example
267
+
268
+ - [openai_o3_chat_example.py](examples/o3_chat_example_pyclient.py)
269
+
270
+ ## Folder Structure
271
+
272
+ The following is an overview of the project's directory structure:
273
+
274
+ ```
275
+ $ tree -I "__pycache__|*.egg-info|dist|dev_scripts|config.yaml"
276
+ .
277
+ ├── config.sample.yaml
278
+ ├── examples
279
+ │   ├── chat_completions_example.py
280
+ │   ├── chat_completions_example_stream.py
281
+ │   ├── chat_example.py
282
+ │   ├── chat_example_stream.py
283
+ │   ├── completions_example.py
284
+ │   ├── completions_example_stream.py
285
+ │   ├── embedding_example.py
286
+ │   ├── o1_chat_example.py
287
+ │   └── o3_chat_example_pyclient.py
288
+ ├── LICENSE
289
+ ├── Makefile
290
+ ├── pyproject.toml
291
+ ├── README.md
292
+ ├── run_app.sh
293
+ ├── src
294
+ │   └── argoproxy
295
+ │   ├── app.py
296
+ │   ├── chat.py
297
+ │   ├── cli.py
298
+ │   ├── completions.py
299
+ │   ├── config.py
300
+ │   ├── constants.py
301
+ │   ├── embed.py
302
+ │   ├── extras.py
303
+ │   ├── __init__.py
304
+ │   ├── py.typed
305
+ │   └── utils.py
306
+ └── timeout_examples.md
307
+
308
+ 4 directories, 27 files
309
+ ```
310
+
311
+ ## Bug Reports and Contributions
312
+
313
+ This project was developed in my spare time. Bugs and issues may exist. If you encounter any or have suggestions for improvements, please [open an issue](https://github.com/Oaklight/argo-proxy/issues/new) or [submit a pull request](https://github.com/Oaklight/argo-proxy/compare). Your contributions are highly appreciated!
@@ -0,0 +1,281 @@
1
+ # argo-openai-proxy
2
+
3
+ This project is a proxy application that forwards requests to an ARGO API and optionally converts the responses to be compatible with OpenAI's API format. It can be used in conjunction with [autossh-tunnel-dockerized](https://github.com/Oaklight/autossh-tunnel-dockerized) or other secure connection tools.
4
+
5
+ ## NOTICE OF USAGE
6
+
7
+ The machine or server making API calls to Argo must be connected to the Argonne internal network or through a VPN on an Argonne-managed computer if you are working off-site. Your instance of the argo proxy should always be on-premise at an Argonne machine. The software is provided "as is," without any warranties. By using this software, you accept that the authors, contributors, and affiliated organizations will not be liable for any damages or issues arising from its use. You are solely responsible for ensuring the software meets your requirements.
8
+
9
+ - [Notice of Usage](#notice-of-usage)
10
+ - [Deployment](#deployment)
11
+ - [Prerequisites](#prerequisites)
12
+ - [Configuration File](#configuration-file)
13
+ - [Running the Application](#running-the-application)
14
+ - [First-Time Setup](#first-time-setup)
15
+ - [Configuration Options Reference](#configuration-options-reference)
16
+ - [Usage](#usage)
17
+ - [Endpoints](#endpoints)
18
+ - [OpenAI Compatible](#openai-compatible)
19
+ - [Not OpenAI Compatible](#not-openai-compatible)
20
+ - [Timeout Override](#timeout-override)
21
+ - [Models](#models)
22
+ - [Chat Models](#chat-models)
23
+ - [Embedding Models](#embedding-models)
24
+ - [Examples](#examples)
25
+ - [Chat Completion Example](#chat-completion-example)
26
+ - [Embedding Example](#embedding-example)
27
+ - [o1 Chat Example](#o1-chat-example)
28
+ - [OpenAI Client Example](#openai-client-example)
29
+ - [Folder Structure](#folder-structure)
30
+ - [Bug Reports and Contributions](#bug-reports-and-contributions)
31
+
32
+ ## Deployment
33
+
34
+ ### Prerequisites
35
+
36
+ - **Python 3.10+** is required \
37
+ we recommend using conda/mamba, pipx, etc. to manage an exclusive environment \
38
+ **Conda/Mamba** Download and install from: <https://conda-forge.org/download/>
39
+
40
+ - Install dependencies:
41
+
42
+ ```bash
43
+ pip install .
44
+ ```
45
+
46
+ ### Configuration File
47
+
48
+ If you don't want to bother manually configuring it, the [First-Time Setup](#first-time-setup) will automatically create it for you.
49
+
50
+ The application uses `config.yaml` for configuration. Here's an example:
51
+
52
+ ```yaml
53
+ port: 44497
54
+ host: 0.0.0.0
55
+ argo_url: "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/chat/"
56
+ argo_stream_url: "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/streamchat/"
57
+ argo_embedding_url: "https://apps.inside.anl.gov/argoapi/api/v1/resource/embed/"
58
+ user: "your_username" # set during first-time setup
59
+ verbose: true # can be changed during setup
60
+ num_workers: 5
61
+ timeout: 600 # in seconds
62
+ ```
63
+
64
+ ### Running the Application
65
+
66
+ To start the application:
67
+
68
+ ```bash
69
+ argo-proxy [config_path]
70
+ ```
71
+
72
+ - Without arguments: search for `config.yaml` under `~/.config/argoproxy/`, `~/.argoproxy/`, or current directory
73
+ - With path: uses specified config file
74
+
75
+ ```bash
76
+ argo-proxy /path/to/config.yaml
77
+ ```
78
+
79
+ ### First-Time Setup
80
+
81
+ When running without an existing config file:
82
+
83
+ 1. The script offers to create `config.yaml` from `config.sample.yaml`
84
+ 2. Automatically selects a random available port (can be overridden)
85
+ 3. Prompts for:
86
+ - Your username (sets `user` field)
87
+ - Verbose mode preference (sets `verbose` field)
88
+ 4. Validates connectivity to configured URLs
89
+ 5. Shows the generated config in a formatted display for review before proceeding
90
+
91
+ Example session:
92
+
93
+ ```bash
94
+ $ argo-proxy
95
+ No valid configuration found.
96
+ Would you like to create it from config.sample.yaml? [Y/n]:
97
+ Creating new configuration...
98
+ Use port [52226]? [Y/n/<port>]:
99
+ Enter your username: your_username
100
+ Enable verbose mode? [Y/n]
101
+ Set timeout to [600] seconds? [Y/n/<timeout>]
102
+ Created new configuration at: /home/your_username/.config/argoproxy/config.yaml
103
+ Using port 52226...
104
+ Validating URL connectivity...
105
+ Current configuration:
106
+ --------------------------------------
107
+ {
108
+ "host": "0.0.0.0",
109
+ "port": 52226,
110
+ "user": "your_username",
111
+ "argo_url": "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/chat/",
112
+ "argo_stream_url": "https://apps-dev.inside.anl.gov/argoapi/api/v1/resource/streamchat/",
113
+ "argo_embedding_url": "https://apps.inside.anl.gov/argoapi/api/v1/resource/embed/",
114
+ "verbose": true,
115
+ "num_workers": 5,
116
+ "timeout": 600
117
+ }
118
+ --------------------------------------
119
+ # ... proxy server starting info display ...
120
+ ```
121
+
122
+ ### Configuration Options Reference
123
+
124
+ | Option | Description | Default |
125
+ | -------------------- | ------------------------------------------------------------ | ------------------ |
126
+ | `host` | Host address to bind the server to | `0.0.0.0` |
127
+ | `port` | Application port (random available port selected by default) | randomly assigned |
128
+ | `argo_url` | ARGO chat API URL | Dev URL (for now) |
129
+ | `argo_stream_url` | ARGO stream API URL | Dev URL (for now) |
130
+ | `argo_embedding_url` | ARGO embedding API URL | Prod URL |
131
+ | `user` | Your username | (Set during setup) |
132
+ | `verbose` | Debug logging | `true` |
133
+ | `num_workers` | Worker processes | `5` |
134
+ | `timeout` | Request timeout (seconds) | `600` |
135
+
136
+ ### `argo-proxy` CLI Available Options
137
+
138
+ ```bash
139
+ $ argo-proxy -h
140
+ usage: argo-proxy [-h] [--show] [--host HOST] [--port PORT] [--num-worker NUM_WORKER]
141
+ [--verbose | --quiet] [--version]
142
+ [config]
143
+
144
+ Argo Proxy CLI
145
+
146
+ positional arguments:
147
+ config Path to the configuration file
148
+
149
+ options:
150
+ -h, --help show this help message and exit
151
+ --show, -s Show the current configuration during launch
152
+ --host HOST, -H HOST Host address to bind the server to
153
+ --port PORT, -p PORT Port number to bind the server to
154
+ --num-worker NUM_WORKER, -n NUM_WORKER
155
+ Number of worker processes to run
156
+ --verbose, -v Enable verbose logging, override if `verbose` set False in config
157
+ --quiet, -q Disable verbose logging, override if `verbose` set True in config
158
+ --version, -V Show the version and exit.
159
+ ```
160
+
161
+ ## Usage
162
+
163
+ ### Endpoints
164
+
165
+ #### OpenAI Compatible
166
+
167
+ These endpoints convert responses from the ARGO API to be compatible with OpenAI's format:
168
+
169
+ - **`/v1/chat/completions`**: Converts ARGO chat/completions responses to OpenAI-compatible format.
170
+ - **`/v1/completions`**: Legacy API for conversions to OpenAI format.
171
+ - **`/v1/embeddings`**: Accesses ARGO Embedding API with response conversion.
172
+ - **`/v1/models`**: Lists available models in OpenAI-compatible format.
173
+
174
+ #### Not OpenAI Compatible
175
+
176
+ These endpoints interact directly with the ARGO API and do not convert responses to OpenAI's format:
177
+
178
+ - **`/v1/chat`**: Proxies requests to the ARGO API without conversion.
179
+ - **`/v1/status`**: Responds with a simple "hello" from GPT-4o, letting you know the proxy is alive.
180
+
181
+ #### Timeout Override
182
+
183
+ You can override the default timeout with a `timeout` parameter in your request.
184
+
185
+ Details on how to make such an override in different query flavors: [Timeout Override Examples](timeout_examples.md)
186
+
187
+ ### Models
188
+
189
+ #### Chat Models
190
+
191
+ | Original ARGO Model Name | Argo Proxy Name |
192
+ | ------------------------ | ---------------------------------------- |
193
+ | `gpt35` | `argo:gpt-3.5-turbo` |
194
+ | `gpt35large` | `argo:gpt-3.5-turbo-16k` |
195
+ | `gpt4` | `argo:gpt-4` |
196
+ | `gpt4large` | `argo:gpt-4-32k` |
197
+ | `gpt4turbo` | `argo:gpt-4-turbo-preview` |
198
+ | `gpt4o` | `argo:gpt-4o` |
199
+ | `gpt4olatest` | `argo:gpt-4o-latest` |
200
+ | `gpto1preview` | `argo:gpt-o1-preview`, `argo:o1-preview` |
201
+ | `gpto1mini` | `argo:gpt-o1-mini` , `argo:o1-mini` |
202
+ | `gpto3mini` | `argo:gpt-o3-mini` , `argo:o3-mini` |
203
+ | `gpto1` | `argo:gpt-o1` , `argo:o1` |
204
+
205
+ #### Embedding Models
206
+
207
+ | Original ARGO Model Name | Argo Proxy Name |
208
+ | ------------------------ | ----------------------------- |
209
+ | `ada002` | `argo:text-embedding-ada-002` |
210
+ | `v3small` | `argo:text-embedding-3-small` |
211
+ | `v3large` | `argo:text-embedding-3-large` |
212
+
213
+ ### Examples
214
+
215
+ #### Chat Completion Example
216
+
217
+ For examples of how to use the `/v1/chat/completions`, `/v1/completions`, and `/v1/chat` endpoints, see the following:
218
+
219
+ - [chat_completions_example.py](examples/chat_completions_example.py)
220
+ - [chat_completions_example_stream.py](examples/chat_completions_example_stream.py)
221
+ - [completions_example.py](examples/completions_example.py)
222
+ - [completions_example_stream.py](examples/completions_example_stream.py)
223
+ - [chat_example.py](examples/chat_example.py)
224
+ - [chat_example_stream.py](examples/chat_example_stream.py)
225
+
226
+ #### Embedding Example
227
+
228
+ - [embedding_example.py](examples/embedding_example.py)
229
+
230
+ #### o1 Chat Example
231
+
232
+ - [o1_chat_example.py](examples/o1_chat_example.py)
233
+
234
+ #### OpenAI Client Example
235
+
236
+ - [openai_o3_chat_example.py](examples/o3_chat_example_pyclient.py)
237
+
238
+ ## Folder Structure
239
+
240
+ The following is an overview of the project's directory structure:
241
+
242
+ ```
243
+ $ tree -I "__pycache__|*.egg-info|dist|dev_scripts|config.yaml"
244
+ .
245
+ ├── config.sample.yaml
246
+ ├── examples
247
+ │   ├── chat_completions_example.py
248
+ │   ├── chat_completions_example_stream.py
249
+ │   ├── chat_example.py
250
+ │   ├── chat_example_stream.py
251
+ │   ├── completions_example.py
252
+ │   ├── completions_example_stream.py
253
+ │   ├── embedding_example.py
254
+ │   ├── o1_chat_example.py
255
+ │   └── o3_chat_example_pyclient.py
256
+ ├── LICENSE
257
+ ├── Makefile
258
+ ├── pyproject.toml
259
+ ├── README.md
260
+ ├── run_app.sh
261
+ ├── src
262
+ │   └── argoproxy
263
+ │   ├── app.py
264
+ │   ├── chat.py
265
+ │   ├── cli.py
266
+ │   ├── completions.py
267
+ │   ├── config.py
268
+ │   ├── constants.py
269
+ │   ├── embed.py
270
+ │   ├── extras.py
271
+ │   ├── __init__.py
272
+ │   ├── py.typed
273
+ │   └── utils.py
274
+ └── timeout_examples.md
275
+
276
+ 4 directories, 27 files
277
+ ```
278
+
279
+ ## Bug Reports and Contributions
280
+
281
+ This project was developed in my spare time. Bugs and issues may exist. If you encounter any or have suggestions for improvements, please [open an issue](https://github.com/Oaklight/argo-proxy/issues/new) or [submit a pull request](https://github.com/Oaklight/argo-proxy/compare). Your contributions are highly appreciated!
@@ -0,0 +1,52 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "argo-proxy"
7
+ version = "2.5.0"
8
+ authors = [{ name = "Peng Ding", email = "oaklight@gmx.com" }]
9
+ description = "Proxy server to Argo API, OpenAI format compatible"
10
+ readme = "README.md"
11
+ requires-python = ">=3.10"
12
+ license = "MIT"
13
+ classifiers = [
14
+ "Intended Audience :: Developers",
15
+ "Programming Language :: Python :: 3",
16
+ "Topic :: Software Development",
17
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
18
+ ]
19
+
20
+ dependencies = [
21
+ "aiohttp>=3.12.2",
22
+ "loguru>=0.7.3",
23
+ "PyYAML>=6.0.2",
24
+ "sanic>=25.3.0",
25
+ "tiktoken>=0.9.0",
26
+ "setuptools<75",
27
+ ]
28
+
29
+ [project.optional-dependencies]
30
+ dev = [
31
+ "dotenv>=0.9.9",
32
+ "openai>=1.79.0",
33
+ "mypy>=1.14.1",
34
+ "build>=1.2.2.post1",
35
+ "twine>=6.1.0",
36
+ "httpx>=0.28.1",
37
+ "requests>=2.25.1",
38
+ ]
39
+
40
+ [project.urls]
41
+ Documentation = "https://github.com/Oaklight/argo-openai-proxy#readme"
42
+ Repository = "https://github.com/Oaklight/argo-openai-proxy"
43
+ Issues = "https://github.com/Oaklight/argo-openai-proxy/issues"
44
+
45
+ [project.scripts]
46
+ argo-proxy = "argoproxy.cli:main"
47
+
48
+ [tool.setuptools.packages.find]
49
+ where = ["src"]
50
+
51
+ [tool.setuptools.package-data]
52
+ "argoproxy" = ["py.typed"]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+