openai-admin-usage-mcp-server 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openai_admin_usage_mcp_server-0.1.0/PKG-INFO +7 -0
- openai_admin_usage_mcp_server-0.1.0/README.md +145 -0
- openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/PKG-INFO +7 -0
- openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/SOURCES.txt +9 -0
- openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/dependency_links.txt +1 -0
- openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/entry_points.txt +2 -0
- openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/requires.txt +2 -0
- openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/top_level.txt +1 -0
- openai_admin_usage_mcp_server-0.1.0/pyproject.toml +16 -0
- openai_admin_usage_mcp_server-0.1.0/server.py +998 -0
- openai_admin_usage_mcp_server-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# OpenAI Admin Usage MCP Server (Python)
|
|
2
|
+
|
|
3
|
+
This MCP server exposes tools:
|
|
4
|
+
|
|
5
|
+
- `get_organization_costs` -> calls `GET /v1/organization/costs`
|
|
6
|
+
- `get_organization_usage_completions` -> calls `GET /v1/organization/usage/completions`
|
|
7
|
+
- `get_organization_usage_audio_speeches` -> calls `GET /v1/organization/usage/audio_speeches`
|
|
8
|
+
- `get_organization_usage_audio_transcriptions` -> calls `GET /v1/organization/usage/audio_transcriptions`
|
|
9
|
+
- `get_organization_usage_code_interpreter_sessions` -> calls `GET /v1/organization/usage/code_interpreter_sessions`
|
|
10
|
+
- `get_organization_usage_embeddings` -> calls `GET /v1/organization/usage/embeddings`
|
|
11
|
+
- `get_organization_usage_images` -> calls `GET /v1/organization/usage/images`
|
|
12
|
+
- `get_organization_usage_moderations` -> calls `GET /v1/organization/usage/moderations`
|
|
13
|
+
- `get_organization_usage_vector_stores` -> calls `GET /v1/organization/usage/vector_stores`
|
|
14
|
+
- `get_organization_users` -> calls `GET /v1/organization/users`
|
|
15
|
+
- `get_organization_user` -> calls `GET /v1/organization/users/{user_id}`
|
|
16
|
+
- `get_organization_projects` -> calls `GET /v1/organization/projects`
|
|
17
|
+
|
|
18
|
+
All tools auto-follow pagination and return only merged `data` (no pagination fields in output).
|
|
19
|
+
|
|
20
|
+
## Requirements
|
|
21
|
+
|
|
22
|
+
- Python 3.10+
|
|
23
|
+
- OpenAI Admin key in `OPENAI_ADMIN_KEY` (preferred) or `OPENAI_API_KEY`
|
|
24
|
+
|
|
25
|
+
## Install
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
python -m venv .venv
|
|
29
|
+
source .venv/bin/activate
|
|
30
|
+
pip install -e .
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## Run
|
|
34
|
+
|
|
35
|
+
```bash
|
|
36
|
+
export OPENAI_ADMIN_KEY="your_admin_key"
|
|
37
|
+
openai-admin-usage-mcp-server
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
## Tool Input
|
|
41
|
+
|
|
42
|
+
### `get_organization_costs`
|
|
43
|
+
|
|
44
|
+
- `start_time` (required, integer Unix seconds)
|
|
45
|
+
- `bucket_width` (optional, default `"1d"`)
|
|
46
|
+
- `end_time` (optional, integer Unix seconds)
|
|
47
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"line_item"`; you can pass one or both)
|
|
48
|
+
- `project_ids` (optional, list of project IDs)
|
|
49
|
+
|
|
50
|
+
### `get_organization_usage_completions`
|
|
51
|
+
|
|
52
|
+
- `start_time` (required, integer Unix seconds)
|
|
53
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
54
|
+
- `end_time` (optional, integer Unix seconds)
|
|
55
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"user_id"`, `"api_key_id"`, `"model"`, `"batch"`, `"service_tier"`; any combination is allowed)
|
|
56
|
+
- `project_ids` (optional, list of project IDs)
|
|
57
|
+
- `user_ids` (optional, list of user IDs)
|
|
58
|
+
- `api_key_ids` (optional, list of API key IDs)
|
|
59
|
+
- `models` (optional, list of model names)
|
|
60
|
+
- `batch` (optional, boolean)
|
|
61
|
+
|
|
62
|
+
### `get_organization_usage_audio_speeches`
|
|
63
|
+
|
|
64
|
+
- `start_time` (required, integer Unix seconds)
|
|
65
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
66
|
+
- `end_time` (optional, integer Unix seconds)
|
|
67
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"user_id"`, `"api_key_id"`, `"model"`; any combination is allowed)
|
|
68
|
+
- `project_ids` (optional, list of project IDs)
|
|
69
|
+
- `user_ids` (optional, list of user IDs)
|
|
70
|
+
- `api_key_ids` (optional, list of API key IDs)
|
|
71
|
+
- `models` (optional, list of model names)
|
|
72
|
+
|
|
73
|
+
### `get_organization_usage_audio_transcriptions`
|
|
74
|
+
|
|
75
|
+
- `start_time` (required, integer Unix seconds)
|
|
76
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
77
|
+
- `end_time` (optional, integer Unix seconds)
|
|
78
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"user_id"`, `"api_key_id"`, `"model"`; any combination is allowed)
|
|
79
|
+
- `project_ids` (optional, list of project IDs)
|
|
80
|
+
- `user_ids` (optional, list of user IDs)
|
|
81
|
+
- `api_key_ids` (optional, list of API key IDs)
|
|
82
|
+
- `models` (optional, list of model names)
|
|
83
|
+
|
|
84
|
+
### `get_organization_usage_code_interpreter_sessions`
|
|
85
|
+
|
|
86
|
+
- `start_time` (required, integer Unix seconds)
|
|
87
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
88
|
+
- `end_time` (optional, integer Unix seconds)
|
|
89
|
+
- `group_by` (optional array; only `"project_id"` is allowed)
|
|
90
|
+
- `project_ids` (optional, list of project IDs)
|
|
91
|
+
|
|
92
|
+
### `get_organization_usage_embeddings`
|
|
93
|
+
|
|
94
|
+
- `start_time` (required, integer Unix seconds)
|
|
95
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
96
|
+
- `end_time` (optional, integer Unix seconds)
|
|
97
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"user_id"`, `"api_key_id"`, `"model"`; any combination is allowed)
|
|
98
|
+
- `project_ids` (optional, list of project IDs)
|
|
99
|
+
- `user_ids` (optional, list of user IDs)
|
|
100
|
+
- `api_key_ids` (optional, list of API key IDs)
|
|
101
|
+
- `models` (optional, list of model names)
|
|
102
|
+
|
|
103
|
+
### `get_organization_usage_images`
|
|
104
|
+
|
|
105
|
+
- `start_time` (required, integer Unix seconds)
|
|
106
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
107
|
+
- `end_time` (optional, integer Unix seconds)
|
|
108
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"user_id"`, `"api_key_id"`, `"model"`, `"size"`, `"source"`; any combination is allowed)
|
|
109
|
+
- `project_ids` (optional, list of project IDs)
|
|
110
|
+
- `user_ids` (optional, list of user IDs)
|
|
111
|
+
- `api_key_ids` (optional, list of API key IDs)
|
|
112
|
+
- `models` (optional, list of model names)
|
|
113
|
+
- `sizes` (optional, list of `256x256`, `512x512`, `1024x1024`, `1792x1792`, `1024x1792`)
|
|
114
|
+
- `sources` (optional, list of `image.generation`, `image.edit`, `image.variation`)
|
|
115
|
+
|
|
116
|
+
### `get_organization_usage_moderations`
|
|
117
|
+
|
|
118
|
+
- `start_time` (required, integer Unix seconds)
|
|
119
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
120
|
+
- `end_time` (optional, integer Unix seconds)
|
|
121
|
+
- `group_by` (optional array; allowed values: `"project_id"`, `"user_id"`, `"api_key_id"`, `"model"`; any combination is allowed)
|
|
122
|
+
- `project_ids` (optional, list of project IDs)
|
|
123
|
+
- `user_ids` (optional, list of user IDs)
|
|
124
|
+
- `api_key_ids` (optional, list of API key IDs)
|
|
125
|
+
- `models` (optional, list of model names)
|
|
126
|
+
|
|
127
|
+
### `get_organization_usage_vector_stores`
|
|
128
|
+
|
|
129
|
+
- `start_time` (required, integer Unix seconds)
|
|
130
|
+
- `bucket_width` (optional, `"1m"` / `"1h"` / `"1d"`, default `"1d"`)
|
|
131
|
+
- `end_time` (optional, integer Unix seconds)
|
|
132
|
+
- `group_by` (optional array; only `"project_id"` is allowed)
|
|
133
|
+
- `project_ids` (optional, list of project IDs)
|
|
134
|
+
|
|
135
|
+
### `get_organization_users`
|
|
136
|
+
|
|
137
|
+
- `emails` (optional, list of email addresses to filter users)
|
|
138
|
+
|
|
139
|
+
### `get_organization_user`
|
|
140
|
+
|
|
141
|
+
- `user_id` (required, organization user ID string)
|
|
142
|
+
|
|
143
|
+
### `get_organization_projects`
|
|
144
|
+
|
|
145
|
+
- `include_archived` (optional, boolean; if true includes archived projects)
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
server.py
|
|
4
|
+
openai_admin_usage_mcp_server.egg-info/PKG-INFO
|
|
5
|
+
openai_admin_usage_mcp_server.egg-info/SOURCES.txt
|
|
6
|
+
openai_admin_usage_mcp_server.egg-info/dependency_links.txt
|
|
7
|
+
openai_admin_usage_mcp_server.egg-info/entry_points.txt
|
|
8
|
+
openai_admin_usage_mcp_server.egg-info/requires.txt
|
|
9
|
+
openai_admin_usage_mcp_server.egg-info/top_level.txt
|
openai_admin_usage_mcp_server-0.1.0/openai_admin_usage_mcp_server.egg-info/dependency_links.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
server
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "openai-admin-usage-mcp-server"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "MCP server exposing OpenAI organization usage and costs endpoints as tools"
|
|
5
|
+
requires-python = ">=3.10"
|
|
6
|
+
dependencies = [
|
|
7
|
+
"mcp>=1.6.0",
|
|
8
|
+
"httpx>=0.27.0",
|
|
9
|
+
]
|
|
10
|
+
|
|
11
|
+
[project.scripts]
|
|
12
|
+
openai-admin-usage-mcp-server = "server:main"
|
|
13
|
+
|
|
14
|
+
[build-system]
|
|
15
|
+
requires = ["setuptools>=68", "wheel"]
|
|
16
|
+
build-backend = "setuptools.build_meta"
|
|
@@ -0,0 +1,998 @@
|
|
|
1
|
+
"""MCP server tools for OpenAI organization usage/costs.
|
|
2
|
+
|
|
3
|
+
All tools auto-follow pagination and return merged output as {"data": [...]}.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import os
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
import httpx
|
|
12
|
+
from mcp.server.fastmcp import FastMCP
|
|
13
|
+
|
|
14
|
+
# OpenAI Admin API endpoints (all GET, authenticated with an org admin key).
OPENAI_COSTS_URL = "https://api.openai.com/v1/organization/costs"
OPENAI_USAGE_COMPLETIONS_URL = "https://api.openai.com/v1/organization/usage/completions"
OPENAI_USAGE_AUDIO_SPEECHES_URL = "https://api.openai.com/v1/organization/usage/audio_speeches"
OPENAI_USAGE_AUDIO_TRANSCRIPTIONS_URL = "https://api.openai.com/v1/organization/usage/audio_transcriptions"
OPENAI_USAGE_CODE_INTERPRETER_SESSIONS_URL = "https://api.openai.com/v1/organization/usage/code_interpreter_sessions"
OPENAI_USAGE_EMBEDDINGS_URL = "https://api.openai.com/v1/organization/usage/embeddings"
OPENAI_USAGE_IMAGES_URL = "https://api.openai.com/v1/organization/usage/images"
OPENAI_USAGE_MODERATIONS_URL = "https://api.openai.com/v1/organization/usage/moderations"
OPENAI_USAGE_VECTOR_STORES_URL = "https://api.openai.com/v1/organization/usage/vector_stores"
OPENAI_ORGANIZATION_USERS_URL = "https://api.openai.com/v1/organization/users"
OPENAI_ORGANIZATION_PROJECTS_URL = "https://api.openai.com/v1/organization/projects"
# Per-endpoint whitelists of `group_by` keys accepted by each Admin API route.
ALLOWED_COSTS_GROUP_BY = {"project_id", "line_item"}
# Bucket widths shared by every usage endpoint (the costs endpoint only supports "1d").
ALLOWED_COMPLETIONS_BUCKET_WIDTH = {"1m", "1h", "1d"}
ALLOWED_COMPLETIONS_GROUP_BY = {
    "project_id",
    "user_id",
    "api_key_id",
    "model",
    "batch",
    "service_tier",
}
ALLOWED_AUDIO_SPEECHES_GROUP_BY = {
    "project_id",
    "user_id",
    "api_key_id",
    "model",
}
ALLOWED_AUDIO_TRANSCRIPTIONS_GROUP_BY = {
    "project_id",
    "user_id",
    "api_key_id",
    "model",
}
ALLOWED_CODE_INTERPRETER_SESSIONS_GROUP_BY = {"project_id"}
ALLOWED_EMBEDDINGS_GROUP_BY = {
    "project_id",
    "user_id",
    "api_key_id",
    "model",
}
ALLOWED_IMAGES_GROUP_BY = {
    "project_id",
    "user_id",
    "api_key_id",
    "model",
    "size",
    "source",
}
# NOTE(review): "1792x1792" looks suspect — published OpenAI image sizes are
# "1792x1024"/"1024x1792"; confirm against the Admin API docs before changing.
ALLOWED_IMAGES_SIZES = {"256x256", "512x512", "1024x1024", "1792x1792", "1024x1792"}
ALLOWED_IMAGES_SOURCES = {"image.generation", "image.edit", "image.variation"}
ALLOWED_MODERATIONS_GROUP_BY = {
    "project_id",
    "user_id",
    "api_key_id",
    "model",
}
ALLOWED_VECTOR_STORES_GROUP_BY = {"project_id"}

# FastMCP server instance; tools register themselves below via @mcp.tool().
mcp = FastMCP("openai-admin-usage")
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _validate_costs_inputs(
|
|
76
|
+
start_time: int,
|
|
77
|
+
bucket_width: str,
|
|
78
|
+
end_time: int | None,
|
|
79
|
+
group_by: list[str] | None,
|
|
80
|
+
project_ids: list[str] | None,
|
|
81
|
+
) -> None:
|
|
82
|
+
if not isinstance(start_time, int):
|
|
83
|
+
raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
|
|
84
|
+
|
|
85
|
+
if bucket_width != "1d":
|
|
86
|
+
raise ValueError("`bucket_width` currently supports only `1d`.")
|
|
87
|
+
|
|
88
|
+
if end_time is not None and not isinstance(end_time, int):
|
|
89
|
+
raise ValueError("`end_time` must be a Unix timestamp in seconds.")
|
|
90
|
+
|
|
91
|
+
if group_by is not None:
|
|
92
|
+
if not isinstance(group_by, list):
|
|
93
|
+
raise ValueError("`group_by` must be an array.")
|
|
94
|
+
invalid = [value for value in group_by if value not in ALLOWED_COSTS_GROUP_BY]
|
|
95
|
+
if invalid:
|
|
96
|
+
raise ValueError(
|
|
97
|
+
"`group_by` supports only `project_id` and `line_item`."
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
if project_ids is not None:
|
|
101
|
+
if not isinstance(project_ids, list) or not all(
|
|
102
|
+
isinstance(item, str) for item in project_ids
|
|
103
|
+
):
|
|
104
|
+
raise ValueError("`project_ids` must be an array of strings.")
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _validate_string_list(value: list[str] | None, field_name: str) -> None:
|
|
108
|
+
if value is not None and (
|
|
109
|
+
not isinstance(value, list) or not all(isinstance(item, str) for item in value)
|
|
110
|
+
):
|
|
111
|
+
raise ValueError(f"`{field_name}` must be an array of strings.")
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _validate_completions_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
    user_ids: list[str] | None,
    api_key_ids: list[str] | None,
    models: list[str] | None,
    batch: bool | None,
) -> None:
    """Validate arguments for the `get_organization_usage_completions` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        unsupported = [key for key in group_by if key not in ALLOWED_COMPLETIONS_GROUP_BY]
        if unsupported:
            raise ValueError(
                "`group_by` supports only project_id, user_id, api_key_id, "
                "model, batch, service_tier."
            )

    # Every ID/name filter shares the same "list of strings" contract.
    for values, label in (
        (project_ids, "project_ids"),
        (user_ids, "user_ids"),
        (api_key_ids, "api_key_ids"),
        (models, "models"),
    ):
        _validate_string_list(values, label)

    if batch is not None and not isinstance(batch, bool):
        raise ValueError("`batch` must be a boolean.")
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def _validate_audio_speeches_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
    user_ids: list[str] | None,
    api_key_ids: list[str] | None,
    models: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_audio_speeches` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        unknown = [key for key in group_by if key not in ALLOWED_AUDIO_SPEECHES_GROUP_BY]
        if unknown:
            raise ValueError(
                "`group_by` supports only project_id, user_id, api_key_id, model."
            )

    for values, label in (
        (project_ids, "project_ids"),
        (user_ids, "user_ids"),
        (api_key_ids, "api_key_ids"),
        (models, "models"),
    ):
        _validate_string_list(values, label)
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def _validate_audio_transcriptions_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
    user_ids: list[str] | None,
    api_key_ids: list[str] | None,
    models: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_audio_transcriptions` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        rejected = [
            key for key in group_by if key not in ALLOWED_AUDIO_TRANSCRIPTIONS_GROUP_BY
        ]
        if rejected:
            raise ValueError(
                "`group_by` supports only project_id, user_id, api_key_id, model."
            )

    for candidate, label in (
        (project_ids, "project_ids"),
        (user_ids, "user_ids"),
        (api_key_ids, "api_key_ids"),
        (models, "models"),
    ):
        _validate_string_list(candidate, label)
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def _validate_code_interpreter_sessions_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_code_interpreter_sessions` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        # This endpoint only allows grouping by project.
        unsupported = [
            key
            for key in group_by
            if key not in ALLOWED_CODE_INTERPRETER_SESSIONS_GROUP_BY
        ]
        if unsupported:
            raise ValueError("`group_by` supports only `project_id`.")

    _validate_string_list(project_ids, "project_ids")
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def _validate_embeddings_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
    user_ids: list[str] | None,
    api_key_ids: list[str] | None,
    models: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_embeddings` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        bad_keys = [key for key in group_by if key not in ALLOWED_EMBEDDINGS_GROUP_BY]
        if bad_keys:
            raise ValueError(
                "`group_by` supports only project_id, user_id, api_key_id, model."
            )

    for values, label in (
        (project_ids, "project_ids"),
        (user_ids, "user_ids"),
        (api_key_ids, "api_key_ids"),
        (models, "models"),
    ):
        _validate_string_list(values, label)
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
def _validate_images_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
    user_ids: list[str] | None,
    api_key_ids: list[str] | None,
    models: list[str] | None,
    sizes: list[str] | None,
    sources: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_images` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        disallowed = [key for key in group_by if key not in ALLOWED_IMAGES_GROUP_BY]
        if disallowed:
            raise ValueError(
                "`group_by` supports only project_id, user_id, api_key_id, model, size, source."
            )

    for values, label in (
        (project_ids, "project_ids"),
        (user_ids, "user_ids"),
        (api_key_ids, "api_key_ids"),
        (models, "models"),
        (sizes, "sizes"),
        (sources, "sources"),
    ):
        _validate_string_list(values, label)

    # By this point `sizes`/`sources` are known to be lists of strings,
    # so enum membership checks cannot raise TypeError.
    if sizes is not None and any(size not in ALLOWED_IMAGES_SIZES for size in sizes):
        raise ValueError(
            "`sizes` supports only 256x256, 512x512, 1024x1024, 1792x1792, 1024x1792."
        )
    if sources is not None and any(src not in ALLOWED_IMAGES_SOURCES for src in sources):
        raise ValueError(
            "`sources` supports only image.generation, image.edit, image.variation."
        )
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def _validate_moderations_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
    user_ids: list[str] | None,
    api_key_ids: list[str] | None,
    models: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_moderations` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        not_allowed = [key for key in group_by if key not in ALLOWED_MODERATIONS_GROUP_BY]
        if not_allowed:
            raise ValueError(
                "`group_by` supports only project_id, user_id, api_key_id, model."
            )

    for values, label in (
        (project_ids, "project_ids"),
        (user_ids, "user_ids"),
        (api_key_ids, "api_key_ids"),
        (models, "models"),
    ):
        _validate_string_list(values, label)
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
def _validate_vector_stores_inputs(
    start_time: int,
    bucket_width: str,
    end_time: int | None,
    group_by: list[str] | None,
    project_ids: list[str] | None,
) -> None:
    """Validate arguments for the `get_organization_usage_vector_stores` tool.

    Raises:
        ValueError: If any argument fails validation.
    """
    if not isinstance(start_time, int):
        raise ValueError("`start_time` is required and must be a Unix timestamp in seconds.")
    if bucket_width not in ALLOWED_COMPLETIONS_BUCKET_WIDTH:
        raise ValueError("`bucket_width` supports only `1m`, `1h`, or `1d`.")
    if end_time is not None and not isinstance(end_time, int):
        raise ValueError("`end_time` must be a Unix timestamp in seconds.")

    if group_by is not None:
        if not isinstance(group_by, list):
            raise ValueError("`group_by` must be an array.")
        # Vector stores usage can only be grouped by project.
        unsupported = [key for key in group_by if key not in ALLOWED_VECTOR_STORES_GROUP_BY]
        if unsupported:
            raise ValueError("`group_by` supports only `project_id`.")

    _validate_string_list(project_ids, "project_ids")
|
|
397
|
+
|
|
398
|
+
|
|
399
|
+
def _validate_users_inputs(emails: list[str] | None) -> None:
    """Validate arguments for the `get_organization_users` tool.

    Raises:
        ValueError: If `emails` is provided and is not a list of strings.
    """
    _validate_string_list(emails, "emails")
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def _validate_user_id(user_id: str) -> None:
|
|
404
|
+
if not isinstance(user_id, str) or not user_id.strip():
|
|
405
|
+
raise ValueError("`user_id` is required and must be a non-empty string.")
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
def _validate_projects_inputs(include_archived: bool | None) -> None:
|
|
409
|
+
if include_archived is not None and not isinstance(include_archived, bool):
|
|
410
|
+
raise ValueError("`include_archived` must be a boolean.")
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
def _get_admin_key() -> str:
|
|
414
|
+
admin_key = os.getenv("OPENAI_ADMIN_KEY") or os.getenv("OPENAI_API_KEY")
|
|
415
|
+
if not admin_key:
|
|
416
|
+
raise ValueError(
|
|
417
|
+
"Missing OPENAI_ADMIN_KEY (preferred) or OPENAI_API_KEY environment variable."
|
|
418
|
+
)
|
|
419
|
+
return admin_key
|
|
420
|
+
|
|
421
|
+
|
|
422
|
+
async def _openai_get(url: str, query: dict[str, Any]) -> dict[str, Any]:
    """GET *url* with *query* using the admin key and return the parsed JSON body.

    Raises:
        RuntimeError: On any HTTP error status, with the API's error payload
            (or raw text when the body is not JSON).
    """
    request_headers = {
        "Authorization": f"Bearer {_get_admin_key()}",
        "Content-Type": "application/json",
    }

    async with httpx.AsyncClient(timeout=30.0) as client:
        response = await client.get(url, params=query, headers=request_headers)
        if not response.is_error:
            return response.json()
        # Surface the server's own error body when possible.
        try:
            detail = response.json()
        except Exception:
            detail = {"message": response.text}
        raise RuntimeError(
            f"OpenAI API error {response.status_code}: {detail}"
        )
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
async def _openai_get_all_pages(url: str, query: dict[str, Any]) -> dict[str, Any]:
    """Follow `page`/`next_page` cursor pagination and return merged data only.

    Returns:
        {"data": [...]} with all pages concatenated; pagination fields dropped.

    Raises:
        RuntimeError: If the API repeats a `next_page` cursor (infinite loop guard).
    """
    params = dict(query)
    visited_cursors: set[str] = set()
    rows: list[dict[str, Any]] = []

    while True:
        page = await _openai_get(url, params)
        rows.extend(page.get("data", []))

        cursor = page.get("next_page")
        if not bool(page.get("has_more")) or not cursor:
            return {"data": rows}
        if cursor in visited_cursors:
            raise RuntimeError("Pagination loop detected from repeated `next_page` cursor.")
        visited_cursors.add(cursor)
        params["page"] = cursor
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
async def _openai_get_all_after_pages(url: str, query: dict[str, Any]) -> dict[str, Any]:
    """Follow `after`/`last_id` cursor pagination and return merged data only.

    Returns:
        {"data": [...]} with all pages concatenated; pagination fields dropped.

    Raises:
        RuntimeError: If the API repeats a `last_id` cursor (infinite loop guard).
    """
    params = dict(query)
    visited_cursors: set[str] = set()
    rows: list[dict[str, Any]] = []

    while True:
        page = await _openai_get(url, params)
        rows.extend(page.get("data", []))

        cursor = page.get("last_id")
        if not bool(page.get("has_more")) or not cursor:
            return {"data": rows}
        if cursor in visited_cursors:
            raise RuntimeError("Pagination loop detected from repeated `last_id` cursor.")
        visited_cursors.add(cursor)
        params["after"] = cursor
|
|
483
|
+
|
|
484
|
+
|
|
485
|
+
@mcp.tool()
async def get_organization_costs(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
) -> dict[str, Any]:
    """Get costs details for the organization via OpenAI Admin API.

    Args:
        start_time: Start time (Unix seconds), inclusive.
        bucket_width: Bucket width. Currently only "1d" is supported.
        end_time: End time (Unix seconds), exclusive.
        group_by: Optional grouping keys. Allowed values: "project_id", "line_item". Example: ["project_id"], ["line_item"], ["project_id", "line_item"]
        project_ids: Filter results to these OpenAI project IDs.
    """
    _validate_costs_inputs(start_time, bucket_width, end_time, group_by, project_ids)

    # Only send optional parameters the caller actually provided.
    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    for key, value in (("group_by", group_by), ("project_ids", project_ids)):
        if value:
            query[key] = value

    return await _openai_get_all_pages(OPENAI_COSTS_URL, query)
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
@mcp.tool()
async def get_organization_usage_completions(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
    user_ids: list[str] | None = None,
    api_key_ids: list[str] | None = None,
    models: list[str] | None = None,
    batch: bool | None = None,
) -> dict[str, Any]:
    """Fetch completions usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; any combination of "project_id",
            "user_id", "api_key_id", "model", "batch", "service_tier".
            Example: ["user_id"]
        project_ids: Restrict results to these project IDs.
        user_ids: Restrict results to these user IDs.
        api_key_ids: Restrict results to these API key IDs.
        models: Restrict results to these model names.
        batch: If true, batch jobs only; if false, non-batch only.
    """
    _validate_completions_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
        user_ids=user_ids,
        api_key_ids=api_key_ids,
        models=models,
        batch=batch,
    )

    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    # List-valued filters are only sent when non-empty.
    list_filters = {
        "group_by": group_by,
        "project_ids": project_ids,
        "user_ids": user_ids,
        "api_key_ids": api_key_ids,
        "models": models,
    }
    query.update({key: value for key, value in list_filters.items() if value})
    if batch is not None:
        query["batch"] = batch

    return await _openai_get_all_pages(OPENAI_USAGE_COMPLETIONS_URL, query)
|
|
576
|
+
|
|
577
|
+
|
|
578
|
+
@mcp.tool()
async def get_organization_usage_audio_speeches(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
    user_ids: list[str] | None = None,
    api_key_ids: list[str] | None = None,
    models: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch audio speeches usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; any combination of "project_id",
            "user_id", "api_key_id", "model". Example: ["user_id"]
        project_ids: Restrict results to these project IDs.
        user_ids: Restrict results to these user IDs.
        api_key_ids: Restrict results to these API key IDs.
        models: Restrict results to these model names.
    """
    _validate_audio_speeches_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
        user_ids=user_ids,
        api_key_ids=api_key_ids,
        models=models,
    )

    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    # List-valued filters are only sent when non-empty.
    list_filters = {
        "group_by": group_by,
        "project_ids": project_ids,
        "user_ids": user_ids,
        "api_key_ids": api_key_ids,
        "models": models,
    }
    query.update({key: value for key, value in list_filters.items() if value})

    return await _openai_get_all_pages(OPENAI_USAGE_AUDIO_SPEECHES_URL, query)
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
@mcp.tool()
async def get_organization_usage_audio_transcriptions(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
    user_ids: list[str] | None = None,
    api_key_ids: list[str] | None = None,
    models: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch audio transcriptions usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; any combination of "project_id",
            "user_id", "api_key_id", "model". Example: ["user_id"]
        project_ids: Restrict results to these project IDs.
        user_ids: Restrict results to these user IDs.
        api_key_ids: Restrict results to these API key IDs.
        models: Restrict results to these model names.
    """
    _validate_audio_transcriptions_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
        user_ids=user_ids,
        api_key_ids=api_key_ids,
        models=models,
    )

    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    # List-valued filters are only sent when non-empty.
    list_filters = {
        "group_by": group_by,
        "project_ids": project_ids,
        "user_ids": user_ids,
        "api_key_ids": api_key_ids,
        "models": models,
    }
    query.update({key: value for key, value in list_filters.items() if value})

    return await _openai_get_all_pages(OPENAI_USAGE_AUDIO_TRANSCRIPTIONS_URL, query)
|
|
686
|
+
|
|
687
|
+
|
|
688
|
+
@mcp.tool()
async def get_organization_usage_code_interpreter_sessions(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch code interpreter sessions usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; only "project_id" is allowed.
            Example: ["project_id"]
        project_ids: Restrict results to these project IDs.
    """
    _validate_code_interpreter_sessions_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
    )

    # Required parameters first; optional ones are added only when supplied.
    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    for key, value in (("group_by", group_by), ("project_ids", project_ids)):
        if value:
            query[key] = value

    return await _openai_get_all_pages(OPENAI_USAGE_CODE_INTERPRETER_SESSIONS_URL, query)
|
|
725
|
+
|
|
726
|
+
|
|
727
|
+
@mcp.tool()
async def get_organization_usage_embeddings(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
    user_ids: list[str] | None = None,
    api_key_ids: list[str] | None = None,
    models: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch embeddings usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; any combination of "project_id",
            "user_id", "api_key_id", "model". Example: ["user_id"]
        project_ids: Restrict results to these project IDs.
        user_ids: Restrict results to these user IDs.
        api_key_ids: Restrict results to these API key IDs.
        models: Restrict results to these model names.
    """
    _validate_embeddings_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
        user_ids=user_ids,
        api_key_ids=api_key_ids,
        models=models,
    )

    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    # List-valued filters are only sent when non-empty.
    list_filters = {
        "group_by": group_by,
        "project_ids": project_ids,
        "user_ids": user_ids,
        "api_key_ids": api_key_ids,
        "models": models,
    }
    query.update({key: value for key, value in list_filters.items() if value})

    return await _openai_get_all_pages(OPENAI_USAGE_EMBEDDINGS_URL, query)
|
|
780
|
+
|
|
781
|
+
|
|
782
|
+
@mcp.tool()
async def get_organization_usage_images(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
    user_ids: list[str] | None = None,
    api_key_ids: list[str] | None = None,
    models: list[str] | None = None,
    sizes: list[str] | None = None,
    sources: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch images usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; any combination of "project_id",
            "user_id", "api_key_id", "model", "size", "source".
            Example: ["user_id"]
        project_ids: Restrict results to these project IDs.
        user_ids: Restrict results to these user IDs.
        api_key_ids: Restrict results to these API key IDs.
        models: Restrict results to these model names.
        sizes: Filter by image sizes. Allowed values: "256x256", "512x512",
            "1024x1024", "1792x1792", "1024x1792".
        sources: Filter by image source types. Allowed values:
            "image.generation", "image.edit", "image.variation".
    """
    _validate_images_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
        user_ids=user_ids,
        api_key_ids=api_key_ids,
        models=models,
        sizes=sizes,
        sources=sources,
    )

    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    # List-valued filters are only sent when non-empty.
    list_filters = {
        "group_by": group_by,
        "project_ids": project_ids,
        "user_ids": user_ids,
        "api_key_ids": api_key_ids,
        "models": models,
        "sizes": sizes,
        "sources": sources,
    }
    query.update({key: value for key, value in list_filters.items() if value})

    return await _openai_get_all_pages(OPENAI_USAGE_IMAGES_URL, query)
|
|
847
|
+
|
|
848
|
+
|
|
849
|
+
@mcp.tool()
async def get_organization_usage_moderations(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
    user_ids: list[str] | None = None,
    api_key_ids: list[str] | None = None,
    models: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch moderations usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; any combination of "project_id",
            "user_id", "api_key_id", "model". Example: ["project_id"]
        project_ids: Restrict results to these project IDs.
        user_ids: Restrict results to these user IDs.
        api_key_ids: Restrict results to these API key IDs.
        models: Restrict results to these model names.
    """
    _validate_moderations_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
        user_ids=user_ids,
        api_key_ids=api_key_ids,
        models=models,
    )

    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    # List-valued filters are only sent when non-empty.
    list_filters = {
        "group_by": group_by,
        "project_ids": project_ids,
        "user_ids": user_ids,
        "api_key_ids": api_key_ids,
        "models": models,
    }
    query.update({key: value for key, value in list_filters.items() if value})

    return await _openai_get_all_pages(OPENAI_USAGE_MODERATIONS_URL, query)
|
|
902
|
+
|
|
903
|
+
|
|
904
|
+
@mcp.tool()
async def get_organization_usage_vector_stores(
    start_time: int,
    bucket_width: str = "1d",
    end_time: int | None = None,
    group_by: list[str] | None = None,
    project_ids: list[str] | None = None,
) -> dict[str, Any]:
    """Fetch vector stores usage details from the OpenAI Admin API.

    Args:
        start_time: Inclusive start of the window (Unix seconds).
        bucket_width: Bucket width: "1m", "1h", or "1d" (default).
        end_time: Exclusive end of the window (Unix seconds).
        group_by: Optional grouping keys; only "project_id" is allowed.
            Example: ["project_id"]
        project_ids: Restrict results to these project IDs.
    """
    _validate_vector_stores_inputs(
        start_time=start_time,
        bucket_width=bucket_width,
        end_time=end_time,
        group_by=group_by,
        project_ids=project_ids,
    )

    # Required parameters first; optional ones are added only when supplied.
    query: dict[str, Any] = {"start_time": start_time, "bucket_width": bucket_width}
    if end_time is not None:
        query["end_time"] = end_time
    for key, value in (("group_by", group_by), ("project_ids", project_ids)):
        if value:
            query[key] = value

    return await _openai_get_all_pages(OPENAI_USAGE_VECTOR_STORES_URL, query)
|
|
941
|
+
|
|
942
|
+
|
|
943
|
+
@mcp.tool()
async def get_organization_users(
    emails: list[str] | None = None,
) -> dict[str, Any]:
    """List every organization user via the OpenAI Admin API.

    Args:
        emails: Optional list of email addresses to filter users.
            Example: ["user1@example.com", "user2@example.com"]
    """
    _validate_users_inputs(emails)

    # Only include the filter when a non-empty list is provided.
    query: dict[str, Any] = {"emails": emails} if emails else {}

    return await _openai_get_all_after_pages(OPENAI_ORGANIZATION_USERS_URL, query)
|
|
959
|
+
|
|
960
|
+
|
|
961
|
+
@mcp.tool()
async def get_organization_user(
    user_id: str,
) -> dict[str, Any]:
    """Retrieve one organization user by ID via the OpenAI Admin API.

    Args:
        user_id: The organization user identifier.
    """
    _validate_user_id(user_id)

    user_url = f"{OPENAI_ORGANIZATION_USERS_URL}/{user_id}"
    return await _openai_get(user_url, {})
|
|
973
|
+
|
|
974
|
+
|
|
975
|
+
@mcp.tool()
async def get_organization_projects(
    include_archived: bool | None = None,
) -> dict[str, Any]:
    """List every organization project via the OpenAI Admin API.

    Args:
        include_archived: If true, include archived projects.
    """
    _validate_projects_inputs(include_archived)

    # Omit the flag entirely when the caller did not specify it.
    query: dict[str, Any] = (
        {} if include_archived is None else {"include_archived": include_archived}
    )

    return await _openai_get_all_after_pages(OPENAI_ORGANIZATION_PROJECTS_URL, query)
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
def main() -> None:
    """Console-script entry point: start the MCP server."""
    mcp.run()
|
|
995
|
+
|
|
996
|
+
|
|
997
|
+
# Allow running this module directly (e.g. `python server.py`) in addition
# to the packaged console-script entry point.
if __name__ == "__main__":
    main()
|