@jhzhu89/m2r 0.1.5 → 0.1.7
- package/LICENSE +21 -0
- package/README.md +21 -10
- package/dist/index.js +138 -103
- package/package.json +6 -3
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 Jiahao Zhu <jiahzhu@outlook.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
CHANGED
@@ -1,7 +1,6 @@
 # m2r
 
-Anthropic Messages API
-
+Anthropic Messages API → Azure OpenAI Responses API proxy.
 Enables Claude Code CLI and other Anthropic-compatible clients to use Azure OpenAI as the backend.
 
 ## Installation
@@ -12,16 +11,28 @@ npm install -g @jhzhu89/m2r
 
 ## Configuration
 
-Create `~/.m2rrc` with your Azure OpenAI
+Create `~/.m2rrc` with your Azure OpenAI settings (Entra ID only; API keys are not used):
 
 ```bash
+# Required
 AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com
-
-
-
+
+# Optional
+AZURE_OPENAI_API_VERSION=2025-04-01-preview
+PROXY_PORT=8000
 LOG_LEVEL=info
+
+# Model routing (optional)
+MODEL_MAP={"claude-3-5-sonnet":"gpt-5.2"}
+TIER_HAIKU=gpt-5-mini
+TIER_SONNET=gpt-5.2
+TIER_OPUS=gpt-5.1-codex-max
 ```
 
+Auth uses `DefaultAzureCredential`, so ensure your environment is logged in (e.g., `az login`) or set the usual `AZURE_CLIENT_ID` / `AZURE_TENANT_ID` / `AZURE_CLIENT_SECRET`.
+
+Routing: `MODEL_MAP` overrides exact model aliases; otherwise `haiku`/`sonnet`/`opus` substrings map to the configured tier models.
+
 ## Usage
 
 Start the proxy server:
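A minimal sketch of the routing rule described in the new README text, mirroring the bundle's `resolveModel` with the default tiers above (the aliases in the example are hypothetical):

```js
// Exact MODEL_MAP hit wins, then tier substring, then pass-through.
const modelMap = { "claude-3-5-sonnet": "gpt-5.2" }; // from MODEL_MAP
const tiers = { haiku: "gpt-5-mini", sonnet: "gpt-5.2", opus: "gpt-5.1-codex-max" };

function resolveModel(alias) {
  if (modelMap[alias]) return modelMap[alias];   // exact alias override
  const lower = alias.toLowerCase();
  for (const [tier, model] of Object.entries(tiers)) {
    if (lower.includes(tier)) return model;      // substring tier match
  }
  return alias;                                  // unknown names pass through
}

resolveModel("claude-3-5-sonnet"); // "gpt-5.2" (MODEL_MAP)
resolveModel("claude-opus-4");     // "gpt-5.1-codex-max" (tier substring)
resolveModel("my-deployment");     // "my-deployment" (unchanged)
```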
@@ -30,7 +41,7 @@ Start the proxy server:
 m2r
 ```
 
-Then point your Anthropic client to `http://localhost:
+Then point your Anthropic client to `http://localhost:8000`.
 
 ## Shell Integration
 
@@ -42,7 +53,7 @@ Add to `~/.zshrc` or `~/.bashrc`:
 
 ```bash
 claude() {
-  local proxy_port=
+  local proxy_port=8000
   local m2rrc="$HOME/.m2rrc"
 
   if [[ -f "$m2rrc" ]]; then
@@ -80,7 +91,7 @@ m2r-log() {
 }
 
 m2r-restart() {
-  local proxy_port=
+  local proxy_port=8000 m2rrc="$HOME/.m2rrc"
   if [[ -f "$m2rrc" ]]; then
     local port_line=$(grep '^PROXY_PORT=' "$m2rrc")
     [[ -n "$port_line" ]] && proxy_port="${port_line#PROXY_PORT=}"
@@ -106,7 +117,7 @@ function Get-M2rPort {
   if (Test-Path $m2rrc) {
     switch -Regex -File $m2rrc { '^PROXY_PORT=(\d+)' { return [int]$Matches[1] } }
   }
-  return
+  return 8000
 }
 
 function Test-M2rRunning($port) {
package/dist/index.js
CHANGED
@@ -16160,16 +16160,22 @@ function fromRequest(body) {
     for (const block of blocks) {
       if (block.type === "text") {
         content2.push({ type: "text", text: block.text });
-
+        continue;
+      }
+      if (block.type === "image") {
         content2.push(parseImageBlock(block));
-
+        continue;
+      }
+      if (block.type === "tool_use") {
         content2.push({
           type: "tool_call",
           id: block.id,
           name: block.name,
           arguments: block.input
         });
-
+        continue;
+      }
+      if (block.type === "tool_result") {
         content2.push({
           type: "tool_result",
           id: block.tool_use_id,
@@ -16207,13 +16213,6 @@ function fromRequest(body) {
 function generateId(prefix = "msg") {
   return `${prefix}_${Date.now().toString(36)}${Math.random().toString(36).slice(2, 8)}`;
 }
-function mapStopReason(status, hasToolCall) {
-  if (hasToolCall)
-    return "tool_use";
-  if (status === "incomplete")
-    return "max_tokens";
-  return "end_turn";
-}
 
 // src/anthropic/to-response.ts
 function toContentBlock(c) {
@@ -26803,14 +26802,16 @@ function extractUpstreamRequestId(error49) {
   }
   return null;
 }
-function handleError(
+function handleError(errorOrUnused, maybeError, ctx) {
+  const error49 = maybeError ?? errorOrUnused;
+  const context = ctx ?? { reqId: "unknown" };
   const status = extractStatus(error49);
   const message2 = extractMessage(error49);
   const errorType = mapStatusToErrorType(status);
   const httpCode = mapStatusToHttpCode(status);
   logger.error({
-    reqId:
-    model:
+    reqId: context.reqId,
+    model: context.model,
     status,
     errorType,
     message: message2,
@@ -26846,68 +26847,19 @@ var init_error_mapper = __esm(() => {
   init_logger();
 });
 
-// src/config/index.ts
-var exports_config = {};
-__export(exports_config, {
-  resolveModel: () => resolveModel,
-  config: () => config2
-});
-function parseModelMap(env2) {
-  if (!env2)
-    return {};
-  const parsed = JSON.parse(env2);
-  if (typeof parsed !== "object" || parsed === null)
-    return {};
-  const result = {};
-  for (const [key, value] of Object.entries(parsed)) {
-    if (typeof value === "string") {
-      result[key] = value;
-    }
-  }
-  return result;
-}
-function resolveModel(alias) {
-  if (config2.modelMap[alias]) {
-    return config2.modelMap[alias];
-  }
-  const lower = alias.toLowerCase();
-  for (const [tier, model] of Object.entries(config2.tiers)) {
-    if (lower.includes(tier)) {
-      return model;
-    }
-  }
-  return alias;
-}
-var config2;
-var init_config = __esm(() => {
-  config2 = {
-    port: parseInt(process.env.PROXY_PORT || "8000"),
-    azure: {
-      endpoint: process.env.AZURE_OPENAI_ENDPOINT || "",
-      apiVersion: process.env.AZURE_OPENAI_API_VERSION || "2025-04-01-preview"
-    },
-    modelMap: parseModelMap(process.env.MODEL_MAP),
-    tiers: {
-      haiku: process.env.TIER_HAIKU || "gpt-5-mini",
-      sonnet: process.env.TIER_SONNET || "gpt-5.2",
-      opus: process.env.TIER_OPUS || "gpt-5.1-codex-max"
-    }
-  };
-});
-
 // src/config/model-config.ts
 function getModelConfig(slug) {
   const lower = slug.toLowerCase();
-  for (const { prefix, config:
+  for (const { prefix, config: config2 } of modelFamilies) {
     if (lower.startsWith(prefix)) {
-      return
+      return config2;
     }
   }
-  return
+  return defaultConfig;
 }
-var
+var modelFamilies, defaultConfig;
 var init_model_config = __esm(() => {
-
+  modelFamilies = [
     {
       prefix: "gpt-5.2-codex",
       config: {
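The rewritten `getModelConfig` replaces the truncated returns above with a named `defaultConfig` fallback (its values appear in the next hunk). A sketch of the lookup behavior, with hypothetical slugs:

```js
// Prefix match against modelFamilies, else the shared defaultConfig.
getModelConfig("gpt-5.2-codex-mini"); // family config for the "gpt-5.2-codex" prefix
getModelConfig("totally-unknown");    // defaultConfig: no parallel tool calls,
                                      // no reasoning summaries, 128k context window
```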
@@ -26976,13 +26928,71 @@ var init_model_config = __esm(() => {
     }
   }
   ];
-
+  defaultConfig = {
     supportsParallelToolCalls: false,
     supportsReasoningSummaries: false,
     contextWindow: 128000
   };
 });
 
+// src/config/index.ts
+var exports_config = {};
+__export(exports_config, {
+  resolveModelConfig: () => resolveModelConfig,
+  resolveModel: () => resolveModel,
+  getModelConfig: () => getModelConfig,
+  config: () => config2
+});
+function parseModelMap(env2) {
+  if (!env2)
+    return {};
+  try {
+    const parsed = JSON.parse(env2);
+    if (typeof parsed !== "object" || parsed === null)
+      return {};
+    const result = {};
+    for (const [key, value] of Object.entries(parsed)) {
+      if (typeof value === "string")
+        result[key] = value;
+    }
+    return result;
+  } catch {
+    return {};
+  }
+}
+function resolveModel(alias) {
+  if (modelMap[alias])
+    return modelMap[alias];
+  const lower = alias.toLowerCase();
+  for (const [tier, model] of Object.entries(tiers)) {
+    if (lower.includes(tier))
+      return model;
+  }
+  return alias;
+}
+function resolveModelConfig(model) {
+  const resolved = resolveModel(model);
+  return { model: resolved, config: getModelConfig(resolved) };
+}
+var modelMap, tiers, config2;
+var init_config = __esm(() => {
+  init_model_config();
+  modelMap = parseModelMap(process.env.MODEL_MAP);
+  tiers = {
+    haiku: process.env.TIER_HAIKU || "gpt-5-mini",
+    sonnet: process.env.TIER_SONNET || "gpt-5.2",
+    opus: process.env.TIER_OPUS || "gpt-5.1-codex-max"
+  };
+  config2 = {
+    port: parseInt(process.env.PROXY_PORT || "8000"),
+    azure: {
+      endpoint: process.env.AZURE_OPENAI_ENDPOINT || "",
+      apiVersion: process.env.AZURE_OPENAI_API_VERSION || "2025-04-01-preview"
+    },
+    tiers
+  };
+});
+
 
 // src/openai/to-request.ts
 function mapToolChoice(tc) {
   if (!tc || tc.type === "auto")
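Note the hardening in `parseModelMap`: 0.1.5 called `JSON.parse` bare, so a malformed `MODEL_MAP` crashed at startup; 0.1.7 wraps it in `try`/`catch`. Expected behavior, per the code above:

```js
parseModelMap('{"claude-3-5-sonnet":"gpt-5.2"}'); // { "claude-3-5-sonnet": "gpt-5.2" }
parseModelMap('{"alias": 42}');                   // {} (non-string values are dropped)
parseModelMap("not json");                        // {} (0.1.5 threw a SyntaxError here)
parseModelMap(undefined);                         // {} (MODEL_MAP unset)
```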
@@ -26991,7 +27001,7 @@ function mapToolChoice(tc) {
     return "required";
   return { type: "function", name: tc.name };
 }
-function
+function buildInput(ir) {
   const input = [];
   for (const msg of ir.messages) {
     if (msg.role === "system") {
@@ -26999,14 +27009,20 @@ function toResponsesRequest(ir) {
         role: "system",
         content: msg.content.map((c) => c.type === "text" ? c.text : "").join("")
       });
-
+      continue;
+    }
+    if (msg.role === "user") {
       const parts = [];
       for (const c of msg.content) {
         if (c.type === "text") {
           parts.push({ type: "input_text", text: c.text });
-
+          continue;
+        }
+        if (c.type === "image") {
           parts.push({ type: "input_image", image_url: c.url, detail: "auto" });
-
+          continue;
+        }
+        if (c.type === "tool_result") {
           input.push({
             type: "function_call_output",
             call_id: c.id,
@@ -27017,22 +27033,24 @@ function toResponsesRequest(ir) {
       if (parts.length > 0) {
         input.push({ type: "message", role: "user", content: parts });
       }
-
-
-
-
-
-
-
-
-      }
-
-
-
-
-
-
-
+      continue;
+    }
+    for (const c of msg.content) {
+      if (c.type === "text") {
+        input.push({
+          type: "message",
+          role: "assistant",
+          content: c.text
+        });
+        continue;
+      }
+      if (c.type === "tool_call") {
+        input.push({
+          type: "function_call",
+          call_id: c.id,
+          name: c.name,
+          arguments: JSON.stringify(c.arguments)
+        });
       }
     }
   }
@@ -27043,13 +27061,15 @@ function toResponsesRequest(ir) {
     parameters: { type: "object", ...t.inputSchema },
     strict: null
   }));
-  const
-
+  const toolChoice2 = mapToolChoice(ir.toolChoice);
+  return { input, tools, toolChoice: toolChoice2 };
+}
+function buildOpenAIRequest(ir, model, modelConfig) {
+  const { input, tools, toolChoice: toolChoice2 } = buildInput(ir);
   const reasoning = modelConfig.supportsReasoningSummaries ? {
     effort: ir.thinking?.effort ?? modelConfig.defaultReasoningEffort ?? "medium",
     summary: "auto"
   } : undefined;
-  const toolChoice2 = mapToolChoice(ir.toolChoice);
   return {
     model,
     input,
@@ -27063,11 +27083,23 @@ function toResponsesRequest(ir) {
     ...ir.topP !== undefined && { top_p: ir.topP }
   };
 }
+function toResponsesRequest(ir, resolved) {
+  const resolution = resolved ?? resolveModelConfig(ir.model);
+  return buildOpenAIRequest(ir, resolution.model, resolution.config);
+}
 var init_to_request = __esm(() => {
   init_config();
-  init_model_config();
 });
 
+// src/openai/stop-reason.ts
+function mapOpenAIStopReason(status, hasToolCall) {
+  if (hasToolCall)
+    return "tool_use";
+  if (status === "incomplete")
+    return "max_tokens";
+  return "end_turn";
+}
+
 // src/openai/from-response.ts
 function isOutputText(c) {
   return c.type === "output_text";
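The request builder is now split into `buildInput`/`buildOpenAIRequest`, with `toResponsesRequest` kept as a thin wrapper whose new optional second argument lets the caller resolve the model once. A sketch of the two call shapes:

```js
const resolved = resolveModelConfig(ir.model); // { model, config }, resolved once
toResponsesRequest(ir, resolved);              // reuse the resolution (what the route handler does)
toResponsesRequest(ir);                        // still works: resolves internally
```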
@@ -27096,7 +27128,7 @@ function fromResponse(response) {
   const hasToolCall = content2.some((c) => c.type === "tool_call");
   return {
     content: content2,
-    stopReason:
+    stopReason: mapOpenAIStopReason(response.status, hasToolCall),
     usage: {
       inputTokens: response.usage?.input_tokens ?? 0,
       outputTokens: response.usage?.output_tokens ?? 0
@@ -27183,7 +27215,7 @@ function* fromStreamEvent(event) {
   const hasToolCall = event.response.output.some((item) => item.type === "function_call");
   yield {
     type: "done",
-    stopReason:
+    stopReason: mapOpenAIStopReason(event.response.status, hasToolCall),
     usage: {
       inputTokens: event.response.usage?.input_tokens ?? 0,
       outputTokens: event.response.usage?.output_tokens ?? 0
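Both the non-streaming and streaming paths now share `mapOpenAIStopReason` (replacing the private `mapStopReason` removed earlier), so Anthropic stop reasons stay consistent across them:

```js
mapOpenAIStopReason("completed", true);   // "tool_use" (tool calls take precedence)
mapOpenAIStopReason("incomplete", false); // "max_tokens" (response was truncated)
mapOpenAIStopReason("completed", false);  // "end_turn"
```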
@@ -31695,9 +31727,9 @@ var init_main = __esm(() => {
 
 // src/tokenizer/index.ts
 function countMessageTokens2(messages) {
-  let total =
+  let total = baseTokens;
   for (const msg of messages) {
-    total +=
+    total += tokensPerMessage;
     total += encode4(msg.role).length;
     for (const c of msg.content) {
       switch (c.type) {
@@ -31729,7 +31761,7 @@ function countToolTokens(tools) {
   }
   return total;
 }
-var
+var tokensPerMessage = 3, baseTokens = 3;
 var init_tokenizer = __esm(() => {
   init_main();
 });
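With the constants named, the tokenizer's overhead model reads directly: 3 base tokens plus 3 per message, plus the encoded role and content. A rough worked example, assuming each role string encodes to a single token:

```js
const tokensPerMessage = 3, baseTokens = 3;
const roleTokens = 1; // assumption: "user"/"assistant" encode to one token each
// Structural overhead for a two-message conversation, before any content tokens:
const overhead = baseTokens + 2 * (tokensPerMessage + roleTokens); // 3 + 2 * 4 = 11
```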
@@ -31769,19 +31801,19 @@ function createApp(client) {
     }
     const ir = fromRequest(validation.data);
     const reqId = generateId("req");
-    const
+    const resolved = resolveModelConfig(ir.model);
     c.header("x-request-id", reqId);
     logger.info({
       reqId,
       model: ir.model,
-      resolvedModel,
+      resolvedModel: resolved.model,
       stream: ir.stream,
       maxTokens: ir.maxTokens,
       tools: ir.tools?.length ?? 0,
       thinking: ir.thinking?.type === "enabled"
     }, "request received");
     if (!ir.stream) {
-      const openaiReq = toResponsesRequest(ir);
+      const openaiReq = toResponsesRequest(ir, resolved);
       try {
         const openaiRes = await client.responses.create(openaiReq);
         const irRes = fromResponse(openaiRes);
@@ -31794,14 +31826,14 @@ function createApp(client) {
         }, "non-streaming response complete");
         return c.json(anthropicRes);
       } catch (error49) {
-        return handleError(
+        return handleError(error49, { reqId, model: resolved.model });
       }
     }
     c.header("Content-Type", "text/event-stream");
     c.header("Cache-Control", "no-cache");
     c.header("Connection", "keep-alive");
     return stream(c, async (s) => {
-      const openaiReq = toResponsesRequest(ir);
+      const openaiReq = toResponsesRequest(ir, resolved);
       try {
         const openaiStream = await client.responses.create({
           ...openaiReq,
@@ -31817,7 +31849,10 @@ function createApp(client) {
         }
         logger.info({ reqId }, "streaming response complete");
       } catch (error49) {
-        for (const sse of streamError(error49, {
+        for (const sse of streamError(error49, {
+          reqId,
+          model: resolved.model
+        })) {
           await s.write(sse);
         }
       }
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@jhzhu89/m2r",
-  "version": "0.1.
+  "version": "0.1.7",
   "description": "Anthropic Messages API to Azure OpenAI Responses API proxy",
   "type": "module",
   "license": "MIT",
@@ -33,10 +33,12 @@
     "test": "bun test ./test/unit",
     "test:integration": "bun test ./test/integration",
     "test:all": "bun test ./test",
-    "test:watch": "bun test --watch ./test/unit"
+    "test:watch": "bun test --watch ./test/unit",
+    "release:patch": "npm version patch && git push && git push --tags",
+    "release:minor": "npm version minor && git push && git push --tags",
+    "release:major": "npm version major && git push && git push --tags"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.71.2",
     "@azure/identity": "^4.13.0",
     "dotenv": "^17.2.3",
     "gpt-tokenizer": "^3.4.0",
@@ -46,6 +48,7 @@
     "zod": "^4.3.4"
   },
   "devDependencies": {
+    "@anthropic-ai/sdk": "^0.71.2",
     "@eslint/js": "^9.39.2",
     "@types/bun": "latest",
     "eslint": "^9.39.2",