@jhzhu89/m2r 0.1.6 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/LICENSE +21 -0
  2. package/README.md +21 -10
  3. package/dist/index.js +48 -58
  4. package/package.json +5 -2
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Jiahao Zhu <jiahzhu@outlook.com>
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md CHANGED
@@ -1,7 +1,6 @@
  # m2r

- Anthropic Messages API to Azure OpenAI Responses API proxy.
-
+ Anthropic Messages API → Azure OpenAI Responses API proxy.
  Enables Claude Code CLI and other Anthropic-compatible clients to use Azure OpenAI as the backend.

  ## Installation
@@ -12,16 +11,28 @@ npm install -g @jhzhu89/m2r

  ## Configuration

- Create `~/.m2rrc` with your Azure OpenAI credentials:
+ Create `~/.m2rrc` with your Azure OpenAI settings (Entra ID only; API keys are not used):

  ```bash
+ # Required
  AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com
- AZURE_OPENAI_API_KEY=your-api-key
- AZURE_OPENAI_DEPLOYMENT=your-deployment-name
- PROXY_PORT=8001
+
+ # Optional
+ AZURE_OPENAI_API_VERSION=2025-04-01-preview
+ PROXY_PORT=8000
  LOG_LEVEL=info
+
+ # Model routing (optional)
+ MODEL_MAP={"claude-3-5-sonnet":"gpt-5.2"}
+ TIER_HAIKU=gpt-5-mini
+ TIER_SONNET=gpt-5.2
+ TIER_OPUS=gpt-5.1-codex-max
  ```

+ Auth uses `DefaultAzureCredential`, so ensure your environment is logged in (e.g., `az login`) or set the usual `AZURE_CLIENT_ID` / `AZURE_TENANT_ID` / `AZURE_CLIENT_SECRET`.
+
+ Routing: `MODEL_MAP` overrides exact model aliases; otherwise `haiku`/`sonnet`/`opus` substrings map to the configured tier models.
+
  ## Usage

  Start the proxy server:
@@ -30,7 +41,7 @@ Start the proxy server:
  m2r
  ```

- Then point your Anthropic client to `http://localhost:8001`.
+ Then point your Anthropic client to `http://localhost:8000`.

  ## Shell Integration

@@ -42,7 +53,7 @@ Add to `~/.zshrc` or `~/.bashrc`:

  ```bash
  claude() {
-   local proxy_port=8001
+   local proxy_port=8000
    local m2rrc="$HOME/.m2rrc"

    if [[ -f "$m2rrc" ]]; then
@@ -80,7 +91,7 @@ m2r-log() {
  }

  m2r-restart() {
-   local proxy_port=8001 m2rrc="$HOME/.m2rrc"
+   local proxy_port=8000 m2rrc="$HOME/.m2rrc"
    if [[ -f "$m2rrc" ]]; then
      local port_line=$(grep '^PROXY_PORT=' "$m2rrc")
      [[ -n "$port_line" ]] && proxy_port="${port_line#PROXY_PORT=}"
@@ -106,7 +117,7 @@ function Get-M2rPort {
    if (Test-Path $m2rrc) {
      switch -Regex -File $m2rrc { '^PROXY_PORT=(\d+)' { return [int]$Matches[1] } }
    }
-   return 8001
+   return 8000
  }

  function Test-M2rRunning($port) {
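
The routing paragraph in the new README is easiest to read as code. A minimal TypeScript sketch of the documented lookup order (exact `MODEL_MAP` alias, then tier substring, then passthrough); the map and tier values are the README's own example and defaults, and the function mirrors the `resolveModel` added to `dist/index.js` below:

```ts
// Lookup order: exact MODEL_MAP alias → tier substring → passthrough.
// Values are the README's example MODEL_MAP and default tiers.
const modelMap: Record<string, string> = { "claude-3-5-sonnet": "gpt-5.2" };
const tiers: Record<string, string> = {
  haiku: "gpt-5-mini",
  sonnet: "gpt-5.2",
  opus: "gpt-5.1-codex-max",
};

function resolveModel(alias: string): string {
  if (modelMap[alias]) return modelMap[alias]; // 1. exact alias override
  const lower = alias.toLowerCase();
  for (const [tier, model] of Object.entries(tiers)) {
    if (lower.includes(tier)) return model;    // 2. tier substring match
  }
  return alias;                                // 3. unknown aliases pass through
}

resolveModel("claude-3-5-sonnet"); // "gpt-5.2" via MODEL_MAP
resolveModel("claude-haiku-x");    // "gpt-5-mini" via the haiku tier (hypothetical alias)
resolveModel("my-custom-model");   // passed through unchanged
```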
package/dist/index.js CHANGED
@@ -26850,16 +26850,16 @@ var init_error_mapper = __esm(() => {
  // src/config/model-config.ts
  function getModelConfig(slug) {
    const lower = slug.toLowerCase();
-   for (const { prefix, config: config2 } of MODEL_FAMILIES) {
+   for (const { prefix, config: config2 } of modelFamilies) {
      if (lower.startsWith(prefix)) {
        return config2;
      }
    }
-   return DEFAULT_CONFIG;
+   return defaultConfig;
  }
- var MODEL_FAMILIES, DEFAULT_CONFIG;
+ var modelFamilies, defaultConfig;
  var init_model_config = __esm(() => {
-   MODEL_FAMILIES = [
+   modelFamilies = [
      {
        prefix: "gpt-5.2-codex",
        config: {
@@ -26928,79 +26928,69 @@ var init_model_config = __esm(() => {
        }
      }
    ];
-   DEFAULT_CONFIG = {
+   defaultConfig = {
      supportsParallelToolCalls: false,
      supportsReasoningSummaries: false,
      contextWindow: 128000
    };
  });

- // src/config/routing.ts
- function resolveModel(config2, alias) {
-   if (config2.modelMap[alias]) {
-     return config2.modelMap[alias];
-   }
-   const lower = alias.toLowerCase();
-   for (const [tier, model] of Object.entries(config2.tiers)) {
-     if (lower.includes(tier)) {
-       return model;
-     }
-   }
-   return alias;
- }
- function resolveModelConfig(config2, model) {
-   const resolvedModel = resolveModel(config2, model);
-   const configSnapshot = getModelConfig(resolvedModel);
-   return { model: resolvedModel, config: configSnapshot };
- }
- var init_routing = __esm(() => {
-   init_model_config();
- });
-
  // src/config/index.ts
  var exports_config = {};
  __export(exports_config, {
-   resolveModelConfig: () => resolveModelConfig2,
-   resolveModel: () => resolveModel2,
+   resolveModelConfig: () => resolveModelConfig,
+   resolveModel: () => resolveModel,
+   getModelConfig: () => getModelConfig,
    config: () => config2
  });
  function parseModelMap(env2) {
    if (!env2)
      return {};
-   const parsed = JSON.parse(env2);
-   if (typeof parsed !== "object" || parsed === null)
-     return {};
-   const result = {};
-   for (const [key, value] of Object.entries(parsed)) {
-     if (typeof value === "string") {
-       result[key] = value;
+   try {
+     const parsed = JSON.parse(env2);
+     if (typeof parsed !== "object" || parsed === null)
+       return {};
+     const result = {};
+     for (const [key, value] of Object.entries(parsed)) {
+       if (typeof value === "string")
+         result[key] = value;
      }
+     return result;
+   } catch {
+     return {};
    }
-   return result;
  }
- var configSnapshot, config2, resolveModel2, resolveModelConfig2;
+ function resolveModel(alias) {
+   if (modelMap[alias])
+     return modelMap[alias];
+   const lower = alias.toLowerCase();
+   for (const [tier, model] of Object.entries(tiers)) {
+     if (lower.includes(tier))
+       return model;
+   }
+   return alias;
+ }
+ function resolveModelConfig(model) {
+   const resolved = resolveModel(model);
+   return { model: resolved, config: getModelConfig(resolved) };
+ }
+ var modelMap, tiers, config2;
  var init_config = __esm(() => {
-   init_routing();
-   configSnapshot = {
+   init_model_config();
+   modelMap = parseModelMap(process.env.MODEL_MAP);
+   tiers = {
+     haiku: process.env.TIER_HAIKU || "gpt-5-mini",
+     sonnet: process.env.TIER_SONNET || "gpt-5.2",
+     opus: process.env.TIER_OPUS || "gpt-5.1-codex-max"
+   };
+   config2 = {
      port: parseInt(process.env.PROXY_PORT || "8000"),
      azure: {
        endpoint: process.env.AZURE_OPENAI_ENDPOINT || "",
        apiVersion: process.env.AZURE_OPENAI_API_VERSION || "2025-04-01-preview"
      },
-     modelMap: parseModelMap(process.env.MODEL_MAP),
-     tiers: {
-       haiku: process.env.TIER_HAIKU || "gpt-5-mini",
-       sonnet: process.env.TIER_SONNET || "gpt-5.2",
-       opus: process.env.TIER_OPUS || "gpt-5.1-codex-max"
-     }
-   };
-   config2 = {
-     ...configSnapshot,
-     resolveModel: (alias) => resolveModel(configSnapshot, alias),
-     resolveModelConfig: (model) => resolveModelConfig(configSnapshot, model)
+     tiers
    };
-   resolveModel2 = config2.resolveModel;
-   resolveModelConfig2 = config2.resolveModelConfig;
  });

  // src/openai/to-request.ts
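
The `parseModelMap` change in the hunk above is the behavioral fix of this release: in 0.1.6 a malformed `MODEL_MAP` made the bare `JSON.parse` throw while the config module initialized, crashing the proxy at startup; 0.1.7 swallows the error and falls back to an empty map. A standalone TypeScript restatement of the hardened parser, with illustrative inputs:

```ts
// Restatement of the bundled parseModelMap: malformed or non-object
// JSON now yields an empty map instead of throwing at module init.
function parseModelMap(env?: string): Record<string, string> {
  if (!env) return {};
  try {
    const parsed: unknown = JSON.parse(env);
    if (typeof parsed !== "object" || parsed === null) return {};
    const result: Record<string, string> = {};
    for (const [key, value] of Object.entries(parsed)) {
      if (typeof value === "string") result[key] = value; // drop non-string values
    }
    return result;
  } catch {
    return {}; // 0.1.6 would have thrown a SyntaxError here
  }
}

parseModelMap('{"claude-3-5-sonnet":"gpt-5.2"}'); // { "claude-3-5-sonnet": "gpt-5.2" }
parseModelMap("{not json");                       // {} rather than a startup crash
parseModelMap('{"alias": 42}');                   // {} after filtering non-string values
```

The same hunk also flattens the old `src/config/routing.ts` indirection: `resolveModel` and `resolveModelConfig` become plain module functions closing over `modelMap` and `tiers`, rather than methods on a `config2` object built from a `configSnapshot`.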
@@ -27094,7 +27084,7 @@ function buildOpenAIRequest(ir, model, modelConfig) {
    };
  }
  function toResponsesRequest(ir, resolved) {
-   const resolution = resolved ?? config2.resolveModelConfig(ir.model);
+   const resolution = resolved ?? resolveModelConfig(ir.model);
    return buildOpenAIRequest(ir, resolution.model, resolution.config);
  }
  var init_to_request = __esm(() => {
@@ -31737,9 +31727,9 @@ var init_main = __esm(() => {

  // src/tokenizer/index.ts
  function countMessageTokens2(messages) {
-   let total = BASE_TOKENS;
+   let total = baseTokens;
    for (const msg of messages) {
-     total += TOKENS_PER_MESSAGE;
+     total += tokensPerMessage;
      total += encode4(msg.role).length;
      for (const c of msg.content) {
        switch (c.type) {
@@ -31771,7 +31761,7 @@ function countToolTokens(tools) {
    }
    return total;
  }
- var TOKENS_PER_MESSAGE = 3, BASE_TOKENS = 3;
+ var tokensPerMessage = 3, baseTokens = 3;
  var init_tokenizer = __esm(() => {
    init_main();
  });
@@ -31811,7 +31801,7 @@ function createApp(client) {
    }
    const ir = fromRequest(validation.data);
    const reqId = generateId("req");
-   const resolved = resolveModelConfig2(ir.model);
+   const resolved = resolveModelConfig(ir.model);
    c.header("x-request-id", reqId);
    logger.info({
      reqId,
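
The tokenizer hunks above only rename `TOKENS_PER_MESSAGE`/`BASE_TOKENS` to camelCase, but the heuristic they implement is worth spelling out: 3 priming tokens per request, plus 3 framing tokens per message, plus the encoded role and content. A sketch assuming a stand-in `encode` (the bundle wires in its own tokenizer via `init_main`, and the diff truncates the content-type switch, so only text parts are shown):

```ts
// Heuristic from the tokenizer hunk: base + per-message overhead + encoded text.
// `encode` is a toy stand-in for the bundled tokenizer; any BPE encoder
// returning a token array fits the same shape.
const encode = (text: string): number[] =>
  text.split(/\s+/).filter(Boolean).map((_, i) => i);

interface TextPart { type: "text"; text: string }
interface Message { role: string; content: TextPart[] }

const tokensPerMessage = 3; // per-message framing overhead
const baseTokens = 3;       // per-request priming overhead

function countMessageTokens(messages: Message[]): number {
  let total = baseTokens;
  for (const msg of messages) {
    total += tokensPerMessage;
    total += encode(msg.role).length;
    for (const part of msg.content) {
      if (part.type === "text") total += encode(part.text).length;
    }
  }
  return total;
}

countMessageTokens([{ role: "user", content: [{ type: "text", text: "hello world" }] }]);
// 3 base + 3 message + 1 role token + 2 text tokens = 9 with the toy encoder
```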
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@jhzhu89/m2r",
-   "version": "0.1.6",
+   "version": "0.1.7",
    "description": "Anthropic Messages API to Azure OpenAI Responses API proxy",
    "type": "module",
    "license": "MIT",
@@ -33,7 +33,10 @@
    "test": "bun test ./test/unit",
    "test:integration": "bun test ./test/integration",
    "test:all": "bun test ./test",
-   "test:watch": "bun test --watch ./test/unit"
+   "test:watch": "bun test --watch ./test/unit",
+   "release:patch": "npm version patch && git push && git push --tags",
+   "release:minor": "npm version minor && git push && git push --tags",
+   "release:major": "npm version major && git push && git push --tags"
  },
  "dependencies": {
    "@azure/identity": "^4.13.0",