@jhzhu89/m2r 0.1.6 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +79 -40
  3. package/dist/index.js +49 -58
  4. package/package.json +5 -2
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Jiahao Zhu <jiahzhu@outlook.com>
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md CHANGED
@@ -1,7 +1,6 @@
1
1
  # m2r
2
2
 
3
- Anthropic Messages API to Azure OpenAI Responses API proxy.
4
-
3
+ Anthropic Messages API to Azure OpenAI Responses API proxy.
5
4
  Enables Claude Code CLI and other Anthropic-compatible clients to use Azure OpenAI as the backend.
6
5
 
7
6
  ## Installation
@@ -12,16 +11,28 @@ npm install -g @jhzhu89/m2r
12
11
 
13
12
  ## Configuration
14
13
 
15
- Create `~/.m2rrc` with your Azure OpenAI credentials:
14
+ Create `~/.m2rrc` with your Azure OpenAI settings (Entra ID only; API keys are not used):
16
15
 
17
16
  ```bash
17
+ # Required
18
18
  AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com
19
- AZURE_OPENAI_API_KEY=your-api-key
20
- AZURE_OPENAI_DEPLOYMENT=your-deployment-name
21
- PROXY_PORT=8001
19
+
20
+ # Optional
21
+ AZURE_OPENAI_API_VERSION=2025-04-01-preview
22
+ PROXY_PORT=8000
22
23
  LOG_LEVEL=info
24
+
25
+ # Model routing (optional)
26
+ MODEL_MAP={"claude-3-5-sonnet":"gpt-5.2"}
27
+ TIER_HAIKU=gpt-5-mini
28
+ TIER_SONNET=gpt-5.2
29
+ TIER_OPUS=gpt-5.1-codex-max
23
30
  ```
24
31
 
32
+ Auth uses `DefaultAzureCredential`, so ensure your environment is logged in (e.g., `az login`) or set the usual `AZURE_CLIENT_ID` / `AZURE_TENANT_ID` / `AZURE_CLIENT_SECRET`.
33
+
34
+ Routing: `MODEL_MAP` overrides exact model aliases; otherwise `haiku`/`sonnet`/`opus` substrings map to the configured tier models.
35
+
25
36
  ## Usage
26
37
 
27
38
  Start the proxy server:
@@ -30,19 +41,42 @@ Start the proxy server:
30
41
  m2r
31
42
  ```
32
43
 
33
- Then point your Anthropic client to `http://localhost:8001`.
44
+ Then point your Anthropic client to `http://localhost:8000`.
45
+
46
+ ## Linux (systemd user service)
47
+
48
+ Install and start:
49
+
50
+ ```bash
51
+ ./scripts/m2r-service.sh install
52
+ ```
53
+
54
+ Status and logs:
55
+
56
+ ```bash
57
+ systemctl --user status m2r.service
58
+ journalctl --user -u m2r.service -f
59
+ ```
60
+
61
+ Enable start on boot without login:
34
62
 
35
- ## Shell Integration
63
+ ```bash
64
+ loginctl enable-linger $USER
65
+ ```
36
66
 
37
- These shell functions automatically start `m2r` when you run `claude` and configure the necessary environment variables.
67
+ Uninstall:
68
+
69
+ ```bash
70
+ ./scripts/m2r-service.sh uninstall
71
+ ```
38
72
 
39
- ### Zsh / Bash
73
+ ### Zsh / Bash (optional helpers)
40
74
 
41
75
  Add to `~/.zshrc` or `~/.bashrc`:
42
76
 
43
77
  ```bash
44
78
  claude() {
45
- local proxy_port=8001
79
+ local proxy_port=8000
46
80
  local m2rrc="$HOME/.m2rrc"
47
81
 
48
82
  if [[ -f "$m2rrc" ]]; then
@@ -65,34 +99,39 @@ claude() {
65
99
  command claude "$@"
66
100
  }
67
101
 
68
- m2r-log() {
69
- local log="$HOME/.local/log/m2r.log"
70
- local follow=false tail=50
71
- while [[ $# -gt 0 ]]; do
72
- case "$1" in
73
- -f|--follow) follow=true; shift ;;
74
- -n|--tail) tail="$2"; shift 2 ;;
75
- *) shift ;;
76
- esac
77
- done
78
- [[ ! -f "$log" ]] && echo "Log not found: $log" && return 1
79
- $follow && tail -n "$tail" -f "$log" || tail -n "$tail" "$log"
80
- }
81
-
82
- m2r-restart() {
83
- local proxy_port=8001 m2rrc="$HOME/.m2rrc"
84
- if [[ -f "$m2rrc" ]]; then
85
- local port_line=$(grep '^PROXY_PORT=' "$m2rrc")
86
- [[ -n "$port_line" ]] && proxy_port="${port_line#PROXY_PORT=}"
87
- fi
88
- pkill -f "node.*m2r" 2>/dev/null && echo "Stopped m2r" || echo "m2r not running"
89
- mkdir -p "$HOME/.local/log"
90
- nohup m2r >> "$HOME/.local/log/m2r.log" 2>&1 &
91
- for i in {1..10}; do
92
- sleep 0.3
93
- nc -z localhost "$proxy_port" 2>/dev/null && echo "m2r started on port $proxy_port" && return 0
94
- done
95
- echo "Failed to start m2r"; return 1
102
+ m2r-config() {
103
+ local m2rrc="$HOME/.m2rrc"
104
+ local action="$1"
105
+ local key="$2"
106
+ local value="$3"
107
+
108
+ mkdir -p "$(dirname "$m2rrc")"
109
+
110
+ case "$action" in
111
+ get)
112
+ [[ -z "$key" ]] && echo "Usage: m2r-config get KEY" && return 1
113
+ [[ -f "$m2rrc" ]] && grep -E "^${key}=" "$m2rrc" | tail -n 1 | cut -d= -f2-
114
+ ;;
115
+ set)
116
+ [[ -z "$key" || -z "$value" ]] && echo "Usage: m2r-config set KEY VALUE" && return 1
117
+ if [[ -f "$m2rrc" ]] && grep -q "^${key}=" "$m2rrc"; then
118
+ if sed --version >/dev/null 2>&1; then
119
+ sed -i "s|^${key}=.*|${key}=${value}|" "$m2rrc"
120
+ else
121
+ sed -i '' "s|^${key}=.*|${key}=${value}|" "$m2rrc"
122
+ fi
123
+ else
124
+ echo "${key}=${value}" >> "$m2rrc"
125
+ fi
126
+ ;;
127
+ list|"")
128
+ [[ -f "$m2rrc" ]] && cat "$m2rrc" || true
129
+ ;;
130
+ *)
131
+ echo "Usage: m2r-config [list|get KEY|set KEY VALUE]"
132
+ return 1
133
+ ;;
134
+ esac
96
135
  }
97
136
  ```
98
137
 
@@ -106,7 +145,7 @@ function Get-M2rPort {
106
145
  if (Test-Path $m2rrc) {
107
146
  switch -Regex -File $m2rrc { '^PROXY_PORT=(\d+)' { return [int]$Matches[1] } }
108
147
  }
109
- return 8001
148
+ return 8000
110
149
  }
111
150
 
112
151
  function Test-M2rRunning($port) {
package/dist/index.js CHANGED
@@ -26850,16 +26850,16 @@ var init_error_mapper = __esm(() => {
26850
26850
  // src/config/model-config.ts
26851
26851
  function getModelConfig(slug) {
26852
26852
  const lower = slug.toLowerCase();
26853
- for (const { prefix, config: config2 } of MODEL_FAMILIES) {
26853
+ for (const { prefix, config: config2 } of modelFamilies) {
26854
26854
  if (lower.startsWith(prefix)) {
26855
26855
  return config2;
26856
26856
  }
26857
26857
  }
26858
- return DEFAULT_CONFIG;
26858
+ return defaultConfig;
26859
26859
  }
26860
- var MODEL_FAMILIES, DEFAULT_CONFIG;
26860
+ var modelFamilies, defaultConfig;
26861
26861
  var init_model_config = __esm(() => {
26862
- MODEL_FAMILIES = [
26862
+ modelFamilies = [
26863
26863
  {
26864
26864
  prefix: "gpt-5.2-codex",
26865
26865
  config: {
@@ -26928,79 +26928,69 @@ var init_model_config = __esm(() => {
26928
26928
  }
26929
26929
  }
26930
26930
  ];
26931
- DEFAULT_CONFIG = {
26931
+ defaultConfig = {
26932
26932
  supportsParallelToolCalls: false,
26933
26933
  supportsReasoningSummaries: false,
26934
26934
  contextWindow: 128000
26935
26935
  };
26936
26936
  });
26937
26937
 
26938
- // src/config/routing.ts
26939
- function resolveModel(config2, alias) {
26940
- if (config2.modelMap[alias]) {
26941
- return config2.modelMap[alias];
26942
- }
26943
- const lower = alias.toLowerCase();
26944
- for (const [tier, model] of Object.entries(config2.tiers)) {
26945
- if (lower.includes(tier)) {
26946
- return model;
26947
- }
26948
- }
26949
- return alias;
26950
- }
26951
- function resolveModelConfig(config2, model) {
26952
- const resolvedModel = resolveModel(config2, model);
26953
- const configSnapshot = getModelConfig(resolvedModel);
26954
- return { model: resolvedModel, config: configSnapshot };
26955
- }
26956
- var init_routing = __esm(() => {
26957
- init_model_config();
26958
- });
26959
-
26960
26938
  // src/config/index.ts
26961
26939
  var exports_config = {};
26962
26940
  __export(exports_config, {
26963
- resolveModelConfig: () => resolveModelConfig2,
26964
- resolveModel: () => resolveModel2,
26941
+ resolveModelConfig: () => resolveModelConfig,
26942
+ resolveModel: () => resolveModel,
26943
+ getModelConfig: () => getModelConfig,
26965
26944
  config: () => config2
26966
26945
  });
26967
26946
  function parseModelMap(env2) {
26968
26947
  if (!env2)
26969
26948
  return {};
26970
- const parsed = JSON.parse(env2);
26971
- if (typeof parsed !== "object" || parsed === null)
26972
- return {};
26973
- const result = {};
26974
- for (const [key, value] of Object.entries(parsed)) {
26975
- if (typeof value === "string") {
26976
- result[key] = value;
26949
+ try {
26950
+ const parsed = JSON.parse(env2);
26951
+ if (typeof parsed !== "object" || parsed === null)
26952
+ return {};
26953
+ const result = {};
26954
+ for (const [key, value] of Object.entries(parsed)) {
26955
+ if (typeof value === "string")
26956
+ result[key] = value;
26977
26957
  }
26958
+ return result;
26959
+ } catch {
26960
+ return {};
26978
26961
  }
26979
- return result;
26980
26962
  }
26981
- var configSnapshot, config2, resolveModel2, resolveModelConfig2;
26963
+ function resolveModel(alias) {
26964
+ if (modelMap[alias])
26965
+ return modelMap[alias];
26966
+ const lower = alias.toLowerCase();
26967
+ for (const [tier, model] of Object.entries(tiers)) {
26968
+ if (lower.includes(tier))
26969
+ return model;
26970
+ }
26971
+ return alias;
26972
+ }
26973
+ function resolveModelConfig(model) {
26974
+ const resolved = resolveModel(model);
26975
+ return { model: resolved, config: getModelConfig(resolved) };
26976
+ }
26977
+ var modelMap, tiers, config2;
26982
26978
  var init_config = __esm(() => {
26983
- init_routing();
26984
- configSnapshot = {
26979
+ init_model_config();
26980
+ modelMap = parseModelMap(process.env.MODEL_MAP);
26981
+ tiers = {
26982
+ haiku: process.env.TIER_HAIKU || "gpt-5-mini",
26983
+ sonnet: process.env.TIER_SONNET || "gpt-5.2",
26984
+ opus: process.env.TIER_OPUS || "gpt-5.1-codex-max"
26985
+ };
26986
+ config2 = {
26985
26987
  port: parseInt(process.env.PROXY_PORT || "8000"),
26986
26988
  azure: {
26987
26989
  endpoint: process.env.AZURE_OPENAI_ENDPOINT || "",
26988
26990
  apiVersion: process.env.AZURE_OPENAI_API_VERSION || "2025-04-01-preview"
26989
26991
  },
26990
- modelMap: parseModelMap(process.env.MODEL_MAP),
26991
- tiers: {
26992
- haiku: process.env.TIER_HAIKU || "gpt-5-mini",
26993
- sonnet: process.env.TIER_SONNET || "gpt-5.2",
26994
- opus: process.env.TIER_OPUS || "gpt-5.1-codex-max"
26995
- }
26996
- };
26997
- config2 = {
26998
- ...configSnapshot,
26999
- resolveModel: (alias) => resolveModel(configSnapshot, alias),
27000
- resolveModelConfig: (model) => resolveModelConfig(configSnapshot, model)
26992
+ tiers
27001
26993
  };
27002
- resolveModel2 = config2.resolveModel;
27003
- resolveModelConfig2 = config2.resolveModelConfig;
27004
26994
  });
27005
26995
 
27006
26996
  // src/openai/to-request.ts
@@ -27094,7 +27084,7 @@ function buildOpenAIRequest(ir, model, modelConfig) {
27094
27084
  };
27095
27085
  }
27096
27086
  function toResponsesRequest(ir, resolved) {
27097
- const resolution = resolved ?? config2.resolveModelConfig(ir.model);
27087
+ const resolution = resolved ?? resolveModelConfig(ir.model);
27098
27088
  return buildOpenAIRequest(ir, resolution.model, resolution.config);
27099
27089
  }
27100
27090
  var init_to_request = __esm(() => {
@@ -31737,9 +31727,9 @@ var init_main = __esm(() => {
31737
31727
 
31738
31728
  // src/tokenizer/index.ts
31739
31729
  function countMessageTokens2(messages) {
31740
- let total = BASE_TOKENS;
31730
+ let total = baseTokens;
31741
31731
  for (const msg of messages) {
31742
- total += TOKENS_PER_MESSAGE;
31732
+ total += tokensPerMessage;
31743
31733
  total += encode4(msg.role).length;
31744
31734
  for (const c of msg.content) {
31745
31735
  switch (c.type) {
@@ -31771,7 +31761,7 @@ function countToolTokens(tools) {
31771
31761
  }
31772
31762
  return total;
31773
31763
  }
31774
- var TOKENS_PER_MESSAGE = 3, BASE_TOKENS = 3;
31764
+ var tokensPerMessage = 3, baseTokens = 3;
31775
31765
  var init_tokenizer = __esm(() => {
31776
31766
  init_main();
31777
31767
  });
@@ -31811,7 +31801,7 @@ function createApp(client) {
31811
31801
  }
31812
31802
  const ir = fromRequest(validation.data);
31813
31803
  const reqId = generateId("req");
31814
- const resolved = resolveModelConfig2(ir.model);
31804
+ const resolved = resolveModelConfig(ir.model);
31815
31805
  c.header("x-request-id", reqId);
31816
31806
  logger.info({
31817
31807
  reqId,
@@ -54069,6 +54059,7 @@ var { createApp: createApp2 } = await Promise.resolve().then(() => (init_server(
54069
54059
  var { createClient: createClient2 } = await Promise.resolve().then(() => (init_client2(), exports_client));
54070
54060
  var { config: config3 } = await Promise.resolve().then(() => (init_config(), exports_config));
54071
54061
  var { logger: logger23 } = await Promise.resolve().then(() => (init_logger(), exports_logger));
54062
+ logger23.info({ config: config3 }, "server config");
54072
54063
  var app = createApp2(createClient2());
54073
54064
  var server = Bun.serve({
54074
54065
  fetch: app.fetch,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@jhzhu89/m2r",
3
- "version": "0.1.6",
3
+ "version": "0.1.8",
4
4
  "description": "Anthropic Messages API to Azure OpenAI Responses API proxy",
5
5
  "type": "module",
6
6
  "license": "MIT",
@@ -33,7 +33,10 @@
33
33
  "test": "bun test ./test/unit",
34
34
  "test:integration": "bun test ./test/integration",
35
35
  "test:all": "bun test ./test",
36
- "test:watch": "bun test --watch ./test/unit"
36
+ "test:watch": "bun test --watch ./test/unit",
37
+ "release:patch": "npm version patch && git push && git push --tags",
38
+ "release:minor": "npm version minor && git push && git push --tags",
39
+ "release:major": "npm version major && git push && git push --tags"
37
40
  },
38
41
  "dependencies": {
39
42
  "@azure/identity": "^4.13.0",