@agenr/agenr-plugin 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -0
- package/dist/anthropic-RE4XNAKE.js +5515 -0
- package/dist/azure-openai-responses-IQLXOCZS.js +190 -0
- package/dist/chunk-6DQXEU2A.js +32306 -0
- package/dist/chunk-EAQYK3U2.js +41 -0
- package/dist/chunk-HNWLZUWE.js +31 -0
- package/dist/chunk-JRUUYSFL.js +262 -0
- package/dist/chunk-OLOUBEE5.js +14022 -0
- package/dist/chunk-P5HNPYGQ.js +174 -0
- package/dist/chunk-RD7BUOBD.js +416 -0
- package/dist/chunk-RWWH2U4W.js +7056 -0
- package/dist/chunk-SEOMNQGB.js +86 -0
- package/dist/chunk-SQLXP7LT.js +4792 -0
- package/dist/chunk-URGOKODJ.js +17 -0
- package/dist/dist-R6ESEJ6P.js +1244 -0
- package/dist/google-NAVXTQLO.js +371 -0
- package/dist/google-gemini-cli-NKYJWHX2.js +712 -0
- package/dist/google-vertex-ZBJ2EDRH.js +414 -0
- package/dist/index.js +15942 -0
- package/dist/mistral-SBQYC4J5.js +38407 -0
- package/dist/multipart-parser-DV373IRF.js +371 -0
- package/dist/openai-codex-responses-XN3T3DEN.js +712 -0
- package/dist/openai-completions-75ZFOFU6.js +657 -0
- package/dist/openai-responses-DCK4BVNT.js +198 -0
- package/dist/src-T5RRS2HN.js +1408 -0
- package/openclaw.plugin.json +86 -0
- package/package.json +31 -0
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
import {
|
|
2
|
+
convertResponsesMessages,
|
|
3
|
+
convertResponsesTools,
|
|
4
|
+
processResponsesStream
|
|
5
|
+
} from "./chunk-RD7BUOBD.js";
|
|
6
|
+
import "./chunk-URGOKODJ.js";
|
|
7
|
+
import {
|
|
8
|
+
AzureOpenAI
|
|
9
|
+
} from "./chunk-RWWH2U4W.js";
|
|
10
|
+
import "./chunk-JRUUYSFL.js";
|
|
11
|
+
import {
|
|
12
|
+
getEnvApiKey
|
|
13
|
+
} from "./chunk-SEOMNQGB.js";
|
|
14
|
+
import {
|
|
15
|
+
buildBaseOptions,
|
|
16
|
+
clampReasoning
|
|
17
|
+
} from "./chunk-P5HNPYGQ.js";
|
|
18
|
+
import {
|
|
19
|
+
AssistantMessageEventStream,
|
|
20
|
+
supportsXhigh
|
|
21
|
+
} from "./chunk-OLOUBEE5.js";
|
|
22
|
+
import "./chunk-EAQYK3U2.js";
|
|
23
|
+
|
|
24
|
+
// ../../node_modules/.pnpm/@mariozechner+pi-ai@0.63.2_@modelcontextprotocol+sdk@1.27.1_zod@4.3.6__ws@8.20.0_zod@4.3.6/node_modules/@mariozechner/pi-ai/dist/providers/azure-openai-responses.js
|
|
25
|
+
// Fallback Azure OpenAI API version used when neither azureApiVersion option
// nor AZURE_OPENAI_API_VERSION env var is set (see resolveAzureConfig).
var DEFAULT_AZURE_API_VERSION = "v1";
// Provider ids whose tool-call message format is accepted when converting
// messages for the Responses API (passed to convertResponsesMessages).
// NOTE(review): list is taken verbatim from the bundled upstream source;
// exact semantics of membership are defined where the set is consumed.
var AZURE_TOOL_CALL_PROVIDERS = /* @__PURE__ */ new Set(["openai", "openai-codex", "opencode", "azure-openai-responses"]);
|
|
27
|
+
/**
 * Parses a comma-separated "modelId=deploymentName" mapping string
 * (e.g. the AZURE_OPENAI_DEPLOYMENT_NAME_MAP env var) into a Map.
 * Blank entries and entries missing either side of the "=" are skipped;
 * keys and values are trimmed.
 * @param {string | undefined} value - Raw mapping string, may be empty/undefined.
 * @returns {Map<string, string>} modelId -> deploymentName
 */
function parseDeploymentNameMap(value) {
  const mapping = /* @__PURE__ */ new Map();
  if (!value) {
    return mapping;
  }
  const entries = value.split(",").map((part) => part.trim()).filter(Boolean);
  for (const entry of entries) {
    // Limit 2 mirrors the original: anything after a second "=" is dropped.
    const [rawId, rawName] = entry.split("=", 2);
    if (rawId && rawName) {
      mapping.set(rawId.trim(), rawName.trim());
    }
  }
  return mapping;
}
|
|
42
|
+
/**
 * Determines the Azure deployment name to request for a model.
 * Precedence: explicit azureDeploymentName option, then an entry for
 * model.id in AZURE_OPENAI_DEPLOYMENT_NAME_MAP, then model.id itself.
 */
function resolveDeploymentName(model, options) {
  const explicit = options?.azureDeploymentName;
  if (explicit) {
    return explicit;
  }
  const envMap = parseDeploymentNameMap(process.env.AZURE_OPENAI_DEPLOYMENT_NAME_MAP);
  const mapped = envMap.get(model.id);
  // `||` (not `??`): an empty-string mapping also falls back to model.id.
  return mapped || model.id;
}
|
|
49
|
+
// Streams an assistant response from the Azure OpenAI Responses API.
// Returns the AssistantMessageEventStream synchronously; the actual request
// runs in a detached async IIFE that pushes "start" / streamed deltas /
// "done" events into it, or a single "error" event on failure.
var streamAzureOpenAIResponses = (model, context, options) => {
  const stream = new AssistantMessageEventStream();
  (async () => {
    const deploymentName = resolveDeploymentName(model, options);
    // Partial assistant message; mutated in place by processResponsesStream
    // as stream events arrive, and also used as the error payload.
    const output = {
      role: "assistant",
      content: [],
      api: "azure-openai-responses",
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }
      },
      stopReason: "stop",
      timestamp: Date.now()
    };
    try {
      const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
      const client = createClient(model, apiKey, options);
      let params = buildParams(model, context, options, deploymentName);
      // onPayload may rewrite the request payload; `undefined` means keep as-is.
      const nextParams = await options?.onPayload?.(params, model);
      if (nextParams !== void 0) {
        params = nextParams;
      }
      const openaiStream = await client.responses.create(params, options?.signal ? { signal: options.signal } : void 0);
      stream.push({ type: "start", partial: output });
      await processResponsesStream(openaiStream, output, stream, model);
      // Abort may be observed only after the stream drains; surface it as an error.
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }
      if (output.stopReason === "aborted" || output.stopReason === "error") {
        throw new Error("An unknown error occurred");
      }
      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      // Strip the streaming-only `index` bookkeeping from content blocks
      // before handing the partial message back as the error payload.
      for (const block of output.content)
        delete block.index;
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();
  return stream;
};
|
|
100
|
+
/**
 * Convenience wrapper around streamAzureOpenAIResponses: resolves the API key
 * (explicit option or provider env var), builds base request options, and
 * clamps the reasoning effort unless the model supports "xhigh".
 * @throws {Error} when no API key can be resolved for the provider.
 */
var streamSimpleAzureOpenAIResponses = (model, context, options) => {
  const resolvedKey = options?.apiKey || getEnvApiKey(model.provider);
  if (!resolvedKey) {
    throw new Error(`No API key for provider: ${model.provider}`);
  }
  const baseOptions = buildBaseOptions(model, options, resolvedKey);
  const requestedEffort = options?.reasoning;
  const reasoningEffort = supportsXhigh(model)
    ? requestedEffort
    : clampReasoning(requestedEffort);
  return streamAzureOpenAIResponses(model, context, {
    ...baseOptions,
    reasoningEffort
  });
};
|
|
112
|
+
/**
 * Removes all trailing slashes from an Azure base URL so path segments
 * can be appended without producing "//".
 * @param {string} baseUrl
 * @returns {string}
 */
function normalizeAzureBaseUrl(baseUrl) {
  let end = baseUrl.length;
  while (end > 0 && baseUrl[end - 1] === "/") {
    end -= 1;
  }
  return baseUrl.slice(0, end);
}
|
|
115
|
+
/**
 * Builds the conventional Azure OpenAI v1 endpoint URL for a resource name.
 * @param {string} resourceName - Azure OpenAI resource name.
 * @returns {string} e.g. "https://myres.openai.azure.com/openai/v1"
 */
function buildDefaultBaseUrl(resourceName) {
  return "https://" + resourceName + ".openai.azure.com/openai/v1";
}
|
|
118
|
+
/**
 * Resolves the Azure endpoint configuration for a request.
 * API version precedence: azureApiVersion option, AZURE_OPENAI_API_VERSION
 * env var, DEFAULT_AZURE_API_VERSION.
 * Base URL precedence: azureBaseUrl option, AZURE_OPENAI_BASE_URL env var,
 * URL derived from the resource name, model.baseUrl.
 * @returns {{ baseUrl: string, apiVersion: string }} baseUrl has no trailing slash.
 * @throws {Error} when no base URL can be resolved from any source.
 */
function resolveAzureConfig(model, options) {
  const apiVersion =
    options?.azureApiVersion ||
    process.env.AZURE_OPENAI_API_VERSION ||
    DEFAULT_AZURE_API_VERSION;
  const explicitBase =
    options?.azureBaseUrl?.trim() || process.env.AZURE_OPENAI_BASE_URL?.trim() || void 0;
  const resourceName = options?.azureResourceName || process.env.AZURE_OPENAI_RESOURCE_NAME;
  // Candidates in precedence order; buildDefaultBaseUrl is a pure string
  // template, so computing it eagerly has no side effects.
  const candidates = [
    explicitBase,
    resourceName ? buildDefaultBaseUrl(resourceName) : void 0,
    model.baseUrl
  ];
  const chosen = candidates.find((url) => Boolean(url));
  if (!chosen) {
    throw new Error("Azure OpenAI base URL is required. Set AZURE_OPENAI_BASE_URL or AZURE_OPENAI_RESOURCE_NAME, or pass azureBaseUrl, azureResourceName, or model.baseUrl.");
  }
  return {
    baseUrl: normalizeAzureBaseUrl(chosen),
    apiVersion
  };
}
|
|
137
|
+
/**
 * Creates an AzureOpenAI client for the given model.
 * Falls back to the AZURE_OPENAI_API_KEY env var when no key is supplied;
 * options.headers override model.headers on key collision.
 * @throws {Error} when no API key is available from either source.
 */
function createClient(model, apiKey, options) {
  let resolvedKey = apiKey;
  if (!resolvedKey) {
    resolvedKey = process.env.AZURE_OPENAI_API_KEY;
    if (!resolvedKey) {
      throw new Error("Azure OpenAI API key is required. Set AZURE_OPENAI_API_KEY environment variable or pass it as an argument.");
    }
  }
  // Later spreads win: per-request option headers override model defaults.
  const headers = {
    ...model.headers,
    ...(options?.headers ?? {})
  };
  const { baseUrl, apiVersion } = resolveAzureConfig(model, options);
  return new AzureOpenAI({
    apiKey: resolvedKey,
    apiVersion,
    dangerouslyAllowBrowser: true,
    defaultHeaders: headers,
    baseURL: baseUrl
  });
}
|
|
157
|
+
/**
 * Builds the streaming request payload for the Azure OpenAI Responses API.
 * Optional fields (max_output_tokens, temperature, tools, reasoning) are only
 * attached when the corresponding input is present; for reasoning-capable
 * models with no effort/summary requested, effort is explicitly set to "none".
 */
function buildParams(model, context, options, deploymentName) {
  const input = convertResponsesMessages(model, context, AZURE_TOOL_CALL_PROVIDERS);
  const params = {
    model: deploymentName,
    input,
    stream: true,
    prompt_cache_key: options?.sessionId
  };
  const maxTokens = options?.maxTokens;
  if (maxTokens) {
    params.max_output_tokens = maxTokens;
  }
  const temperature = options?.temperature;
  if (temperature !== void 0) {
    params.temperature = temperature;
  }
  if (context.tools) {
    params.tools = convertResponsesTools(context.tools);
  }
  if (model.reasoning) {
    const effort = options?.reasoningEffort;
    const summary = options?.reasoningSummary;
    if (effort || summary) {
      params.reasoning = {
        effort: effort || "medium",
        summary: summary || "auto"
      };
      // Encrypted reasoning content is needed to replay reasoning on later turns.
      // NOTE(review): presumed purpose — consumed downstream in processResponsesStream; verify.
      params.include = ["reasoning.encrypted_content"];
    } else {
      params.reasoning = { effort: "none" };
    }
  }
  return params;
}
|
|
187
|
+
export {
|
|
188
|
+
streamAzureOpenAIResponses,
|
|
189
|
+
streamSimpleAzureOpenAIResponses
|
|
190
|
+
};
|