@kimchitest/opencode-otel-plugin 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +156 -0
- package/package.json +24 -0
- package/plugin.ts +155 -0
package/README.md
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
# OpenCode OTEL Plugin for AI Enabler
|
|
2
|
+
|
|
3
|
+
Sends usage telemetry from OpenCode to the AI Enabler service.
|
|
4
|
+
|
|
5
|
+
Requires OpenCode version 1.2.20 or later.
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
### Option 1: NPM package (recommended)
|
|
10
|
+
|
|
11
|
+
Add to your `~/.config/opencode/opencode.json`:
|
|
12
|
+
|
|
13
|
+
```json
|
|
14
|
+
{
|
|
15
|
+
  "plugin": ["@kimchitest/opencode-otel-plugin"]
|
|
16
|
+
}
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
Set environment variables (see [Configuration](#configuration) section below), then restart OpenCode.
|
|
20
|
+
|
|
21
|
+
### Option 2: Local plugin
|
|
22
|
+
|
|
23
|
+
1. Copy `plugin.ts` to your OpenCode plugins directory:
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
mkdir -p ~/.config/opencode/plugins
|
|
27
|
+
cp plugin.ts ~/.config/opencode/plugins/otel.ts
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
2. Set environment variables (see [Configuration](#configuration) section below)
|
|
31
|
+
|
|
32
|
+
3. Restart OpenCode.
|
|
33
|
+
|
|
34
|
+
### Option 3: Project-level plugin
|
|
35
|
+
|
|
36
|
+
1. Create the plugins directory in your project:
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
mkdir -p .opencode/plugins
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
2. Copy `plugin.ts` to `.opencode/plugins/otel.ts`
|
|
43
|
+
|
|
44
|
+
3. Set the environment variables (same as above)
|
|
45
|
+
|
|
46
|
+
## Configuration
|
|
47
|
+
|
|
48
|
+
### Environment Variables
|
|
49
|
+
|
|
50
|
+
| Variable | Required | Description |
|
|
51
|
+
|----------|----------|-------------|
|
|
52
|
+
| `OPENCODE_ENABLE_TELEMETRY` | Yes | Set to `1` to enable telemetry |
|
|
53
|
+
| `OPENCODE_OTLP_ENDPOINT` | Yes | AI Enabler logs ingest endpoint URL |
|
|
54
|
+
| `OPENCODE_OTLP_HEADERS` | Yes | Authorization header with your AI Enabler API key |
|
|
55
|
+
|
|
56
|
+
#### Example Environment Variables
|
|
57
|
+
|
|
58
|
+
Add these to your shell config (`~/.zshrc`, `~/.bashrc`, etc.):
|
|
59
|
+
|
|
60
|
+
```bash
|
|
61
|
+
# Enable the plugin
|
|
62
|
+
export OPENCODE_ENABLE_TELEMETRY=1
|
|
63
|
+
|
|
64
|
+
# AI Enabler endpoint for log ingestion
|
|
65
|
+
export OPENCODE_OTLP_ENDPOINT=https://api.cast.ai/ai-optimizer/v1beta/logs:ingest
|
|
66
|
+
|
|
67
|
+
# Authorization header with your AI Enabler API key
|
|
68
|
+
export OPENCODE_OTLP_HEADERS="Authorization=Bearer YOUR_API_KEY_HERE"
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
After adding, restart your shell or run `source ~/.zshrc`.
|
|
72
|
+
|
|
73
|
+
### Provider Configuration
|
|
74
|
+
|
|
75
|
+
The plugin reads provider information from your OpenCode config (`~/.config/opencode/opencode.json`). Ensure your provider key matches one of the valid values below.
|
|
76
|
+
|
|
77
|
+
### Valid Provider Values
|
|
78
|
+
|
|
79
|
+
| Provider Key | Description |
|
|
80
|
+
|--------------|-------------|
|
|
81
|
+
| `openai` | OpenAI |
|
|
82
|
+
| `anthropic` | Anthropic (Claude) |
|
|
83
|
+
| `azure` | Azure OpenAI |
|
|
84
|
+
| `azure_ai` | Azure AI |
|
|
85
|
+
| `gemini` | Google Gemini |
|
|
86
|
+
| `vertex_ai-language-models` | Vertex AI Gemini |
|
|
87
|
+
| `vertex_ai-anthropic_models` | Vertex AI Anthropic |
|
|
88
|
+
| `groq` | Groq |
|
|
89
|
+
| `mistral` | Mistral |
|
|
90
|
+
| `codestral` | Codestral |
|
|
91
|
+
| `cohere_chat` | Cohere |
|
|
92
|
+
| `anyscale` | Anyscale |
|
|
93
|
+
| `openrouter` | OpenRouter |
|
|
94
|
+
| `databricks` | Databricks |
|
|
95
|
+
| `perplexity` | Perplexity |
|
|
96
|
+
| `hosted_vllm` | Hosted vLLM |
|
|
97
|
+
| `bedrock` | AWS Bedrock |
|
|
98
|
+
| `ai-enabler` | AI Enabler (serverless models) |
|
|
99
|
+
|
|
100
|
+
> **Important:** If your provider key does not match one of the valid values listed above, the request will be **rejected** and you will see an error toast notification in OpenCode. Make sure to use a provider key from the list above to ensure your usage data is recorded correctly.
|
|
101
|
+
|
|
102
|
+
### Example OpenCode Config
|
|
103
|
+
|
|
104
|
+
```json
|
|
105
|
+
{
|
|
106
|
+
"model": "ai-enabler/glm-5-fp8",
|
|
107
|
+
"provider": {
|
|
108
|
+
"ai-enabler": {
|
|
109
|
+
"npm": "@ai-sdk/openai-compatible",
|
|
110
|
+
"name": "AI Enabler",
|
|
111
|
+
"options": {
|
|
112
|
+
"baseURL": "https://llm.cast.ai/openai/v1",
|
|
113
|
+
"apiKey": "your-api-key"
|
|
114
|
+
},
|
|
115
|
+
"models": {
|
|
116
|
+
"glm-5-fp8": {
|
|
117
|
+
"name": "glm-5-fp8",
|
|
118
|
+
"tool_call": true
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
## Data Sent
|
|
127
|
+
|
|
128
|
+
The plugin sends `api_request` events for each completed assistant message with token usage.
|
|
129
|
+
|
|
130
|
+
### API Request Attributes
|
|
131
|
+
|
|
132
|
+
| Attribute | Description |
|
|
133
|
+
|-----------|-------------|
|
|
134
|
+
| `model` | Model identifier |
|
|
135
|
+
| `provider` | Provider identifier |
|
|
136
|
+
| `input_tokens` | Number of input tokens |
|
|
137
|
+
| `output_tokens` | Number of output tokens |
|
|
138
|
+
| `cost_usd` | Cost in USD |
|
|
139
|
+
| `duration_ms` | Request duration in milliseconds (always 0 - OpenCode doesn't expose this data) |
|
|
140
|
+
|
|
141
|
+
## Troubleshooting
|
|
142
|
+
|
|
143
|
+
The plugin shows error notifications via OpenCode toasts when issues occur:
|
|
144
|
+
|
|
145
|
+
### Error Toast Messages
|
|
146
|
+
|
|
147
|
+
- **"Invalid provider"** - Your provider key is not recognized. Update your `opencode.json` to use a valid provider key from the list above.
|
|
148
|
+
- **"Telemetry error"** - Other errors (network issues, auth failures, etc.)
|
|
149
|
+
|
|
150
|
+
### Debugging Steps
|
|
151
|
+
|
|
152
|
+
1. Verify environment variables are set correctly
|
|
153
|
+
2. Check your AI Enabler API key is valid
|
|
154
|
+
3. Ensure the provider key in your OpenCode config matches a valid value
|
|
155
|
+
4. Verify the endpoint URL is correct
|
|
156
|
+
|
package/package.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@kimchitest/opencode-otel-plugin",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "OpenCode OTEL plugin for AI Enabler - sends usage telemetry for cost tracking",
|
|
5
|
+
"main": "plugin.ts",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"keywords": [
|
|
8
|
+
"opencode",
|
|
9
|
+
"otel",
|
|
10
|
+
"opentelemetry",
|
|
11
|
+
"ai-enabler",
|
|
12
|
+
"telemetry"
|
|
13
|
+
],
|
|
14
|
+
"author": "AI Enabler Team",
|
|
15
|
+
"license": "MIT",
|
|
16
|
+
"repository": {
|
|
17
|
+
"type": "git",
|
|
18
|
+
"url": "git+https://github.com/castai/opencode-otel-plugin.git"
|
|
19
|
+
},
|
|
20
|
+
"files": [
|
|
21
|
+
"plugin.ts",
|
|
22
|
+
"README.md"
|
|
23
|
+
]
|
|
24
|
+
}
|
package/plugin.ts
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenCode OTEL Plugin for AI Enabler
|
|
3
|
+
*
|
|
4
|
+
* Sends api_request events to the AI Enabler service for usage tracking.
|
|
5
|
+
*
|
|
6
|
+
* REQUIRED ATTRIBUTES:
|
|
7
|
+
* - model: The model identifier (e.g., "glm-5-fp8", "claude-sonnet-4-6")
|
|
8
|
+
* - provider: The provider identifier from the list below
|
|
9
|
+
* - input_tokens: Number of input/prompt tokens
|
|
10
|
+
* - output_tokens: Number of output/completion tokens
|
|
11
|
+
* - cost_usd: Cost in USD (optional, used as fallback if model not found)
|
|
12
|
+
*
|
|
13
|
+
* VALID PROVIDER VALUES:
|
|
14
|
+
* - "openai" - OpenAI
|
|
15
|
+
* - "anthropic" - Anthropic (Claude)
|
|
16
|
+
* - "azure" - Azure OpenAI
|
|
17
|
+
* - "azure_ai" - Azure AI
|
|
18
|
+
* - "gemini" - Google Gemini
|
|
19
|
+
* - "vertex_ai-language-models" - Vertex AI Gemini
|
|
20
|
+
* - "vertex_ai-anthropic_models" - Vertex AI Anthropic
|
|
21
|
+
* - "groq" - Groq
|
|
22
|
+
* - "mistral" - Mistral
|
|
23
|
+
* - "codestral" - Codestral
|
|
24
|
+
* - "cohere_chat" - Cohere
|
|
25
|
+
* - "anyscale" - Anyscale
|
|
26
|
+
* - "openrouter" - OpenRouter
|
|
27
|
+
* - "databricks" - Databricks
|
|
28
|
+
* - "perplexity" - Perplexity
|
|
29
|
+
* - "hosted_vllm" - Hosted vLLM
|
|
30
|
+
* - "bedrock" - AWS Bedrock
|
|
31
|
+
* - "ai-enabler" - AI Enabler (serverless models)
|
|
32
|
+
*
|
|
33
|
+
* ENVIRONMENT VARIABLES:
|
|
34
|
+
* - OPENCODE_ENABLE_TELEMETRY: Set to enable telemetry
|
|
35
|
+
* - OPENCODE_OTLP_ENDPOINT: The AI Enabler logs ingest endpoint
|
|
36
|
+
* - OPENCODE_OTLP_HEADERS: Authorization header (format: "Authorization=Bearer <token>")
|
|
37
|
+
*
|
|
38
|
+
* Tested with OpenCode version: 1.2.20
|
|
39
|
+
*/
|
|
40
|
+
|
|
41
|
+
const endpoint = process.env.OPENCODE_OTLP_ENDPOINT || "http://localhost:4318"
|
|
42
|
+
const headersStr = process.env.OPENCODE_OTLP_HEADERS || ""
|
|
43
|
+
const enabled = !!process.env.OPENCODE_ENABLE_TELEMETRY
|
|
44
|
+
|
|
45
|
+
const sentMessages = new Set<string>()
|
|
46
|
+
|
|
47
|
+
const headers: Record<string, string> = { "Content-Type": "application/json" }
|
|
48
|
+
if (headersStr) {
|
|
49
|
+
const match = headersStr.match(/Authorization=Bearer\s+(.+)/)
|
|
50
|
+
if (match) {
|
|
51
|
+
headers["Authorization"] = `Bearer ${match[1].replace(/"/g, "")}`
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
async function sendLog(eventName: string, attrs: Record<string, string | number> = {}) {
|
|
56
|
+
if (!enabled) return null
|
|
57
|
+
|
|
58
|
+
const now = String(Date.now() * 1000000)
|
|
59
|
+
|
|
60
|
+
const payload = {
|
|
61
|
+
resourceLogs: [{
|
|
62
|
+
resource: {
|
|
63
|
+
attributes: [
|
|
64
|
+
{ key: "service.name", value: { stringValue: "opencode" } }
|
|
65
|
+
],
|
|
66
|
+
droppedAttributesCount: 0
|
|
67
|
+
},
|
|
68
|
+
scopeLogs: [{
|
|
69
|
+
scope: { name: "opencode", version: "1.0.0" },
|
|
70
|
+
logRecords: [{
|
|
71
|
+
timeUnixNano: now,
|
|
72
|
+
observedTimeUnixNano: now,
|
|
73
|
+
severityNumber: 9,
|
|
74
|
+
severityText: "INFO",
|
|
75
|
+
eventName: eventName,
|
|
76
|
+
body: { stringValue: eventName },
|
|
77
|
+
attributes: Object.entries(attrs).map(([k, v]) => ({
|
|
78
|
+
key: k,
|
|
79
|
+
value: { stringValue: String(v) }
|
|
80
|
+
})),
|
|
81
|
+
droppedAttributesCount: 0,
|
|
82
|
+
flags: 0,
|
|
83
|
+
traceId: "",
|
|
84
|
+
spanId: ""
|
|
85
|
+
}]
|
|
86
|
+
}]
|
|
87
|
+
}]
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
try {
|
|
91
|
+
const response = await fetch(endpoint, {
|
|
92
|
+
method: "POST",
|
|
93
|
+
headers,
|
|
94
|
+
body: JSON.stringify(payload),
|
|
95
|
+
})
|
|
96
|
+
|
|
97
|
+
if (!response.ok) {
|
|
98
|
+
const body = await response.text()
|
|
99
|
+
return { error: true, status: response.status, body }
|
|
100
|
+
}
|
|
101
|
+
return { error: false }
|
|
102
|
+
} catch (err) {
|
|
103
|
+
return { error: true, message: String(err) }
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
export const OtelPlugin = async ({ client }: { client: { tui: { showToast: (opts: { body: { message: string; variant: "info" | "success" | "warning" | "error" } }) => Promise<boolean> } } }) => {
|
|
108
|
+
if (!enabled) return {}
|
|
109
|
+
|
|
110
|
+
return {
|
|
111
|
+
event: async ({ event }: { event: { type: string; properties?: Record<string, unknown> } }) => {
|
|
112
|
+
if (event.type === "message.updated") {
|
|
113
|
+
const info = (event.properties?.info as Record<string, unknown>) || {}
|
|
114
|
+
|
|
115
|
+
if (info.role === "assistant" && info.finish) {
|
|
116
|
+
const messageId = String(info.id || "unknown")
|
|
117
|
+
if (sentMessages.has(messageId)) return
|
|
118
|
+
|
|
119
|
+
sentMessages.add(messageId)
|
|
120
|
+
const tokens = (info.tokens as Record<string, number>) || {}
|
|
121
|
+
const provider = String(info.providerID || "unknown")
|
|
122
|
+
const model = String(info.modelID || "unknown")
|
|
123
|
+
|
|
124
|
+
const result = await sendLog("api_request", {
|
|
125
|
+
"event.name": "api_request",
|
|
126
|
+
client: "opencode",
|
|
127
|
+
model,
|
|
128
|
+
provider,
|
|
129
|
+
input_tokens: tokens.input || 0,
|
|
130
|
+
output_tokens: tokens.output || 0,
|
|
131
|
+
cost_usd: Number(info.cost) || 0,
|
|
132
|
+
duration_ms: 0,
|
|
133
|
+
})
|
|
134
|
+
|
|
135
|
+
if (result?.error) {
|
|
136
|
+
let errorMsg = result.body || result.message || "Unknown error"
|
|
137
|
+
try {
|
|
138
|
+
const parsed = JSON.parse(errorMsg)
|
|
139
|
+
if (parsed.message) {
|
|
140
|
+
errorMsg = parsed.message
|
|
141
|
+
}
|
|
142
|
+
} catch {
|
|
143
|
+
}
|
|
144
|
+
await client.tui.showToast({
|
|
145
|
+
body: {
|
|
146
|
+
message: `OTEL: ${errorMsg}`,
|
|
147
|
+
variant: "error"
|
|
148
|
+
}
|
|
149
|
+
})
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
},
|
|
154
|
+
}
|
|
155
|
+
}
|