@link-assistant/agent 0.18.0 → 0.18.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@link-assistant/agent",
3
- "version": "0.18.0",
3
+ "version": "0.18.2",
4
4
  "description": "A minimal, public domain AI CLI agent compatible with OpenCode's JSON interface. Bun-only runtime.",
5
5
  "main": "src/index.js",
6
6
  "type": "module",
package/src/index.js CHANGED
@@ -802,8 +802,14 @@ async function main() {
802
802
  compactJson: isCompact,
803
803
  });
804
804
 
805
- // Monkey-patch globalThis.fetch for raw HTTP logging in --verbose mode.
806
- // Catches ALL HTTP calls regardless of AI SDK fetch passthrough. (#217)
805
+ // Global fetch monkey-patch for verbose HTTP logging (#221).
806
+ // This catches any HTTP calls that go through globalThis.fetch directly,
807
+ // including non-provider calls (auth, config, tools) that may not have
808
+ // their own createVerboseFetch wrapper. The provider-level wrapper in
809
+ // provider.ts getSDK() also logs independently — both mechanisms are
810
+ // kept active to maximize HTTP observability in --verbose mode.
811
+ // See: https://github.com/link-assistant/agent/issues/221
812
+ // See: https://github.com/link-assistant/agent/issues/217
807
813
  if (!globalThis.__agentVerboseFetchInstalled) {
808
814
  globalThis.fetch = createVerboseFetch(globalThis.fetch, {
809
815
  caller: 'global',
@@ -1201,11 +1201,13 @@ export namespace Provider {
1201
1201
  sessionID: provider.id,
1202
1202
  });
1203
1203
 
1204
- // Verbose HTTP logging is handled by the global fetch monkey-patch
1205
- // (installed in CLI middleware in index.js). The global patch catches ALL
1206
- // HTTP calls reliably, regardless of how the AI SDK passes fetch internally.
1207
- // This provider-level wrapper is kept as a fallback for environments where
1208
- // the global patch may not be installed (e.g., programmatic use).
1204
+ // Verbose HTTP logging for provider API calls.
1205
+ // This provider-level wrapper logs HTTP requests/responses independently
1206
+ // of the global fetch monkey-patch. Both mechanisms are kept active to
1207
+ // maximize HTTP observability — the global patch may miss calls if the
1208
+ // AI SDK captures/resolves fetch references before it is installed,
1209
+ // while this wrapper is injected directly into the SDK's fetch option.
1210
+ // See: https://github.com/link-assistant/agent/issues/221
1209
1211
  // See: https://github.com/link-assistant/agent/issues/217
1210
1212
  // See: https://github.com/link-assistant/agent/issues/215
1211
1213
  {
@@ -1226,14 +1228,9 @@ export namespace Provider {
1226
1228
  init?: RequestInit
1227
1229
  ): Promise<Response> => {
1228
1230
  // Check verbose flag at call time — not at SDK creation time.
1229
- // When the global fetch monkey-patch is installed, it handles verbose
1230
- // logging for all calls. The provider wrapper is a fallback for
1231
- // environments without the global patch.
1232
- // See: https://github.com/link-assistant/agent/issues/217
1233
- if (
1234
- !Flag.OPENCODE_VERBOSE ||
1235
- globalThis.__agentVerboseFetchInstalled
1236
- ) {
1231
+ // This ensures --verbose works even when the flag is set after SDK creation.
1232
+ // See: https://github.com/link-assistant/agent/issues/206
1233
+ if (!Flag.OPENCODE_VERBOSE) {
1237
1234
  return innerFetch(input, init);
1238
1235
  }
1239
1236
 
@@ -94,22 +94,47 @@ export namespace SessionSummary {
94
94
  const assistantMsg = messages.find((m) => m.info.role === 'assistant')!
95
95
  .info as MessageV2.Assistant;
96
96
 
97
- // Use the same model as the main session (--model) instead of a small model
98
- // This ensures consistent behavior and uses the model the user explicitly requested
99
- // See: https://github.com/link-assistant/agent/issues/217
100
- log.info(() => ({
101
- message: 'loading model for summarization',
102
- providerID: assistantMsg.providerID,
103
- modelID: assistantMsg.modelID,
104
- hint: 'Using same model as --model (not a small model)',
105
- }));
106
- const model = await Provider.getModel(
107
- assistantMsg.providerID,
108
- assistantMsg.modelID
109
- ).catch(() => null);
97
+ // Use the compaction model (--compaction-model, e.g. gpt-5-nano) for summarization
98
+ // to avoid doubling rate-limit pressure on the main model.
99
+ // If the compaction model is unavailable, fall back to the main model.
100
+ // See: https://github.com/link-assistant/agent/issues/223
101
+ const compactionModel = userMsg.compactionModel;
102
+ let model: Awaited<ReturnType<typeof Provider.getModel>> | null = null;
103
+
104
+ if (compactionModel && !compactionModel.useSameModel) {
105
+ model = await Provider.getModel(
106
+ compactionModel.providerID,
107
+ compactionModel.modelID
108
+ ).catch(() => null);
109
+ if (model) {
110
+ log.info(() => ({
111
+ message: 'loading model for summarization',
112
+ providerID: model!.providerID,
113
+ modelID: model!.modelID,
114
+ hint: 'Using compaction model to reduce rate-limit pressure on main model',
115
+ mainModelID: assistantMsg.modelID,
116
+ }));
117
+ }
118
+ }
119
+
120
+ if (!model) {
121
+ // Fall back to the main model if compaction model is not configured or unavailable
122
+ log.info(() => ({
123
+ message: 'loading model for summarization',
124
+ providerID: assistantMsg.providerID,
125
+ modelID: assistantMsg.modelID,
126
+ hint: compactionModel
127
+ ? 'Compaction model unavailable, falling back to main model'
128
+ : 'Using same model as --model (no compaction model configured)',
129
+ }));
130
+ model = await Provider.getModel(
131
+ assistantMsg.providerID,
132
+ assistantMsg.modelID
133
+ ).catch(() => null);
134
+ }
110
135
  if (!model) {
111
136
  log.info(() => ({
112
- message: 'could not load session model for summarization, skipping',
137
+ message: 'could not load model for summarization, skipping',
113
138
  providerID: assistantMsg.providerID,
114
139
  modelID: assistantMsg.modelID,
115
140
  }));