@link-assistant/agent 0.8.19 → 0.8.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/cli/process-name.ts +41 -0
- package/src/index.js +3 -4
- package/src/session/compaction.ts +21 -18
- package/src/session/message-v2.ts +3 -3
- package/src/session/prompt.ts +57 -40
- package/src/session/summary.ts +3 -1
package/src/cli/process-name.ts
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import { platform } from 'os';
|
|
2
|
+
import { dlopen, FFIType, ptr } from 'bun:ffi';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* Set the process name visible in system monitoring tools (top, ps, htop, etc.).
|
|
6
|
+
*
|
|
7
|
+
* Bun does not implement the process.title setter (unlike Node.js), so we use
|
|
8
|
+
* platform-specific system calls via Bun's FFI:
|
|
9
|
+
* - Linux: prctl(PR_SET_NAME, name) sets /proc/<pid>/comm
|
|
10
|
+
* - macOS: relies on the binary/symlink name (set by `bun install -g`)
|
|
11
|
+
* - Windows: no-op (Task Manager shows the executable name)
|
|
12
|
+
*/
|
|
13
|
+
export function setProcessName(name: string): void {
|
|
14
|
+
// Set in-process values for any JS code that checks them
|
|
15
|
+
process.title = name;
|
|
16
|
+
process.argv0 = name;
|
|
17
|
+
|
|
18
|
+
const os = platform();
|
|
19
|
+
|
|
20
|
+
if (os === 'linux') {
|
|
21
|
+
try {
|
|
22
|
+
const PR_SET_NAME = 15;
|
|
23
|
+
const libc = dlopen('libc.so.6', {
|
|
24
|
+
prctl: {
|
|
25
|
+
args: [FFIType.i32, FFIType.ptr],
|
|
26
|
+
returns: FFIType.i32,
|
|
27
|
+
},
|
|
28
|
+
});
|
|
29
|
+
// PR_SET_NAME accepts up to 16 bytes including the null terminator
|
|
30
|
+
const buf = Buffer.from(name.slice(0, 15) + '\0');
|
|
31
|
+
libc.symbols.prctl(PR_SET_NAME, ptr(buf));
|
|
32
|
+
libc.close();
|
|
33
|
+
} catch (_e) {
|
|
34
|
+
// Silently ignore - process name is cosmetic
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
// macOS: no userspace API changes the process comm shown in ps/top.
|
|
38
|
+
// When installed via `bun install -g`, the symlink is named 'agent',
|
|
39
|
+
// so macOS will already show 'agent' in ps/top.
|
|
40
|
+
// Windows: Task Manager always shows the executable name.
|
|
41
|
+
}
|
package/src/index.js
CHANGED
|
@@ -1,9 +1,8 @@
|
|
|
1
1
|
#!/usr/bin/env bun
|
|
2
2
|
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
process.argv0 = 'agent';
|
|
3
|
+
import { setProcessName } from './cli/process-name.ts';
|
|
4
|
+
|
|
5
|
+
setProcessName('agent');
|
|
7
6
|
|
|
8
7
|
import { Server } from './server/server.ts';
|
|
9
8
|
import { Instance } from './project/instance.ts';
|
|
package/src/session/compaction.ts
CHANGED
|
@@ -136,6 +136,26 @@ export namespace SessionCompaction {
|
|
|
136
136
|
model: model.info,
|
|
137
137
|
abort: input.abort,
|
|
138
138
|
});
|
|
139
|
+
// Pre-convert messages to ModelMessage format (async in AI SDK 6.0+)
|
|
140
|
+
const modelMessages = await MessageV2.toModelMessage(
|
|
141
|
+
input.messages.filter((m) => {
|
|
142
|
+
if (m.info.role !== 'assistant' || m.info.error === undefined) {
|
|
143
|
+
return true;
|
|
144
|
+
}
|
|
145
|
+
if (
|
|
146
|
+
MessageV2.AbortedError.isInstance(m.info.error) &&
|
|
147
|
+
m.parts.some(
|
|
148
|
+
(part) => part.type !== 'step-start' && part.type !== 'reasoning'
|
|
149
|
+
)
|
|
150
|
+
) {
|
|
151
|
+
return true;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
return false;
|
|
155
|
+
})
|
|
156
|
+
);
|
|
157
|
+
// Defensive check: ensure modelMessages is iterable (AI SDK 6.0.1 compatibility fix)
|
|
158
|
+
const safeModelMessages = Array.isArray(modelMessages) ? modelMessages : [];
|
|
139
159
|
const result = await processor.process(() =>
|
|
140
160
|
streamText({
|
|
141
161
|
onError(error) {
|
|
@@ -166,24 +186,7 @@ export namespace SessionCompaction {
|
|
|
166
186
|
content: x,
|
|
167
187
|
})
|
|
168
188
|
),
|
|
169
|
-
...
|
|
170
|
-
input.messages.filter((m) => {
|
|
171
|
-
if (m.info.role !== 'assistant' || m.info.error === undefined) {
|
|
172
|
-
return true;
|
|
173
|
-
}
|
|
174
|
-
if (
|
|
175
|
-
MessageV2.AbortedError.isInstance(m.info.error) &&
|
|
176
|
-
m.parts.some(
|
|
177
|
-
(part) =>
|
|
178
|
-
part.type !== 'step-start' && part.type !== 'reasoning'
|
|
179
|
-
)
|
|
180
|
-
) {
|
|
181
|
-
return true;
|
|
182
|
-
}
|
|
183
|
-
|
|
184
|
-
return false;
|
|
185
|
-
})
|
|
186
|
-
),
|
|
189
|
+
...safeModelMessages,
|
|
187
190
|
{
|
|
188
191
|
role: 'user',
|
|
189
192
|
content: [
|
|
package/src/session/message-v2.ts
CHANGED
|
@@ -601,12 +601,12 @@ export namespace MessageV2 {
|
|
|
601
601
|
throw new Error('unknown message type');
|
|
602
602
|
}
|
|
603
603
|
|
|
604
|
-
export function toModelMessage(
|
|
604
|
+
export async function toModelMessage(
|
|
605
605
|
input: {
|
|
606
606
|
info: Info;
|
|
607
607
|
parts: Part[];
|
|
608
608
|
}[]
|
|
609
|
-
): ModelMessage[] {
|
|
609
|
+
): Promise<ModelMessage[]> {
|
|
610
610
|
const result: UIMessage[] = [];
|
|
611
611
|
|
|
612
612
|
for (const msg of input) {
|
|
@@ -723,7 +723,7 @@ export namespace MessageV2 {
|
|
|
723
723
|
}
|
|
724
724
|
}
|
|
725
725
|
|
|
726
|
-
return convertToModelMessages(result);
|
|
726
|
+
return await convertToModelMessages(result);
|
|
727
727
|
}
|
|
728
728
|
|
|
729
729
|
export const stream = fn(
|
package/src/session/prompt.ts
CHANGED
|
@@ -533,6 +533,29 @@ export namespace SessionPrompt {
|
|
|
533
533
|
});
|
|
534
534
|
}
|
|
535
535
|
|
|
536
|
+
// Pre-convert messages to ModelMessage format (async in AI SDK 6.0+)
|
|
537
|
+
const modelMessages = await MessageV2.toModelMessage(
|
|
538
|
+
msgs.filter((m) => {
|
|
539
|
+
if (m.info.role !== 'assistant' || m.info.error === undefined) {
|
|
540
|
+
return true;
|
|
541
|
+
}
|
|
542
|
+
if (
|
|
543
|
+
MessageV2.AbortedError.isInstance(m.info.error) &&
|
|
544
|
+
m.parts.some(
|
|
545
|
+
(part) => part.type !== 'step-start' && part.type !== 'reasoning'
|
|
546
|
+
)
|
|
547
|
+
) {
|
|
548
|
+
return true;
|
|
549
|
+
}
|
|
550
|
+
|
|
551
|
+
return false;
|
|
552
|
+
})
|
|
553
|
+
);
|
|
554
|
+
// Defensive check: ensure modelMessages is iterable (AI SDK 6.0.1 compatibility fix)
|
|
555
|
+
const safeModelMessages = Array.isArray(modelMessages)
|
|
556
|
+
? modelMessages
|
|
557
|
+
: [];
|
|
558
|
+
|
|
536
559
|
// Verbose logging: output request details for debugging
|
|
537
560
|
if (Flag.OPENCODE_VERBOSE) {
|
|
538
561
|
const systemTokens = system.reduce(
|
|
@@ -676,24 +699,7 @@ export namespace SessionPrompt {
|
|
|
676
699
|
content: x,
|
|
677
700
|
})
|
|
678
701
|
),
|
|
679
|
-
...
|
|
680
|
-
msgs.filter((m) => {
|
|
681
|
-
if (m.info.role !== 'assistant' || m.info.error === undefined) {
|
|
682
|
-
return true;
|
|
683
|
-
}
|
|
684
|
-
if (
|
|
685
|
-
MessageV2.AbortedError.isInstance(m.info.error) &&
|
|
686
|
-
m.parts.some(
|
|
687
|
-
(part) =>
|
|
688
|
-
part.type !== 'step-start' && part.type !== 'reasoning'
|
|
689
|
-
)
|
|
690
|
-
) {
|
|
691
|
-
return true;
|
|
692
|
-
}
|
|
693
|
-
|
|
694
|
-
return false;
|
|
695
|
-
})
|
|
696
|
-
),
|
|
702
|
+
...safeModelMessages,
|
|
697
703
|
],
|
|
698
704
|
tools: model.info?.tool_call === false ? undefined : tools,
|
|
699
705
|
model: wrapLanguageModel({
|
|
@@ -1565,6 +1571,37 @@ export namespace SessionPrompt {
|
|
|
1565
1571
|
thinkingBudget: 0,
|
|
1566
1572
|
};
|
|
1567
1573
|
}
|
|
1574
|
+
// Pre-convert messages to ModelMessage format (async in AI SDK 6.0+)
|
|
1575
|
+
const titleModelMessages = await MessageV2.toModelMessage([
|
|
1576
|
+
{
|
|
1577
|
+
info: {
|
|
1578
|
+
id: Identifier.ascending('message'),
|
|
1579
|
+
role: 'user',
|
|
1580
|
+
sessionID: input.session.id,
|
|
1581
|
+
time: {
|
|
1582
|
+
created: Date.now(),
|
|
1583
|
+
},
|
|
1584
|
+
agent:
|
|
1585
|
+
input.message.info.role === 'user'
|
|
1586
|
+
? input.message.info.agent
|
|
1587
|
+
: 'build',
|
|
1588
|
+
model: {
|
|
1589
|
+
providerID: input.providerID,
|
|
1590
|
+
modelID: input.modelID,
|
|
1591
|
+
},
|
|
1592
|
+
},
|
|
1593
|
+
parts: input.message.parts,
|
|
1594
|
+
},
|
|
1595
|
+
]);
|
|
1596
|
+
// Defensive check: ensure titleModelMessages is iterable (AI SDK 6.0.1 compatibility fix)
|
|
1597
|
+
const safeTitleMessages = Array.isArray(titleModelMessages)
|
|
1598
|
+
? titleModelMessages
|
|
1599
|
+
: [];
|
|
1600
|
+
// Defensive check: ensure SystemPrompt.title returns iterable (fix for issue #155)
|
|
1601
|
+
const titleSystemMessages = SystemPrompt.title(small.providerID);
|
|
1602
|
+
const safeTitleSystemMessages = Array.isArray(titleSystemMessages)
|
|
1603
|
+
? titleSystemMessages
|
|
1604
|
+
: [];
|
|
1568
1605
|
await generateText({
|
|
1569
1606
|
maxOutputTokens: small.info?.reasoning ? 1500 : 20,
|
|
1570
1607
|
providerOptions: ProviderTransform.providerOptions(
|
|
@@ -1573,7 +1610,7 @@ export namespace SessionPrompt {
|
|
|
1573
1610
|
options
|
|
1574
1611
|
),
|
|
1575
1612
|
messages: [
|
|
1576
|
-
...
|
|
1613
|
+
...safeTitleSystemMessages.map(
|
|
1577
1614
|
(x): ModelMessage => ({
|
|
1578
1615
|
role: 'system',
|
|
1579
1616
|
content: x,
|
|
@@ -1585,27 +1622,7 @@ export namespace SessionPrompt {
|
|
|
1585
1622
|
The following is the text to summarize:
|
|
1586
1623
|
`,
|
|
1587
1624
|
},
|
|
1588
|
-
...
|
|
1589
|
-
{
|
|
1590
|
-
info: {
|
|
1591
|
-
id: Identifier.ascending('message'),
|
|
1592
|
-
role: 'user',
|
|
1593
|
-
sessionID: input.session.id,
|
|
1594
|
-
time: {
|
|
1595
|
-
created: Date.now(),
|
|
1596
|
-
},
|
|
1597
|
-
agent:
|
|
1598
|
-
input.message.info.role === 'user'
|
|
1599
|
-
? input.message.info.agent
|
|
1600
|
-
: 'build',
|
|
1601
|
-
model: {
|
|
1602
|
-
providerID: input.providerID,
|
|
1603
|
-
modelID: input.modelID,
|
|
1604
|
-
},
|
|
1605
|
-
},
|
|
1606
|
-
parts: input.message.parts,
|
|
1607
|
-
},
|
|
1608
|
-
]),
|
|
1625
|
+
...safeTitleMessages,
|
|
1609
1626
|
],
|
|
1610
1627
|
headers: small.info?.headers ?? {},
|
|
1611
1628
|
model: small.language,
|
package/src/session/summary.ts
CHANGED
|
@@ -133,6 +133,8 @@ export namespace SessionSummary {
|
|
|
133
133
|
.findLast((m) => m.info.role === 'assistant')
|
|
134
134
|
?.parts.findLast((p) => p.type === 'text')?.text;
|
|
135
135
|
if (!summary || diffs.length > 0) {
|
|
136
|
+
// Pre-convert messages to ModelMessage format (async in AI SDK 6.0+)
|
|
137
|
+
const modelMessages = await MessageV2.toModelMessage(messages);
|
|
136
138
|
const result = await generateText({
|
|
137
139
|
model: small.language,
|
|
138
140
|
maxOutputTokens: 100,
|
|
@@ -142,7 +144,7 @@ export namespace SessionSummary {
|
|
|
142
144
|
content: `
|
|
143
145
|
Summarize the following conversation into 2 sentences MAX explaining what the assistant did and why. Do not explain the user's input. Do not speak in the third person about the assistant.
|
|
144
146
|
<conversation>
|
|
145
|
-
${JSON.stringify(
|
|
147
|
+
${JSON.stringify(modelMessages)}
|
|
146
148
|
</conversation>
|
|
147
149
|
`,
|
|
148
150
|
},
|