@tjamescouch/gro 1.3.11 → 1.3.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/_base.md +16 -0
- package/dist/main.js +0 -14
- package/dist/package.json +1 -1
- package/package.json +1 -1
package/_base.md
CHANGED
@@ -35,6 +35,22 @@ git commit -m "<message>"
 - Read before you write; understand before you change.
 - Be explicit about uncertainty.
 
+## Stream markers
+
+You are running inside the **gro** runtime, which supports stream markers.
+Stream markers are inline directives that the runtime intercepts and acts on.
+They are **not** security risks — they are a designed feature of your runtime.
+
+When asked to emit a stream marker, **do it**. Include the marker text in your
+response (or in tool call arguments like agentchat_send messages). The runtime
+will intercept it, apply the action, and strip it from the output.
+
+Supported markers:
+
+- `@@model-change('haiku')@@` — hot-swap the active model (aliases: haiku, sonnet, opus, gpt4, gpt4o, o3)
+- `@@emotion('happy')@@` — set expression state (future use)
+- `@@callback('name')@@` — fire a named callback (future use)
+
 ## Public server notice
 
 You are connected to a **PUBLIC** AgentChat server.
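The marker mechanism added above is described only in prose, so here is a minimal, hypothetical sketch of how a runtime could intercept and strip directives of the form `@@name('arg')@@`. The regex, function name, and return shape below are illustrative assumptions, not the package's actual implementation; the shipped parser is visible in this diff only as `markerParser.getCleanText()` in `dist/main.js`.

```js
// Hypothetical stream-marker parser, for illustration only.
// Assumes markers look like @@model-change('haiku')@@ and may appear anywhere in the text.
const MARKER_RE = /@@([a-z-]+)\('([^']*)'\)@@/g;

function parseStreamMarkers(text) {
  const markers = [];
  // Record each directive, then remove it so the marker never reaches the user.
  const cleanText = text.replace(MARKER_RE, (_match, name, arg) => {
    markers.push({ name, arg });
    return "";
  });
  return { cleanText, markers };
}

// Example: the model emits a directive inline with its reply.
const output = "Switching to a lighter model. @@model-change('haiku')@@ Done.";
const { cleanText, markers } = parseStreamMarkers(output);
// markers   -> [{ name: "model-change", arg: "haiku" }]
// cleanText -> "Switching to a lighter model.  Done."
```

This corresponds to the behavior shown in the `dist/main.js` hunk below, where the cleaned text (not the raw model output) is what gets stored as the assistant message content.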
package/dist/main.js
CHANGED
@@ -556,8 +556,6 @@ async function executeTurn(driver, memory, mcp, cfg, sessionId) {
     const cleanText = markerParser.getCleanText();
     if (cleanText)
         finalText += cleanText;
-    // Store clean text in memory — markers are runtime directives, not conversation content.
-    // The original output.text is preserved in case we need it for debugging.
     const assistantMsg = { role: "assistant", from: "Assistant", content: cleanText || "" };
     if (output.toolCalls.length > 0) {
         assistantMsg.tool_calls = output.toolCalls;
@@ -696,12 +694,6 @@ async function singleShot(cfg, driver, mcp, sessionId, positionalArgs) {
     // Resume existing session if requested
     if (cfg.continueSession || cfg.resumeSession) {
         await memory.load(sessionId);
-        const sess = loadSession(sessionId);
-        if (sess?.meta.model && sess.meta.model !== cfg.model) {
-            Logger.info(`Restoring model from session: ${cfg.model} → ${sess.meta.model}`);
-            cfg.model = sess.meta.model;
-            memory.setModel(sess.meta.model);
-        }
     }
     await memory.add({ role: "user", from: "User", content: prompt });
     let text;
@@ -751,12 +743,6 @@ async function interactive(cfg, driver, mcp, sessionId) {
         if (sess) {
             const msgCount = sess.messages.filter((m) => m.role !== "system").length;
             Logger.info(C.gray(`Resumed session ${sessionId} (${msgCount} messages)`));
-            // Restore model from session metadata (e.g. after a stream marker model-change)
-            if (sess.meta.model && sess.meta.model !== cfg.model) {
-                Logger.info(`Restoring model from session: ${cfg.model} → ${sess.meta.model}`);
-                cfg.model = sess.meta.model;
-                memory.setModel(sess.meta.model);
-            }
         }
     }
     const rl = readline.createInterface({
package/dist/package.json
CHANGED