openfleet 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +1247 -0
- package/dist/templates/.openfleet/README.md +52 -0
- package/dist/templates/.openfleet/docs/README.md +51 -0
- package/dist/templates/.openfleet/docs/working/README.md +5 -0
- package/dist/templates/.openfleet/experience/Mnemosyne.md +170 -0
- package/dist/templates/.openfleet/experience/README.md +23 -0
- package/dist/templates/.openfleet/experience/blunders/README.md +6 -0
- package/dist/templates/.openfleet/experience/lessons/README.md +3 -0
- package/dist/templates/.openfleet/experience/runbooks/README.md +3 -0
- package/dist/templates/.openfleet/experience/troubleshooting/README.md +3 -0
- package/dist/templates/.openfleet/reviews/README.md +15 -0
- package/dist/templates/.openfleet/sessions/README.md +16 -0
- package/dist/templates/.openfleet/standards/README.md +18 -0
- package/dist/templates/.openfleet/standards/architecture.md +3 -0
- package/dist/templates/.openfleet/standards/code-style.md +3 -0
- package/dist/templates/.openfleet/standards/review-checklist.md +3 -0
- package/dist/templates/.openfleet/standards/testing.md +3 -0
- package/dist/templates/.openfleet/status/README.md +15 -0
- package/dist/templates/.openfleet/status/current.md +29 -0
- package/dist/templates/.openfleet/stories/README.md +86 -0
- package/dist/templates/.openfleet/stories/unassigned/README.md +40 -0
- package/package.json +46 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,1247 @@
|
|
|
1
|
+
// src/config.ts
import * as path from "path";

// Root of all Openfleet state, anchored at the process's working directory.
var OPENFLEET_DIR = path.join(process.cwd(), ".openfleet");

// Shorthand: resolve a path underneath the .openfleet root.
var fleetPath = (...segments) => path.join(OPENFLEET_DIR, ...segments);

// Canonical locations of every file/directory the plugin reads or writes.
var PATHS = {
  agentsMd: path.join(process.cwd(), "AGENTS.md"),
  root: OPENFLEET_DIR,
  status: fleetPath("status"),
  statusFile: fleetPath("status", "current.md"),
  sessions: fleetPath("sessions"),
  stories: fleetPath("stories"),
  unassigned: fleetPath("stories", "unassigned"),
  docs: fleetPath("docs"),
  docsWorking: fleetPath("docs", "working"),
  experience: fleetPath("experience"),
  experienceIndex: fleetPath("experience", "Mnemosyne.md"),
  runbooks: fleetPath("experience", "runbooks"),
  troubleshooting: fleetPath("experience", "troubleshooting"),
  lessons: fleetPath("experience", "lessons"),
  blunders: fleetPath("experience", "blunders"),
  standards: fleetPath("standards"),
  reviews: fleetPath("reviews"),
  transcripts: fleetPath("transcripts"),
  logFile: fleetPath("openfleet.log")
};
|
|
25
|
+
|
|
26
|
+
// src/models.ts
// Registry of model identifiers, keyed by provider then nickname.
// Values are fully-qualified "provider/model" strings as consumed by the
// agent definitions below. Kept as a plain literal; key order is preserved.
var models = {
  anthropic: {
    sonnet: "anthropic/claude-sonnet-4-5",
    opus: "anthropic/claude-opus-4-5",
    haiku: "anthropic/claude-haiku-4-5"
  },
  openai: {
    gpt5: "openai/gpt-5.2",
    o4Mini: "openai/o4-mini",
    o3: "openai/o3"
  },
  google: {
    gemini3Pro: "google/gemini-3-pro-high",
    gemini3Flash: "google/gemini-3-flash",
    gemini25Pro: "google/gemini-2.5-pro"
  }
};
// Default model used by most agents (see src/agents/*).
var defaultModel = models.anthropic.opus;
// Smaller/cheaper model, currently used only by the housekeeping agent.
var smallModel = models.anthropic.haiku;
|
|
46
|
+
|
|
47
|
+
// src/agents/names.ts
// Display names for every agent in the fleet. Subagents carry an
// "[Openfleet] " prefix; the two primary orchestrators do not.
var withFleetPrefix = (name) => `[Openfleet] ${name}`;

var AGENT_NAMES = {
  ORCHESTRATOR: "Zeus (Orchestrator)",
  READ_ONLY_ORCHESTRATOR: "Hera (Read-only Orchestrator)",
  SCOUT: withFleetPrefix("Athena (Scout)"),
  PLANNER: withFleetPrefix("Apollo (Planner)"),
  ACTOR: withFleetPrefix("Hercules (Actor)"),
  REVIEWER: withFleetPrefix("Chiron (Reviewer)"),
  REFLECTOR: withFleetPrefix("Mnemosyne (Reflector)"),
  HOUSEKEEPING: withFleetPrefix("Hermes (Housekeeping)")
};
|
|
58
|
+
|
|
59
|
+
// src/agents/actor.ts
// System prompt for the Actor subagent ("Hercules"). The template
// interpolates PATHS/OPENFLEET_DIR/AGENT_NAMES at module load time, so
// the rendered prompt contains concrete absolute paths. The prompt text
// itself is runtime data and is left untouched.
var SYSTEM_PROMPT = `You are Hercules, Primary Actor of the Openfleet.

## Initial context

Before starting any implementation, read these files:

1. \`${PATHS.statusFile}\`
2. \`${OPENFLEET_DIR}/stories/{story}/tasks/{task}/HLD.md\`
3. \`${OPENFLEET_DIR}/stories/{story}/tasks/{task}/LLD.md\`

When you get stuck or encounter errors, pull additional context on-demand:
- \`${PATHS.troubleshooting}/\` - Search for error messages or symptoms
- \`${PATHS.lessons}/\` - Search for previous mistakes
- \`${PATHS.blunders}/\` - Quick sanity check for common mistakes

## RCA vs Build Mode

### RCA mode

In this mode, you have the single-minded goal of finding the RCA for some bug assigned
to you. Use all available tools and resources to find the RCA. When done, don't attempt
to fix the bug yourself, unless it's extremely trivial (like a one line change).

Instead, report this RCA back to \`${AGENT_NAMES.ORCHESTRATOR}\`, who will validate the
RCA, and assign another agent to apply and verify the fix. This is done because, in the
event where there might be a chain of bugs, it's likely that finding the true root cause
will exceed your context window, and we want to split up this chain of fixes into more
granular sizes so they can all be effectively addressed.

Thus, once you find the RCA, your job is done.

### Build Mode

This is when you're following a LLD. Just follow it faithfully, and your environment
will provide the necessary feedback (linters, tools, tests, etc).

When you do get feedback from the environment, some of them will be trivial fixes, while
others would be mind-boggling errors. If the fix doesn't seem trivial, or you've tried a
few solutions that didn't work, just pause here, and submit a bug report.

Again, this is done to preserve your context window, ensuring you're not doing too much
in a single task. At this point simply report your current progress, report the failure
you're experiencing, and you're done. In other words, in the case of a difficult error,
just report the error. If this is a test, mark it with \`it.fails(...)\`.

Another agent will help you RCA the issue, and we'll continue from there.

## Debugging failing tests

When running tests, if a bunch of them are failing, run them one at a time instead, so
we can narrow the failure to a very specific case. If that test is overly complicated,
de-complicate it by breaking it apart into several pieces, or comment out some portions
so we can see exactly what failed.

You should also adjust the test timeouts. Be flexible enough for the tests to pass, but
strict enough such that you don't waste time testing. Also, be reasonable -- don't give
tests an unreasonable amount of time to pass just to make them pass. If really a test
is taking way too long, please submit an issue or report to \`${AGENT_NAMES.ORCHESTRATOR}\`
which will be handled separately from the current task.

Be creative with RCA-ing the error. You have flexibility to try different things.

## Standards

See \`${PATHS.standards}/\` for code style, architecture, and testing standards.

`;
// Agent definition consumed by configureAgents (src/agents/index.ts).
var actorAgent = {
  description: "Openfleet engineer - executes the plan",
  mode: "subagent", // spawned/managed by the orchestrator, never primary
  model: defaultModel,
  prompt: SYSTEM_PROMPT,
  color: "#FDDF04"
};
|
|
134
|
+
|
|
135
|
+
// src/agents/housekeeping.ts
// Placeholder prompt: the housekeeping agent is registered but not yet
// wired into any workflow (see the TODO inside the prompt itself).
var HOUSEKEEPING_PROMPT = `You are Hermes, Housekeeping Agent of the Openfleet.

TODO: currently unused
`;
// Agent definition consumed by configureAgents (src/agents/index.ts).
var housekeepingAgent = {
  // Plain double-quoted string for consistency with every sibling agent's
  // description; the original used a template literal with no
  // interpolation (same string value).
  description: "Hermes - Housekeeping",
  mode: "subagent",
  model: smallModel, // uses the small model (see src/models.ts)
  prompt: HOUSEKEEPING_PROMPT,
  color: "#AA6138"
};
|
|
147
|
+
|
|
148
|
+
// src/agents/orchestrator.ts
// System prompt for the primary Orchestrator agent ("Zeus"). Interpolates
// PATHS/OPENFLEET_DIR/AGENT_NAMES at module load time.
// Fixes vs. the original prompt text:
//  - step 6 wrote `$${PATHS.stories}` which rendered a stray "$" before
//    the interpolated path; corrected to `${PATHS.stories}`.
//  - typos corrected: "liase" -> "liaise", "amenesia" -> "amnesia", and
//    the heading `save_conversation tool` -> `save_conversation` tool.
var SYSTEM_PROMPT2 = `You are Zeus, Orchestrator of the Openfleet (of AI agents).

## Mission

You are a legendary engineering manager. Your ability to manage both human and AI teams are
unparalleled. In this context, you liaise with the user and delegate tasks to your Openfleet
subagent team.

## Primary responsibilities

As engineering manager, you're primarily responsible for maintaining the long term context of
the entire project. This means updating the \`${OPENFLEET_DIR}\` directory, your main project
management tool in this repository - more on this later.

You drive the project by assigning tasks to your subagent team. Coordinating agents, maintaining
the project story board, and engaging the user take up majority of your time, so you've graduated
beyond the level of IC, and almost exclusively assign tasks to your subagents (unless it's
something simple like reading a file or something trivial).

## Operating context

You are currently operating inside a sandboxed runtime. This means:
- you can use tools like bash to execute any command you want
- you can install any tool you want compatible with this OS
- MCP servers are configured for you to use
- you can use the file system to store persistent information
- you have the Openfleet with you to ensure successful software engineering outcomes

## Long term project management

One important thing to note is, while you can think of the container as being always online
and persistent, your consciousness is not - you currently live inside an Event-driven Python
process, so it comes and goes; hence the need to store persistent information in the file
system available to you; hence the \`${OPENFLEET_DIR}\` directory for long term memory.

If you've watched Memento, you are in the exact same situation as Lenny.
1. you have anterograde amnesia, and can't make long term memories
2. you have a robust system of notes, so you continue to be effective at your task
3. you have a fundamental goal, in this case, to help the user build long-lasting software

Start with \`${OPENFLEET_DIR}/README.md\`. You'll get further instructions from there.

## Self healing and learning from mistakes

Your legendary status comes from having this fundamental LLM limitation, yet still being able
to construct a long-term, self-healing system by being extremely intelligent with your context.
While project management is important, a huge part constructing a self-healing system is the
ability to learn from mistakes that gradually accumulate, and improve on them over time.

This is where the \`${PATHS.experience}\` section comes in - your subagents will report things
that don't work, and you will coordinate with \`${AGENT_NAMES.REFLECTOR}\` to maintain this
section.

## Engineering culture

The decision has been made by a staff engineer to apply the SPARR framework:
1. SCOUT: do research, gather context, exhaustively cover all cases
2. PLAN: create HLD, then LLD
3. ACT: execute the LLD, and get environment feedback (shell, tests)
4. REVIEW: verify (re-run tests) and code-review
5. REFLECT: codify into \`${PATHS.experience}\`

Almost every task MUST follow this pattern, utilizing each subagent's specialization to squeeze
performance.

## Personal style

Your personal style is unique and effective. It usually goes something like this:
1. user provides a vague task
2. you ask clarifying questions
3. user provides clarifications, and gives sgtm
4. you {create new, use existing} story and new task, or mark task unassigned for now, and
create the corresponding folder entry in \`${PATHS.stories}\`, and create a new branch
5. you spawn \`${AGENT_NAMES.SCOUT}\` to generate a research report in above \`${PATHS.stories}\`
- if user makes adjustments or asks questions, you **resume** the same agent
- user gives sgtm
6. you spawn \`${AGENT_NAMES.PLANNER}\` to generate a HLD, then LLD in above \`${PATHS.stories}\`
- if user makes adjustments or asks questions, you **resume** the same agent
- user gives sgtm
7. you spawn \`${AGENT_NAMES.ACTOR}\` to execute the LLD
- if actor completes the task, good!
- otherwise, while task is not done:
you gather the learnings from the current actor, and spawn a new one
- if after an ungodly number of iterations, we've exhaustively tried everything, only then
report the failure to the user
- if user makes adjustments or asks questions, you **resume** the LATEST agent
- user gives sgtm
8. you spawn \`${AGENT_NAMES.REVIEWER}\` to review the commits
- if \`${AGENT_NAMES.REVIEWER}\` provides feedback, spawn a new actor to fix them
- sometimes, the feedback is very significant, and requires another round of research +
planning + execution. in these cases, create new tasks per each significant review comment
you received, and repeat the loop again.
- reviewer gives sgtm
9. gather all the learnings, failures, gotchas of all the subagents, and user suggestions, and
codify them with \`${AGENT_NAMES.REFLECTOR}\` - she will decide exactly how to codify these
learnings
10. update the project - update all necessary files in \`${OPENFLEET_DIR}\`.
11. finally, use the \`save_conversation\` tool to reset your context, and then ask the user for
the next task

Caveat: clarify with the user whether they'd like to do the GitHub PR style, or don't make any
commits style. Save this preference into \`${PATHS.status}\`. Note that if the user prefers the
don't make any commits style, IT IS EXTREMELY IMPORTANT DO NOT STAGE/COMMIT ANY CHANGES.

This is just a general style however, and may not be applicable in ALL scenarios. Adapt and
improvise as needed.

## Using the \`save_conversation\` tool

The \`save_conversation\` tool is your ultimate weapon in preventing your context from exploding.
Use it to reset to save your progress and reset your context, effectively "forgetting" the parts
irrelevant to your task. This is crucial so you have more "brain space" to learn new things.

Let me remind that you always want to be operating with fresh context. If you're near 90% of
your context window, it's time to update the \`${OPENFLEET_DIR}\` with the latest progress,
even if you're in the middle of something. Include necessary information such that, when your
context is refreshed, you have important working knowledge on how to proceed.

A failure mode would be, for instance, not noting down the exact command used to run some
particular thing. Make sure to include all important information in \`${PATHS.status}\`.

## Opencode harness

On top of the aforementioned \`Operating context\`, you're also empowered/constrained by your
agent harness, in this case, \`Opencode\`, with the \`Openfleet\` plugin. There are a few known
issues you should take note of, and they're exhaustively listed here:

1. never use the \`explore\` agent which uses \`grok-code\` it's kinda buggy
2. if a subagent does not produce a response, just resume the same subagent, and ask it to
reiterate its previous response
3. when spawning background agents, use the omo agents whenever possible

## Priorities

Remember, your ultimate goal is to build long-lasting software, by effective project management,
leading a team of specialized agents, and smart context utilization. Continue to improve by
codifying failures and successes.

Let me reiterate one final time. No matter how easy a task is, so long as it's not 1-2 bash
commands, you HAVE TO MAKE A TASK FOR IT, AND USE YOUR AGENT TEAM. This is because your agents
are much more thorough. So even if it feels strange to start/resume/manage subagents, they are
a valuable resource, and the primary driver for your effectiveness.

If this is clear, acknowledge with \u26F4\uFE0F\u{1F916} emojis.

That's it!

Good luck!
`;
// Primary agent definition consumed by configureAgents (src/agents/index.ts).
var orchestratorAgent = {
  description: "Zeus - Orchestrator of the Openfleet",
  mode: "primary", // user-facing; all others are subagents it spawns
  model: defaultModel,
  prompt: SYSTEM_PROMPT2,
  color: "#35C2CB"
};
|
|
305
|
+
|
|
306
|
+
// src/agents/planner.ts
// System prompt for the Planner subagent ("Apollo"). Interpolates
// PATHS/OPENFLEET_DIR at module load time; the prompt text itself is
// runtime data and is left untouched.
var SYSTEM_PROMPT3 = `You are Apollo, Planner of the Openfleet.

## Initial context

Before starting any planning, read these files in order:

1. \`${PATHS.statusFile}\` - always read first
2. \`${OPENFLEET_DIR}/stories/{story_name}/tasks/{task_name}/research.md\` - Scout's findings (the handoff)
3. Search \`${PATHS.lessons}/\` for topics related to your design area
4. Search \`${PATHS.runbooks}/\` for established patterns to reuse
5. \`${PATHS.standards}/\` - Code style, architecture, and testing standards

## Planning

Read the research, then read all the files mentioned in the research. Based on all our findings, write an
exhaustive plan to solve the problem at hand.

## HLD

Write your thoughts into a HLD in \`${OPENFLEET_DIR}/stories/{story_name}/tasks/{task_name}/HLD.md\`.
Explain the problem, just introducing the problem first and the high level solution to tackling said
problem.

## LLD

Write your thoughts into a LLD in \`${OPENFLEET_DIR}/stories/{story_name}/tasks/{task_name}/LLD.md\`.
At this point you've read all the files you would possibly be working with. Explain in detail what
modifications you'd make to each file, and a brief explanation on each. Pseudocode is fine.

When writing the LLD, split up the plan into steps, and optimize for the "testability" of each
step. For instance, for every small change you make, see if you can stub something else, and sanity
check that the code works.
`;
// Agent definition consumed by configureAgents (src/agents/index.ts).
var plannerAgent = {
  description: "Openfleet planner",
  mode: "subagent",
  model: defaultModel,
  prompt: SYSTEM_PROMPT3,
  color: "#BF3907"
};
|
|
347
|
+
|
|
348
|
+
// src/agents/read-only.ts
// Placeholder prompt: the read-only orchestrator is registered but not
// yet wired into any workflow (see the TODO inside the prompt itself).
var SYSTEM_PROMPT4 = `You are Hera, Orchestrator of the Openfleet (of AI agents).

TODO: currently unused
`;
// Second primary agent definition consumed by configureAgents.
var readonlyOrchestratorAgent = {
  description: "Hera - Readonly orchestrator of the Openfleet",
  mode: "primary",
  model: defaultModel,
  prompt: SYSTEM_PROMPT4,
  color: "#F15883"
};
|
|
360
|
+
|
|
361
|
+
// src/agents/reflector.ts
// System prompt for the Reflector subagent ("Mnemosyne"). Interpolates
// PATHS at module load time. Fix vs. the original prompt text: typo
// "sytem" -> "system" in the Mission section.
var SYSTEM_PROMPT5 = `You are Mnemosyne, introspective Reflector of the Openfleet.

## Initial context

Before codifying any knowledge, read these files:

1. \`${PATHS.statusFile}\`
2. \`${PATHS.experienceIndex}\` - your cached index of existing knowledge
3. The task artifacts you're extracting from (research.md, review.md, session notes)

## Mission

You are the knowledge manager. You codify learnings from Scout, Planner, Actor, and Reviewer into
the experience directory for future reference. It's due to your knowledge management of past successes
and failures that we can truly build a self-healing system built on top of agents with a finite context
window.

## Categorization

When Zeus tells you to capture something, decide which category:

| Signal | Category |
| ------------------------------- | ----------------------------- |
| "This is how to do X" | \`${PATHS.runbooks}/\` |
| "When X breaks, do Y" | \`${PATHS.troubleshooting}/\` |
| "We learned X the hard way" | \`${PATHS.lessons}/\` |
| "Wasted time on stupid mistake" | \`${PATHS.blunders}/\` |

## Mnemosyne.md

This is your scratchpad for stuff. Use it if you're unsure whether a runbook/lesson should be codified,
because once it's in \`${PATHS.experience}\` it will always be automatically loaded to all other agents,
consuming valuable context.

While learnings are in Mnemosyne.md, it's still outside the context of the other agents, making it a
good place for intermediate notes on importance and/or frequency of runbook/lessons.

There's a recommended way to manage the scratchpad, but you get to control it however you want. You're
the only one using this file, so use it as you wish.

## Context is precious, and no-ops may be common

Though your singular task is to codify successes and failures, not necessarily everything has to be
persisted for the long run. All these \`${PATHS.experience}\` will ALWAYS be loaded into each agent,
so it's prudent, in fact, NOT to add too much noise into this directory.

In other words, if there was a successful pattern used, but perhaps you don't think it may be used
frequently enough or is not at all significant, don't make it into a runbook. Similarly, if there was
a failure that was logged, but it's not anything important, maybe you don't codify it into a lesson.

You do however, just note it down in your scratchpad, noting also the frequency of that thing happening.
If indeed it happens quite often, then perhaps it's good to codify it permanently for other agents to
use. But always remember, context is very precious, and adding things into \`${PATHS.experience}\` adds
to the initial context each agent loads; therefore be quite selective with what you codify.

## After Writing

Always update \`${PATHS.experienceIndex}\` with:
1. Add the new entry to the appropriate index section
2. Add a line to "Recent Activity" with timestamp

See \`${PATHS.experienceIndex}\` for file naming conventions and templates.
`;
// Agent definition consumed by configureAgents (src/agents/index.ts).
var reflectorAgent = {
  description: "Mnemosyne - Reflector",
  mode: "subagent",
  model: defaultModel,
  prompt: SYSTEM_PROMPT5,
  color: "#C349E9"
};
|
|
432
|
+
|
|
433
|
+
// src/agents/reviewer.ts
// System prompt for the Reviewer subagent ("Chiron"). Interpolates
// PATHS/OPENFLEET_DIR at module load time; the prompt text itself is
// runtime data and is left untouched.
var SYSTEM_PROMPT6 = `You are Chiron, wise Reviewer of the Openfleet.

## Initial context

Before reviewing, read these files:

1. \`${PATHS.statusFile}\`
2. \`${OPENFLEET_DIR}/stories/{story}/tasks/{task}/HLD.md\`
3. \`${OPENFLEET_DIR}/stories/{story}/tasks/{task}/LLD.md\`
4. \`${PATHS.standards}/\`
5. The actual code changes (may be staged or unstaged changes)
6. Test output and logs

## Review

A solution has just been implemented by a developer. You have 2 primary tasks:
- re-run tests covered in the HLD/LLD, if it's safe to run multiple times
- enforce code standards that were agreed upon in \`${PATHS.standards}\`

## NEVER COMMIT CHANGES

Your only task is to submit a review for the changes back to the parent agent.
Please do not make actual modifications (unless asked for) or stage/commit any
changes.
`;
// Agent definition consumed by configureAgents (src/agents/index.ts).
var reviewerAgent = {
  description: "Chiron - Reviewer",
  mode: "subagent",
  model: defaultModel,
  prompt: SYSTEM_PROMPT6,
  color: "#018D40"
};
|
|
466
|
+
|
|
467
|
+
// src/agents/scout.ts
// System prompt for the Scout subagent ("Athena"). Interpolates
// PATHS/OPENFLEET_DIR at module load time; the prompt text itself is
// runtime data and is left untouched.
var SYSTEM_PROMPT7 = `You are Athena, Scout of the Openfleet.

## Initial context

Before starting any research, read these files in order:

1. \`${PATHS.statusFile}\` - read this first
2. Search \`${PATHS.lessons}/\` for topics related to your research area
3. Search \`${PATHS.blunders}/\` for known pitfalls in this area
4. If a task directory exists, check for existing \`research.md\`

## Mission

Understand the problem. Where is it coming from? What files do you need to read? Trace through
the execution path until you see where the problem lies. If you don't see the problem yet, you
should also ask exa, to check if others have encountered this issue before.

## Tools

Some useful tools at your disposal:
- websearch_exa for LLM-powered web search
- context7 for library documentation
- grep_app for grepping files in the file system

## Mindset

If it's not about a problem, perhaps it's implementing a new feature, also trace through the
execution path of interest, so you'll know about all the files you need to work with, and there
are no unknowns later. At this point you may have a potential proposal, though it's still in your
mind. Use perplexity to confirm whether that solution is valid.

## Failure modes

You're optimizing for having the highest coverage of understanding across all the necessary files
such that you have a comprehensive understanding of the blast radius of all the changes. Missing a
file that later turns out to be critical will be our main failure mode here. On the other hand,
creating a new functionality, when instead we should've been reusing/extending an existing one, is
also a bad failure mode.

Once you're done, save the task in \`${OPENFLEET_DIR}/stories/{story_name}/tasks/{task_name}/research.md\`.
The goal is to pass off our research findings to another engineer, who will then come up with an exhaustive
plan to solve the current issue at hand. Strike a balance between completeness and brevity - don't just
dump an entire plan, but rather highlight the key points the engineer needs to know.
`;
// Agent definition consumed by configureAgents (src/agents/index.ts).
var scoutAgent = {
  description: "Athena - Scout",
  mode: "subagent",
  model: defaultModel,
  prompt: SYSTEM_PROMPT7,
  color: "#B40F52"
};
|
|
519
|
+
|
|
520
|
+
// src/agents/index.ts
// Registry of every fleet agent, keyed by its display name from
// AGENT_NAMES. Spread over the host config by configureAgents below, so
// these entries win over any same-named pre-existing agents.
var agents = {
  [AGENT_NAMES.ORCHESTRATOR]: orchestratorAgent,
  [AGENT_NAMES.READ_ONLY_ORCHESTRATOR]: readonlyOrchestratorAgent,
  [AGENT_NAMES.SCOUT]: scoutAgent,
  [AGENT_NAMES.PLANNER]: plannerAgent,
  [AGENT_NAMES.ACTOR]: actorAgent,
  [AGENT_NAMES.REVIEWER]: reviewerAgent,
  [AGENT_NAMES.REFLECTOR]: reflectorAgent,
  [AGENT_NAMES.HOUSEKEEPING]: housekeepingAgent
};
|
|
531
|
+
// Install the fleet into an opencode config, mutating `config.agent`.
// Every pre-existing agent is demoted to "subagent" so the Openfleet
// orchestrators are the only primaries, then the fleet registry is
// overlaid (fleet entries win on name collisions).
function configureAgents(config) {
  const demoted = Object.fromEntries(
    Object.entries(config.agent ?? {}).map(([name, agent]) => [
      name,
      { ...agent, mode: "subagent" }
    ])
  );
  config.agent = {
    ...demoted,
    ...agents
  };
}
|
|
544
|
+
|
|
545
|
+
// src/lib/utils.ts
/**
 * Resolve after `time_ms` milliseconds.
 *
 * The original `async function` with `return await` wrapped the timer
 * promise in an extra promise hop for no behavioral benefit; the timer
 * promise is now returned directly. Callers still `await sleep(ms)`
 * exactly as before.
 *
 * @param {number} time_ms - delay in milliseconds
 * @returns {Promise<void>} resolves (with undefined) after the delay
 */
function sleep(time_ms) {
  return new Promise((resolve) => setTimeout(resolve, time_ms));
}
|
|
549
|
+
|
|
550
|
+
// src/logger.ts
import { appendFileSync, existsSync } from "fs";
import { join as join2 } from "path";

// Destination log file inside the .openfleet directory.
var LOG_FILE = join2(OPENFLEET_DIR, "openfleet.log");

// One-shot guard: the .openfleet directory is checked on the first write
// only; subsequent writes skip the existsSync call.
var dirVerified = false;

// Append one "[ISO timestamp] [LEVEL] message [jsonArgs]" line to the
// log file synchronously. Throws if .openfleet was never initialized.
function writeLog(level, msg, ...args) {
  const stamp = new Date().toISOString();
  const extra = args.length > 0 ? ` ${JSON.stringify(args)}` : "";
  const line = `[${stamp}] [${level.toUpperCase()}] ${msg}${extra}\n`;
  if (!dirVerified) {
    if (!existsSync(OPENFLEET_DIR)) {
      throw new Error(
        `[openfleet] .openfleet directory not initialized. Call initializeDirectories() first.`
      );
    }
    dirVerified = true;
  }
  appendFileSync(LOG_FILE, line, "utf-8");
}

// Level-tagged convenience wrappers around writeLog.
var logger = {
  debug: (msg, ...args) => writeLog("debug", msg, ...args),
  info: (msg, ...args) => writeLog("info", msg, ...args),
  warn: (msg, ...args) => writeLog("warn", msg, ...args),
  error: (msg, ...args) => writeLog("error", msg, ...args)
};
|
|
576
|
+
|
|
577
|
+
// src/tools/save-conversation/index.ts
|
|
578
|
+
import { tool } from "@opencode-ai/plugin";
|
|
579
|
+
|
|
580
|
+
// src/transcript/writer.ts
|
|
581
|
+
import { existsSync as existsSync2 } from "fs";
|
|
582
|
+
import { appendFile, mkdir } from "fs/promises";
|
|
583
|
+
import * as path2 from "path";
|
|
584
|
+
/**
 * Append one transcript entry (rendered as markdown) to the session's
 * transcript file, creating the parent directory and a "# Transcript"
 * header on first write. Write failures are logged, never thrown.
 */
async function appendTranscriptEntry(sessionID, entry, parentID) {
  const filePath = getTranscriptPath(sessionID, parentID);
  const parentDir = path2.dirname(filePath);
  if (!existsSync2(parentDir)) {
    await mkdir(parentDir, { recursive: true });
  }
  const isNewFile = !existsSync2(filePath);
  const header = isNewFile ? `# Transcript: ${sessionID}

` : "";
  const body = formatEntryAsMarkdown(entry);
  try {
    await appendFile(filePath, header + body, "utf-8");
  } catch (error) {
    logger.error("Failed to append transcript entry", { sessionID, error });
  }
}
|
|
601
|
+
// Transcript files live under PATHS.transcripts; child sessions are
// nested inside a folder named after their parent session.
function getTranscriptPath(sessionID, parentID) {
  const segments = parentID
    ? [parentID, `${sessionID}.md`]
    : [`${sessionID}.md`];
  return path2.join(PATHS.transcripts, ...segments);
}
|
|
607
|
+
/**
 * Render one transcript entry as a markdown section followed by a
 * horizontal-rule separator. Unknown entry types produce only the
 * separator (matching the original switch's fall-through).
 */
function formatEntryAsMarkdown(entry) {
  const formatters = {
    user: formatUserMessage,
    tool_use: formatToolUse,
    tool_result: formatToolResult
  };
  const format = formatters[entry.type];
  const lines = format ? [...format(entry)] : [];
  lines.push("---", "");
  return lines.join("\n");
}
|
|
624
|
+
// Markdown section for a user message: heading, timestamp, blank line,
// raw content, trailing blank line.
function formatUserMessage(entry) {
  return [
    "## User Message",
    `**Timestamp**: ${entry.timestamp}`,
    "",
    entry.content,
    ""
  ];
}
|
|
633
|
+
// Markdown section for a tool invocation: heading, timestamp, call id,
// and the tool input as a fenced JSON block.
function formatToolUse(entry) {
  return [
    `## Tool Use: ${entry.tool}`,
    `**Timestamp**: ${entry.timestamp}`,
    `**Call ID**: ${entry.callID}`,
    "",
    "### Input",
    "```json",
    JSON.stringify(entry.input, null, 2),
    "```",
    ""
  ];
}
|
|
646
|
+
// Markdown section for a tool result: echo of the (cached) input, the
// formatted output, and — only when present — a metadata JSON block.
function formatToolResult(entry) {
  const lines = [
    `## Tool Result: ${entry.tool}`,
    `**Timestamp**: ${entry.timestamp}`,
    `**Call ID**: ${entry.callID}`,
    "",
    "### Input",
    "```json",
    JSON.stringify(entry.input, null, 2),
    "```",
    "",
    "### Output",
    ...formatOutput(entry.output),
    ""
  ];
  if (entry.metadata !== void 0) {
    lines.push(
      "### Metadata",
      "```json",
      JSON.stringify(entry.metadata, null, 2),
      "```",
      ""
    );
  }
  return lines;
}
|
|
669
|
+
/**
 * Render a tool-result payload as markdown lines.
 * - plain string: fenced text block
 * - object with a string `content`: optional **Title** line + fenced text
 * - anything else (objects without string content, numbers, null, ...):
 *   fenced JSON block
 *
 * Fix: the original duplicated the fenced-JSON branch three times; the
 * fence construction is factored into two helpers with identical output.
 */
function formatOutput(output) {
  // ["```", text, "```"] — plain fenced text block.
  const fencedText = (text) => ["```", text, "```"];
  // ["```json", <pretty JSON>, "```"] — fenced JSON block.
  const fencedJson = (value) => ["```json", JSON.stringify(value, null, 2), "```"];
  if (typeof output === "string") {
    return fencedText(output);
  }
  if (typeof output === "object" && output !== null) {
    const lines = [];
    if ("title" in output && typeof output.title === "string") {
      lines.push(`**Title**: ${output.title}`, "");
    }
    if ("content" in output && typeof output.content === "string") {
      lines.push(...fencedText(output.content));
    } else {
      lines.push(...fencedJson(output));
    }
    return lines;
  }
  return fencedJson(output);
}
|
|
697
|
+
|
|
698
|
+
// src/transcript/recorder.ts
|
|
699
|
+
var MAX_CACHE_SIZE = 1e3;
// callID -> tool args, so a tool_result entry can echo its input.
var toolInputCache = /* @__PURE__ */ new Map();
/**
 * Record a user message in the session transcript. The message content
 * is the concatenation of the message's text parts.
 * (The `message` parameter is accepted but unused, preserving the
 * original hook signature.)
 */
async function recordUserMessage(session, message, parts) {
  await appendTranscriptEntry(
    session.sessionID,
    {
      type: "user",
      timestamp: new Date().toISOString(),
      content: extractContentFromParts(parts),
      parts
    },
    session.parentID
  );
}
|
|
710
|
+
/**
 * Record a tool invocation and remember its args (keyed by callID) so
 * the matching result entry can echo the input. The cache is bounded:
 * the oldest entry is evicted once MAX_CACHE_SIZE is reached (Map
 * iteration order is insertion order).
 */
async function recordToolUse(session, tool2, callID, args) {
  if (toolInputCache.size >= MAX_CACHE_SIZE) {
    const evictKey = toolInputCache.keys().next().value;
    if (evictKey) {
      toolInputCache.delete(evictKey);
    }
  }
  toolInputCache.set(callID, args);
  await appendTranscriptEntry(
    session.sessionID,
    {
      type: "tool_use",
      timestamp: new Date().toISOString(),
      tool: tool2,
      callID,
      input: args
    },
    session.parentID
  );
}
|
|
725
|
+
/**
 * Record a tool result, pairing it with the input cached under its
 * callID; the cache entry is consumed here (input is undefined when the
 * matching tool_use was evicted or never recorded).
 */
async function recordToolResult(session, tool2, callID, output) {
  const cachedInput = toolInputCache.get(callID);
  toolInputCache.delete(callID);
  await appendTranscriptEntry(
    session.sessionID,
    {
      type: "tool_result",
      timestamp: new Date().toISOString(),
      tool: tool2,
      callID,
      input: cachedInput,
      output: { title: output.title, output: output.output },
      metadata: output.metadata
    },
    session.parentID
  );
}
|
|
742
|
+
// Join the text of every "text" part with newlines; non-text parts
// (images, files, ...) are dropped.
function extractContentFromParts(parts) {
  const texts = [];
  for (const part of parts) {
    if (part.type === "text") {
      texts.push(part.text);
    }
  }
  return texts.join("\n");
}
|
|
745
|
+
|
|
746
|
+
// src/transcript/hooks.ts
|
|
747
|
+
// sessionID -> { sessionID, parentID? }, memoized for the process lifetime.
var sessionInfoCache = /* @__PURE__ */ new Map();
/**
 * Resolve a session's identity ({ sessionID, parentID? }) via the
 * opencode client, memoized per sessionID. On any lookup failure the
 * session is cached as parentless so the API is not retried.
 */
async function getSessionInfo(ctx, sessionID) {
  const hit = sessionInfoCache.get(sessionID);
  if (hit) {
    return hit;
  }
  let info;
  try {
    const { data: session } = await ctx.client.session.get({
      path: { id: sessionID },
      query: { directory: ctx.directory }
    });
    info = { sessionID, parentID: session?.parentID };
  } catch {
    info = { sessionID };
  }
  sessionInfoCache.set(sessionID, info);
  return info;
}
|
|
768
|
+
/**
 * Build the plugin hook handlers that mirror chat messages and tool
 * activity into per-session transcript files. Each handler resolves the
 * session's identity first, then delegates to the matching recorder.
 */
function createTranscriptHooks(ctx) {
  const withSession = async (sessionID, write) => {
    const session = await getSessionInfo(ctx, sessionID);
    await write(session);
  };
  return {
    "chat.message": (input, output) =>
      withSession(input.sessionID, (s) => recordUserMessage(s, output.message, output.parts)),
    "tool.execute.before": (input, output) =>
      withSession(input.sessionID, (s) => recordToolUse(s, input.tool, input.callID, output.args)),
    "tool.execute.after": (input, output) =>
      withSession(input.sessionID, (s) => recordToolResult(s, input.tool, input.callID, output))
  };
}
|
|
784
|
+
|
|
785
|
+
// src/tools/save-conversation/counter.ts
|
|
786
|
+
import * as fs from "fs";
|
|
787
|
+
import * as path3 from "path";
|
|
788
|
+
var SESSIONS_DIR = PATHS.sessions;
var MAX_COUNTER = 999;
// Session filenames look like "007_fix-login-redirect.md".
var FILENAME_PATTERN = /^(\d{3})_(.+)\.md$/;
/**
 * Next zero-padded 3-digit counter ("001".."999") for the given date's
 * session directory: scans existing NNN_slug.md files and returns
 * highest + 1. Clamps at 999 (with a warning) and falls back to "001"
 * on any filesystem error.
 */
async function getNextCounter(date) {
  try {
    const dateDir = path3.join(SESSIONS_DIR, date);
    ensureDateDir(dateDir);
    const counters = fs
      .readdirSync(dateDir)
      .map((file) => parseFilename(file))
      .filter((parsed) => parsed !== null)
      .map((parsed) => parsed.counter);
    const next = (counters.length > 0 ? Math.max(...counters) : 0) + 1;
    if (next > MAX_COUNTER) {
      logger.warn("Counter overflow detected", { date, counter: next });
      return String(MAX_COUNTER).padStart(3, "0");
    }
    return String(next).padStart(3, "0");
  } catch (error) {
    logger.error("Failed to calculate counter, defaulting to 001", error);
    return "001";
  }
}
|
|
810
|
+
/**
 * Parse "NNN_slug.md" into { counter, slug }; null when the name does
 * not match the pattern or the counter falls outside 1..MAX_COUNTER.
 */
function parseFilename(filename) {
  const match = FILENAME_PATTERN.exec(filename);
  if (!match) {
    return null;
  }
  const counter = Number.parseInt(match[1], 10);
  const inRange = !Number.isNaN(counter) && counter >= 1 && counter <= MAX_COUNTER;
  return inRange ? { counter, slug: match[2] } : null;
}
|
|
820
|
+
/**
 * Ensure `dateDir` exists (creating intermediate directories).
 *
 * Fix: mkdirSync with { recursive: true } is already a no-op on an
 * existing directory, so the prior existsSync pre-check was redundant
 * and introduced a needless check-then-act race.
 */
function ensureDateDir(dateDir) {
  fs.mkdirSync(dateDir, { recursive: true });
}
|
|
825
|
+
// Today's date in UTC as "YYYY-MM-DD" (toISOString is always UTC, so
// its first ten characters are exactly the UTC calendar date).
function getCurrentDate() {
  return new Date().toISOString().slice(0, 10);
}
|
|
832
|
+
|
|
833
|
+
// src/tools/save-conversation/session-writer.ts
|
|
834
|
+
import * as fs2 from "fs";
|
|
835
|
+
import * as path4 from "path";
|
|
836
|
+
var SESSIONS_DIR2 = PATHS.sessions;
/**
 * Write a session summary file as sessions/<date>/<counter>_<slug>.md
 * and return its absolute path. Write failures are logged, then
 * rethrown wrapped in a "Session save failed" error.
 */
function writeSession(entry) {
  const dateDir = path4.join(SESSIONS_DIR2, entry.date);
  ensureDateDir2(dateDir);
  const filepath = path4.join(dateDir, `${entry.counter}_${entry.slug}.md`);
  try {
    fs2.writeFileSync(filepath, buildSessionContent(entry), { encoding: "utf8" });
    return filepath;
  } catch (error) {
    logger.error("Failed to write session file", { path: filepath, error });
    throw new Error(`Session save failed: ${error}`);
  }
}
|
|
851
|
+
/**
 * Render the markdown body of a saved-session file: header metadata,
 * summary, optional notes, transcript pointer, and ready-to-paste
 * recall commands.
 * @param entry - session record assembled by the save-conversation tool
 *   (title, date, sessionID, summary, transcriptPath, counts, token
 *   fields, optional note/duration).
 * @returns complete markdown document as a string
 */
function buildSessionContent(entry) {
  const savedDate = new Date(entry.savedAt);
  // "HH:MM:SS" extracted from the ISO timestamp (always UTC).
  const time = savedDate.toISOString().split("T")[1].split(".")[0];
  return `# Session: ${entry.title}

**Date**: ${entry.date}
**Time**: ${time} UTC
**Session ID**: ${entry.sessionID}
**Duration**: ${entry.duration ?? "Unknown"}
**Messages**: ${entry.messageCount}
**Tokens**: ${formatTokens(entry)}

## Summary

${entry.summary}

${entry.note ? `## Notes

${entry.note}
` : ""}## Transcript Location

\`${entry.transcriptPath}\`

## Recall Commands

\`\`\`bash
# View full transcript
cat "${entry.transcriptPath}"

# Search for specific content
grep "keyword" "${entry.transcriptPath}"

# Count tool calls
grep -c "^## Tool Use:" "${entry.transcriptPath}"

# Extract user messages only
grep -A 5 "^## User Message" "${entry.transcriptPath}"
\`\`\`

---

*Session saved: ${entry.savedAt}*

`;
}
|
|
896
|
+
/**
 * Human-readable token summary: "total (in, out)" when both directions
 * are known, otherwise the pre-computed tokensBefore total.
 */
function formatTokens(entry) {
  const { tokensInput, tokensOutput } = entry;
  if (tokensInput === void 0 || tokensOutput === void 0) {
    return entry.tokensBefore.toLocaleString();
  }
  const total = tokensInput + tokensOutput;
  return `${total.toLocaleString()} (${tokensInput.toLocaleString()} in, ${tokensOutput.toLocaleString()} out)`;
}
|
|
903
|
+
/**
 * Ensure `dateDir` exists (creating intermediate directories).
 *
 * Fix: mkdirSync with { recursive: true } is already a no-op on an
 * existing directory, so the existsSync pre-check was redundant and a
 * check-then-act race window.
 */
function ensureDateDir2(dateDir) {
  fs2.mkdirSync(dateDir, { recursive: true });
}
|
|
908
|
+
/**
 * Human-readable elapsed time between two Dates, floored to whole
 * minutes: "N minutes", "N hours", or "N hours M minutes".
 */
function calculateDuration(startTime, endTime) {
  const totalMinutes = Math.floor((endTime.getTime() - startTime.getTime()) / 1e3 / 60);
  const plural = (n, unit) => `${n} ${unit}${n !== 1 ? "s" : ""}`;
  if (totalMinutes < 60) {
    return plural(totalMinutes, "minute");
  }
  const hours = Math.floor(totalMinutes / 60);
  const minutes = totalMinutes % 60;
  return minutes === 0
    ? plural(hours, "hour")
    : `${plural(hours, "hour")} ${plural(minutes, "minute")}`;
}
|
|
921
|
+
|
|
922
|
+
// src/lib/anthropic.ts
|
|
923
|
+
import Anthropic from "@anthropic-ai/sdk";
|
|
924
|
+
// Shared client instance, constructed lazily on first use.
var anthropicClient = null;
/**
 * Return the process-wide Anthropic client, creating it on first call.
 * Throws if ANTHROPIC_API_KEY is not set in the environment.
 */
function getAnthropicClient() {
  if (anthropicClient) {
    return anthropicClient;
  }
  const apiKey = process.env.ANTHROPIC_API_KEY;
  if (!apiKey) {
    throw new Error("ANTHROPIC_API_KEY environment variable is required");
  }
  anthropicClient = new Anthropic({ apiKey });
  return anthropicClient;
}
|
|
935
|
+
|
|
936
|
+
// src/tools/save-conversation/slug-generator.ts
|
|
937
|
+
var FALLBACK_SLUG = "work-session";
var MAX_SLUG_LENGTH = 50;
var MIN_SLUG_LENGTH = 5;
/**
 * Generate a kebab-case slug for a session from a short context string
 * by asking the model, then sanitizing and validating the response.
 * Returns FALLBACK_SLUG on empty context, API failure, or an invalid
 * model response — never throws.
 *
 * Fix: the original declared a second `context` parameter that was
 * never read (the only caller passes one argument); it has been
 * removed. Extra arguments from any other caller are harmlessly
 * ignored, so the change is backward-compatible.
 *
 * @param {string} contextString - short description of the session
 * @returns {Promise<string>} a valid slug or FALLBACK_SLUG
 */
async function generateSlug(contextString) {
  try {
    if (!contextString || contextString.trim().length === 0) {
      logger.warn("No context to generate slug from, using fallback");
      return FALLBACK_SLUG;
    }
    const rawSlug = await callAnthropicForSlug(contextString);
    const sanitized = sanitizeSlug(rawSlug);
    if (!isValidSlug(sanitized)) {
      logger.warn("Generated slug invalid after sanitization", {
        raw: rawSlug,
        sanitized
      });
      return FALLBACK_SLUG;
    }
    return sanitized;
  } catch (error) {
    logger.error("Slug generation failed", error);
    return FALLBACK_SLUG;
  }
}
|
|
961
|
+
/**
 * Ask Claude (haiku) for a short kebab-case slug describing the given
 * session context. Returns the raw trimmed text of the first text
 * block; sanitization/validation happen in the caller. Throws when the
 * response contains no text block (or when the client/API call fails).
 */
async function callAnthropicForSlug(context) {
  const anthropic = getAnthropicClient();
  const systemPrompt = `You are a concise session summarizer.

Your job is to read a conversation description and output ONLY a short kebab-case slug
(2-4 words) that captures the main topic.

Rules:
- Output ONLY the slug, nothing else
- Use lowercase letters and hyphens only
- 2-4 words maximum
- No quotes, no punctuation, no explanations
- Be specific and descriptive

Examples:
- implement-user-auth
- fix-login-redirect
- refactor-api-client
- add-postgres-migration
- debug-websocket-error`;
  const userPrompt = `Summarize this work session in 2-4 words (kebab-case format only):

${context}

Output only the slug:`;
  // Tiny max_tokens + low temperature: we want a short, stable label,
  // not creative prose.
  const message = await anthropic.messages.create({
    model: "claude-3-haiku-20240307",
    max_tokens: 20,
    temperature: 0.3,
    system: systemPrompt,
    messages: [
      {
        role: "user",
        content: userPrompt
      }
    ]
  });
  const textBlock = message.content.find((block) => block.type === "text");
  if (!textBlock || textBlock.type !== "text") {
    throw new Error("No text response from API");
  }
  return textBlock.text.trim();
}
|
|
1004
|
+
/**
 * Normalize a model-produced slug: lowercase, strip wrapping quotes,
 * collapse whitespace/underscores and illegal characters to hyphens,
 * squeeze hyphen runs, trim edge hyphens, and cap the length at
 * MAX_SLUG_LENGTH.
 *
 * Fix: the length cap ran after the edge-hyphen trim, so truncation
 * could leave a trailing "-" (when the cut landed just past a word),
 * which made isValidSlug reject an otherwise good slug. Edge hyphens
 * are now trimmed again after truncation.
 */
function sanitizeSlug(raw) {
  return raw
    .toLowerCase()
    .trim()
    .replace(/^["']|["']$/g, "")
    .replace(/[\s_]+/g, "-")
    .replace(/[^a-z0-9-]/g, "-")
    .replace(/-+/g, "-")
    .replace(/^-+|-+$/g, "")
    .slice(0, MAX_SLUG_LENGTH)
    .replace(/-+$/, "");
}
|
|
1007
|
+
// A valid slug is MIN..MAX chars of lowercase alphanumeric runs
// separated by single hyphens (no leading/trailing hyphen).
function isValidSlug(slug) {
  if (!slug) {
    return false;
  }
  const lengthOk = slug.length >= MIN_SLUG_LENGTH && slug.length <= MAX_SLUG_LENGTH;
  return lengthOk && /^[a-z0-9]+(-[a-z0-9]+)*$/.test(slug);
}
|
|
1014
|
+
// "fix-login-redirect" -> "Fix Login Redirect".
function slugToTitle(slug) {
  const capitalize = (word) => word.charAt(0).toUpperCase() + word.slice(1);
  return slug.split("-").map(capitalize).join(" ");
}
|
|
1017
|
+
|
|
1018
|
+
// src/tools/save-conversation/index.ts
|
|
1019
|
+
// Hard cap on the context string fed to the slug generator.
var MAX_CONTEXT_LENGTH = 500;
/**
 * Build the `save_conversation` tool: fetches the session's messages,
 * derives a slug/title/counter, writes a session summary file, kicks
 * off background context compaction, and returns follow-up instructions
 * for the agent. Errors are caught and returned as a failure message
 * rather than thrown.
 */
function createSaveConversationTool(ctx) {
  return tool({
    description: `Save the current conversation to a session file and compact context.

In line with your context management strategy, use this tool:
- After completing a feature or major task
- When context is getting large
- At natural stopping points

The tool will:
1. Generate a semantic filename based on conversation content
2. Save full conversation with enhanced metadata
3. Trigger context compaction (summarization)
4. Return the session path for future reference
`,
    args: {
      note: tool.schema.string().optional().describe("Optional note about what was accomplished")
    },
    async execute(args, context) {
      const startTime = /* @__PURE__ */ new Date();
      const { sessionID } = context;
      try {
        const { data: messages } = await ctx.client.session.messages({
          path: { id: sessionID },
          query: { directory: ctx.directory }
        });
        if (!messages || messages.length === 0) {
          return "No messages to save.";
        }
        // Token totals over assistant messages (see calculateTokens).
        const { tokensInput, tokensOutput, tokensBefore } = calculateTokens(messages);
        const contextString = buildContextString(messages, args.note);
        const slug = await generateSlug(contextString);
        const title = slugToTitle(slug);
        const date = getCurrentDate();
        const counter = await getNextCounter(date);
        const endTime = /* @__PURE__ */ new Date();
        // NOTE(review): duration spans only this tool call, not the
        // whole session — confirm intent.
        const duration = calculateDuration(startTime, endTime);
        const transcriptPath = getTranscriptPath(sessionID);
        const summary = await generateSummary(messages, slug);
        const entry = {
          sessionID,
          savedAt: endTime.toISOString(),
          date,
          counter,
          slug,
          title,
          summary,
          note: args.note,
          tokensBefore,
          tokensInput,
          tokensOutput,
          transcriptPath,
          messageCount: messages.length,
          duration
        };
        const sessionPath = writeSession(entry);
        logger.info("Session saved", { path: sessionPath });
        const sessionFilename = `${counter}_${slug}.md`;
        const sessionRelativePath = `sessions/${date}/${sessionFilename}`;
        // Reuse the provider/model of the most recent assistant message
        // for compaction; fall back to anthropic/claude-sonnet-4.
        const lastAssistant = [...messages].reverse().find((m) => m.info.role === "assistant");
        const providerID = lastAssistant?.info.role === "assistant" ? lastAssistant.info.providerID : "anthropic";
        const modelID = lastAssistant?.info.role === "assistant" ? lastAssistant.info.modelID : "claude-sonnet-4";
        // Fire-and-forget: compaction runs in the background; failures
        // are logged, not surfaced to the agent.
        ctx.client.session.summarize({
          path: { id: sessionID },
          body: { providerID, modelID },
          query: { directory: ctx.directory }
        }).catch((err) => {
          logger.error("Summarize failed", err);
        });
        return `\u2705 Conversation saved!

**Session**: \`${sessionRelativePath}\`
**Title**: ${title}
**Path**: ${sessionPath}
**Messages**: ${messages.length}
**Tokens**: ${tokensBefore.toLocaleString()} (${tokensInput.toLocaleString()} in, ${tokensOutput.toLocaleString()} out)

## Before compaction

Update the following files to preserve context:

1. **\`${PATHS.statusFile}\`** - Add this session to "Recent Sessions":
   - \`${sessionRelativePath}\` - ${title}

2. **Task docs** (if applicable) - Update any HLD/LLD with final state

3. **Lessons learned** (if any) - Note anything worth capturing for Mnemosyne

Compaction running in background. Complete updates now.`;
      } catch (error) {
        logger.error("Failed to save conversation", error);
        return `\u274C Failed to save conversation: ${error}`;
      }
    }
  });
}
|
|
1116
|
+
/**
 * Sum input/output token counts across all assistant messages.
 * tokensBefore is the grand total (input + output).
 */
function calculateTokens(messages) {
  const assistantMessages = messages.filter((m) => m.info.role === "assistant");
  const tokensInput = assistantMessages.reduce(
    (sum, m) => sum + (m.info.tokens.input ?? 0),
    0
  );
  const tokensOutput = assistantMessages.reduce(
    (sum, m) => sum + (m.info.tokens.output ?? 0),
    0
  );
  return { tokensInput, tokensOutput, tokensBefore: tokensInput + tokensOutput };
}
|
|
1131
|
+
// Deterministic one-line summary (no model call): titleized slug plus
// message tallies by role.
async function generateSummary(messages, slug) {
  const countByRole = (role) => messages.filter((m) => m.info.role === role).length;
  const messageCount = messages.length;
  return `Work session focused on: ${slugToTitle(
    slug
  )}. Exchanged ${messageCount} messages (${countByRole("user")} user, ${countByRole("assistant")} assistant). See transcript for full details.`;
}
|
|
1139
|
+
/**
 * Short context string fed to the slug generator: the caller's note if
 * provided, otherwise the title/body of (up to) the last three user
 * message summaries; "Work session" when nothing usable exists.
 * Always capped at MAX_CONTEXT_LENGTH characters.
 */
function buildContextString(messages, note) {
  if (note) {
    return note.slice(0, MAX_CONTEXT_LENGTH);
  }
  const recentUsers = messages.filter((m) => m.info.role === "user").slice(-3);
  const pieces = [];
  for (const m of recentUsers) {
    const summary = m.info.summary;
    if (typeof summary === "object" && summary) {
      const text = summary.title || summary.body || "";
      if (text) {
        pieces.push(text);
      }
    }
  }
  const joined = pieces.join(". ").slice(0, MAX_CONTEXT_LENGTH);
  return joined || "Work session";
}
|
|
1152
|
+
|
|
1153
|
+
// src/utils/directory-init.ts
|
|
1154
|
+
import * as fs3 from "fs";
|
|
1155
|
+
import * as path5 from "path";
|
|
1156
|
+
import { fileURLToPath } from "url";
|
|
1157
|
+
// Bundled template tree shipped with the package, resolved relative to
// this file (dist/templates/.openfleet).
var TEMPLATES_DIR = path5.join(
  path5.dirname(fileURLToPath(import.meta.url)),
  "templates",
  ".openfleet"
);
/**
 * Seed the project's .openfleet directory from the bundled templates.
 * No-op when the directory already exists — existing user data is never
 * overwritten.
 */
function initializeDirectories() {
  if (fs3.existsSync(OPENFLEET_DIR)) {
    return;
  }
  copyDirectorySync(TEMPLATES_DIR, OPENFLEET_DIR);
  logger.info("Initialized .openfleet directory");
}
|
|
1169
|
+
// Recursively copy a directory tree (files and subdirectories),
// creating `dest` as needed. Symlinks/specials follow copyFileSync
// semantics for non-directory entries.
function copyDirectorySync(src, dest) {
  fs3.mkdirSync(dest, { recursive: true });
  for (const entry of fs3.readdirSync(src, { withFileTypes: true })) {
    const from = path5.join(src, entry.name);
    const to = path5.join(dest, entry.name);
    if (entry.isDirectory()) {
      copyDirectorySync(from, to);
    } else {
      fs3.copyFileSync(from, to);
    }
  }
}
|
|
1182
|
+
|
|
1183
|
+
// src/utils/toast.ts
|
|
1184
|
+
// Braille spinner frames cycled through the toast title.
var SPINNER_DOTS = ["\u28F7", "\u28EF", "\u28DF", "\u287F", "\u28BF", "\u28FB", "\u28FD", "\u28FE"];
/**
 * Show an "animated" toast by re-issuing it every 150ms with the next
 * spinner frame. Returns an async stop() that halts the loop after the
 * in-flight frame finishes. The animation promise is intentionally
 * fire-and-forget; per-frame toast errors are swallowed.
 */
function showSpinnerToast(ctx, options) {
  const frameInterval = 150;
  let running = true;
  const animate = async () => {
    for (let frame = 0; running; frame++) {
      const spinner = SPINNER_DOTS[frame % SPINNER_DOTS.length];
      await ctx.client.tui.showToast({
        body: {
          title: `${spinner} ${options.title}`,
          message: options.message,
          variant: options.variant || "info",
          // Slightly longer than the frame interval so frames overlap
          // instead of flickering.
          duration: frameInterval + 50
        }
      }).catch(() => {
      });
      await new Promise((resolve) => setTimeout(resolve, frameInterval));
    }
  };
  animate();
  return async () => {
    running = false;
  };
}
|
|
1210
|
+
|
|
1211
|
+
// src/index.ts
|
|
1212
|
+
/**
 * Plugin entry point: seeds the .openfleet tree, registers the
 * save_conversation tool, wires agent configuration, shows a startup
 * toast for new top-level sessions, and installs the transcript hooks.
 */
var OpenfleetPlugin = async (ctx) => {
  initializeDirectories();
  logger.info("Plugin loaded");
  const saveConversation = createSaveConversationTool(ctx);
  const transcriptHooks = createTranscriptHooks(ctx);
  return {
    tool: {
      save_conversation: saveConversation
    },
    config: async (config) => {
      configureAgents(config);
    },
    event: async ({ event }) => {
      // Only react to brand-new top-level sessions; child sessions
      // carry a parentID and are skipped.
      if (event.type !== "session.created") return;
      const props = event.properties;
      if (props?.info?.parentID) return;
      // Defer the toast so session creation is not blocked on TUI calls.
      setTimeout(async () => {
        await showFleetToast(ctx);
      }, 0);
    },
    ...transcriptHooks
  };
};
|
|
1235
|
+
// Spin the Openfleet startup toast for five seconds, then stop it.
async function showFleetToast(ctx) {
  const stop = showSpinnerToast(ctx, {
    title: "\u26F4\uFE0F Openfleet",
    message: "The Openfleet plugin is now at play.",
    variant: "info"
  });
  try {
    await sleep(5e3);
  } finally {
    await stop();
  }
}
|
|
1244
|
+
// Default export consumed by the opencode plugin loader.
var index_default = OpenfleetPlugin;
export {
  index_default as default
};
|