@redaksjon/protokoll-engine 0.1.1-dev.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +47 -0
- package/dist/agentic/executor.d.ts +21 -0
- package/dist/agentic/executor.d.ts.map +1 -0
- package/dist/agentic/index.d.ts +27 -0
- package/dist/agentic/index.d.ts.map +1 -0
- package/dist/agentic/registry.d.ts +11 -0
- package/dist/agentic/registry.d.ts.map +1 -0
- package/dist/agentic/tools/lookup-person.d.ts +3 -0
- package/dist/agentic/tools/lookup-person.d.ts.map +1 -0
- package/dist/agentic/tools/lookup-project.d.ts +3 -0
- package/dist/agentic/tools/lookup-project.d.ts.map +1 -0
- package/dist/agentic/tools/route-note.d.ts +3 -0
- package/dist/agentic/tools/route-note.d.ts.map +1 -0
- package/dist/agentic/tools/store-context.d.ts +3 -0
- package/dist/agentic/tools/store-context.d.ts.map +1 -0
- package/dist/agentic/tools/verify-spelling.d.ts +3 -0
- package/dist/agentic/tools/verify-spelling.d.ts.map +1 -0
- package/dist/agentic/types.d.ts +110 -0
- package/dist/agentic/types.d.ts.map +1 -0
- package/dist/constants.d.ts +98 -0
- package/dist/constants.d.ts.map +1 -0
- package/dist/feedback/analyzer.d.ts +13 -0
- package/dist/feedback/analyzer.d.ts.map +1 -0
- package/dist/feedback/decision-tracker.d.ts +14 -0
- package/dist/feedback/decision-tracker.d.ts.map +1 -0
- package/dist/feedback/handler.d.ts +14 -0
- package/dist/feedback/handler.d.ts.map +1 -0
- package/dist/feedback/index.d.ts +12 -0
- package/dist/feedback/index.d.ts.map +1 -0
- package/dist/feedback/types.d.ts +72 -0
- package/dist/feedback/types.d.ts.map +1 -0
- package/dist/index.d.ts +24 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +32 -0
- package/dist/index.js.map +1 -0
- package/dist/index10.js +4 -0
- package/dist/index10.js.map +1 -0
- package/dist/index11.js +22 -0
- package/dist/index11.js.map +1 -0
- package/dist/index12.js +125 -0
- package/dist/index12.js.map +1 -0
- package/dist/index13.js +124 -0
- package/dist/index13.js.map +1 -0
- package/dist/index14.js +296 -0
- package/dist/index14.js.map +1 -0
- package/dist/index15.js +100 -0
- package/dist/index15.js.map +1 -0
- package/dist/index16.js +107 -0
- package/dist/index16.js.map +1 -0
- package/dist/index17.js +185 -0
- package/dist/index17.js.map +1 -0
- package/dist/index18.js +53 -0
- package/dist/index18.js.map +1 -0
- package/dist/index19.js +19 -0
- package/dist/index19.js.map +1 -0
- package/dist/index2.js +33 -0
- package/dist/index2.js.map +1 -0
- package/dist/index20.js +105 -0
- package/dist/index20.js.map +1 -0
- package/dist/index21.js +26 -0
- package/dist/index21.js.map +1 -0
- package/dist/index22.js +49 -0
- package/dist/index22.js.map +1 -0
- package/dist/index23.js +119 -0
- package/dist/index23.js.map +1 -0
- package/dist/index24.js +330 -0
- package/dist/index24.js.map +1 -0
- package/dist/index25.js +57 -0
- package/dist/index25.js.map +1 -0
- package/dist/index26.js +38 -0
- package/dist/index26.js.map +1 -0
- package/dist/index27.js +127 -0
- package/dist/index27.js.map +1 -0
- package/dist/index28.js +157 -0
- package/dist/index28.js.map +1 -0
- package/dist/index29.js +163 -0
- package/dist/index29.js.map +1 -0
- package/dist/index3.js +36 -0
- package/dist/index3.js.map +1 -0
- package/dist/index30.js +173 -0
- package/dist/index30.js.map +1 -0
- package/dist/index31.js +423 -0
- package/dist/index31.js.map +1 -0
- package/dist/index32.js +161 -0
- package/dist/index32.js.map +1 -0
- package/dist/index33.js +152 -0
- package/dist/index33.js.map +1 -0
- package/dist/index34.js +56 -0
- package/dist/index34.js.map +1 -0
- package/dist/index35.js +103 -0
- package/dist/index35.js.map +1 -0
- package/dist/index36.js +451 -0
- package/dist/index36.js.map +1 -0
- package/dist/index37.js +431 -0
- package/dist/index37.js.map +1 -0
- package/dist/index38.js +87 -0
- package/dist/index38.js.map +1 -0
- package/dist/index39.js +122 -0
- package/dist/index39.js.map +1 -0
- package/dist/index4.js +3 -0
- package/dist/index4.js.map +1 -0
- package/dist/index40.js +299 -0
- package/dist/index40.js.map +1 -0
- package/dist/index41.js +49 -0
- package/dist/index41.js.map +1 -0
- package/dist/index42.js +151 -0
- package/dist/index42.js.map +1 -0
- package/dist/index43.js +226 -0
- package/dist/index43.js.map +1 -0
- package/dist/index44.js +49 -0
- package/dist/index44.js.map +1 -0
- package/dist/index45.js +45 -0
- package/dist/index45.js.map +1 -0
- package/dist/index46.js +37 -0
- package/dist/index46.js.map +1 -0
- package/dist/index47.js +51 -0
- package/dist/index47.js.map +1 -0
- package/dist/index48.js +39 -0
- package/dist/index48.js.map +1 -0
- package/dist/index49.js +239 -0
- package/dist/index49.js.map +1 -0
- package/dist/index5.js +17 -0
- package/dist/index5.js.map +1 -0
- package/dist/index50.js +163 -0
- package/dist/index50.js.map +1 -0
- package/dist/index51.js +81 -0
- package/dist/index51.js.map +1 -0
- package/dist/index52.js +78 -0
- package/dist/index52.js.map +1 -0
- package/dist/index53.js +22 -0
- package/dist/index53.js.map +1 -0
- package/dist/index54.js +8 -0
- package/dist/index54.js.map +1 -0
- package/dist/index55.js +8 -0
- package/dist/index55.js.map +1 -0
- package/dist/index56.js +17 -0
- package/dist/index56.js.map +1 -0
- package/dist/index57.js +4 -0
- package/dist/index57.js.map +1 -0
- package/dist/index58.js +17 -0
- package/dist/index58.js.map +1 -0
- package/dist/index59.js +4 -0
- package/dist/index59.js.map +1 -0
- package/dist/index6.js +22 -0
- package/dist/index6.js.map +1 -0
- package/dist/index60.js +6 -0
- package/dist/index60.js.map +1 -0
- package/dist/index7.js +27 -0
- package/dist/index7.js.map +1 -0
- package/dist/index8.js +22 -0
- package/dist/index8.js.map +1 -0
- package/dist/index9.js +5 -0
- package/dist/index9.js.map +1 -0
- package/dist/logging.d.ts +7 -0
- package/dist/logging.d.ts.map +1 -0
- package/dist/output/index.d.ts +15 -0
- package/dist/output/index.d.ts.map +1 -0
- package/dist/phases/complete.d.ts +17 -0
- package/dist/phases/complete.d.ts.map +1 -0
- package/dist/phases/index.d.ts +5 -0
- package/dist/phases/index.d.ts.map +1 -0
- package/dist/phases/locate.d.ts +15 -0
- package/dist/phases/locate.d.ts.map +1 -0
- package/dist/phases/simple-replace.d.ts +72 -0
- package/dist/phases/simple-replace.d.ts.map +1 -0
- package/dist/phases/transcribe.d.ts +19 -0
- package/dist/phases/transcribe.d.ts.map +1 -0
- package/dist/pipeline/index.d.ts +10 -0
- package/dist/pipeline/index.d.ts.map +1 -0
- package/dist/pipeline/orchestrator.d.ts +13 -0
- package/dist/pipeline/orchestrator.d.ts.map +1 -0
- package/dist/pipeline/types.d.ts +58 -0
- package/dist/pipeline/types.d.ts.map +1 -0
- package/dist/prompt/index.d.ts +3 -0
- package/dist/prompt/index.d.ts.map +1 -0
- package/dist/prompt/templates.d.ts +40 -0
- package/dist/prompt/templates.d.ts.map +1 -0
- package/dist/prompt/transcribe.d.ts +42 -0
- package/dist/prompt/transcribe.d.ts.map +1 -0
- package/dist/reasoning/client.d.ts +42 -0
- package/dist/reasoning/client.d.ts.map +1 -0
- package/dist/reasoning/index.d.ts +17 -0
- package/dist/reasoning/index.d.ts.map +1 -0
- package/dist/reasoning/strategy.d.ts +12 -0
- package/dist/reasoning/strategy.d.ts.map +1 -0
- package/dist/reasoning/types.d.ts +58 -0
- package/dist/reasoning/types.d.ts.map +1 -0
- package/dist/reflection/collector.d.ts +18 -0
- package/dist/reflection/collector.d.ts.map +1 -0
- package/dist/reflection/index.d.ts +13 -0
- package/dist/reflection/index.d.ts.map +1 -0
- package/dist/reflection/reporter.d.ts +10 -0
- package/dist/reflection/reporter.d.ts.map +1 -0
- package/dist/reflection/types.d.ts +99 -0
- package/dist/reflection/types.d.ts.map +1 -0
- package/dist/routing/classifier.d.ts +8 -0
- package/dist/routing/classifier.d.ts.map +1 -0
- package/dist/routing/index.d.ts +12 -0
- package/dist/routing/index.d.ts.map +1 -0
- package/dist/routing/router.d.ts +8 -0
- package/dist/routing/router.d.ts.map +1 -0
- package/dist/routing/types.d.ts +68 -0
- package/dist/routing/types.d.ts.map +1 -0
- package/dist/transcript/feedback.d.ts +70 -0
- package/dist/transcript/feedback.d.ts.map +1 -0
- package/dist/transcript/index.d.ts +10 -0
- package/dist/transcript/index.d.ts.map +1 -0
- package/dist/transcript/operations.d.ts +152 -0
- package/dist/transcript/operations.d.ts.map +1 -0
- package/dist/transcript/pkl-utils.d.ts +66 -0
- package/dist/transcript/pkl-utils.d.ts.map +1 -0
- package/dist/transcription/index.d.ts +17 -0
- package/dist/transcription/index.d.ts.map +1 -0
- package/dist/transcription/service.d.ts +10 -0
- package/dist/transcription/service.d.ts.map +1 -0
- package/dist/transcription/types.d.ts +41 -0
- package/dist/transcription/types.d.ts.map +1 -0
- package/dist/types.d.ts +28 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/util/collision-detector.d.ts +77 -0
- package/dist/util/collision-detector.d.ts.map +1 -0
- package/dist/util/dates.d.ts +57 -0
- package/dist/util/dates.d.ts.map +1 -0
- package/dist/util/general.d.ts +3 -0
- package/dist/util/general.d.ts.map +1 -0
- package/dist/util/media.d.ts +9 -0
- package/dist/util/media.d.ts.map +1 -0
- package/dist/util/metadata.d.ts +138 -0
- package/dist/util/metadata.d.ts.map +1 -0
- package/dist/util/openai.d.ts +22 -0
- package/dist/util/openai.d.ts.map +1 -0
- package/dist/util/sounds-like-database.d.ts +98 -0
- package/dist/util/sounds-like-database.d.ts.map +1 -0
- package/dist/util/storage.d.ts +35 -0
- package/dist/util/storage.d.ts.map +1 -0
- package/dist/util/text-replacer.d.ts +56 -0
- package/dist/util/text-replacer.d.ts.map +1 -0
- package/dist/utils/entityFinder.d.ts +29 -0
- package/dist/utils/entityFinder.d.ts.map +1 -0
- package/package.json +84 -0
package/dist/index24.js
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
import { create as create$1 } from './index25.js';
|
|
2
|
+
import { getLogger } from './index41.js';
|
|
3
|
+
import { ConversationBuilder } from '@kjerneverk/riotprompt';
|
|
4
|
+
|
|
5
|
+
/**
 * Convert internal tool-call records into RiotPrompt's ToolCall shape.
 * Each call becomes an OpenAI-style function-call object; the structured
 * arguments are serialized to a JSON string as the wire format expects.
 */
const toRiotToolCalls = (toolCalls) => {
  const converted = [];
  for (const call of toolCalls) {
    converted.push({
      id: call.id,
      type: "function",
      function: {
        name: call.name,
        arguments: JSON.stringify(call.arguments)
      }
    });
  }
  return converted;
};
|
|
15
|
+
/**
 * Strip leaked internal processing from an LLM response so only the
 * user-facing transcript text remains.
 *
 * Removes: narration lines ("Using tools...", "Let me..."), JSON tool-call
 * artifacts, snake_case call expressions, "to=namespace.fn" routing lines,
 * CJK spam lines, and lines starting in script ranges that indicate
 * corrupted output. Then drops any leading commentary lines and collapses
 * excess blank lines.
 */
const cleanResponseContent = (content) => {
  // Every pattern below is simply deleted from the text, in this order.
  const strippers = [
    /^(?:Using tools?|Let me|I'll|I will|Now I'll|First,?\s*I(?:'ll| will)).*?[\r\n]+/gim,
    /\{"tool":\s*"[^"]+",\s*"(?:args|input)":\s*\{[^}]*\}\}/g,
    /\b\w+_\w+\(\{[^}]*\}\)/g,
    /^.*\s+to=\w+\.\w+.*$/gm,
    /^.*[\u4E00-\u9FFF].*(app|官网|彩票|中彩票).*$/gm,
    /^[\u0530-\u058F\u0E00-\u0E7F\u0A80-\u0AFF\u0C00-\u0C7F].*$/gm
  ];
  let cleaned = content;
  for (const pattern of strippers) {
    cleaned = cleaned.replace(pattern, "");
  }
  // Skip leading commentary: keep from the first non-empty line that does
  // not look like model narration. If no such line exists, keep everything.
  const commentaryStart = /^(checking|verifying|looking|searching|analyzing|processing|determining|using|calling|executing|I'm|I am|Let me)/i;
  const looksLikeCommentary = (line) =>
    commentaryStart.test(line) ||
    line.includes("tool") ||
    line.includes('{"') ||
    line.includes("reasoning") ||
    line.includes("undefined");
  const lines = cleaned.split("\n");
  let firstContentIndex = 0;
  for (let i = 0; i < lines.length; i++) {
    const trimmed = lines[i].trim();
    if (trimmed === "") continue;
    if (!looksLikeCommentary(trimmed)) {
      firstContentIndex = i;
      break;
    }
  }
  if (firstContentIndex > 0) {
    cleaned = lines.slice(firstContentIndex).join("\n");
  }
  // Collapse runs of 3+ newlines to a single blank line and trim the ends.
  return cleaned.replace(/\n{3,}/g, "\n\n").trim();
};
|
|
41
|
+
/**
 * Create an agentic transcription executor.
 *
 * `reasoning` is an LLM client exposing `complete({systemPrompt, prompt, tools, maxIterations})`;
 * `ctx` is the tool context handed to the tool registry (created via `create$1`).
 * Returns `{ process }`, where `process(transcriptText)` runs the tool-calling
 * loop and resolves to:
 *   { enhancedText, state, toolsUsed, iterations, totalTokens?, contextChanges? }
 *
 * On any failure the original transcript is returned unchanged with reduced
 * confidence — the pipeline never loses the source text.
 */
const create = (reasoning, ctx) => {
  const logger = getLogger();
  const registry = create$1(ctx);
  const process = async (transcriptText) => {
    // Mutable accumulator for everything learned during the run:
    // corrections, referenced entity ids, routing decision, confidence.
    const state = {
      originalText: transcriptText,
      correctedText: transcriptText,
      unknownEntities: [],
      resolvedEntities: /* @__PURE__ */ new Map(),
      referencedEntities: {
        people: /* @__PURE__ */ new Set(),
        projects: /* @__PURE__ */ new Set(),
        terms: /* @__PURE__ */ new Set(),
        companies: /* @__PURE__ */ new Set()
      },
      confidence: 0
    };
    // Share the corrections map with the tool context so tools can read/update it.
    ctx.resolvedEntities = state.resolvedEntities;
    const toolsUsed = [];
    // NOTE(review): contextChanges is declared and returned but never appended
    // to in this block — presumably populated by tools in a future revision.
    const contextChanges = [];
    let iterations = 0;
    let totalTokens = 0;
    // Hard cap on tool-call round-trips to prevent runaway loops.
    const maxIterations = 15;
    // Conversation history with automatic token-budget management.
    const conversation = ConversationBuilder.create({ model: "gpt-4o" }).withTokenBudget({
      max: 1e5,
      // 100k token context window
      reserveForResponse: 4e3,
      // Reserve 4k tokens for response
      strategy: "summarize",
      // Summarize old messages if budget exceeded
      onBudgetExceeded: "compress",
      // Automatically compress when exceeded
      preserveSystem: true,
      // Always keep system messages
      preserveRecent: 5
      // Keep last 5 messages
    });
    const systemPrompt = `You are an intelligent transcription assistant. Your job is to:
1. Analyze the transcript for names, projects, and companies
2. Use the available tools to verify spellings and gather context
3. Correct any misheard names or terms
4. Determine the appropriate destination for this note
5. Produce a clean, accurate Markdown transcript

CRITICAL RULES:
- This is NOT a summary. Preserve ALL content from the original transcript.
- Only fix obvious transcription errors like misheard names.
- When you have finished processing, output the COMPLETE corrected transcript as Markdown.
- Do NOT say you don't have the transcript - it's in the conversation history.

OUTPUT REQUIREMENTS - EXTREMELY IMPORTANT:
- Your final response MUST contain ONLY the corrected transcript text.
- DO NOT include any commentary like "Using tools to..." or "Let me verify...".
- DO NOT include any explanations about what you're doing or have done.
- DO NOT include any tool call information or JSON in your response.
- DO NOT include any reasoning or processing notes.
- Your output will be inserted directly into the user-facing document.
- If you need to use tools, use them silently - don't narrate what you're doing.

Available tools:
- lookup_person: Find information about people (use for any name that might be misspelled)
- lookup_project: Find project routing information
- verify_spelling: Ask user about unknown terms (if interactive mode)
- route_note: Determine where to file this note
- store_context: Remember new information for future use`;
    conversation.addSystemMessage(systemPrompt);
    const initialPrompt = `Here is the raw transcript to process:

--- BEGIN TRANSCRIPT ---
${transcriptText}
--- END TRANSCRIPT ---

Please:
1. Identify any names, companies, or technical terms that might be misspelled
2. Use the lookup_person tool to verify spelling of any names you find
3. Use route_note to determine the destination
4. Then output the COMPLETE corrected transcript as clean Markdown

CRITICAL: Your response must contain ONLY the transcript text - no commentary, no explanations, no tool information.
Remember: preserve ALL content, only fix transcription errors.`;
    conversation.addUserMessage(initialPrompt);
    try {
      logger.debug("Starting agentic transcription - analyzing for names and routing...");
      // Initial completion: the model may answer directly or request tools.
      let response = await reasoning.complete({
        systemPrompt,
        prompt: initialPrompt,
        tools: registry.getToolDefinitions(),
        maxIterations
      });
      if (response.usage) {
        totalTokens += response.usage.totalTokens;
      }
      // Record the assistant turn in the managed conversation history.
      if (response.toolCalls && response.toolCalls.length > 0) {
        conversation.addAssistantWithToolCalls(
          response.content,
          toRiotToolCalls(response.toolCalls)
        );
      } else {
        conversation.addAssistantMessage(response.content);
      }
      // Tool-call loop: execute requested tools, feed results back, repeat
      // until the model stops calling tools or we hit maxIterations.
      while (response.toolCalls && response.toolCalls.length > 0 && iterations < maxIterations) {
        iterations++;
        logger.debug("Iteration %d: Processing %d tool calls...", iterations, response.toolCalls.length);
        const toolResults = [];
        for (const toolCall of response.toolCalls) {
          logger.debug("Executing tool: %s", toolCall.name);
          toolsUsed.push(toolCall.name);
          try {
            const result = await registry.executeTool(toolCall.name, toolCall.arguments);
            const resultStr = JSON.stringify(result.data || { success: result.success, message: result.error || "OK" });
            toolResults.push({ id: toolCall.id, name: toolCall.name, result: resultStr });
            logger.debug("Tool %s result: %s", toolCall.name, result.success ? "success" : "failed");
            // Harvest structured data from tool results into state.
            if (result.data?.person) {
              state.resolvedEntities.set(result.data.person.name, result.data.suggestion);
              state.referencedEntities.people.add(result.data.person.id);
            }
            if (result.data?.term) {
              state.referencedEntities.terms.add(result.data.term.id);
            }
            if (result.data?.company) {
              state.referencedEntities.companies.add(result.data.company.id);
            }
            // route_note result: adopt its routing decision wholesale.
            if (result.data?.routingDecision?.destination) {
              const routingDecision = result.data.routingDecision;
              state.routeDecision = {
                projectId: routingDecision.projectId,
                destination: routingDecision.destination,
                confidence: routingDecision.confidence || 1,
                signals: routingDecision.signals,
                reasoning: routingDecision.reasoning || "Determined by route_note tool"
              };
              if (routingDecision.projectId) {
                state.referencedEntities.projects.add(routingDecision.projectId);
              }
            }
            // lookup_project hit with routing info: derive a full-confidence
            // routing decision from the matched project.
            if (result.data?.found && result.data?.project?.routing?.destination) {
              const project = result.data.project;
              state.routeDecision = {
                projectId: project.id,
                destination: {
                  path: project.routing.destination,
                  structure: project.routing.structure || "month"
                },
                confidence: 1,
                signals: [{ type: "explicit_phrase", value: project.name, weight: 1 }],
                reasoning: `Matched project "${project.name}" with routing to ${project.routing.destination}`
              };
              logger.debug(
                "Captured routing from project lookup: %s -> %s",
                project.name,
                project.routing.destination
              );
              state.referencedEntities.projects.add(project.id);
            }
          } catch (error) {
            // A failed tool is reported back to the model rather than
            // aborting the whole run.
            logger.error("Tool execution failed", { tool: toolCall.name, error });
            toolResults.push({
              id: toolCall.id,
              name: toolCall.name,
              result: JSON.stringify({ error: String(error) })
            });
          }
        }
        for (const tr of toolResults) {
          conversation.addToolResult(tr.id, tr.result, tr.name);
        }
        // Re-prompt with the full transcript so the model cannot "lose" it
        // to context compression, plus a summary of corrections so far.
        const continuationPrompt = `Tool results received. Here's a reminder of your task:

ORIGINAL TRANSCRIPT (process this):
--- BEGIN TRANSCRIPT ---
${transcriptText}
--- END TRANSCRIPT ---

Corrections made so far: ${state.resolvedEntities.size > 0 ? Array.from(state.resolvedEntities.entries()).map(([k, v]) => `${k} -> ${v}`).join(", ") : "none yet"}

Continue analyzing. If you need more information, use the tools.
When you're done with tool calls, output the COMPLETE corrected transcript as Markdown.
Do NOT summarize - include ALL original content with corrections applied.

CRITICAL REMINDER: Your response must contain ONLY the transcript text. Do NOT include any commentary, explanations, or processing notes - those will leak into the user-facing document.`;
        conversation.addUserMessage(continuationPrompt);
        response = await reasoning.complete({
          systemPrompt,
          prompt: continuationPrompt,
          tools: registry.getToolDefinitions()
        });
        if (response.usage) {
          totalTokens += response.usage.totalTokens;
        }
        if (response.toolCalls && response.toolCalls.length > 0) {
          conversation.addAssistantWithToolCalls(
            response.content,
            toRiotToolCalls(response.toolCalls)
          );
        } else {
          conversation.addAssistantMessage(response.content);
        }
      }
      // A response longer than 50 chars is treated as the final transcript;
      // anything shorter triggers an explicit re-request below.
      if (response.content && response.content.length > 50) {
        const cleanedContent = cleanResponseContent(response.content);
        if (cleanedContent !== response.content) {
          const removedChars = response.content.length - cleanedContent.length;
          logger.warn(
            "Removed leaked internal processing from response (%d -> %d chars, removed %d chars)",
            response.content.length,
            cleanedContent.length,
            removedChars
          );
          // Heuristic corruption check: >10% stripped, or characters from
          // script ranges that should never appear in this output.
          const corruptionRatio = removedChars / response.content.length;
          const hasSuspiciousUnicode = /[\u0530-\u058F\u0E00-\u0E7F\u4E00-\u9FFF\u0A80-\u0AFF\u0C00-\u0C7F]/.test(response.content);
          if (corruptionRatio > 0.1 || hasSuspiciousUnicode) {
            logger.error(
              "SEVERE CORRUPTION DETECTED in LLM response (%.1f%% removed, suspicious unicode: %s)",
              corruptionRatio * 100,
              hasSuspiciousUnicode
            );
            logger.error(
              "Raw response preview (first 500 chars): %s",
              response.content.substring(0, 500).replace(/\n/g, "\\n")
            );
          }
        }
        state.correctedText = cleanedContent;
        state.confidence = 0.9;
        logger.debug("Final transcript generated: %d characters", cleanedContent.length);
      } else {
        // Model ended without producing the transcript: ask one more time,
        // supplying the original text and accumulated corrections explicitly.
        logger.debug("Model did not produce transcript, requesting explicitly...");
        const finalRequest = `Please output the COMPLETE corrected transcript now.

ORIGINAL:
${transcriptText}

CORRECTIONS TO APPLY:
${state.resolvedEntities.size > 0 ? Array.from(state.resolvedEntities.entries()).map(([k, v]) => `- "${k}" should be "${v}"`).join("\n") : "None identified"}

Output the full transcript as clean Markdown. Do NOT summarize.

CRITICAL: Your response must contain ONLY the corrected transcript text - absolutely no commentary, tool information, or explanations.`;
        const finalResponse = await reasoning.complete({
          systemPrompt,
          prompt: finalRequest
        });
        if (finalResponse.usage) {
          totalTokens += finalResponse.usage.totalTokens;
        }
        // Fall back to the original transcript if the model returns nothing.
        const cleanedFinalContent = cleanResponseContent(finalResponse.content || transcriptText);
        if (cleanedFinalContent !== finalResponse.content) {
          const removedChars = (finalResponse.content?.length || 0) - cleanedFinalContent.length;
          logger.warn(
            "Removed leaked internal processing from final response (%d -> %d chars, removed %d chars)",
            finalResponse.content?.length || 0,
            cleanedFinalContent.length,
            removedChars
          );
          const corruptionRatio = removedChars / (finalResponse.content?.length || 1);
          const hasSuspiciousUnicode = /[\u0530-\u058F\u0E00-\u0E7F\u4E00-\u9FFF\u0A80-\u0AFF\u0C00-\u0C7F]/.test(finalResponse.content || "");
          if (corruptionRatio > 0.1 || hasSuspiciousUnicode) {
            logger.error(
              "SEVERE CORRUPTION DETECTED in final LLM response (%.1f%% removed, suspicious unicode: %s)",
              corruptionRatio * 100,
              hasSuspiciousUnicode
            );
            logger.error(
              "Raw response preview (first 500 chars): %s",
              (finalResponse.content || "").substring(0, 500).replace(/\n/g, "\\n")
            );
          }
        }
        state.correctedText = cleanedFinalContent;
        // Lower confidence: the transcript had to be requested explicitly.
        state.confidence = 0.8;
      }
    } catch (error) {
      // Fail safe: never lose the user's transcript.
      logger.error("Agentic processing failed", { error });
      state.correctedText = transcriptText;
      state.confidence = 0.5;
    }
    return {
      enhancedText: state.correctedText,
      state,
      toolsUsed: [...new Set(toolsUsed)],
      iterations,
      totalTokens: totalTokens > 0 ? totalTokens : void 0,
      contextChanges: contextChanges.length > 0 ? contextChanges : void 0
    };
  };
  return { process };
};
|
|
328
|
+
|
|
329
|
+
export { create };
|
|
330
|
+
//# sourceMappingURL=index24.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index24.js","sources":["../src/agentic/executor.ts"],"sourcesContent":["/**\n * Agentic Executor\n * \n * Executes the agentic transcription loop with tool calls.\n * Uses RiotPrompt's ConversationBuilder for conversation management.\n */\n\nimport { ToolContext, TranscriptionState } from './types';\nimport * as Registry from './registry';\nimport * as Reasoning from '../reasoning';\nimport * as Logging from '../logging';\nimport { ConversationBuilder, ToolCall as RiotToolCall } from '@kjerneverk/riotprompt';\n\nexport interface ContextChangeRecord {\n entityType: 'person' | 'project' | 'company' | 'term' | 'ignored';\n entityId: string;\n entityName: string;\n action: 'created' | 'updated';\n details?: Record<string, unknown>;\n}\n\nexport interface ExecutorInstance {\n process(transcriptText: string): Promise<{\n enhancedText: string;\n state: TranscriptionState;\n toolsUsed: string[];\n iterations: number;\n totalTokens?: number;\n contextChanges?: ContextChangeRecord[];\n }>;\n}\n\n/**\n * Convert internal tool call format to RiotPrompt's ToolCall format\n */\nconst toRiotToolCalls = (toolCalls: Array<{ id: string; name: string; arguments: Record<string, unknown> }>): RiotToolCall[] => {\n return toolCalls.map(tc => ({\n id: tc.id,\n type: 'function' as const,\n function: {\n name: tc.name,\n arguments: JSON.stringify(tc.arguments),\n },\n }));\n};\n\n/**\n * Clean response content by removing any leaked internal processing information\n * that should never appear in the user-facing transcript.\n */\nconst cleanResponseContent = (content: string): string => {\n // Remove common patterns of leaked internal processing\n // Pattern 1: \"Using tools to...\" type commentary\n let cleaned = content.replace(/^(?:Using tools?|Let me|I'll|I will|Now I'll|First,?\\s*I(?:'ll| will)).*?[\\r\\n]+/gim, '');\n \n // Pattern 2: JSON tool call artifacts - match complete JSON objects with \"tool\" key\n // Matches: {\"tool\":\"...\",\"args\":{...}}, 
{\"tool\":\"...\",\"input\":{...}}, etc.\n // Use a more careful pattern that matches balanced braces\n cleaned = cleaned.replace(/\\{\"tool\":\\s*\"[^\"]+\",\\s*\"(?:args|input)\":\\s*\\{[^}]*\\}\\}/g, '');\n \n // Pattern 3: Tool call references in the format tool_name({...})\n cleaned = cleaned.replace(/\\b\\w+_\\w+\\(\\{[^}]*\\}\\)/g, '');\n \n // Pattern 4: Remove lines with \"to=\" patterns (internal routing artifacts)\n // Matches: \"Այ to=lookup_project.commentary\", \"undefined to=route_note.commentary\"\n // Do this BEFORE Unicode filtering to catch mixed corruption\n cleaned = cleaned.replace(/^.*\\s+to=\\w+\\.\\w+.*$/gm, '');\n \n // Pattern 5: Remove lines that look like spam/SEO (Chinese gambling sites, etc.)\n // Matches lines with Chinese characters followed by \"app\", \"官网\", etc.\n // This is more specific than general Unicode filtering\n const spamPattern = /^.*[\\u4E00-\\u9FFF].*(app|官网|彩票|中彩票).*$/gm;\n cleaned = cleaned.replace(spamPattern, '');\n \n // Pattern 6: Remove lines with suspicious Unicode at the START (corruption indicators)\n // Only remove lines that START with non-Latin scripts (not legitimate content)\n // This catches corruption like \"Այ to=...\" or \"สามสิบเอ็ด\" at line start\n const corruptionStartPattern = /^[\\u0530-\\u058F\\u0E00-\\u0E7F\\u0A80-\\u0AFF\\u0C00-\\u0C7F].*$/gm;\n cleaned = cleaned.replace(corruptionStartPattern, '');\n \n // Pattern 7: Lines that are purely reasoning/commentary before the actual content\n // Look for lines like \"I'll verify...\", \"Checking...\", etc.\n const lines = cleaned.split('\\n');\n let startIndex = 0;\n \n // Skip leading lines that look like internal commentary\n for (let i = 0; i < lines.length; i++) {\n const line = lines[i].trim();\n \n // Skip empty lines\n if (line === '') continue;\n \n // Check if line looks like commentary (starts with action verbs, contains \"tool\", etc.)\n const isCommentary = 
/^(checking|verifying|looking|searching|analyzing|processing|determining|using|calling|executing|I'm|I am|Let me)/i.test(line)\n || line.includes('tool')\n || line.includes('{\"')\n || line.includes('reasoning')\n || line.includes('undefined');\n \n if (!isCommentary) {\n // This looks like actual content - start from here\n startIndex = i;\n break;\n }\n }\n \n // Rejoin from the first real content line\n if (startIndex > 0) {\n cleaned = lines.slice(startIndex).join('\\n');\n }\n \n // Final cleanup: remove multiple consecutive blank lines\n cleaned = cleaned.replace(/\\n{3,}/g, '\\n\\n');\n \n return cleaned.trim();\n};\n\nexport const create = (\n reasoning: Reasoning.ReasoningInstance,\n ctx: ToolContext\n): ExecutorInstance => {\n const logger = Logging.getLogger();\n const registry = Registry.create(ctx);\n \n const process = async (transcriptText: string) => {\n const state: TranscriptionState = {\n originalText: transcriptText,\n correctedText: transcriptText,\n unknownEntities: [],\n resolvedEntities: new Map(),\n referencedEntities: {\n people: new Set(),\n projects: new Set(),\n terms: new Set(),\n companies: new Set(),\n },\n confidence: 0,\n };\n \n // Make resolvedEntities available to tools so they can avoid re-asking\n ctx.resolvedEntities = state.resolvedEntities;\n \n const toolsUsed: string[] = [];\n const contextChanges: ContextChangeRecord[] = [];\n let iterations = 0;\n let totalTokens = 0;\n const maxIterations = 15;\n \n // Use ConversationBuilder for conversation management with token budget\n const conversation = ConversationBuilder.create({ model: 'gpt-4o' })\n .withTokenBudget({\n max: 100000, // 100k token context window\n reserveForResponse: 4000, // Reserve 4k tokens for response\n strategy: 'summarize', // Summarize old messages if budget exceeded\n onBudgetExceeded: 'compress', // Automatically compress when exceeded\n preserveSystem: true, // Always keep system messages\n preserveRecent: 5 // Keep last 5 messages\n });\n \n // 
Build the system prompt\n const systemPrompt = `You are an intelligent transcription assistant. Your job is to:\n1. Analyze the transcript for names, projects, and companies\n2. Use the available tools to verify spellings and gather context\n3. Correct any misheard names or terms\n4. Determine the appropriate destination for this note\n5. Produce a clean, accurate Markdown transcript\n\nCRITICAL RULES:\n- This is NOT a summary. Preserve ALL content from the original transcript.\n- Only fix obvious transcription errors like misheard names.\n- When you have finished processing, output the COMPLETE corrected transcript as Markdown.\n- Do NOT say you don't have the transcript - it's in the conversation history.\n\nOUTPUT REQUIREMENTS - EXTREMELY IMPORTANT:\n- Your final response MUST contain ONLY the corrected transcript text.\n- DO NOT include any commentary like \"Using tools to...\" or \"Let me verify...\".\n- DO NOT include any explanations about what you're doing or have done.\n- DO NOT include any tool call information or JSON in your response.\n- DO NOT include any reasoning or processing notes.\n- Your output will be inserted directly into the user-facing document.\n- If you need to use tools, use them silently - don't narrate what you're doing.\n\nAvailable tools:\n- lookup_person: Find information about people (use for any name that might be misspelled)\n- lookup_project: Find project routing information \n- verify_spelling: Ask user about unknown terms (if interactive mode)\n- route_note: Determine where to file this note\n- store_context: Remember new information for future use`;\n\n // Add system message using ConversationBuilder\n conversation.addSystemMessage(systemPrompt);\n \n // Add the initial user message with transcript\n const initialPrompt = `Here is the raw transcript to process:\n\n--- BEGIN TRANSCRIPT ---\n${transcriptText}\n--- END TRANSCRIPT ---\n\nPlease:\n1. Identify any names, companies, or technical terms that might be misspelled\n2. 
Use the lookup_person tool to verify spelling of any names you find\n3. Use route_note to determine the destination\n4. Then output the COMPLETE corrected transcript as clean Markdown\n\nCRITICAL: Your response must contain ONLY the transcript text - no commentary, no explanations, no tool information.\nRemember: preserve ALL content, only fix transcription errors.`;\n\n conversation.addUserMessage(initialPrompt);\n\n try {\n // Initial reasoning call\n logger.debug('Starting agentic transcription - analyzing for names and routing...');\n let response = await reasoning.complete({\n systemPrompt,\n prompt: initialPrompt,\n tools: registry.getToolDefinitions(),\n maxIterations,\n });\n \n // Track token usage\n if (response.usage) {\n totalTokens += response.usage.totalTokens;\n }\n \n // Add assistant response to conversation\n if (response.toolCalls && response.toolCalls.length > 0) {\n conversation.addAssistantWithToolCalls(\n response.content,\n toRiotToolCalls(response.toolCalls)\n );\n } else {\n conversation.addAssistantMessage(response.content);\n }\n \n // Iterative tool use loop\n while (response.toolCalls && response.toolCalls.length > 0 && iterations < maxIterations) {\n iterations++;\n logger.debug('Iteration %d: Processing %d tool calls...', iterations, response.toolCalls.length);\n \n // Collect tool results\n const toolResults: Array<{ id: string; name: string; result: string }> = [];\n \n // Execute each tool call\n for (const toolCall of response.toolCalls) {\n logger.debug('Executing tool: %s', toolCall.name);\n toolsUsed.push(toolCall.name);\n \n try {\n const result = await registry.executeTool(toolCall.name, toolCall.arguments);\n \n // Format result for the model\n const resultStr = JSON.stringify(result.data || { success: result.success, message: result.error || 'OK' });\n toolResults.push({ id: toolCall.id, name: toolCall.name, result: resultStr });\n \n logger.debug('Tool %s result: %s', toolCall.name, result.success ? 
'success' : 'failed');\n \n // Handle results that need user input\n // Interactive functionality moved to protokoll-cli\n /* \n // eslint-disable-next-line no-constant-condition\n if (result.needsUserInput && false) {\n logger.info('Interactive: %s requires clarification', toolCall.name);\n \n const termName = String(toolCall.arguments.name || toolCall.arguments.term || '');\n \n const clarification = await ctx.interactiveInstance.handleClarification({\n type: result.data?.clarificationType || 'general',\n term: result.data?.term || termName,\n context: result.userPrompt || '',\n suggestion: result.data?.suggestion,\n options: result.data?.options,\n });\n \n if (clarification.response) {\n state.resolvedEntities.set(termName, clarification.response);\n logger.info('Clarified: %s -> %s', termName, clarification.response);\n \n // Handle new project/term wizard response\n if (result.data?.clarificationType === 'new_project' && clarification.additionalInfo) {\n const wizardResult = clarification.additionalInfo as {\n action: 'create' | 'link' | 'term' | 'skip' | 'ignore';\n projectName?: string;\n destination?: string;\n description?: string;\n linkedProjectIndex?: number;\n linkedTermName?: string;\n aliasName?: string;\n termDescription?: string;\n // For 'term' action\n termName?: string;\n termExpansion?: string;\n termProjects?: number[];\n // For nested project creation from term wizard\n createdProject?: {\n action: 'create' | 'link' | 'skip';\n projectName?: string;\n destination?: string;\n description?: string;\n };\n // For 'ignore' action\n ignoredTerm?: string;\n };\n \n const knownProjects = result.data?.knownProjects as Array<{\n id: string;\n name: string;\n description?: string;\n classification?: { explicit_phrases?: string[]; context_type?: string };\n routing?: { destination: string; structure?: string; filename_options?: string[] };\n }> | undefined;\n \n if (wizardResult.action === 'create') {\n // CREATE NEW PROJECT\n const projectName = 
wizardResult.projectName || termName;\n const projectId = projectName.toLowerCase().replace(/\\s+/g, '-');\n const projectDestination = wizardResult.destination;\n \n const newProject = {\n id: projectId,\n name: projectName,\n type: 'project' as const,\n description: wizardResult.description || `Project for \"${projectName}\"`,\n classification: {\n context_type: 'work' as const,\n explicit_phrases: [termName.toLowerCase(), projectName.toLowerCase()].filter((v, i, a) => a.indexOf(v) === i),\n },\n routing: {\n // Only include destination if explicitly provided - otherwise uses global default\n ...(projectDestination && { destination: projectDestination }),\n structure: 'month' as const,\n filename_options: ['date', 'time', 'subject'] as Array<'date' | 'time' | 'subject'>,\n },\n active: true,\n };\n \n try {\n await ctx.contextInstance.saveEntity(newProject);\n await ctx.contextInstance.reload(); // Reload so subsequent searches find this entity\n logger.info('Created new project: %s%s', projectName, projectDestination ? 
` -> ${projectDestination}` : ' (using default destination)');\n \n contextChanges.push({\n entityType: 'project',\n entityId: projectId,\n entityName: projectName,\n action: 'created',\n details: {\n ...(projectDestination && { destination: projectDestination }),\n description: wizardResult.description,\n triggeredByTerm: termName,\n },\n });\n \n // Update routing if destination was specified\n if (projectDestination) {\n state.routeDecision = {\n projectId,\n destination: { path: projectDestination, structure: 'month' },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: termName, weight: 1.0 }],\n reasoning: `User created new project \"${projectName}\" routing to ${projectDestination}`,\n };\n }\n } catch (error) {\n logger.warn('Failed to save new project: %s', error);\n }\n \n } else if (wizardResult.action === 'link' && wizardResult.linkedTermName) {\n // LINK AS ALIAS TO EXISTING TERM\n const existingTermName = wizardResult.linkedTermName;\n const aliasVariant = wizardResult.aliasName || termName;\n \n // Search for the existing term\n const termSearch = await ctx.contextInstance.search(existingTermName);\n const existingTerm = termSearch.find(e => e.type === 'term' && \n e.name.toLowerCase() === existingTermName.toLowerCase());\n \n if (existingTerm) {\n // Add the new variant to sounds_like\n const existingVariants = (existingTerm as { sounds_like?: string[] }).sounds_like || [];\n const updatedVariants = [...existingVariants, aliasVariant.toLowerCase()]\n .filter((v, i, a) => a.indexOf(v) === i); // dedupe\n \n const updatedTerm = {\n ...existingTerm,\n type: 'term' as const,\n sounds_like: updatedVariants,\n };\n \n try {\n await ctx.contextInstance.saveEntity(updatedTerm);\n await ctx.contextInstance.reload();\n logger.info('Added alias \"%s\" to existing term \"%s\"', aliasVariant, existingTerm.name);\n \n // Mark as resolved\n state.resolvedEntities.set(termName, existingTerm.name);\n state.resolvedEntities.set(aliasVariant, 
existingTerm.name);\n \n contextChanges.push({\n entityType: 'term',\n entityId: existingTerm.id,\n entityName: existingTerm.name,\n action: 'updated',\n details: {\n addedAlias: aliasVariant,\n sounds_like: updatedVariants,\n },\n });\n \n // If term has associated projects, use for routing\n const termProjects = (existingTerm as { projects?: string[] }).projects || [];\n if (termProjects.length > 0) {\n const allProjects = ctx.contextInstance.getAllProjects();\n const primaryProject = allProjects.find(p => p.id === termProjects[0]);\n if (primaryProject?.routing?.destination) {\n state.routeDecision = {\n projectId: primaryProject.id,\n destination: {\n path: primaryProject.routing.destination,\n structure: 'month'\n },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: existingTerm.name, weight: 1.0 }],\n reasoning: `User linked \"${aliasVariant}\" as alias for term \"${existingTerm.name}\" associated with project \"${primaryProject.name}\"`,\n };\n }\n }\n } catch (error) {\n logger.warn('Failed to add alias to existing term: %s', error);\n }\n } else {\n logger.warn('Could not find existing term \"%s\" to link alias', existingTermName);\n }\n \n } else if (wizardResult.action === 'link' && typeof wizardResult.linkedProjectIndex === 'number') {\n // LINK TO EXISTING PROJECT\n if (knownProjects && wizardResult.linkedProjectIndex < knownProjects.length) {\n const linkedProject = knownProjects[wizardResult.linkedProjectIndex];\n \n // Add the term as an alias\n const existingPhrases = linkedProject.classification?.explicit_phrases || [];\n const updatedPhrases = [...existingPhrases, termName.toLowerCase()]\n .filter((v, i, a) => a.indexOf(v) === i); // dedupe\n \n const updatedProject = {\n ...linkedProject,\n type: 'project' as const,\n // Add term description to project notes if provided\n notes: wizardResult.termDescription \n ? 
`${linkedProject.description || ''}\\n\\n${termName}: ${wizardResult.termDescription}`.trim()\n : linkedProject.description,\n classification: {\n ...linkedProject.classification,\n context_type: (linkedProject.classification?.context_type || 'work') as 'work' | 'personal' | 'mixed',\n explicit_phrases: updatedPhrases,\n },\n routing: {\n // Preserve existing destination (or omit if not set)\n ...(linkedProject.routing?.destination && { destination: linkedProject.routing.destination }),\n structure: (linkedProject.routing?.structure || 'month') as 'none' | 'year' | 'month' | 'day',\n filename_options: (linkedProject.routing?.filename_options || ['date', 'time']) as Array<'date' | 'time' | 'subject'>,\n },\n };\n \n try {\n await ctx.contextInstance.saveEntity(updatedProject);\n await ctx.contextInstance.reload(); // Reload so subsequent searches find this entity\n logger.info('Linked \"%s\" to project \"%s\"', termName, linkedProject.name);\n \n contextChanges.push({\n entityType: 'project',\n entityId: linkedProject.id,\n entityName: linkedProject.name,\n action: 'updated',\n details: {\n addedAlias: termName,\n termDescription: wizardResult.termDescription,\n explicit_phrases: updatedPhrases,\n },\n });\n \n // Update routing to use the linked project\n if (linkedProject.routing?.destination) {\n state.routeDecision = {\n projectId: linkedProject.id,\n destination: { \n path: linkedProject.routing.destination, \n structure: 'month' \n },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: termName, weight: 1.0 }],\n reasoning: `User linked \"${termName}\" to existing project \"${linkedProject.name}\"`,\n };\n }\n } catch (error) {\n logger.warn('Failed to update project with alias: %s', error);\n }\n }\n } else if (wizardResult.action === 'term') {\n // CREATE NEW TERM ENTITY\n const termNameFinal = wizardResult.termName || termName;\n const termId = termNameFinal.toLowerCase().replace(/\\s+/g, '-');\n \n // Get project IDs from indices\n const 
projectIds: string[] = [];\n if (wizardResult.termProjects && knownProjects) {\n for (const idx of wizardResult.termProjects) {\n if (idx >= 0 && idx < knownProjects.length) {\n projectIds.push(knownProjects[idx].id);\n }\n }\n }\n \n // Handle nested project creation from term wizard\n if (wizardResult.createdProject?.action === 'create' && wizardResult.createdProject.projectName) {\n const projectName = wizardResult.createdProject.projectName;\n const projectId = projectName.toLowerCase().replace(/\\s+/g, '-');\n const projectDestination = wizardResult.createdProject.destination;\n \n const newProject = {\n id: projectId,\n name: projectName,\n type: 'project' as const,\n description: wizardResult.createdProject.description || `Project for \"${projectName}\"`,\n classification: {\n context_type: 'work' as const,\n explicit_phrases: [projectName.toLowerCase(), termNameFinal.toLowerCase()].filter((v, i, a) => a.indexOf(v) === i),\n },\n routing: {\n // Only include destination if explicitly provided - otherwise uses global default\n ...(projectDestination && { destination: projectDestination }),\n structure: 'month' as const,\n filename_options: ['date', 'time', 'subject'] as Array<'date' | 'time' | 'subject'>,\n },\n active: true,\n };\n \n try {\n await ctx.contextInstance.saveEntity(newProject);\n await ctx.contextInstance.reload(); // Reload so subsequent searches find this entity\n logger.info('Created new project from term wizard: %s%s', projectName, projectDestination ? 
` -> ${projectDestination}` : ' (using default destination)');\n \n // Add the new project to the projectIds list for term association\n projectIds.push(projectId);\n \n contextChanges.push({\n entityType: 'project',\n entityId: projectId,\n entityName: projectName,\n action: 'created',\n details: {\n ...(projectDestination && { destination: projectDestination }),\n description: wizardResult.createdProject.description,\n createdForTerm: termNameFinal,\n },\n });\n \n // Update routing to use the new project (if destination was specified)\n if (projectDestination) {\n state.routeDecision = {\n projectId,\n destination: { path: projectDestination, structure: 'month' },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: termNameFinal, weight: 1.0 }],\n reasoning: `User created project \"${projectName}\" for term \"${termNameFinal}\"`,\n };\n }\n } catch (error) {\n logger.warn('Failed to save new project from term wizard: %s', error);\n }\n }\n \n const newTerm = {\n id: termId,\n name: termNameFinal,\n type: 'term' as const,\n expansion: wizardResult.termExpansion,\n notes: wizardResult.termDescription,\n projects: projectIds.length > 0 ? projectIds : undefined,\n sounds_like: [termName.toLowerCase()],\n };\n \n try {\n await ctx.contextInstance.saveEntity(newTerm);\n await ctx.contextInstance.reload(); // Reload so subsequent searches find this entity\n logger.info('Created new term: %s (projects: %s)', \n termNameFinal, \n projectIds.length > 0 ? 
projectIds.join(', ') : 'none'\n );\n \n contextChanges.push({\n entityType: 'term',\n entityId: termId,\n entityName: termNameFinal,\n action: 'created',\n details: {\n expansion: wizardResult.termExpansion,\n projects: projectIds,\n description: wizardResult.termDescription,\n },\n });\n \n // If term has associated projects and we haven't set routing yet, use the first one\n if (projectIds.length > 0 && !state.routeDecision) {\n // For newly created project, we already set routing above\n // For existing projects, look them up\n if (knownProjects) {\n const primaryProject = knownProjects.find(p => p.id === projectIds[0]);\n if (primaryProject?.routing?.destination) {\n state.routeDecision = {\n projectId: primaryProject.id,\n destination: { \n path: primaryProject.routing.destination, \n structure: 'month' \n },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: termNameFinal, weight: 1.0 }],\n reasoning: `User created term \"${termNameFinal}\" associated with project \"${primaryProject.name}\"`,\n };\n }\n }\n }\n } catch (error) {\n logger.warn('Failed to save new term: %s', error);\n }\n } else if (wizardResult.action === 'ignore' && wizardResult.ignoredTerm) {\n // IGNORE - add term to ignore list so user won't be asked again\n const ignoredTermName = wizardResult.ignoredTerm;\n const ignoredId = ignoredTermName.toLowerCase()\n .replace(/[^a-z0-9]/g, '-')\n .replace(/-+/g, '-')\n .replace(/^-|-$/g, '');\n \n const newIgnored = {\n id: ignoredId,\n name: ignoredTermName,\n type: 'ignored' as const,\n ignoredAt: new Date().toISOString(),\n };\n \n try {\n await ctx.contextInstance.saveEntity(newIgnored);\n await ctx.contextInstance.reload();\n logger.info('Added to ignore list: %s', ignoredTermName);\n \n contextChanges.push({\n entityType: 'ignored',\n entityId: ignoredId,\n entityName: ignoredTermName,\n action: 'created',\n details: {\n reason: 'User chose to ignore this term',\n },\n });\n } catch (error) {\n logger.warn('Failed to save 
ignored term: %s', error);\n }\n }\n // 'skip' action - do nothing\n }\n \n // Handle new person wizard response\n if (result.data?.clarificationType === 'new_person' && clarification.additionalInfo) {\n const personWizardResult = clarification.additionalInfo as {\n action: 'create' | 'skip';\n personName?: string;\n organization?: string;\n notes?: string;\n linkedProjectId?: string;\n linkedProjectIndex?: number;\n createdProject?: {\n action: 'create' | 'link' | 'skip';\n projectName?: string;\n destination?: string;\n description?: string;\n };\n };\n \n const knownProjects = result.data?.knownProjects as Array<{\n id: string;\n name: string;\n description?: string;\n classification?: { explicit_phrases?: string[]; context_type?: string };\n routing?: { destination: string; structure?: string; filename_options?: string[] };\n }> | undefined;\n \n if (personWizardResult.action === 'create') {\n let linkedProjectId: string | undefined;\n \n // First, handle any nested project creation\n if (personWizardResult.createdProject?.action === 'create' && personWizardResult.createdProject.projectName) {\n const projectName = personWizardResult.createdProject.projectName;\n const projectId = projectName.toLowerCase().replace(/\\s+/g, '-');\n const projectDestination = personWizardResult.createdProject.destination;\n \n const newProject = {\n id: projectId,\n name: projectName,\n type: 'project' as const,\n description: personWizardResult.createdProject.description || `Project for \"${projectName}\"`,\n classification: {\n context_type: 'work' as const,\n explicit_phrases: [projectName.toLowerCase()],\n },\n routing: {\n // Only include destination if explicitly provided - otherwise uses global default\n ...(projectDestination && { destination: projectDestination }),\n structure: 'month' as const,\n filename_options: ['date', 'time', 'subject'] as Array<'date' | 'time' | 'subject'>,\n },\n active: true,\n };\n \n try {\n await ctx.contextInstance.saveEntity(newProject);\n 
await ctx.contextInstance.reload(); // Reload so subsequent searches find this entity\n logger.info('Created new project from person wizard: %s%s', projectName, projectDestination ? ` -> ${projectDestination}` : ' (using default destination)');\n linkedProjectId = projectId;\n \n contextChanges.push({\n entityType: 'project',\n entityId: projectId,\n entityName: projectName,\n action: 'created',\n details: {\n ...(projectDestination && { destination: projectDestination }),\n description: personWizardResult.createdProject.description,\n createdForPerson: personWizardResult.personName,\n },\n });\n \n // Update routing to use the new project (if destination was specified)\n if (projectDestination) {\n state.routeDecision = {\n projectId,\n destination: { path: projectDestination, structure: 'month' },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: projectName, weight: 1.0 }],\n reasoning: `User created project \"${projectName}\" for person \"${personWizardResult.personName}\"`,\n };\n }\n } catch (error) {\n logger.warn('Failed to save new project from person wizard: %s', error);\n }\n } else if (typeof personWizardResult.linkedProjectIndex === 'number' && knownProjects) {\n // User linked to existing project\n if (personWizardResult.linkedProjectIndex < knownProjects.length) {\n const linkedProject = knownProjects[personWizardResult.linkedProjectIndex];\n linkedProjectId = linkedProject.id;\n \n // Update routing to use the linked project\n if (linkedProject.routing?.destination) {\n state.routeDecision = {\n projectId: linkedProject.id,\n destination: { \n path: linkedProject.routing.destination, \n structure: 'month' \n },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: personWizardResult.personName || termName, weight: 1.0 }],\n reasoning: `User linked person \"${personWizardResult.personName}\" to project \"${linkedProject.name}\"`,\n };\n }\n }\n }\n \n // Now save the person\n const personName = personWizardResult.personName 
|| termName;\n const personId = personName.toLowerCase().replace(/\\s+/g, '-');\n \n const newPerson = {\n id: personId,\n name: personName,\n type: 'person' as const,\n organization: personWizardResult.organization,\n notes: personWizardResult.notes,\n projects: linkedProjectId ? [linkedProjectId] : [],\n sounds_like: [termName.toLowerCase()],\n };\n \n try {\n await ctx.contextInstance.saveEntity(newPerson);\n await ctx.contextInstance.reload(); // Reload so subsequent searches find this entity\n logger.info('Created new person: %s (org: %s, project: %s)', \n personName, \n personWizardResult.organization || 'none',\n linkedProjectId || 'none'\n );\n \n // Update resolved entities with correct name\n state.resolvedEntities.set(termName, personName);\n \n contextChanges.push({\n entityType: 'person',\n entityId: personId,\n entityName: personName,\n action: 'created',\n details: {\n organization: personWizardResult.organization,\n linkedProject: linkedProjectId,\n notes: personWizardResult.notes,\n heardAs: termName,\n },\n });\n } catch (error) {\n logger.warn('Failed to save new person: %s', error);\n }\n }\n // 'skip' action - do nothing\n }\n }\n }\n */ // End of commented interactive code\n \n // Update state based on tool results\n if (result.data?.person) {\n state.resolvedEntities.set(result.data.person.name, result.data.suggestion);\n // Track person entity reference\n state.referencedEntities.people.add(result.data.person.id);\n }\n \n // Track term entities\n if (result.data?.term) {\n state.referencedEntities.terms.add(result.data.term.id);\n }\n \n // Track company entities\n if (result.data?.company) {\n state.referencedEntities.companies.add(result.data.company.id);\n }\n \n // Capture routing from route_note tool\n if (result.data?.routingDecision?.destination) {\n const routingDecision = result.data.routingDecision;\n state.routeDecision = {\n projectId: routingDecision.projectId,\n destination: routingDecision.destination,\n confidence: 
routingDecision.confidence || 1.0,\n signals: routingDecision.signals,\n reasoning: routingDecision.reasoning || 'Determined by route_note tool',\n };\n \n // Track project if routing decision includes it\n if (routingDecision.projectId) {\n state.referencedEntities.projects.add(routingDecision.projectId);\n }\n }\n \n // Capture routing from lookup_project when project has routing config\n if (result.data?.found && result.data?.project?.routing?.destination) {\n const project = result.data.project;\n state.routeDecision = {\n projectId: project.id,\n destination: { \n path: project.routing.destination,\n structure: project.routing.structure || 'month',\n },\n confidence: 1.0,\n signals: [{ type: 'explicit_phrase', value: project.name, weight: 1.0 }],\n reasoning: `Matched project \"${project.name}\" with routing to ${project.routing.destination}`,\n };\n logger.debug('Captured routing from project lookup: %s -> %s', \n project.name, project.routing.destination);\n \n // Track project entity reference\n state.referencedEntities.projects.add(project.id);\n }\n \n } catch (error) {\n logger.error('Tool execution failed', { tool: toolCall.name, error });\n toolResults.push({ \n id: toolCall.id, \n name: toolCall.name, \n result: JSON.stringify({ error: String(error) }) \n });\n }\n }\n \n // Add tool results to conversation\n for (const tr of toolResults) {\n conversation.addToolResult(tr.id, tr.result, tr.name);\n }\n \n // Build continuation prompt with full context\n const continuationPrompt = `Tool results received. Here's a reminder of your task:\n\nORIGINAL TRANSCRIPT (process this):\n--- BEGIN TRANSCRIPT ---\n${transcriptText}\n--- END TRANSCRIPT ---\n\nCorrections made so far: ${state.resolvedEntities.size > 0 ? Array.from(state.resolvedEntities.entries()).map(([k, v]) => `${k} -> ${v}`).join(', ') : 'none yet'}\n\nContinue analyzing. If you need more information, use the tools. 
\nWhen you're done with tool calls, output the COMPLETE corrected transcript as Markdown.\nDo NOT summarize - include ALL original content with corrections applied.\n\nCRITICAL REMINDER: Your response must contain ONLY the transcript text. Do NOT include any commentary, explanations, or processing notes - those will leak into the user-facing document.`;\n\n conversation.addUserMessage(continuationPrompt);\n \n // Continue conversation with full context\n response = await reasoning.complete({\n systemPrompt,\n prompt: continuationPrompt,\n tools: registry.getToolDefinitions(),\n });\n \n // Track token usage\n if (response.usage) {\n totalTokens += response.usage.totalTokens;\n }\n \n // Add assistant response to conversation\n if (response.toolCalls && response.toolCalls.length > 0) {\n conversation.addAssistantWithToolCalls(\n response.content,\n toRiotToolCalls(response.toolCalls)\n );\n } else {\n conversation.addAssistantMessage(response.content);\n }\n }\n \n // Extract final corrected text\n if (response.content && response.content.length > 50) {\n // Clean the response to remove any leaked internal processing\n const cleanedContent = cleanResponseContent(response.content);\n \n if (cleanedContent !== response.content) {\n const removedChars = response.content.length - cleanedContent.length;\n logger.warn('Removed leaked internal processing from response (%d -> %d chars, removed %d chars)',\n response.content.length, cleanedContent.length, removedChars);\n \n // Detect severe corruption (>10% of content removed or suspicious patterns)\n const corruptionRatio = removedChars / response.content.length;\n const hasSuspiciousUnicode = /[\\u0530-\\u058F\\u0E00-\\u0E7F\\u4E00-\\u9FFF\\u0A80-\\u0AFF\\u0C00-\\u0C7F]/.test(response.content);\n \n if (corruptionRatio > 0.1 || hasSuspiciousUnicode) {\n logger.error('SEVERE CORRUPTION DETECTED in LLM response (%.1f%% removed, suspicious unicode: %s)',\n corruptionRatio * 100, hasSuspiciousUnicode);\n logger.error('Raw 
response preview (first 500 chars): %s', \n response.content.substring(0, 500).replace(/\\n/g, '\\\\n'));\n }\n }\n \n state.correctedText = cleanedContent;\n state.confidence = 0.9;\n logger.debug('Final transcript generated: %d characters', cleanedContent.length);\n } else {\n // Model didn't produce content - ask for it explicitly\n logger.debug('Model did not produce transcript, requesting explicitly...');\n \n const finalRequest = `Please output the COMPLETE corrected transcript now.\n\nORIGINAL:\n${transcriptText}\n\nCORRECTIONS TO APPLY:\n${state.resolvedEntities.size > 0 ? Array.from(state.resolvedEntities.entries()).map(([k, v]) => `- \"${k}\" should be \"${v}\"`).join('\\n') : 'None identified'}\n\nOutput the full transcript as clean Markdown. Do NOT summarize.\n\nCRITICAL: Your response must contain ONLY the corrected transcript text - absolutely no commentary, tool information, or explanations.`;\n\n const finalResponse = await reasoning.complete({\n systemPrompt,\n prompt: finalRequest,\n });\n \n // Track token usage\n if (finalResponse.usage) {\n totalTokens += finalResponse.usage.totalTokens;\n }\n \n // Clean the final response as well\n const cleanedFinalContent = cleanResponseContent(finalResponse.content || transcriptText);\n \n if (cleanedFinalContent !== finalResponse.content) {\n const removedChars = (finalResponse.content?.length || 0) - cleanedFinalContent.length;\n logger.warn('Removed leaked internal processing from final response (%d -> %d chars, removed %d chars)',\n finalResponse.content?.length || 0, cleanedFinalContent.length, removedChars);\n \n // Detect severe corruption\n const corruptionRatio = removedChars / (finalResponse.content?.length || 1);\n const hasSuspiciousUnicode = /[\\u0530-\\u058F\\u0E00-\\u0E7F\\u4E00-\\u9FFF\\u0A80-\\u0AFF\\u0C00-\\u0C7F]/.test(finalResponse.content || '');\n \n if (corruptionRatio > 0.1 || hasSuspiciousUnicode) {\n logger.error('SEVERE CORRUPTION DETECTED in final LLM response (%.1f%% removed, 
suspicious unicode: %s)',\n corruptionRatio * 100, hasSuspiciousUnicode);\n logger.error('Raw response preview (first 500 chars): %s', \n (finalResponse.content || '').substring(0, 500).replace(/\\n/g, '\\\\n'));\n }\n }\n \n state.correctedText = cleanedFinalContent;\n state.confidence = 0.8;\n }\n \n } catch (error) {\n logger.error('Agentic processing failed', { error });\n // Fall back to original text\n state.correctedText = transcriptText;\n state.confidence = 0.5;\n }\n \n return {\n enhancedText: state.correctedText,\n state,\n toolsUsed: [...new Set(toolsUsed)],\n iterations,\n totalTokens: totalTokens > 0 ? totalTokens : undefined,\n contextChanges: contextChanges.length > 0 ? contextChanges : undefined,\n };\n };\n \n return { process };\n};\n\n"],"names":["Logging.getLogger","Registry.create"],"mappings":";;;;AAmCA,MAAM,eAAA,GAAkB,CAAC,SAAA,KAAuG;AAC5H,EAAA,OAAO,SAAA,CAAU,IAAI,CAAA,EAAA,MAAO;AAAA,IACxB,IAAI,EAAA,CAAG,EAAA;AAAA,IACP,IAAA,EAAM,UAAA;AAAA,IACN,QAAA,EAAU;AAAA,MACN,MAAM,EAAA,CAAG,IAAA;AAAA,MACT,SAAA,EAAW,IAAA,CAAK,SAAA,CAAU,EAAA,CAAG,SAAS;AAAA;AAC1C,GACJ,CAAE,CAAA;AACN,CAAA;AAMA,MAAM,oBAAA,GAAuB,CAAC,OAAA,KAA4B;AAGtD,EAAA,IAAI,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,qFAAA,EAAuF,EAAE,CAAA;AAKvH,EAAA,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,yDAAA,EAA2D,EAAE,CAAA;AAGvF,EAAA,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,yBAAA,EAA2B,EAAE,CAAA;AAKvD,EAAA,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,wBAAA,EAA0B,EAAE,CAAA;AAKtD,EAAA,MAAM,WAAA,GAAc,0CAAA;AACpB,EAAA,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,WAAA,EAAa,EAAE,CAAA;AAKzC,EAAA,MAAM,sBAAA,GAAyB,8DAAA;AAC/B,EAAA,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,sBAAA,EAAwB,EAAE,CAAA;AAIpD,EAAA,MAAM,KAAA,GAAQ,OAAA,CAAQ,KAAA,CAAM,IAAI,CAAA;AAChC,EAAA,IAAI,UAAA,GAAa,CAAA;AAGjB,EAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,KAAA,CAAM,QAAQ,CAAA,EAAA,EAAK;AACnC,IAAA,MAAM,IAAA,GAAO,KAAA,CAAM,CAAC,CAAA,CAAE,IAAA,EAAK;AAG3B,IAAA,IAAI,SAAS,EAAA,EAAI;AAGjB,IAAA,MAAM,eAAe,mHAAA,CAAoH,IAAA,CAAK,IAAI,CAAA,IAC3I,IAAA,CAAK,SAAS,MAAM,CAAA,IACpB,KAAK,QAAA,CAAS,IAAI,KAClB,IAAA,CAAK,QAAA,CAAS,WAAW,
CAAA,IACzB,IAAA,CAAK,SAAS,WAAW,CAAA;AAEhC,IAAA,IAAI,CAAC,YAAA,EAAc;AAEf,MAAA,UAAA,GAAa,CAAA;AACb,MAAA;AAAA,IACJ;AAAA,EACJ;AAGA,EAAA,IAAI,aAAa,CAAA,EAAG;AAChB,IAAA,OAAA,GAAU,KAAA,CAAM,KAAA,CAAM,UAAU,CAAA,CAAE,KAAK,IAAI,CAAA;AAAA,EAC/C;AAGA,EAAA,OAAA,GAAU,OAAA,CAAQ,OAAA,CAAQ,SAAA,EAAW,MAAM,CAAA;AAE3C,EAAA,OAAO,QAAQ,IAAA,EAAK;AACxB,CAAA;AAEO,MAAM,MAAA,GAAS,CAClB,SAAA,EACA,GAAA,KACmB;AACnB,EAAA,MAAM,MAAA,GAASA,SAAQ,EAAU;AACjC,EAAA,MAAM,QAAA,GAAWC,QAAS,CAAO,GAAG,CAAA;AAEpC,EAAA,MAAM,OAAA,GAAU,OAAO,cAAA,KAA2B;AAC9C,IAAA,MAAM,KAAA,GAA4B;AAAA,MAC9B,YAAA,EAAc,cAAA;AAAA,MACd,aAAA,EAAe,cAAA;AAAA,MACf,iBAAiB,EAAC;AAAA,MAClB,gBAAA,sBAAsB,GAAA,EAAI;AAAA,MAC1B,kBAAA,EAAoB;AAAA,QAChB,MAAA,sBAAY,GAAA,EAAI;AAAA,QAChB,QAAA,sBAAc,GAAA,EAAI;AAAA,QAClB,KAAA,sBAAW,GAAA,EAAI;AAAA,QACf,SAAA,sBAAe,GAAA;AAAI,OACvB;AAAA,MACA,UAAA,EAAY;AAAA,KAChB;AAGA,IAAA,GAAA,CAAI,mBAAmB,KAAA,CAAM,gBAAA;AAE7B,IAAA,MAAM,YAAsB,EAAC;AAC7B,IAAA,MAAM,iBAAwC,EAAC;AAC/C,IAAA,IAAI,UAAA,GAAa,CAAA;AACjB,IAAA,IAAI,WAAA,GAAc,CAAA;AAClB,IAAA,MAAM,aAAA,GAAgB,EAAA;AAGtB,IAAA,MAAM,YAAA,GAAe,oBAAoB,MAAA,CAAO,EAAE,OAAO,QAAA,EAAU,EAC9D,eAAA,CAAgB;AAAA,MACb,GAAA,EAAK,GAAA;AAAA;AAAA,MACL,kBAAA,EAAoB,GAAA;AAAA;AAAA,MACpB,QAAA,EAAU,WAAA;AAAA;AAAA,MACV,gBAAA,EAAkB,UAAA;AAAA;AAAA,MAClB,cAAA,EAAgB,IAAA;AAAA;AAAA,MAChB,cAAA,EAAgB;AAAA;AAAA,KACnB,CAAA;AAGL,IAAA,MAAM,YAAA,GAAe,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,wDAAA,CAAA;AA8BrB,IAAA,YAAA,CAAa,iBAAiB,YAAY,CAAA;AAG1C,IAAA,MAAM,aAAA,GAAgB,CAAA;;AAAA;AAAA,EAG5B,cAAc;AAAA;;AAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA,8DAAA,CAAA;AAYR,IAAA,YAAA,CAAa,eAAe,aAAa,CAAA;AAEzC,IAAA,IAAI;AAEA,MAAA,MAAA,CAAO,MAAM,qEAAqE,CAAA;AAClF,MAAA,IAAI,QAAA,GAAW,MAAM,SAAA,CAAU,QAAA,CAAS;AAAA,QACpC,YAAA;AAAA,QACA,MAAA,EAAQ,aAAA;AAAA,QACR,KAAA,EAAO,SAAS,kBAAA,EAAmB;AAAA,QACnC;AAAA,OACH,CAAA;AAGD,MAAA,IAAI,SAAS,KAAA,EAAO;AAChB,QAAA,WAAA,IAAe,SAAS,KAAA,CAAM,WAAA;AAAA,MAClC;AAGA,MAAA,IAAI,QAAA,CAAS,SAAA,IAAa,QAAA,CAAS,SAAA,CAAU,SAAS,CAA
A,EAAG;AACrD,QAAA,YAAA,CAAa,yBAAA;AAAA,UACT,QAAA,CAAS,OAAA;AAAA,UACT,eAAA,CAAgB,SAAS,SAAS;AAAA,SACtC;AAAA,MACJ,CAAA,MAAO;AACH,QAAA,YAAA,CAAa,mBAAA,CAAoB,SAAS,OAAO,CAAA;AAAA,MACrD;AAGA,MAAA,OAAO,SAAS,SAAA,IAAa,QAAA,CAAS,UAAU,MAAA,GAAS,CAAA,IAAK,aAAa,aAAA,EAAe;AACtF,QAAA,UAAA,EAAA;AACA,QAAA,MAAA,CAAO,KAAA,CAAM,2CAAA,EAA6C,UAAA,EAAY,QAAA,CAAS,UAAU,MAAM,CAAA;AAG/F,QAAA,MAAM,cAAmE,EAAC;AAG1E,QAAA,KAAA,MAAW,QAAA,IAAY,SAAS,SAAA,EAAW;AACvC,UAAA,MAAA,CAAO,KAAA,CAAM,oBAAA,EAAsB,QAAA,CAAS,IAAI,CAAA;AAChD,UAAA,SAAA,CAAU,IAAA,CAAK,SAAS,IAAI,CAAA;AAE5B,UAAA,IAAI;AACA,YAAA,MAAM,SAAS,MAAM,QAAA,CAAS,YAAY,QAAA,CAAS,IAAA,EAAM,SAAS,SAAS,CAAA;AAG3E,YAAA,MAAM,SAAA,GAAY,IAAA,CAAK,SAAA,CAAU,MAAA,CAAO,IAAA,IAAQ,EAAE,OAAA,EAAS,MAAA,CAAO,OAAA,EAAS,OAAA,EAAS,MAAA,CAAO,KAAA,IAAS,MAAM,CAAA;AAC1G,YAAA,WAAA,CAAY,IAAA,CAAK,EAAE,EAAA,EAAI,QAAA,CAAS,EAAA,EAAI,MAAM,QAAA,CAAS,IAAA,EAAM,MAAA,EAAQ,SAAA,EAAW,CAAA;AAE5E,YAAA,MAAA,CAAO,MAAM,oBAAA,EAAsB,QAAA,CAAS,MAAM,MAAA,CAAO,OAAA,GAAU,YAAY,QAAQ,CAAA;AAgjBvF,YAAA,IAAI,MAAA,CAAO,MAAM,MAAA,EAAQ;AACrB,cAAA,KAAA,CAAM,gBAAA,CAAiB,IAAI,MAAA,CAAO,IAAA,CAAK,OAAO,IAAA,EAAM,MAAA,CAAO,KAAK,UAAU,CAAA;AAE1E,cAAA,KAAA,CAAM,mBAAmB,MAAA,CAAO,GAAA,CAAI,MAAA,CAAO,IAAA,CAAK,OAAO,EAAE,CAAA;AAAA,YAC7D;AAGA,YAAA,IAAI,MAAA,CAAO,MAAM,IAAA,EAAM;AACnB,cAAA,KAAA,CAAM,mBAAmB,KAAA,CAAM,GAAA,CAAI,MAAA,CAAO,IAAA,CAAK,KAAK,EAAE,CAAA;AAAA,YAC1D;AAGA,YAAA,IAAI,MAAA,CAAO,MAAM,OAAA,EAAS;AACtB,cAAA,KAAA,CAAM,mBAAmB,SAAA,CAAU,GAAA,CAAI,MAAA,CAAO,IAAA,CAAK,QAAQ,EAAE,CAAA;AAAA,YACjE;AAGA,YAAA,IAAI,MAAA,CAAO,IAAA,EAAM,eAAA,EAAiB,WAAA,EAAa;AAC3C,cAAA,MAAM,eAAA,GAAkB,OAAO,IAAA,CAAK,eAAA;AACpC,cAAA,KAAA,CAAM,aAAA,GAAgB;AAAA,gBAClB,WAAW,eAAA,CAAgB,SAAA;AAAA,gBAC3B,aAAa,eAAA,CAAgB,WAAA;AAAA,gBAC7B,UAAA,EAAY,gBAAgB,UAAA,IAAc,CAAA;AAAA,gBAC1C,SAAS,eAAA,CAAgB,OAAA;AAAA,gBACzB,SAAA,EAAW,gBAAgB,SAAA,IAAa;AAAA,eAC5C;AAGA,cAAA,IAAI,gBAAgB,SAAA,EAAW;AAC3B,gBAAA,KAAA,CAAM,kBAAA,CAAmB,QAAA,CAAS,GAAA,CAAI,eAAA,CAAgB,SAAS,CAAA;AAAA,cACnE;AAAA,YACJ;AAGA,YAAA,IAAI,OAAO,IAAA,EAAM,KAAA,IAAS,OAAO,IAAA,EAAM,OAAA,EAAS,S
AAS,WAAA,EAAa;AAClE,cAAA,MAAM,OAAA,GAAU,OAAO,IAAA,CAAK,OAAA;AAC5B,cAAA,KAAA,CAAM,aAAA,GAAgB;AAAA,gBAClB,WAAW,OAAA,CAAQ,EAAA;AAAA,gBACnB,WAAA,EAAa;AAAA,kBACT,IAAA,EAAM,QAAQ,OAAA,CAAQ,WAAA;AAAA,kBACtB,SAAA,EAAW,OAAA,CAAQ,OAAA,CAAQ,SAAA,IAAa;AAAA,iBAC5C;AAAA,gBACA,UAAA,EAAY,CAAA;AAAA,gBACZ,OAAA,EAAS,CAAC,EAAE,IAAA,EAAM,iBAAA,EAAmB,OAAO,OAAA,CAAQ,IAAA,EAAM,MAAA,EAAQ,CAAA,EAAK,CAAA;AAAA,gBACvE,WAAW,CAAA,iBAAA,EAAoB,OAAA,CAAQ,IAAI,CAAA,kBAAA,EAAqB,OAAA,CAAQ,QAAQ,WAAW,CAAA;AAAA,eAC/F;AACA,cAAA,MAAA,CAAO,KAAA;AAAA,gBAAM,gDAAA;AAAA,gBACT,OAAA,CAAQ,IAAA;AAAA,gBAAM,QAAQ,OAAA,CAAQ;AAAA,eAAW;AAG7C,cAAA,KAAA,CAAM,kBAAA,CAAmB,QAAA,CAAS,GAAA,CAAI,OAAA,CAAQ,EAAE,CAAA;AAAA,YACpD;AAAA,UAEJ,SAAS,KAAA,EAAO;AACZ,YAAA,MAAA,CAAO,MAAM,uBAAA,EAAyB,EAAE,MAAM,QAAA,CAAS,IAAA,EAAM,OAAO,CAAA;AACpE,YAAA,WAAA,CAAY,IAAA,CAAK;AAAA,cACb,IAAI,QAAA,CAAS,EAAA;AAAA,cACb,MAAM,QAAA,CAAS,IAAA;AAAA,cACf,MAAA,EAAQ,KAAK,SAAA,CAAU,EAAE,OAAO,MAAA,CAAO,KAAK,GAAG;AAAA,aAClD,CAAA;AAAA,UACL;AAAA,QACJ;AAGA,QAAA,KAAA,MAAW,MAAM,WAAA,EAAa;AAC1B,UAAA,YAAA,CAAa,cAAc,EAAA,CAAG,EAAA,EAAI,EAAA,CAAG,MAAA,EAAQ,GAAG,IAAI,CAAA;AAAA,QACxD;AAGA,QAAA,MAAM,kBAAA,GAAqB,CAAA;;AAAA;AAAA;AAAA,EAIzC,cAAc;AAAA;;AAAA,yBAAA,EAGW,KAAA,CAAM,gBAAA,CAAiB,IAAA,GAAO,CAAA,GAAI,KAAA,CAAM,IAAA,CAAK,KAAA,CAAM,gBAAA,CAAiB,OAAA,EAAS,CAAA,CAAE,GAAA,CAAI,CAAC,CAAC,CAAA,EAAG,CAAC,CAAA,KAAM,CAAA,EAAG,CAAC,CAAA,IAAA,EAAO,CAAC,CAAA,CAAE,CAAA,CAAE,IAAA,CAAK,IAAI,CAAA,GAAI,UAAU;;AAAA;AAAA;AAAA;;AAAA,yLAAA,CAAA;AAQjJ,QAAA,YAAA,CAAa,eAAe,kBAAkB,CAAA;AAG9C,QAAA,QAAA,GAAW,MAAM,UAAU,QAAA,CAAS;AAAA,UAChC,YAAA;AAAA,UACA,MAAA,EAAQ,kBAAA;AAAA,UACR,KAAA,EAAO,SAAS,kBAAA;AAAmB,SACtC,CAAA;AAGD,QAAA,IAAI,SAAS,KAAA,EAAO;AAChB,UAAA,WAAA,IAAe,SAAS,KAAA,CAAM,WAAA;AAAA,QAClC;AAGA,QAAA,IAAI,QAAA,CAAS,SAAA,IAAa,QAAA,CAAS,SAAA,CAAU,SAAS,CAAA,EAAG;AACrD,UAAA,YAAA,CAAa,yBAAA;AAAA,YACT,QAAA,CAAS,OAAA;AAAA,YACT,eAAA,CAAgB,SAAS,SAAS;AAAA,WACtC;AAAA,QACJ,CAAA,MAAO;AACH,UAAA,YAAA,CAAa,mBAAA,CAAoB,SAAS,OAAO,CAAA;AAAA,QACrD;AAAA,MACJ;AAGA,MAAA,IAAI,QAAA,CAAS,OAAA,IAAW,QAAA,CAAS,OAAA,CAAQ,SAA
S,EAAA,EAAI;AAElD,QAAA,MAAM,cAAA,GAAiB,oBAAA,CAAqB,QAAA,CAAS,OAAO,CAAA;AAE5D,QAAA,IAAI,cAAA,KAAmB,SAAS,OAAA,EAAS;AACrC,UAAA,MAAM,YAAA,GAAe,QAAA,CAAS,OAAA,CAAQ,MAAA,GAAS,cAAA,CAAe,MAAA;AAC9D,UAAA,MAAA,CAAO,IAAA;AAAA,YAAK,qFAAA;AAAA,YACR,SAAS,OAAA,CAAQ,MAAA;AAAA,YAAQ,cAAA,CAAe,MAAA;AAAA,YAAQ;AAAA,WAAY;AAGhE,UAAA,MAAM,eAAA,GAAkB,YAAA,GAAe,QAAA,CAAS,OAAA,CAAQ,MAAA;AACxD,UAAA,MAAM,oBAAA,GAAuB,qEAAA,CAAsE,IAAA,CAAK,QAAA,CAAS,OAAO,CAAA;AAExH,UAAA,IAAI,eAAA,GAAkB,OAAO,oBAAA,EAAsB;AAC/C,YAAA,MAAA,CAAO,KAAA;AAAA,cAAM,qFAAA;AAAA,cACT,eAAA,GAAkB,GAAA;AAAA,cAAK;AAAA,aAAoB;AAC/C,YAAA,MAAA,CAAO,KAAA;AAAA,cAAM,4CAAA;AAAA,cACT,QAAA,CAAS,QAAQ,SAAA,CAAU,CAAA,EAAG,GAAG,CAAA,CAAE,OAAA,CAAQ,OAAO,KAAK;AAAA,aAAC;AAAA,UAChE;AAAA,QACJ;AAEA,QAAA,KAAA,CAAM,aAAA,GAAgB,cAAA;AACtB,QAAA,KAAA,CAAM,UAAA,GAAa,GAAA;AACnB,QAAA,MAAA,CAAO,KAAA,CAAM,2CAAA,EAA6C,cAAA,CAAe,MAAM,CAAA;AAAA,MACnF,CAAA,MAAO;AAEH,QAAA,MAAA,CAAO,MAAM,4DAA4D,CAAA;AAEzE,QAAA,MAAM,YAAA,GAAe,CAAA;;AAAA;AAAA,EAGnC,cAAc;;AAAA;AAAA,EAGd,KAAA,CAAM,gBAAA,CAAiB,IAAA,GAAO,CAAA,GAAI,KAAA,CAAM,IAAA,CAAK,KAAA,CAAM,gBAAA,CAAiB,OAAA,EAAS,CAAA,CAAE,GAAA,CAAI,CAAC,CAAC,CAAA,EAAG,CAAC,CAAA,KAAM,CAAA,GAAA,EAAM,CAAC,CAAA,aAAA,EAAgB,CAAC,CAAA,CAAA,CAAG,CAAA,CAAE,IAAA,CAAK,IAAI,CAAA,GAAI,iBAAiB;;AAAA;;AAAA,sIAAA,CAAA;AAM5I,QAAA,MAAM,aAAA,GAAgB,MAAM,SAAA,CAAU,QAAA,CAAS;AAAA,UAC3C,YAAA;AAAA,UACA,MAAA,EAAQ;AAAA,SACX,CAAA;AAGD,QAAA,IAAI,cAAc,KAAA,EAAO;AACrB,UAAA,WAAA,IAAe,cAAc,KAAA,CAAM,WAAA;AAAA,QACvC;AAGA,QAAA,MAAM,mBAAA,GAAsB,oBAAA,CAAqB,aAAA,CAAc,OAAA,IAAW,cAAc,CAAA;AAExF,QAAA,IAAI,mBAAA,KAAwB,cAAc,OAAA,EAAS;AAC/C,UAAA,MAAM,YAAA,GAAA,CAAgB,aAAA,CAAc,OAAA,EAAS,MAAA,IAAU,KAAK,mBAAA,CAAoB,MAAA;AAChF,UAAA,MAAA,CAAO,IAAA;AAAA,YAAK,2FAAA;AAAA,YACR,aAAA,CAAc,SAAS,MAAA,IAAU,CAAA;AAAA,YAAG,mBAAA,CAAoB,MAAA;AAAA,YAAQ;AAAA,WAAY;AAGhF,UAAA,MAAM,eAAA,GAAkB,YAAA,IAAgB,aAAA,CAAc,OAAA,EAAS,MAAA,IAAU,CAAA,CAAA;AACzE,UAAA,MAAM,oBAAA,GAAuB,qEAAA,CAAsE,IAAA,CAAK,aAAA,CAAc,WAAW,EAAE,CAAA;AAEnI,UAAA,IAAI,eAAA,GAAkB,OAAO,oBAAA,EAAsB;AAC/C,YAAA,MAAA,CAAO,KAAA;AAAA,cAAM,2FAAA;
AAAA,cACT,eAAA,GAAkB,GAAA;AAAA,cAAK;AAAA,aAAoB;AAC/C,YAAA,MAAA,CAAO,KAAA;AAAA,cAAM,4CAAA;AAAA,cAAA,CACR,aAAA,CAAc,WAAW,EAAA,EAAI,SAAA,CAAU,GAAG,GAAG,CAAA,CAAE,OAAA,CAAQ,KAAA,EAAO,KAAK;AAAA,aAAC;AAAA,UAC7E;AAAA,QACJ;AAEA,QAAA,KAAA,CAAM,aAAA,GAAgB,mBAAA;AACtB,QAAA,KAAA,CAAM,UAAA,GAAa,GAAA;AAAA,MACvB;AAAA,IAEJ,SAAS,KAAA,EAAO;AACZ,MAAA,MAAA,CAAO,KAAA,CAAM,2BAAA,EAA6B,EAAE,KAAA,EAAO,CAAA;AAEnD,MAAA,KAAA,CAAM,aAAA,GAAgB,cAAA;AACtB,MAAA,KAAA,CAAM,UAAA,GAAa,GAAA;AAAA,IACvB;AAEA,IAAA,OAAO;AAAA,MACH,cAAc,KAAA,CAAM,aAAA;AAAA,MACpB,KAAA;AAAA,MACA,WAAW,CAAC,GAAG,IAAI,GAAA,CAAI,SAAS,CAAC,CAAA;AAAA,MACjC,UAAA;AAAA,MACA,WAAA,EAAa,WAAA,GAAc,CAAA,GAAI,WAAA,GAAc,MAAA;AAAA,MAC7C,cAAA,EAAgB,cAAA,CAAe,MAAA,GAAS,CAAA,GAAI,cAAA,GAAiB;AAAA,KACjE;AAAA,EACJ,CAAA;AAEA,EAAA,OAAO,EAAE,OAAA,EAAQ;AACrB;;;;"}
|
package/dist/index25.js
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import { ToolRegistry } from '@kjerneverk/riotprompt';
|
|
2
|
+
import { create as create$1 } from './index42.js';
|
|
3
|
+
import { create as create$2 } from './index43.js';
|
|
4
|
+
import { create as create$3 } from './index44.js';
|
|
5
|
+
import { create as create$4 } from './index45.js';
|
|
6
|
+
import { create as create$5 } from './index46.js';
|
|
7
|
+
|
|
8
|
+
/**
 * Adapt a protokoll transcription tool to the RiotPrompt tool shape.
 * Forwards execution to the underlying tool unchanged and tags the
 * entry with an optional category and a flat "cheap" cost.
 */
const toRiotTool = (tool, category) => {
    // Delegate execution straight through; the result envelope is the
    // tool's own ToolResult, untouched.
    const forwardExecution = async (params) => {
        return await tool.execute(params);
    };
    return {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters,
        category,
        cost: "cheap",
        execute: forwardExecution
    };
};
|
|
19
|
+
/**
 * Build the agentic tool registry.
 *
 * Instantiates the five transcription tools, registers each with a
 * RiotPrompt ToolRegistry (used for OpenAI-format export), and exposes
 * lookup/definition/execution helpers over them.
 *
 * @param ctx - tool context threaded into each tool factory
 * @returns registry instance with getTools, getToolDefinitions,
 *          executeTool, and getRiotRegistry
 */
const create = (ctx) => {
    // Pair each tool with its category up front instead of registering
    // by positional index (tools[0]..tools[4]); this way adding or
    // reordering tools cannot silently attach the wrong category.
    const entries = [
        { tool: create$1(ctx), category: "lookup" },       // lookup-person
        { tool: create$2(ctx), category: "lookup" },       // lookup-project
        { tool: create$3(ctx), category: "verification" }, // verify-spelling
        { tool: create$4(ctx), category: "routing" },      // route-note
        { tool: create$5(), category: "storage" }          // store-context
    ];
    const tools = entries.map((e) => e.tool);
    const toolMap = new Map(tools.map((t) => [t.name, t]));
    const riotRegistry = ToolRegistry.create();
    for (const { tool, category } of entries) {
        riotRegistry.register(toRiotTool(tool, category));
    }
    return {
        getTools: () => tools,
        // Use RiotPrompt's OpenAI format export, flattened to the
        // { name, description, parameters } shape the LLM client expects.
        getToolDefinitions: () => riotRegistry.toOpenAIFormat().map((t) => ({
            name: t.function.name,
            description: t.function.description,
            parameters: t.function.parameters
        })),
        // Resolve and run a tool by name; unknown names yield a failed
        // ToolResult instead of throwing.
        executeTool: async (name, args) => {
            const tool = toolMap.get(name);
            if (!tool) {
                return {
                    success: false,
                    error: `Unknown tool: ${name}`
                };
            }
            return tool.execute(args);
        },
        getRiotRegistry: () => riotRegistry
    };
};
|
|
55
|
+
|
|
56
|
+
export { create };
|
|
57
|
+
//# sourceMappingURL=index25.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index25.js","sources":["../src/agentic/registry.ts"],"sourcesContent":["/**\n * Tool Registry\n * \n * Manages available tools for agentic transcription.\n * Uses RiotPrompt's ToolRegistry for format conversion while\n * preserving protokoll's custom ToolResult interface.\n */\n\nimport { ToolRegistry } from '@kjerneverk/riotprompt';\nimport type { Tool as RiotTool } from '@kjerneverk/riotprompt';\nimport { TranscriptionTool, ToolContext, ToolResult } from './types';\nimport * as LookupPerson from './tools/lookup-person';\nimport * as LookupProject from './tools/lookup-project';\nimport * as VerifySpelling from './tools/verify-spelling';\nimport * as RouteNote from './tools/route-note';\nimport * as StoreContext from './tools/store-context';\n\nexport interface RegistryInstance {\n getTools(): TranscriptionTool[];\n \n getToolDefinitions(): any[]; // For LLM API format (OpenAI compatible)\n \n executeTool(name: string, args: any): Promise<ToolResult>;\n \n /** Get the underlying RiotPrompt ToolRegistry */\n getRiotRegistry(): ToolRegistry;\n}\n\n/**\n * Convert a protokoll TranscriptionTool to a RiotPrompt Tool.\n * The execute function is adapted to work with both systems.\n */\nconst toRiotTool = (tool: TranscriptionTool, category?: string): RiotTool => ({\n name: tool.name,\n description: tool.description,\n parameters: tool.parameters as RiotTool['parameters'],\n category,\n cost: 'cheap',\n execute: async (params: any) => {\n const result = await tool.execute(params);\n return result;\n },\n});\n\nexport const create = (ctx: ToolContext): RegistryInstance => {\n // Create protokoll tools\n const tools: TranscriptionTool[] = [\n LookupPerson.create(ctx),\n LookupProject.create(ctx),\n VerifySpelling.create(ctx),\n RouteNote.create(ctx),\n StoreContext.create(ctx),\n ];\n \n const toolMap = new Map(tools.map(t => [t.name, t]));\n \n // Create RiotPrompt ToolRegistry for format conversion\n const riotRegistry = ToolRegistry.create();\n \n // 
Register tools with categories\n riotRegistry.register(toRiotTool(tools[0], 'lookup')); // lookup_person\n riotRegistry.register(toRiotTool(tools[1], 'lookup')); // lookup_project\n riotRegistry.register(toRiotTool(tools[2], 'verification')); // verify_spelling\n riotRegistry.register(toRiotTool(tools[3], 'routing')); // route_note\n riotRegistry.register(toRiotTool(tools[4], 'storage')); // store_context\n \n return {\n getTools: () => tools,\n \n // Use RiotPrompt's OpenAI format export\n getToolDefinitions: () => riotRegistry.toOpenAIFormat().map(t => ({\n name: t.function.name,\n description: t.function.description,\n parameters: t.function.parameters,\n })),\n \n executeTool: async (name: string, args: any): Promise<ToolResult> => {\n const tool = toolMap.get(name);\n if (!tool) {\n return {\n success: false,\n error: `Unknown tool: ${name}`,\n };\n }\n return tool.execute(args);\n },\n \n getRiotRegistry: () => riotRegistry,\n };\n};\n\n"],"names":["LookupPerson.create","LookupProject.create","VerifySpelling.create","RouteNote.create","StoreContext.create"],"mappings":";;;;;;;AAgCA,MAAM,UAAA,GAAa,CAAC,IAAA,EAAyB,QAAA,MAAiC;AAAA,EAC1E,MAAM,IAAA,CAAK,IAAA;AAAA,EACX,aAAa,IAAA,CAAK,WAAA;AAAA,EAClB,YAAY,IAAA,CAAK,UAAA;AAAA,EACjB,QAAA;AAAA,EACA,IAAA,EAAM,OAAA;AAAA,EACN,OAAA,EAAS,OAAO,MAAA,KAAgB;AAC5B,IAAA,MAAM,MAAA,GAAS,MAAM,IAAA,CAAK,OAAA,CAAQ,MAAM,CAAA;AACxC,IAAA,OAAO,MAAA;AAAA,EACX;AACJ,CAAA,CAAA;AAEO,MAAM,MAAA,GAAS,CAAC,GAAA,KAAuC;AAE1D,EAAA,MAAM,KAAA,GAA6B;AAAA,IAC/BA,SAAoB,GAAG,CAAA;AAAA,IACvBC,SAAqB,GAAG,CAAA;AAAA,IACxBC,SAAsB,GAAG,CAAA;AAAA,IACzBC,SAAiB,GAAG,CAAA;AAAA,IACpBC,SAAuB;AAAA,GAC3B;AAEA,EAAA,MAAM,OAAA,GAAU,IAAI,GAAA,CAAI,KAAA,CAAM,GAAA,CAAI,CAAA,CAAA,KAAK,CAAC,CAAA,CAAE,IAAA,EAAM,CAAC,CAAC,CAAC,CAAA;AAGnD,EAAA,MAAM,YAAA,GAAe,aAAa,MAAA,EAAO;AAGzC,EAAA,YAAA,CAAa,SAAS,UAAA,CAAW,KAAA,CAAM,CAAC,CAAA,EAAG,QAAQ,CAAC,CAAA;AACpD,EAAA,YAAA,CAAa,SAAS,UAAA,CAAW,KAAA,CAAM,CAAC,CAAA,EAAG,QAAQ,CAAC,CAAA;AACpD,EAAA,YAAA,CAAa,SAAS,UAAA,CAAW,KAAA,CAAM,CAAC,CAAA,EAAG
,cAAc,CAAC,CAAA;AAC1D,EAAA,YAAA,CAAa,SAAS,UAAA,CAAW,KAAA,CAAM,CAAC,CAAA,EAAG,SAAS,CAAC,CAAA;AACrD,EAAA,YAAA,CAAa,SAAS,UAAA,CAAW,KAAA,CAAM,CAAC,CAAA,EAAG,SAAS,CAAC,CAAA;AAErD,EAAA,OAAO;AAAA,IACH,UAAU,MAAM,KAAA;AAAA;AAAA,IAGhB,oBAAoB,MAAM,YAAA,CAAa,cAAA,EAAe,CAAE,IAAI,CAAA,CAAA,MAAM;AAAA,MAC9D,IAAA,EAAM,EAAE,QAAA,CAAS,IAAA;AAAA,MACjB,WAAA,EAAa,EAAE,QAAA,CAAS,WAAA;AAAA,MACxB,UAAA,EAAY,EAAE,QAAA,CAAS;AAAA,KAC3B,CAAE,CAAA;AAAA,IAEF,WAAA,EAAa,OAAO,IAAA,EAAc,IAAA,KAAmC;AACjE,MAAA,MAAM,IAAA,GAAO,OAAA,CAAQ,GAAA,CAAI,IAAI,CAAA;AAC7B,MAAA,IAAI,CAAC,IAAA,EAAM;AACP,QAAA,OAAO;AAAA,UACH,OAAA,EAAS,KAAA;AAAA,UACT,KAAA,EAAO,iBAAiB,IAAI,CAAA;AAAA,SAChC;AAAA,MACJ;AACA,MAAA,OAAO,IAAA,CAAK,QAAQ,IAAI,CAAA;AAAA,IAC5B,CAAA;AAAA,IAEA,iBAAiB,MAAM;AAAA,GAC3B;AACJ;;;;"}
|
package/dist/index26.js
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { z } from 'zod';
|
|
2
|
+
|
|
3
|
+
/**
 * Schema describing one spelling/name correction applied to the transcript.
 */
const CorrectedEntitySchema = (() => {
    // Which kind of context entity the correction refers to.
    const entityKind = z.enum(["person", "project", "term", "company"]).describe("Entity type");
    // Confidence expressed on the closed unit interval [0, 1].
    const unitScore = z.number().min(0).max(1).describe("Confidence in correction");
    return z.object({
        original: z.string().describe("Original text from transcript"),
        corrected: z.string().describe("Corrected spelling/name"),
        type: entityKind,
        confidence: unitScore
    });
})();
|
|
9
|
+
/**
 * Schema for the note-routing decision produced during processing.
 */
const RoutingDecisionSchema = (() => {
    // Where the note should land on disk, plus the date-folder layout
    // (defaults to month-based folders).
    const destinationShape = z.object({
        path: z.string().describe("File destination path"),
        structure: z.enum(["none", "year", "month", "day"]).default("month")
    });
    // One piece of weighted evidence supporting the routing choice.
    const signalShape = z.object({
        type: z.enum(["explicit_phrase", "associated_person", "associated_company", "topic", "context_type"]),
        value: z.string(),
        weight: z.number()
    });
    return z.object({
        projectId: z.string().optional().describe("Matched project ID"),
        destination: destinationShape,
        confidence: z.number().min(0).max(1).describe("Confidence in routing"),
        signals: z.array(signalShape).optional(),
        reasoning: z.string().optional().describe("Why this destination was chosen")
    });
})();
|
|
23
|
+
/**
 * Schema listing the context-entity IDs referenced while processing.
 * Each field is an array of string IDs, one per entity category.
 */
const ReferencedEntitiesSchema = (() => {
    // All four fields share the same shape; only the description varies.
    const idList = (label) => z.array(z.string()).describe(`IDs of ${label} mentioned`);
    return z.object({
        people: idList("people"),
        projects: idList("projects"),
        terms: idList("terms"),
        companies: idList("companies")
    });
})();
|
|
29
|
+
/**
 * Schema for the result envelope returned by every transcription tool:
 * a success flag with optional payload, error text, and an optional
 * request for user input.
 */
const ToolResultSchema = (() => {
    // error and userPrompt share the optional-string shape.
    const maybeText = () => z.string().optional();
    return z.object({
        success: z.boolean(),
        data: z.any().optional(),
        error: maybeText(),
        needsUserInput: z.boolean().optional(),
        userPrompt: maybeText()
    });
})();
|
|
36
|
+
|
|
37
|
+
export { CorrectedEntitySchema, ReferencedEntitiesSchema, RoutingDecisionSchema, ToolResultSchema };
|
|
38
|
+
//# sourceMappingURL=index26.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index26.js","sources":["../src/agentic/types.ts"],"sourcesContent":["/**\n * Agentic Transcription Types\n *\n * Types for tool-based transcription enhancement.\n * Includes Zod schemas for validation and documentation.\n */\n\nimport { z } from 'zod';\nimport * as Context from '@redaksjon/context';\nimport * as Routing from '../routing';\n\n// ============================================================================\n// Zod Schemas for Structured Outputs\n// ============================================================================\n\n/**\n * Schema for a corrected entity (spelling/name correction)\n */\nexport const CorrectedEntitySchema = z.object({\n original: z.string().describe('Original text from transcript'),\n corrected: z.string().describe('Corrected spelling/name'),\n type: z.enum(['person', 'project', 'term', 'company']).describe('Entity type'),\n confidence: z.number().min(0).max(1).describe('Confidence in correction'),\n});\n\nexport type CorrectedEntity = z.infer<typeof CorrectedEntitySchema>;\n\n/**\n * Schema for routing decision\n * Note: Uses the existing ClassificationSignal types from routing/types.ts\n */\nexport const RoutingDecisionSchema = z.object({\n projectId: z.string().optional().describe('Matched project ID'),\n destination: z.object({\n path: z.string().describe('File destination path'),\n structure: z.enum(['none', 'year', 'month', 'day']).default('month'),\n }),\n confidence: z.number().min(0).max(1).describe('Confidence in routing'),\n signals: z.array(z.object({\n type: z.enum(['explicit_phrase', 'associated_person', 'associated_company', 'topic', 'context_type']),\n value: z.string(),\n weight: z.number(),\n })).optional(),\n reasoning: z.string().optional().describe('Why this destination was chosen'),\n});\n\nexport type RoutingDecision = z.infer<typeof RoutingDecisionSchema>;\n\n/**\n * Schema for referenced entities\n */\nexport const ReferencedEntitiesSchema = z.object({\n people: 
z.array(z.string()).describe('IDs of people mentioned'),\n projects: z.array(z.string()).describe('IDs of projects mentioned'),\n terms: z.array(z.string()).describe('IDs of terms mentioned'),\n companies: z.array(z.string()).describe('IDs of companies mentioned'),\n});\n\nexport type ReferencedEntitiesOutput = z.infer<typeof ReferencedEntitiesSchema>;\n\n/**\n * Schema for tool result data\n */\nexport const ToolResultSchema = z.object({\n success: z.boolean(),\n data: z.any().optional(),\n error: z.string().optional(),\n needsUserInput: z.boolean().optional(),\n userPrompt: z.string().optional(),\n});\n\n// ============================================================================\n// TypeScript Interfaces (existing, preserved for compatibility)\n// ============================================================================\n\nexport interface TranscriptionTool {\n name: string;\n description: string;\n \n parameters: Record<string, any>; // JSON Schema\n \n execute: (args: any) => Promise<ToolResult>;\n}\n\nexport interface ToolContext {\n transcriptText: string;\n audioDate: Date;\n sourceFile: string;\n contextInstance: Context.ContextInstance;\n routingInstance: Routing.RoutingInstance;\n interactiveMode: boolean;\n // interactiveInstance?: Interactive.InteractiveInstance; // Interactive moved to protokoll-cli\n resolvedEntities?: Map<string, string>; // Entities resolved during this session\n}\n\nexport interface ToolResult {\n success: boolean;\n \n data?: any;\n error?: string;\n needsUserInput?: boolean;\n userPrompt?: string;\n}\n\nexport interface ReferencedEntity {\n id: string;\n name: string;\n type: 'person' | 'project' | 'term' | 'company';\n}\n\nexport interface TranscriptionState {\n originalText: string;\n correctedText: string;\n unknownEntities: string[];\n resolvedEntities: Map<string, string>; // name mapping (old -> new)\n \n routeDecision?: RoutingDecision;\n confidence: number;\n \n // Track all entities referenced during processing\n 
referencedEntities: {\n people: Set<string>; // IDs of people mentioned\n projects: Set<string>; // IDs of projects mentioned\n terms: Set<string>; // IDs of terms mentioned\n companies: Set<string>; // IDs of companies mentioned\n };\n}\n\n"],"names":[],"mappings":";;AAkBO,MAAM,qBAAA,GAAwB,EAAE,MAAA,CAAO;AAAA,EAC1C,QAAA,EAAU,CAAA,CAAE,MAAA,EAAO,CAAE,SAAS,+BAA+B,CAAA;AAAA,EAC7D,SAAA,EAAW,CAAA,CAAE,MAAA,EAAO,CAAE,SAAS,yBAAyB,CAAA;AAAA,EACxD,IAAA,EAAM,CAAA,CAAE,IAAA,CAAK,CAAC,QAAA,EAAU,SAAA,EAAW,MAAA,EAAQ,SAAS,CAAC,CAAA,CAAE,QAAA,CAAS,aAAa,CAAA;AAAA,EAC7E,UAAA,EAAY,CAAA,CAAE,MAAA,EAAO,CAAE,GAAA,CAAI,CAAC,CAAA,CAAE,GAAA,CAAI,CAAC,CAAA,CAAE,QAAA,CAAS,0BAA0B;AAC5E,CAAC;AAQM,MAAM,qBAAA,GAAwB,EAAE,MAAA,CAAO;AAAA,EAC1C,WAAW,CAAA,CAAE,MAAA,GAAS,QAAA,EAAS,CAAE,SAAS,oBAAoB,CAAA;AAAA,EAC9D,WAAA,EAAa,EAAE,MAAA,CAAO;AAAA,IAClB,IAAA,EAAM,CAAA,CAAE,MAAA,EAAO,CAAE,SAAS,uBAAuB,CAAA;AAAA,IACjD,SAAA,EAAW,CAAA,CAAE,IAAA,CAAK,CAAC,MAAA,EAAQ,MAAA,EAAQ,OAAA,EAAS,KAAK,CAAC,CAAA,CAAE,OAAA,CAAQ,OAAO;AAAA,GACtE,CAAA;AAAA,EACD,UAAA,EAAY,CAAA,CAAE,MAAA,EAAO,CAAE,GAAA,CAAI,CAAC,CAAA,CAAE,GAAA,CAAI,CAAC,CAAA,CAAE,QAAA,CAAS,uBAAuB,CAAA;AAAA,EACrE,OAAA,EAAS,CAAA,CAAE,KAAA,CAAM,CAAA,CAAE,MAAA,CAAO;AAAA,IACtB,IAAA,EAAM,EAAE,IAAA,CAAK,CAAC,mBAAmB,mBAAA,EAAqB,oBAAA,EAAsB,OAAA,EAAS,cAAc,CAAC,CAAA;AAAA,IACpG,KAAA,EAAO,EAAE,MAAA,EAAO;AAAA,IAChB,MAAA,EAAQ,EAAE,MAAA;AAAO,GACpB,CAAC,CAAA,CAAE,QAAA,EAAS;AAAA,EACb,WAAW,CAAA,CAAE,MAAA,GAAS,QAAA,EAAS,CAAE,SAAS,iCAAiC;AAC/E,CAAC;AAOM,MAAM,wBAAA,GAA2B,EAAE,MAAA,CAAO;AAAA,EAC7C,MAAA,EAAQ,EAAE,KAAA,CAAM,CAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,yBAAyB,CAAA;AAAA,EAC9D,QAAA,EAAU,EAAE,KAAA,CAAM,CAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,2BAA2B,CAAA;AAAA,EAClE,KAAA,EAAO,EAAE,KAAA,CAAM,CAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,wBAAwB,CAAA;AAAA,EAC5D,SAAA,EAAW,EAAE,KAAA,CAAM,CAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,4BAA4B;AACxE,CAAC;AAOM,MAAM,gBAAA,GAAmB,EAAE,MAAA,CAAO;AAAA,EACrC,OAAA,EAAS,EAAE,OAAA,EAAQ;AAAA,EACnB,IAAA,EAAM,CAAA,CAAE,GAAA,EAAI,CAAE,QAAA,EAAS;AAAA,EACvB,KAAA,EAAO,CAAA,CAAE,MAAA,EAAO,CAAE,QAAA,EAAS;AAAA,
EAC3B,cAAA,EAAgB,CAAA,CAAE,OAAA,EAAQ,CAAE,QAAA,EAAS;AAAA,EACrC,UAAA,EAAY,CAAA,CAAE,MAAA,EAAO,CAAE,QAAA;AAC3B,CAAC;;;;"}
|