@redaksjon/protokoll-engine 0.1.1-dev.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (240)
  1. package/README.md +47 -0
  2. package/dist/agentic/executor.d.ts +21 -0
  3. package/dist/agentic/executor.d.ts.map +1 -0
  4. package/dist/agentic/index.d.ts +27 -0
  5. package/dist/agentic/index.d.ts.map +1 -0
  6. package/dist/agentic/registry.d.ts +11 -0
  7. package/dist/agentic/registry.d.ts.map +1 -0
  8. package/dist/agentic/tools/lookup-person.d.ts +3 -0
  9. package/dist/agentic/tools/lookup-person.d.ts.map +1 -0
  10. package/dist/agentic/tools/lookup-project.d.ts +3 -0
  11. package/dist/agentic/tools/lookup-project.d.ts.map +1 -0
  12. package/dist/agentic/tools/route-note.d.ts +3 -0
  13. package/dist/agentic/tools/route-note.d.ts.map +1 -0
  14. package/dist/agentic/tools/store-context.d.ts +3 -0
  15. package/dist/agentic/tools/store-context.d.ts.map +1 -0
  16. package/dist/agentic/tools/verify-spelling.d.ts +3 -0
  17. package/dist/agentic/tools/verify-spelling.d.ts.map +1 -0
  18. package/dist/agentic/types.d.ts +110 -0
  19. package/dist/agentic/types.d.ts.map +1 -0
  20. package/dist/constants.d.ts +98 -0
  21. package/dist/constants.d.ts.map +1 -0
  22. package/dist/feedback/analyzer.d.ts +13 -0
  23. package/dist/feedback/analyzer.d.ts.map +1 -0
  24. package/dist/feedback/decision-tracker.d.ts +14 -0
  25. package/dist/feedback/decision-tracker.d.ts.map +1 -0
  26. package/dist/feedback/handler.d.ts +14 -0
  27. package/dist/feedback/handler.d.ts.map +1 -0
  28. package/dist/feedback/index.d.ts +12 -0
  29. package/dist/feedback/index.d.ts.map +1 -0
  30. package/dist/feedback/types.d.ts +72 -0
  31. package/dist/feedback/types.d.ts.map +1 -0
  32. package/dist/index.d.ts +24 -0
  33. package/dist/index.d.ts.map +1 -0
  34. package/dist/index.js +32 -0
  35. package/dist/index.js.map +1 -0
  36. package/dist/index10.js +4 -0
  37. package/dist/index10.js.map +1 -0
  38. package/dist/index11.js +22 -0
  39. package/dist/index11.js.map +1 -0
  40. package/dist/index12.js +125 -0
  41. package/dist/index12.js.map +1 -0
  42. package/dist/index13.js +124 -0
  43. package/dist/index13.js.map +1 -0
  44. package/dist/index14.js +296 -0
  45. package/dist/index14.js.map +1 -0
  46. package/dist/index15.js +100 -0
  47. package/dist/index15.js.map +1 -0
  48. package/dist/index16.js +107 -0
  49. package/dist/index16.js.map +1 -0
  50. package/dist/index17.js +185 -0
  51. package/dist/index17.js.map +1 -0
  52. package/dist/index18.js +53 -0
  53. package/dist/index18.js.map +1 -0
  54. package/dist/index19.js +19 -0
  55. package/dist/index19.js.map +1 -0
  56. package/dist/index2.js +33 -0
  57. package/dist/index2.js.map +1 -0
  58. package/dist/index20.js +105 -0
  59. package/dist/index20.js.map +1 -0
  60. package/dist/index21.js +26 -0
  61. package/dist/index21.js.map +1 -0
  62. package/dist/index22.js +49 -0
  63. package/dist/index22.js.map +1 -0
  64. package/dist/index23.js +119 -0
  65. package/dist/index23.js.map +1 -0
  66. package/dist/index24.js +330 -0
  67. package/dist/index24.js.map +1 -0
  68. package/dist/index25.js +57 -0
  69. package/dist/index25.js.map +1 -0
  70. package/dist/index26.js +38 -0
  71. package/dist/index26.js.map +1 -0
  72. package/dist/index27.js +127 -0
  73. package/dist/index27.js.map +1 -0
  74. package/dist/index28.js +157 -0
  75. package/dist/index28.js.map +1 -0
  76. package/dist/index29.js +163 -0
  77. package/dist/index29.js.map +1 -0
  78. package/dist/index3.js +36 -0
  79. package/dist/index3.js.map +1 -0
  80. package/dist/index30.js +173 -0
  81. package/dist/index30.js.map +1 -0
  82. package/dist/index31.js +423 -0
  83. package/dist/index31.js.map +1 -0
  84. package/dist/index32.js +161 -0
  85. package/dist/index32.js.map +1 -0
  86. package/dist/index33.js +152 -0
  87. package/dist/index33.js.map +1 -0
  88. package/dist/index34.js +56 -0
  89. package/dist/index34.js.map +1 -0
  90. package/dist/index35.js +103 -0
  91. package/dist/index35.js.map +1 -0
  92. package/dist/index36.js +451 -0
  93. package/dist/index36.js.map +1 -0
  94. package/dist/index37.js +431 -0
  95. package/dist/index37.js.map +1 -0
  96. package/dist/index38.js +87 -0
  97. package/dist/index38.js.map +1 -0
  98. package/dist/index39.js +122 -0
  99. package/dist/index39.js.map +1 -0
  100. package/dist/index4.js +3 -0
  101. package/dist/index4.js.map +1 -0
  102. package/dist/index40.js +299 -0
  103. package/dist/index40.js.map +1 -0
  104. package/dist/index41.js +49 -0
  105. package/dist/index41.js.map +1 -0
  106. package/dist/index42.js +151 -0
  107. package/dist/index42.js.map +1 -0
  108. package/dist/index43.js +226 -0
  109. package/dist/index43.js.map +1 -0
  110. package/dist/index44.js +49 -0
  111. package/dist/index44.js.map +1 -0
  112. package/dist/index45.js +45 -0
  113. package/dist/index45.js.map +1 -0
  114. package/dist/index46.js +37 -0
  115. package/dist/index46.js.map +1 -0
  116. package/dist/index47.js +51 -0
  117. package/dist/index47.js.map +1 -0
  118. package/dist/index48.js +39 -0
  119. package/dist/index48.js.map +1 -0
  120. package/dist/index49.js +239 -0
  121. package/dist/index49.js.map +1 -0
  122. package/dist/index5.js +17 -0
  123. package/dist/index5.js.map +1 -0
  124. package/dist/index50.js +163 -0
  125. package/dist/index50.js.map +1 -0
  126. package/dist/index51.js +81 -0
  127. package/dist/index51.js.map +1 -0
  128. package/dist/index52.js +78 -0
  129. package/dist/index52.js.map +1 -0
  130. package/dist/index53.js +22 -0
  131. package/dist/index53.js.map +1 -0
  132. package/dist/index54.js +8 -0
  133. package/dist/index54.js.map +1 -0
  134. package/dist/index55.js +8 -0
  135. package/dist/index55.js.map +1 -0
  136. package/dist/index56.js +17 -0
  137. package/dist/index56.js.map +1 -0
  138. package/dist/index57.js +4 -0
  139. package/dist/index57.js.map +1 -0
  140. package/dist/index58.js +17 -0
  141. package/dist/index58.js.map +1 -0
  142. package/dist/index59.js +4 -0
  143. package/dist/index59.js.map +1 -0
  144. package/dist/index6.js +22 -0
  145. package/dist/index6.js.map +1 -0
  146. package/dist/index60.js +6 -0
  147. package/dist/index60.js.map +1 -0
  148. package/dist/index7.js +27 -0
  149. package/dist/index7.js.map +1 -0
  150. package/dist/index8.js +22 -0
  151. package/dist/index8.js.map +1 -0
  152. package/dist/index9.js +5 -0
  153. package/dist/index9.js.map +1 -0
  154. package/dist/logging.d.ts +7 -0
  155. package/dist/logging.d.ts.map +1 -0
  156. package/dist/output/index.d.ts +15 -0
  157. package/dist/output/index.d.ts.map +1 -0
  158. package/dist/phases/complete.d.ts +17 -0
  159. package/dist/phases/complete.d.ts.map +1 -0
  160. package/dist/phases/index.d.ts +5 -0
  161. package/dist/phases/index.d.ts.map +1 -0
  162. package/dist/phases/locate.d.ts +15 -0
  163. package/dist/phases/locate.d.ts.map +1 -0
  164. package/dist/phases/simple-replace.d.ts +72 -0
  165. package/dist/phases/simple-replace.d.ts.map +1 -0
  166. package/dist/phases/transcribe.d.ts +19 -0
  167. package/dist/phases/transcribe.d.ts.map +1 -0
  168. package/dist/pipeline/index.d.ts +10 -0
  169. package/dist/pipeline/index.d.ts.map +1 -0
  170. package/dist/pipeline/orchestrator.d.ts +13 -0
  171. package/dist/pipeline/orchestrator.d.ts.map +1 -0
  172. package/dist/pipeline/types.d.ts +58 -0
  173. package/dist/pipeline/types.d.ts.map +1 -0
  174. package/dist/prompt/index.d.ts +3 -0
  175. package/dist/prompt/index.d.ts.map +1 -0
  176. package/dist/prompt/templates.d.ts +40 -0
  177. package/dist/prompt/templates.d.ts.map +1 -0
  178. package/dist/prompt/transcribe.d.ts +42 -0
  179. package/dist/prompt/transcribe.d.ts.map +1 -0
  180. package/dist/reasoning/client.d.ts +42 -0
  181. package/dist/reasoning/client.d.ts.map +1 -0
  182. package/dist/reasoning/index.d.ts +17 -0
  183. package/dist/reasoning/index.d.ts.map +1 -0
  184. package/dist/reasoning/strategy.d.ts +12 -0
  185. package/dist/reasoning/strategy.d.ts.map +1 -0
  186. package/dist/reasoning/types.d.ts +58 -0
  187. package/dist/reasoning/types.d.ts.map +1 -0
  188. package/dist/reflection/collector.d.ts +18 -0
  189. package/dist/reflection/collector.d.ts.map +1 -0
  190. package/dist/reflection/index.d.ts +13 -0
  191. package/dist/reflection/index.d.ts.map +1 -0
  192. package/dist/reflection/reporter.d.ts +10 -0
  193. package/dist/reflection/reporter.d.ts.map +1 -0
  194. package/dist/reflection/types.d.ts +99 -0
  195. package/dist/reflection/types.d.ts.map +1 -0
  196. package/dist/routing/classifier.d.ts +8 -0
  197. package/dist/routing/classifier.d.ts.map +1 -0
  198. package/dist/routing/index.d.ts +12 -0
  199. package/dist/routing/index.d.ts.map +1 -0
  200. package/dist/routing/router.d.ts +8 -0
  201. package/dist/routing/router.d.ts.map +1 -0
  202. package/dist/routing/types.d.ts +68 -0
  203. package/dist/routing/types.d.ts.map +1 -0
  204. package/dist/transcript/feedback.d.ts +70 -0
  205. package/dist/transcript/feedback.d.ts.map +1 -0
  206. package/dist/transcript/index.d.ts +10 -0
  207. package/dist/transcript/index.d.ts.map +1 -0
  208. package/dist/transcript/operations.d.ts +152 -0
  209. package/dist/transcript/operations.d.ts.map +1 -0
  210. package/dist/transcript/pkl-utils.d.ts +66 -0
  211. package/dist/transcript/pkl-utils.d.ts.map +1 -0
  212. package/dist/transcription/index.d.ts +17 -0
  213. package/dist/transcription/index.d.ts.map +1 -0
  214. package/dist/transcription/service.d.ts +10 -0
  215. package/dist/transcription/service.d.ts.map +1 -0
  216. package/dist/transcription/types.d.ts +41 -0
  217. package/dist/transcription/types.d.ts.map +1 -0
  218. package/dist/types.d.ts +28 -0
  219. package/dist/types.d.ts.map +1 -0
  220. package/dist/util/collision-detector.d.ts +77 -0
  221. package/dist/util/collision-detector.d.ts.map +1 -0
  222. package/dist/util/dates.d.ts +57 -0
  223. package/dist/util/dates.d.ts.map +1 -0
  224. package/dist/util/general.d.ts +3 -0
  225. package/dist/util/general.d.ts.map +1 -0
  226. package/dist/util/media.d.ts +9 -0
  227. package/dist/util/media.d.ts.map +1 -0
  228. package/dist/util/metadata.d.ts +138 -0
  229. package/dist/util/metadata.d.ts.map +1 -0
  230. package/dist/util/openai.d.ts +22 -0
  231. package/dist/util/openai.d.ts.map +1 -0
  232. package/dist/util/sounds-like-database.d.ts +98 -0
  233. package/dist/util/sounds-like-database.d.ts.map +1 -0
  234. package/dist/util/storage.d.ts +35 -0
  235. package/dist/util/storage.d.ts.map +1 -0
  236. package/dist/util/text-replacer.d.ts +56 -0
  237. package/dist/util/text-replacer.d.ts.map +1 -0
  238. package/dist/utils/entityFinder.d.ts +29 -0
  239. package/dist/utils/entityFinder.d.ts.map +1 -0
  240. package/package.json +84 -0
package/dist/index19.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index19.js","sources":["../src/reasoning/types.ts"],"sourcesContent":["/**\n * Reasoning System Types\n *\n * Configuration and types for reasoning model integration.\n * Uses riotprompt for prompt building and execution.\n */\n\n// Model families with reasoning capabilities\nexport type ReasoningModel =\n | 'claude-3-5-sonnet'\n | 'claude-3-opus'\n | 'claude-4'\n | 'gpt-4o'\n | 'gpt-4o-mini'\n | 'gpt-4-turbo'\n | 'gpt-5'\n | 'gpt-5-nano'\n | 'gpt-5-mini'\n | 'gpt-5.1'\n | 'gpt-5.2'\n | 'o1'\n | 'o1-mini'\n | 'o3'\n | 'o3-mini'\n | string; // Allow any model string\n\nexport type ReasoningLevel = 'none' | 'low' | 'medium' | 'high';\n\nexport interface ReasoningConfig {\n model: ReasoningModel;\n reasoningLevel?: ReasoningLevel; // For models that support it (o1, etc.)\n maxTokens?: number;\n temperature?: number;\n apiKey?: string; // Override default\n provider?: 'openai' | 'anthropic' | 'gemini' | 'auto';\n}\n\nexport interface ReasoningRequest {\n prompt: string;\n systemPrompt?: string;\n context?: string[];\n maxIterations?: number;\n tools?: ToolDefinition[];\n}\n\nexport interface ToolCall {\n id: string;\n name: string;\n arguments: Record<string, unknown>;\n}\n\nexport interface ToolDefinition {\n name: string;\n description: string;\n parameters: Record<string, unknown>;\n}\n\nexport interface ReasoningResponse {\n content: string;\n model: string;\n usage?: {\n promptTokens: number;\n completionTokens: number;\n totalTokens: number;\n };\n toolCalls?: ToolCall[];\n finishReason?: string;\n duration?: number;\n}\n\nexport interface ReasoningMetrics {\n iterations: number;\n toolCallsExecuted: number;\n totalDuration: number;\n tokensUsed: number;\n}\n\n// Model-specific settings\nexport interface ModelSettings {\n maxTokens: number;\n supportsTools: boolean;\n reasoningLevel?: boolean;\n}\n\nexport const REASONING_MODELS: Record<string, ModelSettings> = {\n 'claude-3-5-sonnet': { maxTokens: 4096, supportsTools: true },\n 'claude-3-opus': { maxTokens: 4096, supportsTools: true },\n 'claude-4': { maxTokens: 8192, supportsTools: true },\n 'gpt-4o': { maxTokens: 4096, supportsTools: true },\n 'gpt-4o-mini': { maxTokens: 4096, supportsTools: true },\n 'gpt-5': { maxTokens: 8192, supportsTools: true },\n 'gpt-5-nano': { maxTokens: 2048, supportsTools: true },\n 'gpt-5-mini': { maxTokens: 4096, supportsTools: true },\n 'gpt-5.1': { maxTokens: 16384, supportsTools: true, reasoningLevel: true },\n 'gpt-5.2': { maxTokens: 32768, supportsTools: true, reasoningLevel: true },\n 'o1': { maxTokens: 65536, supportsTools: false, reasoningLevel: true },\n 'o1-mini': { maxTokens: 65536, supportsTools: false, reasoningLevel: true },\n 'o3': { maxTokens: 100000, supportsTools: true, reasoningLevel: true },\n 'o3-mini': { maxTokens: 65536, supportsTools: true, reasoningLevel: true 
},\n};\n\n"],"names":[],"mappings":"AAoFO,MAAM,gBAAA,GAAkD;AAAA,EAC3D,mBAAA,EAAqB,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EAC5D,eAAA,EAAiB,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EACxD,UAAA,EAAY,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EACnD,QAAA,EAAU,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EACjD,aAAA,EAAe,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EACtD,OAAA,EAAS,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EAChD,YAAA,EAAc,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EACrD,YAAA,EAAc,EAAE,SAAA,EAAW,IAAA,EAAM,eAAe,IAAA,EAAK;AAAA,EACrD,WAAW,EAAE,SAAA,EAAW,OAAO,aAAA,EAAe,IAAA,EAAM,gBAAgB,IAAA,EAAK;AAAA,EACzE,WAAW,EAAE,SAAA,EAAW,OAAO,aAAA,EAAe,IAAA,EAAM,gBAAgB,IAAA,EAAK;AAAA,EACzE,MAAM,EAAE,SAAA,EAAW,OAAO,aAAA,EAAe,KAAA,EAAO,gBAAgB,IAAA,EAAK;AAAA,EACrE,WAAW,EAAE,SAAA,EAAW,OAAO,aAAA,EAAe,KAAA,EAAO,gBAAgB,IAAA,EAAK;AAAA,EAC1E,MAAM,EAAE,SAAA,EAAW,KAAQ,aAAA,EAAe,IAAA,EAAM,gBAAgB,IAAA,EAAK;AAAA,EACrE,WAAW,EAAE,SAAA,EAAW,OAAO,aAAA,EAAe,IAAA,EAAM,gBAAgB,IAAA;AACxE;;;;"}
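The source map above embeds src/reasoning/types.ts, which declares the ReasoningConfig shape and the REASONING_MODELS settings table. Below is a minimal sketch of how those pieces could be combined by a consumer; the import path is an assumption, since this diff only shows the compiled dist files, not the package's public exports.

```ts
// Sketch only: the import path and re-exports are assumptions not shown in this diff.
import { REASONING_MODELS, type ReasoningConfig } from '@redaksjon/protokoll-engine';

// Look up the per-model settings declared in src/reasoning/types.ts.
const settings = REASONING_MODELS['o3-mini']; // { maxTokens: 65536, supportsTools: true, reasoningLevel: true }

const config: ReasoningConfig = {
  model: 'o3-mini',
  // Only models flagged with reasoningLevel accept a level other than 'none'.
  reasoningLevel: settings?.reasoningLevel ? 'high' : 'none',
  maxTokens: settings?.maxTokens,
  provider: 'openai',
};
```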
package/dist/index2.js ADDED
@@ -0,0 +1,33 @@
+ import { create as create$1 } from './index17.js';
+ import { getRecommendedStrategy, createStrategy } from './index18.js';
+ export { REASONING_MODELS } from './index19.js';
+
+ const create = (config) => {
+   const client = create$1(config);
+   return {
+     complete: (request) => client.complete(request),
+     completeWithTools: (request) => client.completeWithTools(request),
+     executeWithStrategy: async (request, strategyType) => {
+       createStrategy({
+         type: strategyType,
+         maxIterations: request.maxIterations
+       });
+       const response = await client.complete(request);
+       return {
+         ...response,
+         metrics: {
+           iterations: 1,
+           toolCallsExecuted: response.toolCalls?.length ?? 0,
+           totalDuration: response.duration ?? 0,
+           tokensUsed: response.usage?.totalTokens ?? 0
+         }
+       };
+     },
+     isReasoningModel: client.isReasoningModel,
+     getModelFamily: client.getModelFamily,
+     getRecommendedStrategy: getRecommendedStrategy
+   };
+ };
+
+ export { create };
+ //# sourceMappingURL=index2.js.map
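index2.js is the compiled src/reasoning/index.ts factory. A minimal usage sketch follows, assuming the reasoning module is importable as shown (the actual export path is not visible in this diff); as the code above shows, executeWithStrategy currently performs a single completion and reports metrics.

```ts
// Sketch only: the subpath import is an assumption; API keys come from config or the environment.
import * as Reasoning from '@redaksjon/protokoll-engine/reasoning';

const reasoning = Reasoning.create({ model: 'gpt-4o', provider: 'openai' });

// Pick a strategy from transcript length, unknown-name flag, and complexity,
// then run a single completion (full agentic execution is noted as a later step).
const strategy = reasoning.getRecommendedStrategy(1200, true, 'medium');
const result = await reasoning.executeWithStrategy(
  { prompt: 'Correct misheard names in this transcript: ...', maxIterations: 3 },
  strategy,
);

console.log(result.content, result.metrics.tokensUsed);
```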
package/dist/index2.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index2.js","sources":["../src/reasoning/index.ts"],"sourcesContent":["/**\n * Reasoning System\n * \n * Main entry point for the reasoning system. Provides a factory function\n * to create reasoning instances that can execute LLM calls with various\n * strategies using riotprompt.\n */\n\nimport { ReasoningConfig, ReasoningRequest, ReasoningResponse, ReasoningMetrics } from './types';\nimport * as Client from './client';\nimport * as Strategy from './strategy';\n\nexport interface ReasoningInstance {\n // Single completion\n complete(request: ReasoningRequest): Promise<ReasoningResponse>;\n \n // Multi-turn tool calling\n completeWithTools(request: Client.ToolCallRequest): Promise<Client.ToolCallResponse>;\n \n // Strategy-based execution\n executeWithStrategy(\n request: ReasoningRequest,\n strategyType: Strategy.TranscriptionStrategy\n ): Promise<ReasoningResponse & { metrics: ReasoningMetrics }>;\n \n // Model information\n isReasoningModel(model: string): boolean;\n getModelFamily(model: string): 'openai' | 'anthropic' | 'gemini' | 'unknown';\n \n // Strategy helpers\n getRecommendedStrategy(\n transcriptLength: number,\n hasUnknownNames: boolean,\n complexity: 'low' | 'medium' | 'high'\n ): Strategy.TranscriptionStrategy;\n}\n\nexport const create = (config: ReasoningConfig): ReasoningInstance => {\n const client = Client.create(config);\n \n return {\n complete: (request) => client.complete(request),\n completeWithTools: (request) => client.completeWithTools(request),\n \n executeWithStrategy: async (request, strategyType) => {\n // Create the strategy (for future use with full agentic execution)\n Strategy.createStrategy({\n type: strategyType,\n maxIterations: request.maxIterations,\n });\n \n // For now, simple execution\n // Full strategy execution will be implemented in Step 05 (Agentic)\n const response = await client.complete(request);\n \n return {\n ...response,\n metrics: {\n iterations: 1,\n toolCallsExecuted: response.toolCalls?.length ?? 0,\n totalDuration: response.duration ?? 0,\n tokensUsed: response.usage?.totalTokens ?? 0,\n },\n };\n },\n \n isReasoningModel: client.isReasoningModel,\n getModelFamily: client.getModelFamily,\n getRecommendedStrategy: Strategy.getRecommendedStrategy,\n };\n};\n\n// Re-export types\nexport * from './types';\nexport type { TranscriptionStrategy, StrategyConfig } from './strategy';\n\n"],"names":["Client.create","Strategy.createStrategy","Strategy.getRecommendedStrategy"],"mappings":";;;;AAqCO,MAAM,MAAA,GAAS,CAAC,MAAA,KAA+C;AAClE,EAAA,MAAM,MAAA,GAASA,QAAO,CAAO,MAAM,CAAA;AAEnC,EAAA,OAAO;AAAA,IACH,QAAA,EAAU,CAAC,OAAA,KAAY,MAAA,CAAO,SAAS,OAAO,CAAA;AAAA,IAC9C,iBAAA,EAAmB,CAAC,OAAA,KAAY,MAAA,CAAO,kBAAkB,OAAO,CAAA;AAAA,IAEhE,mBAAA,EAAqB,OAAO,OAAA,EAAS,YAAA,KAAiB;AAElD,MAAAC,cAAS,CAAe;AAAA,QACpB,IAAA,EAAM,YAAA;AAAA,QACN,eAAe,OAAA,CAAQ;AAAA,OAC1B,CAAA;AAID,MAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,QAAA,CAAS,OAAO,CAAA;AAE9C,MAAA,OAAO;AAAA,QACH,GAAG,QAAA;AAAA,QACH,OAAA,EAAS;AAAA,UACL,UAAA,EAAY,CAAA;AAAA,UACZ,iBAAA,EAAmB,QAAA,CAAS,SAAA,EAAW,MAAA,IAAU,CAAA;AAAA,UACjD,aAAA,EAAe,SAAS,QAAA,IAAY,CAAA;AAAA,UACpC,UAAA,EAAY,QAAA,CAAS,KAAA,EAAO,WAAA,IAAe;AAAA;AAC/C,OACJ;AAAA,IACJ,CAAA;AAAA,IAEA,kBAAkB,MAAA,CAAO,gBAAA;AAAA,IACzB,gBAAgB,MAAA,CAAO,cAAA;AAAA,IACvB,wBAAwBC;AAAS,GACrC;AACJ;;;;"}
package/dist/index20.js ADDED
@@ -0,0 +1,105 @@
+ import { create as create$1 } from './index12.js';
+ import { create as create$2 } from './index13.js';
+ import { MODEL_CAPABILITIES } from './index21.js';
+ import { getLogger } from './index41.js';
+ import * as path from 'node:path';
+ import * as os from 'node:os';
+
+ const create = (openai) => {
+   const logger = getLogger();
+   const storage$1 = create$1({ log: logger.debug });
+   const media$1 = create$2(logger);
+   const supportsStreaming = (model) => {
+     return MODEL_CAPABILITIES[model]?.supportsStreaming ?? false;
+   };
+   const supportsDiarization = (model) => {
+     return MODEL_CAPABILITIES[model]?.supportsDiarization ?? false;
+   };
+   const transcribe = async (request) => {
+     const { audioFile, config } = request;
+     logger.debug("Starting transcription", { model: config.model, file: audioFile });
+     const MAX_AUDIO_SIZE = 26214400;
+     const tempDir = path.join(os.tmpdir(), "protokoll-conversions");
+     const originalFileSize = await media$1.getFileSize(audioFile);
+     const originalFileSizeMB = (originalFileSize / (1024 * 1024)).toFixed(1);
+     logger.debug(`Original audio file size: ${originalFileSize} bytes (${originalFileSizeMB} MB)`);
+     const needsConversion = originalFileSize > MAX_AUDIO_SIZE * 0.95;
+     const convertedAudioFile = needsConversion ? await media$1.convertToSupportedFormat(audioFile, tempDir, true) : await media$1.convertToSupportedFormat(audioFile, tempDir);
+     logger.debug(`Using audio file for transcription: ${convertedAudioFile}`);
+     const fileSize = await media$1.getFileSize(convertedAudioFile);
+     const fileSizeMB = (fileSize / (1024 * 1024)).toFixed(1);
+     logger.debug(`Audio file size: ${fileSize} bytes (${fileSizeMB} MB), max size: ${MAX_AUDIO_SIZE} bytes`);
+     let transcriptionText;
+     let totalDuration = 0;
+     if (fileSize > MAX_AUDIO_SIZE) {
+       logger.info(`Audio file exceeds maximum size (${fileSize} > ${MAX_AUDIO_SIZE} bytes), splitting into chunks`);
+       const splitTempDir = path.join(tempDir, `split_audio_${Date.now()}`);
+       await storage$1.createDirectory(splitTempDir);
+       try {
+         const audioChunks = await media$1.splitAudioFile(convertedAudioFile, splitTempDir, MAX_AUDIO_SIZE);
+         logger.info(`Split audio file into ${audioChunks.length} chunks`);
+         const transcriptions = [];
+         for (let i = 0; i < audioChunks.length; i++) {
+           const chunkPath = audioChunks[i];
+           logger.info(`Transcribing chunk ${i + 1}/${audioChunks.length}: ${chunkPath}`);
+           const chunkStream = await storage$1.readStream(chunkPath);
+           const chunkStartTime = Date.now();
+           const chunkResponse = await openai.audio.transcriptions.create({
+             model: config.model,
+             file: chunkStream,
+             response_format: config.response_format ?? "json",
+             ...config.language && { language: config.language },
+             ...config.temperature !== void 0 && { temperature: config.temperature },
+             ...config.prompt && { prompt: config.prompt }
+           });
+           const chunkDuration = Date.now() - chunkStartTime;
+           totalDuration += chunkDuration;
+           transcriptions.push(chunkResponse.text);
+         }
+         transcriptionText = transcriptions.join(" ");
+         for (const chunk of audioChunks) {
+           try {
+             await storage$1.deleteFile(chunk);
+           } catch (error) {
+             logger.warn(`Failed to delete temporary chunk ${chunk}: ${error}`);
+           }
+         }
+         try {
+           await storage$1.deleteDirectory(splitTempDir);
+         } catch (error) {
+           logger.warn(`Failed to delete temporary split directory ${splitTempDir}: ${error}`);
+         }
+       } catch (error) {
+         logger.error(`Error processing split audio files: ${error}`);
+         throw new Error(`Failed to process split audio files: ${error}`);
+       }
+     } else {
+       const audioStream = await storage$1.readStream(convertedAudioFile);
+       const startTime = Date.now();
+       const response = await openai.audio.transcriptions.create({
+         model: config.model,
+         file: audioStream,
+         response_format: config.response_format ?? "json",
+         ...config.language && { language: config.language },
+         ...config.temperature !== void 0 && { temperature: config.temperature },
+         ...config.prompt && { prompt: config.prompt }
+       });
+       totalDuration = Date.now() - startTime;
+       transcriptionText = response.text;
+     }
+     logger.debug("Transcription complete", { duration: totalDuration, model: config.model });
+     return {
+       text: transcriptionText,
+       model: config.model,
+       duration: totalDuration
+     };
+   };
+   return {
+     transcribe,
+     supportsStreaming,
+     supportsDiarization
+   };
+ };
+
+ export { create };
+ //# sourceMappingURL=index20.js.map
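index20.js is the compiled transcription service (src/transcription/service.ts): it converts the audio if needed, splits files over the 25 MB OpenAI limit into chunks, and joins the chunk transcripts. A minimal calling sketch follows; the subpath import is an assumption.

```ts
// Sketch only: import path assumed; new OpenAI() reads OPENAI_API_KEY from the environment.
import OpenAI from 'openai';
import * as TranscriptionService from '@redaksjon/protokoll-engine/transcription';

const service = TranscriptionService.create(new OpenAI());

// Files larger than 25 MB are split and transcribed chunk by chunk internally.
const result = await service.transcribe({
  audioFile: '/path/to/voice-memo.m4a',
  config: { model: 'gpt-4o-transcribe', language: 'en', response_format: 'json' },
});

console.log(result.text, result.duration);
```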
package/dist/index20.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index20.js","sources":["../src/transcription/service.ts"],"sourcesContent":["/**\n * Transcription Service\n * \n * Handles audio transcription using OpenAI's transcription models.\n * Keeps transcription simple - the complexity is in the reasoning pass.\n */\n\nimport OpenAI from 'openai';\nimport * as Storage from '../util/storage';\nimport * as Media from '../util/media';\nimport {\n TranscriptionRequest,\n TranscriptionResult,\n TranscriptionModel,\n MODEL_CAPABILITIES\n} from './types';\nimport * as Logging from '../logging';\nimport * as path from 'node:path';\nimport * as os from 'node:os';\n\nexport interface ServiceInstance {\n transcribe(request: TranscriptionRequest): Promise<TranscriptionResult>;\n supportsStreaming(model: TranscriptionModel): boolean;\n supportsDiarization(model: TranscriptionModel): boolean;\n}\n\n// Alias for backwards compatibility\nexport type TranscriptionService = ServiceInstance;\n\nexport const create = (openai: OpenAI): ServiceInstance => {\n const logger = Logging.getLogger();\n const storage = Storage.create({ log: logger.debug });\n const media = Media.create(logger);\n\n const supportsStreaming = (model: TranscriptionModel): boolean => {\n return MODEL_CAPABILITIES[model]?.supportsStreaming ?? false;\n };\n\n const supportsDiarization = (model: TranscriptionModel): boolean => {\n return MODEL_CAPABILITIES[model]?.supportsDiarization ?? false;\n };\n\n const transcribe = async (request: TranscriptionRequest): Promise<TranscriptionResult> => {\n const { audioFile, config } = request;\n\n logger.debug('Starting transcription', { model: config.model, file: audioFile });\n\n // OpenAI API has a 25MB limit for audio files\n const MAX_AUDIO_SIZE = 26214400; // 25MB in bytes\n const tempDir = path.join(os.tmpdir(), 'protokoll-conversions');\n\n // Check original file size first\n const originalFileSize = await media.getFileSize(audioFile);\n const originalFileSizeMB = (originalFileSize / (1024 * 1024)).toFixed(1);\n logger.debug(`Original audio file size: ${originalFileSize} bytes (${originalFileSizeMB} MB)`);\n\n // Convert audio file to a supported format if necessary\n // Force conversion if file is close to or over the size limit to ensure compression\n const needsConversion = originalFileSize > (MAX_AUDIO_SIZE * 0.95); // Convert if within 5% of limit\n const convertedAudioFile = needsConversion \n ? 
await media.convertToSupportedFormat(audioFile, tempDir, true) // Force conversion\n : await media.convertToSupportedFormat(audioFile, tempDir);\n logger.debug(`Using audio file for transcription: ${convertedAudioFile}`);\n\n // Check if audio file exceeds the size limit after conversion\n const fileSize = await media.getFileSize(convertedAudioFile);\n const fileSizeMB = (fileSize / (1024 * 1024)).toFixed(1);\n logger.debug(`Audio file size: ${fileSize} bytes (${fileSizeMB} MB), max size: ${MAX_AUDIO_SIZE} bytes`);\n\n let transcriptionText: string;\n let totalDuration = 0;\n\n if (fileSize > MAX_AUDIO_SIZE) {\n logger.info(`Audio file exceeds maximum size (${fileSize} > ${MAX_AUDIO_SIZE} bytes), splitting into chunks`);\n\n // Create a temporary directory for the audio chunks\n const splitTempDir = path.join(tempDir, `split_audio_${Date.now()}`);\n await storage.createDirectory(splitTempDir);\n\n try {\n // Split the audio file into chunks\n const audioChunks = await media.splitAudioFile(convertedAudioFile, splitTempDir, MAX_AUDIO_SIZE);\n logger.info(`Split audio file into ${audioChunks.length} chunks`);\n\n // Transcribe each chunk\n const transcriptions: string[] = [];\n for (let i = 0; i < audioChunks.length; i++) {\n const chunkPath = audioChunks[i];\n logger.info(`Transcribing chunk ${i + 1}/${audioChunks.length}: ${chunkPath}`);\n\n const chunkStream = await storage.readStream(chunkPath);\n const chunkStartTime = Date.now();\n \n const chunkResponse = await openai.audio.transcriptions.create({\n model: config.model,\n file: chunkStream,\n response_format: config.response_format ?? 'json',\n ...(config.language && { language: config.language }),\n ...(config.temperature !== undefined && { temperature: config.temperature }),\n ...(config.prompt && { prompt: config.prompt }),\n });\n\n const chunkDuration = Date.now() - chunkStartTime;\n totalDuration += chunkDuration;\n transcriptions.push(chunkResponse.text);\n }\n\n // Combine all transcriptions\n transcriptionText = transcriptions.join(' ');\n\n // Clean up temporary chunks\n for (const chunk of audioChunks) {\n try {\n await storage.deleteFile(chunk);\n } catch (error) {\n logger.warn(`Failed to delete temporary chunk ${chunk}: ${error}`);\n }\n }\n \n // Clean up split directory\n try {\n await storage.deleteDirectory(splitTempDir);\n } catch (error) {\n logger.warn(`Failed to delete temporary split directory ${splitTempDir}: ${error}`);\n }\n } catch (error) {\n logger.error(`Error processing split audio files: ${error}`);\n throw new Error(`Failed to process split audio files: ${error}`);\n }\n } else {\n // If file size is within the limit, transcribe normally\n const audioStream = await storage.readStream(convertedAudioFile);\n \n // Execute transcription\n const startTime = Date.now();\n const response = await openai.audio.transcriptions.create({\n model: config.model,\n file: audioStream,\n response_format: config.response_format ?? 
'json',\n ...(config.language && { language: config.language }),\n ...(config.temperature !== undefined && { temperature: config.temperature }),\n ...(config.prompt && { prompt: config.prompt }),\n });\n totalDuration = Date.now() - startTime;\n transcriptionText = response.text;\n }\n \n logger.debug('Transcription complete', { duration: totalDuration, model: config.model });\n \n // Handle the response\n return {\n text: transcriptionText,\n model: config.model,\n duration: totalDuration,\n };\n };\n \n return {\n transcribe,\n supportsStreaming,\n supportsDiarization,\n };\n};\n"],"names":["Logging.getLogger","storage","Storage.create","media","Media.create"],"mappings":";;;;;;;AA6BO,MAAM,MAAA,GAAS,CAAC,MAAA,KAAoC;AACvD,EAAA,MAAM,MAAA,GAASA,SAAQ,EAAU;AACjC,EAAA,MAAMC,YAAUC,QAAQ,CAAO,EAAE,GAAA,EAAK,MAAA,CAAO,OAAO,CAAA;AACpD,EAAA,MAAMC,OAAA,GAAQC,QAAM,CAAO,MAAM,CAAA;AAEjC,EAAA,MAAM,iBAAA,GAAoB,CAAC,KAAA,KAAuC;AAC9D,IAAA,OAAO,kBAAA,CAAmB,KAAK,CAAA,EAAG,iBAAA,IAAqB,KAAA;AAAA,EAC3D,CAAA;AAEA,EAAA,MAAM,mBAAA,GAAsB,CAAC,KAAA,KAAuC;AAChE,IAAA,OAAO,kBAAA,CAAmB,KAAK,CAAA,EAAG,mBAAA,IAAuB,KAAA;AAAA,EAC7D,CAAA;AAEA,EAAA,MAAM,UAAA,GAAa,OAAO,OAAA,KAAgE;AACtF,IAAA,MAAM,EAAE,SAAA,EAAW,MAAA,EAAO,GAAI,OAAA;AAE9B,IAAA,MAAA,CAAO,KAAA,CAAM,0BAA0B,EAAE,KAAA,EAAO,OAAO,KAAA,EAAO,IAAA,EAAM,WAAW,CAAA;AAG/E,IAAA,MAAM,cAAA,GAAiB,QAAA;AACvB,IAAA,MAAM,UAAU,IAAA,CAAK,IAAA,CAAK,EAAA,CAAG,MAAA,IAAU,uBAAuB,CAAA;AAG9D,IAAA,MAAM,gBAAA,GAAmB,MAAMD,OAAA,CAAM,WAAA,CAAY,SAAS,CAAA;AAC1D,IAAA,MAAM,kBAAA,GAAA,CAAsB,gBAAA,IAAoB,IAAA,GAAO,IAAA,CAAA,EAAO,QAAQ,CAAC,CAAA;AACvE,IAAA,MAAA,CAAO,KAAA,CAAM,CAAA,0BAAA,EAA6B,gBAAgB,CAAA,QAAA,EAAW,kBAAkB,CAAA,IAAA,CAAM,CAAA;AAI7F,IAAA,MAAM,eAAA,GAAkB,mBAAoB,cAAA,GAAiB,IAAA;AAC7D,IAAA,MAAM,kBAAA,GAAqB,eAAA,GACrB,MAAMA,OAAA,CAAM,wBAAA,CAAyB,SAAA,EAAW,OAAA,EAAS,IAAI,CAAA,GAC7D,MAAMA,OAAA,CAAM,wBAAA,CAAyB,WAAW,OAAO,CAAA;AAC7D,IAAA,MAAA,CAAO,KAAA,CAAM,CAAA,oCAAA,EAAuC,kBAAkB,CAAA,CAAE,CAAA;AAGxE,IAAA,MAAM,QAAA,GAAW,MAAMA,OAAA,CAAM,WAAA,CAAY,kBAAkB,CAAA;AAC3D,IAAA,MAAM,UAAA,GAAA,CAAc,QAAA,IAAY,IAAA,GAAO,IAAA,CAAA,EAAO,QAAQ,CAAC,CAAA;AACvD,IAAA,MAAA,CAAO,MAAM,CAAA,iBAAA,EAAoB,QAAQ,WAAW,UAAU,CAAA,gBAAA,EAAmB,cAAc,CAAA,MAAA,CAAQ,CAAA;AAEvG,IAAA,IAAI,iBAAA;AACJ,IAAA,IAAI,aAAA,GAAgB,CAAA;AAEpB,IAAA,IAAI,WAAW,cAAA,EAAgB;AAC3B,MAAA,MAAA,CAAO,IAAA,CAAK,CAAA,iCAAA,EAAoC,QAAQ,CAAA,GAAA,EAAM,cAAc,CAAA,8BAAA,CAAgC,CAAA;AAG5G,MAAA,MAAM,YAAA,GAAe,KAAK,IAAA,CAAK,OAAA,EAAS,eAAe,IAAA,CAAK,GAAA,EAAK,CAAA,CAAE,CAAA;AACnE,MAAA,MAAMF,SAAA,CAAQ,gBAAgB,YAAY,CAAA;AAE1C,MAAA,IAAI;AAEA,QAAA,MAAM,cAAc,MAAME,OAAA,CAAM,cAAA,CAAe,kBAAA,EAAoB,cAAc,cAAc,CAAA;AAC/F,QAAA,MAAA,CAAO,IAAA,CAAK,CAAA,sBAAA,EAAyB,WAAA,CAAY,MAAM,CAAA,OAAA,CAAS,CAAA;AAGhE,QAAA,MAAM,iBAA2B,EAAC;AAClC,QAAA,KAAA,IAAS,CAAA,GAAI,CAAA,EAAG,CAAA,GAAI,WAAA,CAAY,QAAQ,CAAA,EAAA,EAAK;AACzC,UAAA,MAAM,SAAA,GAAY,YAAY,CAAC,CAAA;AAC/B,UAAA,MAAA,CAAO,IAAA,CAAK,sBAAsB,CAAA,GAAI,CAAC,IAAI,WAAA,CAAY,MAAM,CAAA,EAAA,EAAK,SAAS,CAAA,CAAE,CAAA;AAE7E,UAAA,MAAM,WAAA,GAAc,MAAMF,SAAA,CAAQ,UAAA,CAAW,SAAS,CAAA;AACtD,UAAA,MAAM,cAAA,GAAiB,KAAK,GAAA,EAAI;AAEhC,UAAA,MAAM,aAAA,GAAgB,MAAM,MAAA,CAAO,KAAA,CAAM,eAAe,MAAA,CAAO;AAAA,YAC3D,OAAO,MAAA,CAAO,KAAA;AAAA,YACd,IAAA,EAAM,WAAA;AAAA,YACN,eAAA,EAAiB,OAAO,eAAA,IAAmB,MAAA;AAAA,YAC3C,GAAI,MAAA,CAAO,QAAA,IAAY,EAAE,QAAA,EAAU,OAAO,QAAA,EAAS;AAAA,YACnD,GAAI,MAAA,CAAO,WAAA,KAAgB,UAAa,EAAE,WAAA,EAAa,OAAO,WAAA,EAAY;AAAA,YAC1E,GAAI,MAAA,CAAO,MAAA,IAAU,EAAE,MAAA,EAAQ,OAAO,MAAA;AAAO,WAChD,CAAA;AAED,UAAA,MAAM,aAAA,GAAgB,IAAA,CAAK,GAAA,EAAI,GAAI,cAAA;AACnC,UAAA,aAAA,IAAiB,aAAA;AACjB,UAAA,cAAA,CAAe,IAAA,CAAK,cAAc,IAAI,CAAA;AAAA,QAC1C;AAGA,QAAA,iBAAA,GAAoB,cAA
A,CAAe,KAAK,GAAG,CAAA;AAG3C,QAAA,KAAA,MAAW,SAAS,WAAA,EAAa;AAC7B,UAAA,IAAI;AACA,YAAA,MAAMA,SAAA,CAAQ,WAAW,KAAK,CAAA;AAAA,UAClC,SAAS,KAAA,EAAO;AACZ,YAAA,MAAA,CAAO,IAAA,CAAK,CAAA,iCAAA,EAAoC,KAAK,CAAA,EAAA,EAAK,KAAK,CAAA,CAAE,CAAA;AAAA,UACrE;AAAA,QACJ;AAGA,QAAA,IAAI;AACA,UAAA,MAAMA,SAAA,CAAQ,gBAAgB,YAAY,CAAA;AAAA,QAC9C,SAAS,KAAA,EAAO;AACZ,UAAA,MAAA,CAAO,IAAA,CAAK,CAAA,2CAAA,EAA8C,YAAY,CAAA,EAAA,EAAK,KAAK,CAAA,CAAE,CAAA;AAAA,QACtF;AAAA,MACJ,SAAS,KAAA,EAAO;AACZ,QAAA,MAAA,CAAO,KAAA,CAAM,CAAA,oCAAA,EAAuC,KAAK,CAAA,CAAE,CAAA;AAC3D,QAAA,MAAM,IAAI,KAAA,CAAM,CAAA,qCAAA,EAAwC,KAAK,CAAA,CAAE,CAAA;AAAA,MACnE;AAAA,IACJ,CAAA,MAAO;AAEH,MAAA,MAAM,WAAA,GAAc,MAAMA,SAAA,CAAQ,UAAA,CAAW,kBAAkB,CAAA;AAG/D,MAAA,MAAM,SAAA,GAAY,KAAK,GAAA,EAAI;AAC3B,MAAA,MAAM,QAAA,GAAW,MAAM,MAAA,CAAO,KAAA,CAAM,eAAe,MAAA,CAAO;AAAA,QACtD,OAAO,MAAA,CAAO,KAAA;AAAA,QACd,IAAA,EAAM,WAAA;AAAA,QACN,eAAA,EAAiB,OAAO,eAAA,IAAmB,MAAA;AAAA,QAC3C,GAAI,MAAA,CAAO,QAAA,IAAY,EAAE,QAAA,EAAU,OAAO,QAAA,EAAS;AAAA,QACnD,GAAI,MAAA,CAAO,WAAA,KAAgB,UAAa,EAAE,WAAA,EAAa,OAAO,WAAA,EAAY;AAAA,QAC1E,GAAI,MAAA,CAAO,MAAA,IAAU,EAAE,MAAA,EAAQ,OAAO,MAAA;AAAO,OAChD,CAAA;AACD,MAAA,aAAA,GAAgB,IAAA,CAAK,KAAI,GAAI,SAAA;AAC7B,MAAA,iBAAA,GAAoB,QAAA,CAAS,IAAA;AAAA,IACjC;AAEA,IAAA,MAAA,CAAO,KAAA,CAAM,0BAA0B,EAAE,QAAA,EAAU,eAAe,KAAA,EAAO,MAAA,CAAO,OAAO,CAAA;AAGvF,IAAA,OAAO;AAAA,MACH,IAAA,EAAM,iBAAA;AAAA,MACN,OAAO,MAAA,CAAO,KAAA;AAAA,MACd,QAAA,EAAU;AAAA,KACd;AAAA,EACJ,CAAA;AAEA,EAAA,OAAO;AAAA,IACH,UAAA;AAAA,IACA,iBAAA;AAAA,IACA;AAAA,GACJ;AACJ;;;;"}
package/dist/index21.js ADDED
@@ -0,0 +1,26 @@
+ const MODEL_CAPABILITIES = {
+   "whisper-1": {
+     supportsStreaming: false,
+     supportsDiarization: false,
+     maxFileSize: 25 * 1024 * 1024
+     // 25 MB
+   },
+   "gpt-4o-mini-transcribe": {
+     supportsStreaming: true,
+     supportsDiarization: false,
+     maxFileSize: 25 * 1024 * 1024
+   },
+   "gpt-4o-transcribe": {
+     supportsStreaming: true,
+     supportsDiarization: false,
+     maxFileSize: 25 * 1024 * 1024
+   },
+   "gpt-4o-transcribe-diarize": {
+     supportsStreaming: true,
+     supportsDiarization: true,
+     maxFileSize: 25 * 1024 * 1024
+   }
+ };
+
+ export { MODEL_CAPABILITIES };
+ //# sourceMappingURL=index21.js.map
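MODEL_CAPABILITIES (from src/transcription/types.ts) records streaming and diarization support plus the shared 25 MB file limit per model. A small sketch of capability-based model selection follows; the import path is assumed.

```ts
// Sketch only: import path assumed from the dist layout shown in this diff.
import { MODEL_CAPABILITIES, type TranscriptionModel } from '@redaksjon/protokoll-engine/transcription';

const needsDiarization = true;
const model: TranscriptionModel = needsDiarization ? 'gpt-4o-transcribe-diarize' : 'gpt-4o-transcribe';

const caps = MODEL_CAPABILITIES[model];
console.log(caps.supportsStreaming, caps.supportsDiarization, caps.maxFileSize); // true true 26214400
```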
package/dist/index21.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index21.js","sources":["../src/transcription/types.ts"],"sourcesContent":["/**\n * Transcription System Types\n * \n * Supports multiple OpenAI transcription models with different capabilities.\n * The transcription service produces raw phonetic output that will be\n * corrected by the full reasoning pass.\n */\n\nexport type TranscriptionModel = \n | 'whisper-1'\n | 'gpt-4o-mini-transcribe'\n | 'gpt-4o-transcribe'\n | 'gpt-4o-transcribe-diarize';\n\nexport interface TranscriptionConfig {\n model: TranscriptionModel;\n language?: string;\n prompt?: string;\n response_format?: 'json' | 'text' | 'verbose_json' | 'srt' | 'vtt';\n temperature?: number;\n streaming?: boolean;\n}\n\nexport interface TranscriptionRequest {\n audioFile: string; // Path to audio file\n config: TranscriptionConfig;\n contextPrompt?: string; // Built from known entities (limited to 224 tokens)\n}\n\nexport interface TranscriptionSegment {\n start: number;\n end: number;\n text: string;\n speaker?: string; // For diarization\n}\n\nexport interface TranscriptionResult {\n text: string;\n model: string;\n segments?: TranscriptionSegment[];\n duration?: number;\n language?: string;\n}\n\nexport interface ModelCapabilities {\n supportsStreaming: boolean;\n supportsDiarization: boolean;\n maxFileSize: number;\n}\n\nexport const MODEL_CAPABILITIES: Record<TranscriptionModel, ModelCapabilities> = {\n 'whisper-1': {\n supportsStreaming: false,\n supportsDiarization: false,\n maxFileSize: 25 * 1024 * 1024, // 25 MB\n },\n 'gpt-4o-mini-transcribe': {\n supportsStreaming: true,\n supportsDiarization: false,\n maxFileSize: 25 * 1024 * 1024,\n },\n 'gpt-4o-transcribe': {\n supportsStreaming: true,\n supportsDiarization: false,\n maxFileSize: 25 * 1024 * 1024,\n },\n 'gpt-4o-transcribe-diarize': {\n supportsStreaming: true,\n supportsDiarization: true,\n maxFileSize: 25 * 1024 * 1024,\n },\n};\n\n"],"names":[],"mappings":"AAkDO,MAAM,kBAAA,GAAoE;AAAA,EAC7E,WAAA,EAAa;AAAA,IACT,iBAAA,EAAmB,KAAA;AAAA,IACnB,mBAAA,EAAqB,KAAA;AAAA,IACrB,WAAA,EAAa,KAAK,IAAA,GAAO;AAAA;AAAA,GAC7B;AAAA,EACA,wBAAA,EAA0B;AAAA,IACtB,iBAAA,EAAmB,IAAA;AAAA,IACnB,mBAAA,EAAqB,KAAA;AAAA,IACrB,WAAA,EAAa,KAAK,IAAA,GAAO;AAAA,GAC7B;AAAA,EACA,mBAAA,EAAqB;AAAA,IACjB,iBAAA,EAAmB,IAAA;AAAA,IACnB,mBAAA,EAAqB,KAAA;AAAA,IACrB,WAAA,EAAa,KAAK,IAAA,GAAO;AAAA,GAC7B;AAAA,EACA,2BAAA,EAA6B;AAAA,IACzB,iBAAA,EAAmB,IAAA;AAAA,IACnB,mBAAA,EAAqB,IAAA;AAAA,IACrB,WAAA,EAAa,KAAK,IAAA,GAAO;AAAA;AAEjC;;;;"}
package/dist/index22.js ADDED
@@ -0,0 +1,49 @@
+ import { cook } from '@kjerneverk/riotprompt';
+ import { DEFAULT_INSTRUCTIONS_TRANSCRIBE_FILE, DEFAULT_PERSONA_TRANSCRIBER_FILE } from './index16.js';
+ import { fileURLToPath } from 'node:url';
+ import path__default from 'node:path';
+ import { initializeTemplates, selectTemplate } from './index23.js';
+
+ const __filename$1 = fileURLToPath(import.meta.url);
+ const __dirname$1 = path__default.dirname(__filename$1);
+ initializeTemplates();
+ const createTranscribePrompt = async (transcriptionText, _config, options) => {
+   let templateName;
+   if (options?.template) {
+     templateName = options.template;
+   } else if (options?.autoSelectTemplate !== false) {
+     templateName = selectTemplate(transcriptionText, options?.templateHints);
+   }
+   const prompt = await cook({
+     basePath: __dirname$1,
+     // Use template if selected, otherwise fall back to file-based
+     ...templateName && { template: templateName },
+     // Load persona from file (only if not using template)
+     ...!templateName && {
+       persona: {
+         path: DEFAULT_PERSONA_TRANSCRIBER_FILE
+       }
+     },
+     // Load instructions from file
+     instructions: [
+       { path: DEFAULT_INSTRUCTIONS_TRANSCRIBE_FILE }
+     ],
+     // Add the transcript as content
+     content: [
+       { content: transcriptionText, title: "Transcript" }
+     ]
+     // Context is NOT loaded here - it's queried via tools in agentic mode
+     // This prevents sending huge context payloads with every request
+   });
+   return prompt;
+ };
+ const create = (_model, config) => {
+   return {
+     createTranscribePrompt: async (transcriptionText) => {
+       return createTranscribePrompt(transcriptionText);
+     }
+   };
+ };
+
+ export { create, createTranscribePrompt };
+ //# sourceMappingURL=index22.js.map
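index22.js compiles src/prompt/transcribe.ts: createTranscribePrompt cooks a riotprompt prompt from a selected template (or file-based persona and instructions) plus the raw transcript, deliberately without inlining context. A minimal sketch follows; the import paths and the availability of a Config value are assumptions.

```ts
// Sketch only: import paths assumed; Config's shape lives in src/types.ts and is not shown here.
import { createTranscribePrompt } from '@redaksjon/protokoll-engine/prompt';
import type { Config } from '@redaksjon/protokoll-engine';

declare const protokollConfig: Config; // supplied by the host application

const prompt = await createTranscribePrompt(
  'Met with the team about the Q3 rollout and follow-up action items...',
  protokollConfig,
  { templateHints: { isMeeting: true } }, // or pass { template: 'transcription-technical' } explicitly
);
```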
package/dist/index22.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index22.js","sources":["../src/prompt/transcribe.ts"],"sourcesContent":["import { cook } from \"@kjerneverk/riotprompt\";\nimport type { Prompt } from \"@kjerneverk/riotprompt\";\nimport { DEFAULT_INSTRUCTIONS_TRANSCRIBE_FILE, DEFAULT_PERSONA_TRANSCRIBER_FILE } from '@/constants';\nimport { Config } from '@/types';\nimport { fileURLToPath } from \"node:url\";\nimport path from \"node:path\";\nimport { initializeTemplates, selectTemplate } from './templates';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n// Initialize templates once on module load\ninitializeTemplates();\n\n// Re-export Prompt type for compatibility\nexport type { Prompt };\n\n/**\n * Creates a prompt for the transcription formatting task.\n * \n * Uses RiotPrompt's cook() API for declarative prompt construction with templates.\n * \n * NOTE: Context is NOT loaded into the prompt. Instead, the agentic executor\n * provides tools for the model to query context on-demand. This is the \n * agentic approach - the model investigates what it needs rather than\n * receiving everything upfront.\n * \n * @param transcriptionText - The raw transcript text\n * @param _config - Protokoll configuration\n * @param options - Optional configuration for template selection\n * @returns Cooked prompt ready for the model\n */\nexport const createTranscribePrompt = async (\n transcriptionText: string,\n _config: Config,\n options?: {\n template?: string;\n autoSelectTemplate?: boolean;\n templateHints?: { \n isMeeting?: boolean; \n isTechnical?: boolean; \n isQuick?: boolean;\n isInterview?: boolean;\n };\n }\n): Promise<Prompt> => {\n // Determine which template to use\n let templateName: string | undefined;\n \n if (options?.template) {\n // Explicit template specified\n templateName = options.template;\n } else if (options?.autoSelectTemplate !== false) {\n // Auto-select template based on content (default behavior)\n templateName = selectTemplate(transcriptionText, options?.templateHints);\n }\n // If no template and autoSelect disabled, use file-based persona/instructions\n \n // Use cook() for declarative prompt construction\n const prompt = await cook({\n basePath: __dirname,\n \n // Use template if selected, otherwise fall back to file-based\n ...(templateName && { template: templateName }),\n \n // Load persona from file (only if not using template)\n ...(!templateName && {\n persona: {\n path: DEFAULT_PERSONA_TRANSCRIBER_FILE\n }\n }),\n \n // Load instructions from file\n instructions: [\n { path: DEFAULT_INSTRUCTIONS_TRANSCRIBE_FILE }\n ],\n \n // Add the transcript as content\n content: [\n { content: transcriptionText, title: 'Transcript' }\n ]\n \n // Context is NOT loaded here - it's queried via tools in agentic mode\n // This prevents sending huge context payloads with every request\n });\n \n return prompt;\n};\n\n/**\n * Factory interface for transcribe prompts\n */\nexport interface Factory {\n createTranscribePrompt: (transcriptionText: string) => Promise<Prompt>;\n}\n\n/**\n * Create a factory for transcribe prompts\n * \n * @param _model - Model parameter (unused, preserved for API compatibility)\n * @param config - Protokoll configuration\n */\nexport const create = (_model: unknown, config: Config): Factory => {\n return {\n createTranscribePrompt: async (transcriptionText: string): Promise<Prompt> => {\n return createTranscribePrompt(transcriptionText, config);\n }\n };\n}; 
"],"names":["__filename","__dirname","path"],"mappings":";;;;;;AAQA,MAAMA,YAAA,GAAa,aAAA,CAAc,MAAA,CAAA,IAAA,CAAY,GAAG,CAAA;AAChD,MAAMC,WAAA,GAAYC,aAAA,CAAK,OAAA,CAAQF,YAAU,CAAA;AAGzC,mBAAA,EAAoB;AAoBb,MAAM,sBAAA,GAAyB,OAClC,iBAAA,EACA,OAAA,EACA,OAAA,KAUkB;AAElB,EAAA,IAAI,YAAA;AAEJ,EAAA,IAAI,SAAS,QAAA,EAAU;AAEnB,IAAA,YAAA,GAAe,OAAA,CAAQ,QAAA;AAAA,EAC3B,CAAA,MAAA,IAAW,OAAA,EAAS,kBAAA,KAAuB,KAAA,EAAO;AAE9C,IAAA,YAAA,GAAe,cAAA,CAAe,iBAAA,EAAmB,OAAA,EAAS,aAAa,CAAA;AAAA,EAC3E;AAIA,EAAA,MAAM,MAAA,GAAS,MAAM,IAAA,CAAK;AAAA,IACtB,QAAA,EAAUC,WAAA;AAAA;AAAA,IAGV,GAAI,YAAA,IAAgB,EAAE,QAAA,EAAU,YAAA,EAAa;AAAA;AAAA,IAG7C,GAAI,CAAC,YAAA,IAAgB;AAAA,MACjB,OAAA,EAAS;AAAA,QACL,IAAA,EAAM;AAAA;AACV,KACJ;AAAA;AAAA,IAGA,YAAA,EAAc;AAAA,MACV,EAAE,MAAM,oCAAA;AAAqC,KACjD;AAAA;AAAA,IAGA,OAAA,EAAS;AAAA,MACL,EAAE,OAAA,EAAS,iBAAA,EAAmB,KAAA,EAAO,YAAA;AAAa;AACtD;AAAA;AAAA,GAIH,CAAA;AAED,EAAA,OAAO,MAAA;AACX;AAeO,MAAM,MAAA,GAAS,CAAC,MAAA,EAAiB,MAAA,KAA4B;AAChE,EAAA,OAAO;AAAA,IACH,sBAAA,EAAwB,OAAO,iBAAA,KAA+C;AAC1E,MAAA,OAAO,sBAAA,CAAuB,iBAAyB,CAAA;AAAA,IAC3D;AAAA,GACJ;AACJ;;;;"}
package/dist/index23.js ADDED
@@ -0,0 +1,119 @@
+ import { registerTemplates, clearTemplates, getTemplates } from '@kjerneverk/riotprompt';
+
+ const TEMPLATES = {
+   // Standard transcription template (default)
+   "transcription-standard": {
+     persona: {
+       content: "You are an expert transcription assistant specializing in correcting misheard names and technical terms while preserving all original content."
+     },
+     constraints: [
+       { content: "Preserve ALL original content - this is NOT a summary." },
+       { content: "Only correct obvious transcription errors (misheard names, technical terms)." },
+       { content: "Maintain the original structure, flow, and speaking style." },
+       { content: "Never add information that was not in the original transcript." }
+     ],
+     tone: [
+       { content: "Professional and accurate." },
+       { content: "When uncertain about a correction, preserve the original text." }
+     ]
+   },
+   // Meeting notes template
+   "transcription-meeting": {
+     persona: {
+       content: "You are an expert transcription assistant specializing in meeting notes and discussions."
+     },
+     constraints: [
+       { content: "Preserve ALL discussion points, decisions, and action items." },
+       { content: "Correct names of participants and companies." },
+       { content: "Maintain the chronological flow of the discussion." },
+       { content: "Format action items and decisions clearly." }
+     ],
+     tone: [
+       { content: "Professional and organized." },
+       { content: "Clear and structured for easy reference." }
+     ]
+   },
+   // Technical discussion template
+   "transcription-technical": {
+     persona: {
+       content: "You are an expert transcription assistant with deep technical knowledge across software, engineering, and technology domains."
+     },
+     constraints: [
+       { content: "Preserve technical details and specifications accurately." },
+       { content: "Correct technical term spellings (APIs, frameworks, tools, protocols)." },
+       { content: "Maintain code snippets, commands, and technical references exactly." },
+       { content: "Preserve technical jargon and domain-specific terminology." }
+     ],
+     tone: [
+       { content: "Precise and technically accurate." },
+       { content: "Maintain the technical depth of the original discussion." }
+     ]
+   },
+   // Quick notes template (brief voice memos)
+   "transcription-quick": {
+     persona: {
+       content: "You are a transcription assistant for quick voice notes and brief memos."
+     },
+     constraints: [
+       { content: "Keep the original brevity and directness." },
+       { content: "Focus on capturing key information clearly." },
+       { content: "Correct obvious errors but maintain the informal style." }
+     ],
+     tone: [
+       { content: "Concise and direct." },
+       { content: "Preserve the informal, note-taking style." }
+     ]
+   },
+   // Interview template
+   "transcription-interview": {
+     persona: {
+       content: "You are an expert transcription assistant specializing in interviews and conversations."
+     },
+     constraints: [
+       { content: "Preserve ALL questions and answers completely." },
+       { content: "Maintain speaker attribution and conversational flow." },
+       { content: "Correct names and proper nouns mentioned." },
+       { content: "Keep the natural speaking patterns and emphasis." }
+     ],
+     tone: [
+       { content: "Accurate and respectful of the speakers' voices." },
+       { content: "Clear speaker identification throughout." }
+     ]
+   }
+ };
+ const initializeTemplates = () => {
+   registerTemplates(TEMPLATES);
+ };
+ const getTemplateNames = () => {
+   return Object.keys(getTemplates());
+ };
+ const clearAllTemplates = () => {
+   clearTemplates();
+ };
+ const selectTemplate = (transcriptText, hints) => {
+   if (hints?.isMeeting) return "transcription-meeting";
+   if (hints?.isTechnical) return "transcription-technical";
+   if (hints?.isQuick) return "transcription-quick";
+   if (hints?.isInterview) return "transcription-interview";
+   const lowerText = transcriptText.toLowerCase();
+   if (lowerText.includes("meeting") || lowerText.includes("agenda") || lowerText.includes("action item") || lowerText.includes("minutes") || lowerText.includes("attendees")) {
+     return "transcription-meeting";
+   }
+   if ((lowerText.match(/\binterviewer\b/g) || []).length > 1 || (lowerText.match(/\binterviewee\b/g) || []).length > 1 || (lowerText.match(/\bq:|question:/gi) || []).length > 2 || (lowerText.match(/\ba:|answer:/gi) || []).length > 2) {
+     return "transcription-interview";
+   }
+   if (lowerText.includes("code") || lowerText.includes("function") || lowerText.includes("api") || lowerText.includes("database") || lowerText.includes("server") || lowerText.includes("algorithm") || /\b(npm|git|docker|kubernetes|react|python|javascript|typescript)\b/i.test(transcriptText)) {
+     return "transcription-technical";
+   }
+   if (transcriptText.length < 300) {
+     return "transcription-quick";
+   }
+   return "transcription-standard";
+ };
+ const getTemplate = (name) => {
+   const templates = getTemplates();
+   return templates[name];
+ };
+
+ export { TEMPLATES, clearAllTemplates, getTemplate, getTemplateNames, initializeTemplates, selectTemplate };
+ //# sourceMappingURL=index23.js.map
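index23.js compiles src/prompt/templates.ts, which registers five transcription templates and auto-selects one from explicit hints or keyword and length heuristics. A short sketch of the selection behaviour implied by the code above; the import path is assumed.

```ts
// Sketch only: import path assumed.
import { initializeTemplates, selectTemplate } from '@redaksjon/protokoll-engine/prompt';

initializeTemplates(); // registers the transcription-* templates with riotprompt

selectTemplate('Quick reminder to send the invoice tomorrow.');
// -> 'transcription-quick' (no keyword hits, under 300 characters)

selectTemplate('Agenda and action items from the weekly planning meeting with the design team.');
// -> 'transcription-meeting' ("agenda", "meeting", "action item" keywords)

selectTemplate('A long recorded conversation between two people...', { isInterview: true });
// -> 'transcription-interview' (explicit hints take precedence over content detection)
```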
package/dist/index23.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index23.js","sources":["../src/prompt/templates.ts"],"sourcesContent":["/**\n * Transcription Prompt Templates\n * \n * Reusable templates for different transcription scenarios.\n * Uses RiotPrompt's template system for consistent prompt structure.\n */\n\nimport { registerTemplates, getTemplates, clearTemplates } from '@kjerneverk/riotprompt';\nimport type { TemplateConfig } from '@kjerneverk/riotprompt';\n\n/**\n * Available transcription templates\n */\nexport const TEMPLATES: Record<string, TemplateConfig> = {\n // Standard transcription template (default)\n 'transcription-standard': {\n persona: { \n content: 'You are an expert transcription assistant specializing in correcting misheard names and technical terms while preserving all original content.'\n },\n constraints: [\n { content: 'Preserve ALL original content - this is NOT a summary.' },\n { content: 'Only correct obvious transcription errors (misheard names, technical terms).' },\n { content: 'Maintain the original structure, flow, and speaking style.' },\n { content: 'Never add information that was not in the original transcript.' }\n ],\n tone: [\n { content: 'Professional and accurate.' },\n { content: 'When uncertain about a correction, preserve the original text.' }\n ]\n },\n \n // Meeting notes template\n 'transcription-meeting': {\n persona: { \n content: 'You are an expert transcription assistant specializing in meeting notes and discussions.'\n },\n constraints: [\n { content: 'Preserve ALL discussion points, decisions, and action items.' },\n { content: 'Correct names of participants and companies.' },\n { content: 'Maintain the chronological flow of the discussion.' },\n { content: 'Format action items and decisions clearly.' }\n ],\n tone: [\n { content: 'Professional and organized.' },\n { content: 'Clear and structured for easy reference.' }\n ]\n },\n \n // Technical discussion template\n 'transcription-technical': {\n persona: { \n content: 'You are an expert transcription assistant with deep technical knowledge across software, engineering, and technology domains.'\n },\n constraints: [\n { content: 'Preserve technical details and specifications accurately.' },\n { content: 'Correct technical term spellings (APIs, frameworks, tools, protocols).' },\n { content: 'Maintain code snippets, commands, and technical references exactly.' },\n { content: 'Preserve technical jargon and domain-specific terminology.' }\n ],\n tone: [\n { content: 'Precise and technically accurate.' },\n { content: 'Maintain the technical depth of the original discussion.' }\n ]\n },\n \n // Quick notes template (brief voice memos)\n 'transcription-quick': {\n persona: { \n content: 'You are a transcription assistant for quick voice notes and brief memos.'\n },\n constraints: [\n { content: 'Keep the original brevity and directness.' },\n { content: 'Focus on capturing key information clearly.' },\n { content: 'Correct obvious errors but maintain the informal style.' }\n ],\n tone: [\n { content: 'Concise and direct.' },\n { content: 'Preserve the informal, note-taking style.' }\n ]\n },\n \n // Interview template\n 'transcription-interview': {\n persona: {\n content: 'You are an expert transcription assistant specializing in interviews and conversations.'\n },\n constraints: [\n { content: 'Preserve ALL questions and answers completely.' },\n { content: 'Maintain speaker attribution and conversational flow.' },\n { content: 'Correct names and proper nouns mentioned.' 
},\n { content: 'Keep the natural speaking patterns and emphasis.' }\n ],\n tone: [\n { content: 'Accurate and respectful of the speakers\\' voices.' },\n { content: 'Clear speaker identification throughout.' }\n ]\n }\n};\n\n/**\n * Initialize templates by registering them with RiotPrompt\n * Should be called once at application startup\n */\nexport const initializeTemplates = (): void => {\n registerTemplates(TEMPLATES);\n};\n\n/**\n * Get list of available template names\n */\nexport const getTemplateNames = (): string[] => {\n return Object.keys(getTemplates());\n};\n\n/**\n * Clear all registered templates\n * Useful for testing or reinitialization\n */\nexport const clearAllTemplates = (): void => {\n clearTemplates();\n};\n\n/**\n * Auto-select appropriate template based on transcript content\n * \n * @param transcriptText - The transcript text to analyze\n * @param hints - Optional hints about the transcript type\n * @returns Template name to use\n */\nexport const selectTemplate = (\n transcriptText: string,\n hints?: { \n isMeeting?: boolean; \n isTechnical?: boolean; \n isQuick?: boolean;\n isInterview?: boolean;\n }\n): string => {\n // Check explicit hints first\n if (hints?.isMeeting) return 'transcription-meeting';\n if (hints?.isTechnical) return 'transcription-technical';\n if (hints?.isQuick) return 'transcription-quick';\n if (hints?.isInterview) return 'transcription-interview';\n \n // Auto-detect from content\n const lowerText = transcriptText.toLowerCase();\n \n // Meeting indicators\n if (lowerText.includes('meeting') || \n lowerText.includes('agenda') || \n lowerText.includes('action item') ||\n lowerText.includes('minutes') ||\n lowerText.includes('attendees')) {\n return 'transcription-meeting';\n }\n \n // Interview indicators (check before length-based quick detection)\n if ((lowerText.match(/\\binterviewer\\b/g) || []).length > 1 ||\n (lowerText.match(/\\binterviewee\\b/g) || []).length > 1 ||\n (lowerText.match(/\\bq:|question:/gi) || []).length > 2 ||\n (lowerText.match(/\\ba:|answer:/gi) || []).length > 2) {\n return 'transcription-interview';\n }\n \n // Technical indicators\n if (lowerText.includes('code') || \n lowerText.includes('function') || \n lowerText.includes('api') ||\n lowerText.includes('database') ||\n lowerText.includes('server') ||\n lowerText.includes('algorithm') ||\n /\\b(npm|git|docker|kubernetes|react|python|javascript|typescript)\\b/i.test(transcriptText)) {\n return 'transcription-technical';\n }\n \n // Quick note indicators (short length) - check last to avoid false positives\n // Use a more conservative threshold to avoid misclassifying short but substantial content\n if (transcriptText.length < 300) {\n return 'transcription-quick';\n }\n \n // Default to standard\n return 'transcription-standard';\n};\n\n/**\n * Get template configuration by name\n * \n * @param name - Template name\n * @returns Template configuration or undefined if not found\n */\nexport const getTemplate = (name: string): TemplateConfig | undefined => {\n const templates = getTemplates();\n return 
templates[name];\n};\n"],"names":[],"mappings":";;AAaO,MAAM,SAAA,GAA4C;AAAA;AAAA,EAErD,wBAAA,EAA0B;AAAA,IACtB,OAAA,EAAS;AAAA,MACL,OAAA,EAAS;AAAA,KACb;AAAA,IACA,WAAA,EAAa;AAAA,MACT,EAAE,SAAS,wDAAA,EAAyD;AAAA,MACpE,EAAE,SAAS,8EAAA,EAA+E;AAAA,MAC1F,EAAE,SAAS,4DAAA,EAA6D;AAAA,MACxE,EAAE,SAAS,gEAAA;AAAiE,KAChF;AAAA,IACA,IAAA,EAAM;AAAA,MACF,EAAE,SAAS,4BAAA,EAA6B;AAAA,MACxC,EAAE,SAAS,gEAAA;AAAiE;AAChF,GACJ;AAAA;AAAA,EAGA,uBAAA,EAAyB;AAAA,IACrB,OAAA,EAAS;AAAA,MACL,OAAA,EAAS;AAAA,KACb;AAAA,IACA,WAAA,EAAa;AAAA,MACT,EAAE,SAAS,8DAAA,EAA+D;AAAA,MAC1E,EAAE,SAAS,8CAAA,EAA+C;AAAA,MAC1D,EAAE,SAAS,oDAAA,EAAqD;AAAA,MAChE,EAAE,SAAS,4CAAA;AAA6C,KAC5D;AAAA,IACA,IAAA,EAAM;AAAA,MACF,EAAE,SAAS,6BAAA,EAA8B;AAAA,MACzC,EAAE,SAAS,0CAAA;AAA2C;AAC1D,GACJ;AAAA;AAAA,EAGA,yBAAA,EAA2B;AAAA,IACvB,OAAA,EAAS;AAAA,MACL,OAAA,EAAS;AAAA,KACb;AAAA,IACA,WAAA,EAAa;AAAA,MACT,EAAE,SAAS,2DAAA,EAA4D;AAAA,MACvE,EAAE,SAAS,wEAAA,EAAyE;AAAA,MACpF,EAAE,SAAS,qEAAA,EAAsE;AAAA,MACjF,EAAE,SAAS,4DAAA;AAA6D,KAC5E;AAAA,IACA,IAAA,EAAM;AAAA,MACF,EAAE,SAAS,mCAAA,EAAoC;AAAA,MAC/C,EAAE,SAAS,0DAAA;AAA2D;AAC1E,GACJ;AAAA;AAAA,EAGA,qBAAA,EAAuB;AAAA,IACnB,OAAA,EAAS;AAAA,MACL,OAAA,EAAS;AAAA,KACb;AAAA,IACA,WAAA,EAAa;AAAA,MACT,EAAE,SAAS,2CAAA,EAA4C;AAAA,MACvD,EAAE,SAAS,6CAAA,EAA8C;AAAA,MACzD,EAAE,SAAS,yDAAA;AAA0D,KACzE;AAAA,IACA,IAAA,EAAM;AAAA,MACF,EAAE,SAAS,qBAAA,EAAsB;AAAA,MACjC,EAAE,SAAS,2CAAA;AAA4C;AAC3D,GACJ;AAAA;AAAA,EAGA,yBAAA,EAA2B;AAAA,IACvB,OAAA,EAAS;AAAA,MACL,OAAA,EAAS;AAAA,KACb;AAAA,IACA,WAAA,EAAa;AAAA,MACT,EAAE,SAAS,gDAAA,EAAiD;AAAA,MAC5D,EAAE,SAAS,uDAAA,EAAwD;AAAA,MACnE,EAAE,SAAS,2CAAA,EAA4C;AAAA,MACvD,EAAE,SAAS,kDAAA;AAAmD,KAClE;AAAA,IACA,IAAA,EAAM;AAAA,MACF,EAAE,SAAS,kDAAA,EAAoD;AAAA,MAC/D,EAAE,SAAS,0CAAA;AAA2C;AAC1D;AAER;AAMO,MAAM,sBAAsB,MAAY;AAC3C,EAAA,iBAAA,CAAkB,SAAS,CAAA;AAC/B;AAKO,MAAM,mBAAmB,MAAgB;AAC5C,EAAA,OAAO,MAAA,CAAO,IAAA,CAAK,YAAA,EAAc,CAAA;AACrC;AAMO,MAAM,oBAAoB,MAAY;AACzC,EAAA,cAAA,EAAe;AACnB;AASO,MAAM,cAAA,GAAiB,CAC1B,cAAA,EACA,KAAA,KAMS;AAET,EAAA,IAAI,KAAA,EAAO,WAAW,OAAO,uBAAA;AAC7B,EAAA,IAAI,KAAA,EAAO,aAAa,OAAO,yBAAA;AAC/B,EAAA,IAAI,KAAA,EAAO,SAAS,OAAO,qBAAA;AAC3B,EAAA,IAAI,KAAA,EAAO,aAAa,OAAO,yBAAA;AAG/B,EAAA,MAAM,SAAA,GAAY,eAAe,WAAA,EAAY;AAG7C,EAAA,IAAI,UAAU,QAAA,CAAS,SAAS,KAC5B,SAAA,CAAU,QAAA,CAAS,QAAQ,CAAA,IAC3B,SAAA,CAAU,SAAS,aAAa,CAAA,IAChC,UAAU,QAAA,CAAS,SAAS,KAC5B,SAAA,CAAU,QAAA,CAAS,WAAW,CAAA,EAAG;AACjC,IAAA,OAAO,uBAAA;AAAA,EACX;AAGA,EAAA,IAAA,CAAK,SAAA,CAAU,KAAA,CAAM,kBAAkB,CAAA,IAAK,EAAC,EAAG,MAAA,GAAS,CAAA,IAAA,CACpD,SAAA,CAAU,KAAA,CAAM,kBAAkB,CAAA,IAAK,EAAC,EAAG,MAAA,GAAS,CAAA,IAAA,CACpD,SAAA,CAAU,KAAA,CAAM,kBAAkB,CAAA,IAAK,IAAI,MAAA,GAAS,CAAA,IAAA,CACpD,SAAA,CAAU,KAAA,CAAM,gBAAgB,CAAA,IAAK,EAAC,EAAG,SAAS,CAAA,EAAG;AACtD,IAAA,OAAO,yBAAA;AAAA,EACX;AAGA,EAAA,IAAI,SAAA,CAAU,QAAA,CAAS,MAAM,CAAA,IACzB,SAAA,CAAU,QAAA,CAAS,UAAU,CAAA,IAC7B,SAAA,CAAU,QAAA,CAAS,KAAK,CAAA,IACxB,SAAA,CAAU,QAAA,CAAS,UAAU,CAAA,IAC7B,SAAA,CAAU,QAAA,CAAS,QAAQ,CAAA,IAC3B,SAAA,CAAU,QAAA,CAAS,WAAW,CAAA,IAC9B,qEAAA,CAAsE,IAAA,CAAK,cAAc,CAAA,EAAG;AAC5F,IAAA,OAAO,yBAAA;AAAA,EACX;AAIA,EAAA,IAAI,cAAA,CAAe,SAAS,GAAA,EAAK;AAC7B,IAAA,OAAO,qBAAA;AAAA,EACX;AAGA,EAAA,OAAO,wBAAA;AACX;AAQO,MAAM,WAAA,GAAc,CAAC,IAAA,KAA6C;AACrE,EAAA,MAAM,YAAY,YAAA,EAAa;AAC/B,EAAA,OAAO,UAAU,IAAI,CAAA;AACzB;;;;"}