speechflow 1.7.1 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/CHANGELOG.md +18 -0
  2. package/README.md +387 -119
  3. package/etc/claude.md +5 -5
  4. package/etc/speechflow.yaml +2 -2
  5. package/package.json +3 -3
  6. package/speechflow-cli/dst/speechflow-main-graph.d.ts +1 -0
  7. package/speechflow-cli/dst/speechflow-main-graph.js +28 -5
  8. package/speechflow-cli/dst/speechflow-main-graph.js.map +1 -1
  9. package/speechflow-cli/dst/speechflow-node-a2a-wav.js +24 -4
  10. package/speechflow-cli/dst/speechflow-node-a2a-wav.js.map +1 -1
  11. package/speechflow-cli/dst/speechflow-node-a2t-google.d.ts +17 -0
  12. package/speechflow-cli/dst/speechflow-node-a2t-google.js +320 -0
  13. package/speechflow-cli/dst/speechflow-node-a2t-google.js.map +1 -0
  14. package/speechflow-cli/dst/speechflow-node-t2a-google.d.ts +15 -0
  15. package/speechflow-cli/dst/speechflow-node-t2a-google.js +218 -0
  16. package/speechflow-cli/dst/speechflow-node-t2a-google.js.map +1 -0
  17. package/speechflow-cli/dst/speechflow-node-t2a-openai.d.ts +15 -0
  18. package/speechflow-cli/dst/speechflow-node-t2a-openai.js +195 -0
  19. package/speechflow-cli/dst/speechflow-node-t2a-openai.js.map +1 -0
  20. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.d.ts +17 -0
  21. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js +608 -0
  22. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js.map +1 -0
  23. package/speechflow-cli/dst/speechflow-node-t2t-amazon.js.map +1 -1
  24. package/speechflow-cli/dst/{speechflow-node-t2t-transformers.d.ts → speechflow-node-t2t-opus.d.ts} +1 -3
  25. package/speechflow-cli/dst/speechflow-node-t2t-opus.js +159 -0
  26. package/speechflow-cli/dst/speechflow-node-t2t-opus.js.map +1 -0
  27. package/speechflow-cli/dst/speechflow-node-t2t-profanity.d.ts +11 -0
  28. package/speechflow-cli/dst/speechflow-node-t2t-profanity.js +118 -0
  29. package/speechflow-cli/dst/speechflow-node-t2t-profanity.js.map +1 -0
  30. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.d.ts +13 -0
  31. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js +220 -0
  32. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js.map +1 -0
  33. package/speechflow-cli/dst/{speechflow-node-t2t-openai.d.ts → speechflow-node-t2t-spellcheck.d.ts} +2 -2
  34. package/speechflow-cli/dst/{speechflow-node-t2t-openai.js → speechflow-node-t2t-spellcheck.js} +47 -99
  35. package/speechflow-cli/dst/speechflow-node-t2t-spellcheck.js.map +1 -0
  36. package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js +3 -6
  37. package/speechflow-cli/dst/speechflow-node-t2t-subtitle.js.map +1 -1
  38. package/speechflow-cli/dst/speechflow-node-t2t-summary.d.ts +16 -0
  39. package/speechflow-cli/dst/speechflow-node-t2t-summary.js +241 -0
  40. package/speechflow-cli/dst/speechflow-node-t2t-summary.js.map +1 -0
  41. package/speechflow-cli/dst/{speechflow-node-t2t-ollama.d.ts → speechflow-node-t2t-translate.d.ts} +2 -2
  42. package/speechflow-cli/dst/{speechflow-node-t2t-transformers.js → speechflow-node-t2t-translate.js} +53 -115
  43. package/speechflow-cli/dst/speechflow-node-t2t-translate.js.map +1 -0
  44. package/speechflow-cli/dst/speechflow-node-xio-exec.d.ts +12 -0
  45. package/speechflow-cli/dst/speechflow-node-xio-exec.js +223 -0
  46. package/speechflow-cli/dst/speechflow-node-xio-exec.js.map +1 -0
  47. package/speechflow-cli/dst/speechflow-node-xio-file.d.ts +1 -0
  48. package/speechflow-cli/dst/speechflow-node-xio-file.js +79 -66
  49. package/speechflow-cli/dst/speechflow-node-xio-file.js.map +1 -1
  50. package/speechflow-cli/dst/speechflow-node-xio-vban.d.ts +17 -0
  51. package/speechflow-cli/dst/speechflow-node-xio-vban.js +330 -0
  52. package/speechflow-cli/dst/speechflow-node-xio-vban.js.map +1 -0
  53. package/speechflow-cli/dst/speechflow-node-xio-webrtc.d.ts +39 -0
  54. package/speechflow-cli/dst/speechflow-node-xio-webrtc.js +500 -0
  55. package/speechflow-cli/dst/speechflow-node-xio-webrtc.js.map +1 -0
  56. package/speechflow-cli/dst/speechflow-util-audio.js +4 -5
  57. package/speechflow-cli/dst/speechflow-util-audio.js.map +1 -1
  58. package/speechflow-cli/dst/speechflow-util-error.d.ts +1 -0
  59. package/speechflow-cli/dst/speechflow-util-error.js +5 -0
  60. package/speechflow-cli/dst/speechflow-util-error.js.map +1 -1
  61. package/speechflow-cli/dst/speechflow-util-llm.d.ts +35 -0
  62. package/speechflow-cli/dst/speechflow-util-llm.js +363 -0
  63. package/speechflow-cli/dst/speechflow-util-llm.js.map +1 -0
  64. package/speechflow-cli/dst/speechflow-util.d.ts +1 -0
  65. package/speechflow-cli/dst/speechflow-util.js +1 -0
  66. package/speechflow-cli/dst/speechflow-util.js.map +1 -1
  67. package/speechflow-cli/etc/oxlint.jsonc +2 -1
  68. package/speechflow-cli/package.json +34 -17
  69. package/speechflow-cli/src/lib.d.ts +5 -0
  70. package/speechflow-cli/src/speechflow-main-graph.ts +31 -5
  71. package/speechflow-cli/src/speechflow-node-a2a-wav.ts +24 -4
  72. package/speechflow-cli/src/speechflow-node-a2t-google.ts +322 -0
  73. package/speechflow-cli/src/speechflow-node-t2a-google.ts +206 -0
  74. package/speechflow-cli/src/speechflow-node-t2a-openai.ts +179 -0
  75. package/speechflow-cli/src/speechflow-node-t2a-supertonic.ts +701 -0
  76. package/speechflow-cli/src/speechflow-node-t2t-amazon.ts +2 -1
  77. package/speechflow-cli/src/speechflow-node-t2t-opus.ts +136 -0
  78. package/speechflow-cli/src/speechflow-node-t2t-profanity.ts +93 -0
  79. package/speechflow-cli/src/speechflow-node-t2t-punctuation.ts +201 -0
  80. package/speechflow-cli/src/{speechflow-node-t2t-openai.ts → speechflow-node-t2t-spellcheck.ts} +48 -107
  81. package/speechflow-cli/src/speechflow-node-t2t-subtitle.ts +3 -6
  82. package/speechflow-cli/src/speechflow-node-t2t-summary.ts +229 -0
  83. package/speechflow-cli/src/speechflow-node-t2t-translate.ts +181 -0
  84. package/speechflow-cli/src/speechflow-node-xio-exec.ts +210 -0
  85. package/speechflow-cli/src/speechflow-node-xio-file.ts +92 -79
  86. package/speechflow-cli/src/speechflow-node-xio-vban.ts +325 -0
  87. package/speechflow-cli/src/speechflow-node-xio-webrtc.ts +533 -0
  88. package/speechflow-cli/src/speechflow-util-audio.ts +5 -5
  89. package/speechflow-cli/src/speechflow-util-error.ts +9 -0
  90. package/speechflow-cli/src/speechflow-util-llm.ts +367 -0
  91. package/speechflow-cli/src/speechflow-util.ts +1 -0
  92. package/speechflow-ui-db/package.json +9 -9
  93. package/speechflow-ui-st/package.json +9 -9
  94. package/speechflow-cli/dst/speechflow-node-t2t-ollama.js +0 -293
  95. package/speechflow-cli/dst/speechflow-node-t2t-ollama.js.map +0 -1
  96. package/speechflow-cli/dst/speechflow-node-t2t-openai.js.map +0 -1
  97. package/speechflow-cli/dst/speechflow-node-t2t-transformers.js.map +0 -1
  98. package/speechflow-cli/src/speechflow-node-t2t-ollama.ts +0 -281
  99. package/speechflow-cli/src/speechflow-node-t2t-transformers.ts +0 -247
package/speechflow-cli/src/speechflow-util-llm.ts
@@ -0,0 +1,367 @@
+ /*
+ ** SpeechFlow - Speech Processing Flow Graph
+ ** Copyright (c) 2024-2025 Dr. Ralf S. Engelschall <rse@engelschall.com>
+ ** Licensed under GPL 3.0 <https://spdx.org/licenses/GPL-3.0-only>
+ */
+
+ /* standard dependencies */
+ import EventEmitter from "node:events"
+
+ /* external dependencies */
+ import OpenAI from "openai"
+ import Anthropic from "@anthropic-ai/sdk"
+ import { GoogleGenAI } from "@google/genai"
+ import { Ollama, type ListResponse } from "ollama"
+ import * as Transformers from "@huggingface/transformers"
+
+ /* own utility types */
+ export type LLMCompleteMessage = {
+     role: "system" | "user" | "assistant"
+     content: string
+ }
+ export type LLMConfig = {
+     provider?: "openai" | "anthropic" | "google" | "ollama" | "transformers"
+     api?: string
+     model?: string
+     key?: string
+     timeout?: number
+     temperature?: number
+     maxTokens?: number
+     topP?: number
+     cacheDir?: string
+ }
+ export type LLMCompleteOptions = {
+     system?: string
+     messages?: LLMCompleteMessage[]
+     prompt: string
+ }
+
+ /* LLM class for unified LLM access */
+ export class LLM extends EventEmitter {
+     /* internal state */
+     private config: Required<LLMConfig>
+     private openai: OpenAI | null = null
+     private anthropic: Anthropic | null = null
+     private google: GoogleGenAI | null = null
+     private ollama: Ollama | null = null
+     private transformer: Transformers.TextGenerationPipeline | null = null
+     private initialized = false
+
+     /* construct LLM instance */
+     constructor (config: LLMConfig) {
+         /* pass-through to EventEmitter */
+         super()
+
+         /* provide configuration defaults */
+         this.config = {
+             provider: "openai",
+             api: "",
+             model: "",
+             key: "",
+             timeout: 30 * 1000,
+             temperature: 0.7,
+             maxTokens: 1024,
+             topP: 0.5,
+             cacheDir: "",
+             ...config
+         } as Required<LLMConfig>
+
+         /* validate configuration options */
+         if (this.config.key === "") {
+             if (this.config.provider === "openai")
+                 this.config.key = process.env.SPEECHFLOW_OPENAI_KEY ?? ""
+             else if (this.config.provider === "anthropic")
+                 this.config.key = process.env.SPEECHFLOW_ANTHROPIC_KEY ?? ""
+             else if (this.config.provider === "google")
+                 this.config.key = process.env.SPEECHFLOW_GOOGLE_KEY ?? ""
+             if (this.config.provider.match(/^(?:openai|anthropic|google)$/) && this.config.key === "")
+                 throw new Error(`API key is required for provider "${this.config.provider}"`)
+         }
+         if (this.config.model === "")
+             throw new Error("model is required")
+     }
+
+     /* internal logging helper */
+     private log (level: "info" | "warning" | "error", message: string): void {
+         this.emit("log", level, message)
+     }
+
+     /* initialize the LLM client */
+     async open (): Promise<void> {
+         if (this.initialized)
+             return
+         if (this.config.provider === "openai") {
+             /* instantiate OpenAI API */
+             this.openai = new OpenAI({
+                 ...(this.config.api !== "" ? { baseURL: this.config.api } : {}),
+                 apiKey: this.config.key,
+                 timeout: this.config.timeout
+             })
+         }
+         else if (this.config.provider === "anthropic") {
+             /* instantiate Anthropic API */
+             this.anthropic = new Anthropic({
+                 ...(this.config.api !== "" ? { baseURL: this.config.api } : {}),
+                 apiKey: this.config.key,
+                 timeout: this.config.timeout
+             })
+         }
+         else if (this.config.provider === "google") {
+             /* instantiate Google API */
+             this.google = new GoogleGenAI({
+                 apiKey: this.config.key,
+                 httpOptions: {
+                     timeout: this.config.timeout,
+                     ...(this.config.api !== "" ? { baseUrl: this.config.api } : {})
+                 }
+             })
+         }
+         else if (this.config.provider === "ollama") {
+             /* instantiate Ollama API */
+             this.ollama = new Ollama({ host: this.config.api })
+
+             /* ensure the model is available */
+             let models: ListResponse
+             try {
+                 models = await this.ollama.list()
+             }
+             catch (err) {
+                 throw new Error(`failed to connect to Ollama API at ${this.config.api}: ${err}`, { cause: err })
+             }
+             const exists = models.models.some((m) => m.name === this.config.model)
+             if (!exists) {
+                 this.log("info", `LLM: model "${this.config.model}" still not present in Ollama -- ` +
+                     "automatically downloading model")
+                 let artifact = ""
+                 let percent = 0
+                 let lastLoggedPercent = -1
+                 const interval = setInterval(() => {
+                     if (percent !== lastLoggedPercent) {
+                         this.log("info", `LLM: downloaded ${percent.toFixed(2)}% of artifact "${artifact}"`)
+                         lastLoggedPercent = percent
+                     }
+                 }, 1000)
+                 try {
+                     const progress = await this.ollama.pull({ model: this.config.model, stream: true })
+                     for await (const event of progress) {
+                         if (event.digest)
+                             artifact = event.digest
+                         if (event.completed && event.total)
+                             percent = (event.completed / event.total) * 100
+                     }
+                 }
+                 finally {
+                     clearInterval(interval)
+                 }
+             }
+         }
+         else if (this.config.provider === "transformers") {
+             /* track download progress when instantiating Transformers pipeline */
+             const progressState = new Map<string, number>()
+             const progressCallback: Transformers.ProgressCallback = (progress: any) => {
+                 let artifact = this.config.model
+                 if (typeof progress.file === "string")
+                     artifact += `:${progress.file}`
+                 let percent = 0
+                 if (typeof progress.loaded === "number" && typeof progress.total === "number")
+                     percent = (progress.loaded / progress.total) * 100
+                 else if (typeof progress.progress === "number")
+                     percent = progress.progress
+                 if (percent > 0)
+                     progressState.set(artifact, percent)
+             }
+             const interval = setInterval(() => {
+                 for (const [ artifact, percent ] of progressState) {
+                     this.log("info", `LLM: downloaded ${percent.toFixed(2)}% of artifact "${artifact}"`)
+                     if (percent >= 100.0)
+                         progressState.delete(artifact)
+                 }
+             }, 1000)
+
+             /* instantiate HuggingFace Transformers text generation pipeline */
+             try {
+                 const pipelinePromise = Transformers.pipeline("text-generation", this.config.model, {
+                     ...(this.config.cacheDir !== "" ? { cache_dir: this.config.cacheDir } : {}),
+                     dtype: "q4",
+                     device: "auto",
+                     progress_callback: progressCallback
+                 })
+                 this.transformer = await pipelinePromise
+             }
+             catch (err) {
+                 throw new Error(`failed to instantiate HuggingFace Transformers pipeline: ${err}`, { cause: err })
+             }
+             finally {
+                 clearInterval(interval)
+             }
+             if (this.transformer === null)
+                 throw new Error("failed to instantiate HuggingFace Transformers pipeline")
+         }
+         else {
+             const exhaustive: never = this.config.provider
+             throw new Error(`unsupported LLM provider: ${exhaustive}`)
+         }
+         this.log("info", `LLM: initialized ${this.config.provider} client ` +
+             `(${this.config.api !== "" ? `api: ${this.config.api}, ` : ""}model: ${this.config.model})`)
+         this.initialized = true
+     }
+
+     /* perform a completion */
+     async complete (options: LLMCompleteOptions): Promise<string> {
+         if (!this.initialized)
+             throw new Error("LLM still not initialized")
+
+         /* build messages array */
+         const messages: LLMCompleteMessage[] = []
+         if (options.system)
+             messages.push({ role: "system", content: options.system })
+         if (options.messages)
+             messages.push(...options.messages)
+         messages.push({ role: "user", content: options.prompt })
+
+         /* perform LLM query */
+         if (this.config.provider === "openai") {
+             if (!this.openai)
+                 throw new Error("OpenAI client not available")
+
+             /* perform OpenAI chat completion */
+             const completion = await this.openai.chat.completions.create({
+                 model: this.config.model,
+                 max_tokens: this.config.maxTokens,
+                 temperature: this.config.temperature,
+                 top_p: this.config.topP,
+                 messages: messages as OpenAI.ChatCompletionMessageParam[]
+             }).catch((err) => {
+                 throw new Error(`failed to perform OpenAI chat completion: ${err}`, { cause: err })
+             })
+             const content = completion?.choices?.[0]?.message?.content
+             if (!content)
+                 throw new Error("OpenAI API returned empty content")
+             return content
+         }
+         else if (this.config.provider === "anthropic") {
+             if (!this.anthropic)
+                 throw new Error("Anthropic client not available")
+
+             /* separate system message from other messages for Anthropic API */
+             const systemMessage = messages.find((m) => m.role === "system")
+             const chatMessages = messages.filter((m) => m.role !== "system")
+
+             /* perform Anthropic chat completion */
+             const message = await this.anthropic.messages.create({
+                 model: this.config.model,
+                 max_tokens: this.config.maxTokens,
+                 temperature: this.config.temperature,
+                 top_p: this.config.topP,
+                 system: systemMessage?.content,
+                 messages: chatMessages as Anthropic.MessageParam[]
+             }).catch((err) => {
+                 throw new Error(`failed to perform Anthropic chat completion: ${err}`, { cause: err })
+             })
+             const content = message?.content?.[0]
+             if (!content || content.type !== "text")
+                 throw new Error("Anthropic API returned empty or non-text content")
+             return content.text
+         }
+         else if (this.config.provider === "google") {
+             if (!this.google)
+                 throw new Error("Google client not available")
+
+             /* convert messages for Google API */
+             const systemInstruction =
+                 messages.find((m) => m.role === "system")?.content
+             const contents =
+                 messages.filter((m) => m.role !== "system").map((m) => ({
+                     role: m.role === "assistant" ? "model" : "user",
+                     parts: [ { text: m.content } ]
+                 }))
+
+             /* perform Google chat completion */
+             const response = await this.google.models.generateContent({
+                 model: this.config.model,
+                 contents,
+                 config: {
+                     maxOutputTokens: this.config.maxTokens,
+                     temperature: this.config.temperature,
+                     topP: this.config.topP,
+                     ...(systemInstruction ? { systemInstruction } : {})
+                 }
+             }).catch((err) => {
+                 throw new Error(`failed to perform Google chat completion: ${err}`, { cause: err })
+             })
+             const content = response?.text
+             if (!content)
+                 throw new Error("Google API returned empty content")
+             return content
+         }
+         else if (this.config.provider === "ollama") {
+             if (!this.ollama)
+                 throw new Error("Ollama client not available")
+
+             /* perform Ollama chat completion */
+             const response = await this.ollama.chat({
+                 model: this.config.model,
+                 messages,
+                 keep_alive: "10m",
+                 options: {
+                     num_predict: this.config.maxTokens,
+                     temperature: this.config.temperature,
+                     top_p: this.config.topP
+                 }
+             }).catch((err) => {
+                 throw new Error(`failed to perform Ollama chat completion: ${err}`, { cause: err })
+             })
+             const content = response?.message?.content
+             if (!content)
+                 throw new Error("Ollama API returned empty content")
+             return content
+         }
+         else if (this.config.provider === "transformers") {
+             if (!this.transformer)
+                 throw new Error("HuggingFace Transformers pipeline not available")
+
+             /* perform HuggingFace Transformers text generation */
+             const result = await this.transformer(messages, {
+                 max_new_tokens: this.config.maxTokens,
+                 temperature: this.config.temperature,
+                 top_p: this.config.topP,
+                 do_sample: true
+             }).catch((err) => {
+                 throw new Error(`failed to perform HuggingFace Transformers text generation: ${err}`, { cause: err })
+             })
+             const single = Array.isArray(result) ? result[0] : result
+             const generatedText = (single as Transformers.TextGenerationSingle).generated_text
+             const content = typeof generatedText === "string" ?
+                 generatedText :
+                 generatedText.at(-1)?.content
+             if (!content)
+                 throw new Error("HuggingFace Transformers API returned empty content")
+             return content
+         }
+         else {
+             const exhaustive: never = this.config.provider
+             throw new Error(`unsupported LLM provider: ${exhaustive}`)
+         }
+     }
+
+     /* close the LLM client */
+     async close (): Promise<void> {
+         if (!this.initialized)
+             return
+         if (this.config.provider === "openai")
+             this.openai = null
+         else if (this.config.provider === "anthropic")
+             this.anthropic = null
+         else if (this.config.provider === "google")
+             this.google = null
+         else if (this.config.provider === "ollama") {
+             this.ollama?.abort()
+             this.ollama = null
+         }
+         else if (this.config.provider === "transformers") {
+             this.transformer?.dispose()
+             this.transformer = null
+         }
+         this.initialized = false
+     }
+ }
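The new speechflow-util-llm.ts module gives the reworked text-to-text nodes (spellcheck, translate, summary, etc.) a single provider-agnostic completion API in place of the removed per-provider nodes. A minimal usage sketch against the class shown above; the host, model id, and prompts are illustrative placeholders, not values from the package:

    import { LLM } from "./speechflow-util-llm"

    /* construct the client ("model" is mandatory; for "openai", "anthropic"
       and "google" the key falls back to the SPEECHFLOW_*_KEY environment
       variables; the host and model below are placeholders) */
    const llm = new LLM({
        provider: "ollama",
        api:      "http://127.0.0.1:11434",
        model:    "gemma3:4b"
    })

    /* the class is an EventEmitter and reports progress as "log" events */
    llm.on("log", (level: string, message: string) => {
        console.log(`[${level}] ${message}`)
    })

    /* initialize the provider client, run one completion, tear down */
    await llm.open()
    const answer = await llm.complete({
        system: "You are a concise assistant.",
        prompt: "Summarize: SpeechFlow processes speech as a flow graph."
    })
    console.log(answer)
    await llm.close()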
package/speechflow-cli/src/speechflow-util.ts
@@ -9,3 +9,4 @@ export * from "./speechflow-util-error"
  export * from "./speechflow-util-stream"
  export * from "./speechflow-util-queue"
  export * from "./speechflow-util-misc"
+ export * from "./speechflow-util-llm"
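With the barrel export added above, code inside speechflow-cli can obtain the class through the shared utility module instead of the concrete file. A sketch, assuming the same relative layout (the model id is a placeholder):

    import { LLM, type LLMConfig } from "./speechflow-util"

    const config: LLMConfig = { provider: "openai", model: "gpt-4o-mini" } /* placeholder model id */
    const llm = new LLM(config) /* key taken from SPEECHFLOW_OPENAI_KEY */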
package/speechflow-ui-db/package.json
@@ -18,17 +18,17 @@
  "luxon": "3.7.2",
  "@opensumi/reconnecting-websocket": "4.4.0",
  "axios": "1.13.2",
- "typopro-web": "4.2.7",
+ "typopro-web": "4.2.8",
  "@fortawesome/fontawesome-free": "7.1.0",
  "patch-package": "8.0.1",
  "@rse/stx": "1.1.2"
  },
  "devDependencies": {
- "vite": "7.2.6",
- "typescript-eslint": "8.48.1",
- "@typescript-eslint/eslint-plugin": "8.48.1",
- "@typescript-eslint/parser": "8.48.1",
- "@vitejs/plugin-vue": "6.0.2",
+ "vite": "7.2.7",
+ "typescript-eslint": "8.49.0",
+ "@typescript-eslint/eslint-plugin": "8.49.0",
+ "@typescript-eslint/parser": "8.49.0",
+ "@vitejs/plugin-vue": "6.0.3",
  "@rollup/plugin-yaml": "4.1.2",
  "vite-plugin-node-polyfills": "0.24.0",
  "vite-svg-loader": "5.1.0",
@@ -37,8 +37,8 @@

  "@vue/eslint-config-typescript": "14.6.0",
  "vue-eslint-parser": "10.2.0",
- "eslint": "9.39.1",
- "@eslint/js": "9.39.1",
+ "eslint": "9.39.2",
+ "@eslint/js": "9.39.2",
  "neostandard": "0.12.2",
  "eslint-plugin-import": "2.32.0",
  "eslint-plugin-vue": "10.6.2",
@@ -58,7 +58,7 @@
  "postcss-html": "1.8.0",
  "stylus": "0.64.0",
  "typescript": "5.9.3",
- "vue-tsc": "3.1.7",
+ "vue-tsc": "3.1.8",
  "delay-cli": "3.0.0",
  "cross-env": "10.1.0",
  "serve": "14.2.5",
package/speechflow-ui-st/package.json
@@ -19,18 +19,18 @@
  "luxon": "3.7.2",
  "@opensumi/reconnecting-websocket": "4.4.0",
  "axios": "1.13.2",
- "typopro-web": "4.2.7",
+ "typopro-web": "4.2.8",
  "@fortawesome/fontawesome-free": "7.1.0",
  "patch-package": "8.0.1",
  "@rse/stx": "1.1.2",
  "animejs": "4.2.2"
  },
  "devDependencies": {
- "vite": "7.2.6",
- "typescript-eslint": "8.48.1",
- "@typescript-eslint/eslint-plugin": "8.48.1",
- "@typescript-eslint/parser": "8.48.1",
- "@vitejs/plugin-vue": "6.0.2",
+ "vite": "7.2.7",
+ "typescript-eslint": "8.49.0",
+ "@typescript-eslint/eslint-plugin": "8.49.0",
+ "@typescript-eslint/parser": "8.49.0",
+ "@vitejs/plugin-vue": "6.0.3",
  "@rollup/plugin-yaml": "4.1.2",
  "vite-plugin-node-polyfills": "0.24.0",
  "vite-svg-loader": "5.1.0",
@@ -39,8 +39,8 @@

  "@vue/eslint-config-typescript": "14.6.0",
  "vue-eslint-parser": "10.2.0",
- "eslint": "9.39.1",
- "@eslint/js": "9.39.1",
+ "eslint": "9.39.2",
+ "@eslint/js": "9.39.2",
  "neostandard": "0.12.2",
  "eslint-plugin-import": "2.32.0",
  "eslint-plugin-vue": "10.6.2",
@@ -60,7 +60,7 @@
  "postcss-html": "1.8.0",
  "stylus": "0.64.0",
  "typescript": "5.9.3",
- "vue-tsc": "3.1.7",
+ "vue-tsc": "3.1.8",
  "delay-cli": "3.0.0",
  "cross-env": "10.1.0",
  "serve": "14.2.5",