speechflow 2.0.1 → 2.0.2

Files changed (32)
  1. package/CHANGELOG.md +6 -0
  2. package/package.json +4 -4
  3. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.d.ts +2 -3
  4. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js +93 -466
  5. package/speechflow-cli/dst/speechflow-node-t2a-supertonic.js.map +1 -1
  6. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js +1 -2
  7. package/speechflow-cli/dst/speechflow-node-t2t-punctuation.js.map +1 -1
  8. package/speechflow-cli/dst/speechflow-node-t2t-spellcheck.js +1 -2
  9. package/speechflow-cli/dst/speechflow-node-t2t-spellcheck.js.map +1 -1
  10. package/speechflow-cli/dst/speechflow-node-t2t-summary.js +1 -2
  11. package/speechflow-cli/dst/speechflow-node-t2t-summary.js.map +1 -1
  12. package/speechflow-cli/dst/speechflow-node-t2t-translate.js +1 -2
  13. package/speechflow-cli/dst/speechflow-node-t2t-translate.js.map +1 -1
  14. package/speechflow-cli/dst/speechflow-util-llm.d.ts +0 -1
  15. package/speechflow-cli/dst/speechflow-util-llm.js +4 -8
  16. package/speechflow-cli/dst/speechflow-util-llm.js.map +1 -1
  17. package/speechflow-cli/dst/test.d.ts +1 -0
  18. package/speechflow-cli/dst/test.js +18 -0
  19. package/speechflow-cli/dst/test.js.map +1 -0
  20. package/speechflow-cli/etc/oxlint.jsonc +3 -1
  21. package/speechflow-cli/package.json +12 -12
  22. package/speechflow-cli/src/speechflow-node-t2a-supertonic.ts +103 -577
  23. package/speechflow-cli/src/speechflow-node-t2t-punctuation.ts +1 -2
  24. package/speechflow-cli/src/speechflow-node-t2t-spellcheck.ts +1 -2
  25. package/speechflow-cli/src/speechflow-node-t2t-summary.ts +1 -2
  26. package/speechflow-cli/src/speechflow-node-t2t-translate.ts +1 -2
  27. package/speechflow-cli/src/speechflow-util-llm.ts +4 -9
  28. package/speechflow-cli/src/speechflow-util-queue.ts +1 -1
  29. package/speechflow-ui-db/dst/index.js +14 -14
  30. package/speechflow-ui-db/package.json +2 -2
  31. package/speechflow-ui-st/dst/index.js +32 -32
  32. package/speechflow-ui-st/package.json +2 -2
package/speechflow-cli/src/speechflow-node-t2t-punctuation.ts
@@ -133,8 +133,7 @@ export default class SpeechFlowNodeT2TPunctuation extends SpeechFlowNode {
  api: this.params.api,
  model: this.params.model,
  key: this.params.key,
- temperature: 0.7,
- topP: 0.5
+ temperature: 0.7
  })
  this.llm.on("log", (level: string, message: string) => {
  this.log(level as "info" | "warning" | "error", message)
package/speechflow-cli/src/speechflow-node-t2t-spellcheck.ts
@@ -120,8 +120,7 @@ export default class SpeechFlowNodeT2TSpellcheck extends SpeechFlowNode {
  api: this.params.api,
  model: this.params.model,
  key: this.params.key,
- temperature: 0.7,
- topP: 0.5
+ temperature: 0.7
  })
  this.llm.on("log", (level: string, message: string) => {
  this.log(level as "info" | "warning" | "error", message)
package/speechflow-cli/src/speechflow-node-t2t-summary.ts
@@ -119,8 +119,7 @@ export default class SpeechFlowNodeT2TSummary extends SpeechFlowNode {
  api: this.params.api,
  model: this.params.model,
  key: this.params.key,
- temperature: 0.7,
- topP: 0.5
+ temperature: 0.7
  })
  this.llm.on("log", (level: string, message: string) => {
  this.log(level as "info" | "warning" | "error", message)
package/speechflow-cli/src/speechflow-node-t2t-translate.ts
@@ -112,8 +112,7 @@ export default class SpeechFlowNodeT2TTranslate extends SpeechFlowNode {
  api: this.params.api,
  model: this.params.model,
  key: this.params.key,
- temperature: 0.7,
- topP: 0.5
+ temperature: 0.7
  })
  this.llm.on("log", (level: string, message: string) => {
  this.log(level as "info" | "warning" | "error", message)
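
The same two-line change (topP dropped, temperature kept) lands in all four T2T nodes: punctuation, spellcheck, summary, and translate. A minimal standalone sketch of the resulting construction pattern, assuming LLM is exported from speechflow-util-llm and that api/model/key belong to LLMConfig; the `new LLM({` line itself sits above the hunk context and is not shown in the diff, and all values below are illustrative placeholders, not the nodes' actual defaults:

    import { LLM } from "./speechflow-util-llm"

    /*  sketch of the post-2.0.2 construction shared by the four T2T nodes  */
    const llm = new LLM({
        api:   "",                        /*  optional custom endpoint      */
        model: "gpt-4o-mini",             /*  placeholder model id          */
        key:   process.env.LLM_KEY ?? "",
        temperature: 0.7                  /*  topP is no longer accepted    */
    })
    llm.on("log", (level: string, message: string) => {
        console.log(`[${level}] ${message}`)
    })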
@@ -27,7 +27,6 @@ export type LLMConfig = {
27
27
  timeout?: number
28
28
  temperature?: number
29
29
  maxTokens?: number
30
- topP?: number
31
30
  cacheDir?: string
32
31
  }
33
32
  export type LLMCompleteOptions = {
@@ -61,7 +60,6 @@ export class LLM extends EventEmitter {
  timeout: 30 * 1000,
  temperature: 0.7,
  maxTokens: 1024,
- topP: 0.5,
  cacheDir: "",
  ...config
  } as Required<LLMConfig>
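
The defaults block above spread-merges caller overrides onto built-in defaults and casts the result to Required<LLMConfig>. A minimal self-contained sketch of that pattern with topP removed, using only the fields visible in the two hunks above:

    type LLMConfig = {
        timeout?: number
        temperature?: number
        maxTokens?: number
        cacheDir?: string
    }

    /*  merge caller overrides over defaults; the cast to Required<> is
        sound only because every optional field is given a default first  */
    function resolveConfig (config: LLMConfig): Required<LLMConfig> {
        return {
            timeout: 30 * 1000,
            temperature: 0.7,
            maxTokens: 1024,
            cacheDir: "",
            ...config
        } as Required<LLMConfig>
    }

One caveat of this spread-merge idiom: a caller that passes an explicit undefined (e.g. { temperature: undefined }) still overrides the default, so the Required<> cast can lie at runtime.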
@@ -102,7 +100,9 @@ export class LLM extends EventEmitter {
  /* instantiate Anthropic API */
  this.anthropic = new Anthropic({
  ...(this.config.api !== "" ? { baseURL: this.config.api } : {}),
- apiKey: this.config.key,
+ ...(this.config.key.match(/^sk-ant-oat/) ?
+ { authToken: this.config.key } :
+ { apiKey: this.config.key } ),
  timeout: this.config.timeout
  })
  }
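
Beyond removing topP, this hunk is the one behavioral addition in the release: keys matching the sk-ant-oat prefix (the prefix Anthropic uses for OAuth access tokens) are now passed to the SDK as authToken (sent as an Authorization: Bearer header) rather than apiKey (sent as x-api-key). A sketch of the same branch as a standalone helper; makeAnthropicClient is a made-up name for illustration, the client options are those of @anthropic-ai/sdk:

    import Anthropic from "@anthropic-ai/sdk"

    /*  OAuth access tokens ("sk-ant-oat...") authenticate via authToken,
        regular API keys via apiKey; mirrors the conditional spread above  */
    function makeAnthropicClient (key: string, baseURL = "", timeout = 30 * 1000): Anthropic {
        return new Anthropic({
            ...(baseURL !== "" ? { baseURL } : {}),
            ...(key.match(/^sk-ant-oat/) ?
                { authToken: key } :
                { apiKey: key }),
            timeout
        })
    }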
@@ -229,7 +229,6 @@ export class LLM extends EventEmitter {
  model: this.config.model,
  max_tokens: this.config.maxTokens,
  temperature: this.config.temperature,
- top_p: this.config.topP,
  messages: messages as OpenAI.ChatCompletionMessageParam[]
  }).catch((err) => {
  throw new Error(`failed to perform OpenAI chat completion: ${err}`, { cause: err })
@@ -252,7 +251,6 @@ export class LLM extends EventEmitter {
  model: this.config.model,
  max_tokens: this.config.maxTokens,
  temperature: this.config.temperature,
- top_p: this.config.topP,
  system: systemMessage?.content,
  messages: chatMessages as Anthropic.MessageParam[]
  }).catch((err) => {
@@ -283,7 +281,6 @@ export class LLM extends EventEmitter {
  config: {
  maxOutputTokens: this.config.maxTokens,
  temperature: this.config.temperature,
- topP: this.config.topP,
  ...(systemInstruction ? { systemInstruction } : {})
  }
  }).catch((err) => {
@@ -305,8 +302,7 @@ export class LLM extends EventEmitter {
  keep_alive: "10m",
  options: {
  num_predict: this.config.maxTokens,
- temperature: this.config.temperature,
- top_p: this.config.topP
+ temperature: this.config.temperature
  }
  }).catch((err) => {
  throw new Error(`failed to perform Ollama chat completion: ${err}`, { cause: err })
@@ -324,7 +320,6 @@ export class LLM extends EventEmitter {
  const result = await this.transformer(messages, {
  max_new_tokens: this.config.maxTokens,
  temperature: this.config.temperature,
- top_p: this.config.topP,
  do_sample: true
  }).catch((err) => {
  throw new Error(`failed to perform HuggingFace Transformers text generation: ${err}`, { cause: err })
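
With this last hunk the nucleus-sampling parameter is gone from every backend. A hypothetical helper summarizing the surviving per-backend request fields exactly as they appear in the calls above (samplingFor and its backend names are made up for illustration, not part of speechflow):

    type Sampling = { temperature: number, maxTokens: number }

    /*  per-backend sampling fields after 2.0.2, taken from the hunks above  */
    function samplingFor (backend: string, s: Sampling): Record<string, unknown> {
        switch (backend) {
            case "openai":       return { max_tokens:      s.maxTokens, temperature: s.temperature }
            case "anthropic":    return { max_tokens:      s.maxTokens, temperature: s.temperature }
            case "gemini":       return { maxOutputTokens: s.maxTokens, temperature: s.temperature }
            case "ollama":       return { num_predict:     s.maxTokens, temperature: s.temperature }
            case "transformers": return { max_new_tokens:  s.maxTokens, temperature: s.temperature, do_sample: true }
            default: throw new Error(`unknown backend: ${backend}`)
        }
    }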
package/speechflow-cli/src/speechflow-util-queue.ts
@@ -16,7 +16,7 @@ import * as IntervalTree from "node-interval-tree"
  import * as util from "./speechflow-util"

  /* import an object with parsing and strict error handling */
- export function importObject<T>(name: string, arg: object | string, validator: Type<T, {}>): T {
+ export function importObject<T> (name: string, arg: object | string, validator: Type<T, {}>): T {
  const obj: object = typeof arg === "string" ?
  util.run(`${name}: parsing JSON`, () => JSON.parse(arg)) :
  arg