@huggingface/transformers 3.1.1 → 3.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. package/README.md +6 -3
  2. package/dist/ort-wasm-simd-threaded.jsep.wasm +0 -0
  3. package/dist/transformers.cjs +300 -55
  4. package/dist/transformers.cjs.map +1 -1
  5. package/dist/transformers.js +1468 -1105
  6. package/dist/transformers.js.map +1 -1
  7. package/dist/transformers.min.cjs +1 -358
  8. package/dist/transformers.min.cjs.map +1 -1
  9. package/dist/transformers.min.js +1 -421
  10. package/dist/transformers.min.js.map +1 -1
  11. package/dist/transformers.min.mjs +1 -358
  12. package/dist/transformers.min.mjs.map +1 -1
  13. package/dist/transformers.mjs +311 -54
  14. package/dist/transformers.mjs.map +1 -1
  15. package/package.json +11 -16
  16. package/src/backends/onnx.js +2 -7
  17. package/src/configs.js +1 -0
  18. package/src/env.js +1 -1
  19. package/src/models/paligemma/processing_paligemma.js +82 -0
  20. package/src/models/processors.js +1 -0
  21. package/src/models.js +57 -5
  22. package/src/tokenizers.js +12 -1
  23. package/src/utils/core.js +39 -9
  24. package/src/utils/hub.js +8 -12
  25. package/src/utils/image.js +40 -0
  26. package/types/backends/onnx.d.ts +2 -2
  27. package/types/backends/onnx.d.ts.map +1 -1
  28. package/types/base/feature_extraction_utils.d.ts +1 -1
  29. package/types/base/feature_extraction_utils.d.ts.map +1 -1
  30. package/types/base/image_processors_utils.d.ts +2 -2
  31. package/types/base/image_processors_utils.d.ts.map +1 -1
  32. package/types/base/processing_utils.d.ts +4 -4
  33. package/types/base/processing_utils.d.ts.map +1 -1
  34. package/types/configs.d.ts +7 -7
  35. package/types/configs.d.ts.map +1 -1
  36. package/types/env.d.ts +1 -1
  37. package/types/env.d.ts.map +1 -1
  38. package/types/generation/configuration_utils.d.ts +2 -2
  39. package/types/generation/logits_process.d.ts +2 -2
  40. package/types/generation/logits_process.d.ts.map +1 -1
  41. package/types/generation/logits_sampler.d.ts.map +1 -1
  42. package/types/generation/parameters.d.ts +5 -5
  43. package/types/generation/stopping_criteria.d.ts +1 -1
  44. package/types/generation/stopping_criteria.d.ts.map +1 -1
  45. package/types/generation/streamers.d.ts +2 -2
  46. package/types/generation/streamers.d.ts.map +1 -1
  47. package/types/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.d.ts +1 -1
  48. package/types/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.d.ts.map +1 -1
  49. package/types/models/auto/feature_extraction_auto.d.ts.map +1 -1
  50. package/types/models/auto/image_processing_auto.d.ts.map +1 -1
  51. package/types/models/auto/processing_auto.d.ts +1 -1
  52. package/types/models/auto/processing_auto.d.ts.map +1 -1
  53. package/types/models/clap/feature_extraction_clap.d.ts +1 -1
  54. package/types/models/clap/feature_extraction_clap.d.ts.map +1 -1
  55. package/types/models/detr/image_processing_detr.d.ts +11 -11
  56. package/types/models/detr/image_processing_detr.d.ts.map +1 -1
  57. package/types/models/donut/image_processing_donut.d.ts +1 -1
  58. package/types/models/donut/image_processing_donut.d.ts.map +1 -1
  59. package/types/models/florence2/processing_florence2.d.ts.map +1 -1
  60. package/types/models/idefics3/image_processing_idefics3.d.ts.map +1 -1
  61. package/types/models/idefics3/processing_idefics3.d.ts.map +1 -1
  62. package/types/models/janus/image_processing_janus.d.ts +1 -1
  63. package/types/models/janus/image_processing_janus.d.ts.map +1 -1
  64. package/types/models/janus/processing_janus.d.ts.map +1 -1
  65. package/types/models/maskformer/image_processing_maskformer.d.ts +8 -8
  66. package/types/models/maskformer/image_processing_maskformer.d.ts.map +1 -1
  67. package/types/models/mgp_str/processing_mgp_str.d.ts +2 -2
  68. package/types/models/mgp_str/processing_mgp_str.d.ts.map +1 -1
  69. package/types/models/owlvit/image_processing_owlvit.d.ts.map +1 -1
  70. package/types/models/paligemma/processing_paligemma.d.ts +12 -0
  71. package/types/models/paligemma/processing_paligemma.d.ts.map +1 -0
  72. package/types/models/processors.d.ts +1 -0
  73. package/types/models/pyannote/feature_extraction_pyannote.d.ts.map +1 -1
  74. package/types/models/pyannote/processing_pyannote.d.ts +1 -1
  75. package/types/models/pyannote/processing_pyannote.d.ts.map +1 -1
  76. package/types/models/qwen2_vl/processing_qwen2_vl.d.ts.map +1 -1
  77. package/types/models/rt_detr/image_processing_rt_detr.d.ts.map +1 -1
  78. package/types/models/sam/image_processing_sam.d.ts.map +1 -1
  79. package/types/models/seamless_m4t/feature_extraction_seamless_m4t.d.ts +1 -1
  80. package/types/models/seamless_m4t/feature_extraction_seamless_m4t.d.ts.map +1 -1
  81. package/types/models/segformer/image_processing_segformer.d.ts.map +1 -1
  82. package/types/models/speecht5/processing_speecht5.d.ts.map +1 -1
  83. package/types/models/swin2sr/image_processing_swin2sr.d.ts +1 -1
  84. package/types/models/swin2sr/image_processing_swin2sr.d.ts.map +1 -1
  85. package/types/models/vitmatte/image_processing_vitmatte.d.ts.map +1 -1
  86. package/types/models/vitpose/image_processing_vitpose.d.ts +1 -1
  87. package/types/models/vitpose/image_processing_vitpose.d.ts.map +1 -1
  88. package/types/models/wav2vec2/feature_extraction_wav2vec2.d.ts.map +1 -1
  89. package/types/models/wav2vec2/processing_wav2vec2.d.ts.map +1 -1
  90. package/types/models/wespeaker/feature_extraction_wespeaker.d.ts +1 -1
  91. package/types/models/wespeaker/feature_extraction_wespeaker.d.ts.map +1 -1
  92. package/types/models/whisper/feature_extraction_whisper.d.ts +1 -1
  93. package/types/models/whisper/feature_extraction_whisper.d.ts.map +1 -1
  94. package/types/models/whisper/generation_whisper.d.ts.map +1 -1
  95. package/types/models/whisper/processing_whisper.d.ts.map +1 -1
  96. package/types/models/yolos/image_processing_yolos.d.ts.map +1 -1
  97. package/types/models.d.ts +28 -4
  98. package/types/models.d.ts.map +1 -1
  99. package/types/ops/registry.d.ts.map +1 -1
  100. package/types/pipelines.d.ts +26 -51
  101. package/types/pipelines.d.ts.map +1 -1
  102. package/types/tokenizers.d.ts +10 -6
  103. package/types/tokenizers.d.ts.map +1 -1
  104. package/types/utils/audio.d.ts.map +1 -1
  105. package/types/utils/constants.d.ts.map +1 -1
  106. package/types/utils/core.d.ts +87 -22
  107. package/types/utils/core.d.ts.map +1 -1
  108. package/types/utils/data-structures.d.ts.map +1 -1
  109. package/types/utils/devices.d.ts.map +1 -1
  110. package/types/utils/dtypes.d.ts.map +1 -1
  111. package/types/utils/generic.d.ts.map +1 -1
  112. package/types/utils/hub.d.ts +3 -3
  113. package/types/utils/hub.d.ts.map +1 -1
  114. package/types/utils/image.d.ts +10 -1
  115. package/types/utils/image.d.ts.map +1 -1
  116. package/types/utils/maths.d.ts +10 -10
  117. package/types/utils/maths.d.ts.map +1 -1
  118. package/types/utils/tensor.d.ts +6 -6
  119. package/types/utils/tensor.d.ts.map +1 -1
@@ -1,359 +1,2 @@
1
- import*as e from"fs";import*as t from"onnxruntime-node";import*as s from"path";import*as r from"sharp";import*as o from"url";var n,a,i={fs:
2
- /*!*********************!*\
3
- !*** external "fs" ***!
4
- \*********************/t=>{t.exports=e},"onnxruntime-node":
5
- /*!***********************************!*\
6
- !*** external "onnxruntime-node" ***!
7
- \***********************************/e=>{e.exports=t},path:
8
- /*!***********************!*\
9
- !*** external "path" ***!
10
- \***********************/e=>{e.exports=s},sharp:
11
- /*!************************!*\
12
- !*** external "sharp" ***!
13
- \************************/e=>{e.exports=r},url:
14
- /*!**********************!*\
15
- !*** external "url" ***!
16
- \**********************/e=>{e.exports=o},"?cb4d":
17
- /*!*************************************!*\
18
- !*** #onnxruntime-webgpu (ignored) ***!
19
- \*************************************/()=>{},"./node_modules/@huggingface/jinja/dist/index.js":
20
- /*!*******************************************************!*\
21
- !*** ./node_modules/@huggingface/jinja/dist/index.js ***!
22
- \*******************************************************/(e,t,s)=>{s.r(t),s.d(t,{Environment:()=>Y,Interpreter:()=>K,Template:()=>te,parse:()=>N,tokenize:()=>d});var r=Object.freeze({Text:"Text",NumericLiteral:"NumericLiteral",BooleanLiteral:"BooleanLiteral",NullLiteral:"NullLiteral",StringLiteral:"StringLiteral",Identifier:"Identifier",Equals:"Equals",OpenParen:"OpenParen",CloseParen:"CloseParen",OpenStatement:"OpenStatement",CloseStatement:"CloseStatement",OpenExpression:"OpenExpression",CloseExpression:"CloseExpression",OpenSquareBracket:"OpenSquareBracket",CloseSquareBracket:"CloseSquareBracket",OpenCurlyBracket:"OpenCurlyBracket",CloseCurlyBracket:"CloseCurlyBracket",Comma:"Comma",Dot:"Dot",Colon:"Colon",Pipe:"Pipe",CallOperator:"CallOperator",AdditiveBinaryOperator:"AdditiveBinaryOperator",MultiplicativeBinaryOperator:"MultiplicativeBinaryOperator",ComparisonBinaryOperator:"ComparisonBinaryOperator",UnaryOperator:"UnaryOperator",Set:"Set",If:"If",For:"For",In:"In",Is:"Is",NotIn:"NotIn",Else:"Else",EndIf:"EndIf",ElseIf:"ElseIf",EndFor:"EndFor",And:"And",Or:"Or",Not:"UnaryOperator",Macro:"Macro",EndMacro:"EndMacro"}),o=Object.freeze({set:r.Set,for:r.For,in:r.In,is:r.Is,if:r.If,else:r.Else,endif:r.EndIf,elif:r.ElseIf,endfor:r.EndFor,and:r.And,or:r.Or,not:r.Not,"not in":r.NotIn,macro:r.Macro,endmacro:r.EndMacro,true:r.BooleanLiteral,false:r.BooleanLiteral,none:r.NullLiteral,True:r.BooleanLiteral,False:r.BooleanLiteral,None:r.NullLiteral}),n=class{constructor(e,t){this.value=e,this.type=t}};function a(e){return/\w/.test(e)}function i(e){return/[0-9]/.test(e)}var 
l=[["{%",r.OpenStatement],["%}",r.CloseStatement],["{{",r.OpenExpression],["}}",r.CloseExpression],["(",r.OpenParen],[")",r.CloseParen],["{",r.OpenCurlyBracket],["}",r.CloseCurlyBracket],["[",r.OpenSquareBracket],["]",r.CloseSquareBracket],[",",r.Comma],[".",r.Dot],[":",r.Colon],["|",r.Pipe],["<=",r.ComparisonBinaryOperator],[">=",r.ComparisonBinaryOperator],["==",r.ComparisonBinaryOperator],["!=",r.ComparisonBinaryOperator],["<",r.ComparisonBinaryOperator],[">",r.ComparisonBinaryOperator],["+",r.AdditiveBinaryOperator],["-",r.AdditiveBinaryOperator],["*",r.MultiplicativeBinaryOperator],["/",r.MultiplicativeBinaryOperator],["%",r.MultiplicativeBinaryOperator],["=",r.Equals]],c=new Map([["n","\n"],["t","\t"],["r","\r"],["b","\b"],["f","\f"],["v","\v"],["'","'"],['"','"'],["\\","\\"]]);function d(e,t={}){const s=[],d=function(e,t={}){return e.endsWith("\n")&&(e=e.slice(0,-1)),e=e.replace(/{#.*?#}/gs,"{##}"),t.lstrip_blocks&&(e=e.replace(/^[ \t]*({[#%])/gm,"$1")),t.trim_blocks&&(e=e.replace(/([#%]})\n/g,"$1")),e.replace(/{##}/g,"").replace(/-%}\s*/g,"%}").replace(/\s*{%-/g,"{%").replace(/-}}\s*/g,"}}").replace(/\s*{{-/g,"{{")}(e,t);let u=0;const p=e=>{let t="";for(;e(d[u]);)if("\\"!==d[u]){if(t+=d[u++],u>=d.length)throw new SyntaxError("Unexpected end of input")}else{if(++u,u>=d.length)throw new SyntaxError("Unexpected end of input");const e=d[u++],s=c.get(e);if(void 0===s)throw new SyntaxError(`Unexpected escaped character: ${e}`);t+=s}return t};e:for(;u<d.length;){const e=s.at(-1)?.type;if(void 0===e||e===r.CloseStatement||e===r.CloseExpression){let e="";for(;u<d.length&&("{"!==d[u]||"%"!==d[u+1]&&"{"!==d[u+1]);)e+=d[u++];if(e.length>0){s.push(new n(e,r.Text));continue}}p((e=>/\s/.test(e)));const t=d[u];if("-"===t||"+"===t){const e=s.at(-1)?.type;if(e===r.Text||void 0===e)throw new SyntaxError(`Unexpected character: ${t}`);switch(e){case r.Identifier:case r.NumericLiteral:case r.BooleanLiteral:case r.NullLiteral:case r.StringLiteral:case r.CloseParen:case 
r.CloseSquareBracket:break;default:{++u;const e=p(i);s.push(new n(`${t}${e}`,e.length>0?r.NumericLiteral:r.UnaryOperator));continue}}}for(const[e,t]of l){if(d.slice(u,u+e.length)===e){s.push(new n(e,t)),u+=e.length;continue e}}if("'"!==t&&'"'!==t)if(i(t)){const e=p(i);s.push(new n(e,r.NumericLiteral))}else{if(!a(t))throw new SyntaxError(`Unexpected character: ${t}`);{const e=p(a),t=Object.hasOwn(o,e)?o[e]:r.Identifier;t===r.In&&s.at(-1)?.type===r.Not?(s.pop(),s.push(new n("not in",r.NotIn))):s.push(new n(e,t))}}else{++u;const e=p((e=>e!==t));s.push(new n(e,r.StringLiteral)),++u}}return s}var u=class{type="Statement"},p=class extends u{constructor(e){super(),this.body=e}type="Program"},m=class extends u{constructor(e,t,s){super(),this.test=e,this.body=t,this.alternate=s}type="If"},_=class extends u{constructor(e,t,s,r){super(),this.loopvar=e,this.iterable=t,this.body=s,this.defaultBlock=r}type="For"},h=class extends u{constructor(e,t){super(),this.assignee=e,this.value=t}type="Set"},f=class extends u{constructor(e,t,s){super(),this.name=e,this.args=t,this.body=s}type="Macro"},g=class extends u{type="Expression"},M=class extends g{constructor(e,t,s){super(),this.object=e,this.property=t,this.computed=s}type="MemberExpression"},w=class extends g{constructor(e,t){super(),this.callee=e,this.args=t}type="CallExpression"},T=class extends g{constructor(e){super(),this.value=e}type="Identifier"},b=class extends g{constructor(e){super(),this.value=e}type="Literal"},x=class extends b{type="NumericLiteral"},P=class extends b{type="StringLiteral"},k=class extends b{type="BooleanLiteral"},y=class extends b{type="NullLiteral"},F=class extends b{type="ArrayLiteral"},v=class extends b{type="TupleLiteral"},C=class extends b{type="ObjectLiteral"},S=class extends g{constructor(e,t,s){super(),this.operator=e,this.left=t,this.right=s}type="BinaryExpression"},A=class extends g{constructor(e,t){super(),this.operand=e,this.filter=t}type="FilterExpression"},E=class extends 
g{constructor(e,t){super(),this.iterable=e,this.test=t}type="SelectExpression"},L=class extends g{constructor(e,t,s){super(),this.operand=e,this.negate=t,this.test=s}type="TestExpression"},I=class extends g{constructor(e,t){super(),this.operator=e,this.argument=t}type="UnaryExpression"},z=class extends g{constructor(e=void 0,t=void 0,s=void 0){super(),this.start=e,this.stop=t,this.step=s}type="SliceExpression"},j=class extends g{constructor(e,t){super(),this.key=e,this.value=t}type="KeywordArgumentExpression"};function N(e){const t=new p([]);let s=0;function o(t,r){const o=e[s++];if(!o||o.type!==t)throw new Error(`Parser Error: ${r}. ${o.type} !== ${t}.`);return o}function n(){switch(e[s].type){case r.Text:return new P(o(r.Text,"Expected text token").value);case r.OpenStatement:return function(){let t;switch(o(r.OpenStatement,"Expected opening statement token"),e[s].type){case r.Set:++s,t=l(),o(r.CloseStatement,"Expected closing statement token");break;case r.If:++s,t=c(),o(r.OpenStatement,"Expected {% token"),o(r.EndIf,"Expected endif token"),o(r.CloseStatement,"Expected %} token");break;case r.Macro:++s,t=function(){const e=W();if("Identifier"!==e.type)throw new SyntaxError("Expected identifier following macro statement");const t=D();o(r.CloseStatement,"Expected closing statement token");const s=[];for(;a(r.OpenStatement,r.EndMacro);)s.push(n());return new f(e,t,s)}(),o(r.OpenStatement,"Expected {% token"),o(r.EndMacro,"Expected endmacro token"),o(r.CloseStatement,"Expected %} token");break;case r.For:++s,t=function(){const e=d(!0);if(!(e instanceof T||e instanceof v))throw new SyntaxError(`Expected identifier/tuple for the loop variable, got ${e.type} instead`);o(r.In,"Expected `in` keyword following loop variable");const t=u();o(r.CloseStatement,"Expected closing statement token");const l=[];for(;a(r.OpenStatement,r.EndFor)&&a(r.OpenStatement,r.Else);)l.push(n());const c=[];if(i(r.OpenStatement,r.Else))for(++s,++s,o(r.CloseStatement,"Expected closing statement 
token");a(r.OpenStatement,r.EndFor);)c.push(n());return new _(e,t,l,c)}(),o(r.OpenStatement,"Expected {% token"),o(r.EndFor,"Expected endfor token"),o(r.CloseStatement,"Expected %} token");break;default:throw new SyntaxError(`Unknown statement type: ${e[s].type}`)}return t}();case r.OpenExpression:return function(){o(r.OpenExpression,"Expected opening expression token");const e=u();return o(r.CloseExpression,"Expected closing expression token"),e}();default:throw new SyntaxError(`Unexpected token type: ${e[s].type}`)}}function a(...t){return s+t.length<=e.length&&t.some(((t,r)=>t!==e[s+r].type))}function i(...t){return s+t.length<=e.length&&t.every(((t,r)=>t===e[s+r].type))}function l(){const e=u();if(i(r.Equals)){++s;const t=l();return new h(e,t)}return e}function c(){const t=u();o(r.CloseStatement,"Expected closing statement token");const a=[],l=[];for(;e[s]?.type!==r.OpenStatement||e[s+1]?.type!==r.ElseIf&&e[s+1]?.type!==r.Else&&e[s+1]?.type!==r.EndIf;)a.push(n());if(e[s]?.type===r.OpenStatement&&e[s+1]?.type!==r.EndIf)if(++s,i(r.ElseIf))o(r.ElseIf,"Expected elseif token"),l.push(c());else for(o(r.Else,"Expected else token"),o(r.CloseStatement,"Expected closing statement token");e[s]?.type!==r.OpenStatement||e[s+1]?.type!==r.EndIf;)l.push(n());return new m(t,a,l)}function d(e=!1){const t=e?W:u,o=[t()],n=i(r.Comma);for(;n&&(++s,o.push(t()),i(r.Comma)););return n?new v(o):o[0]}function u(){return function(){const e=g();if(i(r.If)){++s;const t=g();if(i(r.Else)){++s;const r=g();return new m(t,[e],[r])}return new E(e,t)}return e}()}function g(){let t=b();for(;i(r.Or);){const r=e[s];++s;const o=b();t=new S(r,t,o)}return t}function b(){let t=N();for(;i(r.And);){const r=e[s];++s;const o=N();t=new S(r,t,o)}return t}function N(){let t;for(;i(r.Not);){const r=e[s];++s;const o=N();t=new I(r,o)}return t??function(){let t=V();for(;i(r.ComparisonBinaryOperator)||i(r.In)||i(r.NotIn);){const r=e[s];++s;const o=V();t=new S(r,t,o)}return t}()}function V(){let 
t=G();for(;i(r.AdditiveBinaryOperator);){const r=e[s];++s;const o=G();t=new S(r,t,o)}return t}function B(){const t=function(){let t=W();for(;i(r.Dot)||i(r.OpenSquareBracket);){const n=e[s];let a;++s;const i=n.type!==r.Dot;if(i)a=R(),o(r.CloseSquareBracket,"Expected closing square bracket");else if(a=W(),"Identifier"!==a.type)throw new SyntaxError("Expected identifier following dot operator");t=new M(t,a,i)}return t}();return i(r.OpenParen)?O(t):t}function O(e){let t=new w(e,D());return i(r.OpenParen)&&(t=O(t)),t}function D(){o(r.OpenParen,"Expected opening parenthesis for arguments list");const e=function(){const e=[];for(;!i(r.CloseParen);){let t=u();if(i(r.Equals)){if(++s,!(t instanceof T))throw new SyntaxError("Expected identifier for keyword argument");const e=u();t=new j(t,e)}e.push(t),i(r.Comma)&&++s}return e}();return o(r.CloseParen,"Expected closing parenthesis for arguments list"),e}function R(){const e=[];let t=!1;for(;!i(r.CloseSquareBracket);)i(r.Colon)?(e.push(void 0),++s,t=!0):(e.push(u()),i(r.Colon)&&(++s,t=!0));if(0===e.length)throw new SyntaxError("Expected at least one argument for member/slice expression");if(t){if(e.length>3)throw new SyntaxError("Expected 0-3 arguments for slice expression");return new z(...e)}return e[0]}function G(){let t=q();for(;i(r.MultiplicativeBinaryOperator);){const r=e[s];++s;const o=q();t=new S(r,t,o)}return t}function q(){let e=function(){let e=B();for(;i(r.Pipe);){++s;let t=W();if(!(t instanceof T))throw new SyntaxError("Expected identifier for the filter");i(r.OpenParen)&&(t=O(t)),e=new A(e,t)}return e}();for(;i(r.Is);){++s;const t=i(r.Not);t&&++s;let o=W();if(o instanceof k?o=new T(o.value.toString()):o instanceof y&&(o=new T("none")),!(o instanceof T))throw new SyntaxError("Expected identifier for the test");e=new L(e,t,o)}return e}function W(){const t=e[s];switch(t.type){case r.NumericLiteral:return++s,new x(Number(t.value));case r.StringLiteral:return++s,new P(t.value);case r.BooleanLiteral:return++s,new 
k("true"===t.value.toLowerCase());case r.NullLiteral:return++s,new y(null);case r.Identifier:return++s,new T(t.value);case r.OpenParen:{++s;const t=d();if(e[s].type!==r.CloseParen)throw new SyntaxError(`Expected closing parenthesis, got ${e[s].type} instead`);return++s,t}case r.OpenSquareBracket:{++s;const e=[];for(;!i(r.CloseSquareBracket);)e.push(u()),i(r.Comma)&&++s;return++s,new F(e)}case r.OpenCurlyBracket:{++s;const e=new Map;for(;!i(r.CloseCurlyBracket);){const t=u();o(r.Colon,"Expected colon between key and value in object literal");const n=u();e.set(t,n),i(r.Comma)&&++s}return++s,new C(e)}default:throw new SyntaxError(`Unexpected token: ${t.type}`)}}for(;s<e.length;)t.body.push(n());return t}function V(e,t,s=1){void 0===t&&(t=e,e=0);const r=[];for(let o=e;o<t;o+=s)r.push(o);return r}function B(e,t,s,r=1){const o=Math.sign(r);o>=0?(t=(t??=0)<0?Math.max(e.length+t,0):Math.min(t,e.length),s=(s??=e.length)<0?Math.max(e.length+s,0):Math.min(s,e.length)):(t=(t??=e.length-1)<0?Math.max(e.length+t,-1):Math.min(t,e.length-1),s=(s??=-1)<-1?Math.max(e.length+s,-1):Math.min(s,e.length-1));const n=[];for(let a=t;o*a<o*s;a+=r)n.push(e[a]);return n}function O(e){return e.replace(/\b\w/g,(e=>e.toUpperCase()))}var D=class{type="RuntimeValue";value;builtins=new Map;constructor(e=void 0){this.value=e}__bool__(){return new q(!!this.value)}},R=class extends D{type="NumericValue"},G=class extends D{type="StringValue";builtins=new Map([["upper",new X((()=>new G(this.value.toUpperCase())))],["lower",new X((()=>new G(this.value.toLowerCase())))],["strip",new X((()=>new G(this.value.trim())))],["title",new X((()=>new G(O(this.value))))],["length",new R(this.value.length)],["rstrip",new X((()=>new G(this.value.trimEnd())))],["lstrip",new X((()=>new G(this.value.trimStart())))]])},q=class extends D{type="BooleanValue"},W=class extends D{type="ObjectValue";__bool__(){return new q(this.value.size>0)}builtins=new Map([["get",new X((([e,t])=>{if(!(e instanceof G))throw new Error(`Object 
key must be a string: got ${e.type}`);return this.value.get(e.value)??t??new H}))],["items",new X((()=>new U(Array.from(this.value.entries()).map((([e,t])=>new U([new G(e),t]))))))]])},$=class extends W{type="KeywordArgumentsValue"},U=class extends D{type="ArrayValue";builtins=new Map([["length",new R(this.value.length)]]);__bool__(){return new q(this.value.length>0)}},Q=class extends U{type="TupleValue"},X=class extends D{type="FunctionValue"},H=class extends D{type="NullValue"},J=class extends D{type="UndefinedValue"},Y=class{constructor(e){this.parent=e}variables=new Map([["namespace",new X((e=>{if(0===e.length)return new W(new Map);if(1!==e.length||!(e[0]instanceof W))throw new Error("`namespace` expects either zero arguments or a single object argument");return e[0]}))]]);tests=new Map([["boolean",e=>"BooleanValue"===e.type],["callable",e=>e instanceof X],["odd",e=>{if("NumericValue"!==e.type)throw new Error(`Cannot apply test "odd" to type: ${e.type}`);return e.value%2!=0}],["even",e=>{if("NumericValue"!==e.type)throw new Error(`Cannot apply test "even" to type: ${e.type}`);return e.value%2==0}],["false",e=>"BooleanValue"===e.type&&!e.value],["true",e=>"BooleanValue"===e.type&&e.value],["none",e=>"NullValue"===e.type],["string",e=>"StringValue"===e.type],["number",e=>"NumericValue"===e.type],["integer",e=>"NumericValue"===e.type&&Number.isInteger(e.value)],["iterable",e=>"ArrayValue"===e.type||"StringValue"===e.type],["mapping",e=>"ObjectValue"===e.type],["lower",e=>{const t=e.value;return"StringValue"===e.type&&t===t.toLowerCase()}],["upper",e=>{const t=e.value;return"StringValue"===e.type&&t===t.toUpperCase()}],["none",e=>"NullValue"===e.type],["defined",e=>"UndefinedValue"!==e.type],["undefined",e=>"UndefinedValue"===e.type],["equalto",(e,t)=>e.value===t.value],["eq",(e,t)=>e.value===t.value]]);set(e,t){return this.declareVariable(e,Z(t))}declareVariable(e,t){if(this.variables.has(e))throw new SyntaxError(`Variable already declared: ${e}`);return 
this.variables.set(e,t),t}setVariable(e,t){return this.variables.set(e,t),t}resolve(e){if(this.variables.has(e))return this;if(this.parent)return this.parent.resolve(e);throw new Error(`Unknown variable: ${e}`)}lookupVariable(e){try{return this.resolve(e).variables.get(e)??new J}catch{return new J}}},K=class{global;constructor(e){this.global=e??new Y}run(e){return this.evaluate(e,this.global)}evaluateBinaryExpression(e,t){const s=this.evaluate(e.left,t);switch(e.operator.value){case"and":return s.__bool__().value?this.evaluate(e.right,t):s;case"or":return s.__bool__().value?s:this.evaluate(e.right,t)}const r=this.evaluate(e.right,t);switch(e.operator.value){case"==":return new q(s.value==r.value);case"!=":return new q(s.value!=r.value)}if(s instanceof J||r instanceof J)throw new Error("Cannot perform operation on undefined values");if(s instanceof H||r instanceof H)throw new Error("Cannot perform operation on null values");if(s instanceof R&&r instanceof R)switch(e.operator.value){case"+":return new R(s.value+r.value);case"-":return new R(s.value-r.value);case"*":return new R(s.value*r.value);case"/":return new R(s.value/r.value);case"%":return new R(s.value%r.value);case"<":return new q(s.value<r.value);case">":return new q(s.value>r.value);case">=":return new q(s.value>=r.value);case"<=":return new q(s.value<=r.value)}else if(s instanceof U&&r instanceof U){if("+"===e.operator.value)return new U(s.value.concat(r.value))}else if(r instanceof U){const t=void 0!==r.value.find((e=>e.value===s.value));switch(e.operator.value){case"in":return new q(t);case"not in":return new q(!t)}}if((s instanceof G||r instanceof G)&&"+"===e.operator.value)return new G(s.value.toString()+r.value.toString());if(s instanceof G&&r instanceof G)switch(e.operator.value){case"in":return new q(r.value.includes(s.value));case"not in":return new q(!r.value.includes(s.value))}if(s instanceof G&&r instanceof W)switch(e.operator.value){case"in":return new q(r.value.has(s.value));case"not 
in":return new q(!r.value.has(s.value))}throw new SyntaxError(`Unknown operator "${e.operator.value}" between ${s.type} and ${r.type}`)}evaluateArguments(e,t){const s=[],r=new Map;for(const o of e)if("KeywordArgumentExpression"===o.type){const e=o;r.set(e.key.value,this.evaluate(e.value,t))}else{if(r.size>0)throw new Error("Positional arguments must come before keyword arguments");s.push(this.evaluate(o,t))}return[s,r]}evaluateFilterExpression(e,t){const s=this.evaluate(e.operand,t);if("Identifier"===e.filter.type){const t=e.filter;if("tojson"===t.value)return new G(ee(s));if(s instanceof U)switch(t.value){case"list":return s;case"first":return s.value[0];case"last":return s.value[s.value.length-1];case"length":return new R(s.value.length);case"reverse":return new U(s.value.reverse());case"sort":return new U(s.value.sort(((e,t)=>{if(e.type!==t.type)throw new Error(`Cannot compare different types: ${e.type} and ${t.type}`);switch(e.type){case"NumericValue":return e.value-t.value;case"StringValue":return e.value.localeCompare(t.value);default:throw new Error(`Cannot compare type: ${e.type}`)}})));default:throw new Error(`Unknown ArrayValue filter: ${t.value}`)}else if(s instanceof G)switch(t.value){case"length":return new R(s.value.length);case"upper":return new G(s.value.toUpperCase());case"lower":return new G(s.value.toLowerCase());case"title":return new G(O(s.value));case"capitalize":return new G(s.value.charAt(0).toUpperCase()+s.value.slice(1));case"trim":return new G(s.value.trim());case"indent":return new G(s.value.split("\n").map(((e,t)=>0===t||0===e.length?e:" "+e)).join("\n"));case"string":return s;default:throw new Error(`Unknown StringValue filter: ${t.value}`)}else{if(s instanceof R){if("abs"===t.value)return new R(Math.abs(s.value));throw new Error(`Unknown NumericValue filter: ${t.value}`)}if(s instanceof W)switch(t.value){case"items":return new U(Array.from(s.value.entries()).map((([e,t])=>new U([new G(e),t]))));case"length":return new 
R(s.value.size);default:throw new Error(`Unknown ObjectValue filter: ${t.value}`)}}throw new Error(`Cannot apply filter "${t.value}" to type: ${s.type}`)}if("CallExpression"===e.filter.type){const r=e.filter;if("Identifier"!==r.callee.type)throw new Error(`Unknown filter: ${r.callee.type}`);const o=r.callee.value;if("tojson"===o){const[,e]=this.evaluateArguments(r.args,t),o=e.get("indent")??new H;if(!(o instanceof R||o instanceof H))throw new Error("If set, indent must be a number");return new G(ee(s,o.value))}if(s instanceof U){switch(o){case"selectattr":case"rejectattr":{const e="selectattr"===o;if(s.value.some((e=>!(e instanceof W))))throw new Error(`\`${o}\` can only be applied to array of objects`);if(r.args.some((e=>"StringLiteral"!==e.type)))throw new Error(`arguments of \`${o}\` must be strings`);const[n,a,i]=r.args.map((e=>this.evaluate(e,t)));let l;if(a){const e=t.tests.get(a.value);if(!e)throw new Error(`Unknown test: ${a.value}`);l=e}else l=(...e)=>e[0].__bool__().value;const c=s.value.filter((t=>{const s=t.value.get(n.value),r=!!s&&l(s,i);return e?r:!r}));return new U(c)}case"map":{const[,e]=this.evaluateArguments(r.args,t);if(e.has("attribute")){const t=e.get("attribute");if(!(t instanceof G))throw new Error("attribute must be a string");const r=e.get("default"),o=s.value.map((e=>{if(!(e instanceof W))throw new Error("items in map must be an object");return e.value.get(t.value)??r??new J}));return new U(o)}throw new Error("`map` expressions without `attribute` set are not currently supported.")}}throw new Error(`Unknown ArrayValue filter: ${o}`)}if(s instanceof G){if("indent"===o){const[e,o]=this.evaluateArguments(r.args,t),n=e.at(0)??o.get("width")??new R(4);if(!(n instanceof R))throw new Error("width must be a number");const a=e.at(1)??o.get("first")??new q(!1),i=e.at(2)??o.get("blank")??new q(!1),l=s.value.split("\n"),c=" ".repeat(n.value),d=l.map(((e,t)=>!a.value&&0===t||!i.value&&0===e.length?e:c+e));return new G(d.join("\n"))}throw new 
Error(`Unknown StringValue filter: ${o}`)}throw new Error(`Cannot apply filter "${o}" to type: ${s.type}`)}throw new Error(`Unknown filter: ${e.filter.type}`)}evaluateTestExpression(e,t){const s=this.evaluate(e.operand,t),r=t.tests.get(e.test.value);if(!r)throw new Error(`Unknown test: ${e.test.value}`);const o=r(s);return new q(e.negate?!o:o)}evaluateUnaryExpression(e,t){const s=this.evaluate(e.argument,t);if("not"===e.operator.value)return new q(!s.value);throw new SyntaxError(`Unknown operator: ${e.operator.value}`)}evalProgram(e,t){return this.evaluateBlock(e.body,t)}evaluateBlock(e,t){let s="";for(const r of e){const e=this.evaluate(r,t);"NullValue"!==e.type&&"UndefinedValue"!==e.type&&(s+=e.value)}return new G(s)}evaluateIdentifier(e,t){return t.lookupVariable(e.value)}evaluateCallExpression(e,t){const[s,r]=this.evaluateArguments(e.args,t);r.size>0&&s.push(new $(r));const o=this.evaluate(e.callee,t);if("FunctionValue"!==o.type)throw new Error(`Cannot call something that is not a function: got ${o.type}`);return o.value(s,t)}evaluateSliceExpression(e,t,s){if(!(e instanceof U||e instanceof G))throw new Error("Slice object must be an array or string");const r=this.evaluate(t.start,s),o=this.evaluate(t.stop,s),n=this.evaluate(t.step,s);if(!(r instanceof R||r instanceof J))throw new Error("Slice start must be numeric or undefined");if(!(o instanceof R||o instanceof J))throw new Error("Slice stop must be numeric or undefined");if(!(n instanceof R||n instanceof J))throw new Error("Slice step must be numeric or undefined");return e instanceof U?new U(B(e.value,r.value,o.value,n.value)):new G(B(Array.from(e.value),r.value,o.value,n.value).join(""))}evaluateMemberExpression(e,t){const s=this.evaluate(e.object,t);let r,o;if(e.computed){if("SliceExpression"===e.property.type)return this.evaluateSliceExpression(s,e.property,t);r=this.evaluate(e.property,t)}else r=new G(e.property.value);if(s instanceof W){if(!(r instanceof G))throw new Error(`Cannot access property with 
non-string: got ${r.type}`);o=s.value.get(r.value)??s.builtins.get(r.value)}else if(s instanceof U||s instanceof G)if(r instanceof R)o=s.value.at(r.value),s instanceof G&&(o=new G(s.value.at(r.value)));else{if(!(r instanceof G))throw new Error(`Cannot access property with non-string/non-number: got ${r.type}`);o=s.builtins.get(r.value)}else{if(!(r instanceof G))throw new Error(`Cannot access property with non-string: got ${r.type}`);o=s.builtins.get(r.value)}return o instanceof D?o:new J}evaluateSet(e,t){const s=this.evaluate(e.value,t);if("Identifier"===e.assignee.type){const r=e.assignee.value;t.setVariable(r,s)}else{if("MemberExpression"!==e.assignee.type)throw new Error(`Invalid LHS inside assignment expression: ${JSON.stringify(e.assignee)}`);{const r=e.assignee,o=this.evaluate(r.object,t);if(!(o instanceof W))throw new Error("Cannot assign to member of non-object");if("Identifier"!==r.property.type)throw new Error("Cannot assign to member with non-identifier property");o.value.set(r.property.value,s)}}return new H}evaluateIf(e,t){const s=this.evaluate(e.test,t);return this.evaluateBlock(s.__bool__().value?e.body:e.alternate,t)}evaluateFor(e,t){const s=new Y(t);let r,o;if("SelectExpression"===e.iterable.type){const t=e.iterable;o=this.evaluate(t.iterable,s),r=t.test}else o=this.evaluate(e.iterable,s);if(!(o instanceof U))throw new Error(`Expected iterable type in for loop: got ${o.type}`);const n=[],a=[];for(let t=0;t<o.value.length;++t){const i=new Y(s),l=o.value[t];let c;if("Identifier"===e.loopvar.type)c=t=>t.setVariable(e.loopvar.value,l);else{if("TupleLiteral"!==e.loopvar.type)throw new Error(`Invalid loop variable(s): ${e.loopvar.type}`);{const t=e.loopvar;if("ArrayValue"!==l.type)throw new Error(`Cannot unpack non-iterable type: ${l.type}`);const s=l;if(t.value.length!==s.value.length)throw new Error(`Too ${t.value.length>s.value.length?"few":"many"} items to unpack`);c=e=>{for(let r=0;r<t.value.length;++r){if("Identifier"!==t.value[r].type)throw new 
Error(`Cannot unpack non-identifier type: ${t.value[r].type}`);e.setVariable(t.value[r].value,s.value[r])}}}}if(r){c(i);if(!this.evaluate(r,i).__bool__().value)continue}n.push(l),a.push(c)}let i="",l=!0;for(let t=0;t<n.length;++t){const r=new Map([["index",new R(t+1)],["index0",new R(t)],["revindex",new R(n.length-t)],["revindex0",new R(n.length-t-1)],["first",new q(0===t)],["last",new q(t===n.length-1)],["length",new R(n.length)],["previtem",t>0?n[t-1]:new J],["nextitem",t<n.length-1?n[t+1]:new J]]);s.setVariable("loop",new W(r)),a[t](s);i+=this.evaluateBlock(e.body,s).value,l=!1}if(l){i+=this.evaluateBlock(e.defaultBlock,s).value}return new G(i)}evaluateMacro(e,t){return t.setVariable(e.name.value,new X(((t,s)=>{const r=new Y(s);let o;t=t.slice(),"KeywordArgumentsValue"===t.at(-1)?.type&&(o=t.pop());for(let s=0;s<e.args.length;++s){const n=e.args[s],a=t[s];if("Identifier"===n.type){const e=n;if(!a)throw new Error(`Missing positional argument: ${e.value}`);r.setVariable(e.value,a)}else{if("KeywordArgumentExpression"!==n.type)throw new Error(`Unknown argument type: ${n.type}`);{const e=n,t=a??o?.value.get(e.key.value)??this.evaluate(e.value,r);r.setVariable(e.key.value,t)}}}return this.evaluateBlock(e.body,r)}))),new H}evaluate(e,t){if(void 0===e)return new J;switch(e.type){case"Program":return this.evalProgram(e,t);case"Set":return this.evaluateSet(e,t);case"If":return this.evaluateIf(e,t);case"For":return this.evaluateFor(e,t);case"Macro":return this.evaluateMacro(e,t);case"NumericLiteral":return new R(Number(e.value));case"StringLiteral":return new G(e.value);case"BooleanLiteral":return new q(e.value);case"NullLiteral":return new H(e.value);case"ArrayLiteral":return new U(e.value.map((e=>this.evaluate(e,t))));case"TupleLiteral":return new Q(e.value.map((e=>this.evaluate(e,t))));case"ObjectLiteral":{const s=new Map;for(const[r,o]of e.value){const e=this.evaluate(r,t);if(!(e instanceof G))throw new Error(`Object keys must be strings: got 
${e.type}`);s.set(e.value,this.evaluate(o,t))}return new W(s)}case"Identifier":return this.evaluateIdentifier(e,t);case"CallExpression":return this.evaluateCallExpression(e,t);case"MemberExpression":return this.evaluateMemberExpression(e,t);case"UnaryExpression":return this.evaluateUnaryExpression(e,t);case"BinaryExpression":return this.evaluateBinaryExpression(e,t);case"FilterExpression":return this.evaluateFilterExpression(e,t);case"TestExpression":return this.evaluateTestExpression(e,t);default:throw new SyntaxError(`Unknown node type: ${e.type}`)}}};function Z(e){switch(typeof e){case"number":return new R(e);case"string":return new G(e);case"boolean":return new q(e);case"undefined":return new J;case"object":return null===e?new H:Array.isArray(e)?new U(e.map(Z)):new W(new Map(Object.entries(e).map((([e,t])=>[e,Z(t)]))));case"function":return new X(((t,s)=>Z(e(...t.map((e=>e.value)))??null)));default:throw new Error(`Cannot convert to runtime value: ${e}`)}}function ee(e,t,s){const r=s??0;switch(e.type){case"NullValue":case"UndefinedValue":return"null";case"NumericValue":case"StringValue":case"BooleanValue":return JSON.stringify(e.value);case"ArrayValue":case"ObjectValue":{const s=t?" ".repeat(t):"",o="\n"+s.repeat(r),n=o+s;if("ArrayValue"===e.type){const s=e.value.map((e=>ee(e,t,r+1)));return t?`[${n}${s.join(`,${n}`)}${o}]`:`[${s.join(", ")}]`}{const s=Array.from(e.value.entries()).map((([e,s])=>{const o=`"${e}": ${ee(s,t,r+1)}`;return t?`${n}${o}`:o}));return t?`{${s.join(",")}${o}}`:`{${s.join(", ")}}`}}default:throw new Error(`Cannot convert to JSON: ${e.type}`)}}var te=class{parsed;constructor(e){const t=d(e,{lstrip_blocks:!0,trim_blocks:!0});this.parsed=N(t)}render(e){const t=new Y;t.set("false",!1),t.set("true",!0),t.set("raise_exception",(e=>{throw new Error(e)})),t.set("range",V);for(const[s,r]of Object.entries(e))t.set(s,r);return new K(t).run(this.parsed).value}}},"./node_modules/onnxruntime-common/dist/esm/backend-impl.js":
23
- /*!******************************************************************!*\
24
- !*** ./node_modules/onnxruntime-common/dist/esm/backend-impl.js ***!
25
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{registerBackend:()=>n,resolveBackendAndExecutionProviders:()=>i});const r=new Map,o=[],n=(e,t,s)=>{if(!t||"function"!=typeof t.init||"function"!=typeof t.createInferenceSessionHandler)throw new TypeError("not a valid backend");{const n=r.get(e);if(void 0===n)r.set(e,{backend:t,priority:s});else{if(n.priority>s)return;if(n.priority===s&&n.backend!==t)throw new Error(`cannot register backend "${e}" using priority ${s}`)}if(s>=0){const t=o.indexOf(e);-1!==t&&o.splice(t,1);for(let t=0;t<o.length;t++)if(r.get(o[t]).priority<=s)return void o.splice(t,0,e);o.push(e)}}},a=async e=>{const t=r.get(e);if(!t)return"backend not found.";if(t.initialized)return t.backend;if(t.aborted)return t.error;{const s=!!t.initPromise;try{return s||(t.initPromise=t.backend.init(e)),await t.initPromise,t.initialized=!0,t.backend}catch(e){return s||(t.error=`${e}`,t.aborted=!0),t.error}finally{delete t.initPromise}}},i=async e=>{const t=e.executionProviders||[],s=t.map((e=>"string"==typeof e?e:e.name)),r=0===s.length?o:s;let n;const i=[],l=new Set;for(const e of r){const t=await a(e);"string"==typeof t?i.push({name:e,err:t}):(n||(n=t),n===t&&l.add(e))}if(!n)throw new Error(`no available backend found. ERR: ${i.map((e=>`[${e.name}] ${e.err}`)).join(", ")}`);for(const{name:e,err:t}of i)s.includes(e)&&console.warn(`removing requested execution provider "${e}" from session options because it is not available: ${t}`);const c=t.filter((e=>l.has("string"==typeof e?e:e.name)));return[n,new Proxy(e,{get:(e,t)=>"executionProviders"===t?c:Reflect.get(e,t)})]}},"./node_modules/onnxruntime-common/dist/esm/backend.js":
26
- /*!*************************************************************!*\
27
- !*** ./node_modules/onnxruntime-common/dist/esm/backend.js ***!
28
- \*************************************************************/(e,t,s)=>{s.r(t),s.d(t,{registerBackend:()=>r.registerBackend});var r=s(/*! ./backend-impl.js */"./node_modules/onnxruntime-common/dist/esm/backend-impl.js")},"./node_modules/onnxruntime-common/dist/esm/env-impl.js":
29
- /*!**************************************************************!*\
30
- !*** ./node_modules/onnxruntime-common/dist/esm/env-impl.js ***!
31
- \**************************************************************/(e,t,s)=>{s.r(t),s.d(t,{env:()=>n});var r=s(/*! ./version.js */"./node_modules/onnxruntime-common/dist/esm/version.js");let o="warning";const n={wasm:{},webgl:{},webgpu:{},versions:{common:r.version},set logLevel(e){if(void 0!==e){if("string"!=typeof e||-1===["verbose","info","warning","error","fatal"].indexOf(e))throw new Error(`Unsupported logging level: ${e}`);o=e}},get logLevel(){return o}};Object.defineProperty(n,"logLevel",{enumerable:!0})},"./node_modules/onnxruntime-common/dist/esm/env.js":
32
- /*!*********************************************************!*\
33
- !*** ./node_modules/onnxruntime-common/dist/esm/env.js ***!
34
- \*********************************************************/(e,t,s)=>{s.r(t),s.d(t,{env:()=>r});const r=s(/*! ./env-impl.js */"./node_modules/onnxruntime-common/dist/esm/env-impl.js").env},"./node_modules/onnxruntime-common/dist/esm/index.js":
35
- /*!***********************************************************!*\
36
- !*** ./node_modules/onnxruntime-common/dist/esm/index.js ***!
37
- \***********************************************************/(e,t,s)=>{s.r(t),s.d(t,{InferenceSession:()=>n.InferenceSession,TRACE:()=>i.TRACE,TRACE_FUNC_BEGIN:()=>i.TRACE_FUNC_BEGIN,TRACE_FUNC_END:()=>i.TRACE_FUNC_END,Tensor:()=>a.Tensor,TrainingSession:()=>l.TrainingSession,env:()=>o.env,registerBackend:()=>r.registerBackend});var r=s(/*! ./backend.js */"./node_modules/onnxruntime-common/dist/esm/backend.js"),o=s(/*! ./env.js */"./node_modules/onnxruntime-common/dist/esm/env.js"),n=s(/*! ./inference-session.js */"./node_modules/onnxruntime-common/dist/esm/inference-session.js"),a=s(/*! ./tensor.js */"./node_modules/onnxruntime-common/dist/esm/tensor.js"),i=(s(/*! ./tensor-conversion.js */"./node_modules/onnxruntime-common/dist/esm/tensor-conversion.js"),s(/*! ./tensor-factory.js */"./node_modules/onnxruntime-common/dist/esm/tensor-factory.js"),s(/*! ./trace.js */"./node_modules/onnxruntime-common/dist/esm/trace.js")),l=(s(/*! ./onnx-model.js */"./node_modules/onnxruntime-common/dist/esm/onnx-model.js"),s(/*! ./onnx-value.js */"./node_modules/onnxruntime-common/dist/esm/onnx-value.js"),s(/*! ./training-session.js */"./node_modules/onnxruntime-common/dist/esm/training-session.js"))},"./node_modules/onnxruntime-common/dist/esm/inference-session-impl.js":
38
- /*!****************************************************************************!*\
39
- !*** ./node_modules/onnxruntime-common/dist/esm/inference-session-impl.js ***!
40
- \****************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{InferenceSession:()=>a});var r=s(/*! ./backend-impl.js */"./node_modules/onnxruntime-common/dist/esm/backend-impl.js"),o=s(/*! ./tensor.js */"./node_modules/onnxruntime-common/dist/esm/tensor.js"),n=s(/*! ./trace.js */"./node_modules/onnxruntime-common/dist/esm/trace.js");class a{constructor(e){this.handler=e}async run(e,t,s){(0,n.TRACE_FUNC_BEGIN)();const r={};let a={};if("object"!=typeof e||null===e||e instanceof o.Tensor||Array.isArray(e))throw new TypeError("'feeds' must be an object that use input names as keys and OnnxValue as corresponding values.");let i=!0;if("object"==typeof t){if(null===t)throw new TypeError("Unexpected argument[1]: cannot be null.");if(t instanceof o.Tensor)throw new TypeError("'fetches' cannot be a Tensor");if(Array.isArray(t)){if(0===t.length)throw new TypeError("'fetches' cannot be an empty array.");i=!1;for(const e of t){if("string"!=typeof e)throw new TypeError("'fetches' must be a string array or an object.");if(-1===this.outputNames.indexOf(e))throw new RangeError(`'fetches' contains invalid output name: ${e}.`);r[e]=null}if("object"==typeof s&&null!==s)a=s;else if(void 0!==s)throw new TypeError("'options' must be an object.")}else{let e=!1;const n=Object.getOwnPropertyNames(t);for(const s of this.outputNames)if(-1!==n.indexOf(s)){const n=t[s];(null===n||n instanceof o.Tensor)&&(e=!0,i=!1,r[s]=n)}if(e){if("object"==typeof s&&null!==s)a=s;else if(void 0!==s)throw new TypeError("'options' must be an object.")}else a=t}}else if(void 0!==t)throw new TypeError("Unexpected argument[1]: must be 'fetches' or 'options'.");for(const t of this.inputNames)if(void 0===e[t])throw new Error(`input '${t}' is missing in 'feeds'.`);if(i)for(const e of this.outputNames)r[e]=null;const l=await this.handler.run(e,r,a),c={};for(const e in l)if(Object.hasOwnProperty.call(l,e)){const t=l[e];t instanceof o.Tensor?c[e]=t:c[e]=new 
o.Tensor(t.type,t.data,t.dims)}return(0,n.TRACE_FUNC_END)(),c}async release(){return this.handler.dispose()}static async create(e,t,s,o){let i;(0,n.TRACE_FUNC_BEGIN)();let l={};if("string"==typeof e){if(i=e,"object"==typeof t&&null!==t)l=t;else if(void 0!==t)throw new TypeError("'options' must be an object.")}else if(e instanceof Uint8Array){if(i=e,"object"==typeof t&&null!==t)l=t;else if(void 0!==t)throw new TypeError("'options' must be an object.")}else{if(!(e instanceof ArrayBuffer||"undefined"!=typeof SharedArrayBuffer&&e instanceof SharedArrayBuffer))throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'.");{const r=e;let n=0,a=e.byteLength;if("object"==typeof t&&null!==t)l=t;else if("number"==typeof t){if(n=t,!Number.isSafeInteger(n))throw new RangeError("'byteOffset' must be an integer.");if(n<0||n>=r.byteLength)throw new RangeError(`'byteOffset' is out of range [0, ${r.byteLength}).`);if(a=e.byteLength-n,"number"==typeof s){if(a=s,!Number.isSafeInteger(a))throw new RangeError("'byteLength' must be an integer.");if(a<=0||n+a>r.byteLength)throw new RangeError(`'byteLength' is out of range (0, ${r.byteLength-n}].`);if("object"==typeof o&&null!==o)l=o;else if(void 0!==o)throw new TypeError("'options' must be an object.")}else if(void 0!==s)throw new TypeError("'byteLength' must be a number.")}else if(void 0!==t)throw new TypeError("'options' must be an object.");i=new Uint8Array(r,n,a)}}const[c,d]=await(0,r.resolveBackendAndExecutionProviders)(l),u=await c.createInferenceSessionHandler(i,d);return(0,n.TRACE_FUNC_END)(),new a(u)}startProfiling(){this.handler.startProfiling()}endProfiling(){this.handler.endProfiling()}get inputNames(){return this.handler.inputNames}get outputNames(){return this.handler.outputNames}}},"./node_modules/onnxruntime-common/dist/esm/inference-session.js":
41
- /*!***********************************************************************!*\
42
- !*** ./node_modules/onnxruntime-common/dist/esm/inference-session.js ***!
43
- \***********************************************************************/(e,t,s)=>{s.r(t),s.d(t,{InferenceSession:()=>r});const r=s(/*! ./inference-session-impl.js */"./node_modules/onnxruntime-common/dist/esm/inference-session-impl.js").InferenceSession},"./node_modules/onnxruntime-common/dist/esm/onnx-model.js":
44
- /*!****************************************************************!*\
45
- !*** ./node_modules/onnxruntime-common/dist/esm/onnx-model.js ***!
46
- \****************************************************************/(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/onnx-value.js":
47
- /*!****************************************************************!*\
48
- !*** ./node_modules/onnxruntime-common/dist/esm/onnx-value.js ***!
49
- \****************************************************************/(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js":
50
- /*!****************************************************************************!*\
51
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js ***!
52
- \****************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{tensorToDataURL:()=>r,tensorToImageData:()=>o});const r=(e,t)=>{const s="undefined"!=typeof document?document.createElement("canvas"):new OffscreenCanvas(1,1);s.width=e.dims[3],s.height=e.dims[2];const r=s.getContext("2d");if(null!=r){let o,n;void 0!==t?.tensorLayout&&"NHWC"===t.tensorLayout?(o=e.dims[2],n=e.dims[3]):(o=e.dims[3],n=e.dims[2]);const a=void 0!==t?.format?t.format:"RGB",i=t?.norm;let l,c;void 0===i||void 0===i.mean?l=[255,255,255,255]:"number"==typeof i.mean?l=[i.mean,i.mean,i.mean,i.mean]:(l=[i.mean[0],i.mean[1],i.mean[2],0],void 0!==i.mean[3]&&(l[3]=i.mean[3])),void 0===i||void 0===i.bias?c=[0,0,0,0]:"number"==typeof i.bias?c=[i.bias,i.bias,i.bias,i.bias]:(c=[i.bias[0],i.bias[1],i.bias[2],0],void 0!==i.bias[3]&&(c[3]=i.bias[3]));const d=n*o;let u=0,p=d,m=2*d,_=-1;"RGBA"===a?(u=0,p=d,m=2*d,_=3*d):"RGB"===a?(u=0,p=d,m=2*d):"RBG"===a&&(u=0,m=d,p=2*d);for(let t=0;t<n;t++)for(let s=0;s<o;s++){const o=(e.data[u++]-c[0])*l[0],n=(e.data[p++]-c[1])*l[1],a=(e.data[m++]-c[2])*l[2],i=-1===_?255:(e.data[_++]-c[3])*l[3];r.fillStyle="rgba("+o+","+n+","+a+","+i+")",r.fillRect(s,t,1,1)}if("toDataURL"in s)return s.toDataURL();throw new Error("toDataURL is not supported")}throw new Error("Can not access image data")},o=(e,t)=>{const s="undefined"!=typeof document?document.createElement("canvas").getContext("2d"):new OffscreenCanvas(1,1).getContext("2d");let r;if(null==s)throw new Error("Can not access image data");{let o,n,a;void 0!==t?.tensorLayout&&"NHWC"===t.tensorLayout?(o=e.dims[2],n=e.dims[1],a=e.dims[3]):(o=e.dims[3],n=e.dims[2],a=e.dims[1]);const i=void 0!==t&&void 0!==t.format?t.format:"RGB",l=t?.norm;let c,d;void 0===l||void 0===l.mean?c=[255,255,255,255]:"number"==typeof l.mean?c=[l.mean,l.mean,l.mean,l.mean]:(c=[l.mean[0],l.mean[1],l.mean[2],255],void 0!==l.mean[3]&&(c[3]=l.mean[3])),void 0===l||void 0===l.bias?d=[0,0,0,0]:"number"==typeof 
l.bias?d=[l.bias,l.bias,l.bias,l.bias]:(d=[l.bias[0],l.bias[1],l.bias[2],0],void 0!==l.bias[3]&&(d[3]=l.bias[3]));const u=n*o;if(void 0!==t&&(void 0!==t.format&&4===a&&"RGBA"!==t.format||3===a&&"RGB"!==t.format&&"BGR"!==t.format))throw new Error("Tensor format doesn't match input tensor dims");const p=4;let m=0,_=1,h=2,f=3,g=0,M=u,w=2*u,T=-1;"RGBA"===i?(g=0,M=u,w=2*u,T=3*u):"RGB"===i?(g=0,M=u,w=2*u):"RBG"===i&&(g=0,w=u,M=2*u),r=s.createImageData(o,n);for(let t=0;t<n*o;m+=p,_+=p,h+=p,f+=p,t++)r.data[m]=(e.data[g++]-d[0])*c[0],r.data[_]=(e.data[M++]-d[1])*c[1],r.data[h]=(e.data[w++]-d[2])*c[2],r.data[f]=-1===T?255:(e.data[T++]-d[3])*c[3]}return r}},"./node_modules/onnxruntime-common/dist/esm/tensor-conversion.js":
53
- /*!***********************************************************************!*\
54
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-conversion.js ***!
55
- \***********************************************************************/(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js":
56
- /*!*************************************************************************!*\
57
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js ***!
58
- \*************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{bufferToTensor:()=>o,tensorFromGpuBuffer:()=>i,tensorFromImage:()=>n,tensorFromMLTensor:()=>l,tensorFromPinnedBuffer:()=>c,tensorFromTexture:()=>a});var r=s(/*! ./tensor-impl.js */"./node_modules/onnxruntime-common/dist/esm/tensor-impl.js");const o=(e,t)=>{if(void 0===e)throw new Error("Image buffer must be defined");if(void 0===t.height||void 0===t.width)throw new Error("Image height and width must be defined");if("NHWC"===t.tensorLayout)throw new Error("NHWC Tensor layout is not supported yet");const{height:s,width:o}=t,n=t.norm??{mean:255,bias:0};let a,i;a="number"==typeof n.mean?[n.mean,n.mean,n.mean,n.mean]:[n.mean[0],n.mean[1],n.mean[2],n.mean[3]??255],i="number"==typeof n.bias?[n.bias,n.bias,n.bias,n.bias]:[n.bias[0],n.bias[1],n.bias[2],n.bias[3]??0];const l=void 0!==t.format?t.format:"RGBA",c=void 0!==t.tensorFormat&&void 0!==t.tensorFormat?t.tensorFormat:"RGB",d=s*o,u="RGBA"===c?new Float32Array(4*d):new Float32Array(3*d);let p=4,m=0,_=1,h=2,f=3,g=0,M=d,w=2*d,T=-1;"RGB"===l&&(p=3,m=0,_=1,h=2,f=-1),"RGBA"===c?T=3*d:"RBG"===c?(g=0,w=d,M=2*d):"BGR"===c&&(w=0,M=d,g=2*d);for(let t=0;t<d;t++,m+=p,h+=p,_+=p,f+=p)u[g++]=(e[m]+i[0])/a[0],u[M++]=(e[_]+i[1])/a[1],u[w++]=(e[h]+i[2])/a[2],-1!==T&&-1!==f&&(u[T++]=(e[f]+i[3])/a[3]);return"RGBA"===c?new r.Tensor("float32",u,[1,4,s,o]):new r.Tensor("float32",u,[1,3,s,o])},n=async(e,t)=>{const s="undefined"!=typeof HTMLImageElement&&e instanceof HTMLImageElement,r="undefined"!=typeof ImageData&&e instanceof ImageData,n="undefined"!=typeof ImageBitmap&&e instanceof ImageBitmap,a="string"==typeof e;let i,l=t??{};const c=()=>{if("undefined"!=typeof document)return document.createElement("canvas");if("undefined"!=typeof OffscreenCanvas)return new OffscreenCanvas(1,1);throw new Error("Canvas is not supported")},d=e=>"undefined"!=typeof HTMLCanvasElement&&e instanceof HTMLCanvasElement||e instanceof 
OffscreenCanvas?e.getContext("2d"):null;if(s){const s=c();s.width=e.width,s.height=e.height;const r=d(s);if(null==r)throw new Error("Can not access image data");{let s=e.height,o=e.width;if(void 0!==t&&void 0!==t.resizedHeight&&void 0!==t.resizedWidth&&(s=t.resizedHeight,o=t.resizedWidth),void 0!==t){if(l=t,void 0!==t.tensorFormat)throw new Error("Image input config format must be RGBA for HTMLImageElement");l.tensorFormat="RGBA",l.height=s,l.width=o}else l.tensorFormat="RGBA",l.height=s,l.width=o;r.drawImage(e,0,0),i=r.getImageData(0,0,o,s).data}}else{if(!r){if(n){if(void 0===t)throw new Error("Please provide image config with format for Imagebitmap");const s=c();s.width=e.width,s.height=e.height;const r=d(s);if(null!=r){const t=e.height,s=e.width;return r.drawImage(e,0,0,s,t),i=r.getImageData(0,0,s,t).data,l.height=t,l.width=s,o(i,l)}throw new Error("Can not access image data")}if(a)return new Promise(((t,s)=>{const r=c(),n=d(r);if(!e||!n)return s();const a=new Image;a.crossOrigin="Anonymous",a.src=e,a.onload=()=>{r.width=a.width,r.height=a.height,n.drawImage(a,0,0,r.width,r.height);const e=n.getImageData(0,0,r.width,r.height);l.height=r.height,l.width=r.width,t(o(e.data,l))}}));throw new Error("Input data provided is not supported - aborted tensor creation")}{let s,r;if(void 0!==t&&void 0!==t.resizedWidth&&void 0!==t.resizedHeight?(s=t.resizedHeight,r=t.resizedWidth):(s=e.height,r=e.width),void 0!==t&&(l=t),l.format="RGBA",l.height=s,l.width=r,void 0!==t){const t=c();t.width=r,t.height=s;const o=d(t);if(null==o)throw new Error("Can not access image data");o.putImageData(e,0,0),i=o.getImageData(0,0,r,s).data}else i=e.data}}if(void 0!==i)return o(i,l);throw new Error("Input data provided is not supported - aborted tensor creation")},a=(e,t)=>{const{width:s,height:o,download:n,dispose:a}=t,i=[1,o,s,4];return new r.Tensor({location:"texture",type:"float32",texture:e,dims:i,download:n,dispose:a})},i=(e,t)=>{const{dataType:s,dims:o,download:n,dispose:a}=t;return new 
r.Tensor({location:"gpu-buffer",type:s??"float32",gpuBuffer:e,dims:o,download:n,dispose:a})},l=(e,t)=>{const{dataType:s,dims:o,download:n,dispose:a}=t;return new r.Tensor({location:"ml-tensor",type:s??"float32",mlTensor:e,dims:o,download:n,dispose:a})},c=(e,t,s)=>new r.Tensor({location:"cpu-pinned",type:e,data:t,dims:s??[t.length]})},"./node_modules/onnxruntime-common/dist/esm/tensor-factory.js":
59
- /*!********************************************************************!*\
60
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-factory.js ***!
61
- \********************************************************************/(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js":
62
- /*!******************************************************************************!*\
63
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js ***!
64
- \******************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP:()=>o,NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP:()=>r,checkTypedArray:()=>a});const r=new Map([["float32",Float32Array],["uint8",Uint8Array],["int8",Int8Array],["uint16",Uint16Array],["int16",Int16Array],["int32",Int32Array],["bool",Uint8Array],["float64",Float64Array],["uint32",Uint32Array],["int4",Uint8Array],["uint4",Uint8Array]]),o=new Map([[Float32Array,"float32"],[Uint8Array,"uint8"],[Int8Array,"int8"],[Uint16Array,"uint16"],[Int16Array,"int16"],[Int32Array,"int32"],[Float64Array,"float64"],[Uint32Array,"uint32"]]);let n=!1;const a=()=>{if(!n){n=!0;const e="undefined"!=typeof BigInt64Array&&BigInt64Array.from,t="undefined"!=typeof BigUint64Array&&BigUint64Array.from,s="undefined"!=typeof Float16Array&&Float16Array.from;e&&(r.set("int64",BigInt64Array),o.set(BigInt64Array,"int64")),t&&(r.set("uint64",BigUint64Array),o.set(BigUint64Array,"uint64")),s?(r.set("float16",Float16Array),o.set(Float16Array,"float16")):r.set("float16",Uint16Array)}}},"./node_modules/onnxruntime-common/dist/esm/tensor-impl.js":
65
- /*!*****************************************************************!*\
66
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-impl.js ***!
67
- \*****************************************************************/(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>i});var r=s(/*! ./tensor-conversion-impl.js */"./node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js"),o=s(/*! ./tensor-factory-impl.js */"./node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js"),n=s(/*! ./tensor-impl-type-mapping.js */"./node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js"),a=s(/*! ./tensor-utils-impl.js */"./node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js");class i{constructor(e,t,s){let r,o;if((0,n.checkTypedArray)(),"object"==typeof e&&"location"in e)switch(this.dataLocation=e.location,r=e.type,o=e.dims,e.location){case"cpu-pinned":{const t=n.NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(r);if(!t)throw new TypeError(`unsupported type "${r}" to create tensor from pinned buffer`);if(!(e.data instanceof t))throw new TypeError(`buffer should be of type ${t.name}`);this.cpuData=e.data;break}case"texture":if("float32"!==r)throw new TypeError(`unsupported type "${r}" to create tensor from texture`);this.gpuTextureData=e.texture,this.downloader=e.download,this.disposer=e.dispose;break;case"gpu-buffer":if("float32"!==r&&"float16"!==r&&"int32"!==r&&"int64"!==r&&"uint32"!==r&&"uint8"!==r&&"bool"!==r&&"uint4"!==r&&"int4"!==r)throw new TypeError(`unsupported type "${r}" to create tensor from gpu buffer`);this.gpuBufferData=e.gpuBuffer,this.downloader=e.download,this.disposer=e.dispose;break;case"ml-tensor":if("float32"!==r&&"float16"!==r&&"int32"!==r&&"int64"!==r&&"uint32"!==r&&"uint64"!==r&&"int8"!==r&&"uint8"!==r&&"bool"!==r)throw new TypeError(`unsupported type "${r}" to create tensor from MLTensor`);this.mlTensorData=e.mlTensor,this.downloader=e.download,this.disposer=e.dispose;break;default:throw new Error(`Tensor constructor: unsupported location '${this.dataLocation}'`)}else{let a,i;if("string"==typeof e)if(r=e,i=s,"string"===e){if(!Array.isArray(t))throw new TypeError("A string tensor's data 
must be a string array.");a=t}else{const s=n.NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(e);if(void 0===s)throw new TypeError(`Unsupported tensor type: ${e}.`);if(Array.isArray(t)){if("float16"===e&&s===Uint16Array||"uint4"===e||"int4"===e)throw new TypeError(`Creating a ${e} tensor from number array is not supported. Please use ${s.name} as data.`);a="uint64"===e||"int64"===e?s.from(t,BigInt):s.from(t)}else if(t instanceof s)a=t;else{if(!(t instanceof Uint8ClampedArray))throw new TypeError(`A ${r} tensor's data must be type of ${s}`);if("uint8"!==e)throw new TypeError("A Uint8ClampedArray tensor's data must be type of uint8");a=Uint8Array.from(t)}}else if(i=t,Array.isArray(e)){if(0===e.length)throw new TypeError("Tensor type cannot be inferred from an empty array.");const t=typeof e[0];if("string"===t)r="string",a=e;else{if("boolean"!==t)throw new TypeError(`Invalid element type of data array: ${t}.`);r="bool",a=Uint8Array.from(e)}}else if(e instanceof Uint8ClampedArray)r="uint8",a=Uint8Array.from(e);else{const t=n.NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.get(e.constructor);if(void 0===t)throw new TypeError(`Unsupported type for tensor data: ${e.constructor}.`);r=t,a=e}if(void 0===i)i=[a.length];else if(!Array.isArray(i))throw new TypeError("A tensor's dims must be a number array");o=i,this.cpuData=a,this.dataLocation="cpu"}const i=(0,a.calculateSize)(o);if(this.cpuData&&i!==this.cpuData.length&&("uint4"!==r&&"int4"!==r||Math.ceil(i/2)!==this.cpuData.length))throw new Error(`Tensor's size(${i}) does not match data length(${this.cpuData.length}).`);this.type=r,this.dims=o,this.size=i}static async fromImage(e,t){return(0,o.tensorFromImage)(e,t)}static fromTexture(e,t){return(0,o.tensorFromTexture)(e,t)}static fromGpuBuffer(e,t){return(0,o.tensorFromGpuBuffer)(e,t)}static fromMLTensor(e,t){return(0,o.tensorFromMLTensor)(e,t)}static 
fromPinnedBuffer(e,t,s){return(0,o.tensorFromPinnedBuffer)(e,t,s)}toDataURL(e){return(0,r.tensorToDataURL)(this,e)}toImageData(e){return(0,r.tensorToImageData)(this,e)}get data(){if(this.ensureValid(),!this.cpuData)throw new Error("The data is not on CPU. Use `getData()` to download GPU data to CPU, or use `texture` or `gpuBuffer` property to access the GPU data directly.");return this.cpuData}get location(){return this.dataLocation}get texture(){if(this.ensureValid(),!this.gpuTextureData)throw new Error("The data is not stored as a WebGL texture.");return this.gpuTextureData}get gpuBuffer(){if(this.ensureValid(),!this.gpuBufferData)throw new Error("The data is not stored as a WebGPU buffer.");return this.gpuBufferData}get mlTensor(){if(this.ensureValid(),!this.mlTensorData)throw new Error("The data is not stored as a WebNN MLTensor.");return this.mlTensorData}async getData(e){switch(this.ensureValid(),this.dataLocation){case"cpu":case"cpu-pinned":return this.data;case"texture":case"gpu-buffer":case"ml-tensor":if(!this.downloader)throw new Error("The current tensor is not created with a specified data downloader.");if(this.isDownloading)throw new Error("The current tensor is being downloaded.");try{this.isDownloading=!0;const t=await this.downloader();return this.downloader=void 0,this.dataLocation="cpu",this.cpuData=t,e&&this.disposer&&(this.disposer(),this.disposer=void 0),t}finally{this.isDownloading=!1}default:throw new Error(`cannot get data from location: ${this.dataLocation}`)}}dispose(){if(this.isDownloading)throw new Error("The current tensor is being downloaded.");this.disposer&&(this.disposer(),this.disposer=void 0),this.cpuData=void 0,this.gpuTextureData=void 0,this.gpuBufferData=void 0,this.mlTensorData=void 0,this.downloader=void 0,this.isDownloading=void 0,this.dataLocation="none"}ensureValid(){if("none"===this.dataLocation)throw new Error("The tensor is disposed.")}reshape(e){if(this.ensureValid(),this.downloader||this.disposer)throw new 
Error("Cannot reshape a tensor that owns GPU resource.");return(0,a.tensorReshape)(this,e)}}},"./node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js":
68
- /*!***********************************************************************!*\
69
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js ***!
70
- \***********************************************************************/(e,t,s)=>{s.r(t),s.d(t,{calculateSize:()=>o,tensorReshape:()=>n});var r=s(/*! ./tensor-impl.js */"./node_modules/onnxruntime-common/dist/esm/tensor-impl.js");const o=e=>{let t=1;for(let s=0;s<e.length;s++){const r=e[s];if("number"!=typeof r||!Number.isSafeInteger(r))throw new TypeError(`dims[${s}] must be an integer, got: ${r}`);if(r<0)throw new RangeError(`dims[${s}] must be a non-negative integer, got: ${r}`);t*=r}return t},n=(e,t)=>{switch(e.location){case"cpu":return new r.Tensor(e.type,e.data,t);case"cpu-pinned":return new r.Tensor({location:"cpu-pinned",data:e.data,type:e.type,dims:t});case"texture":return new r.Tensor({location:"texture",texture:e.texture,type:e.type,dims:t});case"gpu-buffer":return new r.Tensor({location:"gpu-buffer",gpuBuffer:e.gpuBuffer,type:e.type,dims:t});case"ml-tensor":return new r.Tensor({location:"ml-tensor",mlTensor:e.mlTensor,type:e.type,dims:t});default:throw new Error(`tensorReshape: tensor location ${e.location} is not supported`)}}},"./node_modules/onnxruntime-common/dist/esm/tensor.js":
71
- /*!************************************************************!*\
72
- !*** ./node_modules/onnxruntime-common/dist/esm/tensor.js ***!
73
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>r});const r=s(/*! ./tensor-impl.js */"./node_modules/onnxruntime-common/dist/esm/tensor-impl.js").Tensor},"./node_modules/onnxruntime-common/dist/esm/trace.js":
74
- /*!***********************************************************!*\
75
- !*** ./node_modules/onnxruntime-common/dist/esm/trace.js ***!
76
- \***********************************************************/(e,t,s)=>{s.r(t),s.d(t,{TRACE:()=>o,TRACE_FUNC_BEGIN:()=>a,TRACE_FUNC_END:()=>i});var r=s(/*! ./env-impl.js */"./node_modules/onnxruntime-common/dist/esm/env-impl.js");const o=(e,t)=>{(void 0===r.env.trace?r.env.wasm.trace:r.env.trace)&&console.timeStamp(`${e}::ORT::${t}`)},n=(e,t)=>{const s=(new Error).stack?.split(/\r\n|\r|\n/g)||[];let r=!1;for(let n=0;n<s.length;n++){if(r&&!s[n].includes("TRACE_FUNC")){let r=`FUNC_${e}::${s[n].trim().split(" ")[1]}`;return t&&(r+=`::${t}`),void o("CPU",r)}s[n].includes("TRACE_FUNC")&&(r=!0)}},a=e=>{(void 0===r.env.trace?r.env.wasm.trace:r.env.trace)&&n("BEGIN",e)},i=e=>{(void 0===r.env.trace?r.env.wasm.trace:r.env.trace)&&n("END",e)}},"./node_modules/onnxruntime-common/dist/esm/training-session-impl.js":
77
- /*!***************************************************************************!*\
78
- !*** ./node_modules/onnxruntime-common/dist/esm/training-session-impl.js ***!
79
- \***************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{TrainingSession:()=>n});var r=s(/*! ./backend-impl.js */"./node_modules/onnxruntime-common/dist/esm/backend-impl.js"),o=s(/*! ./tensor.js */"./node_modules/onnxruntime-common/dist/esm/tensor.js");class n{constructor(e,t,s){this.handler=e,this.hasOptimizerModel=t,this.hasEvalModel=s}get trainingInputNames(){return this.handler.inputNames}get trainingOutputNames(){return this.handler.outputNames}get evalInputNames(){if(this.hasEvalModel)return this.handler.evalInputNames;throw new Error("This training session has no evalModel loaded.")}get evalOutputNames(){if(this.hasEvalModel)return this.handler.evalOutputNames;throw new Error("This training session has no evalModel loaded.")}static async create(e,t){const s=e.evalModel||"",o=e.optimizerModel||"",a=t||{},[i,l]=await(0,r.resolveBackendAndExecutionProviders)(a);if(i.createTrainingSessionHandler){const t=await i.createTrainingSessionHandler(e.checkpointState,e.trainModel,s,o,l);return new n(t,!!e.optimizerModel,!!e.evalModel)}throw new Error("Training backend could not be resolved. 
Make sure you're using the correct configuration & WebAssembly files.")}typeNarrowingForRunStep(e,t,s,r,n){const a={};let i={};if("object"!=typeof s||null===s||s instanceof o.Tensor||Array.isArray(s))throw new TypeError("'feeds' must be an object that use input names as keys and OnnxValue as corresponding values.");let l=!0;if("object"==typeof r){if(null===r)throw new TypeError("Unexpected argument[1]: cannot be null.");if(r instanceof o.Tensor)throw new TypeError("'fetches' cannot be a Tensor");if(Array.isArray(r)){if(0===r.length)throw new TypeError("'fetches' cannot be an empty array.");l=!1;for(const e of r){if("string"!=typeof e)throw new TypeError("'fetches' must be a string array or an object.");if(-1===t.indexOf(e))throw new RangeError(`'fetches' contains invalid output name: ${e}.`);a[e]=null}if("object"==typeof n&&null!==n)i=n;else if(void 0!==n)throw new TypeError("'options' must be an object.")}else{let e=!1;const s=Object.getOwnPropertyNames(r);for(const n of t)if(-1!==s.indexOf(n)){const t=r[n];(null===t||t instanceof o.Tensor)&&(e=!0,l=!1,a[n]=t)}if(e){if("object"==typeof n&&null!==n)i=n;else if(void 0!==n)throw new TypeError("'options' must be an object.")}else i=r}}else if(void 0!==r)throw new TypeError("Unexpected argument[1]: must be 'fetches' or 'options'.");for(const t of e)if(void 0===s[t])throw new Error(`input '${t}' is missing in 'feeds'.`);if(l)for(const e of t)a[e]=null;return[a,i]}convertHandlerReturnTypeToMapOfTensors(e){const t={};for(const s in e)if(Object.hasOwnProperty.call(e,s)){const r=e[s];r instanceof o.Tensor?t[s]=r:t[s]=new o.Tensor(r.type,r.data,r.dims)}return t}async lazyResetGrad(){await this.handler.lazyResetGrad()}async runTrainStep(e,t,s){const[r,o]=this.typeNarrowingForRunStep(this.trainingInputNames,this.trainingOutputNames,e,t,s),n=await this.handler.runTrainStep(e,r,o);return this.convertHandlerReturnTypeToMapOfTensors(n)}async runOptimizerStep(e){if(!this.hasOptimizerModel)throw new Error("This TrainingSession has 
no OptimizerModel loaded.");await this.handler.runOptimizerStep(e||{})}async runEvalStep(e,t,s){if(this.hasEvalModel){const[r,o]=this.typeNarrowingForRunStep(this.evalInputNames,this.evalOutputNames,e,t,s),n=await this.handler.runEvalStep(e,r,o);return this.convertHandlerReturnTypeToMapOfTensors(n)}throw new Error("This TrainingSession has no EvalModel loaded.")}async getParametersSize(e=!0){return this.handler.getParametersSize(e)}async loadParametersBuffer(e,t=!0){const s=await this.getParametersSize(t);if(e.length!==4*s)throw new Error("Size of the buffer passed into loadParametersBuffer must match the number of parameters in the model. Please use getParametersSize method to check.");return this.handler.loadParametersBuffer(e,t)}async getContiguousParameters(e=!0){return this.handler.getContiguousParameters(e)}async release(){return this.handler.dispose()}}},"./node_modules/onnxruntime-common/dist/esm/training-session.js":
80
- /*!**********************************************************************!*\
81
- !*** ./node_modules/onnxruntime-common/dist/esm/training-session.js ***!
82
- \**********************************************************************/(e,t,s)=>{s.r(t),s.d(t,{TrainingSession:()=>r});const r=s(/*! ./training-session-impl.js */"./node_modules/onnxruntime-common/dist/esm/training-session-impl.js").TrainingSession},"./node_modules/onnxruntime-common/dist/esm/version.js":
83
- /*!*************************************************************!*\
84
- !*** ./node_modules/onnxruntime-common/dist/esm/version.js ***!
85
- \*************************************************************/(e,t,s)=>{s.r(t),s.d(t,{version:()=>r});const r="1.20.1"},"./src/backends/onnx.js":
86
- /*!******************************!*\
87
- !*** ./src/backends/onnx.js ***!
88
- \******************************/(e,t,s)=>{var r;s.r(t),s.d(t,{Tensor:()=>i.Tensor,createInferenceSession:()=>f,deviceToExecutionProviders:()=>_,isONNXProxy:()=>w,isONNXTensor:()=>g});var o=s(/*! ../env.js */"./src/env.js"),n=s(/*! onnxruntime-node */"onnxruntime-node"),a=s(/*! #onnxruntime-webgpu */"?cb4d"),i=s(/*! onnxruntime-common */"./node_modules/onnxruntime-common/dist/esm/index.js");const l=Object.freeze({auto:null,gpu:null,cpu:"cpu",wasm:"wasm",webgpu:"webgpu",cuda:"cuda",dml:"dml",webnn:{name:"webnn",deviceType:"cpu"},"webnn-npu":{name:"webnn",deviceType:"npu"},"webnn-gpu":{name:"webnn",deviceType:"gpu"},"webnn-cpu":{name:"webnn",deviceType:"cpu"}}),c=[];let d,u;const p=Symbol.for("onnxruntime");if(p in globalThis)u=globalThis[p];else if(o.apis.IS_NODE_ENV){switch(u=n.default??n,process.platform){case"win32":c.push("dml");break;case"linux":"x64"===process.arch&&c.push("cuda")}c.push("cpu"),d=["cpu"]}else u=r||(r=s.t(a,2)),o.apis.IS_WEBNN_AVAILABLE&&c.push("webnn-npu","webnn-gpu","webnn-cpu","webnn"),o.apis.IS_WEBGPU_AVAILABLE&&c.push("webgpu"),c.push("wasm"),d=["wasm"];const m=u.InferenceSession;function _(e=null){if(!e)return d;switch(e){case"auto":return c;case"gpu":return c.filter((e=>["webgpu","cuda","dml","webnn-gpu"].includes(e)))}if(c.includes(e))return[l[e]??e];throw new Error(`Unsupported device: "${e}". Should be one of: ${c.join(", ")}.`)}let h=null;async function f(e,t,s){h&&await h;const r=m.create(e,t);h??=r;const o=await r;return o.config=s,o}function g(e){return e instanceof u.Tensor}const M=u?.env;function w(){return M?.wasm?.proxy}M?.wasm&&(M.wasm.wasmPaths=`https://cdn.jsdelivr.net/npm/@huggingface/transformers@${o.env.version}/dist/`,M.wasm.proxy=!1,"undefined"!=typeof crossOriginIsolated&&crossOriginIsolated||(M.wasm.numThreads=1)),M?.webgpu&&(M.webgpu.powerPreference="high-performance"),o.env.backends.onnx=M},"./src/base/feature_extraction_utils.js":
89
- /*!**********************************************!*\
90
- !*** ./src/base/feature_extraction_utils.js ***!
91
- \**********************************************/(e,t,s)=>{s.r(t),s.d(t,{FeatureExtractor:()=>a,validate_audio_inputs:()=>i});var r=s(/*! ../utils/constants.js */"./src/utils/constants.js"),o=s(/*! ../utils/generic.js */"./src/utils/generic.js"),n=s(/*! ../utils/hub.js */"./src/utils/hub.js");class a extends o.Callable{constructor(e){super(),this.config=e}static async from_pretrained(e,t){return new this(await(0,n.getModelJSON)(e,r.FEATURE_EXTRACTOR_NAME,!0,t))}}function i(e,t){if(!(e instanceof Float32Array||e instanceof Float64Array))throw new Error(`${t} expects input to be a Float32Array or a Float64Array, but got ${e?.constructor?.name??typeof e} instead. If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.`)}},"./src/base/image_processors_utils.js":
92
- /*!********************************************!*\
93
- !*** ./src/base/image_processors_utils.js ***!
94
- \********************************************/(e,t,s)=>{s.r(t),s.d(t,{ImageProcessor:()=>w,post_process_instance_segmentation:()=>M,post_process_object_detection:()=>p,post_process_panoptic_segmentation:()=>g,post_process_semantic_segmentation:()=>m});var r=s(/*! ../utils/generic.js */"./src/utils/generic.js"),o=s(/*! ../utils/tensor.js */"./src/utils/tensor.js"),n=s(/*! ../utils/maths.js */"./src/utils/maths.js"),a=(s(/*! ../utils/image.js */"./src/utils/image.js"),s(/*! ../utils/core.js */"./src/utils/core.js")),i=s(/*! ../utils/hub.js */"./src/utils/hub.js"),l=s(/*! ../utils/constants.js */"./src/utils/constants.js");function c(e,t,s=0,r=null){const o=e/t;let a=(0,n.bankers_round)(o)*t;return null!==r&&a>r&&(a=Math.floor(o)*t),a<s&&(a=Math.ceil(o)*t),a}function d([e,t],s){return[Math.max(Math.floor(e/s),1)*s,Math.max(Math.floor(t/s),1)*s]}function u([e,t,s,r]){return[e-s/2,t-r/2,e+s/2,t+r/2]}function p(e,t=.5,s=null,r=!1){const o=e.logits,a=e.pred_boxes,[i,l,c]=o.dims;if(null!==s&&s.length!==i)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let d=[];for(let e=0;e<i;++e){let i=null!==s?s[e]:null,p={boxes:[],classes:[],scores:[]},m=o[e],_=a[e];for(let e=0;e<l;++e){let s,o=m[e],a=[];if(r){s=o.sigmoid().data;for(let e=0;e<s.length;++e)s[e]>t&&a.push(e)}else{let e=(0,n.max)(o.data)[1];if(e===c-1)continue;if(s=(0,n.softmax)(o.data),s[e]<t)continue;a.push(e)}for(const t of a){let r=_[e].data;r=u(r),null!==i&&(r=r.map(((e,t)=>e*i[(t+1)%2]))),p.boxes.push(r),p.classes.push(t),p.scores.push(s[t])}}d.push(p)}return d}function m(e,t=null){const s=e.logits,r=s.dims[0];if(null!==t&&t.length!==r)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");const n=[];for(let e=0;e<r;++e){const r=null!==t?t[e]:null;let a=s[e];null!==r&&(a=(0,o.interpolate)(a,r,"bilinear",!1));const[i,l]=r??a.dims.slice(-2),c=new o.Tensor("int32",new Int32Array(i*l),[i,l]),d=a[0].data,u=c.data;for(let 
e=1;e<a.dims[0];++e){const t=a[e].data;for(let s=0;s<t.length;++s)t[s]>d[s]&&(d[s]=t[s],u[s]=e)}const p=new Array(a.dims[0]);for(let e=0;e<u.length;++e){const t=u[e];p[t]=t}const m=p.filter((e=>void 0!==e));n.push({segmentation:c,labels:m})}return n}function _(e,t,s,r){const o=[],a=[],i=[];for(let l=0;l<e.dims[0];++l){const c=e[l],d=t[l],u=(0,n.max)(c.data)[1];if(u===r)continue;const p=(0,n.softmax)(c.data)[u];p>s&&(o.push(d),a.push(p),i.push(u))}return[o,a,i]}function h(e,t,s,r=.5,o=.8){const n=[];let a=0,i=0;const l=t[s].data;for(let t=0;t<e.length;++t)e[t]===s&&(n.push(t),++a),l[t]>=r&&++i;let c=a>0&&i>0;if(c){c=a/i>o}return[c,n]}function f(e,t,s,r,n,a=null,i=null){const[l,c]=i??e[0].dims,d=new o.Tensor("int32",new Int32Array(l*c),[l,c]),u=[];if(null!==i)for(let t=0;t<e.length;++t)e[t]=(0,o.interpolate)(e[t],i,"bilinear",!1);const p=new Int32Array(e[0].data.length),m=new Float32Array(e[0].data.length);for(let s=0;s<e.length;++s){let r=t[s];const o=e[s].data;for(let e=0;e<o.length;++e)o[e]*=r,o[e]>m[e]&&(p[e]=s,m[e]=o[e])}let _=0;const f=d.data;for(let o=0;o<s.length;++o){const a=s[o],[i,l]=h(p,e,o,r,n);if(i){++_;for(const e of l)f[e]=_;u.push({id:_,label_id:a,score:t[o]})}}return[d,u]}function g(e,t=.5,s=.5,r=.8,n=null,a=null){null===n&&(console.warn("`label_ids_to_fuse` unset. 
No instance will be fused."),n=new Set);const i=e.class_queries_logits??e.logits,l=(e.masks_queries_logits??e.pred_masks).sigmoid();let[c,d,u]=i.dims;if(u-=1,null!==a&&a.length!==c)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let p=[];for(let e=0;e<c;++e){let c=null!==a?a[e]:null,d=i[e],m=l[e],[h,g,M]=_(d,m,t,u);if(0===M.length){let[e,t]=c??m.dims.slice(-2),s=new o.Tensor("int32",new Int32Array(e*t).fill(-1),[e,t]);p.push({segmentation:s,segments_info:[]});continue}let[w,T]=f(h,g,M,s,r,n,c);p.push({segmentation:w,segments_info:T})}return p}function M(e,t=.5,s=null){throw new Error("`post_process_instance_segmentation` is not yet implemented.")}class w extends r.Callable{constructor(e){super(),this.image_mean=e.image_mean??e.mean,this.image_std=e.image_std??e.std,this.resample=e.resample??2,this.do_rescale=e.do_rescale??!0,this.rescale_factor=e.rescale_factor??1/255,this.do_normalize=e.do_normalize,this.do_thumbnail=e.do_thumbnail,this.size=e.size??e.image_size,this.do_resize=e.do_resize??void 0!==this.size,this.size_divisibility=e.size_divisibility??e.size_divisor,this.do_center_crop=e.do_center_crop,this.crop_size=e.crop_size,this.do_convert_rgb=e.do_convert_rgb??!0,this.do_crop_margin=e.do_crop_margin,this.pad_size=e.pad_size,this.do_pad=e.do_pad,this.do_pad&&!this.pad_size&&this.size&&void 0!==this.size.width&&void 0!==this.size.height&&(this.pad_size=this.size),this.do_flip_channel_order=e.do_flip_channel_order??!1,this.config=e}async thumbnail(e,t,s=2){const r=e.height,o=e.width,n=t.height,a=t.width;let i=Math.min(r,n),l=Math.min(o,a);return i===r&&l===o?e:(r>o?l=Math.floor(o*i/r):o>r&&(i=Math.floor(r*l/o)),await e.resize(l,i,{resample:s}))}async crop_margin(e,t=200){const s=e.clone().grayscale(),r=(0,n.min)(s.data)[0],o=(0,n.max)(s.data)[0]-r;if(0===o)return e;const a=t/255;let i=s.width,l=s.height,c=0,d=0;const u=s.data;for(let e=0;e<s.height;++e){const t=e*s.width;for(let 
n=0;n<s.width;++n)(u[t+n]-r)/o<a&&(i=Math.min(i,n),l=Math.min(l,e),c=Math.max(c,n),d=Math.max(d,e))}return e=await e.crop([i,l,c,d])}pad_image(e,t,s,{mode:r="constant",center:o=!1,constant_values:n=0}={}){const[i,l,c]=t;let d,u;if("number"==typeof s?(d=s,u=s):(d=s.width,u=s.height),d!==l||u!==i){const s=new Float32Array(d*u*c);if(Array.isArray(n))for(let e=0;e<s.length;++e)s[e]=n[e%c];else 0!==n&&s.fill(n);const[p,m]=o?[Math.floor((d-l)/2),Math.floor((u-i)/2)]:[0,0];for(let t=0;t<i;++t){const r=(t+m)*d,o=t*l;for(let t=0;t<l;++t){const n=(r+t+p)*c,a=(o+t)*c;for(let t=0;t<c;++t)s[n+t]=e[a+t]}}if("symmetric"===r){if(o)throw new Error("`center` padding is not supported when `mode` is set to `symmetric`.");const t=i-1,r=l-1;for(let o=0;o<u;++o){const n=o*d,u=(0,a.calculateReflectOffset)(o,t)*l;for(let t=0;t<d;++t){if(o<i&&t<l)continue;const d=(n+t)*c,p=(u+(0,a.calculateReflectOffset)(t,r))*c;for(let t=0;t<c;++t)s[d+t]=e[p+t]}}}e=s,t=[u,d,c]}return[e,t]}rescale(e){for(let t=0;t<e.length;++t)e[t]=this.rescale_factor*e[t]}get_resize_output_image_size(e,t){const[s,r]=e.size;let o,n;if(this.do_thumbnail){const{height:e,width:s}=t;o=Math.min(e,s)}else Number.isInteger(t)?(o=t,n=this.config.max_size??o):void 0!==t&&(o=t.shortest_edge,n=t.longest_edge);if(void 0!==o||void 0!==n){const e=void 0===o?1:Math.max(o/s,o/r),t=s*e,a=r*e,i=void 0===n?1:Math.min(n/t,n/a);let l=Math.floor(Number((t*i).toFixed(2))),c=Math.floor(Number((a*i).toFixed(2)));return void 0!==this.size_divisibility&&([l,c]=d([l,c],this.size_divisibility)),[l,c]}if(void 0!==t&&void 0!==t.width&&void 0!==t.height){let e=t.width,o=t.height;if(this.config.keep_aspect_ratio&&this.config.ensure_multiple_of){let t=o/r,n=e/s;Math.abs(1-n)<Math.abs(1-t)?t=n:n=t,o=c(t*r,this.config.ensure_multiple_of),e=c(n*s,this.config.ensure_multiple_of)}return[e,o]}if(void 0!==this.size_divisibility)return d([s,r],this.size_divisibility);if(void 0!==t.min_pixels&&void 0!==t.max_pixels){const{min_pixels:e,max_pixels:o}=t;return 
function(e,t,s=28,r=3136,o=1003520){if(e<s||t<s)throw new Error(`height:${e} or width:${t} must be larger than factor:${s}`);if(Math.max(e,t)/Math.min(e,t)>200)throw new Error("absolute aspect ratio must be smaller than 200, got "+Math.max(e,t)/Math.min(e,t));let n=Math.round(e/s)*s,a=Math.round(t/s)*s;if(n*a>o){const r=Math.sqrt(e*t/o);n=Math.floor(e/r/s)*s,a=Math.floor(t/r/s)*s}else if(n*a<r){const o=Math.sqrt(r/(e*t));n=Math.ceil(e*o/s)*s,a=Math.ceil(t*o/s)*s}return[n,a]}(r,s,this.config.patch_size*this.config.merge_size,e,o)}throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(t)}`)}async resize(e){const[t,s]=this.get_resize_output_image_size(e,this.size);return await e.resize(t,s,{resample:this.resample})}async preprocess(e,{do_normalize:t=null,do_pad:s=null,do_convert_rgb:r=null,do_convert_grayscale:n=null,do_flip_channel_order:a=null}={}){this.do_crop_margin&&(e=await this.crop_margin(e));const[i,l]=e.size;if(r??this.do_convert_rgb?e=e.rgb():n&&(e=e.grayscale()),this.do_resize&&(e=await this.resize(e)),this.do_thumbnail&&(e=await this.thumbnail(e,this.size,this.resample)),this.do_center_crop){let t,s;Number.isInteger(this.crop_size)?(t=this.crop_size,s=this.crop_size):(t=this.crop_size.width,s=this.crop_size.height),e=await e.center_crop(t,s)}const c=[e.height,e.width];let u=Float32Array.from(e.data),p=[e.height,e.width,e.channels];if(this.do_rescale&&this.rescale(u),t??this.do_normalize){let t=this.image_mean;Array.isArray(this.image_mean)||(t=new Array(e.channels).fill(t));let s=this.image_std;if(Array.isArray(this.image_std)||(s=new Array(e.channels).fill(t)),t.length!==e.channels||s.length!==e.channels)throw new Error(`When set to arrays, the length of \`image_mean\` (${t.length}) and \`image_std\` (${s.length}) must match the number of channels in the image (${e.channels}).`);for(let r=0;r<u.length;r+=e.channels)for(let 
o=0;o<e.channels;++o)u[r+o]=(u[r+o]-t[o])/s[o]}if(s??this.do_pad)if(this.pad_size){const t=this.pad_image(u,[e.height,e.width,e.channels],this.pad_size);[u,p]=t}else if(this.size_divisibility){const[e,t]=d([p[1],p[0]],this.size_divisibility);[u,p]=this.pad_image(u,p,{width:e,height:t})}if(a??this.do_flip_channel_order){if(3!==p[2])throw new Error("Flipping channel order is only supported for RGB images.");for(let e=0;e<u.length;e+=3){const t=u[e];u[e]=u[e+2],u[e+2]=t}}return{original_size:[l,i],reshaped_input_size:c,pixel_values:new o.Tensor("float32",u,p).permute(2,0,1)}}async _call(e,...t){Array.isArray(e)||(e=[e]);const s=await Promise.all(e.map((e=>this.preprocess(e))));return{pixel_values:(0,o.stack)(s.map((e=>e.pixel_values)),0),original_sizes:s.map((e=>e.original_size)),reshaped_input_sizes:s.map((e=>e.reshaped_input_size))}}static async from_pretrained(e,t){return new this(await(0,i.getModelJSON)(e,l.IMAGE_PROCESSOR_NAME,!0,t))}}},"./src/base/processing_utils.js":
95
- /*!**************************************!*\
96
- !*** ./src/base/processing_utils.js ***!
97
- \**************************************/(e,t,s)=>{s.r(t),s.d(t,{Processor:()=>a});var r=s(/*! ../utils/constants.js */"./src/utils/constants.js"),o=s(/*! ../utils/generic.js */"./src/utils/generic.js"),n=s(/*! ../utils/hub.js */"./src/utils/hub.js");class a extends o.Callable{static classes=["image_processor_class","tokenizer_class","feature_extractor_class"];static uses_processor_config=!1;constructor(e,t){super(),this.config=e,this.components=t}get image_processor(){return this.components.image_processor}get tokenizer(){return this.components.tokenizer}get feature_extractor(){return this.components.feature_extractor}apply_chat_template(e,t={}){if(!this.tokenizer)throw new Error("Unable to apply chat template without a tokenizer.");return this.tokenizer.apply_chat_template(e,{tokenize:!1,...t})}batch_decode(...e){if(!this.tokenizer)throw new Error("Unable to decode without a tokenizer.");return this.tokenizer.batch_decode(...e)}async _call(e,...t){for(const s of[this.image_processor,this.feature_extractor,this.tokenizer])if(s)return s(e,...t);throw new Error("No image processor, feature extractor, or tokenizer found.")}static async from_pretrained(e,t){const[s,o]=await Promise.all([this.uses_processor_config?(0,n.getModelJSON)(e,r.PROCESSOR_NAME,!0,t):{},Promise.all(this.classes.filter((e=>e in this)).map((async s=>{const r=await this[s].from_pretrained(e,t);return[s.replace(/_class$/,""),r]}))).then(Object.fromEntries)]);return new this(s,o)}}},"./src/configs.js":
98
- /*!************************!*\
99
- !*** ./src/configs.js ***!
100
- \************************/(e,t,s)=>{s.r(t),s.d(t,{AutoConfig:()=>l,PretrainedConfig:()=>i,getKeyValueShapes:()=>a});var r=s(/*! ./utils/core.js */"./src/utils/core.js"),o=s(/*! ./utils/hub.js */"./src/utils/hub.js");function n(e){const t={};let s={};switch(e.model_type){case"llava":case"paligemma":case"florence2":case"llava_onevision":case"idefics3":s=n(e.text_config);break;case"moondream1":s=n(e.phi_config);break;case"musicgen":s=n(e.decoder);break;case"multi_modality":s=n(e.language_config);break;case"gpt2":case"gptj":case"jais":case"codegen":case"gpt_bigcode":t.num_heads="n_head",t.num_layers="n_layer",t.hidden_size="n_embd";break;case"gpt_neox":case"stablelm":case"opt":case"phi":case"phi3":case"falcon":t.num_heads="num_attention_heads",t.num_layers="num_hidden_layers",t.hidden_size="hidden_size";break;case"llama":case"olmo":case"mobilellm":case"granite":case"cohere":case"mistral":case"starcoder2":case"qwen2":case"qwen2_vl":t.num_heads="num_key_value_heads",t.num_layers="num_hidden_layers",t.hidden_size="hidden_size",t.num_attention_heads="num_attention_heads";break;case"gemma":case"gemma2":t.num_heads="num_key_value_heads",t.num_layers="num_hidden_layers",t.dim_kv="head_dim";break;case"openelm":t.num_heads="num_kv_heads",t.num_layers="num_transformer_layers",t.dim_kv="head_dim";break;case"gpt_neo":case"donut-swin":t.num_heads="num_heads",t.num_layers="num_layers",t.hidden_size="hidden_size";break;case"bloom":t.num_heads="n_head",t.num_layers="n_layer",t.hidden_size="hidden_size";break;case"mpt":t.num_heads="n_heads",t.num_layers="n_layers",t.hidden_size="d_model";break;case"t5":case"mt5":case"longt5":t.num_decoder_layers="num_decoder_layers",t.num_decoder_heads="num_heads",t.decoder_dim_kv="d_kv",t.num_encoder_layers="num_layers",t.num_encoder_heads="num_heads",t.encoder_dim_kv="d_kv";break;case"bart":case"mbart":case"marian":case"whisper":case"m2m_100":case"blenderbot":case"blenderbot-small":case"florence2_language":t.num_decoder_layers="decoder_layers",t.num
_decoder_heads="decoder_attention_heads",t.decoder_hidden_size="d_model",t.num_encoder_layers="encoder_layers",t.num_encoder_heads="encoder_attention_heads",t.encoder_hidden_size="d_model";break;case"speecht5":t.num_decoder_layers="decoder_layers",t.num_decoder_heads="decoder_attention_heads",t.decoder_hidden_size="hidden_size",t.num_encoder_layers="encoder_layers",t.num_encoder_heads="encoder_attention_heads",t.encoder_hidden_size="hidden_size";break;case"trocr":t.num_encoder_layers=t.num_decoder_layers="decoder_layers",t.num_encoder_heads=t.num_decoder_heads="decoder_attention_heads",t.encoder_hidden_size=t.decoder_hidden_size="d_model";break;case"musicgen_decoder":t.num_encoder_layers=t.num_decoder_layers="num_hidden_layers",t.num_encoder_heads=t.num_decoder_heads="num_attention_heads",t.encoder_hidden_size=t.decoder_hidden_size="hidden_size";break;case"vision-encoder-decoder":const o=n(e.decoder),a="num_decoder_layers"in o,i=(0,r.pick)(e,["model_type","is_encoder_decoder"]);return a?(i.num_decoder_layers=o.num_decoder_layers,i.num_decoder_heads=o.num_decoder_heads,i.decoder_hidden_size=o.decoder_hidden_size,i.num_encoder_layers=o.num_encoder_layers,i.num_encoder_heads=o.num_encoder_heads,i.encoder_hidden_size=o.encoder_hidden_size):(i.num_layers=o.num_layers,i.num_heads=o.num_heads,i.hidden_size=o.hidden_size),i}const o={...s,...(0,r.pick)(e,["model_type","multi_query","is_encoder_decoder"])};for(const s in t)o[s]=e[t[s]];return o}function a(e,{prefix:t="past_key_values",batch_size:s=1}={}){const r={},o=e.normalized_config;if(o.is_encoder_decoder&&"num_encoder_heads"in o&&"num_decoder_heads"in o){const e=o.encoder_dim_kv??o.encoder_hidden_size/o.num_encoder_heads,n=o.decoder_dim_kv??o.decoder_hidden_size/o.num_decoder_heads,a=[s,o.num_encoder_heads,0,e],i=[s,o.num_decoder_heads,0,n];for(let e=0;e<o.num_decoder_layers;++e)r[`${t}.${e}.encoder.key`]=a,r[`${t}.${e}.encoder.value`]=a,r[`${t}.${e}.decoder.key`]=i,r[`${t}.${e}.decoder.value`]=i}else{const 
e=o.num_heads,n=o.num_layers,a=o.dim_kv??o.hidden_size/(o.num_attention_heads??e);if("falcon"===o.model_type){const o=[s*e,0,a];for(let e=0;e<n;++e)r[`${t}.${e}.key`]=o,r[`${t}.${e}.value`]=o}else if(o.multi_query){const o=[s*e,0,2*a];for(let e=0;e<n;++e)r[`${t}.${e}.key_value`]=o}else if("bloom"===o.model_type){const o=[s*e,a,0],i=[s*e,0,a];for(let e=0;e<n;++e)r[`${t}.${e}.key`]=o,r[`${t}.${e}.value`]=i}else if("openelm"===o.model_type)for(let o=0;o<n;++o){const n=[s,e[o],0,a];r[`${t}.${o}.key`]=n,r[`${t}.${o}.value`]=n}else{const o=[s,e,0,a];for(let e=0;e<n;++e)r[`${t}.${e}.key`]=o,r[`${t}.${e}.value`]=o}}return r}class i{model_type=null;is_encoder_decoder=!1;max_position_embeddings;"transformers.js_config";constructor(e){Object.assign(this,e),this.normalized_config=n(this)}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:r=null,local_files_only:n=!1,revision:a="main"}={}){!s||s instanceof i||(s=new i(s));const l=s??await async function(e,t){return await(0,o.getModelJSON)(e,"config.json",!0,t)}(e,{progress_callback:t,config:s,cache_dir:r,local_files_only:n,revision:a});return new this(l)}}class l{static async from_pretrained(...e){return i.from_pretrained(...e)}}},"./src/env.js":
101
- /*!********************!*\
102
- !*** ./src/env.js ***!
103
- \********************/(e,t,s)=>{s.r(t),s.d(t,{apis:()=>h,env:()=>T});var r=s(/*! fs */"fs"),o=s(/*! path */"path"),n=s(/*! url */"url");const a="undefined"!=typeof window&&void 0!==window.document,i="undefined"!=typeof self&&"DedicatedWorkerGlobalScope"===self.constructor?.name,l="undefined"!=typeof self&&"caches"in self,c="undefined"!=typeof navigator&&"gpu"in navigator,d="undefined"!=typeof navigator&&"ml"in navigator,u="undefined"!=typeof process,p=u&&"node"===process?.release?.name,m=!b(r.default),_=!b(o.default),h=Object.freeze({IS_BROWSER_ENV:a,IS_WEBWORKER_ENV:i,IS_WEB_CACHE_AVAILABLE:l,IS_WEBGPU_AVAILABLE:c,IS_WEBNN_AVAILABLE:d,IS_PROCESS_AVAILABLE:u,IS_NODE_ENV:p,IS_FS_AVAILABLE:m,IS_PATH_AVAILABLE:_}),f=m&&_;let g="./";if(f){const e=Object(import.meta).url;e?g=o.default.dirname(o.default.dirname(n.default.fileURLToPath(e))):"undefined"!=typeof __dirname&&(g=o.default.dirname(__dirname))}const M=f?o.default.join(g,"/.cache/"):null,w="/models/",T={version:"3.1.1",backends:{onnx:{}},allowRemoteModels:!0,remoteHost:"https://huggingface.co/",remotePathTemplate:"{model}/resolve/{revision}/",allowLocalModels:!(a||i),localModelPath:f?o.default.join(g,w):w,useFS:m,useBrowserCache:l,useFSCache:m,cacheDir:M,useCustomCache:!1,customCache:null};function b(e){return 0===Object.keys(e).length}},"./src/generation/configuration_utils.js":
104
- /*!***********************************************!*\
105
- !*** ./src/generation/configuration_utils.js ***!
106
- \***********************************************/(e,t,s)=>{s.r(t),s.d(t,{GenerationConfig:()=>o});var r=s(/*! ../utils/core.js */"./src/utils/core.js");class o{max_length=20;max_new_tokens=null;min_length=0;min_new_tokens=null;early_stopping=!1;max_time=null;do_sample=!1;num_beams=1;num_beam_groups=1;penalty_alpha=null;use_cache=!0;temperature=1;top_k=50;top_p=1;typical_p=1;epsilon_cutoff=0;eta_cutoff=0;diversity_penalty=0;repetition_penalty=1;encoder_repetition_penalty=1;length_penalty=1;no_repeat_ngram_size=0;bad_words_ids=null;force_words_ids=null;renormalize_logits=!1;constraints=null;forced_bos_token_id=null;forced_eos_token_id=null;remove_invalid_values=!1;exponential_decay_length_penalty=null;suppress_tokens=null;streamer=null;begin_suppress_tokens=null;forced_decoder_ids=null;guidance_scale=null;num_return_sequences=1;output_attentions=!1;output_hidden_states=!1;output_scores=!1;return_dict_in_generate=!1;pad_token_id=null;bos_token_id=null;eos_token_id=null;encoder_no_repeat_ngram_size=0;decoder_start_token_id=null;generation_kwargs={};constructor(e){Object.assign(this,(0,r.pick)(e,Object.getOwnPropertyNames(this)))}}},"./src/generation/logits_process.js":
107
- /*!******************************************!*\
108
- !*** ./src/generation/logits_process.js ***!
109
- \******************************************/(e,t,s)=>{s.r(t),s.d(t,{ClassifierFreeGuidanceLogitsProcessor:()=>g,ForcedBOSTokenLogitsProcessor:()=>l,ForcedEOSTokenLogitsProcessor:()=>c,LogitsProcessor:()=>n,LogitsProcessorList:()=>i,LogitsWarper:()=>a,MinLengthLogitsProcessor:()=>_,MinNewTokensLengthLogitsProcessor:()=>h,NoBadWordsLogitsProcessor:()=>f,NoRepeatNGramLogitsProcessor:()=>p,RepetitionPenaltyLogitsProcessor:()=>m,SuppressTokensAtBeginLogitsProcessor:()=>d,TemperatureLogitsWarper:()=>M,TopKLogitsWarper:()=>T,TopPLogitsWarper:()=>w,WhisperTimeStampLogitsProcessor:()=>u});var r=s(/*! ../utils/generic.js */"./src/utils/generic.js"),o=(s(/*! ../utils/tensor.js */"./src/utils/tensor.js"),s(/*! ../utils/maths.js */"./src/utils/maths.js"));class n extends r.Callable{_call(e,t){throw Error("`_call` should be implemented in a subclass")}}class a extends r.Callable{_call(e,t){throw Error("`_call` should be implemented in a subclass")}}class i extends r.Callable{constructor(){super(),this.processors=[]}push(e){this.processors.push(e)}extend(e){this.processors.push(...e)}_call(e,t){let s=t;for(const t of this.processors)s=t(e,s);return s}[Symbol.iterator](){return this.processors.values()}}class l extends n{constructor(e){super(),this.bos_token_id=e}_call(e,t){for(let s=0;s<e.length;++s)if(1===e[s].length){const e=t[s].data;e.fill(-1/0),e[this.bos_token_id]=0}return t}}class c extends n{constructor(e,t){super(),this.max_length=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(let s=0;s<e.length;++s)if(e[s].length===this.max_length-1){const e=t[s].data;e.fill(-1/0);for(const t of this.eos_token_id)e[t]=0}return t}}class d extends n{constructor(e,t){super(),this.begin_suppress_tokens=e,this.begin_index=t}_call(e,t){for(let s=0;s<e.length;++s)if(e[s].length===this.begin_index){const e=t[s].data;for(const t of this.begin_suppress_tokens)e[t]=-1/0}return t}}class u extends 
n{constructor(e,t){super(),this.eos_token_id=Array.isArray(e.eos_token_id)?e.eos_token_id[0]:e.eos_token_id,this.no_timestamps_token_id=e.no_timestamps_token_id,this.timestamp_begin=this.no_timestamps_token_id+1,this.begin_index=t.length,t.at(-1)===this.no_timestamps_token_id&&(this.begin_index-=1),this.max_initial_timestamp_index=e.max_initial_timestamp_index}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data;if(r[this.no_timestamps_token_id]=-1/0,e[s].length===this.begin_index-1){r.fill(-1/0),r[this.timestamp_begin]=0;continue}const n=e[s].slice(this.begin_index),a=n.length>=1&&n[n.length-1]>=this.timestamp_begin,i=n.length<2||n[n.length-2]>=this.timestamp_begin;if(a&&(i?r.subarray(this.timestamp_begin).fill(-1/0):r.subarray(0,this.eos_token_id).fill(-1/0)),e[s].length===this.begin_index&&null!==this.max_initial_timestamp_index){const e=this.timestamp_begin+this.max_initial_timestamp_index;r.subarray(e+1).fill(-1/0)}const l=(0,o.log_softmax)(r);Math.log(l.subarray(this.timestamp_begin).map(Math.exp).reduce(((e,t)=>e+t)))>(0,o.max)(l.subarray(0,this.timestamp_begin))[0]&&r.subarray(0,this.timestamp_begin).fill(-1/0)}return t}}class p extends n{constructor(e){super(),this.no_repeat_ngram_size=e}getNgrams(e){const t=e.length,s=[];for(let r=0;r<t+1-this.no_repeat_ngram_size;++r){const t=[];for(let s=0;s<this.no_repeat_ngram_size;++s)t.push(e[r+s]);s.push(t.map(Number))}const r=new Map;for(const e of s){const t=e.slice(0,e.length-1),s=JSON.stringify(t),o=r.get(s)??[];o.push(e[e.length-1]),r.set(s,o)}return r}getGeneratedNgrams(e,t){const s=t.slice(t.length+1-this.no_repeat_ngram_size,t.length);return e.get(JSON.stringify(s.map(Number)))??[]}calcBannedNgramTokens(e){const t=[];if(e.length+1<this.no_repeat_ngram_size)return t;{const t=this.getNgrams(e);return this.getGeneratedNgrams(t,e)}}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data,o=this.calcBannedNgramTokens(e[s]);for(const e of o)r[e]=-1/0}return t}}class m extends 
n{constructor(e){super(),this.penalty=e}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data;for(const t of new Set(e[s])){const e=Number(t);r[e]<0?r[e]*=this.penalty:r[e]/=this.penalty}}return t}}class _ extends n{constructor(e,t){super(),this.min_length=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(let s=0;s<e.length;++s)if(e[s].length<this.min_length){const e=t[s].data;for(const t of this.eos_token_id)e[t]=-1/0}return t}}class h extends n{constructor(e,t,s){super(),this.prompt_length_to_skip=e,this.min_new_tokens=t,this.eos_token_id=Array.isArray(s)?s:[s]}_call(e,t){for(let s=0;s<e.length;++s){if(e[s].length-this.prompt_length_to_skip<this.min_new_tokens){const e=t[s].data;for(const t of this.eos_token_id)e[t]=-1/0}}return t}}class f extends n{constructor(e,t){super(),this.bad_words_ids=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data,o=e[s];for(const e of this.bad_words_ids){let t=!0;for(let s=1;s<=e.length-1&&e.length<o.length;++s)if(e.at(-s-1)!=o.at(-s)){t=!1;break}t&&(r[e.at(-1)]=-1/0)}}return t}}class g extends n{constructor(e){if(super(),e<=1)throw new Error(`Require guidance scale >1 to use the classifier free guidance processor, got guidance scale ${e}.`);this.guidance_scale=e}_call(e,t){if(t.dims[0]!==2*e.length)throw new Error(`Logits should have twice the batch size of the input ids, the first half of batches corresponding to the conditional inputs, and the second half of batches corresponding to the unconditional inputs. 
Got batch size ${t.dims[0]} for the logits and ${e.length} for the input ids.`);const s=e.length,r=t.slice([0,s],null),o=t.slice([s,t.dims[0]],null);for(let e=0;e<o.data.length;++e)o.data[e]+=(r.data[e]-o.data[e])*this.guidance_scale;return o}}class M extends a{constructor(e){if(super(),"number"!=typeof e||e<=0){let t=`\`temperature\` (=${e}) must be a strictly positive float, otherwise your next token scores will be invalid.`;0===e&&(t+=" If you're looking for greedy decoding strategies, set `do_sample=false`.")}this.temperature=e}_call(e,t){const s=t.data;for(let e=0;e<s.length;++e)s[e]/=this.temperature;return t}}class w extends a{constructor(e,{filter_value:t=-1/0,min_tokens_to_keep:s=1}={}){if(super(),e<0||e>1)throw new Error(`\`top_p\` must be a float > 0 and < 1, but is ${e}`);if(!Number.isInteger(s)||s<1)throw new Error(`\`min_tokens_to_keep\` must be a positive integer, but is ${s}`);this.top_p=e,this.filter_value=t,this.min_tokens_to_keep=s}}class T extends a{constructor(e,{filter_value:t=-1/0,min_tokens_to_keep:s=1}={}){if(super(),!Number.isInteger(e)||e<0)throw new Error(`\`top_k\` must be a positive integer, but is ${e}`);this.top_k=Math.max(e,s),this.filter_value=t}}},"./src/generation/logits_sampler.js":
110
- /*!******************************************!*\
111
- !*** ./src/generation/logits_sampler.js ***!
112
- \******************************************/(e,t,s)=>{s.r(t),s.d(t,{LogitsSampler:()=>a});var r=s(/*! ../utils/generic.js */"./src/utils/generic.js"),o=s(/*! ../utils/tensor.js */"./src/utils/tensor.js"),n=s(/*! ../utils/maths.js */"./src/utils/maths.js");s(/*! ../generation/configuration_utils.js */"./src/generation/configuration_utils.js");class a extends r.Callable{constructor(e){super(),this.generation_config=e}async _call(e){return this.sample(e)}async sample(e){throw Error("sample should be implemented in subclasses.")}getLogits(e,t){let s=e.dims.at(-1),r=e.data;if(-1===t)r=r.slice(-s);else{let e=t*s;r=r.slice(e,e+s)}return r}randomSelect(e){let t=0;for(let s=0;s<e.length;++s)t+=e[s];let s=Math.random()*t;for(let t=0;t<e.length;++t)if(s-=e[t],s<=0)return t;return 0}static getSampler(e){if(e.do_sample)return new l(e);if(e.num_beams>1)return new c(e);if(e.num_return_sequences>1)throw Error(`num_return_sequences has to be 1 when doing greedy search, but is ${e.num_return_sequences}.`);return new i(e)}}class i extends a{async sample(e){const t=(0,n.max)(e.data)[1];return[[BigInt(t),0]]}}class l extends a{async sample(e){let t=e.dims.at(-1);this.generation_config.top_k>0&&(t=Math.min(this.generation_config.top_k,t));const[s,r]=await(0,o.topk)(e,t),a=(0,n.softmax)(s.data);return Array.from({length:this.generation_config.num_beams},(()=>{const e=this.randomSelect(a);return[r.data[e],Math.log(a[e])]}))}}class c extends a{async sample(e){let t=e.dims.at(-1);this.generation_config.top_k>0&&(t=Math.min(this.generation_config.top_k,t));const[s,r]=await(0,o.topk)(e,t),a=(0,n.softmax)(s.data);return Array.from({length:this.generation_config.num_beams},((e,t)=>[r.data[t],Math.log(a[t])]))}}},"./src/generation/stopping_criteria.js":
113
- /*!*********************************************!*\
114
- !*** ./src/generation/stopping_criteria.js ***!
115
- \*********************************************/(e,t,s)=>{s.r(t),s.d(t,{EosTokenCriteria:()=>i,InterruptableStoppingCriteria:()=>l,MaxLengthCriteria:()=>a,StoppingCriteria:()=>o,StoppingCriteriaList:()=>n});var r=s(/*! ../utils/generic.js */"./src/utils/generic.js");class o extends r.Callable{_call(e,t){throw Error("StoppingCriteria needs to be subclassed")}}class n extends r.Callable{constructor(){super(),this.criteria=[]}push(e){this.criteria.push(e)}extend(e){e instanceof n?e=e.criteria:e instanceof o&&(e=[e]),this.criteria.push(...e)}_call(e,t){const s=new Array(e.length).fill(!1);for(const r of this.criteria){const o=r(e,t);for(let e=0;e<s.length;++e)s[e]||=o[e]}return s}[Symbol.iterator](){return this.criteria.values()}}class a extends o{constructor(e,t=null){super(),this.max_length=e,this.max_position_embeddings=t}_call(e){return e.map((e=>e.length>=this.max_length))}}class i extends o{constructor(e){super(),Array.isArray(e)||(e=[e]),this.eos_token_id=e}_call(e,t){return e.map((e=>{const t=e.at(-1);return this.eos_token_id.some((e=>t==e))}))}}class l extends o{constructor(){super(),this.interrupted=!1}interrupt(){this.interrupted=!0}reset(){this.interrupted=!1}_call(e,t){return new Array(e.length).fill(this.interrupted)}}},"./src/generation/streamers.js":
116
- /*!*************************************!*\
117
- !*** ./src/generation/streamers.js ***!
118
- \*************************************/(e,t,s)=>{s.r(t),s.d(t,{BaseStreamer:()=>a,TextStreamer:()=>l,WhisperTextStreamer:()=>c});var r=s(/*! ../utils/core.js */"./src/utils/core.js"),o=s(/*! ../tokenizers.js */"./src/tokenizers.js"),n=s(/*! ../env.js */"./src/env.js");class a{put(e){throw Error("Not implemented")}end(){throw Error("Not implemented")}}const i=n.apis.IS_PROCESS_AVAILABLE?e=>process.stdout.write(e):e=>console.log(e);class l extends a{constructor(e,{skip_prompt:t=!1,callback_function:s=null,token_callback_function:r=null,decode_kwargs:o={},...n}={}){super(),this.tokenizer=e,this.skip_prompt=t,this.callback_function=s??i,this.token_callback_function=r,this.decode_kwargs={...o,...n},this.token_cache=[],this.print_len=0,this.next_tokens_are_prompt=!0}put(e){if(e.length>1)throw Error("TextStreamer only supports batch size of 1");if(this.skip_prompt&&this.next_tokens_are_prompt)return void(this.next_tokens_are_prompt=!1);const t=e[0];this.token_callback_function?.(t),this.token_cache=(0,r.mergeArrays)(this.token_cache,t);const s=this.tokenizer.decode(this.token_cache,this.decode_kwargs);let n;s.endsWith("\n")?(n=s.slice(this.print_len),this.token_cache=[],this.print_len=0):s.length>0&&(0,o.is_chinese_char)(s.charCodeAt(s.length-1))?(n=s.slice(this.print_len),this.print_len+=n.length):(n=s.slice(this.print_len,s.lastIndexOf(" ")+1),this.print_len+=n.length),this.on_finalized_text(n,!1)}end(){let e;if(this.token_cache.length>0){e=this.tokenizer.decode(this.token_cache,this.decode_kwargs).slice(this.print_len),this.token_cache=[],this.print_len=0}else e="";this.next_tokens_are_prompt=!0,this.on_finalized_text(e,!0)}on_finalized_text(e,t){e.length>0&&this.callback_function?.(e),t&&this.callback_function===i&&n.apis.IS_PROCESS_AVAILABLE&&this.callback_function?.("\n")}}class c extends 
l{constructor(e,{skip_prompt:t=!1,callback_function:s=null,token_callback_function:r=null,on_chunk_start:o=null,on_chunk_end:n=null,on_finalize:a=null,time_precision:i=.02,skip_special_tokens:l=!0,decode_kwargs:c={}}={}){super(e,{skip_prompt:t,callback_function:s,token_callback_function:r,decode_kwargs:{skip_special_tokens:l,...c}}),this.timestamp_begin=e.timestamp_begin,this.on_chunk_start=o,this.on_chunk_end=n,this.on_finalize=a,this.time_precision=i,this.waiting_for_timestamp=!1}put(e){if(e.length>1)throw Error("WhisperTextStreamer only supports batch size of 1");const t=e[0];if(1===t.length){const s=Number(t[0])-this.timestamp_begin;if(s>=0){const t=s*this.time_precision;this.waiting_for_timestamp?this.on_chunk_end?.(t):this.on_chunk_start?.(t),this.waiting_for_timestamp=!this.waiting_for_timestamp,e=[[]]}}return super.put(e)}end(){super.end(),this.on_finalize?.()}}},"./src/models.js":
119
- /*!***********************!*\
120
- !*** ./src/models.js ***!
121
- \***********************/(e,t,s)=>{s.r(t),s.d(t,{ASTForAudioClassification:()=>cs,ASTModel:()=>ls,ASTPreTrainedModel:()=>is,AlbertForMaskedLM:()=>Mt,AlbertForQuestionAnswering:()=>gt,AlbertForSequenceClassification:()=>ft,AlbertModel:()=>ht,AlbertPreTrainedModel:()=>_t,AutoModel:()=>Tl,AutoModelForAudioClassification:()=>Ol,AutoModelForAudioFrameClassification:()=>Rl,AutoModelForCTC:()=>Bl,AutoModelForCausalLM:()=>vl,AutoModelForDepthEstimation:()=>$l,AutoModelForDocumentQuestionAnswering:()=>Gl,AutoModelForImageClassification:()=>El,AutoModelForImageFeatureExtraction:()=>Xl,AutoModelForImageMatting:()=>ql,AutoModelForImageSegmentation:()=>Ll,AutoModelForImageToImage:()=>Wl,AutoModelForMaskGeneration:()=>Vl,AutoModelForMaskedLM:()=>Cl,AutoModelForNormalEstimation:()=>Ul,AutoModelForObjectDetection:()=>jl,AutoModelForPoseEstimation:()=>Ql,AutoModelForQuestionAnswering:()=>Sl,AutoModelForSemanticSegmentation:()=>Il,AutoModelForSeq2SeqLM:()=>Pl,AutoModelForSequenceClassification:()=>bl,AutoModelForSpeechSeq2Seq:()=>kl,AutoModelForTextToSpectrogram:()=>yl,AutoModelForTextToWaveform:()=>Fl,AutoModelForTokenClassification:()=>xl,AutoModelForUniversalSegmentation:()=>zl,AutoModelForVision2Seq:()=>Al,AutoModelForXVector:()=>Dl,AutoModelForZeroShotObjectDetection:()=>Nl,BartForConditionalGeneration:()=>At,BartForSequenceClassification:()=>Et,BartModel:()=>St,BartPretrainedModel:()=>Ct,BaseModelOutput:()=>J,BeitForImageClassification:()=>Io,BeitModel:()=>Lo,BeitPreTrainedModel:()=>Eo,BertForMaskedLM:()=>Z,BertForQuestionAnswering:()=>se,BertForSequenceClassification:()=>ee,BertForTokenClassification:()=>te,BertModel:()=>K,BertPreTrainedModel:()=>Y,BlenderbotForConditionalGeneration:()=>Ot,BlenderbotModel:()=>Bt,BlenderbotPreTrainedModel:()=>Vt,BlenderbotSmallForConditionalGeneration:()=>Gt,BlenderbotSmallModel:()=>Rt,BlenderbotSmallPreTrainedModel:()=>Dt,BloomForCausalLM:()=>$r,BloomModel:()=>Wr,BloomPreTrainedModel:()=>qr,CLIPModel:()=>Ps,CLIPPreTrainedModel:()=>xs,CLIPSeg
ForImageSegmentation:()=>Ds,CLIPSegModel:()=>Os,CLIPSegPreTrainedModel:()=>Bs,CLIPTextModel:()=>ks,CLIPTextModelWithProjection:()=>ys,CLIPVisionModel:()=>Fs,CLIPVisionModelWithProjection:()=>vs,CamembertForMaskedLM:()=>ye,CamembertForQuestionAnswering:()=>Ce,CamembertForSequenceClassification:()=>Fe,CamembertForTokenClassification:()=>ve,CamembertModel:()=>ke,CamembertPreTrainedModel:()=>Pe,CausalLMOutput:()=>tc,CausalLMOutputWithPast:()=>sc,ChineseCLIPModel:()=>Is,ChineseCLIPPreTrainedModel:()=>Ls,ClapAudioModelWithProjection:()=>Ha,ClapModel:()=>Qa,ClapPreTrainedModel:()=>Ua,ClapTextModelWithProjection:()=>Xa,CodeGenForCausalLM:()=>ir,CodeGenModel:()=>ar,CodeGenPreTrainedModel:()=>nr,CohereForCausalLM:()=>xr,CohereModel:()=>br,CoherePreTrainedModel:()=>Tr,ConvBertForMaskedLM:()=>me,ConvBertForQuestionAnswering:()=>fe,ConvBertForSequenceClassification:()=>_e,ConvBertForTokenClassification:()=>he,ConvBertModel:()=>pe,ConvBertPreTrainedModel:()=>ue,ConvNextForImageClassification:()=>En,ConvNextModel:()=>An,ConvNextPreTrainedModel:()=>Sn,ConvNextV2ForImageClassification:()=>zn,ConvNextV2Model:()=>In,ConvNextV2PreTrainedModel:()=>Ln,DPTForDepthEstimation:()=>pn,DPTModel:()=>un,DPTPreTrainedModel:()=>dn,DebertaForMaskedLM:()=>Ee,DebertaForQuestionAnswering:()=>ze,DebertaForSequenceClassification:()=>Le,DebertaForTokenClassification:()=>Ie,DebertaModel:()=>Ae,DebertaPreTrainedModel:()=>Se,DebertaV2ForMaskedLM:()=>Ve,DebertaV2ForQuestionAnswering:()=>De,DebertaV2ForSequenceClassification:()=>Be,DebertaV2ForTokenClassification:()=>Oe,DebertaV2Model:()=>Ne,DebertaV2PreTrainedModel:()=>je,DecisionTransformerModel:()=>yi,DecisionTransformerPreTrainedModel:()=>ki,DeiTForImageClassification:()=>Jo,DeiTModel:()=>Ho,DeiTPreTrainedModel:()=>Xo,DepthAnythingForDepthEstimation:()=>_n,DepthAnythingPreTrainedModel:()=>mn,DepthProForDepthEstimation:()=>Tn,DepthProPreTrainedModel:()=>wn,DetrForObjectDetection:()=>No,DetrForSegmentation:()=>Vo,DetrModel:()=>jo,DetrObjectDetectionOutput:(
)=>Bo,DetrPreTrainedModel:()=>zo,DetrSegmentationOutput:()=>Oo,Dinov2ForImageClassification:()=>Vn,Dinov2Model:()=>Nn,Dinov2PreTrainedModel:()=>jn,DistilBertForMaskedLM:()=>Ue,DistilBertForQuestionAnswering:()=>$e,DistilBertForSequenceClassification:()=>qe,DistilBertForTokenClassification:()=>We,DistilBertModel:()=>Ge,DistilBertPreTrainedModel:()=>Re,DonutSwinModel:()=>Cn,DonutSwinPreTrainedModel:()=>vn,EfficientNetForImageClassification:()=>ii,EfficientNetModel:()=>ai,EfficientNetPreTrainedModel:()=>ni,ElectraForMaskedLM:()=>we,ElectraForQuestionAnswering:()=>xe,ElectraForSequenceClassification:()=>Te,ElectraForTokenClassification:()=>be,ElectraModel:()=>Me,ElectraPreTrainedModel:()=>ge,EsmForMaskedLM:()=>He,EsmForSequenceClassification:()=>Je,EsmForTokenClassification:()=>Ye,EsmModel:()=>Xe,EsmPreTrainedModel:()=>Qe,FalconForCausalLM:()=>$a,FalconModel:()=>Wa,FalconPreTrainedModel:()=>qa,FastViTForImageClassification:()=>fo,FastViTModel:()=>ho,FastViTPreTrainedModel:()=>_o,Florence2ForConditionalGeneration:()=>ws,Florence2PreTrainedModel:()=>Ms,GLPNForDepthEstimation:()=>Fn,GLPNModel:()=>yn,GLPNPreTrainedModel:()=>kn,GPT2LMHeadModel:()=>qs,GPT2Model:()=>Gs,GPT2PreTrainedModel:()=>Rs,GPTBigCodeForCausalLM:()=>or,GPTBigCodeModel:()=>rr,GPTBigCodePreTrainedModel:()=>sr,GPTJForCausalLM:()=>tr,GPTJModel:()=>er,GPTJPreTrainedModel:()=>Zs,GPTNeoForCausalLM:()=>Hs,GPTNeoModel:()=>Xs,GPTNeoPreTrainedModel:()=>Qs,GPTNeoXForCausalLM:()=>Ks,GPTNeoXModel:()=>Ys,GPTNeoXPreTrainedModel:()=>Js,Gemma2ForCausalLM:()=>Cr,Gemma2Model:()=>vr,Gemma2PreTrainedModel:()=>Fr,GemmaForCausalLM:()=>yr,GemmaModel:()=>kr,GemmaPreTrainedModel:()=>Pr,GraniteForCausalLM:()=>wr,GraniteModel:()=>Mr,GranitePreTrainedModel:()=>gr,GroupViTModel:()=>mo,GroupViTPreTrainedModel:()=>po,HieraForImageClassification:()=>Zo,HieraModel:()=>Ko,HieraPreTrainedModel:()=>Yo,HubertForCTC:()=>xa,HubertForSequenceClassification:()=>Pa,HubertModel:()=>ba,HubertPreTrainedModel:()=>Ta,Idefics3ForConditionalGeneration:()=
>bs,Idefics3PreTrainedModel:()=>Ts,ImageMattingOutput:()=>rc,JAISLMHeadModel:()=>Us,JAISModel:()=>$s,JAISPreTrainedModel:()=>Ws,JinaCLIPModel:()=>js,JinaCLIPPreTrainedModel:()=>zs,JinaCLIPTextModel:()=>Ns,JinaCLIPVisionModel:()=>Vs,LlamaForCausalLM:()=>dr,LlamaModel:()=>cr,LlamaPreTrainedModel:()=>lr,LlavaForConditionalGeneration:()=>hs,LlavaOnevisionForConditionalGeneration:()=>fs,LlavaPreTrainedModel:()=>_s,LongT5ForConditionalGeneration:()=>kt,LongT5Model:()=>Pt,LongT5PreTrainedModel:()=>xt,M2M100ForConditionalGeneration:()=>Jn,M2M100Model:()=>Hn,M2M100PreTrainedModel:()=>Xn,MBartForCausalLM:()=>Nt,MBartForConditionalGeneration:()=>zt,MBartForSequenceClassification:()=>jt,MBartModel:()=>It,MBartPreTrainedModel:()=>Lt,MPNetForMaskedLM:()=>nt,MPNetForQuestionAnswering:()=>lt,MPNetForSequenceClassification:()=>at,MPNetForTokenClassification:()=>it,MPNetModel:()=>ot,MPNetPreTrainedModel:()=>rt,MT5ForConditionalGeneration:()=>vt,MT5Model:()=>Ft,MT5PreTrainedModel:()=>yt,MarianMTModel:()=>Qn,MarianModel:()=>Un,MarianPreTrainedModel:()=>$n,MaskFormerForInstanceSegmentation:()=>Pn,MaskFormerModel:()=>xn,MaskFormerPreTrainedModel:()=>bn,MaskedLMOutput:()=>Zl,MgpstrForSceneTextRecognition:()=>Ai,MgpstrModelOutput:()=>Ci,MgpstrPreTrainedModel:()=>Si,MistralForCausalLM:()=>Oa,MistralModel:()=>Ba,MistralPreTrainedModel:()=>Va,MobileBertForMaskedLM:()=>et,MobileBertForQuestionAnswering:()=>st,MobileBertForSequenceClassification:()=>tt,MobileBertModel:()=>Ze,MobileBertPreTrainedModel:()=>Ke,MobileLLMForCausalLM:()=>mr,MobileLLMModel:()=>pr,MobileLLMPreTrainedModel:()=>ur,MobileNetV1ForImageClassification:()=>_i,MobileNetV1Model:()=>mi,MobileNetV1PreTrainedModel:()=>pi,MobileNetV2ForImageClassification:()=>gi,MobileNetV2Model:()=>fi,MobileNetV2PreTrainedModel:()=>hi,MobileNetV3ForImageClassification:()=>Ti,MobileNetV3Model:()=>wi,MobileNetV3PreTrainedModel:()=>Mi,MobileNetV4ForImageClassification:()=>Pi,MobileNetV4Model:()=>xi,MobileNetV4PreTrainedModel:()=>bi,MobileViTForImageC
lassification:()=>bo,MobileViTModel:()=>To,MobileViTPreTrainedModel:()=>wo,MobileViTV2ForImageClassification:()=>ko,MobileViTV2Model:()=>Po,MobileViTV2PreTrainedModel:()=>xo,ModelOutput:()=>H,Moondream1ForConditionalGeneration:()=>gs,MptForCausalLM:()=>Xr,MptModel:()=>Qr,MptPreTrainedModel:()=>Ur,MultiModalityCausalLM:()=>vi,MultiModalityPreTrainedModel:()=>Fi,MusicgenForCausalLM:()=>di,MusicgenForConditionalGeneration:()=>ui,MusicgenModel:()=>ci,MusicgenPreTrainedModel:()=>li,NomicBertModel:()=>oe,NomicBertPreTrainedModel:()=>re,OPTForCausalLM:()=>Yr,OPTModel:()=>Jr,OPTPreTrainedModel:()=>Hr,OlmoForCausalLM:()=>fr,OlmoModel:()=>hr,OlmoPreTrainedModel:()=>_r,OpenELMForCausalLM:()=>Er,OpenELMModel:()=>Ar,OpenELMPreTrainedModel:()=>Sr,OwlViTForObjectDetection:()=>vo,OwlViTModel:()=>Fo,OwlViTPreTrainedModel:()=>yo,Owlv2ForObjectDetection:()=>Ao,Owlv2Model:()=>So,Owlv2PreTrainedModel:()=>Co,PatchTSMixerForPrediction:()=>Ni,PatchTSMixerModel:()=>ji,PatchTSMixerPreTrainedModel:()=>zi,PatchTSTForPrediction:()=>Ii,PatchTSTModel:()=>Li,PatchTSTPreTrainedModel:()=>Ei,Phi3ForCausalLM:()=>Gr,Phi3Model:()=>Rr,Phi3PreTrainedModel:()=>Dr,PhiForCausalLM:()=>Or,PhiModel:()=>Br,PhiPreTrainedModel:()=>Vr,PreTrainedModel:()=>X,PretrainedMixin:()=>Vi,PvtForImageClassification:()=>no,PvtModel:()=>oo,PvtPreTrainedModel:()=>ro,PyAnnoteForAudioFrameClassification:()=>oa,PyAnnoteModel:()=>ra,PyAnnotePreTrainedModel:()=>sa,QuestionAnsweringModelOutput:()=>ec,Qwen2ForCausalLM:()=>zr,Qwen2Model:()=>Ir,Qwen2PreTrainedModel:()=>Lr,Qwen2VLForConditionalGeneration:()=>Nr,Qwen2VLPreTrainedModel:()=>jr,RTDetrForObjectDetection:()=>Go,RTDetrModel:()=>Ro,RTDetrObjectDetectionOutput:()=>qo,RTDetrPreTrainedModel:()=>Do,ResNetForImageClassification:()=>sn,ResNetModel:()=>tn,ResNetPreTrainedModel:()=>en,RoFormerForMaskedLM:()=>ie,RoFormerForQuestionAnswering:()=>de,RoFormerForSequenceClassification:()=>le,RoFormerForTokenClassification:()=>ce,RoFormerModel:()=>ae,RoFormerPreTrainedModel:()=>ne,RobertaForMa
skedLM:()=>$t,RobertaForQuestionAnswering:()=>Xt,RobertaForSequenceClassification:()=>Ut,RobertaForTokenClassification:()=>Qt,RobertaModel:()=>Wt,RobertaPreTrainedModel:()=>qt,SamImageSegmentationOutput:()=>Wn,SamModel:()=>qn,SamPreTrainedModel:()=>Gn,SapiensForDepthEstimation:()=>gn,SapiensForNormalEstimation:()=>Mn,SapiensForSemanticSegmentation:()=>fn,SapiensPreTrainedModel:()=>hn,SegformerForImageClassification:()=>ei,SegformerForSemanticSegmentation:()=>ti,SegformerModel:()=>Za,SegformerPreTrainedModel:()=>Ka,Seq2SeqLMOutput:()=>Hl,SequenceClassifierOutput:()=>Jl,SiglipModel:()=>Ss,SiglipPreTrainedModel:()=>Cs,SiglipTextModel:()=>As,SiglipVisionModel:()=>Es,SpeechT5ForSpeechToText:()=>La,SpeechT5ForTextToSpeech:()=>Ia,SpeechT5HifiGan:()=>za,SpeechT5Model:()=>Ea,SpeechT5PreTrainedModel:()=>Aa,SqueezeBertForMaskedLM:()=>ut,SqueezeBertForQuestionAnswering:()=>mt,SqueezeBertForSequenceClassification:()=>pt,SqueezeBertModel:()=>dt,SqueezeBertPreTrainedModel:()=>ct,StableLmForCausalLM:()=>oi,StableLmModel:()=>ri,StableLmPreTrainedModel:()=>si,Starcoder2ForCausalLM:()=>Ga,Starcoder2Model:()=>Ra,Starcoder2PreTrainedModel:()=>Da,Swin2SRForImageSuperResolution:()=>cn,Swin2SRModel:()=>ln,Swin2SRPreTrainedModel:()=>an,SwinForImageClassification:()=>nn,SwinModel:()=>on,SwinPreTrainedModel:()=>rn,T5ForConditionalGeneration:()=>bt,T5Model:()=>Tt,T5PreTrainedModel:()=>wt,TableTransformerForObjectDetection:()=>Uo,TableTransformerModel:()=>$o,TableTransformerObjectDetectionOutput:()=>Qo,TableTransformerPreTrainedModel:()=>Wo,TokenClassifierOutput:()=>Kl,TrOCRForCausalLM:()=>Na,TrOCRPreTrainedModel:()=>ja,UniSpeechForCTC:()=>ca,UniSpeechForSequenceClassification:()=>da,UniSpeechModel:()=>la,UniSpeechPreTrainedModel:()=>ia,UniSpeechSatForAudioFrameClassification:()=>ha,UniSpeechSatForCTC:()=>ma,UniSpeechSatForSequenceClassification:()=>_a,UniSpeechSatModel:()=>pa,UniSpeechSatPreTrainedModel:()=>ua,ViTForImageClassification:()=>eo,ViTMAEModel:()=>io,ViTMAEPreTrainedModel:()=>ao,ViT
MSNForImageClassification:()=>uo,ViTMSNModel:()=>co,ViTMSNPreTrainedModel:()=>lo,ViTModel:()=>Zr,ViTPreTrainedModel:()=>Kr,VisionEncoderDecoderModel:()=>ms,VitMatteForImageMatting:()=>Mo,VitMattePreTrainedModel:()=>go,VitPoseForPoseEstimation:()=>so,VitPosePreTrainedModel:()=>to,VitsModel:()=>Ya,VitsModelOutput:()=>oc,VitsPreTrainedModel:()=>Ja,Wav2Vec2BertForCTC:()=>Ma,Wav2Vec2BertForSequenceClassification:()=>wa,Wav2Vec2BertModel:()=>ga,Wav2Vec2BertPreTrainedModel:()=>fa,Wav2Vec2ForAudioFrameClassification:()=>ta,Wav2Vec2ForCTC:()=>Zn,Wav2Vec2ForSequenceClassification:()=>ea,Wav2Vec2Model:()=>Kn,Wav2Vec2PreTrainedModel:()=>Yn,WavLMForAudioFrameClassification:()=>Sa,WavLMForCTC:()=>Fa,WavLMForSequenceClassification:()=>va,WavLMForXVector:()=>Ca,WavLMModel:()=>ya,WavLMPreTrainedModel:()=>ka,WeSpeakerResNetModel:()=>aa,WeSpeakerResNetPreTrainedModel:()=>na,WhisperForConditionalGeneration:()=>ps,WhisperModel:()=>us,WhisperPreTrainedModel:()=>ds,XLMForQuestionAnswering:()=>es,XLMForSequenceClassification:()=>Kt,XLMForTokenClassification:()=>Zt,XLMModel:()=>Jt,XLMPreTrainedModel:()=>Ht,XLMRobertaForMaskedLM:()=>rs,XLMRobertaForQuestionAnswering:()=>as,XLMRobertaForSequenceClassification:()=>os,XLMRobertaForTokenClassification:()=>ns,XLMRobertaModel:()=>ss,XLMRobertaPreTrainedModel:()=>ts,XLMWithLMHeadModel:()=>Yt,XVectorOutput:()=>Yl,YolosForObjectDetection:()=>Dn,YolosModel:()=>On,YolosObjectDetectionOutput:()=>Rn,YolosPreTrainedModel:()=>Bn});var r=s(/*! ./configs.js */"./src/configs.js"),o=s(/*! ./backends/onnx.js */"./src/backends/onnx.js"),n=s(/*! ./utils/dtypes.js */"./src/utils/dtypes.js"),a=s(/*! ./utils/generic.js */"./src/utils/generic.js"),i=s(/*! ./utils/core.js */"./src/utils/core.js"),l=s(/*! ./utils/hub.js */"./src/utils/hub.js"),c=s(/*! ./utils/constants.js */"./src/utils/constants.js"),d=s(/*! ./generation/logits_process.js */"./src/generation/logits_process.js"),u=s(/*! 
./generation/configuration_utils.js */"./src/generation/configuration_utils.js"),p=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),m=s(/*! ./utils/image.js */"./src/utils/image.js"),_=s(/*! ./utils/maths.js */"./src/utils/maths.js"),h=s(/*! ./generation/stopping_criteria.js */"./src/generation/stopping_criteria.js"),f=s(/*! ./generation/logits_sampler.js */"./src/generation/logits_sampler.js"),g=s(/*! ./env.js */"./src/env.js"),M=s(/*! ./models/whisper/generation_whisper.js */"./src/models/whisper/generation_whisper.js"),w=s(/*! ./models/whisper/common_whisper.js */"./src/models/whisper/common_whisper.js");const T=0,b=1,x=2,P=3,k=4,y=5,F=6,v=7,C=8,S=new Map,A=new Map,E=new Map;async function L(e,t,s){return Object.fromEntries(await Promise.all(Object.keys(t).map((async a=>{const{buffer:i,session_options:c,session_config:d}=await async function(e,t,s){const a=s.config?.["transformers.js_config"]??{};let i=s.device??a.device;i&&"string"!=typeof i&&(i.hasOwnProperty(t)?i=i[t]:(console.warn(`device not specified for "${t}". Using the default device.`),i=null));const c=i??(g.apis.IS_NODE_ENV?"cpu":"wasm"),d=(0,o.deviceToExecutionProviders)(c);let u=s.dtype??a.dtype;if("string"!=typeof u&&(u&&u.hasOwnProperty(t)?u=u[t]:(u=n.DEFAULT_DEVICE_DTYPE_MAPPING[c]??n.DATA_TYPES.fp32,console.warn(`dtype not specified for "${t}". Using the default dtype (${u}) for this device (${c}).`))),u===n.DATA_TYPES.auto){let e=a.dtype;"string"!=typeof e&&(e=e[t]),u=e&&e!==n.DATA_TYPES.auto&&n.DATA_TYPES.hasOwnProperty(e)?e:n.DEFAULT_DEVICE_DTYPE_MAPPING[c]??n.DATA_TYPES.fp32}const p=u;if(!n.DEFAULT_DTYPE_SUFFIX_MAPPING.hasOwnProperty(p))throw new Error(`Invalid dtype: ${p}. 
Should be one of: ${Object.keys(n.DATA_TYPES).join(", ")}`);if(p===n.DATA_TYPES.fp16&&"webgpu"===c&&!await(0,n.isWebGpuFp16Supported)())throw new Error(`The device (${c}) does not support fp16.`);const m=a.kv_cache_dtype?"string"==typeof a.kv_cache_dtype?a.kv_cache_dtype:a.kv_cache_dtype[p]??"float32":void 0;if(m&&!["float32","float16"].includes(m))throw new Error(`Invalid kv_cache_dtype: ${m}. Should be one of: float32, float16`);const _={dtype:p,kv_cache_dtype:m},h=n.DEFAULT_DTYPE_SUFFIX_MAPPING[p],f=`${s.subfolder??""}/${t}${h}.onnx`,M={...s.session_options};M.executionProviders??=d;const w=a.free_dimension_overrides;w?M.freeDimensionOverrides??=w:c.startsWith("webnn")&&!M.freeDimensionOverrides&&console.warn('WebNN does not currently support dynamic shapes and requires `free_dimension_overrides` to be set in config.json as a field within "transformers.js_config". When `free_dimension_overrides` is not set, you may experience significant performance degradation.');const T=(0,l.getModelFile)(e,f,!0,s),b=s.use_external_data_format??a.use_external_data_format;let x=[];if(b&&(!0===b||"object"==typeof b&&b.hasOwnProperty(t)&&!0===b[t])){if(g.apis.IS_NODE_ENV)throw new Error("External data format is not yet supported in Node.js");const r=`${t}${h}.onnx_data`,o=`${s.subfolder??""}/${r}`;x.push(new Promise((async(t,n)=>{const a=await(0,l.getModelFile)(e,o,!0,s);t({path:r,data:a})})))}else void 0!==M.externalData&&(x=M.externalData.map((async t=>{if("string"==typeof t.data){const r=await(0,l.getModelFile)(e,t.data,!0,s);return{...t,data:r}}return t})));if(x.length>0&&(M.externalData=await Promise.all(x)),"webgpu"===c){const e=(0,r.getKeyValueShapes)(s.config,{prefix:"present"});if(Object.keys(e).length>0&&!(0,o.isONNXProxy)()){const t={};for(const s in e)t[s]="gpu-buffer";M.preferredOutputLocation=t}}return{buffer:await T,session_options:M,session_config:_}}(e,t[a],s);return[a,await(0,o.createInferenceSession)(i,c,d)]}))))}async function I(e,t,s){return 
Object.fromEntries(await Promise.all(Object.keys(t).map((async r=>[r,await(0,l.getModelJSON)(e,t[r],!1,s)]))))}async function z(e,t){const s=function(e,t){const s=Object.create(null),r=[];for(const n of e.inputNames){const e=t[n];e instanceof p.Tensor?s[n]=(0,o.isONNXProxy)()?e.clone():e:r.push(n)}if(r.length>0)throw new Error(`An error occurred during model execution: "Missing the following inputs: ${r.join(", ")}.`);const n=Object.keys(t).length,a=e.inputNames.length;if(n>a){let s=Object.keys(t).filter((t=>!e.inputNames.includes(t)));console.warn(`WARNING: Too many inputs were provided (${n} > ${a}). The following inputs will be ignored: "${s.join(", ")}".`)}return s}(e,t);try{const t=Object.fromEntries(Object.entries(s).map((([e,t])=>[e,t.ort_tensor])));let r=await e.run(t);return r=j(r),r}catch(e){const t=Object.fromEntries(Object.entries(s).map((([e,{type:t,dims:s,data:r}])=>[e,{type:t,dims:s,data:r}])));throw console.error(`An error occurred during model execution: "${e}".`),console.error("Inputs given to model:",t),e}}function j(e){for(let t in e)(0,o.isONNXTensor)(e[t])?e[t]=new p.Tensor(e[t]):"object"==typeof e[t]&&j(e[t]);return e}function N(e){if(e instanceof p.Tensor)return e;if(0===e.length)throw Error("items must be non-empty");if(Array.isArray(e[0])){if(e.some((t=>t.length!==e[0].length)))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.");return new p.Tensor("int64",BigInt64Array.from(e.flat().map((e=>BigInt(e)))),[e.length,e[0].length])}return new p.Tensor("int64",BigInt64Array.from(e.map((e=>BigInt(e)))),[1,e.length])}function V(e){return new p.Tensor("bool",[e],[1])}async function B(e,t){let{encoder_outputs:s,input_ids:r,decoder_input_ids:o,...n}=t;if(!s){const r=(0,i.pick)(t,e.sessions.model.inputNames);s=(await 
O(e,r)).last_hidden_state}n.input_ids=o,n.encoder_hidden_states=s,e.sessions.decoder_model_merged.inputNames.includes("encoder_attention_mask")&&(n.encoder_attention_mask=t.attention_mask);return await D(e,n,!0)}async function O(e,t){const s=e.sessions.model,r=(0,i.pick)(t,s.inputNames);if(s.inputNames.includes("inputs_embeds")&&!r.inputs_embeds){if(!t.input_ids)throw new Error("Both `input_ids` and `inputs_embeds` are missing in the model inputs.");r.inputs_embeds=await e.encode_text({input_ids:t.input_ids})}return s.inputNames.includes("token_type_ids")&&!r.token_type_ids&&(r.token_type_ids=new p.Tensor("int64",new BigInt64Array(r.input_ids.data.length),r.input_ids.dims)),await z(s,r)}async function D(e,t,s=!1){const r=e.sessions[s?"decoder_model_merged":"model"],{past_key_values:o,...n}=t;r.inputNames.includes("use_cache_branch")&&(n.use_cache_branch=V(!!o)),r.inputNames.includes("position_ids")&&n.attention_mask&&!n.position_ids&&(n.position_ids=function(e,t=null){const{input_ids:s,inputs_embeds:r,attention_mask:o}=e,{data:n,dims:a}=q(o);let i=new p.Tensor("int64",n,a);if(t){const e=-(s??r).dims.at(1);i=i.slice(null,[e,null])}return i}(n,o)),e.addPastKeyValues(n,o);const a=(0,i.pick)(n,r.inputNames);return await z(r,a)}function R({image_token_id:e,inputs_embeds:t,image_features:s,input_ids:r,attention_mask:o}){const n=r.tolist().map((t=>t.reduce(((t,s,r)=>(s==e&&t.push(r),t)),[]))),a=n.reduce(((e,t)=>e+t.length),0),i=s.dims[0];if(a!==i)throw new Error(`Image features and image tokens do not match: tokens: ${a}, features ${i}`);let l=0;for(let e=0;e<n.length;++e){const r=n[e],o=t[e];for(let e=0;e<r.length;++e)o[r[e]].data.set(s[l++].data)}return{inputs_embeds:t,attention_mask:o}}async function G(e,{input_ids:t=null,attention_mask:s=null,pixel_values:r=null,position_ids:o=null,inputs_embeds:n=null,past_key_values:a=null,generation_config:i=null,logits_processor:l=null,...c}){if(!n)if(n=await e.encode_text({input_ids:t,...c}),r&&1!==t.dims[1]){const o=await 
e.encode_image({pixel_values:r,...c});({inputs_embeds:n,attention_mask:s}=e._merge_input_ids_with_image_features({image_features:o,inputs_embeds:n,input_ids:t,attention_mask:s}))}else if(a&&r&&1===t.dims[1]){const e=t.dims[1],r=Object.values(a)[0].dims.at(-2);s=(0,p.cat)([(0,p.ones)([t.dims[0],r]),s.slice(null,[s.dims[1]-e,s.dims[1]])],1)}if(!o&&"qwen2_vl"===e.config.model_type){const{image_grid_thw:r,video_grid_thw:n}=c;[o]=e.get_rope_index(t,r,n,s)}return await D(e,{inputs_embeds:n,past_key_values:a,attention_mask:s,position_ids:o,generation_config:i,logits_processor:l},!0)}function q(e){const[t,s]=e.dims,r=e.data,o=new BigInt64Array(r.length);for(let e=0;e<t;++e){const t=e*s;let n=BigInt(0);for(let e=0;e<s;++e){const s=t+e;0n===r[s]?o[s]=BigInt(1):(o[s]=n,n+=r[s])}}return{data:o,dims:e.dims}}function W(e,t,s,r){if(s.past_key_values){const t=Object.values(s.past_key_values)[0].dims.at(-2),{input_ids:r,attention_mask:o}=s;if(o&&o.dims[1]>r.dims[1]);else if(t<r.dims[1])s.input_ids=r.slice(null,[t,null]);else if(null!=e.config.image_token_index&&r.data.some((t=>t==e.config.image_token_index))){const o=e.config.num_image_tokens;if(!o)throw new Error("`num_image_tokens` is missing in the model configuration.");const n=r.dims[1]-(t-o);s.input_ids=r.slice(null,[-n,null]),s.attention_mask=(0,p.ones)([1,t+n])}}return s}function $(e,t,s,r){return s.past_key_values&&(t=t.map((e=>[e.at(-1)]))),{...s,decoder_input_ids:N(t)}}function U(e,...t){return e.config.is_encoder_decoder?$(e,...t):W(e,...t)}function Q(e,t,s,r){const o=!!s.past_key_values;if(null!==r.guidance_scale&&r.guidance_scale>1&&(o?s.input_ids=(0,p.cat)([s.input_ids,s.input_ids],0):(s.input_ids=(0,p.cat)([s.input_ids,(0,p.full_like)(s.input_ids,BigInt(r.pad_token_id))],0),s.attention_mask=(0,p.cat)([s.attention_mask,(0,p.full_like)(s.attention_mask,0n)],0))),!o&&s.pixel_values||(s.pixel_values=(0,p.full)([0,0,3,384,384],1)),o){const e=0,t=1,r=e>0?1:0,o=1;s.images_seq_mask=new p.Tensor("bool",new 
Array(e+t).fill(!0).fill(!1,0,t),[o,e+t]),s.images_emb_mask=new p.Tensor("bool",new Array(e).fill(!!r),[o,1,e])}return s}class X extends a.Callable{main_input_name="input_ids";forward_params=["input_ids","attention_mask"];constructor(e,t,s){super(),this.config=e,this.sessions=t,this.configs=s;const r=E.get(this.constructor),o=S.get(r);switch(this.can_generate=!1,this._forward=null,this._prepare_inputs_for_generation=null,o){case k:this.can_generate=!0,this._forward=D,this._prepare_inputs_for_generation=W;break;case x:case P:case v:this.can_generate=!0,this._forward=B,this._prepare_inputs_for_generation=$;break;case b:this._forward=B;break;case F:this.can_generate=!0,this._forward=G,this._prepare_inputs_for_generation=U;break;case C:this.can_generate=!0,this._prepare_inputs_for_generation=Q;break;default:this._forward=O}this.can_generate&&this.forward_params.push("past_key_values"),this.custom_config=this.config["transformers.js_config"]??{}}async dispose(){const e=[];for(const t of Object.values(this.sessions))t?.handler?.dispose&&e.push(t.handler.dispose());return await Promise.all(e)}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:o=null,local_files_only:n=!1,revision:a="main",model_file_name:i=null,subfolder:l="onnx",device:d=null,dtype:u=null,use_external_data_format:p=null,session_options:m={}}={}){let _={progress_callback:t,config:s,cache_dir:o,local_files_only:n,revision:a,model_file_name:i,subfolder:l,device:d,dtype:u,use_external_data_format:p,session_options:m};const h=E.get(this),f=S.get(h);let g;if(s=_.config=await r.AutoConfig.from_pretrained(e,_),f===k)g=await Promise.all([L(e,{model:_.model_file_name??"model"},_),I(e,{generation_config:"generation_config.json"},_)]);else if(f===x||f===P)g=await Promise.all([L(e,{model:"encoder_model",decoder_model_merged:"decoder_model_merged"},_),I(e,{generation_config:"generation_config.json"},_)]);else if(f===y)g=await 
Promise.all([L(e,{model:"vision_encoder",prompt_encoder_mask_decoder:"prompt_encoder_mask_decoder"},_)]);else if(f===b)g=await Promise.all([L(e,{model:"encoder_model",decoder_model_merged:"decoder_model_merged"},_)]);else if(f===F){const t={embed_tokens:"embed_tokens",vision_encoder:"vision_encoder",decoder_model_merged:"decoder_model_merged"};s.is_encoder_decoder&&(t.model="encoder_model"),g=await Promise.all([L(e,t,_),I(e,{generation_config:"generation_config.json"},_)])}else if(f===v)g=await Promise.all([L(e,{model:"text_encoder",decoder_model_merged:"decoder_model_merged",encodec_decode:"encodec_decode"},_),I(e,{generation_config:"generation_config.json"},_)]);else if(f===C)g=await Promise.all([L(e,{prepare_inputs_embeds:"prepare_inputs_embeds",model:"language_model",lm_head:"lm_head",gen_head:"gen_head",gen_img_embeds:"gen_img_embeds",image_decode:"image_decode"},_),I(e,{generation_config:"generation_config.json"},_)]);else{if(f!==T){const e=h??s?.model_type;"custom"!==e&&console.warn(`Model type for '${e}' not found, assuming encoder-only architecture. 
Please report this at ${c.GITHUB_ISSUE_URL}.`)}g=await Promise.all([L(e,{model:_.model_file_name??"model"},_)])}return new this(s,...g)}async _call(e){return await this.forward(e)}async forward(e){return await this._forward(this,e)}get generation_config(){return this.configs?.generation_config??null}_get_logits_warper(e){const t=new d.LogitsProcessorList;return null!==e.temperature&&1!==e.temperature&&t.push(new d.TemperatureLogitsWarper(e.temperature)),null!==e.top_k&&0!==e.top_k&&t.push(new d.TopKLogitsWarper(e.top_k)),null!==e.top_p&&e.top_p<1&&t.push(new d.TopPLogitsWarper(e.top_p)),t}_get_logits_processor(e,t,s=null){const r=new d.LogitsProcessorList;if(null!==e.repetition_penalty&&1!==e.repetition_penalty&&r.push(new d.RepetitionPenaltyLogitsProcessor(e.repetition_penalty)),null!==e.no_repeat_ngram_size&&e.no_repeat_ngram_size>0&&r.push(new d.NoRepeatNGramLogitsProcessor(e.no_repeat_ngram_size)),null!==e.bad_words_ids&&r.push(new d.NoBadWordsLogitsProcessor(e.bad_words_ids,e.eos_token_id)),null!==e.min_length&&null!==e.eos_token_id&&e.min_length>0&&r.push(new d.MinLengthLogitsProcessor(e.min_length,e.eos_token_id)),null!==e.min_new_tokens&&null!==e.eos_token_id&&e.min_new_tokens>0&&r.push(new d.MinNewTokensLengthLogitsProcessor(t,e.min_new_tokens,e.eos_token_id)),null!==e.forced_bos_token_id&&r.push(new d.ForcedBOSTokenLogitsProcessor(e.forced_bos_token_id)),null!==e.forced_eos_token_id&&r.push(new d.ForcedEOSTokenLogitsProcessor(e.max_length,e.forced_eos_token_id)),null!==e.begin_suppress_tokens){const s=t>1||null===e.forced_bos_token_id?t:t+1;r.push(new d.SuppressTokensAtBeginLogitsProcessor(e.begin_suppress_tokens,s))}return null!==e.guidance_scale&&e.guidance_scale>1&&r.push(new d.ClassifierFreeGuidanceLogitsProcessor(e.guidance_scale)),null!==s&&r.extend(s),r}_prepare_generation_config(e,t,s=u.GenerationConfig){const r={...this.config};for(const e of["decoder","generator","text_config"])e in r&&Object.assign(r,r[e]);const o=new s(r);return 
Object.assign(o,this.generation_config??{}),e&&Object.assign(o,e),t&&Object.assign(o,(0,i.pick)(t,Object.getOwnPropertyNames(o))),o}_get_stopping_criteria(e,t=null){const s=new h.StoppingCriteriaList;return null!==e.max_length&&s.push(new h.MaxLengthCriteria(e.max_length,this.config.max_position_embeddings??null)),null!==e.eos_token_id&&s.push(new h.EosTokenCriteria(e.eos_token_id)),t&&s.extend(t),s}_validate_model_class(){if(!this.can_generate){const e=[Qi,Yi,Ui,Ri],t=E.get(this.constructor),s=new Set,r=this.config.model_type;for(const t of e){const e=t.get(r);e&&s.add(e[0])}let o=`The current model class (${t}) is not compatible with \`.generate()\`, as it doesn't have a language model head.`;throw s.size>0&&(o+=` Please use the following class instead: ${[...s].join(", ")}`),Error(o)}}prepare_inputs_for_generation(...e){return this._prepare_inputs_for_generation(this,...e)}_update_model_kwargs_for_generation({generated_input_ids:e,outputs:t,model_inputs:s,is_encoder_decoder:r}){return s.past_key_values=this.getPastKeyValues(t,s.past_key_values),s.input_ids=new p.Tensor("int64",e.flat(),[e.length,1]),r||(s.attention_mask=(0,p.cat)([s.attention_mask,(0,p.ones)([s.attention_mask.dims[0],1])],1)),s.position_ids=null,s}_prepare_model_inputs({inputs:e,bos_token_id:t,model_kwargs:s}){const r=(0,i.pick)(s,this.forward_params),o=this.main_input_name;if(o in r){if(e)throw new Error("`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. 
Make sure to either pass {inputs} or {input_name}=...")}else r[o]=e;return{inputs_tensor:r[o],model_inputs:r,model_input_name:o}}async _prepare_encoder_decoder_kwargs_for_generation({inputs_tensor:e,model_inputs:t,model_input_name:s,generation_config:r}){if(this.sessions.model.inputNames.includes("inputs_embeds")&&!t.inputs_embeds&&"_prepare_inputs_embeds"in this){const{input_ids:e,pixel_values:s,attention_mask:r,...o}=t,n=await this._prepare_inputs_embeds(t);t={...o,...(0,i.pick)(n,["inputs_embeds","attention_mask"])}}let{last_hidden_state:o}=await O(this,t);if(null!==r.guidance_scale&&r.guidance_scale>1)o=(0,p.cat)([o,(0,p.full_like)(o,0)],0),"attention_mask"in t&&(t.attention_mask=(0,p.cat)([t.attention_mask,(0,p.zeros_like)(t.attention_mask)],0));else if(t.decoder_input_ids){const e=N(t.decoder_input_ids).dims[0];if(e!==o.dims[0]){if(1!==o.dims[0])throw new Error(`The encoder outputs have a different batch size (${o.dims[0]}) than the decoder inputs (${e}).`);o=(0,p.cat)(Array.from({length:e},(()=>o)),0)}}return t.encoder_outputs=o,t}_prepare_decoder_input_ids_for_generation({batch_size:e,model_input_name:t,model_kwargs:s,decoder_start_token_id:r,bos_token_id:o,generation_config:n}){let{decoder_input_ids:a,...i}=s;if(!(a instanceof p.Tensor)){if(a)Array.isArray(a[0])||(a=Array.from({length:e},(()=>a)));else if(r??=o,"musicgen"===this.config.model_type)a=Array.from({length:e*this.config.decoder.num_codebooks},(()=>[r]));else if(Array.isArray(r)){if(r.length!==e)throw new Error(`\`decoder_start_token_id\` expcted to have length ${e} but got ${r.length}`);a=r}else a=Array.from({length:e},(()=>[r]));a=N(a)}return s.decoder_attention_mask=(0,p.ones_like)(a),{input_ids:a,model_inputs:i}}async 
generate({inputs:e=null,generation_config:t=null,logits_processor:s=null,stopping_criteria:r=null,streamer:o=null,...n}){this._validate_model_class(),t=this._prepare_generation_config(t,n);let{inputs_tensor:a,model_inputs:i,model_input_name:l}=this._prepare_model_inputs({inputs:e,model_kwargs:n});const c=this.config.is_encoder_decoder;let d;c&&("encoder_outputs"in i||(i=await this._prepare_encoder_decoder_kwargs_for_generation({inputs_tensor:a,model_inputs:i,model_input_name:l,generation_config:t}))),c?({input_ids:d,model_inputs:i}=this._prepare_decoder_input_ids_for_generation({batch_size:i[l].dims.at(0),model_input_name:l,model_kwargs:i,decoder_start_token_id:t.decoder_start_token_id,bos_token_id:t.bos_token_id,generation_config:t})):d=i[l];let u=d.dims.at(-1);null!==t.max_new_tokens&&(t.max_length=u+t.max_new_tokens);const m=this._get_logits_processor(t,u,s),_=this._get_stopping_criteria(t,r),h=i[l].dims.at(0),g=f.LogitsSampler.getSampler(t),M=new Array(h).fill(0),w=d.tolist();let T;o&&o.put(w);let b={};for(;;){if(i=this.prepare_inputs_for_generation(w,i,t),T=await this.forward(i),t.output_attentions&&t.return_dict_in_generate){const e=this.getAttentions(T);for(const t in e)t in b||(b[t]=[]),b[t].push(e[t])}const e=m(w,T.logits.slice(null,-1,null)),s=[];for(let t=0;t<e.dims.at(0);++t){const r=e[t],o=await g(r);for(const[e,r]of o){const o=BigInt(e);M[t]+=r,w[t].push(o),s.push([o]);break}}o&&o.put(s);if(_(w).every((e=>e)))break;i=this._update_model_kwargs_for_generation({generated_input_ids:s,outputs:T,model_inputs:i,is_encoder_decoder:c})}o&&o.end();const x=this.getPastKeyValues(T,i.past_key_values,!0),P=new p.Tensor("int64",w.flat(),[w.length,w[0].length]);if(t.return_dict_in_generate)return{sequences:P,past_key_values:x,...b};for(const e of Object.values(T))"gpu-buffer"===e.location&&e.dispose();return P}getPastKeyValues(e,t,s=!1){const r=Object.create(null);for(const o in e)if(o.startsWith("present")){const 
n=o.replace("present","past_key_values"),a=o.includes("encoder");if(r[n]=a&&t?t[n]:e[o],t&&(!a||s)){const e=t[n];"gpu-buffer"===e.location&&e.dispose()}}return r}getAttentions(e){const t={};for(const s of["cross_attentions","encoder_attentions","decoder_attentions"])for(const r in e)r.startsWith(s)&&(s in t||(t[s]=[]),t[s].push(e[r]));return t}addPastKeyValues(e,t){if(t)Object.assign(e,t);else{const t=this.sessions.decoder_model_merged??this.sessions.model,s=t?.config?.kv_cache_dtype??"float32",o="float16"===s?new Uint16Array:[],n=(e[this.main_input_name]??e.attention_mask)?.dims?.[0]??1,a=(0,r.getKeyValueShapes)(this.config,{batch_size:n});for(const t in a)e[t]=new p.Tensor(s,o,a[t])}}async encode_image({pixel_values:e}){const t=(await z(this.sessions.vision_encoder,{pixel_values:e})).image_features;return this.config.num_image_tokens||(console.warn(`The number of image tokens was not set in the model configuration. Setting it to the number of features detected by the vision encoder (${t.dims[1]}).`),this.config.num_image_tokens=t.dims[1]),t}async encode_text({input_ids:e}){return(await z(this.sessions.embed_tokens,{input_ids:e})).inputs_embeds}}class H{}class J extends H{constructor({last_hidden_state:e,hidden_states:t=null,attentions:s=null}){super(),this.last_hidden_state=e,this.hidden_states=t,this.attentions=s}}class Y extends X{}class K extends Y{}class Z extends Y{async _call(e){return new Zl(await super._call(e))}}class ee extends Y{async _call(e){return new Jl(await super._call(e))}}class te extends Y{async _call(e){return new Kl(await super._call(e))}}class se extends Y{async _call(e){return new ec(await super._call(e))}}class re extends X{}class oe extends re{}class ne extends X{}class ae extends ne{}class ie extends ne{async _call(e){return new Zl(await super._call(e))}}class le extends ne{async _call(e){return new Jl(await super._call(e))}}class ce extends ne{async _call(e){return new Kl(await super._call(e))}}class de extends ne{async _call(e){return 
new ec(await super._call(e))}}class ue extends X{}class pe extends ue{}class me extends ue{async _call(e){return new Zl(await super._call(e))}}class _e extends ue{async _call(e){return new Jl(await super._call(e))}}class he extends ue{async _call(e){return new Kl(await super._call(e))}}class fe extends ue{async _call(e){return new ec(await super._call(e))}}class ge extends X{}class Me extends ge{}class we extends ge{async _call(e){return new Zl(await super._call(e))}}class Te extends ge{async _call(e){return new Jl(await super._call(e))}}class be extends ge{async _call(e){return new Kl(await super._call(e))}}class xe extends ge{async _call(e){return new ec(await super._call(e))}}class Pe extends X{}class ke extends Pe{}class ye extends Pe{async _call(e){return new Zl(await super._call(e))}}class Fe extends Pe{async _call(e){return new Jl(await super._call(e))}}class ve extends Pe{async _call(e){return new Kl(await super._call(e))}}class Ce extends Pe{async _call(e){return new ec(await super._call(e))}}class Se extends X{}class Ae extends Se{}class Ee extends Se{async _call(e){return new Zl(await super._call(e))}}class Le extends Se{async _call(e){return new Jl(await super._call(e))}}class Ie extends Se{async _call(e){return new Kl(await super._call(e))}}class ze extends Se{async _call(e){return new ec(await super._call(e))}}class je extends X{}class Ne extends je{}class Ve extends je{async _call(e){return new Zl(await super._call(e))}}class Be extends je{async _call(e){return new Jl(await super._call(e))}}class Oe extends je{async _call(e){return new Kl(await super._call(e))}}class De extends je{async _call(e){return new ec(await super._call(e))}}class Re extends X{}class Ge extends Re{}class qe extends Re{async _call(e){return new Jl(await super._call(e))}}class We extends Re{async _call(e){return new Kl(await super._call(e))}}class $e extends Re{async _call(e){return new ec(await super._call(e))}}class Ue extends Re{async _call(e){return new Zl(await 
super._call(e))}}class Qe extends X{}class Xe extends Qe{}class He extends Qe{async _call(e){return new Zl(await super._call(e))}}class Je extends Qe{async _call(e){return new Jl(await super._call(e))}}class Ye extends Qe{async _call(e){return new Kl(await super._call(e))}}class Ke extends X{}class Ze extends Ke{}class et extends Ke{async _call(e){return new Zl(await super._call(e))}}class tt extends Ke{async _call(e){return new Jl(await super._call(e))}}class st extends Ke{async _call(e){return new ec(await super._call(e))}}class rt extends X{}class ot extends rt{}class nt extends rt{async _call(e){return new Zl(await super._call(e))}}class at extends rt{async _call(e){return new Jl(await super._call(e))}}class it extends rt{async _call(e){return new Kl(await super._call(e))}}class lt extends rt{async _call(e){return new ec(await super._call(e))}}class ct extends X{}class dt extends ct{}class ut extends ct{async _call(e){return new Zl(await super._call(e))}}class pt extends ct{async _call(e){return new Jl(await super._call(e))}}class mt extends ct{async _call(e){return new ec(await super._call(e))}}class _t extends X{}class ht extends _t{}class ft extends _t{async _call(e){return new Jl(await super._call(e))}}class gt extends _t{async _call(e){return new ec(await super._call(e))}}class Mt extends _t{async _call(e){return new Zl(await super._call(e))}}class wt extends X{forward_params=["input_ids","attention_mask","encoder_outputs","decoder_input_ids","decoder_attention_mask","past_key_values"]}class Tt extends wt{}class bt extends wt{}class xt extends X{}class Pt extends xt{}class kt extends xt{}class yt extends X{}class Ft extends yt{}class vt extends yt{}class Ct extends X{}class St extends Ct{}class At extends Ct{}class Et extends Ct{async _call(e){return new Jl(await super._call(e))}}class Lt extends X{}class It extends Lt{}class zt extends Lt{}class jt extends Lt{async _call(e){return new Jl(await super._call(e))}}class Nt extends Lt{}class Vt extends 
X{}class Bt extends Vt{}class Ot extends Vt{}class Dt extends X{}class Rt extends Dt{}class Gt extends Dt{}class qt extends X{}class Wt extends qt{}class $t extends qt{async _call(e){return new Zl(await super._call(e))}}class Ut extends qt{async _call(e){return new Jl(await super._call(e))}}class Qt extends qt{async _call(e){return new Kl(await super._call(e))}}class Xt extends qt{async _call(e){return new ec(await super._call(e))}}class Ht extends X{}class Jt extends Ht{}class Yt extends Ht{async _call(e){return new Zl(await super._call(e))}}class Kt extends Ht{async _call(e){return new Jl(await super._call(e))}}class Zt extends Ht{async _call(e){return new Kl(await super._call(e))}}class es extends Ht{async _call(e){return new ec(await super._call(e))}}class ts extends X{}class ss extends ts{}class rs extends ts{async _call(e){return new Zl(await super._call(e))}}class os extends ts{async _call(e){return new Jl(await super._call(e))}}class ns extends ts{async _call(e){return new Kl(await super._call(e))}}class as extends ts{async _call(e){return new ec(await super._call(e))}}class is extends X{}class ls extends is{}class cs extends is{}class ds extends X{requires_attention_mask=!1;main_input_name="input_features";forward_params=["input_features","attention_mask","decoder_input_ids","decoder_attention_mask","past_key_values"]}class us extends ds{}class ps extends ds{_prepare_generation_config(e,t){return super._prepare_generation_config(e,t,M.WhisperGenerationConfig)}_retrieve_init_tokens(e){const t=[e.decoder_start_token_id];let s=e.language;const r=e.task;if(e.is_multilingual){s||(console.warn("No language specified - defaulting to English (en)."),s="en");const o=`<|${(0,w.whisper_language_to_code)(s)}|>`;t.push(e.lang_to_id[o]),t.push(e.task_to_id[r??"transcribe"])}else if(s||r)throw new Error("Cannot specify `task` or `language` for an English-only model. 
If the model is intended to be multilingual, pass `is_multilingual=true` to generate, or update the generation config.");return!e.return_timestamps&&e.no_timestamps_token_id&&t.at(-1)!==e.no_timestamps_token_id?t.push(e.no_timestamps_token_id):e.return_timestamps&&t.at(-1)===e.no_timestamps_token_id&&(console.warn("<|notimestamps|> prompt token is removed from generation_config since `return_timestamps` is set to `true`."),t.pop()),t.filter((e=>null!=e))}async generate({inputs:e=null,generation_config:t=null,logits_processor:s=null,stopping_criteria:r=null,...o}){t=this._prepare_generation_config(t,o);const n=o.decoder_input_ids??this._retrieve_init_tokens(t);if(t.return_timestamps&&(s??=new d.LogitsProcessorList,s.push(new d.WhisperTimeStampLogitsProcessor(t,n))),t.begin_suppress_tokens&&(s??=new d.LogitsProcessorList,s.push(new d.SuppressTokensAtBeginLogitsProcessor(t.begin_suppress_tokens,n.length))),t.return_token_timestamps){if(!t.alignment_heads)throw new Error("Model generation config has no `alignment_heads`, token-level timestamps not available. See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config.");"translate"===t.task&&console.warn("Token-level timestamps may not be reliable for task 'translate'."),t.output_attentions=!0,t.return_dict_in_generate=!0}const a=await super.generate({inputs:e,generation_config:t,logits_processor:s,decoder_input_ids:n,...o});return t.return_token_timestamps&&(a.token_timestamps=this._extract_token_timestamps(a,t.alignment_heads,t.num_frames)),a}_extract_token_timestamps(e,t,s=null,r=.02){if(!e.cross_attentions)throw new Error("Model outputs must contain cross attentions to extract timestamps. This is most likely because the model was not exported with `output_attentions=True`.");null==s&&console.warn("`num_frames` has not been set, meaning the entire audio will be analyzed. 
This may lead to inaccurate token-level timestamps for short audios (< 30 seconds).");let o=this.config.median_filter_width;void 0===o&&(console.warn("Model config has no `median_filter_width`, using default value of 7."),o=7);const n=e.cross_attentions,a=Array.from({length:this.config.decoder_layers},((e,t)=>(0,p.cat)(n.map((e=>e[t])),2))),l=(0,p.stack)(t.map((([e,t])=>{if(e>=a.length)throw new Error(`Layer index ${e} is out of bounds for cross attentions (length ${a.length}).`);return s?a[e].slice(null,t,null,[0,s]):a[e].slice(null,t)}))).transpose(1,0,2,3),[c,d]=(0,p.std_mean)(l,-2,0,!0),u=l.clone();for(let e=0;e<u.dims[0];++e){const t=u[e];for(let s=0;s<t.dims[0];++s){const r=t[s],n=c[e][s][0].data,a=d[e][s][0].data;for(let e=0;e<r.dims[0];++e){let t=r[e].data;for(let e=0;e<t.length;++e)t[e]=(t[e]-a[e])/n[e];t.set((0,_.medianFilter)(t,o))}}}const m=[(0,p.mean)(u,1)],h=e.sequences.dims,f=new p.Tensor("float32",new Float32Array(h[0]*h[1]),h);for(let e=0;e<h[0];++e){const t=m[e].neg().squeeze_(0),[s,o]=(0,_.dynamic_time_warping)(t.tolist()),n=Array.from({length:s.length-1},((e,t)=>s[t+1]-s[t])),a=(0,i.mergeArrays)([1],n).map((e=>!!e)),l=[];for(let e=0;e<a.length;++e)a[e]&&l.push(o[e]*r);f[e].data.set(l,1)}return f}}class ms extends X{main_input_name="pixel_values";forward_params=["pixel_values","decoder_input_ids","encoder_hidden_states","past_key_values"]}class _s extends X{forward_params=["input_ids","attention_mask","pixel_values","position_ids","past_key_values"]}class hs extends _s{_merge_input_ids_with_image_features({inputs_embeds:e,image_features:t,input_ids:s,attention_mask:r}){const o=this.config.image_token_index,n=s.tolist().map((e=>e.findIndex((e=>e==o)))),a=n.every((e=>-1===e)),i=n.every((e=>-1!==e));if(!a&&!i)throw new Error("Every input should contain either 0 or 1 image token.");if(a)return{inputs_embeds:e,attention_mask:r};const l=[],c=[];for(let s=0;s<n.length;++s){const 
o=n[s],a=e[s],i=t[s],d=r[s];l.push((0,p.cat)([a.slice([0,o]),i,a.slice([o+1,a.dims[0]])],0)),c.push((0,p.cat)([d.slice([0,o]),(0,p.ones)([i.dims[0]]),d.slice([o+1,d.dims[0]])],0))}return{inputs_embeds:(0,p.stack)(l,0),attention_mask:(0,p.stack)(c,0)}}}class fs extends hs{}class gs extends hs{}class Ms extends X{forward_params=["input_ids","inputs_embeds","attention_mask","pixel_values","encoder_outputs","decoder_input_ids","decoder_inputs_embeds","decoder_attention_mask","past_key_values"];main_input_name="inputs_embeds"}class ws extends Ms{_merge_input_ids_with_image_features({inputs_embeds:e,image_features:t,input_ids:s,attention_mask:r}){return{inputs_embeds:(0,p.cat)([t,e],1),attention_mask:(0,p.cat)([(0,p.ones)(t.dims.slice(0,2)),r],1)}}async _prepare_inputs_embeds({input_ids:e,pixel_values:t,inputs_embeds:s,attention_mask:r}){if(!e&&!t)throw new Error("Either `input_ids` or `pixel_values` should be provided.");let o,n;return e&&(o=await this.encode_text({input_ids:e})),t&&(n=await this.encode_image({pixel_values:t})),o&&n?({inputs_embeds:s,attention_mask:r}=this._merge_input_ids_with_image_features({inputs_embeds:o,image_features:n,input_ids:e,attention_mask:r})):s=o||n,{inputs_embeds:s,attention_mask:r}}async forward({input_ids:e,pixel_values:t,attention_mask:s,decoder_input_ids:r,decoder_attention_mask:o,encoder_outputs:n,past_key_values:a,inputs_embeds:i,decoder_inputs_embeds:l}){if(i||({inputs_embeds:i,attention_mask:s}=await this._prepare_inputs_embeds({input_ids:e,pixel_values:t,inputs_embeds:i,attention_mask:s})),!n){let{last_hidden_state:e}=await O(this,{inputs_embeds:i,attention_mask:s});n=e}if(!l){if(!r)throw new Error("Either `decoder_input_ids` or `decoder_inputs_embeds` should be provided.");l=await this.encode_text({input_ids:r})}const c={inputs_embeds:l,attention_mask:o,encoder_attention_mask:s,encoder_hidden_states:n,past_key_values:a};return await D(this,c,!0)}}class Ts extends 
X{forward_params=["input_ids","attention_mask","pixel_values","pixel_attention_mask","position_ids","past_key_values"]}class bs extends Ts{async encode_image({pixel_values:e,pixel_attention_mask:t}){return(await z(this.sessions.vision_encoder,{pixel_values:e,pixel_attention_mask:t})).image_features}_merge_input_ids_with_image_features(e){const t=e.image_features.dims.at(-1),s=e.image_features.view(-1,t);return R({image_token_id:this.config.image_token_id,...e,image_features:s})}}class xs extends X{}class Ps extends xs{}class ks extends xs{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class ys extends xs{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Fs extends xs{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class vs extends xs{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class Cs extends X{}class Ss extends Cs{}class As extends Cs{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Es extends xs{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class Ls extends X{}class Is extends Ls{}class zs extends X{}class js extends zs{async forward(e){const t=!e.input_ids,s=!e.pixel_values;if(t&&s)throw new Error("Either `input_ids` or `pixel_values` should be provided.");if(t&&(e.input_ids=(0,p.ones)([e.pixel_values.dims[0],1])),s){const{image_size:t}=this.config.vision_config;e.pixel_values=(0,p.full)([0,3,t,t],0)}const{text_embeddings:r,image_embeddings:o,l2norm_text_embeddings:n,l2norm_image_embeddings:a}=await super.forward(e),i={};return t||(i.text_embeddings=r,i.l2norm_text_embeddings=n),s||(i.image_embeddings=o,i.l2norm_image_embeddings=a),i}}class Ns extends zs{static async from_pretrained(e,t={}){return 
t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Vs extends zs{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class Bs extends X{}class Os extends Bs{}class Ds extends Bs{}class Rs extends X{}class Gs extends Rs{}class qs extends Rs{}class Ws extends X{}class $s extends Ws{}class Us extends Ws{}class Qs extends X{}class Xs extends Qs{}class Hs extends Qs{}class Js extends X{}class Ys extends Js{}class Ks extends Js{}class Zs extends X{}class er extends Zs{}class tr extends Zs{}class sr extends X{}class rr extends sr{}class or extends sr{}class nr extends X{}class ar extends nr{}class ir extends nr{}class lr extends X{}class cr extends lr{}class dr extends lr{}class ur extends X{}class pr extends ur{}class mr extends ur{}class _r extends X{}class hr extends _r{}class fr extends _r{}class gr extends X{}class Mr extends gr{}class wr extends gr{}class Tr extends X{}class br extends Tr{}class xr extends Tr{}class Pr extends X{}class kr extends Pr{}class yr extends Pr{}class Fr extends X{}class vr extends Fr{}class Cr extends Fr{}class Sr extends X{}class Ar extends Sr{}class Er extends Sr{}class Lr extends X{}class Ir extends Lr{}class zr extends Lr{}class jr extends X{forward_params=["input_ids","attention_mask","position_ids","past_key_values","pixel_values","image_grid_thw"]}class Nr extends jr{get_rope_index(e,t,s,r){const{vision_config:o,image_token_id:n,video_token_id:a,vision_start_token_id:i}=this.config,l=o.spatial_merge_size??2,c=[];if(t||s){let o=e.tolist();r||(r=(0,p.ones_like)(e));const d=r.tolist(),u=Array.from({length:3},(t=>Array.from({length:e.dims[0]},(t=>Array.from({length:e.dims[1]},(e=>1)))))),m=t?t.tolist():[],h=s?s.tolist():[];let f=0,g=0;for(let e=0;e<o.length;++e){const t=o[e].filter(((t,s)=>1==d[e][s])),s=t.reduce(((e,t,s)=>(t==i&&e.push(s),e)),[]).map((e=>t[e+1])),r=s.filter((e=>e==n)).length,p=s.filter((e=>e==a)).length;let M=[],w=0,T=r,b=p;for(let 
e=0;e<s.length;++e){const e=t.findIndex(((e,t)=>t>w&&e==n)),s=t.findIndex(((e,t)=>t>w&&e==a)),r=T>0&&-1!==e?e:t.length+1,o=b>0&&-1!==s?s:t.length+1;let i,c,d,u;r<o?([c,d,u]=m[f],++f,--T,i=r):([c,d,u]=h[g],++g,--b,i=o);const[p,x,P]=[Number(c),Math.floor(Number(d)/l),Math.floor(Number(u)/l)],k=i-w,y=M.length>0?(0,_.max)(M.at(-1))[0]+1:0;M.push(Array.from({length:3*k},((e,t)=>y+t%k)));const F=k+y,v=p*x*P,C=Array.from({length:v},((e,t)=>F+Math.floor(t/(x*P)))),S=Array.from({length:v},((e,t)=>F+Math.floor(t/P)%x)),A=Array.from({length:v},((e,t)=>F+t%P));M.push([C,S,A].flat()),w=i+v}if(w<t.length){const e=M.length>0?(0,_.max)(M.at(-1))[0]+1:0,s=t.length-w;M.push(Array.from({length:3*s},((t,r)=>e+r%s)))}const x=M.reduce(((e,t)=>e+t.length),0),P=new Array(x);let k=0;for(let e=0;e<3;++e)for(let t=0;t<M.length;++t){const s=M[t],r=s.length/3;for(let t=e*r;t<(e+1)*r;++t)P[k++]=s[t]}let y=0;const F=d[e];for(let t=0;t<F.length;++t)if(1==F[t]){for(let s=0;s<3;++s)u[s][e][t]=P[s*x/3+y];++y}const v=(0,_.max)(P)[0];c.push(v+1-o[e].length)}return[new p.Tensor("int64",u.flat(1/0),[3,e.dims[0],e.dims[1]]),new p.Tensor("int64",c,[c.length,1])]}if(r){const{data:e,dims:t}=q(r),s=BigInt64Array.from({length:3*e.length},((t,s)=>e[s%e.length])),o=Array.from({length:t[0]},((s,r)=>(0,_.max)(e.subarray(t[1]*r,t[1]*(r+1)))[0]+1+t[1]));return[new p.Tensor("int64",s,[3,...t]),new p.Tensor("int64",o,[o.length,1])]}{const[t,s]=e.dims,r=BigInt64Array.from({length:3*t*s},((e,r)=>BigInt(Math.floor(r%s/t))));return[new p.Tensor("int64",r,[3,...e.dims]),(0,p.zeros)([t,1])]}}async encode_image({pixel_values:e,image_grid_thw:t}){return(await z(this.sessions.vision_encoder,{pixel_values:e,grid_thw:t})).image_features}_merge_input_ids_with_image_features(e){return R({image_token_id:this.config.image_token_id,...e})}prepare_inputs_for_generation(e,t,s){if(t.attention_mask&&!t.position_ids)if(t.past_key_values){t.pixel_values=null;const 
e=BigInt(Object.values(t.past_key_values)[0].dims.at(-2)),s=t.rope_deltas.map((t=>e+t));t.position_ids=(0,p.stack)([s,s,s],0)}else[t.position_ids,t.rope_deltas]=this.get_rope_index(t.input_ids,t.image_grid_thw,t.video_grid_thw,t.attention_mask);return t}}class Vr extends X{}class Br extends Vr{}class Or extends Vr{}class Dr extends X{}class Rr extends Dr{}class Gr extends Dr{}class qr extends X{}class Wr extends qr{}class $r extends qr{}class Ur extends X{}class Qr extends Ur{}class Xr extends Ur{}class Hr extends X{}class Jr extends Hr{}class Yr extends Hr{}class Kr extends X{}class Zr extends Kr{}class eo extends Kr{async _call(e){return new Jl(await super._call(e))}}class to extends X{}class so extends to{}class ro extends X{}class oo extends ro{}class no extends ro{async _call(e){return new Jl(await super._call(e))}}class ao extends X{}class io extends ao{}class lo extends X{}class co extends lo{}class uo extends lo{async _call(e){return new Jl(await super._call(e))}}class po extends X{}class mo extends po{}class _o extends X{}class ho extends _o{}class fo extends _o{async _call(e){return new Jl(await super._call(e))}}class go extends X{}class Mo extends go{async _call(e){return new rc(await super._call(e))}}class wo extends X{}class To extends wo{}class bo extends wo{async _call(e){return new Jl(await super._call(e))}}class xo extends X{}class Po extends xo{}class ko extends xo{async _call(e){return new Jl(await super._call(e))}}class yo extends X{}class Fo extends yo{}class vo extends yo{}class Co extends X{}class So extends Co{}class Ao extends Co{}class Eo extends X{}class Lo extends Eo{}class Io extends Eo{async _call(e){return new Jl(await super._call(e))}}class zo extends X{}class jo extends zo{}class No extends zo{async _call(e){return new Bo(await super._call(e))}}class Vo extends zo{async _call(e){return new Oo(await super._call(e))}}class Bo extends H{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class Oo extends 
H{constructor({logits:e,pred_boxes:t,pred_masks:s}){super(),this.logits=e,this.pred_boxes=t,this.pred_masks=s}}class Do extends X{}class Ro extends Do{}class Go extends Do{async _call(e){return new qo(await super._call(e))}}class qo extends H{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class Wo extends X{}class $o extends Wo{}class Uo extends Wo{async _call(e){return new Qo(await super._call(e))}}class Qo extends Bo{}class Xo extends X{}class Ho extends Xo{}class Jo extends Xo{async _call(e){return new Jl(await super._call(e))}}class Yo extends X{}class Ko extends Yo{}class Zo extends Yo{async _call(e){return new Jl(await super._call(e))}}class en extends X{}class tn extends en{}class sn extends en{async _call(e){return new Jl(await super._call(e))}}class rn extends X{}class on extends rn{}class nn extends rn{async _call(e){return new Jl(await super._call(e))}}class an extends X{}class ln extends an{}class cn extends an{}class dn extends X{}class un extends dn{}class pn extends dn{}class mn extends X{}class _n extends mn{}class hn extends X{}class fn extends hn{}class gn extends hn{}class Mn extends hn{}class wn extends X{}class Tn extends wn{}class bn extends X{}class xn extends bn{}class Pn extends bn{}class kn extends X{}class yn extends kn{}class Fn extends kn{}class vn extends X{}class Cn extends vn{}class Sn extends X{}class An extends Sn{}class En extends Sn{async _call(e){return new Jl(await super._call(e))}}class Ln extends X{}class In extends Ln{}class zn extends Ln{async _call(e){return new Jl(await super._call(e))}}class jn extends X{}class Nn extends jn{}class Vn extends jn{async _call(e){return new Jl(await super._call(e))}}class Bn extends X{}class On extends Bn{}class Dn extends Bn{async _call(e){return new Rn(await super._call(e))}}class Rn extends H{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class Gn extends X{}class qn extends Gn{async 
get_image_embeddings({pixel_values:e}){return await O(this,{pixel_values:e})}async forward(e){if(e.image_embeddings&&e.image_positional_embeddings||(e={...e,...await this.get_image_embeddings(e)}),!e.input_labels&&e.input_points){const t=e.input_points.dims.slice(0,-1),s=t.reduce(((e,t)=>e*t),1);e.input_labels=new p.Tensor("int64",new BigInt64Array(s).fill(1n),t)}const t={image_embeddings:e.image_embeddings,image_positional_embeddings:e.image_positional_embeddings};return e.input_points&&(t.input_points=e.input_points),e.input_labels&&(t.input_labels=e.input_labels),e.input_boxes&&(t.input_boxes=e.input_boxes),await z(this.sessions.prompt_encoder_mask_decoder,t)}async _call(e){return new Wn(await super._call(e))}}class Wn extends H{constructor({iou_scores:e,pred_masks:t}){super(),this.iou_scores=e,this.pred_masks=t}}class $n extends X{}class Un extends $n{}class Qn extends $n{}class Xn extends X{}class Hn extends Xn{}class Jn extends Xn{}class Yn extends X{}class Kn extends Yn{}class Zn extends Yn{async _call(e){return new tc(await super._call(e))}}class ea extends Yn{async _call(e){return new Jl(await super._call(e))}}class ta extends Yn{async _call(e){return new Kl(await super._call(e))}}class sa extends X{}class ra extends sa{}class oa extends sa{async _call(e){return new Kl(await super._call(e))}}class na extends X{}class aa extends na{}class ia extends X{}class la extends ia{}class ca extends ia{async _call(e){return new tc(await super._call(e))}}class da extends ia{async _call(e){return new Jl(await super._call(e))}}class ua extends X{}class pa extends ua{}class ma extends ua{async _call(e){return new tc(await super._call(e))}}class _a extends ua{async _call(e){return new Jl(await super._call(e))}}class ha extends ua{async _call(e){return new Kl(await super._call(e))}}class fa extends X{}class ga extends fa{}class Ma extends fa{async _call(e){return new tc(await super._call(e))}}class wa extends fa{async _call(e){return new Jl(await super._call(e))}}class Ta 
extends X{}class ba extends Yn{}class xa extends Yn{async _call(e){return new tc(await super._call(e))}}class Pa extends Yn{async _call(e){return new Jl(await super._call(e))}}class ka extends X{}class ya extends ka{}class Fa extends ka{async _call(e){return new tc(await super._call(e))}}class va extends ka{async _call(e){return new Jl(await super._call(e))}}class Ca extends ka{async _call(e){return new Yl(await super._call(e))}}class Sa extends ka{async _call(e){return new Kl(await super._call(e))}}class Aa extends X{}class Ea extends Aa{}class La extends Aa{}class Ia extends Aa{async generate_speech(e,t,{threshold:s=.5,minlenratio:r=0,maxlenratio:o=20,vocoder:n=null}={}){const a={input_ids:e},{encoder_outputs:i,encoder_attention_mask:l}=await O(this,a),c=i.dims[1]/this.config.reduction_factor,d=Math.floor(c*o),u=Math.floor(c*r),m=this.config.num_mel_bins;let _=[],h=null,f=null,g=0;for(;;){++g;const e=V(!!f);let r;r=f?f.output_sequence_out:new p.Tensor("float32",new Float32Array(m),[1,1,m]);let o={use_cache_branch:e,output_sequence:r,encoder_attention_mask:l,speaker_embeddings:t,encoder_hidden_states:i};this.addPastKeyValues(o,h),f=await z(this.sessions.decoder_model_merged,o),h=this.getPastKeyValues(f,h);const{prob:n,spectrum:a}=f;if(_.push(a),g>=u&&(Array.from(n.data).filter((e=>e>=s)).length>0||g>=d))break}const M=(0,p.cat)(_),{waveform:w}=await z(n.sessions.model,{spectrogram:M});return{spectrogram:M,waveform:w}}}class za extends X{main_input_name="spectrogram"}class ja extends X{}class Na extends ja{}class Va extends X{}class Ba extends Va{}class Oa extends Va{}class Da extends X{}class Ra extends Da{}class Ga extends Da{}class qa extends X{}class Wa extends qa{}class $a extends qa{}class Ua extends X{}class Qa extends Ua{}class Xa extends Ua{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Ha extends Ua{static async from_pretrained(e,t={}){return 
t.model_file_name??="audio_model",super.from_pretrained(e,t)}}class Ja extends X{}class Ya extends Ja{async _call(e){return new oc(await super._call(e))}}class Ka extends X{}class Za extends Ka{}class ei extends Ka{}class ti extends Ka{}class si extends X{}class ri extends si{}class oi extends si{}class ni extends X{}class ai extends ni{}class ii extends ni{async _call(e){return new Jl(await super._call(e))}}class li extends X{}class ci extends li{}class di extends li{}class ui extends X{forward_params=["input_ids","attention_mask","encoder_outputs","decoder_input_ids","decoder_attention_mask","past_key_values"];_apply_and_filter_by_delay_pattern_mask(e){const[t,s]=e.dims,r=this.config.decoder.num_codebooks,o=s-r;let n=0;for(let t=0;t<e.size;++t){if(e.data[t]===this.config.decoder.pad_token_id)continue;const a=t%s-Math.floor(t/s)%r;a>0&&a<=o&&(e.data[n++]=e.data[t])}const a=Math.floor(t/r),i=n/(a*r);return new p.Tensor(e.type,e.data.slice(0,n),[a,r,i])}prepare_inputs_for_generation(e,t,s){let r=structuredClone(e);for(let e=0;e<r.length;++e)for(let t=0;t<r[e].length;++t)e%this.config.decoder.num_codebooks>=t&&(r[e][t]=BigInt(this.config.decoder.pad_token_id));null!==s.guidance_scale&&s.guidance_scale>1&&(r=r.concat(r));return super.prepare_inputs_for_generation(r,t,s)}async generate(e){const t=await super.generate(e),s=this._apply_and_filter_by_delay_pattern_mask(t).unsqueeze_(0),{audio_values:r}=await z(this.sessions.encodec_decode,{audio_codes:s});return r}}class pi extends X{}class mi extends pi{}class _i extends pi{async _call(e){return new Jl(await super._call(e))}}class hi extends X{}class fi extends hi{}class gi extends hi{async _call(e){return new Jl(await super._call(e))}}class Mi extends X{}class wi extends Mi{}class Ti extends Mi{async _call(e){return new Jl(await super._call(e))}}class bi extends X{}class xi extends bi{}class Pi extends bi{async _call(e){return new Jl(await super._call(e))}}class ki extends X{}class yi extends ki{}class Fi extends 
X{}class vi extends Fi{forward_params=["input_ids","pixel_values","images_seq_mask","images_emb_mask","attention_mask","position_ids","past_key_values"];constructor(...e){super(...e),this._generation_mode="text"}async forward(e){const t=this._generation_mode??"text";let s;if("text"!==t&&e.past_key_values){const t=this.sessions.gen_img_embeds,r=(0,i.pick)({image_ids:e.input_ids},t.inputNames);s=await z(t,r)}else{const t=this.sessions.prepare_inputs_embeds,r=(0,i.pick)(e,t.inputNames);s=await z(t,r)}const r={...e,...s},o=await D(this,r),n=this.sessions["text"===t?"lm_head":"gen_head"];if(!n)throw new Error(`Unable to find "${n}" generation head`);const a=await z(n,(0,i.pick)(o,n.inputNames));return{...s,...o,...a}}async generate(e){return this._generation_mode="text",super.generate(e)}async generate_images(e){this._generation_mode="image";const t=(e.inputs??e[this.main_input_name]).dims[1],s=(await super.generate(e)).slice(null,[t,null]),r=this.sessions.image_decode,{decoded_image:o}=await z(r,{generated_tokens:s}),n=o.add_(1).mul_(127.5).clamp_(0,255).to("uint8"),a=[];for(const e of n){const t=m.RawImage.fromTensor(e);a.push(t)}return a}}class Ci extends H{constructor({char_logits:e,bpe_logits:t,wp_logits:s}){super(),this.char_logits=e,this.bpe_logits=t,this.wp_logits=s}get logits(){return[this.char_logits,this.bpe_logits,this.wp_logits]}}class Si extends X{}class Ai extends Si{async _call(e){return new Ci(await super._call(e))}}class Ei extends X{}class Li extends Ei{}class Ii extends Ei{}class zi extends X{}class ji extends zi{}class Ni extends zi{}class Vi{static MODEL_CLASS_MAPPINGS=null;static BASE_IF_FAIL=!1;static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:o=null,local_files_only:n=!1,revision:a="main",model_file_name:i=null,subfolder:l="onnx",device:c=null,dtype:d=null,use_external_data_format:u=null,session_options:p={}}={}){const 
m={progress_callback:t,config:s,cache_dir:o,local_files_only:n,revision:a,model_file_name:i,subfolder:l,device:c,dtype:d,use_external_data_format:u,session_options:p};if(m.config=await r.AutoConfig.from_pretrained(e,m),!this.MODEL_CLASS_MAPPINGS)throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: "+this.name);for(const t of this.MODEL_CLASS_MAPPINGS){const s=t.get(m.config.model_type);if(s)return await s[1].from_pretrained(e,m)}if(this.BASE_IF_FAIL)return console.warn(`Unknown model class "${m.config.model_type}", attempting to construct from base class.`),await X.from_pretrained(e,m);throw Error(`Unsupported model type: ${m.config.model_type}`)}}const Bi=new Map([["bert",["BertModel",K]],["nomic_bert",["NomicBertModel",oe]],["roformer",["RoFormerModel",ae]],["electra",["ElectraModel",Me]],["esm",["EsmModel",Xe]],["convbert",["ConvBertModel",pe]],["camembert",["CamembertModel",ke]],["deberta",["DebertaModel",Ae]],["deberta-v2",["DebertaV2Model",Ne]],["mpnet",["MPNetModel",ot]],["albert",["AlbertModel",ht]],["distilbert",["DistilBertModel",Ge]],["roberta",["RobertaModel",Wt]],["xlm",["XLMModel",Jt]],["xlm-roberta",["XLMRobertaModel",ss]],["clap",["ClapModel",Qa]],["clip",["CLIPModel",Ps]],["clipseg",["CLIPSegModel",Os]],["chinese_clip",["ChineseCLIPModel",Is]],["siglip",["SiglipModel",Ss]],["jina_clip",["JinaCLIPModel",js]],["mobilebert",["MobileBertModel",Ze]],["squeezebert",["SqueezeBertModel",dt]],["wav2vec2",["Wav2Vec2Model",Kn]],["wav2vec2-bert",["Wav2Vec2BertModel",ga]],["unispeech",["UniSpeechModel",la]],["unispeech-sat",["UniSpeechSatModel",pa]],["hubert",["HubertModel",ba]],["wavlm",["WavLMModel",ya]],["audio-spectrogram-transformer",["ASTModel",ls]],["vits",["VitsModel",Ya]],["pyannote",["PyAnnoteModel",ra]],["wespeaker-resnet",["WeSpeakerResNetModel",aa]],["detr",["DetrModel",jo]],["rt_detr",["RTDetrModel",Ro]],["table-transformer",["TableTransformerModel",$o]],["vit",["ViTModel",Zr]],["pvt",["PvtModel",oo]],["vit_msn",["V
iTMSNModel",co]],["vit_mae",["ViTMAEModel",io]],["groupvit",["GroupViTModel",mo]],["fastvit",["FastViTModel",ho]],["mobilevit",["MobileViTModel",To]],["mobilevitv2",["MobileViTV2Model",Po]],["owlvit",["OwlViTModel",Fo]],["owlv2",["Owlv2Model",So]],["beit",["BeitModel",Lo]],["deit",["DeiTModel",Ho]],["hiera",["HieraModel",Ko]],["convnext",["ConvNextModel",An]],["convnextv2",["ConvNextV2Model",In]],["dinov2",["Dinov2Model",Nn]],["resnet",["ResNetModel",tn]],["swin",["SwinModel",on]],["swin2sr",["Swin2SRModel",ln]],["donut-swin",["DonutSwinModel",Cn]],["yolos",["YolosModel",On]],["dpt",["DPTModel",un]],["glpn",["GLPNModel",yn]],["hifigan",["SpeechT5HifiGan",za]],["efficientnet",["EfficientNetModel",ai]],["decision_transformer",["DecisionTransformerModel",yi]],["patchtst",["PatchTSTForPrediction",Li]],["patchtsmixer",["PatchTSMixerForPrediction",ji]],["mobilenet_v1",["MobileNetV1Model",mi]],["mobilenet_v2",["MobileNetV2Model",fi]],["mobilenet_v3",["MobileNetV3Model",wi]],["mobilenet_v4",["MobileNetV4Model",xi]],["maskformer",["MaskFormerModel",xn]],["mgp-str",["MgpstrForSceneTextRecognition",Ai]]]),Oi=new Map([["t5",["T5Model",Tt]],["longt5",["LongT5Model",Pt]],["mt5",["MT5Model",Ft]],["bart",["BartModel",St]],["mbart",["MBartModel",It]],["marian",["MarianModel",Un]],["whisper",["WhisperModel",us]],["m2m_100",["M2M100Model",Hn]],["blenderbot",["BlenderbotModel",Bt]],["blenderbot-small",["BlenderbotSmallModel",Rt]]]),Di=new 
Map([["bloom",["BloomModel",Wr]],["jais",["JAISModel",$s]],["gpt2",["GPT2Model",Gs]],["gptj",["GPTJModel",er]],["gpt_bigcode",["GPTBigCodeModel",rr]],["gpt_neo",["GPTNeoModel",Xs]],["gpt_neox",["GPTNeoXModel",Ys]],["codegen",["CodeGenModel",ar]],["llama",["LlamaModel",cr]],["olmo",["OlmoModel",hr]],["mobilellm",["MobileLLMModel",pr]],["granite",["GraniteModel",Mr]],["cohere",["CohereModel",br]],["gemma",["GemmaModel",kr]],["gemma2",["Gemma2Model",vr]],["openelm",["OpenELMModel",Ar]],["qwen2",["Qwen2Model",Ir]],["phi",["PhiModel",Br]],["phi3",["Phi3Model",Rr]],["mpt",["MptModel",Qr]],["opt",["OPTModel",Jr]],["mistral",["MistralModel",Ba]],["starcoder2",["Starcoder2Model",Ra]],["falcon",["FalconModel",Wa]],["stablelm",["StableLmModel",ri]]]),Ri=new Map([["speecht5",["SpeechT5ForSpeechToText",La]],["whisper",["WhisperForConditionalGeneration",ps]]]),Gi=new Map([["speecht5",["SpeechT5ForTextToSpeech",Ia]]]),qi=new Map([["vits",["VitsModel",Ya]],["musicgen",["MusicgenForConditionalGeneration",ui]]]),Wi=new Map([["bert",["BertForSequenceClassification",ee]],["roformer",["RoFormerForSequenceClassification",le]],["electra",["ElectraForSequenceClassification",Te]],["esm",["EsmForSequenceClassification",Je]],["convbert",["ConvBertForSequenceClassification",_e]],["camembert",["CamembertForSequenceClassification",Fe]],["deberta",["DebertaForSequenceClassification",Le]],["deberta-v2",["DebertaV2ForSequenceClassification",Be]],["mpnet",["MPNetForSequenceClassification",at]],["albert",["AlbertForSequenceClassification",ft]],["distilbert",["DistilBertForSequenceClassification",qe]],["roberta",["RobertaForSequenceClassification",Ut]],["xlm",["XLMForSequenceClassification",Kt]],["xlm-roberta",["XLMRobertaForSequenceClassification",os]],["bart",["BartForSequenceClassification",Et]],["mbart",["MBartForSequenceClassification",jt]],["mobilebert",["MobileBertForSequenceClassification",tt]],["squeezebert",["SqueezeBertForSequenceClassification",pt]]]),$i=new 
Map([["bert",["BertForTokenClassification",te]],["roformer",["RoFormerForTokenClassification",ce]],["electra",["ElectraForTokenClassification",be]],["esm",["EsmForTokenClassification",Ye]],["convbert",["ConvBertForTokenClassification",he]],["camembert",["CamembertForTokenClassification",ve]],["deberta",["DebertaForTokenClassification",Ie]],["deberta-v2",["DebertaV2ForTokenClassification",Oe]],["mpnet",["MPNetForTokenClassification",it]],["distilbert",["DistilBertForTokenClassification",We]],["roberta",["RobertaForTokenClassification",Qt]],["xlm",["XLMForTokenClassification",Zt]],["xlm-roberta",["XLMRobertaForTokenClassification",ns]]]),Ui=new Map([["t5",["T5ForConditionalGeneration",bt]],["longt5",["LongT5ForConditionalGeneration",kt]],["mt5",["MT5ForConditionalGeneration",vt]],["bart",["BartForConditionalGeneration",At]],["mbart",["MBartForConditionalGeneration",zt]],["marian",["MarianMTModel",Qn]],["m2m_100",["M2M100ForConditionalGeneration",Jn]],["blenderbot",["BlenderbotForConditionalGeneration",Ot]],["blenderbot-small",["BlenderbotSmallForConditionalGeneration",Gt]]]),Qi=new 
Map([["bloom",["BloomForCausalLM",$r]],["gpt2",["GPT2LMHeadModel",qs]],["jais",["JAISLMHeadModel",Us]],["gptj",["GPTJForCausalLM",tr]],["gpt_bigcode",["GPTBigCodeForCausalLM",or]],["gpt_neo",["GPTNeoForCausalLM",Hs]],["gpt_neox",["GPTNeoXForCausalLM",Ks]],["codegen",["CodeGenForCausalLM",ir]],["llama",["LlamaForCausalLM",dr]],["olmo",["OlmoForCausalLM",fr]],["mobilellm",["MobileLLMForCausalLM",mr]],["granite",["GraniteForCausalLM",wr]],["cohere",["CohereForCausalLM",xr]],["gemma",["GemmaForCausalLM",yr]],["gemma2",["Gemma2ForCausalLM",Cr]],["openelm",["OpenELMForCausalLM",Er]],["qwen2",["Qwen2ForCausalLM",zr]],["phi",["PhiForCausalLM",Or]],["phi3",["Phi3ForCausalLM",Gr]],["mpt",["MptForCausalLM",Xr]],["opt",["OPTForCausalLM",Yr]],["mbart",["MBartForCausalLM",Nt]],["mistral",["MistralForCausalLM",Oa]],["starcoder2",["Starcoder2ForCausalLM",Ga]],["falcon",["FalconForCausalLM",$a]],["trocr",["TrOCRForCausalLM",Na]],["stablelm",["StableLmForCausalLM",oi]]]),Xi=new Map([["multi_modality",["MultiModalityCausalLM",vi]]]),Hi=new Map([["bert",["BertForMaskedLM",Z]],["roformer",["RoFormerForMaskedLM",ie]],["electra",["ElectraForMaskedLM",we]],["esm",["EsmForMaskedLM",He]],["convbert",["ConvBertForMaskedLM",me]],["camembert",["CamembertForMaskedLM",ye]],["deberta",["DebertaForMaskedLM",Ee]],["deberta-v2",["DebertaV2ForMaskedLM",Ve]],["mpnet",["MPNetForMaskedLM",nt]],["albert",["AlbertForMaskedLM",Mt]],["distilbert",["DistilBertForMaskedLM",Ue]],["roberta",["RobertaForMaskedLM",$t]],["xlm",["XLMWithLMHeadModel",Yt]],["xlm-roberta",["XLMRobertaForMaskedLM",rs]],["mobilebert",["MobileBertForMaskedLM",et]],["squeezebert",["SqueezeBertForMaskedLM",ut]]]),Ji=new 
Map([["bert",["BertForQuestionAnswering",se]],["roformer",["RoFormerForQuestionAnswering",de]],["electra",["ElectraForQuestionAnswering",xe]],["convbert",["ConvBertForQuestionAnswering",fe]],["camembert",["CamembertForQuestionAnswering",Ce]],["deberta",["DebertaForQuestionAnswering",ze]],["deberta-v2",["DebertaV2ForQuestionAnswering",De]],["mpnet",["MPNetForQuestionAnswering",lt]],["albert",["AlbertForQuestionAnswering",gt]],["distilbert",["DistilBertForQuestionAnswering",$e]],["roberta",["RobertaForQuestionAnswering",Xt]],["xlm",["XLMForQuestionAnswering",es]],["xlm-roberta",["XLMRobertaForQuestionAnswering",as]],["mobilebert",["MobileBertForQuestionAnswering",st]],["squeezebert",["SqueezeBertForQuestionAnswering",mt]]]),Yi=new Map([["vision-encoder-decoder",["VisionEncoderDecoderModel",ms]],["idefics3",["Idefics3ForConditionalGeneration",bs]]]),Ki=new Map([["llava",["LlavaForConditionalGeneration",hs]],["llava_onevision",["LlavaOnevisionForConditionalGeneration",fs]],["moondream1",["Moondream1ForConditionalGeneration",gs]],["florence2",["Florence2ForConditionalGeneration",ws]],["qwen2-vl",["Qwen2VLForConditionalGeneration",Nr]],["idefics3",["Idefics3ForConditionalGeneration",bs]]]),Zi=new Map([["vision-encoder-decoder",["VisionEncoderDecoderModel",ms]]]),el=new 
Map([["vit",["ViTForImageClassification",eo]],["pvt",["PvtForImageClassification",no]],["vit_msn",["ViTMSNForImageClassification",uo]],["fastvit",["FastViTForImageClassification",fo]],["mobilevit",["MobileViTForImageClassification",bo]],["mobilevitv2",["MobileViTV2ForImageClassification",ko]],["beit",["BeitForImageClassification",Io]],["deit",["DeiTForImageClassification",Jo]],["hiera",["HieraForImageClassification",Zo]],["convnext",["ConvNextForImageClassification",En]],["convnextv2",["ConvNextV2ForImageClassification",zn]],["dinov2",["Dinov2ForImageClassification",Vn]],["resnet",["ResNetForImageClassification",sn]],["swin",["SwinForImageClassification",nn]],["segformer",["SegformerForImageClassification",ei]],["efficientnet",["EfficientNetForImageClassification",ii]],["mobilenet_v1",["MobileNetV1ForImageClassification",_i]],["mobilenet_v2",["MobileNetV2ForImageClassification",gi]],["mobilenet_v3",["MobileNetV3ForImageClassification",Ti]],["mobilenet_v4",["MobileNetV4ForImageClassification",Pi]]]),tl=new Map([["detr",["DetrForObjectDetection",No]],["rt_detr",["RTDetrForObjectDetection",Go]],["table-transformer",["TableTransformerForObjectDetection",Uo]],["yolos",["YolosForObjectDetection",Dn]]]),sl=new Map([["owlvit",["OwlViTForObjectDetection",vo]],["owlv2",["Owlv2ForObjectDetection",Ao]]]),rl=new Map([["detr",["DetrForSegmentation",Vo]],["clipseg",["CLIPSegForImageSegmentation",Ds]]]),ol=new Map([["segformer",["SegformerForSemanticSegmentation",ti]],["sapiens",["SapiensForSemanticSegmentation",fn]]]),nl=new Map([["detr",["DetrForSegmentation",Vo]],["maskformer",["MaskFormerForInstanceSegmentation",Pn]]]),al=new Map([["sam",["SamModel",qn]]]),il=new Map([["wav2vec2",["Wav2Vec2ForCTC",Zn]],["wav2vec2-bert",["Wav2Vec2BertForCTC",Ma]],["unispeech",["UniSpeechForCTC",ca]],["unispeech-sat",["UniSpeechSatForCTC",ma]],["wavlm",["WavLMForCTC",Fa]],["hubert",["HubertForCTC",xa]]]),ll=new 
Map([["wav2vec2",["Wav2Vec2ForSequenceClassification",ea]],["wav2vec2-bert",["Wav2Vec2BertForSequenceClassification",wa]],["unispeech",["UniSpeechForSequenceClassification",da]],["unispeech-sat",["UniSpeechSatForSequenceClassification",_a]],["wavlm",["WavLMForSequenceClassification",va]],["hubert",["HubertForSequenceClassification",Pa]],["audio-spectrogram-transformer",["ASTForAudioClassification",cs]]]),cl=new Map([["wavlm",["WavLMForXVector",Ca]]]),dl=new Map([["unispeech-sat",["UniSpeechSatForAudioFrameClassification",ha]],["wavlm",["WavLMForAudioFrameClassification",Sa]],["wav2vec2",["Wav2Vec2ForAudioFrameClassification",ta]],["pyannote",["PyAnnoteForAudioFrameClassification",oa]]]),ul=new Map([["vitmatte",["VitMatteForImageMatting",Mo]]]),pl=new Map([["patchtst",["PatchTSTForPrediction",Ii]],["patchtsmixer",["PatchTSMixerForPrediction",Ni]]]),ml=new Map([["swin2sr",["Swin2SRForImageSuperResolution",cn]]]),_l=new Map([["dpt",["DPTForDepthEstimation",pn]],["depth_anything",["DepthAnythingForDepthEstimation",_n]],["glpn",["GLPNForDepthEstimation",Fn]],["sapiens",["SapiensForDepthEstimation",gn]],["depth_pro",["DepthProForDepthEstimation",Tn]]]),hl=new Map([["sapiens",["SapiensForNormalEstimation",Mn]]]),fl=new Map([["vitpose",["VitPoseForPoseEstimation",so]]]),gl=new Map([["clip",["CLIPVisionModelWithProjection",vs]],["siglip",["SiglipVisionModel",Es]],["jina_clip",["JinaCLIPVisionModel",Vs]]]),Ml=[[Bi,T],[Oi,b],[Di,k],[Wi,T],[$i,T],[Ui,x],[Ri,x],[Qi,k],[Xi,C],[Hi,T],[Ji,T],[Yi,P],[Ki,F],[el,T],[rl,T],[nl,T],[ol,T],[ul,T],[pl,T],[ml,T],[_l,T],[hl,T],[fl,T],[tl,T],[sl,T],[al,y],[il,T],[ll,T],[Gi,x],[qi,T],[cl,T],[dl,T],[gl,T]];for(const[e,t]of Ml)for(const[s,r]of e.values())S.set(s,t),E.set(r,s),A.set(s,r);const wl=[["MusicgenForConditionalGeneration",ui,v],["CLIPTextModelWithProjection",ys,T],["SiglipTextModel",As,T],["JinaCLIPTextModel",Ns,T],["ClapTextModelWithProjection",Xa,T],["ClapAudioModelWithProjection",Ha,T]];for(const[e,t,s]of 
wl)S.set(e,s),E.set(t,e),A.set(e,t);class Tl extends Vi{static MODEL_CLASS_MAPPINGS=Ml.map((e=>e[0]));static BASE_IF_FAIL=!0}class bl extends Vi{static MODEL_CLASS_MAPPINGS=[Wi]}class xl extends Vi{static MODEL_CLASS_MAPPINGS=[$i]}class Pl extends Vi{static MODEL_CLASS_MAPPINGS=[Ui]}class kl extends Vi{static MODEL_CLASS_MAPPINGS=[Ri]}class yl extends Vi{static MODEL_CLASS_MAPPINGS=[Gi]}class Fl extends Vi{static MODEL_CLASS_MAPPINGS=[qi]}class vl extends Vi{static MODEL_CLASS_MAPPINGS=[Qi]}class Cl extends Vi{static MODEL_CLASS_MAPPINGS=[Hi]}class Sl extends Vi{static MODEL_CLASS_MAPPINGS=[Ji]}class Al extends Vi{static MODEL_CLASS_MAPPINGS=[Yi]}class El extends Vi{static MODEL_CLASS_MAPPINGS=[el]}class Ll extends Vi{static MODEL_CLASS_MAPPINGS=[rl]}class Il extends Vi{static MODEL_CLASS_MAPPINGS=[ol]}class zl extends Vi{static MODEL_CLASS_MAPPINGS=[nl]}class jl extends Vi{static MODEL_CLASS_MAPPINGS=[tl]}class Nl extends Vi{static MODEL_CLASS_MAPPINGS=[sl]}class Vl extends Vi{static MODEL_CLASS_MAPPINGS=[al]}class Bl extends Vi{static MODEL_CLASS_MAPPINGS=[il]}class Ol extends Vi{static MODEL_CLASS_MAPPINGS=[ll]}class Dl extends Vi{static MODEL_CLASS_MAPPINGS=[cl]}class Rl extends Vi{static MODEL_CLASS_MAPPINGS=[dl]}class Gl extends Vi{static MODEL_CLASS_MAPPINGS=[Zi]}class ql extends Vi{static MODEL_CLASS_MAPPINGS=[ul]}class Wl extends Vi{static MODEL_CLASS_MAPPINGS=[ml]}class $l extends Vi{static MODEL_CLASS_MAPPINGS=[_l]}class Ul extends Vi{static MODEL_CLASS_MAPPINGS=[hl]}class Ql extends Vi{static MODEL_CLASS_MAPPINGS=[fl]}class Xl extends Vi{static MODEL_CLASS_MAPPINGS=[gl]}class Hl extends H{constructor({logits:e,past_key_values:t,encoder_outputs:s,decoder_attentions:r=null,cross_attentions:o=null}){super(),this.logits=e,this.past_key_values=t,this.encoder_outputs=s,this.decoder_attentions=r,this.cross_attentions=o}}class Jl extends H{constructor({logits:e}){super(),this.logits=e}}class Yl extends 
H{constructor({logits:e,embeddings:t}){super(),this.logits=e,this.embeddings=t}}class Kl extends H{constructor({logits:e}){super(),this.logits=e}}class Zl extends H{constructor({logits:e}){super(),this.logits=e}}class ec extends H{constructor({start_logits:e,end_logits:t}){super(),this.start_logits=e,this.end_logits=t}}class tc extends H{constructor({logits:e}){super(),this.logits=e}}class sc extends H{constructor({logits:e,past_key_values:t}){super(),this.logits=e,this.past_key_values=t}}class rc extends H{constructor({alphas:e}){super(),this.alphas=e}}class oc extends H{constructor({waveform:e,spectrogram:t}){super(),this.waveform=e,this.spectrogram=t}}},"./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js":
122
- /*!******************************************************************************************************!*\
123
- !*** ./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js ***!
124
- \******************************************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{ASTFeatureExtractor:()=>n});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=(s(/*! ../../utils/tensor.js */"./src/utils/tensor.js"),s(/*! ../../utils/audio.js */"./src/utils/audio.js"));class n extends r.FeatureExtractor{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,o.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,o.window_function)(400,"hann",{periodic:!1}),this.mean=this.config.mean,this.std=this.config.std}async _extract_fbank_features(e,t){return(0,o.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,max_num_frames:t,transpose:!0})}async _call(e){(0,r.validate_audio_inputs)(e,"ASTFeatureExtractor");const t=await this._extract_fbank_features(e,this.config.max_length);if(this.config.do_normalize){const e=2*this.std,s=t.data;for(let t=0;t<s.length;++t)s[t]=(s[t]-this.mean)/e}return{input_values:t.unsqueeze_(0)}}}},"./src/models/auto/feature_extraction_auto.js":
125
- /*!****************************************************!*\
126
- !*** ./src/models/auto/feature_extraction_auto.js ***!
127
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{AutoFeatureExtractor:()=>a});var r=s(/*! ../../utils/constants.js */"./src/utils/constants.js"),o=s(/*! ../../utils/hub.js */"./src/utils/hub.js"),n=(s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),s(/*! ../feature_extractors.js */"./src/models/feature_extractors.js"));class a{static async from_pretrained(e,t={}){const s=await(0,o.getModelJSON)(e,r.FEATURE_EXTRACTOR_NAME,!0,t),a=s.feature_extractor_type,i=n[a];if(!i)throw new Error(`Unknown feature_extractor_type: '${a}'. Please report this at ${r.GITHUB_ISSUE_URL}.`);return new i(s)}}},"./src/models/auto/image_processing_auto.js":
128
- /*!**************************************************!*\
129
- !*** ./src/models/auto/image_processing_auto.js ***!
130
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{AutoImageProcessor:()=>i});var r=s(/*! ../../utils/constants.js */"./src/utils/constants.js"),o=s(/*! ../../utils/hub.js */"./src/utils/hub.js"),n=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js"),a=s(/*! ../image_processors.js */"./src/models/image_processors.js");class i{static async from_pretrained(e,t={}){const s=await(0,o.getModelJSON)(e,r.IMAGE_PROCESSOR_NAME,!0,t),i=s.image_processor_type??s.feature_extractor_type;let l=a[i];return l||(void 0!==i&&console.warn(`Image processor type '${i}' not found, assuming base ImageProcessor. Please report this at ${r.GITHUB_ISSUE_URL}.`),l=n.ImageProcessor),new l(s)}}},"./src/models/auto/processing_auto.js":
131
- /*!********************************************!*\
132
- !*** ./src/models/auto/processing_auto.js ***!
133
- \********************************************/(e,t,s)=>{s.r(t),s.d(t,{AutoProcessor:()=>c});var r=s(/*! ../../utils/constants.js */"./src/utils/constants.js"),o=s(/*! ../../utils/hub.js */"./src/utils/hub.js"),n=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),a=s(/*! ../processors.js */"./src/models/processors.js"),i=s(/*! ../image_processors.js */"./src/models/image_processors.js"),l=s(/*! ../feature_extractors.js */"./src/models/feature_extractors.js");class c{static async from_pretrained(e,t={}){const s=await(0,o.getModelJSON)(e,r.IMAGE_PROCESSOR_NAME,!0,t),{image_processor_type:c,feature_extractor_type:d,processor_class:u}=s;if(u&&a[u])return a[u].from_pretrained(e,t);if(!c&&!d)throw new Error("No `image_processor_type` or `feature_extractor_type` found in the config.");const p={};if(c){const e=i[c];if(!e)throw new Error(`Unknown image_processor_type: '${c}'.`);p.image_processor=new e(s)}if(d){const e=i[d];if(e)p.image_processor=new e(s);else{const e=l[d];if(!e)throw new Error(`Unknown feature_extractor_type: '${d}'.`);p.feature_extractor=new e(s)}}return new n.Processor({},p)}}},"./src/models/beit/image_processing_beit.js":
134
- /*!**************************************************!*\
135
- !*** ./src/models/beit/image_processing_beit.js ***!
136
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{BeitFeatureExtractor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/bit/image_processing_bit.js":
137
- /*!************************************************!*\
138
- !*** ./src/models/bit/image_processing_bit.js ***!
139
- \************************************************/(e,t,s)=>{s.r(t),s.d(t,{BitImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/chinese_clip/image_processing_chinese_clip.js":
140
- /*!******************************************************************!*\
141
- !*** ./src/models/chinese_clip/image_processing_chinese_clip.js ***!
142
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{ChineseCLIPFeatureExtractor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/clap/feature_extraction_clap.js":
143
- /*!****************************************************!*\
144
- !*** ./src/models/clap/feature_extraction_clap.js ***!
145
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{ClapFeatureExtractor:()=>n});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=(s(/*! ../../utils/tensor.js */"./src/utils/tensor.js"),s(/*! ../../utils/audio.js */"./src/utils/audio.js"));class n extends r.FeatureExtractor{constructor(e){super(e),this.mel_filters=(0,o.mel_filter_bank)(this.config.nb_frequency_bins,this.config.feature_size,this.config.frequency_min,this.config.frequency_max,this.config.sampling_rate,null,"htk"),this.mel_filters_slaney=(0,o.mel_filter_bank)(this.config.nb_frequency_bins,this.config.feature_size,this.config.frequency_min,this.config.frequency_max,this.config.sampling_rate,"slaney","slaney"),this.window=(0,o.window_function)(this.config.fft_window_size,"hann")}async _get_input_mel(e,t,s,r){let o,n=!1;const a=e.length-t;if(a>0){if("rand_trunc"!==s)throw new Error(`Truncation strategy "${s}" not implemented`);{n=!0;const s=Math.floor(Math.random()*(a+1));e=e.subarray(s,s+t),o=await this._extract_fbank_features(e,this.mel_filters_slaney,this.config.nb_max_samples)}}else{if(a<0){let s=new Float64Array(t);if(s.set(e),"repeat"===r)for(let r=e.length;r<t;r+=e.length)s.set(e.subarray(0,Math.min(e.length,t-r)),r);else if("repeatpad"===r)for(let t=e.length;t<-a;t+=e.length)s.set(e,t);e=s}if("fusion"===s)throw new Error(`Truncation strategy "${s}" not implemented`);o=await this._extract_fbank_features(e,this.mel_filters_slaney,this.config.nb_max_samples)}return o.unsqueeze_(0)}async _extract_fbank_features(e,t,s=null){return(0,o.spectrogram)(e,this.window,this.config.fft_window_size,this.config.hop_length,{power:2,mel_filters:t,log_mel:"dB",max_num_frames:s,do_pad:!1,transpose:!0})}async _call(e,{max_length:t=null}={}){(0,r.validate_audio_inputs)(e,"ClapFeatureExtractor");return{input_features:(await 
this._get_input_mel(e,t??this.config.nb_max_samples,this.config.truncation,this.config.padding)).unsqueeze_(0)}}}},"./src/models/clip/image_processing_clip.js":
146
- /*!**************************************************!*\
147
- !*** ./src/models/clip/image_processing_clip.js ***!
148
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{CLIPFeatureExtractor:()=>n,CLIPImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/convnext/image_processing_convnext.js":
149
- /*!**********************************************************!*\
150
- !*** ./src/models/convnext/image_processing_convnext.js ***!
151
- \**********************************************************/(e,t,s)=>{s.r(t),s.d(t,{ConvNextFeatureExtractor:()=>n,ConvNextImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){super(e),this.crop_pct=this.config.crop_pct??.875}async resize(e){const t=this.size?.shortest_edge;if(void 0===t)throw new Error("Size dictionary must contain 'shortest_edge' key.");if(t<384){const s=Math.floor(t/this.crop_pct),[r,o]=this.get_resize_output_image_size(e,{shortest_edge:s});e=await e.resize(r,o,{resample:this.resample}),e=await e.center_crop(t,t)}else e=await e.resize(t,t,{resample:this.resample});return e}}class n extends o{}},"./src/models/deit/image_processing_deit.js":
152
- /*!**************************************************!*\
153
- !*** ./src/models/deit/image_processing_deit.js ***!
154
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{DeiTFeatureExtractor:()=>n,DeiTImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/detr/image_processing_detr.js":
155
- /*!**************************************************!*\
156
- !*** ./src/models/detr/image_processing_detr.js ***!
157
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{DetrFeatureExtractor:()=>a,DetrImageProcessor:()=>n});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class n extends r.ImageProcessor{async _call(e){const t=await super._call(e),s=[t.pixel_values.dims[0],64,64],r=(0,o.full)(s,1n);return{...t,pixel_mask:r}}post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}post_process_panoptic_segmentation(...e){return(0,r.post_process_panoptic_segmentation)(...e)}post_process_instance_segmentation(...e){return(0,r.post_process_instance_segmentation)(...e)}}class a extends n{}},"./src/models/donut/image_processing_donut.js":
158
- /*!****************************************************!*\
159
- !*** ./src/models/donut/image_processing_donut.js ***!
160
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{DonutFeatureExtractor:()=>n,DonutImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{pad_image(e,t,s,r={}){const[o,n,a]=t;let i=this.image_mean;Array.isArray(this.image_mean)||(i=new Array(a).fill(i));let l=this.image_std;Array.isArray(l)||(l=new Array(a).fill(i));const c=i.map(((e,t)=>-e/l[t]));return super.pad_image(e,t,s,{center:!0,constant_values:c,...r})}}class n extends o{}},"./src/models/dpt/image_processing_dpt.js":
161
- /*!************************************************!*\
162
- !*** ./src/models/dpt/image_processing_dpt.js ***!
163
- \************************************************/(e,t,s)=>{s.r(t),s.d(t,{DPTFeatureExtractor:()=>n,DPTImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/efficientnet/image_processing_efficientnet.js":
164
- /*!******************************************************************!*\
165
- !*** ./src/models/efficientnet/image_processing_efficientnet.js ***!
166
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{EfficientNetImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){super(e),this.include_top=this.config.include_top??!0,this.include_top&&(this.image_std=this.image_std.map((e=>e*e)))}}},"./src/models/feature_extractors.js":
167
- /*!******************************************!*\
168
- !*** ./src/models/feature_extractors.js ***!
169
- \******************************************/(e,t,s)=>{s.r(t),s.d(t,{ASTFeatureExtractor:()=>r.ASTFeatureExtractor,ClapFeatureExtractor:()=>o.ClapFeatureExtractor,ImageFeatureExtractor:()=>u.ImageProcessor,PyAnnoteFeatureExtractor:()=>n.PyAnnoteFeatureExtractor,SeamlessM4TFeatureExtractor:()=>a.SeamlessM4TFeatureExtractor,SpeechT5FeatureExtractor:()=>i.SpeechT5FeatureExtractor,Wav2Vec2FeatureExtractor:()=>l.Wav2Vec2FeatureExtractor,WeSpeakerFeatureExtractor:()=>c.WeSpeakerFeatureExtractor,WhisperFeatureExtractor:()=>d.WhisperFeatureExtractor});var r=s(/*! ./audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js */"./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js"),o=s(/*! ./clap/feature_extraction_clap.js */"./src/models/clap/feature_extraction_clap.js"),n=s(/*! ./pyannote/feature_extraction_pyannote.js */"./src/models/pyannote/feature_extraction_pyannote.js"),a=s(/*! ./seamless_m4t/feature_extraction_seamless_m4t.js */"./src/models/seamless_m4t/feature_extraction_seamless_m4t.js"),i=s(/*! ./speecht5/feature_extraction_speecht5.js */"./src/models/speecht5/feature_extraction_speecht5.js"),l=s(/*! ./wav2vec2/feature_extraction_wav2vec2.js */"./src/models/wav2vec2/feature_extraction_wav2vec2.js"),c=s(/*! ./wespeaker/feature_extraction_wespeaker.js */"./src/models/wespeaker/feature_extraction_wespeaker.js"),d=s(/*! ./whisper/feature_extraction_whisper.js */"./src/models/whisper/feature_extraction_whisper.js"),u=s(/*! ../base/image_processors_utils.js */"./src/base/image_processors_utils.js")},"./src/models/florence2/processing_florence2.js":
170
- /*!******************************************************!*\
171
- !*** ./src/models/florence2/processing_florence2.js ***!
172
- \******************************************************/(e,t,s)=>{s.r(t),s.d(t,{Florence2Processor:()=>a});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js");class a extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;constructor(e,t){super(e,t);const{tasks_answer_post_processing_type:s,task_prompts_without_inputs:r,task_prompts_with_input:o}=this.image_processor.config;this.tasks_answer_post_processing_type=new Map(Object.entries(s??{})),this.task_prompts_without_inputs=new Map(Object.entries(r??{})),this.task_prompts_with_input=new Map(Object.entries(o??{})),this.regexes={quad_boxes:/(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm,bboxes:/([^<]+)?<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm},this.size_per_bin=1e3}construct_prompts(e){"string"==typeof e&&(e=[e]);const t=[];for(const s of e)if(this.task_prompts_without_inputs.has(s))t.push(this.task_prompts_without_inputs.get(s));else{for(const[e,r]of this.task_prompts_with_input)if(s.includes(e)){t.push(r.replaceAll("{input}",s).replaceAll(e,""));break}t.length!==e.length&&t.push(s)}return t}post_process_generation(e,t,s){const r=this.tasks_answer_post_processing_type.get(t)??"pure_text";let o;switch(e=e.replaceAll("<s>","").replaceAll("</s>",""),r){case"pure_text":o=e;break;case"description_with_bboxes":case"bboxes":case"phrase_grounding":case"ocr":const n="ocr"===r?"quad_boxes":"bboxes",a=e.matchAll(this.regexes[n]),i=[],l=[];for(const[e,t,...r]of a)i.push(t?t.trim():i.at(-1)??""),l.push(r.map(((e,t)=>(Number(e)+.5)/this.size_per_bin*s[t%2])));o={labels:i,[n]:l};break;default:throw new Error(`Task "${t}" (of type "${r}") not yet implemented.`)}return{[t]:o}}async _call(e,t=null,s={}){if(!e&&!t)throw new Error("Either text or images must 
be provided");return{...await this.image_processor(e,s),...t?this.tokenizer(t,s):{}}}}},"./src/models/glpn/image_processing_glpn.js":
173
- /*!**************************************************!*\
174
- !*** ./src/models/glpn/image_processing_glpn.js ***!
175
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{GLPNFeatureExtractor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/idefics3/image_processing_idefics3.js":
176
- /*!**********************************************************!*\
177
- !*** ./src/models/idefics3/image_processing_idefics3.js ***!
178
- \**********************************************************/(e,t,s)=>{s.r(t),s.d(t,{Idefics3ImageProcessor:()=>n});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class n extends r.ImageProcessor{constructor(e){super(e),this.do_image_splitting=e.do_image_splitting??!0,this.max_image_size=e.max_image_size}get_resize_for_vision_encoder(e,t){let[s,r]=e.dims.slice(-2);const o=r/s;return r>=s?(r=Math.ceil(r/t)*t,s=Math.floor(r/o),s=Math.ceil(s/t)*t):(s=Math.ceil(s/t)*t,r=Math.floor(s*o),r=Math.ceil(r/t)*t),{height:s,width:r}}async _call(e,{do_image_splitting:t=null,return_row_col_info:s=!1}={}){let r;if(Array.isArray(e)){if(0===e.length||!e[0])throw new Error("No images provided.");r=Array.isArray(e[0])?e:[e]}else r=[[e]];let n=[],a=[],i=[];const l=[],c=[];for(const e of r){let s=await Promise.all(e.map((e=>this.preprocess(e))));l.push(...s.map((e=>e.original_size))),c.push(...s.map((e=>e.reshaped_input_size))),s.forEach((e=>e.pixel_values.unsqueeze_(0)));const{longest_edge:r}=this.max_image_size;let d;if(t??this.do_image_splitting){let e=new Array(s.length),t=new Array(s.length);d=await Promise.all(s.map((async(s,n)=>{const a=this.get_resize_for_vision_encoder(s.pixel_values,r),i=await(0,o.interpolate_4d)(s.pixel_values,{size:[a.height,a.width]}),{frames:l,num_splits_h:c,num_splits_w:d}=await this.split_image(i,this.max_image_size);return e[n]=c,t[n]=d,(0,o.cat)(l,0)}))),a.push(e),i.push(t)}else{const e=[r,r];d=await Promise.all(s.map((t=>(0,o.interpolate_4d)(t.pixel_values,{size:e})))),a.push(new Array(s.length).fill(0)),i.push(new Array(s.length).fill(0))}n.push((0,o.cat)(d,0))}const d=n.length,[u,p,m,_]=n[0].dims;let h,f;if(1===d)h=n[0].unsqueeze_(0),f=(0,o.full)([d,u,m,_],!0);else{const e=Math.max(...n.map((e=>e.dims.at(0))));f=(0,o.full)([d,e,m,_],!0);const t=f.data,s=e*m*_;for(let r=0;r<d;++r){const 
a=n[r].dims[0];if(a<e){n[r]=(0,o.cat)([n[r],(0,o.full)([e-a,p,m,_],0)],0);const i=r*s+a*m*_,l=(r+1)*s;t.fill(!1,i,l)}}h=(0,o.stack)(n,0)}return{pixel_values:h,pixel_attention_mask:f,original_sizes:l,reshaped_input_sizes:c,...s?{rows:a,cols:i}:{}}}async split_image(e,{longest_edge:t}){const s=t,r=t,n=[],[a,i]=e.dims.slice(-2);let l=0,c=0;if(a>s||i>r){l=Math.ceil(a/s),c=Math.ceil(i/r);const t=Math.ceil(a/l),d=Math.ceil(i/c);for(let s=0;s<l;s++)for(let r=0;r<c;r++){const o=r*d,l=s*t,c=Math.min(o+d,i),u=Math.min(l+t,a);n.push(e.slice(null,null,[l,u],[o,c]))}const u=s,p=r;a===u&&i===p||(e=await(0,o.interpolate_4d)(e,{size:[u,p]}))}return n.push(e),{frames:n,num_splits_h:l,num_splits_w:c}}}},"./src/models/idefics3/processing_idefics3.js":
179
- /*!****************************************************!*\
180
- !*** ./src/models/idefics3/processing_idefics3.js ***!
181
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{Idefics3Processor:()=>l});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js"),a=(s(/*! ../../utils/image.js */"./src/utils/image.js"),s(/*! ../../utils/core.js */"./src/utils/core.js"));function i(e,t,s,r,o,n){return 0===e&&0===t?function(e,t,s,r){return`${t}${r}`+s.repeat(e)+`${t}`}(s,r,o,n):function(e,t,s,r,o,n){let a="";for(let n=0;n<t;++n){for(let t=0;t<s;++t)a+=r+`<row_${n+1}_col_${t+1}>`+o.repeat(e);a+="\n"}return a+=`\n${r}${n}`+o.repeat(e)+`${r}`,a}(s,e,t,r,o,n)}class l extends r.Processor{static image_processor_class=o.AutoImageProcessor;static tokenizer_class=n.AutoTokenizer;static uses_processor_config=!0;fake_image_token="<fake_token_around_image>";image_token="<image>";global_img_token="<global-img>";async _call(e,t=null,s={}){let r;s.return_row_col_info??=!0,t&&(r=await this.image_processor(t,s)),Array.isArray(e)||(e=[e]);const o=r.rows??[new Array(e.length).fill(0)],n=r.cols??[new Array(e.length).fill(0)],l=this.config.image_seq_len,c=[],d=[];for(let t=0;t<e.length;++t){const s=e[t],r=o[t],u=n[t];c.push((0,a.count)(s,this.image_token));const p=r.map(((e,t)=>i(e,u[t],l,this.fake_image_token,this.image_token,this.global_img_token))),m=s.split(this.image_token);if(0===m.length)throw new Error("The image token should be present in the text.");let _=m[0];for(let e=0;e<p.length;++e)_+=p[e]+m[e+1];d.push(_)}return{...this.tokenizer(d),...r}}}},"./src/models/image_processors.js":
182
- /*!****************************************!*\
183
- !*** ./src/models/image_processors.js ***!
184
- \****************************************/(e,t,s)=>{s.r(t),s.d(t,{BeitFeatureExtractor:()=>r.BeitFeatureExtractor,BitImageProcessor:()=>o.BitImageProcessor,CLIPFeatureExtractor:()=>a.CLIPFeatureExtractor,CLIPImageProcessor:()=>a.CLIPImageProcessor,ChineseCLIPFeatureExtractor:()=>n.ChineseCLIPFeatureExtractor,ConvNextFeatureExtractor:()=>i.ConvNextFeatureExtractor,ConvNextImageProcessor:()=>i.ConvNextImageProcessor,DPTFeatureExtractor:()=>u.DPTFeatureExtractor,DPTImageProcessor:()=>u.DPTImageProcessor,DeiTFeatureExtractor:()=>l.DeiTFeatureExtractor,DeiTImageProcessor:()=>l.DeiTImageProcessor,DetrFeatureExtractor:()=>c.DetrFeatureExtractor,DetrImageProcessor:()=>c.DetrImageProcessor,DonutFeatureExtractor:()=>d.DonutFeatureExtractor,DonutImageProcessor:()=>d.DonutImageProcessor,EfficientNetImageProcessor:()=>p.EfficientNetImageProcessor,GLPNFeatureExtractor:()=>m.GLPNFeatureExtractor,Idefics3ImageProcessor:()=>_.Idefics3ImageProcessor,JinaCLIPImageProcessor:()=>f.JinaCLIPImageProcessor,LlavaOnevisionImageProcessor:()=>g.LlavaOnevisionImageProcessor,Mask2FormerImageProcessor:()=>M.Mask2FormerImageProcessor,MaskFormerFeatureExtractor:()=>w.MaskFormerFeatureExtractor,MaskFormerImageProcessor:()=>w.MaskFormerImageProcessor,MobileNetV1FeatureExtractor:()=>T.MobileNetV1FeatureExtractor,MobileNetV1ImageProcessor:()=>T.MobileNetV1ImageProcessor,MobileNetV2FeatureExtractor:()=>b.MobileNetV2FeatureExtractor,MobileNetV2ImageProcessor:()=>b.MobileNetV2ImageProcessor,MobileNetV3FeatureExtractor:()=>x.MobileNetV3FeatureExtractor,MobileNetV3ImageProcessor:()=>x.MobileNetV3ImageProcessor,MobileNetV4FeatureExtractor:()=>P.MobileNetV4FeatureExtractor,MobileNetV4ImageProcessor:()=>P.MobileNetV4ImageProcessor,MobileViTFeatureExtractor:()=>k.MobileViTFeatureExtractor,MobileViTImageProcessor:()=>k.MobileViTImageProcessor,NougatImageProcessor:()=>y.NougatImageProcessor,OwlViTFeatureExtractor:()=>v.OwlViTFeatureExtractor,OwlViTImageProcessor:()=>v.OwlViTImageProcessor,Owlv2ImageProcessor:()
=>F.Owlv2ImageProcessor,PvtImageProcessor:()=>C.PvtImageProcessor,Qwen2VLImageProcessor:()=>S.Qwen2VLImageProcessor,RTDetrImageProcessor:()=>A.RTDetrImageProcessor,SamImageProcessor:()=>E.SamImageProcessor,SegformerFeatureExtractor:()=>L.SegformerFeatureExtractor,SegformerImageProcessor:()=>L.SegformerImageProcessor,SiglipImageProcessor:()=>I.SiglipImageProcessor,Swin2SRImageProcessor:()=>z.Swin2SRImageProcessor,VLMImageProcessor:()=>h.VLMImageProcessor,ViTFeatureExtractor:()=>j.ViTFeatureExtractor,ViTImageProcessor:()=>j.ViTImageProcessor,VitMatteImageProcessor:()=>N.VitMatteImageProcessor,VitPoseImageProcessor:()=>V.VitPoseImageProcessor,YolosFeatureExtractor:()=>B.YolosFeatureExtractor,YolosImageProcessor:()=>B.YolosImageProcessor});var r=s(/*! ./beit/image_processing_beit.js */"./src/models/beit/image_processing_beit.js"),o=s(/*! ./bit/image_processing_bit.js */"./src/models/bit/image_processing_bit.js"),n=s(/*! ./chinese_clip/image_processing_chinese_clip.js */"./src/models/chinese_clip/image_processing_chinese_clip.js"),a=s(/*! ./clip/image_processing_clip.js */"./src/models/clip/image_processing_clip.js"),i=s(/*! ./convnext/image_processing_convnext.js */"./src/models/convnext/image_processing_convnext.js"),l=s(/*! ./deit/image_processing_deit.js */"./src/models/deit/image_processing_deit.js"),c=s(/*! ./detr/image_processing_detr.js */"./src/models/detr/image_processing_detr.js"),d=s(/*! ./donut/image_processing_donut.js */"./src/models/donut/image_processing_donut.js"),u=s(/*! ./dpt/image_processing_dpt.js */"./src/models/dpt/image_processing_dpt.js"),p=s(/*! ./efficientnet/image_processing_efficientnet.js */"./src/models/efficientnet/image_processing_efficientnet.js"),m=s(/*! ./glpn/image_processing_glpn.js */"./src/models/glpn/image_processing_glpn.js"),_=s(/*! ./idefics3/image_processing_idefics3.js */"./src/models/idefics3/image_processing_idefics3.js"),h=s(/*! ./janus/image_processing_janus.js */"./src/models/janus/image_processing_janus.js"),f=s(/*! 
./jina_clip/image_processing_jina_clip.js */"./src/models/jina_clip/image_processing_jina_clip.js"),g=s(/*! ./llava_onevision/image_processing_llava_onevision.js */"./src/models/llava_onevision/image_processing_llava_onevision.js"),M=s(/*! ./mask2former/image_processing_mask2former.js */"./src/models/mask2former/image_processing_mask2former.js"),w=s(/*! ./maskformer/image_processing_maskformer.js */"./src/models/maskformer/image_processing_maskformer.js"),T=s(/*! ./mobilenet_v1/image_processing_mobilenet_v1.js */"./src/models/mobilenet_v1/image_processing_mobilenet_v1.js"),b=s(/*! ./mobilenet_v2/image_processing_mobilenet_v2.js */"./src/models/mobilenet_v2/image_processing_mobilenet_v2.js"),x=s(/*! ./mobilenet_v3/image_processing_mobilenet_v3.js */"./src/models/mobilenet_v3/image_processing_mobilenet_v3.js"),P=s(/*! ./mobilenet_v4/image_processing_mobilenet_v4.js */"./src/models/mobilenet_v4/image_processing_mobilenet_v4.js"),k=s(/*! ./mobilevit/image_processing_mobilevit.js */"./src/models/mobilevit/image_processing_mobilevit.js"),y=s(/*! ./nougat/image_processing_nougat.js */"./src/models/nougat/image_processing_nougat.js"),F=s(/*! ./owlv2/image_processing_owlv2.js */"./src/models/owlv2/image_processing_owlv2.js"),v=s(/*! ./owlvit/image_processing_owlvit.js */"./src/models/owlvit/image_processing_owlvit.js"),C=s(/*! ./pvt/image_processing_pvt.js */"./src/models/pvt/image_processing_pvt.js"),S=s(/*! ./qwen2_vl/image_processing_qwen2_vl.js */"./src/models/qwen2_vl/image_processing_qwen2_vl.js"),A=s(/*! ./rt_detr/image_processing_rt_detr.js */"./src/models/rt_detr/image_processing_rt_detr.js"),E=s(/*! ./sam/image_processing_sam.js */"./src/models/sam/image_processing_sam.js"),L=s(/*! ./segformer/image_processing_segformer.js */"./src/models/segformer/image_processing_segformer.js"),I=s(/*! ./siglip/image_processing_siglip.js */"./src/models/siglip/image_processing_siglip.js"),z=s(/*! 
./swin2sr/image_processing_swin2sr.js */"./src/models/swin2sr/image_processing_swin2sr.js"),j=s(/*! ./vit/image_processing_vit.js */"./src/models/vit/image_processing_vit.js"),N=s(/*! ./vitmatte/image_processing_vitmatte.js */"./src/models/vitmatte/image_processing_vitmatte.js"),V=s(/*! ./vitpose/image_processing_vitpose.js */"./src/models/vitpose/image_processing_vitpose.js"),B=s(/*! ./yolos/image_processing_yolos.js */"./src/models/yolos/image_processing_yolos.js")},"./src/models/janus/image_processing_janus.js":
185
- /*!****************************************************!*\
186
- !*** ./src/models/janus/image_processing_janus.js ***!
187
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{VLMImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){super({do_pad:!0,pad_size:{width:e.image_size,height:e.image_size},...e}),this.constant_values=this.config.background_color.map((e=>e*this.rescale_factor))}pad_image(e,t,s,r){return super.pad_image(e,t,s,{constant_values:this.constant_values,center:!0,...r})}}},"./src/models/janus/processing_janus.js":
188
- /*!**********************************************!*\
189
- !*** ./src/models/janus/processing_janus.js ***!
190
- \**********************************************/(e,t,s)=>{s.r(t),s.d(t,{VLChatProcessor:()=>c});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js"),a=s(/*! ../../utils/core.js */"./src/utils/core.js"),i=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js"),l=s(/*! ../../utils/image.js */"./src/utils/image.js");class c extends r.Processor{static image_processor_class=o.AutoImageProcessor;static tokenizer_class=n.AutoTokenizer;static uses_processor_config=!0;constructor(e,t){super(e,t),this.image_tag=this.config.image_tag,this.image_start_tag=this.config.image_start_tag,this.image_end_tag=this.config.image_end_tag,this.num_image_tokens=this.config.num_image_tokens}async _call(e,{images:t=null,chat_template:s="default"}={}){t?Array.isArray(t)||(t=[t]):t=await Promise.all(e.filter((e=>e.images)).flatMap((e=>e.images)).map((e=>l.RawImage.read(e))));const r=this.tokenizer,o=e=>r.encode(e,{add_special_tokens:!1}),n=r.apply_chat_template(e,{tokenize:!1,add_generation_prompt:!0,chat_template:s}).split(this.image_tag),c=n.length-1;if(t.length!==c)throw new Error(`Number of images provided (${t.length}) does not match number of "${this.image_tag}" image tags (${c})`);const[d,u,p]=r.model.convert_tokens_to_ids([this.image_tag,this.image_start_tag,this.image_end_tag]);let m=o(n[0]),_=new Array(m.length).fill(!1);for(let e=1;e<n.length;++e){const t=new Array(this.num_image_tokens).fill(d),s=o(n[e]);m=(0,a.mergeArrays)(m,[u],t,[p],s);const r=new Array(this.num_image_tokens).fill(!0);_=(0,a.mergeArrays)(_,[!1],r,[!1],new Array(s.length).fill(!1))}const h=[1,m.length],f={input_ids:new i.Tensor("int64",m,h),attention_mask:new i.Tensor("int64",new Array(m.length).fill(1),h),images_seq_mask:new i.Tensor("bool",_,h),images_emb_mask:new i.Tensor("bool",new 
Array(c*this.num_image_tokens).fill(!0),[1,c,this.num_image_tokens])};if(t&&t.length>0){const e=await this.image_processor(t);return e.pixel_values.unsqueeze_(0),{...f,...e}}return f}}},"./src/models/jina_clip/image_processing_jina_clip.js":
191
- /*!************************************************************!*\
192
- !*** ./src/models/jina_clip/image_processing_jina_clip.js ***!
193
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{JinaCLIPImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){const{resize_mode:t,fill_color:s,interpolation:r,size:o,...n}=e;super({...n,size:"squash"===t?{width:o,height:o}:"shortest"===t?{shortest_edge:o}:{longest_edge:o},resample:"bicubic"===r?3:2,do_center_crop:!0,crop_size:o,do_normalize:!0})}}},"./src/models/jina_clip/processing_jina_clip.js":
194
- /*!******************************************************!*\
195
- !*** ./src/models/jina_clip/processing_jina_clip.js ***!
196
- \******************************************************/(e,t,s)=>{s.r(t),s.d(t,{JinaCLIPProcessor:()=>a});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js");class a extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;async _call(e=null,t=null,s={}){if(!e&&!t)throw new Error("Either text or images must be provided");return{...e?this.tokenizer(e,s):{},...t?await this.image_processor(t,s):{}}}}},"./src/models/llava_onevision/image_processing_llava_onevision.js":
197
- /*!************************************************************************!*\
198
- !*** ./src/models/llava_onevision/image_processing_llava_onevision.js ***!
199
- \************************************************************************/(e,t,s)=>{s.r(t),s.d(t,{LlavaOnevisionImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/mask2former/image_processing_mask2former.js":
200
- /*!****************************************************************!*\
201
- !*** ./src/models/mask2former/image_processing_mask2former.js ***!
202
- \****************************************************************/(e,t,s)=>{s.r(t),s.d(t,{Mask2FormerImageProcessor:()=>o});var r=s(/*! ../maskformer/image_processing_maskformer.js */"./src/models/maskformer/image_processing_maskformer.js");class o extends r.MaskFormerImageProcessor{}},"./src/models/maskformer/image_processing_maskformer.js":
203
- /*!**************************************************************!*\
204
- !*** ./src/models/maskformer/image_processing_maskformer.js ***!
205
- \**************************************************************/(e,t,s)=>{s.r(t),s.d(t,{MaskFormerFeatureExtractor:()=>n,MaskFormerImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_panoptic_segmentation(...e){return(0,r.post_process_panoptic_segmentation)(...e)}post_process_instance_segmentation(...e){return(0,r.post_process_instance_segmentation)(...e)}}class n extends o{}},"./src/models/mgp_str/processing_mgp_str.js":
206
- /*!**************************************************!*\
207
- !*** ./src/models/mgp_str/processing_mgp_str.js ***!
208
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{MgpstrProcessor:()=>l});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js"),a=s(/*! ../../utils/maths.js */"./src/utils/maths.js");const i={char:["char_decode",1],bpe:["bpe_decode",2],wp:["wp_decode",102]};class l extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;get char_tokenizer(){return this.components.char_tokenizer}get bpe_tokenizer(){return this.components.bpe_tokenizer}get wp_tokenizer(){return this.components.wp_tokenizer}_decode_helper(e,t){if(!i.hasOwnProperty(t))throw new Error(`Format ${t} is not supported.`);const[s,r]=i[t],o=this[s].bind(this),[n,l]=e.dims,c=[],d=[],u=e.tolist();for(let e=0;e<n;++e){const t=u[e],s=[],o=[];for(let e=1;e<l;++e){const[n,i]=(0,a.max)((0,a.softmax)(t[e]));if(o.push(n),i==r)break;s.push(i)}const n=o.length>0?o.reduce(((e,t)=>e*t),1):0;d.push(s),c.push(n)}return[o(d),c]}char_decode(e){return this.char_tokenizer.batch_decode(e).map((e=>e.replaceAll(" ","")))}bpe_decode(e){return this.bpe_tokenizer.batch_decode(e)}wp_decode(e){return this.wp_tokenizer.batch_decode(e).map((e=>e.replaceAll(" ","")))}batch_decode([e,t,s]){const[r,o]=this._decode_helper(e,"char"),[n,i]=this._decode_helper(t,"bpe"),[l,c]=this._decode_helper(s,"wp"),d=[],u=[];for(let e=0;e<r.length;++e){const[t,s]=(0,a.max)([o[e],i[e],c[e]]);d.push([r[e],n[e],l[e]][s]),u.push(t)}return{generated_text:d,scores:u,char_preds:r,bpe_preds:n,wp_preds:l}}static async from_pretrained(...e){const t=await super.from_pretrained(...e),s=await n.AutoTokenizer.from_pretrained("Xenova/gpt2"),r=await n.AutoTokenizer.from_pretrained("Xenova/bert-base-uncased");return t.components={image_processor:t.image_processor,char_tokenizer:t.tokenizer,bpe_tokenizer:s,wp_tokenizer:r},t}async 
_call(e,t=null){const s=await this.image_processor(e);return t&&(s.labels=this.tokenizer(t).input_ids),s}}},"./src/models/mobilenet_v1/image_processing_mobilenet_v1.js":
209
- /*!******************************************************************!*\
210
- !*** ./src/models/mobilenet_v1/image_processing_mobilenet_v1.js ***!
211
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{MobileNetV1FeatureExtractor:()=>n,MobileNetV1ImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilenet_v2/image_processing_mobilenet_v2.js":
212
- /*!******************************************************************!*\
213
- !*** ./src/models/mobilenet_v2/image_processing_mobilenet_v2.js ***!
214
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{MobileNetV2FeatureExtractor:()=>n,MobileNetV2ImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilenet_v3/image_processing_mobilenet_v3.js":
215
- /*!******************************************************************!*\
216
- !*** ./src/models/mobilenet_v3/image_processing_mobilenet_v3.js ***!
217
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{MobileNetV3FeatureExtractor:()=>n,MobileNetV3ImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilenet_v4/image_processing_mobilenet_v4.js":
218
- /*!******************************************************************!*\
219
- !*** ./src/models/mobilenet_v4/image_processing_mobilenet_v4.js ***!
220
- \******************************************************************/(e,t,s)=>{s.r(t),s.d(t,{MobileNetV4FeatureExtractor:()=>n,MobileNetV4ImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilevit/image_processing_mobilevit.js":
221
- /*!************************************************************!*\
222
- !*** ./src/models/mobilevit/image_processing_mobilevit.js ***!
223
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{MobileViTFeatureExtractor:()=>n,MobileViTImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/nougat/image_processing_nougat.js":
224
- /*!******************************************************!*\
225
- !*** ./src/models/nougat/image_processing_nougat.js ***!
226
- \******************************************************/(e,t,s)=>{s.r(t),s.d(t,{NougatImageProcessor:()=>o});var r=s(/*! ../donut/image_processing_donut.js */"./src/models/donut/image_processing_donut.js");class o extends r.DonutImageProcessor{}},"./src/models/owlv2/image_processing_owlv2.js":
227
- /*!****************************************************!*\
228
- !*** ./src/models/owlv2/image_processing_owlv2.js ***!
229
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{Owlv2ImageProcessor:()=>o});var r=s(/*! ../owlvit/image_processing_owlvit.js */"./src/models/owlvit/image_processing_owlvit.js");class o extends r.OwlViTImageProcessor{}},"./src/models/owlvit/image_processing_owlvit.js":
230
- /*!******************************************************!*\
231
- !*** ./src/models/owlvit/image_processing_owlvit.js ***!
232
- \******************************************************/(e,t,s)=>{s.r(t),s.d(t,{OwlViTFeatureExtractor:()=>n,OwlViTImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}}class n extends o{}},"./src/models/owlvit/processing_owlvit.js":
233
- /*!************************************************!*\
234
- !*** ./src/models/owlvit/processing_owlvit.js ***!
235
- \************************************************/(e,t,s)=>{s.r(t),s.d(t,{OwlViTProcessor:()=>a});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js");class a extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor}},"./src/models/processors.js":
236
- /*!**********************************!*\
237
- !*** ./src/models/processors.js ***!
238
- \**********************************/(e,t,s)=>{s.r(t),s.d(t,{Florence2Processor:()=>r.Florence2Processor,Idefics3Processor:()=>n.Idefics3Processor,JinaCLIPProcessor:()=>i.JinaCLIPProcessor,MgpstrProcessor:()=>o.MgpstrProcessor,OwlViTProcessor:()=>l.OwlViTProcessor,PyAnnoteProcessor:()=>c.PyAnnoteProcessor,Qwen2VLProcessor:()=>d.Qwen2VLProcessor,SamProcessor:()=>u.SamProcessor,SpeechT5Processor:()=>p.SpeechT5Processor,VLChatProcessor:()=>a.VLChatProcessor,Wav2Vec2ProcessorWithLM:()=>m.Wav2Vec2ProcessorWithLM,WhisperProcessor:()=>_.WhisperProcessor});var r=s(/*! ./florence2/processing_florence2.js */"./src/models/florence2/processing_florence2.js"),o=s(/*! ./mgp_str/processing_mgp_str.js */"./src/models/mgp_str/processing_mgp_str.js"),n=s(/*! ./idefics3/processing_idefics3.js */"./src/models/idefics3/processing_idefics3.js"),a=s(/*! ./janus/processing_janus.js */"./src/models/janus/processing_janus.js"),i=s(/*! ./jina_clip/processing_jina_clip.js */"./src/models/jina_clip/processing_jina_clip.js"),l=s(/*! ./owlvit/processing_owlvit.js */"./src/models/owlvit/processing_owlvit.js"),c=s(/*! ./pyannote/processing_pyannote.js */"./src/models/pyannote/processing_pyannote.js"),d=s(/*! ./qwen2_vl/processing_qwen2_vl.js */"./src/models/qwen2_vl/processing_qwen2_vl.js"),u=s(/*! ./sam/processing_sam.js */"./src/models/sam/processing_sam.js"),p=s(/*! ./speecht5/processing_speecht5.js */"./src/models/speecht5/processing_speecht5.js"),m=s(/*! ./wav2vec2/processing_wav2vec2.js */"./src/models/wav2vec2/processing_wav2vec2.js"),_=s(/*! ./whisper/processing_whisper.js */"./src/models/whisper/processing_whisper.js")},"./src/models/pvt/image_processing_pvt.js":
239
- /*!************************************************!*\
240
- !*** ./src/models/pvt/image_processing_pvt.js ***!
241
- \************************************************/(e,t,s)=>{s.r(t),s.d(t,{PvtImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/pyannote/feature_extraction_pyannote.js":
242
- /*!************************************************************!*\
243
- !*** ./src/models/pyannote/feature_extraction_pyannote.js ***!
244
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{PyAnnoteFeatureExtractor:()=>n});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class n extends r.FeatureExtractor{async _call(e){(0,r.validate_audio_inputs)(e,"PyAnnoteFeatureExtractor"),e instanceof Float64Array&&(e=new Float32Array(e));const t=[1,1,e.length];return{input_values:new o.Tensor("float32",e,t)}}}},"./src/models/pyannote/processing_pyannote.js":
245
- /*!****************************************************!*\
246
- !*** ./src/models/pyannote/processing_pyannote.js ***!
247
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{PyAnnoteProcessor:()=>a});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/feature_extraction_auto.js */"./src/models/auto/feature_extraction_auto.js"),n=s(/*! ../../utils/maths.js */"./src/utils/maths.js");class a extends r.Processor{static feature_extractor_class=o.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}samples_to_frames(e){return(e-this.config.offset)/this.config.step}post_process_speaker_diarization(e,t){const s=t/this.samples_to_frames(t)/this.config.sampling_rate,r=[];for(const t of e.tolist()){const e=[];let o=-1;for(let s=0;s<t.length;++s){const r=(0,n.softmax)(t[s]),[a,i]=(0,n.max)(r),[l,c]=[s,s+1];i!==o?(o=i,e.push({id:i,start:l,end:c,score:a})):(e.at(-1).end=c,e.at(-1).score+=a)}r.push(e.map((({id:e,start:t,end:r,score:o})=>({id:e,start:t*s,end:r*s,confidence:o/(r-t)}))))}return r}}},"./src/models/qwen2_vl/image_processing_qwen2_vl.js":
248
- /*!**********************************************************!*\
249
- !*** ./src/models/qwen2_vl/image_processing_qwen2_vl.js ***!
250
- \**********************************************************/(e,t,s)=>{s.r(t),s.d(t,{Qwen2VLImageProcessor:()=>n});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class n extends r.ImageProcessor{async _call(e,...t){const{pixel_values:s,original_sizes:r,reshaped_input_sizes:n}=await super._call(e,...t);let a=s;const{temporal_patch_size:i,merge_size:l,patch_size:c}=this.config;1===a.dims[0]&&(a=(0,o.cat)(Array.from({length:i},(()=>a)),0));const d=a.dims[0]/i,u=a.dims[1],p=Math.floor(a.dims[2]/c),m=Math.floor(a.dims[3]/c);return{pixel_values:a.view(d,i,u,Math.floor(p/l),l,c,Math.floor(m/l),l,c).permute(0,3,6,4,7,2,1,5,8).view(d*p*m,u*i*c*c),image_grid_thw:new o.Tensor("int64",[d,p,m],[1,3]),original_sizes:r,reshaped_input_sizes:n}}}},"./src/models/qwen2_vl/processing_qwen2_vl.js":
251
- /*!****************************************************!*\
252
- !*** ./src/models/qwen2_vl/processing_qwen2_vl.js ***!
253
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{Qwen2VLProcessor:()=>a});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),n=s(/*! ../../tokenizers.js */"./src/tokenizers.js");s(/*! ../../utils/image.js */"./src/utils/image.js");class a extends r.Processor{static image_processor_class=o.AutoImageProcessor;static tokenizer_class=n.AutoTokenizer;async _call(e,t=null,...s){let r,o;if(Array.isArray(e)||(e=[e]),t&&(r=await this.image_processor(t),o=r.image_grid_thw),o){let t=this.image_processor.config.merge_size**2,s=0;const r=o.tolist();e=e.map((e=>{for(;e.includes("<|image_pad|>");){const o=Number(r[s++].reduce(((e,t)=>e*t),1n));e=e.replace("<|image_pad|>","<|placeholder|>".repeat(Math.floor(o/t)))}return e.replaceAll("<|placeholder|>","<|image_pad|>")}))}return{...this.tokenizer(e),...r}}}},"./src/models/rt_detr/image_processing_rt_detr.js":
254
- /*!********************************************************!*\
255
- !*** ./src/models/rt_detr/image_processing_rt_detr.js ***!
256
- \********************************************************/(e,t,s)=>{s.r(t),s.d(t,{RTDetrImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}}},"./src/models/sam/image_processing_sam.js":
257
- /*!************************************************!*\
258
- !*** ./src/models/sam/image_processing_sam.js ***!
259
- \************************************************/(e,t,s)=>{s.r(t),s.d(t,{SamImageProcessor:()=>a});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js"),o=s(/*! ../../utils/core.js */"./src/utils/core.js"),n=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class a extends r.ImageProcessor{reshape_input_points(e,t,s,r=!1){e=structuredClone(e);let a=(0,o.calculateDimensions)(e);if(3===a.length)r||(a=[1,...a]),e=[e];else if(4!==a.length)throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.");for(let r=0;r<e.length;++r){let o=t[r],n=s[r],a=[n[0]/o[0],n[1]/o[1]];for(let t=0;t<e[r].length;++t)for(let s=0;s<e[r][t].length;++s)for(let o=0;o<e[r][t][s].length;++o)e[r][t][s][o]*=a[o%2]}return new n.Tensor("float32",Float32Array.from(e.flat(1/0)),a)}add_input_labels(e,t){let s=(0,o.calculateDimensions)(e);if(2===s.length)s=[1,...s],e=[e];else if(3!==s.length)throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.");if(s.some(((e,s)=>e!==t.dims[s])))throw Error(`The first ${s.length} dimensions of 'input_points' and 'input_labels' must be the same.`);return new n.Tensor("int64",e.flat(1/0).map(BigInt),s)}async _call(e,{input_points:t=null,input_labels:s=null,input_boxes:r=null}={}){const o=await super._call(e);if(t&&(o.input_points=this.reshape_input_points(t,o.original_sizes,o.reshaped_input_sizes)),s){if(!o.input_points)throw Error("`input_points` must be provided if `input_labels` are provided.");o.input_labels=this.add_input_labels(s,o.input_points)}return r&&(o.input_boxes=this.reshape_input_points(r,o.original_sizes,o.reshaped_input_sizes,!0)),o}async post_process_masks(e,t,s,{mask_threshold:r=0,binarize:o=!0,pad_size:a=null}={}){const i=[],l=[(a=a??this.pad_size).height,a.width];for(let a=0;a<t.length;++a){const c=t[a],d=s[a];let 
u=await(0,n.interpolate_4d)(e[a],{mode:"bilinear",size:l});if(u=u.slice(null,null,[0,d[0]],[0,d[1]]),u=await(0,n.interpolate_4d)(u,{mode:"bilinear",size:c}),o){const e=u.data,t=new Uint8Array(e.length);for(let s=0;s<e.length;++s)e[s]>r&&(t[s]=1);u=new n.Tensor("bool",t,u.dims)}i.push(u)}return i}generate_crop_boxes(e,t,{crop_n_layers:s=0,overlap_ratio:r=512/1500,points_per_crop:o=32,crop_n_points_downscale_factor:n=1}={}){}}},"./src/models/sam/processing_sam.js":
260
- /*!******************************************!*\
261
- !*** ./src/models/sam/processing_sam.js ***!
262
- \******************************************/(e,t,s)=>{s.r(t),s.d(t,{SamProcessor:()=>n});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js");class n extends r.Processor{static image_processor_class=o.AutoImageProcessor;async _call(...e){return await this.image_processor(...e)}post_process_masks(...e){return this.image_processor.post_process_masks(...e)}reshape_input_points(...e){return this.image_processor.reshape_input_points(...e)}}},"./src/models/seamless_m4t/feature_extraction_seamless_m4t.js":
263
- /*!********************************************************************!*\
264
- !*** ./src/models/seamless_m4t/feature_extraction_seamless_m4t.js ***!
265
- \********************************************************************/(e,t,s)=>{s.r(t),s.d(t,{SeamlessM4TFeatureExtractor:()=>a});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js"),n=s(/*! ../../utils/audio.js */"./src/utils/audio.js");class a extends r.FeatureExtractor{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,n.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,n.window_function)(400,"povey",{periodic:!1})}async _extract_fbank_features(e,t){return e=e.map((e=>32768*e)),(0,n.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,max_num_frames:t,transpose:!0})}async _call(e,{padding:t=!0,pad_to_multiple_of:s=2,do_normalize_per_mel_bins:n=!0,return_attention_mask:a=!0}={}){(0,r.validate_audio_inputs)(e,"SeamlessM4TFeatureExtractor");let i,l=await this._extract_fbank_features(e,this.config.max_length);if(n){const[e,t]=l.dims,s=l.data;for(let r=0;r<t;++r){let o=0;for(let n=0;n<e;++n)o+=s[n*t+r];const n=o/e;let a=0;for(let o=0;o<e;++o)a+=(s[o*t+r]-n)**2;a/=e-1;const i=Math.sqrt(a+1e-7);for(let o=0;o<e;++o){const e=o*t+r;s[e]=(s[e]-n)/i}}}if(t){const[e,t]=l.dims,r=l.data,n=e%s;if(n>0){const s=new Float32Array(t*(e+n));s.set(r),s.fill(this.config.padding_value,r.length);const c=e+n;l=new o.Tensor(l.type,s,[c,t]),a&&(i=new o.Tensor("int64",new BigInt64Array(c),[1,c]),i.data.fill(1n,0,e))}}const[c,d]=l.dims,u=this.config.stride;if(0!==c%u)throw new Error(`The number of frames (${c}) must be a multiple of the stride (${u}).`);const p=l.view(1,Math.floor(c/u),d*u),m={input_features:p};if(a){const e=p.dims[1],t=new BigInt64Array(e);if(i){const e=i.data;for(let s=1,r=0;s<c;s+=u,++r)t[r]=e[s]}else t.fill(1n);m.attention_mask=new 
o.Tensor("int64",t,[1,e])}return m}}},"./src/models/segformer/image_processing_segformer.js":
266
- /*!************************************************************!*\
267
- !*** ./src/models/segformer/image_processing_segformer.js ***!
268
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{SegformerFeatureExtractor:()=>n,SegformerImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_semantic_segmentation(...e){return(0,r.post_process_semantic_segmentation)(...e)}}class n extends o{}},"./src/models/siglip/image_processing_siglip.js":
269
- /*!******************************************************!*\
270
- !*** ./src/models/siglip/image_processing_siglip.js ***!
271
- \******************************************************/(e,t,s)=>{s.r(t),s.d(t,{SiglipImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/speecht5/feature_extraction_speecht5.js":
272
- /*!************************************************************!*\
273
- !*** ./src/models/speecht5/feature_extraction_speecht5.js ***!
274
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{SpeechT5FeatureExtractor:()=>o});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js");class o extends r.FeatureExtractor{}},"./src/models/speecht5/processing_speecht5.js":
275
- /*!****************************************************!*\
276
- !*** ./src/models/speecht5/processing_speecht5.js ***!
277
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{SpeechT5Processor:()=>a});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../../tokenizers.js */"./src/tokenizers.js"),n=s(/*! ../auto/feature_extraction_auto.js */"./src/models/auto/feature_extraction_auto.js");class a extends r.Processor{static tokenizer_class=o.AutoTokenizer;static feature_extractor_class=n.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}}},"./src/models/swin2sr/image_processing_swin2sr.js":
278
- /*!********************************************************!*\
279
- !*** ./src/models/swin2sr/image_processing_swin2sr.js ***!
280
- \********************************************************/(e,t,s)=>{s.r(t),s.d(t,{Swin2SRImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{pad_image(e,t,s,r={}){const[o,n,a]=t;return super.pad_image(e,t,{width:n+(s-n%s)%s,height:o+(s-o%s)%s},{mode:"symmetric",center:!1,constant_values:-1,...r})}}},"./src/models/vit/image_processing_vit.js":
281
- /*!************************************************!*\
282
- !*** ./src/models/vit/image_processing_vit.js ***!
283
- \************************************************/(e,t,s)=>{s.r(t),s.d(t,{ViTFeatureExtractor:()=>n,ViTImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/vitmatte/image_processing_vitmatte.js":
284
- /*!**********************************************************!*\
285
- !*** ./src/models/vitmatte/image_processing_vitmatte.js ***!
286
- \**********************************************************/(e,t,s)=>{s.r(t),s.d(t,{VitMatteImageProcessor:()=>n});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class n extends r.ImageProcessor{async _call(e,t){Array.isArray(e)||(e=[e]),Array.isArray(t)||(t=[t]);const s=await Promise.all(e.map((e=>this.preprocess(e)))),r=await Promise.all(t.map((e=>this.preprocess(e,{do_normalize:!1,do_convert_rgb:!1,do_convert_grayscale:!0}))));return{pixel_values:(0,o.stack)(s.map(((e,t)=>(0,o.cat)([e.pixel_values,r[t].pixel_values],0))),0),original_sizes:s.map((e=>e.original_size)),reshaped_input_sizes:s.map((e=>e.reshaped_input_size))}}}},"./src/models/vitpose/image_processing_vitpose.js":
287
- /*!********************************************************!*\
288
- !*** ./src/models/vitpose/image_processing_vitpose.js ***!
289
- \********************************************************/(e,t,s)=>{s.r(t),s.d(t,{VitPoseImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_pose_estimation(e,t,{threshold:s=null}={}){const r=e.tolist(),[o,n,a,i]=e.dims,l=[];for(let e=0;e<o;++e){const o=r[e],n=t[e],c=[];for(let e=0;e<n.length;++e){const t=n[e],r=[],l=[],d=[],u=t.at(-2)/i,p=t.at(-1)/a;for(let e=0;e<o.length;++e){let[t,n]=[0,0],a=0,i=-1/0;const c=o[e];for(let e=0;e<c.length;++e){const s=c[e];for(let r=0;r<s.length;++r){const o=s[r];a+=o,i=Math.max(i,o),t+=(r+.5)*o,n+=e*o}}if(null!=s&&i<s)continue;const m=[u*t/a,p*n/a];r.push(m),d.push(e),l.push(i)}c.push({bbox:t,scores:l,labels:d,keypoints:r})}l.push(c)}return l}}},"./src/models/wav2vec2/feature_extraction_wav2vec2.js":
290
- /*!************************************************************!*\
291
- !*** ./src/models/wav2vec2/feature_extraction_wav2vec2.js ***!
292
- \************************************************************/(e,t,s)=>{s.r(t),s.d(t,{Wav2Vec2FeatureExtractor:()=>n});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=s(/*! ../../utils/tensor.js */"./src/utils/tensor.js");class n extends r.FeatureExtractor{_zero_mean_unit_var_norm(e){const t=e.reduce(((e,t)=>e+t),0)/e.length,s=e.reduce(((e,s)=>e+(s-t)**2),0)/e.length;return e.map((e=>(e-t)/Math.sqrt(s+1e-7)))}async _call(e){(0,r.validate_audio_inputs)(e,"Wav2Vec2FeatureExtractor"),e instanceof Float64Array&&(e=new Float32Array(e));let t=e;this.config.do_normalize&&(t=this._zero_mean_unit_var_norm(t));const s=[1,t.length];return{input_values:new o.Tensor("float32",t,s),attention_mask:new o.Tensor("int64",new BigInt64Array(t.length).fill(1n),s)}}}},"./src/models/wav2vec2/processing_wav2vec2.js":
293
- /*!****************************************************!*\
294
- !*** ./src/models/wav2vec2/processing_wav2vec2.js ***!
295
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{Wav2Vec2ProcessorWithLM:()=>n});var r=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js"),o=s(/*! ../auto/feature_extraction_auto.js */"./src/models/auto/feature_extraction_auto.js");class n extends r.Processor{static feature_extractor_class=o.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}}},"./src/models/wespeaker/feature_extraction_wespeaker.js":
296
- /*!**************************************************************!*\
297
- !*** ./src/models/wespeaker/feature_extraction_wespeaker.js ***!
298
- \**************************************************************/(e,t,s)=>{s.r(t),s.d(t,{WeSpeakerFeatureExtractor:()=>n});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=(s(/*! ../../utils/tensor.js */"./src/utils/tensor.js"),s(/*! ../../utils/audio.js */"./src/utils/audio.js"));class n extends r.FeatureExtractor{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,o.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,o.window_function)(400,"hamming",{periodic:!1}),this.min_num_frames=this.config.min_num_frames}async _extract_fbank_features(e){return e=e.map((e=>32768*e)),(0,o.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,transpose:!0,min_num_frames:this.min_num_frames})}async _call(e){(0,r.validate_audio_inputs)(e,"WeSpeakerFeatureExtractor");const t=(await this._extract_fbank_features(e)).unsqueeze_(0);if(null===this.config.fbank_centering_span){const e=t.mean(1).data,s=t.data,[r,o,n]=t.dims;for(let t=0;t<r;++t){const r=t*o*n,a=t*n;for(let t=0;t<o;++t){const o=r+t*n;for(let t=0;t<n;++t)s[o+t]-=e[a+t]}}}return{input_features:t}}}},"./src/models/whisper/common_whisper.js":
299
- /*!**********************************************!*\
300
- !*** ./src/models/whisper/common_whisper.js ***!
301
- \**********************************************/(e,t,s)=>{s.r(t),s.d(t,{WHISPER_LANGUAGE_MAPPING:()=>o,WHISPER_TO_LANGUAGE_CODE_MAPPING:()=>n,whisper_language_to_code:()=>a});const r=[["en","english"],["zh","chinese"],["de","german"],["es","spanish"],["ru","russian"],["ko","korean"],["fr","french"],["ja","japanese"],["pt","portuguese"],["tr","turkish"],["pl","polish"],["ca","catalan"],["nl","dutch"],["ar","arabic"],["sv","swedish"],["it","italian"],["id","indonesian"],["hi","hindi"],["fi","finnish"],["vi","vietnamese"],["he","hebrew"],["uk","ukrainian"],["el","greek"],["ms","malay"],["cs","czech"],["ro","romanian"],["da","danish"],["hu","hungarian"],["ta","tamil"],["no","norwegian"],["th","thai"],["ur","urdu"],["hr","croatian"],["bg","bulgarian"],["lt","lithuanian"],["la","latin"],["mi","maori"],["ml","malayalam"],["cy","welsh"],["sk","slovak"],["te","telugu"],["fa","persian"],["lv","latvian"],["bn","bengali"],["sr","serbian"],["az","azerbaijani"],["sl","slovenian"],["kn","kannada"],["et","estonian"],["mk","macedonian"],["br","breton"],["eu","basque"],["is","icelandic"],["hy","armenian"],["ne","nepali"],["mn","mongolian"],["bs","bosnian"],["kk","kazakh"],["sq","albanian"],["sw","swahili"],["gl","galician"],["mr","marathi"],["pa","punjabi"],["si","sinhala"],["km","khmer"],["sn","shona"],["yo","yoruba"],["so","somali"],["af","afrikaans"],["oc","occitan"],["ka","georgian"],["be","belarusian"],["tg","tajik"],["sd","sindhi"],["gu","gujarati"],["am","amharic"],["yi","yiddish"],["lo","lao"],["uz","uzbek"],["fo","faroese"],["ht","haitian creole"],["ps","pashto"],["tk","turkmen"],["nn","nynorsk"],["mt","maltese"],["sa","sanskrit"],["lb","luxembourgish"],["my","myanmar"],["bo","tibetan"],["tl","tagalog"],["mg","malagasy"],["as","assamese"],["tt","tatar"],["haw","hawaiian"],["ln","lingala"],["ha","hausa"],["ba","bashkir"],["jw","javanese"],["su","sundanese"]],o=new Map(r),n=new 
Map([...r.map((([e,t])=>[t,e])),["burmese","my"],["valencian","ca"],["flemish","nl"],["haitian","ht"],["letzeburgesch","lb"],["pushto","ps"],["panjabi","pa"],["moldavian","ro"],["moldovan","ro"],["sinhalese","si"],["castilian","es"]]);function a(e){e=e.toLowerCase();let t=n.get(e);if(void 0===t){if(!o.has(e)){const t=2===e.length?o.keys():o.values();throw new Error(`Language "${e}" is not supported. Must be one of: ${JSON.stringify(t)}`)}t=e}return t}},"./src/models/whisper/feature_extraction_whisper.js":
302
- /*!**********************************************************!*\
303
- !*** ./src/models/whisper/feature_extraction_whisper.js ***!
304
- \**********************************************************/(e,t,s)=>{s.r(t),s.d(t,{WhisperFeatureExtractor:()=>a});var r=s(/*! ../../base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),o=(s(/*! ../../utils/tensor.js */"./src/utils/tensor.js"),s(/*! ../../utils/audio.js */"./src/utils/audio.js")),n=s(/*! ../../utils/maths.js */"./src/utils/maths.js");class a extends r.FeatureExtractor{constructor(e){super(e),this.config.mel_filters??=(0,o.mel_filter_bank)(Math.floor(1+this.config.n_fft/2),this.config.feature_size,0,8e3,this.config.sampling_rate,"slaney","slaney"),this.window=(0,o.window_function)(this.config.n_fft,"hann")}async _extract_fbank_features(e){const t=await(0,o.spectrogram)(e,this.window,this.config.n_fft,this.config.hop_length,{power:2,mel_filters:this.config.mel_filters,log_mel:"log10",max_num_frames:this.config.nb_max_frames}),s=t.data,r=(0,n.max)(s)[0];for(let e=0;e<s.length;++e)s[e]=(Math.max(s[e],r-8)+4)/4;return t}async _call(e){let t;(0,r.validate_audio_inputs)(e,"WhisperFeatureExtractor"),e.length>this.config.n_samples?(console.warn("Attempting to extract features for audio longer than 30 seconds. If using a pipeline to extract transcript from a long audio clip, remember to specify `chunk_length_s` and/or `stride_length_s`."),t=e.slice(0,this.config.n_samples)):(t=new Float32Array(this.config.n_samples),t.set(e));return{input_features:(await this._extract_fbank_features(t)).unsqueeze_(0)}}}},"./src/models/whisper/generation_whisper.js":
305
- /*!**************************************************!*\
306
- !*** ./src/models/whisper/generation_whisper.js ***!
307
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{WhisperGenerationConfig:()=>o});var r=s(/*! ../../generation/configuration_utils.js */"./src/generation/configuration_utils.js");class o extends r.GenerationConfig{return_timestamps=null;return_token_timestamps=null;num_frames=null;alignment_heads=null;task=null;language=null;no_timestamps_token_id=null;prompt_ids=null;is_multilingual=null;lang_to_id=null;task_to_id=null;max_initial_timestamp_index=1}},"./src/models/whisper/processing_whisper.js":
308
- /*!**************************************************!*\
309
- !*** ./src/models/whisper/processing_whisper.js ***!
310
- \**************************************************/(e,t,s)=>{s.r(t),s.d(t,{WhisperProcessor:()=>a});var r=s(/*! ../auto/feature_extraction_auto.js */"./src/models/auto/feature_extraction_auto.js"),o=s(/*! ../../tokenizers.js */"./src/tokenizers.js"),n=s(/*! ../../base/processing_utils.js */"./src/base/processing_utils.js");class a extends n.Processor{static tokenizer_class=o.AutoTokenizer;static feature_extractor_class=r.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}}},"./src/models/yolos/image_processing_yolos.js":
311
- /*!****************************************************!*\
312
- !*** ./src/models/yolos/image_processing_yolos.js ***!
313
- \****************************************************/(e,t,s)=>{s.r(t),s.d(t,{YolosFeatureExtractor:()=>n,YolosImageProcessor:()=>o});var r=s(/*! ../../base/image_processors_utils.js */"./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}}class n extends o{}},"./src/ops/registry.js":
314
- /*!*****************************!*\
315
- !*** ./src/ops/registry.js ***!
316
- \*****************************/(e,t,s)=>{s.r(t),s.d(t,{TensorOpRegistry:()=>a});var r=s(/*! ../backends/onnx.js */"./src/backends/onnx.js"),o=s(/*! ../utils/tensor.js */"./src/utils/tensor.js");const n=async(e,t,s)=>{const n=await(0,r.createInferenceSession)(new Uint8Array(e),t);return async e=>{const t=Object.fromEntries(Object.entries(e).map((([e,t])=>[e,t.ort_tensor]))),r=await n.run(t);return Array.isArray(s)?s.map((e=>new o.Tensor(r[e]))):new o.Tensor(r[s])}};class a{static session_options={};static get bilinear_interpolate_4d(){return this._bilinear_interpolate_4d||(this._bilinear_interpolate_4d=n([8,9,18,0,58,128,1,10,40,10,1,120,10,0,10,0,10,1,115,18,1,121,34,6,82,101,115,105,122,101,42,17,10,4,109,111,100,101,34,6,108,105,110,101,97,114,160,1,3,18,1,114,90,31,10,1,120,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,90,15,10,1,115,18,10,10,8,8,7,18,4,10,2,8,4,98,31,10,1,121,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,66,2,16,20],this.session_options,"y")),this._bilinear_interpolate_4d}static get bicubic_interpolate_4d(){return this._bicubic_interpolate_4d||(this._bicubic_interpolate_4d=n([8,9,18,0,58,127,10,39,10,1,120,10,0,10,0,10,1,115,18,1,121,34,6,82,101,115,105,122,101,42,16,10,4,109,111,100,101,34,5,99,117,98,105,99,160,1,3,18,1,114,90,31,10,1,120,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,90,15,10,1,115,18,10,10,8,8,7,18,4,10,2,8,4,98,31,10,1,121,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,66,2,16,20],this.session_options,"y")),this._bicubic_interpolate_4d}static get matmul(){return this._matmul||(this._matmul=n([8,9,18,0,58,55,10,17,10,1,97,10,1,98,18,1,99,34,6,77,97,116,77,117,108,18,1,114,90,9,10,1,97,18,4,10,2,8,1,90,9,10,1,98,18,4,10,2,8,1,98,9,10,1,99,18,4,10,2,8,1,66,2,16,20],this.session_options,"c")),this._matmul}static get stft(){return 
this._stft||(this._stft=n([8,7,18,0,58,148,1,10,38,10,1,115,10,1,106,10,1,119,10,1,108,18,1,111,34,4,83,84,70,84,42,15,10,8,111,110,101,115,105,100,101,100,24,1,160,1,2,18,1,115,90,26,10,1,115,18,21,10,19,8,1,18,15,10,3,18,1,98,10,3,18,1,115,10,3,18,1,99,90,11,10,1,106,18,6,10,4,8,7,18,0,90,16,10,1,119,18,11,10,9,8,1,18,5,10,3,18,1,119,90,11,10,1,108,18,6,10,4,8,7,18,0,98,31,10,1,111,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,102,10,3,18,1,100,10,3,18,1,99,66,2,16,17],this.session_options,"o")),this._stft}static get rfft(){return this._rfft||(this._rfft=n([8,9,18,0,58,97,10,33,10,1,120,10,0,10,1,97,18,1,121,34,3,68,70,84,42,15,10,8,111,110,101,115,105,100,101,100,24,1,160,1,2,18,1,100,90,21,10,1,120,18,16,10,14,8,1,18,10,10,3,18,1,115,10,3,18,1,99,90,11,10,1,97,18,6,10,4,8,7,18,0,98,21,10,1,121,18,16,10,14,8,1,18,10,10,3,18,1,115,10,3,18,1,99,66,2,16,20],this.session_options,"y")),this._rfft}static get top_k(){return this._top_k||(this._top_k=n([8,10,18,0,58,73,10,18,10,1,120,10,1,107,18,1,118,18,1,105,34,4,84,111,112,75,18,1,116,90,9,10,1,120,18,4,10,2,8,1,90,15,10,1,107,18,10,10,8,8,7,18,4,10,2,8,1,98,9,10,1,118,18,4,10,2,8,1,98,9,10,1,105,18,4,10,2,8,7,66,2,16,21],this.session_options,["v","i"])),this._top_k}}},"./src/pipelines.js":
317
- /*!**************************!*\
318
- !*** ./src/pipelines.js ***!
319
- \**************************/(e,t,s)=>{s.r(t),s.d(t,{AudioClassificationPipeline:()=>C,AutomaticSpeechRecognitionPipeline:()=>A,DepthEstimationPipeline:()=>D,DocumentQuestionAnsweringPipeline:()=>V,FeatureExtractionPipeline:()=>F,FillMaskPipeline:()=>w,ImageClassificationPipeline:()=>L,ImageFeatureExtractionPipeline:()=>v,ImageSegmentationPipeline:()=>I,ImageToImagePipeline:()=>O,ImageToTextPipeline:()=>E,ObjectDetectionPipeline:()=>j,Pipeline:()=>h,QuestionAnsweringPipeline:()=>M,SummarizationPipeline:()=>b,Text2TextGenerationPipeline:()=>T,TextClassificationPipeline:()=>f,TextGenerationPipeline:()=>k,TextToAudioPipeline:()=>B,TokenClassificationPipeline:()=>g,TranslationPipeline:()=>x,ZeroShotAudioClassificationPipeline:()=>S,ZeroShotClassificationPipeline:()=>y,ZeroShotImageClassificationPipeline:()=>z,ZeroShotObjectDetectionPipeline:()=>N,pipeline:()=>q});var r=s(/*! ./tokenizers.js */"./src/tokenizers.js"),o=s(/*! ./models.js */"./src/models.js"),n=s(/*! ./models/auto/processing_auto.js */"./src/models/auto/processing_auto.js"),a=(s(/*! ./base/processing_utils.js */"./src/base/processing_utils.js"),s(/*! ./utils/generic.js */"./src/utils/generic.js")),i=s(/*! ./utils/core.js */"./src/utils/core.js"),l=s(/*! ./utils/maths.js */"./src/utils/maths.js"),c=s(/*! ./utils/audio.js */"./src/utils/audio.js"),d=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),u=s(/*! 
./utils/image.js */"./src/utils/image.js");async function p(e){return Array.isArray(e)||(e=[e]),await Promise.all(e.map((e=>u.RawImage.read(e))))}async function m(e,t){return Array.isArray(e)||(e=[e]),await Promise.all(e.map((e=>"string"==typeof e||e instanceof URL?(0,c.read_audio)(e,t):e instanceof Float64Array?new Float32Array(e):e)))}function _(e,t){t&&(e=e.map((e=>0|e)));const[s,r,o,n]=e;return{xmin:s,ymin:r,xmax:o,ymax:n}}class h extends a.Callable{constructor({task:e,model:t,tokenizer:s=null,processor:r=null}){super(),this.task=e,this.model=t,this.tokenizer=s,this.processor=r}async dispose(){await this.model.dispose()}}class f extends h{constructor(e){super(e)}async _call(e,{top_k:t=1}={}){const s=this.tokenizer(e,{padding:!0,truncation:!0}),r=await this.model(s),o="multi_label_classification"===this.model.config.problem_type?e=>e.sigmoid():e=>new d.Tensor("float32",(0,l.softmax)(e.data),e.dims),n=this.model.config.id2label,a=[];for(const e of r.logits){const s=o(e),r=await(0,d.topk)(s,t),i=r[0].tolist(),l=r[1].tolist().map(((e,t)=>({label:n?n[e]:`LABEL_${e}`,score:i[t]})));1===t?a.push(...l):a.push(l)}return Array.isArray(e)||1===t?a:a[0]}}class g extends h{constructor(e){super(e)}async _call(e,{ignore_labels:t=["O"]}={}){const s=Array.isArray(e),r=this.tokenizer(s?e:[e],{padding:!0,truncation:!0}),o=(await this.model(r)).logits,n=this.model.config.id2label,a=[];for(let e=0;e<o.dims[0];++e){const s=r.input_ids[e],i=o[e],c=[];for(let e=0;e<i.dims[0];++e){const r=i[e],o=(0,l.max)(r.data)[1],a=n?n[o]:`LABEL_${o}`;if(t.includes(a))continue;const d=this.tokenizer.decode([s[e].item()],{skip_special_tokens:!0});if(""===d)continue;const u=(0,l.softmax)(r.data);c.push({entity:a,score:u[o],index:e,word:d})}a.push(c)}return s?a:a[0]}}class M extends h{constructor(e){super(e)}async _call(e,t,{top_k:s=1}={}){const r=this.tokenizer(e,{text_pair:t,padding:!0,truncation:!0}),{start_logits:o,end_logits:n}=await 
this.model(r),a=r.input_ids.tolist(),c=r.attention_mask.tolist(),d=this.tokenizer.all_special_ids,u=[];for(let e=0;e<o.dims[0];++e){const t=a[e],r=t.findIndex((e=>e==this.tokenizer.sep_token_id)),p=(c[e].map(((e,s)=>1==e&&(0===s||s>r&&-1===d.findIndex((e=>e==t[s]))))),o[e].tolist()),m=n[e].tolist();for(let s=1;s<p.length;++s)(0==c[e]||s<=r||-1!==d.findIndex((e=>e==t[s])))&&(p[s]=-1/0,m[s]=-1/0);const _=(0,l.softmax)(p).map(((e,t)=>[e,t])),h=(0,l.softmax)(m).map(((e,t)=>[e,t]));_[0][0]=0,h[0][0]=0;const f=(0,i.product)(_,h).filter((e=>e[0][1]<=e[1][1])).map((e=>[e[0][1],e[1][1],e[0][0]*e[1][0]])).sort(((e,t)=>t[2]-e[2]));for(let e=0;e<Math.min(f.length,s);++e){const[s,r,o]=f[e],n=t.slice(s,r+1),a=this.tokenizer.decode(n,{skip_special_tokens:!0});u.push({answer:a,score:o})}}return 1===s?u[0]:u}}class w extends h{constructor(e){super(e)}async _call(e,{top_k:t=5}={}){const s=this.tokenizer(e,{padding:!0,truncation:!0}),{logits:r}=await this.model(s),o=[],n=s.input_ids.tolist();for(let e=0;e<n.length;++e){const s=n[e],a=s.findIndex((e=>e==this.tokenizer.mask_token_id));if(-1===a)throw Error(`Mask token (${this.tokenizer.mask_token}) not found in text.`);const i=r[e][a],c=await(0,d.topk)(new d.Tensor("float32",(0,l.softmax)(i.data),i.dims),t),u=c[0].tolist(),p=c[1].tolist();o.push(p.map(((e,t)=>{const r=s.slice();return r[a]=e,{score:u[t],token:Number(e),token_str:this.tokenizer.model.vocab[e],sequence:this.tokenizer.decode(r,{skip_special_tokens:!0})}})))}return Array.isArray(e)?o:o[0]}}class T extends h{_key="generated_text";constructor(e){super(e)}async _call(e,t={}){Array.isArray(e)||(e=[e]),this.model.config.prefix&&(e=e.map((e=>this.model.config.prefix+e)));const s=this.model.config.task_specific_params;s&&s[this.task]&&s[this.task].prefix&&(e=e.map((e=>s[this.task].prefix+e)));const r=this.tokenizer,o={padding:!0,truncation:!0};let n;n=this instanceof x&&"_build_translation_inputs"in r?r._build_translation_inputs(e,o,t):r(e,o);const a=await 
this.model.generate({...n,...t});return r.batch_decode(a,{skip_special_tokens:!0}).map((e=>({[this._key]:e})))}}class b extends T{_key="summary_text";constructor(e){super(e)}}class x extends T{_key="translation_text";constructor(e){super(e)}}function P(e){return Array.isArray(e)&&e.every((e=>"role"in e&&"content"in e))}class k extends h{constructor(e){super(e)}async _call(e,t={}){let s,r=!1,o=!1;if("string"==typeof e)s=e=[e];else if(Array.isArray(e)&&e.every((e=>"string"==typeof e)))r=!0,s=e;else{if(P(e))e=[e];else{if(!Array.isArray(e)||!e.every(P))throw new Error("Input must be a string, an array of strings, a Chat, or an array of Chats");r=!0}o=!0,s=e.map((e=>this.tokenizer.apply_chat_template(e,{tokenize:!1,add_generation_prompt:!0})))}const n=t.add_special_tokens??!1,a=!o&&(t.return_full_text??!0);this.tokenizer.padding_side="left";const i=this.tokenizer(s,{add_special_tokens:n,padding:!0,truncation:!0}),l=await this.model.generate({...i,...t}),c=this.tokenizer.batch_decode(l,{skip_special_tokens:!0});let d;!a&&i.input_ids.dims.at(-1)>0&&(d=this.tokenizer.batch_decode(i.input_ids,{skip_special_tokens:!0}).map((e=>e.length)));const u=Array.from({length:e.length},(e=>[]));for(let t=0;t<c.length;++t){const s=Math.floor(t/l.dims[0]*e.length);d&&(c[t]=c[t].slice(d[s])),u[s].push({generated_text:o?[...e[s],{role:"assistant",content:c[t]}]:c[t]})}return r||1!==u.length?u:u[0]}}class y extends h{constructor(e){super(e),this.label2id=Object.fromEntries(Object.entries(this.model.config.label2id).map((([e,t])=>[e.toLowerCase(),t]))),this.entailment_id=this.label2id.entailment,void 0===this.entailment_id&&(console.warn("Could not find 'entailment' in label2id mapping. Using 2 as entailment_id."),this.entailment_id=2),this.contradiction_id=this.label2id.contradiction??this.label2id.not_entailment,void 0===this.contradiction_id&&(console.warn("Could not find 'contradiction' in label2id mapping. 
Using 0 as contradiction_id."),this.contradiction_id=0)}async _call(e,t,{hypothesis_template:s="This example is {}.",multi_label:r=!1}={}){const o=Array.isArray(e);o||(e=[e]),Array.isArray(t)||(t=[t]);const n=t.map((e=>s.replace("{}",e))),a=r||1===t.length,i=[];for(const s of e){const e=[];for(const t of n){const r=this.tokenizer(s,{text_pair:t,padding:!0,truncation:!0}),o=await this.model(r);a?e.push([o.logits.data[this.contradiction_id],o.logits.data[this.entailment_id]]):e.push(o.logits.data[this.entailment_id])}const r=(a?e.map((e=>(0,l.softmax)(e)[1])):(0,l.softmax)(e)).map(((e,t)=>[e,t])).sort(((e,t)=>t[0]-e[0]));i.push({sequence:s,labels:r.map((e=>t[e[1]])),scores:r.map((e=>e[0]))})}return o?i:i[0]}}class F extends h{constructor(e){super(e)}async _call(e,{pooling:t="none",normalize:s=!1,quantize:r=!1,precision:o="binary"}={}){const n=this.tokenizer(e,{padding:!0,truncation:!0}),a=await this.model(n);let i=a.last_hidden_state??a.logits??a.token_embeddings;if("none"===t);else if("mean"===t)i=(0,d.mean_pooling)(i,n.attention_mask);else{if("cls"!==t)throw Error(`Pooling method '${t}' not supported.`);i=i.slice(null,0)}return s&&(i=i.normalize(2,-1)),r&&(i=(0,d.quantize_embeddings)(i,o)),i}}class v extends h{constructor(e){super(e)}async _call(e,{pool:t=null}={}){const s=await p(e),{pixel_values:r}=await this.processor(s),o=await this.model({pixel_values:r});let n;if(t){if(!("pooler_output"in o))throw Error("No pooled output was returned. 
Make sure the model has a 'pooler' layer when using the 'pool' option.");n=o.pooler_output}else n=o.last_hidden_state??o.logits??o.image_embeds;return n}}class C extends h{constructor(e){super(e)}async _call(e,{top_k:t=5}={}){const s=this.processor.feature_extractor.config.sampling_rate,r=await m(e,s),o=this.model.config.id2label,n=[];for(const e of r){const s=await this.processor(e),r=(await this.model(s)).logits[0],a=await(0,d.topk)(new d.Tensor("float32",(0,l.softmax)(r.data),r.dims),t),i=a[0].tolist(),c=a[1].tolist().map(((e,t)=>({label:o?o[e]:`LABEL_${e}`,score:i[t]})));n.push(c)}return Array.isArray(e)?n:n[0]}}class S extends h{constructor(e){super(e)}async _call(e,t,{hypothesis_template:s="This is a sound of {}."}={}){const r=!Array.isArray(e);r&&(e=[e]);const o=t.map((e=>s.replace("{}",e))),n=this.tokenizer(o,{padding:!0,truncation:!0}),a=this.processor.feature_extractor.config.sampling_rate,i=await m(e,a),c=[];for(const e of i){const s=await this.processor(e),r=await this.model({...n,...s}),o=(0,l.softmax)(r.logits_per_audio.data);c.push([...o].map(((e,s)=>({score:e,label:t[s]}))))}return r?c[0]:c}}class A extends h{constructor(e){super(e)}async _call(e,t={}){switch(this.model.config.model_type){case"whisper":return this._call_whisper(e,t);case"wav2vec2":case"wav2vec2-bert":case"unispeech":case"unispeech-sat":case"hubert":return this._call_wav2vec2(e,t);default:throw new Error(`AutomaticSpeechRecognitionPipeline does not support model type '${this.model.config.model_type}'.`)}}async _call_wav2vec2(e,t){t.language&&console.warn('`language` parameter is not yet supported for `wav2vec2` models, defaulting to "English".'),t.task&&console.warn('`task` parameter is not yet supported for `wav2vec2` models, defaulting to "transcribe".');const s=!Array.isArray(e);s&&(e=[e]);const r=this.processor.feature_extractor.config.sampling_rate,o=await m(e,r),n=[];for(const e of o){const t=await this.processor(e),s=(await this.model(t)).logits[0],r=[];for(const e of 
s)r.push((0,l.max)(e.data)[1]);const o=this.tokenizer.decode(r);n.push({text:o})}return s?n[0]:n}async _call_whisper(e,t){const s=t.return_timestamps??!1,r=t.chunk_length_s??0,o=t.force_full_sequences??!1;let n=t.stride_length_s??null;const a={...t};"word"===s&&(a.return_token_timestamps=!0,a.return_timestamps=!1);const i=!Array.isArray(e);i&&(e=[e]);const c=this.processor.feature_extractor.config.chunk_length/this.model.config.max_source_positions,d=this.processor.feature_extractor.config.hop_length,u=this.processor.feature_extractor.config.sampling_rate,p=await m(e,u),_=[];for(const e of p){let t=[];if(r>0){if(null===n)n=r/6;else if(r<=n)throw Error("`chunk_length_s` must be larger than `stride_length_s`.");const s=u*r,o=u*n,a=s-2*o;let i=0;for(;;){const r=i+s,n=e.subarray(i,r),l=await this.processor(n),c=0===i,d=r>=e.length;if(t.push({stride:[n.length,c?0:o,d?0:o],input_features:l.input_features,is_last:d}),d)break;i+=a}}else t=[{stride:[e.length,0,0],input_features:(await this.processor(e)).input_features,is_last:!0}];for(const e of t){a.num_frames=Math.floor(e.stride[0]/d);const t=await this.model.generate({inputs:e.input_features,...a});"word"===s?(e.tokens=t.sequences.tolist()[0],e.token_timestamps=t.token_timestamps.tolist()[0].map((e=>(0,l.round)(e,2)))):e.tokens=t[0].tolist(),e.stride=e.stride.map((e=>e/u))}const[i,p]=this.tokenizer._decode_asr(t,{time_precision:c,return_timestamps:s,force_full_sequences:o});_.push({text:i,...p})}return i?_[0]:_}}class E extends h{constructor(e){super(e)}async _call(e,t={}){const s=Array.isArray(e),r=await p(e),{pixel_values:o}=await this.processor(r),n=[];for(const e of o){e.dims=[1,...e.dims];const s=await this.model.generate({inputs:e,...t}),r=this.tokenizer.batch_decode(s,{skip_special_tokens:!0}).map((e=>({generated_text:e.trim()})));n.push(r)}return s?n:n[0]}}class L extends h{constructor(e){super(e)}async _call(e,{top_k:t=5}={}){const s=await p(e),{pixel_values:r}=await this.processor(s),o=await 
this.model({pixel_values:r}),n=this.model.config.id2label,a=[];for(const e of o.logits){const s=await(0,d.topk)(new d.Tensor("float32",(0,l.softmax)(e.data),e.dims),t),r=s[0].tolist(),o=s[1].tolist().map(((e,t)=>({label:n?n[e]:`LABEL_${e}`,score:r[t]})));a.push(o)}return Array.isArray(e)?a:a[0]}}class I extends h{constructor(e){super(e),this.subtasks_mapping={panoptic:"post_process_panoptic_segmentation",instance:"post_process_instance_segmentation",semantic:"post_process_semantic_segmentation"}}async _call(e,{threshold:t=.5,mask_threshold:s=.5,overlap_mask_area_threshold:r=.8,label_ids_to_fuse:o=null,target_sizes:n=null,subtask:a=null}={}){if(Array.isArray(e)&&1!==e.length)throw Error("Image segmentation pipeline currently only supports a batch size of 1.");const i=await p(e),l=i.map((e=>[e.height,e.width])),{pixel_values:c,pixel_mask:d}=await this.processor(i),m=await this.model({pixel_values:c,pixel_mask:d});let _=null;if(null!==a)_=this.subtasks_mapping[a];else for(let[e,t]of Object.entries(this.subtasks_mapping))if(t in this.processor.image_processor){_=this.processor.image_processor[t].bind(this.processor.image_processor),a=e;break}const h=this.model.config.id2label,f=[];if("panoptic"===a||"instance"===a){const e=_(m,t,s,r,o,n??l)[0],a=e.segmentation;for(const t of e.segments_info){const e=new Uint8ClampedArray(a.data.length);for(let s=0;s<a.data.length;++s)a.data[s]===t.id&&(e[s]=255);const s=new u.RawImage(e,a.dims[1],a.dims[0],1);f.push({score:t.score,label:h[t.label_id],mask:s})}}else{if("semantic"!==a)throw Error(`Subtask ${a} not supported.`);{const{segmentation:e,labels:t}=_(m,n??l)[0];for(const s of t){const t=new Uint8ClampedArray(e.data.length);for(let r=0;r<e.data.length;++r)e.data[r]===s&&(t[r]=255);const r=new u.RawImage(t,e.dims[1],e.dims[0],1);f.push({score:null,label:h[s],mask:r})}}}return f}}class z extends h{constructor(e){super(e)}async _call(e,t,{hypothesis_template:s="This is a photo of {}"}={}){const r=Array.isArray(e),o=await 
p(e),n=t.map((e=>s.replace("{}",e))),a=this.tokenizer(n,{padding:"siglip"!==this.model.config.model_type||"max_length",truncation:!0}),{pixel_values:i}=await this.processor(o),c=await this.model({...a,pixel_values:i}),d="siglip"===this.model.config.model_type?e=>e.sigmoid().data:e=>(0,l.softmax)(e.data),u=[];for(const e of c.logits_per_image){const s=[...d(e)].map(((e,s)=>({score:e,label:t[s]})));s.sort(((e,t)=>t.score-e.score)),u.push(s)}return r?u:u[0]}}class j extends h{constructor(e){super(e)}async _call(e,{threshold:t=.9,percentage:s=!1}={}){const r=Array.isArray(e);if(r&&1!==e.length)throw Error("Object detection pipeline currently only supports a batch size of 1.");const o=await p(e),n=s?null:o.map((e=>[e.height,e.width])),{pixel_values:a,pixel_mask:i}=await this.processor(o),l=await this.model({pixel_values:a,pixel_mask:i}),c=this.processor.image_processor.post_process_object_detection(l,t,n),d=this.model.config.id2label,u=c.map((e=>e.boxes.map(((t,r)=>({score:e.scores[r],label:d[e.classes[r]],box:_(t,!s)})))));return r?u:u[0]}}class N extends h{constructor(e){super(e)}async _call(e,t,{threshold:s=.1,top_k:r=null,percentage:o=!1}={}){const n=Array.isArray(e),a=await p(e),i=this.tokenizer(t,{padding:!0,truncation:!0}),l=await this.processor(a),c=[];for(let e=0;e<a.length;++e){const n=a[e],d=o?null:[[n.height,n.width]],u=l.pixel_values[e].unsqueeze_(0),p=await this.model({...i,pixel_values:u}),m=this.processor.image_processor.post_process_object_detection(p,s,d,!0)[0];let h=m.boxes.map(((e,s)=>({score:m.scores[s],label:t[m.classes[s]],box:_(e,!o)}))).sort(((e,t)=>t.score-e.score));null!==r&&(h=h.slice(0,r)),c.push(h)}return n?c:c[0]}}class V extends h{constructor(e){super(e)}async _call(e,t,s={}){const r=(await p(e))[0],{pixel_values:o}=await this.processor(r),n=`<s_docvqa><s_question>${t}</s_question><s_answer>`,a=this.tokenizer(n,{add_special_tokens:!1,padding:!0,truncation:!0}).input_ids,i=await 
this.model.generate({inputs:o,max_length:this.model.config.decoder.max_position_embeddings,decoder_input_ids:a,...s}),l=this.tokenizer.batch_decode(i)[0].match(/<s_answer>(.*?)<\/s_answer>/);let c=null;return l&&l.length>=2&&(c=l[1].trim()),[{answer:c}]}}class B extends h{DEFAULT_VOCODER_ID="Xenova/speecht5_hifigan";constructor(e){super(e),this.vocoder=e.vocoder??null}async _call(e,{speaker_embeddings:t=null}={}){return this.processor?this._call_text_to_spectrogram(e,{speaker_embeddings:t}):this._call_text_to_waveform(e)}async _call_text_to_waveform(e){const t=this.tokenizer(e,{padding:!0,truncation:!0}),{waveform:s}=await this.model(t),r=this.model.config.sampling_rate;return{audio:s.data,sampling_rate:r}}async _call_text_to_spectrogram(e,{speaker_embeddings:t}){if(this.vocoder||(console.log("No vocoder specified, using default HifiGan vocoder."),this.vocoder=await o.AutoModel.from_pretrained(this.DEFAULT_VOCODER_ID,{dtype:"fp32"})),("string"==typeof t||t instanceof URL)&&(t=new Float32Array(await(await fetch(t)).arrayBuffer())),t instanceof Float32Array)t=new d.Tensor("float32",t,[1,t.length]);else if(!(t instanceof d.Tensor))throw new Error("Speaker embeddings must be a `Tensor`, `Float32Array`, `string`, or `URL`.");const{input_ids:s}=this.tokenizer(e,{padding:!0,truncation:!0}),{waveform:r}=await this.model.generate_speech(s,t,{vocoder:this.vocoder}),n=this.processor.feature_extractor.config.sampling_rate;return{audio:r.data,sampling_rate:n}}}class O extends h{constructor(e){super(e)}async _call(e){const t=await p(e),s=await this.processor(t),r=await this.model(s),o=[];for(const e of r.reconstruction){const t=e.squeeze().clamp_(0,1).mul_(255).round_().to("uint8");o.push(u.RawImage.fromTensor(t))}return o.length>1?o:o[0]}}class D extends h{constructor(e){super(e)}async _call(e){const t=await p(e),s=await this.processor(t),{predicted_depth:r}=await this.model(s),o=[];for(let e=0;e<t.length;++e){const 
s=(0,d.interpolate)(r[e],t[e].size.reverse(),"bilinear",!1),n=s.mul_(255/(0,l.max)(s.data)[0]).to("uint8");o.push({predicted_depth:r[e],depth:u.RawImage.fromTensor(n)})}return o.length>1?o:o[0]}}const R=Object.freeze({"text-classification":{tokenizer:r.AutoTokenizer,pipeline:f,model:o.AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-finetuned-sst-2-english"},type:"text"},"token-classification":{tokenizer:r.AutoTokenizer,pipeline:g,model:o.AutoModelForTokenClassification,default:{model:"Xenova/bert-base-multilingual-cased-ner-hrl"},type:"text"},"question-answering":{tokenizer:r.AutoTokenizer,pipeline:M,model:o.AutoModelForQuestionAnswering,default:{model:"Xenova/distilbert-base-cased-distilled-squad"},type:"text"},"fill-mask":{tokenizer:r.AutoTokenizer,pipeline:w,model:o.AutoModelForMaskedLM,default:{model:"Xenova/bert-base-uncased"},type:"text"},summarization:{tokenizer:r.AutoTokenizer,pipeline:b,model:o.AutoModelForSeq2SeqLM,default:{model:"Xenova/distilbart-cnn-6-6"},type:"text"},translation:{tokenizer:r.AutoTokenizer,pipeline:x,model:o.AutoModelForSeq2SeqLM,default:{model:"Xenova/t5-small"},type:"text"},"text2text-generation":{tokenizer:r.AutoTokenizer,pipeline:T,model:o.AutoModelForSeq2SeqLM,default:{model:"Xenova/flan-t5-small"},type:"text"},"text-generation":{tokenizer:r.AutoTokenizer,pipeline:k,model:o.AutoModelForCausalLM,default:{model:"Xenova/gpt2"},type:"text"},"zero-shot-classification":{tokenizer:r.AutoTokenizer,pipeline:y,model:o.AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-mnli"},type:"text"},"audio-classification":{pipeline:C,model:o.AutoModelForAudioClassification,processor:n.AutoProcessor,default:{model:"Xenova/wav2vec2-base-superb-ks"},type:"audio"},"zero-shot-audio-classification":{tokenizer:r.AutoTokenizer,pipeline:S,model:o.AutoModel,processor:n.AutoProcessor,default:{model:"Xenova/clap-htsat-unfused"},type:"multimodal"},"automatic-speech-recognition":{tokenizer:r.AutoTokeniz
er,pipeline:A,model:[o.AutoModelForSpeechSeq2Seq,o.AutoModelForCTC],processor:n.AutoProcessor,default:{model:"Xenova/whisper-tiny.en"},type:"multimodal"},"text-to-audio":{tokenizer:r.AutoTokenizer,pipeline:B,model:[o.AutoModelForTextToWaveform,o.AutoModelForTextToSpectrogram],processor:[n.AutoProcessor,null],default:{model:"Xenova/speecht5_tts"},type:"text"},"image-to-text":{tokenizer:r.AutoTokenizer,pipeline:E,model:o.AutoModelForVision2Seq,processor:n.AutoProcessor,default:{model:"Xenova/vit-gpt2-image-captioning"},type:"multimodal"},"image-classification":{pipeline:L,model:o.AutoModelForImageClassification,processor:n.AutoProcessor,default:{model:"Xenova/vit-base-patch16-224"},type:"multimodal"},"image-segmentation":{pipeline:I,model:[o.AutoModelForImageSegmentation,o.AutoModelForSemanticSegmentation,o.AutoModelForUniversalSegmentation],processor:n.AutoProcessor,default:{model:"Xenova/detr-resnet-50-panoptic"},type:"multimodal"},"zero-shot-image-classification":{tokenizer:r.AutoTokenizer,pipeline:z,model:o.AutoModel,processor:n.AutoProcessor,default:{model:"Xenova/clip-vit-base-patch32"},type:"multimodal"},"object-detection":{pipeline:j,model:o.AutoModelForObjectDetection,processor:n.AutoProcessor,default:{model:"Xenova/detr-resnet-50"},type:"multimodal"},"zero-shot-object-detection":{tokenizer:r.AutoTokenizer,pipeline:N,model:o.AutoModelForZeroShotObjectDetection,processor:n.AutoProcessor,default:{model:"Xenova/owlvit-base-patch32"},type:"multimodal"},"document-question-answering":{tokenizer:r.AutoTokenizer,pipeline:V,model:o.AutoModelForDocumentQuestionAnswering,processor:n.AutoProcessor,default:{model:"Xenova/donut-base-finetuned-docvqa"},type:"multimodal"},"image-to-image":{pipeline:O,model:o.AutoModelForImageToImage,processor:n.AutoProcessor,default:{model:"Xenova/swin2SR-classical-sr-x2-64"},type:"image"},"depth-estimation":{pipeline:D,model:o.AutoModelForDepthEstimation,processor:n.AutoProcessor,default:{model:"Xenova/dpt-large"},type:"image"},"feature-ext
raction":{tokenizer:r.AutoTokenizer,pipeline:F,model:o.AutoModel,default:{model:"Xenova/all-MiniLM-L6-v2"},type:"text"},"image-feature-extraction":{processor:n.AutoProcessor,pipeline:v,model:[o.AutoModelForImageFeatureExtraction,o.AutoModel],default:{model:"Xenova/vit-base-patch16-224-in21k"},type:"image"}}),G=Object.freeze({"sentiment-analysis":"text-classification",ner:"token-classification",asr:"automatic-speech-recognition","text-to-speech":"text-to-audio",embeddings:"feature-extraction"});async function q(e,t=null,{progress_callback:s=null,config:r=null,cache_dir:o=null,local_files_only:n=!1,revision:a="main",device:l=null,dtype:c=null,model_file_name:d=null,session_options:u={}}={}){e=G[e]??e;const p=R[e.split("_",1)[0]];if(!p)throw Error(`Unsupported pipeline: ${e}. Must be one of [${Object.keys(R)}]`);t||(t=p.default.model,console.log(`No model specified. Using default model: "${t}".`));const m={progress_callback:s,config:r,cache_dir:o,local_files_only:n,revision:a,device:l,dtype:c,model_file_name:d,session_options:u},_=new Map([["tokenizer",p.tokenizer],["model",p.model],["processor",p.processor]]),h=await async function(e,t,s){const r=Object.create(null),o=[];for(const[n,a]of e.entries()){if(!a)continue;let e;e=Array.isArray(a)?new Promise((async(e,r)=>{let o;for(const n of a){if(null===n)return void e(null);try{return void e(await n.from_pretrained(t,s))}catch(e){if(e.message?.includes("Unsupported model type"))o=e;else{if(!e.message?.includes("Could not locate file"))return void r(e);o=e}}}r(o)})):a.from_pretrained(t,s),r[n]=e,o.push(e)}await Promise.all(o);for(const[e,t]of Object.entries(r))r[e]=await t;return r}(_,t,m);h.task=e,(0,i.dispatchCallback)(s,{status:"ready",task:e,model:t});return new(0,p.pipeline)(h)}},"./src/tokenizers.js":
320
- /*!***************************!*\
321
- !*** ./src/tokenizers.js ***!
322
- \***************************/(e,t,s)=>{s.r(t),s.d(t,{AlbertTokenizer:()=>xe,AutoTokenizer:()=>_t,BartTokenizer:()=>Ne,BertTokenizer:()=>be,BlenderbotSmallTokenizer:()=>lt,BlenderbotTokenizer:()=>it,BloomTokenizer:()=>De,CLIPTokenizer:()=>rt,CamembertTokenizer:()=>Ee,CodeGenTokenizer:()=>st,CodeLlamaTokenizer:()=>qe,CohereTokenizer:()=>pt,ConvBertTokenizer:()=>Ce,DebertaTokenizer:()=>ye,DebertaV2Tokenizer:()=>Fe,DistilBertTokenizer:()=>Ae,ElectraTokenizer:()=>Ie,EsmTokenizer:()=>Xe,FalconTokenizer:()=>Ue,GPT2Tokenizer:()=>je,GPTNeoXTokenizer:()=>Qe,GemmaTokenizer:()=>Je,Grok1Tokenizer:()=>Ye,HerbertTokenizer:()=>ve,LlamaTokenizer:()=>Ge,M2M100Tokenizer:()=>et,MBart50Tokenizer:()=>Be,MBartTokenizer:()=>Ve,MPNetTokenizer:()=>$e,MarianTokenizer:()=>nt,MgpstrTokenizer:()=>mt,MobileBertTokenizer:()=>Pe,NllbTokenizer:()=>Ze,NougatTokenizer:()=>dt,PreTrainedTokenizer:()=>Te,Qwen2Tokenizer:()=>He,RoFormerTokenizer:()=>Se,RobertaTokenizer:()=>Oe,SiglipTokenizer:()=>ot,SpeechT5Tokenizer:()=>ct,SqueezeBertTokenizer:()=>ke,T5Tokenizer:()=>ze,TokenizerModel:()=>P,VitsTokenizer:()=>ut,Wav2Vec2CTCTokenizer:()=>at,WhisperTokenizer:()=>tt,XLMRobertaTokenizer:()=>We,XLMTokenizer:()=>Le,is_chinese_char:()=>g});var r=s(/*! ./utils/generic.js */"./src/utils/generic.js"),o=s(/*! ./utils/core.js */"./src/utils/core.js"),n=s(/*! ./utils/hub.js */"./src/utils/hub.js"),a=s(/*! ./utils/maths.js */"./src/utils/maths.js"),i=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),l=s(/*! ./utils/data-structures.js */"./src/utils/data-structures.js"),c=s(/*! @huggingface/jinja */"./node_modules/@huggingface/jinja/dist/index.js"),d=s(/*! ./models/whisper/common_whisper.js */"./src/models/whisper/common_whisper.js");s(/*! 
./utils/constants.js */"./src/utils/constants.js");async function u(e,t){const s=await Promise.all([(0,n.getModelJSON)(e,"tokenizer.json",!0,t),(0,n.getModelJSON)(e,"tokenizer_config.json",!0,t)]);return null!==t.legacy&&(s[1].legacy=t.legacy),s}function p(e,t=!0){if(void 0!==e.Regex){let t=e.Regex.replace(/\\([#&~])/g,"$1");for(const[e,s]of b)t=t.replaceAll(e,s);return new RegExp(t,"gu")}if(void 0!==e.String){const s=(0,o.escapeRegExp)(e.String);return new RegExp(t?s:`(${s})`,"gu")}return console.warn("Unknown pattern type:",e),null}function m(e){return new Map(Object.entries(e))}function _(e){const t=e.dims;switch(t.length){case 1:return e.tolist();case 2:if(1!==t[0])throw new Error("Unable to decode tensor with `batch size !== 1`. Use `tokenizer.batch_decode(...)` for batched inputs.");return e.tolist()[0];default:throw new Error(`Expected tensor to have 1-2 dimensions, got ${t.length}.`)}}function h(e){return e.replace(/ \./g,".").replace(/ \?/g,"?").replace(/ \!/g,"!").replace(/ ,/g,",").replace(/ \' /g,"'").replace(/ n\'t/g,"n't").replace(/ \'m/g,"'m").replace(/ \'s/g,"'s").replace(/ \'ve/g,"'ve").replace(/ \'re/g,"'re")}function f(e){return e.replace(/\p{M}/gu,"")}function g(e){return e>=19968&&e<=40959||e>=13312&&e<=19903||e>=131072&&e<=173791||e>=173824&&e<=177983||e>=177984&&e<=178207||e>=178208&&e<=183983||e>=63744&&e<=64255||e>=194560&&e<=195103}const M="\\p{P}\\u0021-\\u002F\\u003A-\\u0040\\u005B-\\u0060\\u007B-\\u007E",w=new RegExp(`^[${M}]+$`,"gu"),T=".,!?…。,、।۔،",b=new Map([["(?i:'s|'t|'re|'ve|'m|'ll|'d)","(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))"],[` ?[^(\\s|[${T}])]+`,` ?[^\\s${T}]+`]]);class x{constructor(e){this.content=e.content,this.id=e.id,this.single_word=e.single_word??!1,this.lstrip=e.lstrip??!1,this.rstrip=e.rstrip??!1,this.special=e.special??!1,this.normalized=e.normalized??null}}class P extends r.Callable{constructor(e){super(),this.config=e,this.vocab=[],this.tokens_to_ids=new Map,this.unk_token_id=void 
0,this.unk_token=void 0,this.end_of_word_suffix=void 0,this.fuse_unk=this.config.fuse_unk??!1}static fromConfig(e,...t){switch(e.type){case"WordPiece":return new k(e);case"Unigram":return new y(e,...t);case"BPE":return new C(e);default:if(e.vocab)return Array.isArray(e.vocab)?new y(e,...t):new S(e,...t);throw new Error(`Unknown TokenizerModel type: ${e.type}`)}}_call(e){return e=this.encode(e),this.fuse_unk&&(e=function(e,t,s){const r=[];let o=0;for(;o<e.length;)if(r.push(e[o]),(t.get(e[o])??s)===s)for(;++o<e.length&&(t.get(e[o])??s)===s;)t.get(r.at(-1))!==s&&(r[r.length-1]+=e[o]);else++o;return r}(e,this.tokens_to_ids,this.unk_token_id)),e}encode(e){throw Error("encode should be implemented in subclass.")}convert_tokens_to_ids(e){return e.map((e=>this.tokens_to_ids.get(e)??this.unk_token_id))}convert_ids_to_tokens(e){return e.map((e=>this.vocab[e]??this.unk_token))}}class k extends P{constructor(e){super(e),this.tokens_to_ids=m(e.vocab),this.unk_token_id=this.tokens_to_ids.get(e.unk_token),this.unk_token=e.unk_token,this.max_input_chars_per_word=e.max_input_chars_per_word??100,this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e}encode(e){const t=[];for(const s of e){const e=[...s];if(e.length>this.max_input_chars_per_word){t.push(this.unk_token);continue}let r=!1,o=0;const n=[];for(;o<e.length;){let t=e.length,s=null;for(;o<t;){let r=e.slice(o,t).join("");if(o>0&&(r=this.config.continuing_subword_prefix+r),this.tokens_to_ids.has(r)){s=r;break}--t}if(null===s){r=!0;break}n.push(s),o=t}r?t.push(this.unk_token):t.push(...n)}return t}}class y extends P{constructor(e,t){super(e);const s=e.vocab.length;this.vocab=new Array(s),this.scores=new Array(s);for(let t=0;t<s;++t){const s=e.vocab[t];this.vocab[t]=s[0],this.scores[t]=s[1]}this.unk_token_id=e.unk_id,this.unk_token=this.vocab[e.unk_id],this.tokens_to_ids=new Map(this.vocab.map(((e,t)=>[e,t]))),this.bos_token=" 
",this.bos_token_id=this.tokens_to_ids.get(this.bos_token),this.eos_token=t.eos_token,this.eos_token_id=this.tokens_to_ids.get(this.eos_token),this.unk_token=this.vocab[this.unk_token_id],this.minScore=(0,a.min)(this.scores)[0],this.unk_score=this.minScore-10,this.scores[this.unk_token_id]=this.unk_score,this.trie=new l.CharTrie,this.trie.extend(this.vocab),this.fuse_unk=!0}populateNodes(e){const t=e.chars;let s=0;for(;s<t.length;){let r=!1;const n=[],a=t.slice(s).join(""),i=this.trie.commonPrefixSearch(a);for(const t of i){n.push(t);const a=this.tokens_to_ids.get(t),i=this.scores[a],l=(0,o.len)(t);e.insert(s,l,i,a),r||1!==l||(r=!0)}r||e.insert(s,1,this.unk_score,this.unk_token_id),s+=1}}tokenize(e){const t=new l.TokenLattice(e,this.bos_token_id,this.eos_token_id);return this.populateNodes(t),t.tokens()}encode(e){const t=[];for(const s of e){const e=this.tokenize(s);t.push(...e)}return t}}const F=(()=>{const e=[...Array.from({length:"~".charCodeAt(0)-"!".charCodeAt(0)+1},((e,t)=>t+"!".charCodeAt(0))),...Array.from({length:"¬".charCodeAt(0)-"¡".charCodeAt(0)+1},((e,t)=>t+"¡".charCodeAt(0))),...Array.from({length:"ÿ".charCodeAt(0)-"®".charCodeAt(0)+1},((e,t)=>t+"®".charCodeAt(0)))],t=e.slice();let s=0;for(let r=0;r<256;++r)e.includes(r)||(e.push(r),t.push(256+s),s+=1);const r=t.map((e=>String.fromCharCode(e)));return Object.fromEntries(e.map(((e,t)=>[e,r[t]])))})(),v=(0,o.reverseDictionary)(F);class C extends P{constructor(e){super(e),this.tokens_to_ids=m(e.vocab),this.unk_token_id=this.tokens_to_ids.get(e.unk_token),this.unk_token=e.unk_token,this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e;const t=Array.isArray(e.merges[0]);this.merges=t?e.merges:e.merges.map((e=>e.split(" ",2))),this.bpe_ranks=new 
Map(this.merges.map(((e,t)=>[JSON.stringify(e),t]))),this.end_of_word_suffix=e.end_of_word_suffix,this.continuing_subword_suffix=e.continuing_subword_suffix??null,this.byte_fallback=this.config.byte_fallback??!1,this.byte_fallback&&(this.text_encoder=new TextEncoder),this.ignore_merges=this.config.ignore_merges??!1,this.cache=new Map}bpe(e){if(0===e.length)return[];const t=this.cache.get(e);if(void 0!==t)return t;const s=Array.from(e);this.end_of_word_suffix&&(s[s.length-1]+=this.end_of_word_suffix);let r=[];if(s.length>1){const e=new l.PriorityQueue(((e,t)=>e.score<t.score));let t={token:s[0],bias:0,prev:null,next:null},o=t;for(let t=1;t<s.length;++t){const r={bias:t/s.length,token:s[t],prev:o,next:null};o.next=r,this._add_node(e,o),o=r}for(;!e.isEmpty();){const s=e.pop();if(s.deleted||!s.next||s.next.deleted)continue;if(s.deleted=!0,s.next.deleted=!0,s.prev){const e={...s.prev};s.prev.deleted=!0,s.prev=e,e.prev?e.prev.next=e:t=e}const r={token:s.token+s.next.token,bias:s.bias,prev:s.prev,next:s.next.next};r.prev?(r.prev.next=r,this._add_node(e,r.prev)):t=r,r.next&&(r.next.prev=r,this._add_node(e,r))}for(let e=t;null!==e;e=e.next)r.push(e.token)}else r=s;if(this.continuing_subword_suffix)for(let e=0;e<r.length-1;++e)r[e]+=this.continuing_subword_suffix;return this.cache.set(e,r),r}_add_node(e,t){const s=this.bpe_ranks.get(JSON.stringify([t.token,t.next.token]));void 0!==s&&(t.score=s+t.bias,e.push(t))}encode(e){const t=[];for(const s of e){if(this.ignore_merges&&this.tokens_to_ids.has(s)){t.push(s);continue}const e=this.bpe(s);for(const s of e)if(this.tokens_to_ids.has(s))t.push(s);else if(this.byte_fallback){const e=Array.from(this.text_encoder.encode(s)).map((e=>`<0x${e.toString(16).toUpperCase().padStart(2,"0")}>`));e.every((e=>this.tokens_to_ids.has(e)))?t.push(...e):t.push(this.unk_token)}else t.push(this.unk_token)}return t}}class S extends 
P{constructor(e,t){super(e),this.tokens_to_ids=m(t.target_lang?e.vocab[t.target_lang]:e.vocab),this.bos_token=t.bos_token,this.bos_token_id=this.tokens_to_ids.get(this.bos_token),this.eos_token=t.eos_token,this.eos_token_id=this.tokens_to_ids.get(this.eos_token),this.pad_token=t.pad_token,this.pad_token_id=this.tokens_to_ids.get(this.pad_token),this.unk_token=t.unk_token,this.unk_token_id=this.tokens_to_ids.get(this.unk_token),this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e}encode(e){return e}}class A extends r.Callable{constructor(e){super(),this.config=e}static fromConfig(e){if(null===e)return null;switch(e.type){case"BertNormalizer":return new D(e);case"Precompiled":return new pe(e);case"Sequence":return new O(e);case"Replace":return new E(e);case"NFC":return new L(e);case"NFKC":return new I(e);case"NFKD":return new z(e);case"Strip":return new j(e);case"StripAccents":return new N(e);case"Lowercase":return new V(e);case"Prepend":return new B(e);default:throw new Error(`Unknown Normalizer type: ${e.type}`)}}normalize(e){throw Error("normalize should be implemented in subclass.")}_call(e){return this.normalize(e)}}class E extends A{normalize(e){const t=p(this.config.pattern);return null===t?e:e.replaceAll(t,this.config.content)}}class L extends A{normalize(e){return e=e.normalize("NFC")}}class I extends A{normalize(e){return e=e.normalize("NFKC")}}class z extends A{normalize(e){return e=e.normalize("NFKD")}}class j extends A{normalize(e){return this.config.strip_left&&this.config.strip_right?e=e.trim():(this.config.strip_left&&(e=e.trimStart()),this.config.strip_right&&(e=e.trimEnd())),e}}class N extends A{normalize(e){return e=f(e)}}class V extends A{normalize(e){return e=e.toLowerCase()}}class B extends A{normalize(e){return e=this.config.prepend+e}}class O extends A{constructor(e){super(e),this.normalizers=e.normalizers.map((e=>A.fromConfig(e)))}normalize(e){return 
this.normalizers.reduce(((e,t)=>t.normalize(e)),e)}}class D extends A{_tokenize_chinese_chars(e){const t=[];for(let s=0;s<e.length;++s){const r=e[s];g(r.charCodeAt(0))?(t.push(" "),t.push(r),t.push(" ")):t.push(r)}return t.join("")}stripAccents(e){return e.normalize("NFD").replace(/\p{Mn}/gu,"")}_is_control(e){switch(e){case"\t":case"\n":case"\r":return!1;default:return/^\p{Cc}|\p{Cf}|\p{Co}|\p{Cs}$/u.test(e)}}_clean_text(e){const t=[];for(const s of e){const e=s.charCodeAt(0);0===e||65533===e||this._is_control(s)||(/^\s$/.test(s)?t.push(" "):t.push(s))}return t.join("")}normalize(e){return this.config.clean_text&&(e=this._clean_text(e)),this.config.handle_chinese_chars&&(e=this._tokenize_chinese_chars(e)),this.config.lowercase?(e=e.toLowerCase(),!1!==this.config.strip_accents&&(e=this.stripAccents(e))):this.config.strip_accents&&(e=this.stripAccents(e)),e}}class R extends r.Callable{static fromConfig(e){if(null===e)return null;switch(e.type){case"BertPreTokenizer":return new G(e);case"Sequence":return new me(e);case"Whitespace":return new _e(e);case"WhitespaceSplit":return new he(e);case"Metaspace":return new de(e);case"ByteLevel":return new q(e);case"Split":return new W(e);case"Punctuation":return new $(e);case"Digits":return new U(e);case"Replace":return new fe(e);default:throw new Error(`Unknown PreTokenizer type: ${e.type}`)}}pre_tokenize_text(e,t){throw Error("pre_tokenize_text should be implemented in subclass.")}pre_tokenize(e,t){return(Array.isArray(e)?e.map((e=>this.pre_tokenize_text(e,t))):this.pre_tokenize_text(e,t)).flat()}_call(e,t){return this.pre_tokenize(e,t)}}class G extends R{constructor(e){super(),this.pattern=new RegExp(`[^\\s${M}]+|[${M}]`,"gu")}pre_tokenize_text(e,t){return e.trim().match(this.pattern)||[]}}class q extends R{constructor(e){super(),this.config=e,this.add_prefix_space=this.config.add_prefix_space,this.trim_offsets=this.config.trim_offsets,this.use_regex=this.config.use_regex??!0,this.pattern=/'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| 
?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+/gu,this.byte_encoder=F,this.text_encoder=new TextEncoder}pre_tokenize_text(e,t){this.add_prefix_space&&!e.startsWith(" ")&&(e=" "+e);return(this.use_regex?e.match(this.pattern)||[]:[e]).map((e=>Array.from(this.text_encoder.encode(e),(e=>this.byte_encoder[e])).join("")))}}class W extends R{constructor(e){super(),this.config=e,this.pattern=p(this.config.pattern,this.config.invert)}pre_tokenize_text(e,t){return null===this.pattern?[]:this.config.invert?e.match(this.pattern)||[]:"removed"===this.config.behavior?.toLowerCase()?e.split(this.pattern).filter((e=>e)):function(e,t){const s=[];let r=0;for(const o of e.matchAll(t)){const t=o[0];r<o.index&&s.push(e.slice(r,o.index)),t.length>0&&s.push(t),r=o.index+t.length}return r<e.length&&s.push(e.slice(r)),s}(e,this.pattern)}}class $ extends R{constructor(e){super(),this.config=e,this.pattern=new RegExp(`[^${M}]+|[${M}]+`,"gu")}pre_tokenize_text(e,t){return e.match(this.pattern)||[]}}class U extends R{constructor(e){super(),this.config=e;const t="[^\\d]+|\\d"+(this.config.individual_digits?"":"+");this.pattern=new RegExp(t,"gu")}pre_tokenize_text(e,t){return e.match(this.pattern)||[]}}class Q extends r.Callable{constructor(e){super(),this.config=e}static fromConfig(e){if(null===e)return null;switch(e.type){case"TemplateProcessing":return new J(e);case"ByteLevel":return new Y(e);case"RobertaProcessing":return new H(e);case"BertProcessing":return new X(e);case"Sequence":return new K(e);default:throw new Error(`Unknown PostProcessor type: ${e.type}`)}}post_process(e,...t){throw Error("post_process should be implemented in subclass.")}_call(e,...t){return this.post_process(e,...t)}}class X extends Q{constructor(e){super(e),this.cls=e.cls[0],this.sep=e.sep[0]}post_process(e,t=null,{add_special_tokens:s=!0}={}){s&&(e=(0,o.mergeArrays)([this.cls],e,[this.sep]));let r=new Array(e.length).fill(0);if(null!==t){const n=s&&this instanceof 
H?[this.sep]:[],a=s?[this.sep]:[];e=(0,o.mergeArrays)(e,n,t,a),r=(0,o.mergeArrays)(r,new Array(t.length+n.length+a.length).fill(1))}return{tokens:e,token_type_ids:r}}}class H extends X{}class J extends Q{constructor(e){super(e),this.single=e.single,this.pair=e.pair}post_process(e,t=null,{add_special_tokens:s=!0}={}){const r=null===t?this.single:this.pair;let n=[],a=[];for(const i of r)"SpecialToken"in i?s&&(n.push(i.SpecialToken.id),a.push(i.SpecialToken.type_id)):"Sequence"in i&&("A"===i.Sequence.id?(n=(0,o.mergeArrays)(n,e),a=(0,o.mergeArrays)(a,new Array(e.length).fill(i.Sequence.type_id))):"B"===i.Sequence.id&&(n=(0,o.mergeArrays)(n,t),a=(0,o.mergeArrays)(a,new Array(t.length).fill(i.Sequence.type_id))));return{tokens:n,token_type_ids:a}}}class Y extends Q{post_process(e,t=null){return t&&(e=(0,o.mergeArrays)(e,t)),{tokens:e}}}class K extends Q{constructor(e){super(e),this.processors=e.processors.map((e=>Q.fromConfig(e)))}post_process(e,t=null,s={}){let r;for(const o of this.processors)if(o instanceof Y){if(e=o.post_process(e).tokens,t){t=o.post_process(t).tokens}}else{const n=o.post_process(e,t,s);e=n.tokens,r=n.token_type_ids}return{tokens:e,token_type_ids:r}}}class Z extends r.Callable{constructor(e){super(),this.config=e,this.added_tokens=[],this.end_of_word_suffix=null,this.trim_offsets=e.trim_offsets}static fromConfig(e){if(null===e)return null;switch(e.type){case"WordPiece":return new oe(e);case"Metaspace":return new ue(e);case"ByteLevel":return new ne(e);case"Replace":return new ee(e);case"ByteFallback":return new te(e);case"Fuse":return new se(e);case"Strip":return new re(e);case"Sequence":return new ie(e);case"CTC":return new ae(e);case"BPEDecoder":return new le(e);default:throw new Error(`Unknown Decoder type: ${e.type}`)}}_call(e){return this.decode(e)}decode(e){return this.decode_chain(e).join("")}decode_chain(e){throw Error("`decode_chain` should be implemented in subclass.")}}class ee extends Z{decode_chain(e){const 
t=p(this.config.pattern);return null===t?e:e.map((e=>e.replaceAll(t,this.config.content)))}}class te extends Z{constructor(e){super(e),this.text_decoder=new TextDecoder}decode_chain(e){const t=[];let s=[];for(const r of e){let e=null;if(6===r.length&&r.startsWith("<0x")&&r.endsWith(">")){const t=parseInt(r.slice(3,5),16);isNaN(t)||(e=t)}if(null!==e)s.push(e);else{if(s.length>0){const e=this.text_decoder.decode(Uint8Array.from(s));t.push(e),s=[]}t.push(r)}}if(s.length>0){const e=this.text_decoder.decode(Uint8Array.from(s));t.push(e),s=[]}return t}}class se extends Z{decode_chain(e){return[e.join("")]}}class re extends Z{constructor(e){super(e),this.content=this.config.content,this.start=this.config.start,this.stop=this.config.stop}decode_chain(e){return e.map((e=>{let t=0;for(let s=0;s<this.start&&e[s]===this.content;++s)t=s+1;let s=e.length;for(let t=0;t<this.stop;++t){const r=e.length-t-1;if(e[r]!==this.content)break;s=r}return e.slice(t,s)}))}}class oe extends Z{constructor(e){super(e),this.cleanup=e.cleanup}decode_chain(e){return e.map(((e,t)=>(0!==t&&(e=e.startsWith(this.config.prefix)?e.replace(this.config.prefix,""):" "+e),this.cleanup&&(e=h(e)),e)))}}class ne extends Z{constructor(e){super(e),this.byte_decoder=v,this.text_decoder=new TextDecoder("utf-8",{fatal:!1,ignoreBOM:!0}),this.end_of_word_suffix=null}convert_tokens_to_string(e){const t=e.join(""),s=new Uint8Array([...t].map((e=>this.byte_decoder[e])));return this.text_decoder.decode(s)}decode_chain(e){const t=[];let s=[];for(const r of e)void 0!==this.added_tokens.find((e=>e.content===r))?(s.length>0&&(t.push(this.convert_tokens_to_string(s)),s=[]),t.push(r)):s.push(r);return s.length>0&&t.push(this.convert_tokens_to_string(s)),t}}class ae extends Z{constructor(e){super(e),this.pad_token=this.config.pad_token,this.word_delimiter_token=this.config.word_delimiter_token,this.cleanup=this.config.cleanup}convert_tokens_to_string(e){if(0===e.length)return"";const t=[e[0]];for(let 
s=1;s<e.length;++s)e[s]!==t.at(-1)&&t.push(e[s]);let s=t.filter((e=>e!==this.pad_token)).join("");return this.cleanup&&(s=h(s).replaceAll(this.word_delimiter_token," ").trim()),s}decode_chain(e){return[this.convert_tokens_to_string(e)]}}class ie extends Z{constructor(e){super(e),this.decoders=e.decoders.map((e=>Z.fromConfig(e)))}decode_chain(e){return this.decoders.reduce(((e,t)=>t.decode_chain(e)),e)}}class le extends Z{constructor(e){super(e),this.suffix=this.config.suffix}decode_chain(e){return e.map(((t,s)=>t.replaceAll(this.suffix,s===e.length-1?"":" ")))}}class ce extends Z{decode_chain(e){let t="";for(let s=1;s<e.length;s+=2)t+=e[s];return[t]}}class de extends R{constructor(e){super(),this.addPrefixSpace=e.add_prefix_space,this.replacement=e.replacement,this.strRep=e.str_rep||this.replacement,this.prepend_scheme=e.prepend_scheme??"always"}pre_tokenize_text(e,{section_index:t}={}){let s=e.replaceAll(" ",this.strRep);return this.addPrefixSpace&&!s.startsWith(this.replacement)&&("always"===this.prepend_scheme||"first"===this.prepend_scheme&&0===t)&&(s=this.strRep+s),[s]}}class ue extends Z{constructor(e){super(e),this.addPrefixSpace=e.add_prefix_space,this.replacement=e.replacement}decode_chain(e){const t=[];for(let s=0;s<e.length;++s){let r=e[s].replaceAll(this.replacement," ");this.addPrefixSpace&&0==s&&r.startsWith(" ")&&(r=r.substring(1)),t.push(r)}return t}}class pe extends A{constructor(e){super(e),this.charsmap=e.precompiled_charsmap}normalize(e){if((e=(e=e.replace(/[\u0001-\u0008\u000B\u000E-\u001F\u007F\u008F\u009F]/gm,"")).replace(/[\u0009\u000A\u000C\u000D\u00A0\u1680\u2000-\u200F\u2028\u2029\u202F\u205F\u2581\u3000\uFEFF\uFFFD]/gm," ")).includes("~")){const t=e.split("~");e=t.map((e=>e.normalize("NFKC"))).join("~")}else e=e.normalize("NFKC");return e}}class me extends R{constructor(e){super(),this.tokenizers=e.pretokenizers.map((e=>R.fromConfig(e)))}pre_tokenize_text(e,t){return this.tokenizers.reduce(((e,s)=>s.pre_tokenize(e,t)),[e])}}class _e 
extends R{constructor(e){super()}pre_tokenize_text(e,t){return e.match(/\w+|[^\w\s]+/g)||[]}}class he extends R{constructor(e){super()}pre_tokenize_text(e,t){return function(e){return e.match(/\S+/g)||[]}(e)}}class fe extends R{constructor(e){super(),this.config=e,this.pattern=p(this.config.pattern),this.content=this.config.content}pre_tokenize_text(e,t){return null===this.pattern?[e]:[e.replaceAll(this.pattern,this.config.content)]}}const ge=["bos_token","eos_token","unk_token","sep_token","pad_token","cls_token","mask_token"];function Me(e,t,s,r){for(const n of Object.keys(e)){const a=t-e[n].length,i=s(n),l=new Array(a).fill(i);e[n]="right"===r?(0,o.mergeArrays)(e[n],l):(0,o.mergeArrays)(l,e[n])}}function we(e,t){for(const s of Object.keys(e))e[s].length=t}class Te extends r.Callable{return_token_type_ids=!1;padding_side="right";constructor(e,t){super(),this._tokenizer_config=t,this.normalizer=A.fromConfig(e.normalizer),this.pre_tokenizer=R.fromConfig(e.pre_tokenizer),this.model=P.fromConfig(e.model,t),this.post_processor=Q.fromConfig(e.post_processor),this.decoder=Z.fromConfig(e.decoder),this.special_tokens=[],this.all_special_ids=[],this.added_tokens=[];for(const t of e.added_tokens){const e=new x(t);this.added_tokens.push(e),this.model.tokens_to_ids.set(e.content,e.id),this.model.vocab[e.id]=e.content,e.special&&(this.special_tokens.push(e.content),this.all_special_ids.push(e.id))}if(this.additional_special_tokens=t.additional_special_tokens??[],this.special_tokens.push(...this.additional_special_tokens),this.special_tokens=[...new Set(this.special_tokens)],this.decoder&&(this.decoder.added_tokens=this.added_tokens,this.decoder.end_of_word_suffix=this.model.end_of_word_suffix),this.added_tokens_regex=this.added_tokens.length>0?new 
RegExp(this.added_tokens.slice().sort(((e,t)=>t.content.length-e.content.length)).map((e=>`${e.lstrip?"\\s*":""}(${(0,o.escapeRegExp)(e.content)})${e.rstrip?"\\s*":""}`)).join("|")):null,this.mask_token=this.getToken("mask_token"),this.mask_token_id=this.model.tokens_to_ids.get(this.mask_token),this.pad_token=this.getToken("pad_token","eos_token"),this.pad_token_id=this.model.tokens_to_ids.get(this.pad_token),this.sep_token=this.getToken("sep_token"),this.sep_token_id=this.model.tokens_to_ids.get(this.sep_token),this.unk_token=this.getToken("unk_token"),this.unk_token_id=this.model.tokens_to_ids.get(this.unk_token),this.model_max_length=t.model_max_length,this.remove_space=t.remove_space,this.clean_up_tokenization_spaces=t.clean_up_tokenization_spaces??!0,this.do_lowercase_and_remove_accent=t.do_lowercase_and_remove_accent??!1,t.padding_side&&(this.padding_side=t.padding_side),this.legacy=!1,this.chat_template=t.chat_template??null,Array.isArray(this.chat_template)){const e=Object.create(null);for(const{name:t,template:s}of this.chat_template){if("string"!=typeof t||"string"!=typeof s)throw new Error('Chat template must be a list of objects with "name" and "template" properties');e[t]=s}this.chat_template=e}this._compiled_template_cache=new Map}getToken(...e){for(const t of e){const e=this._tokenizer_config[t];if(e){if("object"==typeof e){if("AddedToken"===e.__type)return e.content;throw Error(`Unknown token: ${e}`)}return e}}return null}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:r=null,local_files_only:o=!1,revision:n="main",legacy:a=null}={}){return new this(...await u(e,{progress_callback:t,config:s,cache_dir:r,local_files_only:o,revision:n,legacy:a}))}_call(e,{text_pair:t=null,add_special_tokens:s=!0,padding:r=!1,truncation:o=null,max_length:n=null,return_tensor:l=!0,return_token_type_ids:c=null}={}){const d=Array.isArray(e);let u;if(d){if(0===e.length)throw Error("text array must be 
non-empty");if(null!==t){if(!Array.isArray(t))throw Error("text_pair must also be an array");if(e.length!==t.length)throw Error("text and text_pair must have the same length");u=e.map(((e,r)=>this._encode_plus(e,{text_pair:t[r],add_special_tokens:s,return_token_type_ids:c})))}else u=e.map((e=>this._encode_plus(e,{add_special_tokens:s,return_token_type_ids:c})))}else{if(null==e)throw Error("text may not be null or undefined");if(Array.isArray(t))throw Error("When specifying `text_pair`, since `text` is a string, `text_pair` must also be a string (i.e., not an array).");u=[this._encode_plus(e,{text_pair:t,add_special_tokens:s,return_token_type_ids:c})]}if(null===n?n="max_length"===r?this.model_max_length:(0,a.max)(u.map((e=>e.input_ids.length)))[0]:o||console.warn("Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=true` to explicitly truncate examples to max length."),n=Math.min(n,this.model_max_length??1/0),r||o)for(let e=0;e<u.length;++e)u[e].input_ids.length!==n&&(u[e].input_ids.length>n?o&&we(u[e],n):r&&Me(u[e],n,(e=>"input_ids"===e?this.pad_token_id:0),this.padding_side));const p={};if(l){if((!r||!o)&&u.some((e=>{for(const t of Object.keys(e))if(e[t].length!==u[0][t]?.length)return!0;return!1})))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=true' and 'truncation=true' to have batched tensors with the same length.");const e=[u.length,u[0].input_ids.length];for(const t of Object.keys(u[0]))p[t]=new i.Tensor("int64",BigInt64Array.from(u.flatMap((e=>e[t])).map(BigInt)),e)}else{for(const e of Object.keys(u[0]))p[e]=u.map((t=>t[e]));if(!d)for(const e of Object.keys(p))p[e]=p[e][0]}return p}_encode_text(e){if(null===e)return null;const t=(this.added_tokens_regex?e.split(this.added_tokens_regex).filter((e=>e)):[e]).map(((e,t)=>{if(void 0!==this.added_tokens.find((t=>t.content===e)))return e;{if(!0===this.remove_space&&(e=e.trim().split(/\s+/).join(" 
")),this.do_lowercase_and_remove_accent&&(e=function(e){return f(e.toLowerCase())}(e)),null!==this.normalizer&&(e=this.normalizer(e)),0===e.length)return[];const s=null!==this.pre_tokenizer?this.pre_tokenizer(e,{section_index:t}):[e];return this.model(s)}})).flat();return t}_encode_plus(e,{text_pair:t=null,add_special_tokens:s=!0,return_token_type_ids:r=null}={}){const{tokens:o,token_type_ids:n}=this._tokenize_helper(e,{pair:t,add_special_tokens:s}),a=this.model.convert_tokens_to_ids(o),i={input_ids:a,attention_mask:new Array(a.length).fill(1)};return(r??this.return_token_type_ids)&&n&&(i.token_type_ids=n),i}_tokenize_helper(e,{pair:t=null,add_special_tokens:s=!1}={}){const r=this._encode_text(e),n=this._encode_text(t);return this.post_processor?this.post_processor(r,n,{add_special_tokens:s}):{tokens:(0,o.mergeArrays)(r??[],n??[])}}tokenize(e,{pair:t=null,add_special_tokens:s=!1}={}){return this._tokenize_helper(e,{pair:t,add_special_tokens:s}).tokens}encode(e,{text_pair:t=null,add_special_tokens:s=!0,return_token_type_ids:r=null}={}){return this._encode_plus(e,{text_pair:t,add_special_tokens:s,return_token_type_ids:r}).input_ids}batch_decode(e,t={}){return e instanceof i.Tensor&&(e=e.tolist()),e.map((e=>this.decode(e,t)))}decode(e,t={}){if(e instanceof i.Tensor&&(e=_(e)),!Array.isArray(e)||0===e.length||!(0,o.isIntegralNumber)(e[0]))throw Error("token_ids must be a non-empty array of integers.");return this.decode_single(e,t)}decode_single(e,{skip_special_tokens:t=!1,clean_up_tokenization_spaces:s=null}){let r=this.model.convert_ids_to_tokens(e);t&&(r=r.filter((e=>!this.special_tokens.includes(e))));let o=this.decoder?this.decoder(r):r.join(" ");return this.decoder&&this.decoder.end_of_word_suffix&&(o=o.replaceAll(this.decoder.end_of_word_suffix," "),t&&(o=o.trim())),(s??this.clean_up_tokenization_spaces)&&(o=h(o)),o}get_chat_template({chat_template:e=null,tools:t=null}={}){if(this.chat_template&&"object"==typeof this.chat_template){const 
s=this.chat_template;if(null!==e&&Object.hasOwn(s,e))e=s[e];else if(null===e)if(null!==t&&"tool_use"in s)e=s.tool_use;else{if(!("default"in s))throw Error(`This model has multiple chat templates with no default specified! Please either pass a chat template or the name of the template you wish to use to the 'chat_template' argument. Available template names are ${Object.keys(s).sort()}.`);e=s.default}}else if(null===e){if(!this.chat_template)throw Error("Cannot use apply_chat_template() because tokenizer.chat_template is not set and no template argument was passed! For information about writing templates and setting the tokenizer.chat_template attribute, please see the documentation at https://huggingface.co/docs/transformers/main/en/chat_templating");e=this.chat_template}return e}apply_chat_template(e,{tools:t=null,documents:s=null,chat_template:r=null,add_generation_prompt:o=!1,tokenize:n=!0,padding:a=!1,truncation:i=!1,max_length:l=null,return_tensor:d=!0,return_dict:u=!1,tokenizer_kwargs:p={},...m}={}){if("string"!=typeof(r=this.get_chat_template({chat_template:r,tools:t})))throw Error("chat_template must be a string, but got "+typeof r);let _=this._compiled_template_cache.get(r);void 0===_&&(_=new c.Template(r),this._compiled_template_cache.set(r,_));const h=Object.create(null);for(const e of ge){const t=this.getToken(e);t&&(h[e]=t)}const f=_.render({messages:e,add_generation_prompt:o,tools:t,documents:s,...h,...m});if(n){const e=this._call(f,{add_special_tokens:!1,padding:a,truncation:i,max_length:l,return_tensor:d,...p});return u?e:e.input_ids}return f}}class be extends Te{return_token_type_ids=!0}class xe extends Te{return_token_type_ids=!0}class Pe extends Te{return_token_type_ids=!0}class ke extends Te{return_token_type_ids=!0}class ye extends Te{return_token_type_ids=!0}class Fe extends Te{return_token_type_ids=!0}class ve extends Te{return_token_type_ids=!0}class Ce extends Te{return_token_type_ids=!0}class Se extends Te{return_token_type_ids=!0}class Ae 
extends Te{}class Ee extends Te{}class Le extends Te{return_token_type_ids=!0;constructor(e,t){super(e,t),console.warn('WARNING: `XLMTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')}}class Ie extends Te{return_token_type_ids=!0}class ze extends Te{}class je extends Te{}class Ne extends Te{}class Ve extends Te{constructor(e,t){super(e,t),this.languageRegex=/^[a-z]{2}_[A-Z]{2}$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))),this.lang_to_token=e=>e}_build_translation_inputs(e,t,s){return Ke(this,e,t,s)}}class Be extends Ve{}class Oe extends Te{}class De extends Te{}const Re="▁";class Ge extends Te{padding_side="left";constructor(e,t){super(e,t),this.legacy=t.legacy??!0,this.legacy||(this.normalizer=null,this.pre_tokenizer=new de({replacement:Re,add_prefix_space:!0,prepend_scheme:"first"}))}_encode_text(e){if(null===e)return null;if(this.legacy||0===e.length)return super._encode_text(e);let t=super._encode_text(Re+e.replaceAll(Re," "));return t.length>1&&t[0]===Re&&this.special_tokens.includes(t[1])&&(t=t.slice(1)),t}}class qe extends Te{}class We extends Te{}class $e extends Te{}class Ue extends Te{}class Qe extends Te{}class Xe extends Te{}class He extends Te{}class Je extends Te{}class Ye extends Te{}function Ke(e,t,s,r){if(!("language_codes"in e)||!Array.isArray(e.language_codes))throw new Error("Tokenizer must have `language_codes` attribute set and it should be an array of language ids.");if(!("languageRegex"in e&&e.languageRegex instanceof RegExp))throw new Error("Tokenizer must have `languageRegex` attribute set and it should be a regular expression.");if(!("lang_to_token"in e)||"function"!=typeof e.lang_to_token)throw new Error("Tokenizer must have `lang_to_token` attribute set and it should be a function.");const o=r.src_lang,n=r.tgt_lang;if(!e.language_codes.includes(n))throw new Error(`Target language code "${n}" is not valid. 
Must be one of: {${e.language_codes.join(", ")}}`);if(void 0!==o){if(!e.language_codes.includes(o))throw new Error(`Source language code "${o}" is not valid. Must be one of: {${e.language_codes.join(", ")}}`);for(const t of e.post_processor.config.single)if("SpecialToken"in t&&e.languageRegex.test(t.SpecialToken.id)){t.SpecialToken.id=e.lang_to_token(o);break}}return r.forced_bos_token_id=e.model.convert_tokens_to_ids([e.lang_to_token(n)])[0],e._call(t,s)}class Ze extends Te{constructor(e,t){super(e,t),this.languageRegex=/^[a-z]{3}_[A-Z][a-z]{3}$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))),this.lang_to_token=e=>e}_build_translation_inputs(e,t,s){return Ke(this,e,t,s)}}class et extends Te{constructor(e,t){super(e,t),this.languageRegex=/^__[a-z]{2,3}__$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))).map((e=>e.slice(2,-2))),this.lang_to_token=e=>`__${e}__`}_build_translation_inputs(e,t,s){return Ke(this,e,t,s)}}class tt extends Te{get timestamp_begin(){return this.model.convert_tokens_to_ids(["<|notimestamps|>"])[0]+1}_decode_asr(e,{return_timestamps:t=!1,return_language:s=!1,time_precision:r=null,force_full_sequences:o=!0}={}){if(null===r)throw Error("Must specify time_precision");let n=null;const i="word"===t;function l(){return{language:n,timestamp:[null,null],text:""}}const c=[];let u=l(),p=0;const m=this.timestamp_begin;let _=[],h=[],f=!1,g=null;const M=new Set(this.all_special_ids);for(const s of e){const e=s.tokens,o=i?s.token_timestamps:null;let T=null,b=m;if("stride"in s){const[t,o,n]=s.stride;if(p-=o,g=t-n,o&&(b=o/r+m),n)for(let t=e.length-1;t>=0;--t){const s=Number(e[t]);if(s>=m){if(null!==T&&(s-m)*r<g)break;T=s}}}let x=[],P=[];for(let s=0;s<e.length;++s){const g=Number(e[s]);if(M.has(g)){const e=this.decode([g]),s=d.WHISPER_LANGUAGE_MAPPING.get(e.slice(2,-2));if(void 0!==s){if(null!==n&&s!==n&&!t){_.push(x);const 
e=this.findLongestCommonSequence(_)[0],t=this.decode(e);u.text=t,c.push(u),_=[],x=[],u=l()}n=u.language=s}}else if(g>=m){const e=(g-m)*r+p,t=(0,a.round)(e,2);if(null!==T&&g>=T)f=!0;else if(f||_.length>0&&g<b)f=!1;else if(null===u.timestamp[0])u.timestamp[0]=t;else if(t===u.timestamp[0]);else{u.timestamp[1]=t,_.push(x),i&&h.push(P);const[e,s]=this.findLongestCommonSequence(_,h),r=this.decode(e);u.text=r,i&&(u.words=this.collateWordTimestamps(e,s,n)),c.push(u),_=[],x=[],h=[],P=[],u=l()}}else if(x.push(g),i){let e,t=(0,a.round)(o[s]+p,2);if(s+1<o.length){e=(0,a.round)(o[s+1]+p,2);const n=this.decode([g]);w.test(n)&&(e=(0,a.round)(Math.min(t+r,e),2))}else e=null;P.push([t,e])}}if("stride"in s){const[e,t,r]=s.stride;p+=e-r}x.length>0?(_.push(x),i&&h.push(P)):_.every((e=>0===e.length))&&(u=l(),_=[],x=[],h=[],P=[])}if(_.length>0){if(o&&t)throw new Error("Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. Also make sure WhisperTimeStampLogitsProcessor was used during generation.");const[e,s]=this.findLongestCommonSequence(_,h),r=this.decode(e);u.text=r,i&&(u.words=this.collateWordTimestamps(e,s,n)),c.push(u)}let T=Object.create(null);const b=c.map((e=>e.text)).join("");if(t||s){for(let e=0;e<c.length;++e){const r=c[e];t||delete r.timestamp,s||delete r.language}if(i){const e=[];for(const t of c)for(const s of t.words)e.push(s);T={chunks:e}}else T={chunks:c}}return[b,T]}findLongestCommonSequence(e,t=null){let s=e[0],r=s.length,o=[];const n=Array.isArray(t)&&t.length>0;let a=n?[]:null,i=n?t[0]:null;for(let l=1;l<e.length;++l){const c=e[l];let d=0,u=[r,r,0,0];const p=c.length;for(let e=1;e<r+p;++e){const o=Math.max(0,r-e),a=Math.min(r,r+p-e),m=s.slice(o,a),_=Math.max(0,e-r),h=Math.min(p,e),f=c.slice(_,h);if(m.length!==f.length)throw new Error("There is a bug within whisper `decode_asr` function, please report it. 
Dropping to prevent bad inference.");let g;g=n?m.filter(((e,s)=>e===f[s]&&i[o+s]<=t[l][_+s])).length:m.filter(((e,t)=>e===f[t])).length;const M=g/e+e/1e4;g>1&&M>d&&(d=M,u=[o,a,_,h])}const[m,_,h,f]=u,g=Math.floor((_+m)/2),M=Math.floor((f+h)/2);o.push(...s.slice(0,g)),s=c.slice(M),r=s.length,n&&(a.push(...i.slice(0,g)),i=t[l].slice(M))}return o.push(...s),n?(a.push(...i),[o,a]):[o,[]]}collateWordTimestamps(e,t,s){const[r,o,n]=this.combineTokensIntoWords(e,s),a=[];for(let e=0;e<r.length;++e){const s=n[e];a.push({text:r[e],timestamp:[t[s.at(0)][0],t[s.at(-1)][1]]})}return a}combineTokensIntoWords(e,t,s="\"'“¡¿([{-",r="\"'.。,,!!??::”)]}、"){let o,n,a;return["chinese","japanese","thai","lao","myanmar"].includes(t=t??"english")?[o,n,a]=this.splitTokensOnUnicode(e):[o,n,a]=this.splitTokensOnSpaces(e),this.mergePunctuations(o,n,a,s,r)}decode(e,t){let s;return t?.decode_with_timestamps?(e instanceof i.Tensor&&(e=_(e)),s=this.decodeWithTimestamps(e,t)):s=super.decode(e,t),s}decodeWithTimestamps(e,t){const s=t?.time_precision??.02,r=Array.from(this.all_special_ids).at(-1)+1;let o=[[]];for(let t of e)if(t=Number(t),t>=r){const e=((t-r)*s).toFixed(2);o.push(`<|${e}|>`),o.push([])}else o[o.length-1].push(t);return o=o.map((e=>"string"==typeof e?e:super.decode(e,t))),o.join("")}splitTokensOnUnicode(e){const t=this.decode(e,{decode_with_timestamps:!0}),s=[],r=[],o=[];let n=[],a=[],i=0;for(let l=0;l<e.length;++l){const c=e[l];n.push(c),a.push(l);const d=this.decode(n,{decode_with_timestamps:!0});d.includes("�")&&"�"!==t[i+d.indexOf("�")]||(s.push(d),r.push(n),o.push(a),n=[],a=[],i+=d.length)}return[s,r,o]}splitTokensOnSpaces(e){const[t,s,r]=this.splitTokensOnUnicode(e),o=[],n=[],a=[],i=new RegExp(`^[${M}]$`,"gu");for(let e=0;e<t.length;++e){const l=t[e],c=s[e],d=r[e],u=c[0]>=this.model.tokens_to_ids.get("<|endoftext|>"),p=l.startsWith(" "),m=l.trim(),_=i.test(m);if(u||p||_||0===o.length)o.push(l),n.push(c),a.push(d);else{const 
e=o.length-1;o[e]+=l,n[e].push(...c),a[e].push(...d)}}return[o,n,a]}mergePunctuations(e,t,s,r,n){const a=structuredClone(e),i=structuredClone(t),l=structuredClone(s);let c=a.length-2,d=a.length-1;for(;c>=0;)a[c].startsWith(" ")&&r.includes(a[c].trim())?(a[d]=a[c]+a[d],i[d]=(0,o.mergeArrays)(i[c],i[d]),l[d]=(0,o.mergeArrays)(l[c],l[d]),a[c]="",i[c]=[],l[c]=[]):d=c,--c;for(c=0,d=1;d<a.length;)!a[c].endsWith(" ")&&n.includes(a[d])?(a[c]+=a[d],i[c]=(0,o.mergeArrays)(i[c],i[d]),l[c]=(0,o.mergeArrays)(l[c],l[d]),a[d]="",i[d]=[],l[d]=[]):c=d,++d;return[a.filter((e=>e)),i.filter((e=>e.length>0)),l.filter((e=>e.length>0))]}}class st extends Te{}class rt extends Te{}class ot extends Te{}class nt extends Te{constructor(e,t){super(e,t),this.languageRegex=/^(>>\w+<<)\s*/g,this.supported_language_codes=this.model.vocab.filter((e=>this.languageRegex.test(e))),console.warn('WARNING: `MarianTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')}_encode_text(e){if(null===e)return null;const[t,...s]=e.trim().split(this.languageRegex);if(0===s.length)return super._encode_text(t);if(2===s.length){const[e,t]=s;return this.supported_language_codes.includes(e)||console.warn(`Unsupported language code "${e}" detected, which may lead to unexpected behavior. 
Should be one of: ${JSON.stringify(this.supported_language_codes)}`),(0,o.mergeArrays)([e],super._encode_text(t))}}}class at extends Te{}class it extends Te{}class lt extends Te{}class ct extends Te{}class dt extends Te{}class ut extends Te{constructor(e,t){super(e,t),this.decoder=new ce({})}}class pt extends Te{}class mt extends Te{}class _t{static TOKENIZER_CLASS_MAPPING={T5Tokenizer:ze,DistilBertTokenizer:Ae,CamembertTokenizer:Ee,DebertaTokenizer:ye,DebertaV2Tokenizer:Fe,BertTokenizer:be,HerbertTokenizer:ve,ConvBertTokenizer:Ce,RoFormerTokenizer:Se,XLMTokenizer:Le,ElectraTokenizer:Ie,MobileBertTokenizer:Pe,SqueezeBertTokenizer:ke,AlbertTokenizer:xe,GPT2Tokenizer:je,BartTokenizer:Ne,MBartTokenizer:Ve,MBart50Tokenizer:Be,RobertaTokenizer:Oe,WhisperTokenizer:tt,CodeGenTokenizer:st,CLIPTokenizer:rt,SiglipTokenizer:ot,MarianTokenizer:nt,BloomTokenizer:De,NllbTokenizer:Ze,M2M100Tokenizer:et,LlamaTokenizer:Ge,CodeLlamaTokenizer:qe,XLMRobertaTokenizer:We,MPNetTokenizer:$e,FalconTokenizer:Ue,GPTNeoXTokenizer:Qe,EsmTokenizer:Xe,Wav2Vec2CTCTokenizer:at,BlenderbotTokenizer:it,BlenderbotSmallTokenizer:lt,SpeechT5Tokenizer:ct,NougatTokenizer:dt,VitsTokenizer:ut,Qwen2Tokenizer:He,GemmaTokenizer:Je,Grok1Tokenizer:Ye,CohereTokenizer:pt,MgpstrTokenizer:mt,PreTrainedTokenizer:Te};static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:r=null,local_files_only:o=!1,revision:n="main",legacy:a=null}={}){const[i,l]=await u(e,{progress_callback:t,config:s,cache_dir:r,local_files_only:o,revision:n,legacy:a}),c=l.tokenizer_class?.replace(/Fast$/,"")??"PreTrainedTokenizer";let d=this.TOKENIZER_CLASS_MAPPING[c];return d||(console.warn(`Unknown tokenizer class "${c}", attempting to construct from base class.`),d=Te),new d(i,l)}}},"./src/utils/audio.js":
323
- /*!****************************!*\
324
- !*** ./src/utils/audio.js ***!
325
- \****************************/(e,t,s)=>{s.r(t),s.d(t,{hamming:()=>d,hanning:()=>c,mel_filter_bank:()=>h,read_audio:()=>i,spectrogram:()=>g,window_function:()=>M});var r=s(/*! ./hub.js */"./src/utils/hub.js"),o=s(/*! ./maths.js */"./src/utils/maths.js"),n=s(/*! ./core.js */"./src/utils/core.js"),a=s(/*! ./tensor.js */"./src/utils/tensor.js");async function i(e,t){if("undefined"==typeof AudioContext)throw Error("Unable to load audio from path/URL since `AudioContext` is not available in your environment. Instead, audio data should be passed directly to the pipeline/processor. For more information and some example code, see https://huggingface.co/docs/transformers.js/guides/node-audio-processing.");const s=await(await(0,r.getFile)(e)).arrayBuffer(),o=new AudioContext({sampleRate:t});void 0===t&&console.warn(`No sampling rate provided, using default of ${o.sampleRate}Hz.`);const n=await o.decodeAudioData(s);let a;if(2===n.numberOfChannels){const e=Math.sqrt(2),t=n.getChannelData(0),s=n.getChannelData(1);a=new Float32Array(t.length);for(let r=0;r<n.length;++r)a[r]=e*(t[r]+s[r])/2}else a=n.getChannelData(0);return a}function l(e,t){if(e<1)return new Float64Array;if(1===e)return new Float64Array([1]);const s=1-t,r=2*Math.PI/(e-1),o=new Float64Array(e);for(let n=0;n<e;++n)o[n]=t-s*Math.cos(n*r);return o}function c(e){return l(e,.5)}function d(e){return l(e,.54)}const u={htk:e=>2595*Math.log10(1+e/700),kaldi:e=>1127*Math.log(1+e/700),slaney:(e,t=1e3,s=15,r=27/Math.log(6.4))=>e>=t?s+Math.log(e/t)*r:3*e/200};function p(e,t="htk"){const s=u[t];if(!s)throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".');return"number"==typeof e?s(e):e.map((e=>s(e)))}const m={htk:e=>700*(10**(e/2595)-1),kaldi:e=>700*(Math.exp(e/1127)-1),slaney:(e,t=1e3,s=15,r=Math.log(6.4)/27)=>e>=s?t*Math.exp(r*(e-s)):200*e/3};function _(e,t,s){const r=(t-e)/(s-1);return Float64Array.from({length:s},((t,s)=>e+r*s))}function h(e,t,s,r,o,n=null,a="htk",i=!1){if(null!==n&&"slaney"!==n)throw 
new Error('norm must be one of null or "slaney"');const l=_(p(s,a),p(r,a),t+2);let c,d=function(e,t="htk"){const s=m[t];if(!s)throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".');return"number"==typeof e?s(e):e.map((e=>s(e)))}(l,a);if(i){const t=o/(2*e);c=p(Float64Array.from({length:e},((e,s)=>s*t)),a),d=l}else c=_(0,Math.floor(o/2),e);const u=function(e,t){const s=Float64Array.from({length:t.length-1},((e,s)=>t[s+1]-t[s])),r=Array.from({length:e.length},(()=>new Array(t.length)));for(let s=0;s<e.length;++s){const o=r[s];for(let r=0;r<t.length;++r)o[r]=t[r]-e[s]}const o=t.length-2,n=Array.from({length:o},(()=>new Array(e.length)));for(let t=0;t<e.length;++t){const e=r[t];for(let r=0;r<o;++r){const o=-e[r]/s[r],a=e[r+2]/s[r+1];n[r][t]=Math.max(0,Math.min(o,a))}}return n}(c,d);if(null!==n&&"slaney"===n)for(let s=0;s<t;++s){const t=u[s],r=2/(d[s+2]-d[s]);for(let s=0;s<e;++s)t[s]*=r}return u}function f(e,t,s,r,n){if(s<=0)throw new Error("reference must be greater than zero");if(r<=0)throw new Error("min_value must be greater than zero");s=Math.max(r,s);const a=Math.log10(s);for(let s=0;s<e.length;++s)e[s]=t*Math.log10(Math.max(r,e[s])-a);if(null!==n){if(n<=0)throw new Error("db_range must be greater than zero");const t=(0,o.max)(e)[0]-n;for(let s=0;s<e.length;++s)e[s]=Math.max(e[s],t)}return e}async function g(e,t,s,r,{fft_length:i=null,power:l=1,center:c=!0,pad_mode:d="reflect",onesided:u=!0,preemphasis:p=null,mel_filters:m=null,mel_floor:_=1e-10,log_mel:h=null,reference:g=1,min_value:M=1e-10,db_range:w=null,remove_dc_offset:T=null,min_num_frames:b=null,max_num_frames:x=null,do_pad:P=!0,transpose:k=!1}={}){const y=t.length;if(null===i&&(i=s),s>i)throw Error(`frame_length (${s}) may not be larger than fft_length (${i})`);if(y!==s)throw new Error(`Length of the window (${y}) must equal frame_length (${s})`);if(r<=0)throw new Error("hop_length must be greater than zero");if(null===l&&null!==m)throw new Error("You have provided `mel_filters` but 
`power` is `None`. Mel spectrogram computation is not yet supported for complex-valued spectrogram. Specify `power` to fix this issue.");if(c){if("reflect"!==d)throw new Error(`pad_mode="${d}" not implemented yet.`);const t=Math.floor((i-1)/2)+1;e=function(e,t,s){const r=new e.constructor(e.length+t+s),o=e.length-1;for(let s=0;s<e.length;++s)r[t+s]=e[s];for(let s=1;s<=t;++s)r[t-s]=e[(0,n.calculateReflectOffset)(s,o)];for(let a=1;a<=s;++a)r[o+t+a]=e[(0,n.calculateReflectOffset)(o-a,o)];return r}(e,t,t)}let F=Math.floor(1+Math.floor((e.length-s)/r));null!==b&&F<b&&(F=b);const v=u?Math.floor(i/2)+1:i;let C=F,S=F;null!==x&&(x>F?P&&(S=x):S=C=x);const A=new o.FFT(i),E=new Float64Array(i),L=new Float64Array(A.outputBufferSize),I=new Float32Array(v*S);for(let o=0;o<C;++o){const n=o*r,a=Math.min(e.length-n,s);a!==s&&E.fill(0,0,s);for(let t=0;t<a;++t)E[t]=e[n+t];if(T){let e=0;for(let t=0;t<a;++t)e+=E[t];const t=e/a;for(let e=0;e<a;++e)E[e]-=t}if(null!==p){for(let e=a-1;e>=1;--e)E[e]-=p*E[e-1];E[0]*=1-p}for(let e=0;e<t.length;++e)E[e]*=t[e];A.realTransform(L,E);for(let e=0;e<v;++e){const t=e<<1;I[e*S+o]=L[t]**2+L[t+1]**2}}if(null!==l&&2!==l){const e=2/l;for(let t=0;t<I.length;++t)I[t]**=e}const z=m.length;let j=await(0,a.matmul)(new a.Tensor("float32",m.flat(),[z,v]),new a.Tensor("float32",I,[v,S]));k&&(j=j.transpose(1,0));const N=j.data;for(let e=0;e<N.length;++e)N[e]=Math.max(_,N[e]);if(null!==l&&null!==h){const e=Math.min(N.length,C*z);switch(h){case"log":for(let t=0;t<e;++t)N[t]=Math.log(N[t]);break;case"log10":for(let t=0;t<e;++t)N[t]=Math.log10(N[t]);break;case"dB":if(1===l)!function(e,t=1,s=1e-5,r=null){f(e,20,t,s,r)}(N,g,M,w);else{if(2!==l)throw new Error(`Cannot use log_mel option '${h}' with power ${l}`);!function(e,t=1,s=1e-10,r=null){f(e,10,t,s,r)}(N,g,M,w)}break;default:throw new Error(`log_mel must be one of null, 'log', 'log10' or 'dB'. 
Got '${h}'`)}}return j}function M(e,t,{periodic:s=!0,frame_length:r=null,center:o=!0}={}){const n=s?e+1:e;let a;switch(t){case"boxcar":a=new Float64Array(n).fill(1);break;case"hann":case"hann_window":a=c(n);break;case"hamming":a=d(n);break;case"povey":a=c(n).map((e=>Math.pow(e,.85)));break;default:throw new Error(`Unknown window type ${t}.`)}if(s&&(a=a.subarray(0,e)),null===r)return a;if(e>r)throw new Error(`Length of the window (${e}) may not be larger than frame_length (${r})`);return a}},"./src/utils/constants.js":
326
- /*!********************************!*\
327
- !*** ./src/utils/constants.js ***!
328
- \********************************/(e,t,s)=>{s.r(t),s.d(t,{CHAT_TEMPLATE_NAME:()=>l,CONFIG_NAME:()=>o,FEATURE_EXTRACTOR_NAME:()=>n,GENERATION_CONFIG_NAME:()=>c,GITHUB_ISSUE_URL:()=>r,IMAGE_PROCESSOR_NAME:()=>a,PROCESSOR_NAME:()=>i});const r="https://github.com/huggingface/transformers.js/issues/new/choose",o="config.json",n="preprocessor_config.json",a=n,i="processor_config.json",l="chat_template.json",c="generation_config.json"},"./src/utils/core.js":
329
- /*!***************************!*\
330
- !*** ./src/utils/core.js ***!
331
- \***************************/(e,t,s)=>{function r(e,t){e&&e(t)}function o(e){return Object.fromEntries(Object.entries(e).map((([e,t])=>[t,e])))}function n(e){return e.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}function a(e){return"TypedArray"===e?.prototype?.__proto__?.constructor?.name}function i(e){return Number.isInteger(e)||"bigint"==typeof e}function l(e){return null==e||-1===e}function c(e){const t=[];let s=e;for(;Array.isArray(s);)t.push(s.length),s=s[0];return t}function d(e,t,s=void 0){const r=e[t];if(void 0!==r)return delete e[t],r;if(void 0===s)throw Error(`Key ${t} does not exist in object.`);return s}function u(...e){return Array.prototype.concat.apply([],e)}function p(...e){return e.reduce(((e,t)=>e.flatMap((e=>t.map((t=>[e,t]))))))}function m(e,t){return Math.abs((e+t)%(2*t)-t)}function _(e,t){return Object.assign({},...t.map((t=>{if(void 0!==e[t])return{[t]:e[t]}})))}function h(e){let t=0;for(const s of e)++t;return t}function f(e,t){let s=0;for(const r of e)r===t&&++s;return s}s.r(t),s.d(t,{calculateDimensions:()=>c,calculateReflectOffset:()=>m,count:()=>f,dispatchCallback:()=>r,escapeRegExp:()=>n,isIntegralNumber:()=>i,isNullishDimension:()=>l,isTypedArray:()=>a,len:()=>h,mergeArrays:()=>u,pick:()=>_,pop:()=>d,product:()=>p,reverseDictionary:()=>o})},"./src/utils/data-structures.js":
332
- /*!**************************************!*\
333
- !*** ./src/utils/data-structures.js ***!
334
- \**************************************/(e,t,s)=>{s.r(t),s.d(t,{CharTrie:()=>o,PriorityQueue:()=>r,TokenLattice:()=>a});class r{constructor(e=(e,t)=>e>t,t=1/0){this._heap=[],this._comparator=e,this._maxSize=t}get size(){return this._heap.length}isEmpty(){return 0===this.size}peek(){return this._heap[0]}push(...e){return this.extend(e)}extend(e){for(const t of e)if(this.size<this._maxSize)this._heap.push(t),this._siftUp();else{const e=this._smallest();this._comparator(t,this._heap[e])&&(this._heap[e]=t,this._siftUpFrom(e))}return this.size}pop(){const e=this.peek(),t=this.size-1;return t>0&&this._swap(0,t),this._heap.pop(),this._siftDown(),e}replace(e){const t=this.peek();return this._heap[0]=e,this._siftDown(),t}_parent(e){return(e+1>>>1)-1}_left(e){return 1+(e<<1)}_right(e){return e+1<<1}_greater(e,t){return this._comparator(this._heap[e],this._heap[t])}_swap(e,t){const s=this._heap[e];this._heap[e]=this._heap[t],this._heap[t]=s}_siftUp(){this._siftUpFrom(this.size-1)}_siftUpFrom(e){for(;e>0&&this._greater(e,this._parent(e));)this._swap(e,this._parent(e)),e=this._parent(e)}_siftDown(){let e=0;for(;this._left(e)<this.size&&this._greater(this._left(e),e)||this._right(e)<this.size&&this._greater(this._right(e),e);){const t=this._right(e)<this.size&&this._greater(this._right(e),this._left(e))?this._right(e):this._left(e);this._swap(e,t),e=t}}_smallest(){return 2**Math.floor(Math.log2(this.size))-1}}class o{constructor(){this.root=n.default()}extend(e){for(const t of e)this.push(t)}push(e){let t=this.root;for(const s of e){let e=t.children.get(s);void 0===e&&(e=n.default(),t.children.set(s,e)),t=e}t.isLeaf=!0}*commonPrefixSearch(e){let t=this.root;if(void 0===t)return;let s="";for(const r of e){if(s+=r,t=t.children.get(r),void 0===t)return;t.isLeaf&&(yield s)}}}class n{constructor(e,t){this.isLeaf=e,this.children=t}static default(){return new n(!1,new Map)}}class 
a{constructor(e,t,s){this.chars=Array.from(e),this.len=this.chars.length,this.bosTokenId=t,this.eosTokenId=s,this.nodes=[],this.beginNodes=Array.from({length:this.len+1},(()=>[])),this.endNodes=Array.from({length:this.len+1},(()=>[]));const r=new i(this.bosTokenId,0,0,0,0),o=new i(this.eosTokenId,1,this.len,0,0);this.nodes.push(r.clone()),this.nodes.push(o.clone()),this.beginNodes[this.len].push(o),this.endNodes[0].push(r)}insert(e,t,s,r){const o=this.nodes.length,n=new i(r,o,e,t,s);this.beginNodes[e].push(n),this.endNodes[e+t].push(n),this.nodes.push(n)}viterbi(){const e=this.len;let t=0;for(;t<=e;){if(0==this.beginNodes[t].length)return[];for(let e of this.beginNodes[t]){e.prev=null;let s=0,r=null;for(let o of this.endNodes[t]){const t=o.backtraceScore+e.score;(null===r||t>s)&&(r=o.clone(),s=t)}if(null===r)return[];e.prev=r,e.backtraceScore=s}++t}const s=[],r=this.beginNodes[e][0].prev;if(null===r)return[];let o=r.clone();for(;null!==o.prev;){s.push(o.clone());const e=o.clone();o=e.prev.clone()}return s.reverse(),s}piece(e){return this.chars.slice(e.pos,e.pos+e.length).join("")}tokens(){return this.viterbi().map((e=>this.piece(e)))}tokenIds(){return this.viterbi().map((e=>e.tokenId))}}class i{constructor(e,t,s,r,o){this.tokenId=e,this.nodeId=t,this.pos=s,this.length=r,this.score=o,this.prev=null,this.backtraceScore=0}clone(){const e=new i(this.tokenId,this.nodeId,this.pos,this.length,this.score);return e.prev=this.prev,e.backtraceScore=this.backtraceScore,e}}},"./src/utils/devices.js":
335
- /*!******************************!*\
336
- !*** ./src/utils/devices.js ***!
337
- \******************************/(e,t,s)=>{s.r(t),s.d(t,{DEVICE_TYPES:()=>r});const r=Object.freeze({auto:"auto",gpu:"gpu",cpu:"cpu",wasm:"wasm",webgpu:"webgpu",cuda:"cuda",dml:"dml",webnn:"webnn","webnn-npu":"webnn-npu","webnn-gpu":"webnn-gpu","webnn-cpu":"webnn-cpu"})},"./src/utils/dtypes.js":
338
- /*!*****************************!*\
339
- !*** ./src/utils/dtypes.js ***!
340
- \*****************************/(e,t,s)=>{s.r(t),s.d(t,{DATA_TYPES:()=>a,DEFAULT_DEVICE_DTYPE_MAPPING:()=>i,DEFAULT_DTYPE_SUFFIX_MAPPING:()=>l,isWebGpuFp16Supported:()=>n});var r=s(/*! ../env.js */"./src/env.js"),o=s(/*! ./devices.js */"./src/utils/devices.js");const n=function(){let e;return async function(){if(void 0===e)if(r.apis.IS_WEBGPU_AVAILABLE)try{const t=await navigator.gpu.requestAdapter();e=t.features.has("shader-f16")}catch(t){e=!1}else e=!1;return e}}(),a=Object.freeze({auto:"auto",fp32:"fp32",fp16:"fp16",q8:"q8",int8:"int8",uint8:"uint8",q4:"q4",bnb4:"bnb4",q4f16:"q4f16"}),i=Object.freeze({[o.DEVICE_TYPES.wasm]:a.q8}),l=Object.freeze({[a.fp32]:"",[a.fp16]:"_fp16",[a.int8]:"_int8",[a.uint8]:"_uint8",[a.q8]:"_quantized",[a.q4]:"_q4",[a.q4f16]:"_q4f16",[a.bnb4]:"_bnb4"})},"./src/utils/generic.js":
341
- /*!******************************!*\
342
- !*** ./src/utils/generic.js ***!
343
- \******************************/(e,t,s)=>{s.r(t),s.d(t,{Callable:()=>r});const r=class{constructor(){let e=function(...t){return e._call(...t)};return Object.setPrototypeOf(e,new.target.prototype)}_call(...e){throw Error("Must implement _call method in subclass")}}},"./src/utils/hub.js":
344
- /*!**************************!*\
345
- !*** ./src/utils/hub.js ***!
346
- \**************************/(e,t,s)=>{s.r(t),s.d(t,{getFile:()=>d,getModelFile:()=>m,getModelJSON:()=>_});var r=s(/*! fs */"fs"),o=s(/*! path */"path"),n=s(/*! ../env.js */"./src/env.js"),a=s(/*! ./core.js */"./src/utils/core.js");const i={txt:"text/plain",html:"text/html",css:"text/css",js:"text/javascript",json:"application/json",png:"image/png",jpg:"image/jpeg",jpeg:"image/jpeg",gif:"image/gif"};class l{constructor(e){if(this.filePath=e,this.headers=new Headers,this.exists=r.default.existsSync(e),this.exists){this.status=200,this.statusText="OK";let t=r.default.statSync(e);this.headers.set("content-length",t.size.toString()),this.updateContentType();let s=this;this.body=new ReadableStream({start(e){s.arrayBuffer().then((t=>{e.enqueue(new Uint8Array(t)),e.close()}))}})}else this.status=404,this.statusText="Not Found",this.body=null}updateContentType(){const e=this.filePath.toString().split(".").pop().toLowerCase();this.headers.set("content-type",i[e]??"application/octet-stream")}clone(){let e=new l(this.filePath);return e.exists=this.exists,e.status=this.status,e.statusText=this.statusText,e.headers=new Headers(this.headers),e}async arrayBuffer(){return(await r.default.promises.readFile(this.filePath)).buffer}async blob(){const e=await r.default.promises.readFile(this.filePath);return new Blob([e],{type:this.headers.get("content-type")})}async text(){return await r.default.promises.readFile(this.filePath,"utf8")}async json(){return JSON.parse(await this.text())}}function c(e,t=null,s=null){let r;try{r=new URL(e)}catch(e){return!1}return!(t&&!t.includes(r.protocol))&&!(s&&!s.includes(r.hostname))}async function d(e){if(n.env.useFS&&!c(e,["http:","https:","blob:"]))return new l(e);if("undefined"!=typeof process&&"node"===process?.release?.name){const t=!!process.env?.TESTING_REMOTELY,s=n.env.version,r=new Headers;r.set("User-Agent",`transformers.js/${s}; is_ci/${t};`);if(c(e,["http:","https:"],["huggingface.co","hf.co"])){const 
e=process.env?.HF_TOKEN??process.env?.HF_ACCESS_TOKEN;e&&r.set("Authorization",`Bearer ${e}`)}return fetch(e,{headers:r})}return fetch(e)}const u={400:"Bad request error occurred while trying to load file",401:"Unauthorized access to file",403:"Forbidden access to file",404:"Could not locate file",408:"Request timeout error occurred while trying to load file",500:"Internal server error error occurred while trying to load file",502:"Bad gateway error occurred while trying to load file",503:"Service unavailable error occurred while trying to load file",504:"Gateway timeout error occurred while trying to load file"};class p{constructor(e){this.path=e}async match(e){let t=o.default.join(this.path,e),s=new l(t);return s.exists?s:void 0}async put(e,t){const s=Buffer.from(await t.arrayBuffer());let n=o.default.join(this.path,e);try{await r.default.promises.mkdir(o.default.dirname(n),{recursive:!0}),await r.default.promises.writeFile(n,s)}catch(e){console.warn("An error occurred while writing the file to cache:",e)}}}async function m(e,t,s=!0,r={}){if(!n.env.allowLocalModels){if(r.local_files_only)throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).");if(!n.env.allowRemoteModels)throw Error("Invalid configuration detected: both local and remote models are disabled. 
Fix by setting `env.allowLocalModels` or `env.allowRemoteModels` to `true`.")}let o;if((0,a.dispatchCallback)(r.progress_callback,{status:"initiate",name:e,file:t}),!o&&n.env.useBrowserCache){if("undefined"==typeof caches)throw Error("Browser cache is not available in this environment.");try{o=await caches.open("transformers-cache")}catch(e){console.warn("An error occurred while opening the browser cache:",e)}}if(!o&&n.env.useFSCache&&(o=new p(r.cache_dir??n.env.cacheDir)),!o&&n.env.useCustomCache){if(!n.env.customCache)throw Error("`env.useCustomCache=true`, but `env.customCache` is not defined.");if(!n.env.customCache.match||!n.env.customCache.put)throw new Error("`env.customCache` must be an object which implements the `match` and `put` functions of the Web Cache API. For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache");o=n.env.customCache}const i=r.revision??"main";let l,m,_=h(e,t),f=h(n.env.localModelPath,_),g=h(n.env.remoteHost,n.env.remotePathTemplate.replaceAll("{model}",e).replaceAll("{revision}",encodeURIComponent(i)),t),M="main"===i?_:h(e,i,t),w=o instanceof p?M:g,T=!1;o&&(m=await async function(e,...t){for(let s of t)try{let t=await e.match(s);if(t)return t}catch(e){continue}}(o,f,w));const b=void 0!==m;if(void 0===m){if(n.env.allowLocalModels){if(c(_,["http:","https:"])){if(r.local_files_only)throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${_}.`);if(!n.env.allowRemoteModels)throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${_}.`)}else try{m=await d(f),l=f}catch(e){console.warn(`Unable to load from local path "${f}": "${e}"`)}}if(void 0===m||404===m.status){if(r.local_files_only||!n.env.allowRemoteModels){if(s)throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${f}".`);return null}if(m=await d(g),200!==m.status)return function(e,t,s){if(!s)return null;const r=u[e]??`Error (${e}) 
occurred while trying to load file`;throw Error(`${r}: "${t}".`)}(m.status,g,s);l=w}T=o&&"undefined"!=typeof Response&&m instanceof Response&&200===m.status}(0,a.dispatchCallback)(r.progress_callback,{status:"download",name:e,file:t});const x={status:"progress",name:e,file:t};let P;return r.progress_callback?b&&"undefined"!=typeof navigator&&/firefox/i.test(navigator.userAgent)?(P=new Uint8Array(await m.arrayBuffer()),(0,a.dispatchCallback)(r.progress_callback,{...x,progress:100,loaded:P.length,total:P.length})):P=await async function(e,t){const s=e.headers.get("Content-Length");null===s&&console.warn("Unable to determine content-length from response headers. Will expand buffer when needed.");let r=parseInt(s??"0"),o=new Uint8Array(r),n=0;const a=e.body.getReader();async function i(){const{done:e,value:s}=await a.read();if(e)return;let l=n+s.length;if(l>r){r=l;let e=new Uint8Array(r);e.set(o),o=e}o.set(s,n),n=l;return t({progress:n/r*100,loaded:n,total:r}),i()}return await i(),o}(m,(e=>{(0,a.dispatchCallback)(r.progress_callback,{...x,...e})})):P=new Uint8Array(await m.arrayBuffer()),T&&l&&void 0===await o.match(l)&&await o.put(l,new Response(P,{headers:m.headers})).catch((e=>{console.warn(`Unable to add response to browser cache: ${e}.`)})),(0,a.dispatchCallback)(r.progress_callback,{status:"done",name:e,file:t}),P}async function _(e,t,s=!0,r={}){let o=await m(e,t,s,r);if(null===o)return{};let n=new TextDecoder("utf-8").decode(o);return JSON.parse(n)}function h(...e){return(e=e.map(((t,s)=>(s&&(t=t.replace(new RegExp("^/"),"")),s!==e.length-1&&(t=t.replace(new RegExp("/$"),"")),t)))).join("/")}},"./src/utils/image.js":
347
- /*!****************************!*\
348
- !*** ./src/utils/image.js ***!
349
- \****************************/(e,t,s)=>{s.r(t),s.d(t,{RawImage:()=>_,load_image:()=>h});var r=s(/*! ./core.js */"./src/utils/core.js"),o=s(/*! ./hub.js */"./src/utils/hub.js"),n=s(/*! ../env.js */"./src/env.js"),a=s(/*! ./tensor.js */"./src/utils/tensor.js"),i=s(/*! sharp */"sharp");let l,c,d;const u=n.apis.IS_BROWSER_ENV||n.apis.IS_WEBWORKER_ENV;if(u)l=(e,t)=>{if(!self.OffscreenCanvas)throw new Error("OffscreenCanvas not supported by this browser.");return new self.OffscreenCanvas(e,t)},d=self.createImageBitmap,c=self.ImageData;else{if(!i.default)throw new Error("Unable to load image processing library.");d=async e=>{const t=(await e.metadata()).channels,{data:s,info:r}=await e.rotate().raw().toBuffer({resolveWithObject:!0}),o=new _(new Uint8ClampedArray(s),r.width,r.height,r.channels);return void 0!==t&&t!==r.channels&&o.convert(t),o}}const p={0:"nearest",1:"lanczos",2:"bilinear",3:"bicubic",4:"box",5:"hamming"},m=new Map([["png","image/png"],["jpg","image/jpeg"],["jpeg","image/jpeg"],["gif","image/gif"]]);class _{constructor(e,t,s,r){this.data=e,this.width=t,this.height=s,this.channels=r}get size(){return[this.width,this.height]}static async read(e){if(e instanceof _)return e;if("string"==typeof e||e instanceof URL)return await this.fromURL(e);throw new Error("Unsupported input type: "+typeof e)}static fromCanvas(e){if(!u)throw new Error("fromCanvas() is only supported in browser environments.");const t=e.getContext("2d").getImageData(0,0,e.width,e.height).data;return new _(t,e.width,e.height,4)}static async fromURL(e){const t=await(0,o.getFile)(e);if(200!==t.status)throw new Error(`Unable to read image from "${e}" (${t.status} ${t.statusText})`);const s=await t.blob();return this.fromBlob(s)}static async fromBlob(e){if(u){const t=await d(e),s=l(t.width,t.height).getContext("2d");return s.drawImage(t,0,0),new this(s.getImageData(0,0,t.width,t.height).data,t.width,t.height,4)}{const t=(0,i.default)(await e.arrayBuffer());return await d(t)}}static 
fromTensor(e,t="CHW"){if(3!==e.dims.length)throw new Error(`Tensor should have 3 dimensions, but has ${e.dims.length} dimensions.`);if("CHW"===t)e=e.transpose(1,2,0);else if("HWC"!==t)throw new Error(`Unsupported channel format: ${t}`);if(!(e.data instanceof Uint8ClampedArray||e.data instanceof Uint8Array))throw new Error(`Unsupported tensor type: ${e.type}`);switch(e.dims[2]){case 1:case 2:case 3:case 4:return new _(e.data,e.dims[1],e.dims[0],e.dims[2]);default:throw new Error(`Unsupported number of channels: ${e.dims[2]}`)}}grayscale(){if(1===this.channels)return this;const e=new Uint8ClampedArray(this.width*this.height*1);switch(this.channels){case 3:case 4:for(let t=0,s=0;t<this.data.length;t+=this.channels){const r=this.data[t],o=this.data[t+1],n=this.data[t+2];e[s++]=Math.round(.2989*r+.587*o+.114*n)}break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,1)}rgb(){if(3===this.channels)return this;const e=new Uint8ClampedArray(this.width*this.height*3);switch(this.channels){case 1:for(let t=0,s=0;t<this.data.length;++t)e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=this.data[t];break;case 4:for(let t=0,s=0;t<this.data.length;t+=4)e[s++]=this.data[t],e[s++]=this.data[t+1],e[s++]=this.data[t+2];break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,3)}rgba(){if(4===this.channels)return this;const e=new Uint8ClampedArray(this.width*this.height*4);switch(this.channels){case 1:for(let t=0,s=0;t<this.data.length;++t)e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=255;break;case 3:for(let t=0,s=0;t<this.data.length;t+=3)e[s++]=this.data[t],e[s++]=this.data[t+1],e[s++]=this.data[t+2],e[s++]=255;break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,4)}async 
resize(e,t,{resample:s=2}={}){if(this.width===e&&this.height===t)return this;let o=p[s]??s;const n=(0,r.isNullishDimension)(e),a=(0,r.isNullishDimension)(t);if(n&&a)return this;if(n?e=t/this.height*this.width:a&&(t=e/this.width*this.height),u){const s=this.channels,r=this.toCanvas(),o=l(e,t).getContext("2d");o.drawImage(r,0,0,e,t);return new _(o.getImageData(0,0,e,t).data,e,t,4).convert(s)}{let s=this.toSharp();switch(o){case"box":case"hamming":"box"!==o&&"hamming"!==o||(console.warn(`Resampling method ${o} is not yet supported. Using bilinear instead.`),o="bilinear");case"nearest":case"bilinear":case"bicubic":s=s.affine([e/this.width,0,0,t/this.height],{interpolator:o});break;case"lanczos":s=s.resize({width:e,height:t,fit:"fill",kernel:"lanczos3"});break;default:throw new Error(`Resampling method ${o} is not supported.`)}return await d(s)}}async pad([e,t,s,r]){if(e=Math.max(e,0),t=Math.max(t,0),s=Math.max(s,0),r=Math.max(r,0),0===e&&0===t&&0===s&&0===r)return this;if(u){const o=this.channels,n=this.toCanvas(),a=this.width+e+t,i=this.height+s+r,c=l(a,i).getContext("2d");c.drawImage(n,0,0,this.width,this.height,e,s,this.width,this.height);return new _(c.getImageData(0,0,a,i).data,a,i,4).convert(o)}{const o=this.toSharp().extend({left:e,right:t,top:s,bottom:r});return await d(o)}}async crop([e,t,s,r]){if(e=Math.max(e,0),t=Math.max(t,0),s=Math.min(s,this.width-1),r=Math.min(r,this.height-1),0===e&&0===t&&s===this.width-1&&r===this.height-1)return this;const o=s-e+1,n=r-t+1;if(u){const s=this.channels,r=this.toCanvas(),a=l(o,n).getContext("2d");a.drawImage(r,e,t,o,n,0,0,o,n);return new _(a.getImageData(0,0,o,n).data,o,n,4).convert(s)}{const s=this.toSharp().extract({left:e,top:t,width:o,height:n});return await d(s)}}async center_crop(e,t){if(this.width===e&&this.height===t)return this;const s=(this.width-e)/2,r=(this.height-t)/2;if(u){const o=this.channels,n=this.toCanvas(),a=l(e,t).getContext("2d");let 
i=0,c=0,d=0,u=0;s>=0?i=s:d=-s,r>=0?c=r:u=-r,a.drawImage(n,i,c,e,t,d,u,e,t);return new _(a.getImageData(0,0,e,t).data,e,t,4).convert(o)}{let o=this.toSharp();if(s>=0&&r>=0)o=o.extract({left:Math.floor(s),top:Math.floor(r),width:e,height:t});else if(s<=0&&r<=0){const n=Math.floor(-r),a=Math.floor(-s);o=o.extend({top:n,left:a,right:e-this.width-a,bottom:t-this.height-n})}else{let n=[0,0],a=0;r<0?(n[0]=Math.floor(-r),n[1]=t-this.height-n[0]):a=Math.floor(r);let i=[0,0],l=0;s<0?(i[0]=Math.floor(-s),i[1]=e-this.width-i[0]):l=Math.floor(s),o=o.extend({top:n[0],bottom:n[1],left:i[0],right:i[1]}).extract({left:l,top:a,width:e,height:t})}return await d(o)}}async toBlob(e="image/png",t=1){if(!u)throw new Error("toBlob() is only supported in browser environments.");const s=this.toCanvas();return await s.convertToBlob({type:e,quality:t})}toTensor(e="CHW"){let t=new a.Tensor("uint8",new Uint8Array(this.data),[this.height,this.width,this.channels]);if("HWC"===e);else{if("CHW"!==e)throw new Error(`Unsupported channel format: ${e}`);t=t.permute(2,0,1)}return t}toCanvas(){if(!u)throw new Error("toCanvas() is only supported in browser environments.");const e=this.clone().rgba(),t=l(e.width,e.height),s=new c(e.data,e.width,e.height);return t.getContext("2d").putImageData(s,0,0),t}split(){const{data:e,width:t,height:s,channels:r}=this,o=e.constructor,n=e.length/r,a=Array.from({length:r},(()=>new o(n)));for(let t=0;t<n;++t){const s=r*t;for(let o=0;o<r;++o)a[o][t]=e[s+o]}return a.map((e=>new _(e,t,s,1)))}_update(e,t,s,r=null){return this.data=e,this.width=t,this.height=s,null!==r&&(this.channels=r),this}clone(){return new _(this.data.slice(),this.width,this.height,this.channels)}convert(e){if(this.channels===e)return this;switch(e){case 1:this.grayscale();break;case 3:this.rgb();break;case 4:this.rgba();break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this}async save(e){if(!u){if(n.env.useFS){const t=this.toSharp();return 
await t.toFile(e)}throw new Error("Unable to save the image because filesystem is disabled in this environment.")}{if(n.apis.IS_WEBWORKER_ENV)throw new Error("Unable to save an image from a Web Worker.");const t=e.split(".").pop().toLowerCase(),s=m.get(t)??"image/png",r=await this.toBlob(s),o=URL.createObjectURL(r),a=document.createElement("a");a.href=o,a.download=e,a.click(),a.remove()}}toSharp(){if(u)throw new Error("toSharp() is only supported in server-side environments.");return(0,i.default)(this.data,{raw:{width:this.width,height:this.height,channels:this.channels}})}}const h=_.read.bind(_)},"./src/utils/maths.js":
350
- /*!****************************!*\
351
- !*** ./src/utils/maths.js ***!
352
- \****************************/(e,t,s)=>{function r(e,[t,s,r],[o,n],a="bilinear",i=!1){const l=n/r,c=o/s,d=new e.constructor(o*n*t),u=s*r,p=o*n;for(let a=0;a<o;++a)for(let o=0;o<n;++o){const i=a*n+o,m=(o+.5)/l-.5,_=(a+.5)/c-.5;let h=Math.floor(m),f=Math.floor(_);const g=Math.min(h+1,r-1),M=Math.min(f+1,s-1);h=Math.max(h,0),f=Math.max(f,0);const w=m-h,T=_-f,b=(1-w)*(1-T),x=w*(1-T),P=(1-w)*T,k=w*T,y=f*r,F=M*r,v=y+h,C=y+g,S=F+h,A=F+g;for(let s=0;s<t;++s){const t=s*u;d[s*p+i]=b*e[t+v]+x*e[t+C]+P*e[t+S]+k*e[t+A]}}return d}function o(e,t,s){const r=new Array(s.length),o=new Array(s.length);for(let e=s.length-1,n=1;e>=0;--e)o[e]=n,r[e]=t[s[e]],n*=r[e];const n=s.map(((e,t)=>o[s.indexOf(t)])),a=new e.constructor(e.length);for(let s=0;s<e.length;++s){let r=0;for(let e=t.length-1,o=s;e>=0;--e)r+=o%t[e]*n[e],o=Math.floor(o/t[e]);a[r]=e[s]}return[a,r]}function n(e){const t=u(e)[0],s=e.map((e=>Math.exp(e-t))),r=s.reduce(((e,t)=>e+t),0);return s.map((e=>e/r))}function a(e){const t=u(e)[0];let s=0;for(let r=0;r<e.length;++r)s+=Math.exp(e[r]-t);const r=Math.log(s);return e.map((e=>e-t-r))}function i(e,t){let s=0;for(let r=0;r<e.length;++r)s+=e[r]*t[r];return s}function l(e,t){return i(e,t)/(c(e)*c(t))}function c(e){return Math.sqrt(e.reduce(((e,t)=>e+t*t),0))}function d(e){if(0===e.length)throw Error("Array must not be empty");let t=e[0],s=0;for(let r=1;r<e.length;++r)e[r]<t&&(t=e[r],s=r);return[t,s]}function u(e){if(0===e.length)throw Error("Array must not be empty");let t=e[0],s=0;for(let r=1;r<e.length;++r)e[r]>t&&(t=e[r],s=r);return[Number(t),s]}function p(e){return e>0&&!(e&e-1)}s.r(t),s.d(t,{FFT:()=>h,bankers_round:()=>M,cos_sim:()=>l,dot:()=>i,dynamic_time_warping:()=>w,interpolate_data:()=>r,log_softmax:()=>a,magnitude:()=>c,max:()=>u,medianFilter:()=>f,min:()=>d,permute_data:()=>o,round:()=>g,softmax:()=>n});class m{constructor(e){if(this.size=0|e,this.size<=1||!p(this.size))throw new Error("FFT size must be a power of two larger than 1");this._csize=e<<1,this.table=new 
Float64Array(2*this.size);for(let e=0;e<this.table.length;e+=2){const t=Math.PI*e/this.size;this.table[e]=Math.cos(t),this.table[e+1]=-Math.sin(t)}let t=0;for(let e=1;this.size>e;e<<=1)++t;this._width=t%2==0?t-1:t,this._bitrev=new Int32Array(1<<this._width);for(let e=0;e<this._bitrev.length;++e){this._bitrev[e]=0;for(let t=0;t<this._width;t+=2){const s=this._width-t-2;this._bitrev[e]|=(e>>>t&3)<<s}}}createComplexArray(){return new Float64Array(this._csize)}fromComplexArray(e,t){const s=t||new Array(e.length>>>1);for(let t=0;t<e.length;t+=2)s[t>>>1]=e[t];return s}toComplexArray(e,t){const s=t||this.createComplexArray();for(let t=0;t<s.length;t+=2)s[t]=e[t>>>1],s[t+1]=0;return s}transform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._transform4(e,t,1)}realTransform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._realTransform4(e,t,1)}inverseTransform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._transform4(e,t,-1);for(let t=0;t<e.length;++t)e[t]/=this.size}_transform4(e,t,s){const r=this._csize;let o,n,a=1<<this._width,i=r/a<<1;const l=this._bitrev;if(4===i)for(o=0,n=0;o<r;o+=i,++n){const s=l[n];this._singleTransform2(t,e,o,s,a)}else for(o=0,n=0;o<r;o+=i,++n){const r=l[n];this._singleTransform4(t,e,o,r,a,s)}const c=this.table;for(a>>=2;a>=2;a>>=2){i=r/a<<1;const t=i>>>2;for(o=0;o<r;o+=i){const r=o+t-1;for(let n=o,i=0;n<r;n+=2,i+=a){const r=n,o=r+t,a=o+t,l=a+t,d=e[r],u=e[r+1],p=e[o],m=e[o+1],_=e[a],h=e[a+1],f=e[l],g=e[l+1],M=c[i],w=s*c[i+1],T=p*M-m*w,b=p*w+m*M,x=c[2*i],P=s*c[2*i+1],k=_*x-h*P,y=_*P+h*x,F=c[3*i],v=s*c[3*i+1],C=f*F-g*v,S=f*v+g*F,A=d+k,E=u+y,L=d-k,I=u-y,z=T+C,j=b+S,N=s*(T-C),V=s*(b-S);e[r]=A+z,e[r+1]=E+j,e[o]=L+V,e[o+1]=I-N,e[a]=A-z,e[a+1]=E-j,e[l]=L-V,e[l+1]=I+N}}}}_singleTransform2(e,t,s,r,o){const n=e[r],a=e[r+1],i=e[r+o],l=e[r+o+1];t[s]=n+i,t[s+1]=a+l,t[s+2]=n-i,t[s+3]=a-l}_singleTransform4(e,t,s,r,o,n){const 
a=2*o,i=3*o,l=e[r],c=e[r+1],d=e[r+o],u=e[r+o+1],p=e[r+a],m=e[r+a+1],_=e[r+i],h=e[r+i+1],f=l+p,g=c+m,M=l-p,w=c-m,T=d+_,b=u+h,x=n*(d-_),P=n*(u-h);t[s]=f+T,t[s+1]=g+b,t[s+2]=M+P,t[s+3]=w-x,t[s+4]=f-T,t[s+5]=g-b,t[s+6]=M-P,t[s+7]=w+x}_realTransform4(e,t,s){const r=this._csize;let o,n,a=1<<this._width,i=r/a<<1;const l=this._bitrev;if(4===i)for(o=0,n=0;o<r;o+=i,++n){const s=l[n];this._singleRealTransform2(t,e,o,s>>>1,a>>>1)}else for(o=0,n=0;o<r;o+=i,++n){const r=l[n];this._singleRealTransform4(t,e,o,r>>>1,a>>>1,s)}const c=this.table;for(a>>=2;a>=2;a>>=2){i=r/a<<1;const t=i>>>1,n=t>>>1,l=n>>>1;for(o=0;o<r;o+=i)for(let r=0,i=0;r<=l;r+=2,i+=a){const a=o+r,d=a+n,u=d+n,p=u+n,m=e[a],_=e[a+1],h=e[d],f=e[d+1],g=e[u],M=e[u+1],w=e[p],T=e[p+1],b=m,x=_,P=c[i],k=s*c[i+1],y=h*P-f*k,F=h*k+f*P,v=c[2*i],C=s*c[2*i+1],S=g*v-M*C,A=g*C+M*v,E=c[3*i],L=s*c[3*i+1],I=w*E-T*L,z=w*L+T*E,j=b+S,N=x+A,V=b-S,B=x-A,O=y+I,D=F+z,R=s*(y-I),G=s*(F-z);if(e[a]=j+O,e[a+1]=N+D,e[d]=V+G,e[d+1]=B-R,0===r){e[u]=j-O,e[u+1]=N-D;continue}if(r===l)continue;const q=o+n-r,W=o+t-r;e[q]=V-s*G,e[q+1]=-B-s*R,e[W]=j-s*O,e[W+1]=s*D-N}}const d=r>>>1;for(let t=2;t<d;t+=2)e[r-t]=e[t],e[r-t+1]=-e[t+1]}_singleRealTransform2(e,t,s,r,o){const n=e[r],a=e[r+o];t[s]=n+a,t[s+1]=0,t[s+2]=n-a,t[s+3]=0}_singleRealTransform4(e,t,s,r,o,n){const a=2*o,i=3*o,l=e[r],c=e[r+o],d=e[r+a],u=e[r+i],p=l+d,m=l-d,_=c+u,h=n*(c-u);t[s]=p+_,t[s+1]=0,t[s+2]=m,t[s+3]=-h,t[s+4]=p-_,t[s+5]=0,t[s+6]=m,t[s+7]=h}}class _{constructor(e){const t=2*(e-1),s=2*(2*e-1),r=2**Math.ceil(Math.log2(s));this.bufferSize=r,this._a=t;const o=new Float64Array(s),n=new Float64Array(r);this._chirpBuffer=new Float64Array(r),this._buffer1=new Float64Array(r),this._buffer2=new Float64Array(r),this._outBuffer1=new Float64Array(r),this._outBuffer2=new Float64Array(r);const a=-2*Math.PI/e,i=Math.cos(a),l=Math.sin(a);for(let t=0;t<s>>1;++t){const 
s=(t+1-e)**2/2,r=Math.sqrt(i**2+l**2)**s,a=s*Math.atan2(l,i),c=2*t;o[c]=r*Math.cos(a),o[c+1]=r*Math.sin(a),n[c]=o[c],n[c+1]=-o[c+1]}this._slicedChirpBuffer=o.subarray(t,s),this._f=new m(r>>1),this._f.transform(this._chirpBuffer,n)}_transform(e,t,s){const r=this._buffer1,o=this._buffer2,n=this._outBuffer1,a=this._outBuffer2,i=this._chirpBuffer,l=this._slicedChirpBuffer,c=this._a;if(s)for(let e=0;e<l.length;e+=2){const s=e+1,o=t[e>>1];r[e]=o*l[e],r[s]=o*l[s]}else for(let e=0;e<l.length;e+=2){const s=e+1;r[e]=t[e]*l[e]-t[s]*l[s],r[s]=t[e]*l[s]+t[s]*l[e]}this._f.transform(n,r);for(let e=0;e<i.length;e+=2){const t=e+1;o[e]=n[e]*i[e]-n[t]*i[t],o[t]=n[e]*i[t]+n[t]*i[e]}this._f.inverseTransform(a,o);for(let t=0;t<a.length;t+=2){const s=a[t+c],r=a[t+c+1],o=l[t],n=l[t+1];e[t]=s*o-r*n,e[t+1]=s*n+r*o}}transform(e,t){this._transform(e,t,!1)}realTransform(e,t){this._transform(e,t,!0)}}class h{constructor(e){this.fft_length=e,this.isPowerOfTwo=p(e),this.isPowerOfTwo?(this.fft=new m(e),this.outputBufferSize=2*e):(this.fft=new _(e),this.outputBufferSize=this.fft.bufferSize)}realTransform(e,t){this.fft.realTransform(e,t)}transform(e,t){this.fft.transform(e,t)}}function f(e,t){if(t%2==0||t<=0)throw new Error("Window size must be a positive odd number");const s=new e.constructor(e.length),r=new e.constructor(t),o=Math.floor(t/2);for(let t=0;t<e.length;++t){let n=0;for(let s=-o;s<=o;++s){let o=t+s;o<0?o=Math.abs(o):o>=e.length&&(o=2*(e.length-1)-o),r[n++]=e[o]}r.sort(),s[t]=r[o]}return s}function g(e,t){const s=Math.pow(10,t);return Math.round(e*s)/s}function M(e){const t=Math.round(e);return Math.abs(e)%1==.5?t%2==0?t:t-1:t}function w(e){const t=e.length,s=e[0].length,r=[t+1,s+1],o=Array.from({length:r[0]},(()=>Array(r[1]).fill(1/0)));o[0][0]=0;const n=Array.from({length:r[0]},(()=>Array(r[1]).fill(-1)));for(let t=1;t<r[1];++t)for(let s=1;s<r[0];++s){const r=o[s-1][t-1],a=o[s-1][t],i=o[s][t-1];let 
l,c;r<a&&r<i?(l=r,c=0):a<r&&a<i?(l=a,c=1):(l=i,c=2),o[s][t]=e[s-1][t-1]+l,n[s][t]=c}for(let e=0;e<r[1];++e)n[0][e]=2;for(let e=0;e<r[0];++e)n[e][0]=1;let a=t,i=s,l=[],c=[];for(;a>0||i>0;)switch(l.push(a-1),c.push(i-1),n[a][i]){case 0:--a,--i;break;case 1:--a;break;case 2:--i;break;default:throw new Error(`Internal error in dynamic time warping. Unexpected trace[${a}, ${i}]. Please file a bug report.`)}return l.reverse(),c.reverse(),[l,c]}},"./src/utils/tensor.js":
353
- /*!*****************************!*\
354
- !*** ./src/utils/tensor.js ***!
355
- \*****************************/(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>i,cat:()=>w,full:()=>k,full_like:()=>y,interpolate:()=>c,interpolate_4d:()=>d,layer_norm:()=>h,matmul:()=>u,mean:()=>x,mean_pooling:()=>_,ones:()=>F,ones_like:()=>v,permute:()=>l,quantize_embeddings:()=>A,rfft:()=>p,stack:()=>T,std_mean:()=>b,topk:()=>m,zeros:()=>C,zeros_like:()=>S});var r=s(/*! ./maths.js */"./src/utils/maths.js"),o=s(/*! ../backends/onnx.js */"./src/backends/onnx.js"),n=s(/*! ../ops/registry.js */"./src/ops/registry.js");const a=Object.freeze({float32:Float32Array,float16:Uint16Array,float64:Float64Array,string:Array,int8:Int8Array,uint8:Uint8Array,int16:Int16Array,uint16:Uint16Array,int32:Int32Array,uint32:Uint32Array,int64:BigInt64Array,uint64:BigUint64Array,bool:Uint8Array,uint4:Uint8Array,int4:Int8Array});class i{get dims(){return this.ort_tensor.dims}set dims(e){this.ort_tensor.dims=e}get type(){return this.ort_tensor.type}get data(){return this.ort_tensor.data}get size(){return this.ort_tensor.size}get location(){return this.ort_tensor.location}ort_tensor;constructor(...e){return(0,o.isONNXTensor)(e[0])?this.ort_tensor=e[0]:this.ort_tensor=new o.Tensor(e[0],e[1],e[2]),new Proxy(this,{get:(e,t)=>{if("string"==typeof t){let s=Number(t);if(Number.isInteger(s))return e._getitem(s)}return e[t]},set:(e,t,s)=>e[t]=s})}dispose(){this.ort_tensor.dispose()}*[Symbol.iterator](){const[e,...t]=this.dims;if(t.length>0){const s=t.reduce(((e,t)=>e*t));for(let r=0;r<e;++r)yield this._subarray(r,s,t)}else yield*this.data}_getitem(e){const[t,...s]=this.dims;if(e=M(e,t),s.length>0){const t=s.reduce(((e,t)=>e*t));return this._subarray(e,t,s)}return new i(this.type,[this.data[e]],s)}indexOf(e){const t=this.data;for(let s=0;s<t.length;++s)if(t[s]==e)return s;return-1}_subarray(e,t,s){const r=e*t,o=(e+1)*t,n="subarray"in this.data?this.data.subarray(r,o):this.data.slice(r,o);return new i(this.type,n,s)}item(){const e=this.data;if(1!==e.length)throw new Error(`a Tensor with ${e.length} elements 
cannot be converted to Scalar`);return e[0]}tolist(){return function(e,t){const s=e.length,r=t.reduce(((e,t)=>e*t));if(s!==r)throw Error(`cannot reshape array of size ${s} into shape (${t})`);let o=e;for(let e=t.length-1;e>=0;e--)o=o.reduce(((s,r)=>{let o=s[s.length-1];return o.length<t[e]?o.push(r):s.push([r]),s}),[[]]);return o[0]}(this.data,this.dims)}sigmoid(){return this.clone().sigmoid_()}sigmoid_(){const e=this.data;for(let t=0;t<e.length;++t)e[t]=1/(1+Math.exp(-e[t]));return this}map(e){return this.clone().map_(e)}map_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]=e(t[s],s,t);return this}mul(e){return this.clone().mul_(e)}mul_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]*=e;return this}div(e){return this.clone().div_(e)}div_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]/=e;return this}add(e){return this.clone().add_(e)}add_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]+=e;return this}sub(e){return this.clone().sub_(e)}sub_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]-=e;return this}clone(){return new i(this.type,this.data.slice(),this.dims.slice())}slice(...e){const t=[],s=[];for(let r=0;r<this.dims.length;++r){let o=e[r];if(null==o)s.push([0,this.dims[r]]),t.push(this.dims[r]);else if("number"==typeof o)o=M(o,this.dims[r],r),s.push([o,o+1]);else{if(!Array.isArray(o)||2!==o.length)throw new Error(`Invalid slice: ${o}`);{let[e,n]=o;if(e=null===e?0:M(e,this.dims[r],r,!1),n=null===n?this.dims[r]:M(n,this.dims[r],r,!1),e>n)throw new Error(`Invalid slice: ${o}`);const a=[Math.max(e,0),Math.min(n,this.dims[r])];s.push(a),t.push(a[1]-a[0])}}}const r=s.map((([e,t])=>t-e)),o=r.reduce(((e,t)=>e*t)),n=this.data,a=new n.constructor(o),l=this.stride();for(let e=0;e<o;++e){let t=0;for(let o=r.length-1,n=e;o>=0;--o){const e=r[o];t+=(n%e+s[o][0])*l[o],n=Math.floor(n/e)}a[e]=n[t]}return new i(this.type,a,t)}permute(...e){return l(this,e)}transpose(...e){return this.permute(...e)}sum(e=null,t=!1){return 
this.norm(1,e,t)}norm(e="fro",t=null,s=!1){if("fro"===e)e=2;else if("string"==typeof e)throw Error(`Unsupported norm: ${e}`);const r=this.data;if(null===t){let t=r.reduce(((t,s)=>t+s**e),0)**(1/e);return new i(this.type,[t],[])}t=M(t,this.dims.length);const o=this.dims.slice();o[t]=1;const n=new r.constructor(r.length/this.dims[t]);for(let s=0;s<r.length;++s){let a=0;for(let e=this.dims.length-1,r=s,n=1;e>=0;--e){const s=this.dims[e];if(e!==t){a+=r%s*n,n*=o[e]}r=Math.floor(r/s)}n[a]+=r[s]**e}if(1!==e)for(let t=0;t<n.length;++t)n[t]=n[t]**(1/e);return s||o.splice(t,1),new i(this.type,n,o)}normalize_(e=2,t=1){t=M(t,this.dims.length);const s=this.norm(e,t,!0),r=this.data,o=s.data;for(let e=0;e<r.length;++e){let s=0;for(let r=this.dims.length-1,o=e,n=1;r>=0;--r){const e=this.dims[r];if(r!==t){s+=o%e*n,n*=this.dims[r]}o=Math.floor(o/e)}r[e]/=o[s]}return this}normalize(e=2,t=1){return this.clone().normalize_(e,t)}stride(){return function(e){const t=new Array(e.length);for(let s=e.length-1,r=1;s>=0;--s)t[s]=r,r*=e[s];return t}(this.dims)}squeeze(e=null){return new i(this.type,this.data,f(this.dims,e))}squeeze_(e=null){return this.dims=f(this.dims,e),this}unsqueeze(e=null){return new i(this.type,this.data,g(this.dims,e))}unsqueeze_(e=null){return this.dims=g(this.dims,e),this}flatten_(e=0,t=-1){t=(t+this.dims.length)%this.dims.length;let s=this.dims.slice(0,e),r=this.dims.slice(e,t+1),o=this.dims.slice(t+1);return this.dims=[...s,r.reduce(((e,t)=>e*t),1),...o],this}flatten(e=0,t=-1){return this.clone().flatten_(e,t)}view(...e){let t=-1;for(let s=0;s<e.length;++s)if(-1===e[s]){if(-1!==t)throw new Error("Only one dimension can be inferred");t=s}const s=this.data;if(-1!==t){const r=e.reduce(((e,s,r)=>r!==t?e*s:e),1);e[t]=s.length/r}return new i(this.type,s,e)}neg_(){const e=this.data;for(let t=0;t<e.length;++t)e[t]=-e[t];return this}neg(){return this.clone().neg_()}clamp_(e,t){const s=this.data;for(let r=0;r<s.length;++r)s[r]=Math.min(Math.max(s[r],e),t);return 
this}clamp(e,t){return this.clone().clamp_(e,t)}round_(){const e=this.data;for(let t=0;t<e.length;++t)e[t]=Math.round(e[t]);return this}round(){return this.clone().round_()}mean(e=null,t=!1){return x(this,e,t)}to(e){if(this.type===e)return this;if(!a.hasOwnProperty(e))throw new Error(`Unsupported type: ${e}`);return new i(e,a[e].from(this.data),this.dims)}}function l(e,t){const[s,o]=(0,r.permute_data)(e.data,e.dims,t);return new i(e.type,s,o)}function c(e,[t,s],o="bilinear",n=!1){const a=e.dims.at(-3)??1,l=e.dims.at(-2),c=e.dims.at(-1);let d=(0,r.interpolate_data)(e.data,[a,l,c],[t,s],o,n);return new i(e.type,d,[a,t,s])}async function d(e,{size:t=null,mode:s="bilinear"}={}){if(4!==e.dims.length)throw new Error("`interpolate_4d` currently only supports 4D input.");if(!t)throw new Error("`interpolate_4d` requires a `size` argument.");let r,o;if(2===t.length)r=[...e.dims.slice(0,2),...t];else if(3===t.length)r=[e.dims[0],...t];else{if(4!==t.length)throw new Error("`size` must be of length 2, 3, or 4.");r=t}if("bilinear"===s)o=await n.TensorOpRegistry.bilinear_interpolate_4d;else{if("bicubic"!==s)throw new Error(`Unsupported mode: ${s}`);o=await n.TensorOpRegistry.bicubic_interpolate_4d}const a=new i("int64",new BigInt64Array(r.map(BigInt)),[r.length]);return await o({x:e,s:a})}async function u(e,t){const s=await n.TensorOpRegistry.matmul;return await s({a:e,b:t})}async function p(e,t){const s=await n.TensorOpRegistry.rfft;return await s({x:e,a:t})}async function m(e,t){const s=await n.TensorOpRegistry.top_k;return t=null===t?e.dims.at(-1):Math.min(t,e.dims.at(-1)),await s({x:e,k:new i("int64",[BigInt(t)],[1])})}function _(e,t){const s=e.data,r=t.data,o=[e.dims[0],e.dims[2]],n=new s.constructor(o[0]*o[1]),[a,l,c]=e.dims;let d=0;for(let e=0;e<a;++e){const t=e*c*l;for(let o=0;o<c;++o){let a=0,i=0;const u=e*l,p=t+o;for(let e=0;e<l;++e){const t=Number(r[u+e]);i+=t,a+=s[p+e*c]*t}const m=a/i;n[d++]=m}}return new i(e.type,n,o)}function 
h(e,t,{eps:s=1e-5}={}){if(2!==e.dims.length)throw new Error("`layer_norm` currently only supports 2D input.");const[r,o]=e.dims;if(1!==t.length&&t[0]!==o)throw new Error("`normalized_shape` must be a 1D array with shape `[input.dims[1]]`.");const[n,a]=b(e,1,0,!0),l=n.data,c=a.data,d=e.data,u=new d.constructor(d.length);for(let e=0;e<r;++e){const t=e*o;for(let r=0;r<o;++r){const o=t+r;u[o]=(d[o]-c[e])/(l[e]+s)}}return new i(e.type,u,e.dims)}function f(e,t){return e=e.slice(),null===t?e=e.filter((e=>1!==e)):"number"==typeof t?1===e[t]&&e.splice(t,1):Array.isArray(t)&&(e=e.filter(((e,s)=>1!==e||!t.includes(s)))),e}function g(e,t){return t=M(t,e.length+1),(e=e.slice()).splice(t,0,1),e}function M(e,t,s=null,r=!0){if(r&&(e<-t||e>=t))throw new Error(`IndexError: index ${e} is out of bounds for dimension${null===s?"":" "+s} with size ${t}`);return e<0&&(e=(e%t+t)%t),e}function w(e,t=0){t=M(t,e[0].dims.length);const s=e[0].dims.slice();s[t]=e.reduce(((e,s)=>e+s.dims[t]),0);const r=s.reduce(((e,t)=>e*t),1),o=new e[0].data.constructor(r),n=e[0].type;if(0===t){let t=0;for(const s of e){const e=s.data;o.set(e,t),t+=e.length}}else{let r=0;for(let n=0;n<e.length;++n){const{data:a,dims:i}=e[n];for(let e=0;e<a.length;++e){let n=0;for(let o=i.length-1,a=e,l=1;o>=0;--o){const e=i[o];let c=a%e;o===t&&(c+=r),n+=c*l,l*=s[o],a=Math.floor(a/e)}o[n]=a[e]}r+=i[t]}}return new i(n,o,s)}function T(e,t=0){return w(e.map((e=>e.unsqueeze(t))),t)}function b(e,t=null,s=1,r=!1){const o=e.data,n=e.dims;if(null===t){const t=o.reduce(((e,t)=>e+t),0)/o.length,r=Math.sqrt(o.reduce(((e,s)=>e+(s-t)**2),0)/(o.length-s)),n=new i(e.type,[t],[]);return[new i(e.type,[r],[]),n]}const a=x(e,t=M(t,n.length),r),l=a.data,c=n.slice();c[t]=1;const d=new o.constructor(o.length/n[t]);for(let e=0;e<o.length;++e){let s=0;for(let r=n.length-1,o=e,a=1;r>=0;--r){const e=n[r];if(r!==t){s+=o%e*a,a*=c[r]}o=Math.floor(o/e)}d[s]+=(o[e]-l[s])**2}for(let e=0;e<d.length;++e)d[e]=Math.sqrt(d[e]/(n[t]-s));r||c.splice(t,1);return[new 
i(e.type,d,c),a]}function x(e,t=null,s=!1){const r=e.data;if(null===t){const t=r.reduce(((e,t)=>e+t),0);return new i(e.type,[t/r.length],[])}const o=e.dims;t=M(t,o.length);const n=o.slice();n[t]=1;const a=new r.constructor(r.length/o[t]);for(let e=0;e<r.length;++e){let s=0;for(let r=o.length-1,a=e,i=1;r>=0;--r){const e=o[r];if(r!==t){s+=a%e*i,i*=n[r]}a=Math.floor(a/e)}a[s]+=r[e]}if(1!==o[t])for(let e=0;e<a.length;++e)a[e]=a[e]/o[t];return s||n.splice(t,1),new i(e.type,a,n)}function P(e,t,s,r){const o=e.reduce(((e,t)=>e*t),1);return new i(s,new r(o).fill(t),e)}function k(e,t){let s,r;if("number"==typeof t)s="float32",r=Float32Array;else if("bigint"==typeof t)s="int64",r=BigInt64Array;else{if("boolean"!=typeof t)throw new Error("Unsupported data type: "+typeof t);s="bool",r=Uint8Array}return P(e,t,s,r)}function y(e,t){return k(e.dims,t)}function F(e){return P(e,1n,"int64",BigInt64Array)}function v(e){return F(e.dims)}function C(e){return P(e,0n,"int64",BigInt64Array)}function S(e){return C(e.dims)}function A(e,t){if(2!==e.dims.length)throw new Error("The tensor must have 2 dimensions");if(e.dims.at(-1)%8!=0)throw new Error("The last dimension of the tensor must be a multiple of 8");if(!["binary","ubinary"].includes(t))throw new Error("The precision must be either 'binary' or 'ubinary'");const s="binary"===t,r=s?"int8":"uint8",o=s?Int8Array:Uint8Array,n=e.data,a=new o(n.length/8);for(let e=0;e<n.length;++e){const t=n[e]>0?1:0,r=Math.floor(e/8),o=e%8;a[r]|=t<<7-o,s&&0===o&&(a[r]-=128)}return new i(r,a,[e.dims[0],e.dims[1]/8])}}},l={};function c(e){var t=l[e];if(void 0!==t)return t.exports;var s=l[e]={exports:{}};return i[e](s,s.exports,c),s.exports}a=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,c.t=function(e,t){if(1&t&&(e=this(e)),8&t)return e;if("object"==typeof e&&e){if(4&t&&e.__esModule)return e;if(16&t&&"function"==typeof e.then)return e}var s=Object.create(null);c.r(s);var r={};n=n||[null,a({}),a([]),a(a)];for(var o=2&t&&e;"object"==typeof 
o&&!~n.indexOf(o);o=a(o))Object.getOwnPropertyNames(o).forEach((t=>r[t]=()=>e[t]));return r.default=()=>e,c.d(s,r),s},c.d=(e,t)=>{for(var s in t)c.o(t,s)&&!c.o(e,s)&&Object.defineProperty(e,s,{enumerable:!0,get:t[s]})},c.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),c.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})};var d={};
356
- /*!*****************************!*\
357
- !*** ./src/transformers.js ***!
358
- \*****************************/c.r(d),c.d(d,{ASTFeatureExtractor:()=>b.ASTFeatureExtractor,ASTForAudioClassification:()=>m.ASTForAudioClassification,ASTModel:()=>m.ASTModel,ASTPreTrainedModel:()=>m.ASTPreTrainedModel,AlbertForMaskedLM:()=>m.AlbertForMaskedLM,AlbertForQuestionAnswering:()=>m.AlbertForQuestionAnswering,AlbertForSequenceClassification:()=>m.AlbertForSequenceClassification,AlbertModel:()=>m.AlbertModel,AlbertPreTrainedModel:()=>m.AlbertPreTrainedModel,AlbertTokenizer:()=>_.AlbertTokenizer,AudioClassificationPipeline:()=>p.AudioClassificationPipeline,AutoConfig:()=>h.AutoConfig,AutoFeatureExtractor:()=>x.AutoFeatureExtractor,AutoImageProcessor:()=>y.AutoImageProcessor,AutoModel:()=>m.AutoModel,AutoModelForAudioClassification:()=>m.AutoModelForAudioClassification,AutoModelForAudioFrameClassification:()=>m.AutoModelForAudioFrameClassification,AutoModelForCTC:()=>m.AutoModelForCTC,AutoModelForCausalLM:()=>m.AutoModelForCausalLM,AutoModelForDepthEstimation:()=>m.AutoModelForDepthEstimation,AutoModelForDocumentQuestionAnswering:()=>m.AutoModelForDocumentQuestionAnswering,AutoModelForImageClassification:()=>m.AutoModelForImageClassification,AutoModelForImageFeatureExtraction:()=>m.AutoModelForImageFeatureExtraction,AutoModelForImageMatting:()=>m.AutoModelForImageMatting,AutoModelForImageSegmentation:()=>m.AutoModelForImageSegmentation,AutoModelForImageToImage:()=>m.AutoModelForImageToImage,AutoModelForMaskGeneration:()=>m.AutoModelForMaskGeneration,AutoModelForMaskedLM:()=>m.AutoModelForMaskedLM,AutoModelForNormalEstimation:()=>m.AutoModelForNormalEstimation,AutoModelForObjectDetection:()=>m.AutoModelForObjectDetection,AutoModelForPoseEstimation:()=>m.AutoModelForPoseEstimation,AutoModelForQuestionAnswering:()=>m.AutoModelForQuestionAnswering,AutoModelForSemanticSegmentation:()=>m.AutoModelForSemanticSegmentation,AutoModelForSeq2SeqLM:()=>m.AutoModelForSeq2SeqLM,AutoModelForSequenceClassification:()=>m.AutoModelForSequenceClassification,AutoModelForSpeechSeq
2Seq:()=>m.AutoModelForSpeechSeq2Seq,AutoModelForTextToSpectrogram:()=>m.AutoModelForTextToSpectrogram,AutoModelForTextToWaveform:()=>m.AutoModelForTextToWaveform,AutoModelForTokenClassification:()=>m.AutoModelForTokenClassification,AutoModelForUniversalSegmentation:()=>m.AutoModelForUniversalSegmentation,AutoModelForVision2Seq:()=>m.AutoModelForVision2Seq,AutoModelForXVector:()=>m.AutoModelForXVector,AutoModelForZeroShotObjectDetection:()=>m.AutoModelForZeroShotObjectDetection,AutoProcessor:()=>C.AutoProcessor,AutoTokenizer:()=>_.AutoTokenizer,AutomaticSpeechRecognitionPipeline:()=>p.AutomaticSpeechRecognitionPipeline,BartForConditionalGeneration:()=>m.BartForConditionalGeneration,BartForSequenceClassification:()=>m.BartForSequenceClassification,BartModel:()=>m.BartModel,BartPretrainedModel:()=>m.BartPretrainedModel,BartTokenizer:()=>_.BartTokenizer,BaseModelOutput:()=>m.BaseModelOutput,BaseStreamer:()=>S.BaseStreamer,BeitFeatureExtractor:()=>k.BeitFeatureExtractor,BeitForImageClassification:()=>m.BeitForImageClassification,BeitModel:()=>m.BeitModel,BeitPreTrainedModel:()=>m.BeitPreTrainedModel,BertForMaskedLM:()=>m.BertForMaskedLM,BertForQuestionAnswering:()=>m.BertForQuestionAnswering,BertForSequenceClassification:()=>m.BertForSequenceClassification,BertForTokenClassification:()=>m.BertForTokenClassification,BertModel:()=>m.BertModel,BertPreTrainedModel:()=>m.BertPreTrainedModel,BertTokenizer:()=>_.BertTokenizer,BitImageProcessor:()=>k.BitImageProcessor,BlenderbotForConditionalGeneration:()=>m.BlenderbotForConditionalGeneration,BlenderbotModel:()=>m.BlenderbotModel,BlenderbotPreTrainedModel:()=>m.BlenderbotPreTrainedModel,BlenderbotSmallForConditionalGeneration:()=>m.BlenderbotSmallForConditionalGeneration,BlenderbotSmallModel:()=>m.BlenderbotSmallModel,BlenderbotSmallPreTrainedModel:()=>m.BlenderbotSmallPreTrainedModel,BlenderbotSmallTokenizer:()=>_.BlenderbotSmallTokenizer,BlenderbotTokenizer:()=>_.BlenderbotTokenizer,BloomForCausalLM:()=>m.BloomForCausalLM,Blo
omModel:()=>m.BloomModel,BloomPreTrainedModel:()=>m.BloomPreTrainedModel,BloomTokenizer:()=>_.BloomTokenizer,CLIPFeatureExtractor:()=>k.CLIPFeatureExtractor,CLIPImageProcessor:()=>k.CLIPImageProcessor,CLIPModel:()=>m.CLIPModel,CLIPPreTrainedModel:()=>m.CLIPPreTrainedModel,CLIPSegForImageSegmentation:()=>m.CLIPSegForImageSegmentation,CLIPSegModel:()=>m.CLIPSegModel,CLIPSegPreTrainedModel:()=>m.CLIPSegPreTrainedModel,CLIPTextModel:()=>m.CLIPTextModel,CLIPTextModelWithProjection:()=>m.CLIPTextModelWithProjection,CLIPTokenizer:()=>_.CLIPTokenizer,CLIPVisionModel:()=>m.CLIPVisionModel,CLIPVisionModelWithProjection:()=>m.CLIPVisionModelWithProjection,CamembertForMaskedLM:()=>m.CamembertForMaskedLM,CamembertForQuestionAnswering:()=>m.CamembertForQuestionAnswering,CamembertForSequenceClassification:()=>m.CamembertForSequenceClassification,CamembertForTokenClassification:()=>m.CamembertForTokenClassification,CamembertModel:()=>m.CamembertModel,CamembertPreTrainedModel:()=>m.CamembertPreTrainedModel,CamembertTokenizer:()=>_.CamembertTokenizer,CausalLMOutput:()=>m.CausalLMOutput,CausalLMOutputWithPast:()=>m.CausalLMOutputWithPast,ChineseCLIPFeatureExtractor:()=>k.ChineseCLIPFeatureExtractor,ChineseCLIPModel:()=>m.ChineseCLIPModel,ChineseCLIPPreTrainedModel:()=>m.ChineseCLIPPreTrainedModel,ClapAudioModelWithProjection:()=>m.ClapAudioModelWithProjection,ClapFeatureExtractor:()=>b.ClapFeatureExtractor,ClapModel:()=>m.ClapModel,ClapPreTrainedModel:()=>m.ClapPreTrainedModel,ClapTextModelWithProjection:()=>m.ClapTextModelWithProjection,ClassifierFreeGuidanceLogitsProcessor:()=>E.ClassifierFreeGuidanceLogitsProcessor,CodeGenForCausalLM:()=>m.CodeGenForCausalLM,CodeGenModel:()=>m.CodeGenModel,CodeGenPreTrainedModel:()=>m.CodeGenPreTrainedModel,CodeGenTokenizer:()=>_.CodeGenTokenizer,CodeLlamaTokenizer:()=>_.CodeLlamaTokenizer,CohereForCausalLM:()=>m.CohereForCausalLM,CohereModel:()=>m.CohereModel,CoherePreTrainedModel:()=>m.CoherePreTrainedModel,CohereTokenizer:()=>_.CohereTokenizer,C
onvBertForMaskedLM:()=>m.ConvBertForMaskedLM,ConvBertForQuestionAnswering:()=>m.ConvBertForQuestionAnswering,ConvBertForSequenceClassification:()=>m.ConvBertForSequenceClassification,ConvBertForTokenClassification:()=>m.ConvBertForTokenClassification,ConvBertModel:()=>m.ConvBertModel,ConvBertPreTrainedModel:()=>m.ConvBertPreTrainedModel,ConvBertTokenizer:()=>_.ConvBertTokenizer,ConvNextFeatureExtractor:()=>k.ConvNextFeatureExtractor,ConvNextForImageClassification:()=>m.ConvNextForImageClassification,ConvNextImageProcessor:()=>k.ConvNextImageProcessor,ConvNextModel:()=>m.ConvNextModel,ConvNextPreTrainedModel:()=>m.ConvNextPreTrainedModel,ConvNextV2ForImageClassification:()=>m.ConvNextV2ForImageClassification,ConvNextV2Model:()=>m.ConvNextV2Model,ConvNextV2PreTrainedModel:()=>m.ConvNextV2PreTrainedModel,DPTFeatureExtractor:()=>k.DPTFeatureExtractor,DPTForDepthEstimation:()=>m.DPTForDepthEstimation,DPTImageProcessor:()=>k.DPTImageProcessor,DPTModel:()=>m.DPTModel,DPTPreTrainedModel:()=>m.DPTPreTrainedModel,DebertaForMaskedLM:()=>m.DebertaForMaskedLM,DebertaForQuestionAnswering:()=>m.DebertaForQuestionAnswering,DebertaForSequenceClassification:()=>m.DebertaForSequenceClassification,DebertaForTokenClassification:()=>m.DebertaForTokenClassification,DebertaModel:()=>m.DebertaModel,DebertaPreTrainedModel:()=>m.DebertaPreTrainedModel,DebertaTokenizer:()=>_.DebertaTokenizer,DebertaV2ForMaskedLM:()=>m.DebertaV2ForMaskedLM,DebertaV2ForQuestionAnswering:()=>m.DebertaV2ForQuestionAnswering,DebertaV2ForSequenceClassification:()=>m.DebertaV2ForSequenceClassification,DebertaV2ForTokenClassification:()=>m.DebertaV2ForTokenClassification,DebertaV2Model:()=>m.DebertaV2Model,DebertaV2PreTrainedModel:()=>m.DebertaV2PreTrainedModel,DebertaV2Tokenizer:()=>_.DebertaV2Tokenizer,DecisionTransformerModel:()=>m.DecisionTransformerModel,DecisionTransformerPreTrainedModel:()=>m.DecisionTransformerPreTrainedModel,DeiTFeatureExtractor:()=>k.DeiTFeatureExtractor,DeiTForImageClassification:()=>m.DeiT
ForImageClassification,DeiTImageProcessor:()=>k.DeiTImageProcessor,DeiTModel:()=>m.DeiTModel,DeiTPreTrainedModel:()=>m.DeiTPreTrainedModel,DepthAnythingForDepthEstimation:()=>m.DepthAnythingForDepthEstimation,DepthAnythingPreTrainedModel:()=>m.DepthAnythingPreTrainedModel,DepthEstimationPipeline:()=>p.DepthEstimationPipeline,DepthProForDepthEstimation:()=>m.DepthProForDepthEstimation,DepthProPreTrainedModel:()=>m.DepthProPreTrainedModel,DetrFeatureExtractor:()=>k.DetrFeatureExtractor,DetrForObjectDetection:()=>m.DetrForObjectDetection,DetrForSegmentation:()=>m.DetrForSegmentation,DetrImageProcessor:()=>k.DetrImageProcessor,DetrModel:()=>m.DetrModel,DetrObjectDetectionOutput:()=>m.DetrObjectDetectionOutput,DetrPreTrainedModel:()=>m.DetrPreTrainedModel,DetrSegmentationOutput:()=>m.DetrSegmentationOutput,Dinov2ForImageClassification:()=>m.Dinov2ForImageClassification,Dinov2Model:()=>m.Dinov2Model,Dinov2PreTrainedModel:()=>m.Dinov2PreTrainedModel,DistilBertForMaskedLM:()=>m.DistilBertForMaskedLM,DistilBertForQuestionAnswering:()=>m.DistilBertForQuestionAnswering,DistilBertForSequenceClassification:()=>m.DistilBertForSequenceClassification,DistilBertForTokenClassification:()=>m.DistilBertForTokenClassification,DistilBertModel:()=>m.DistilBertModel,DistilBertPreTrainedModel:()=>m.DistilBertPreTrainedModel,DistilBertTokenizer:()=>_.DistilBertTokenizer,DocumentQuestionAnsweringPipeline:()=>p.DocumentQuestionAnsweringPipeline,DonutFeatureExtractor:()=>k.DonutFeatureExtractor,DonutImageProcessor:()=>k.DonutImageProcessor,DonutSwinModel:()=>m.DonutSwinModel,DonutSwinPreTrainedModel:()=>m.DonutSwinPreTrainedModel,EfficientNetForImageClassification:()=>m.EfficientNetForImageClassification,EfficientNetImageProcessor:()=>k.EfficientNetImageProcessor,EfficientNetModel:()=>m.EfficientNetModel,EfficientNetPreTrainedModel:()=>m.EfficientNetPreTrainedModel,ElectraForMaskedLM:()=>m.ElectraForMaskedLM,ElectraForQuestionAnswering:()=>m.ElectraForQuestionAnswering,ElectraForSequenceClassif
ication:()=>m.ElectraForSequenceClassification,ElectraForTokenClassification:()=>m.ElectraForTokenClassification,ElectraModel:()=>m.ElectraModel,ElectraPreTrainedModel:()=>m.ElectraPreTrainedModel,ElectraTokenizer:()=>_.ElectraTokenizer,EosTokenCriteria:()=>A.EosTokenCriteria,EsmForMaskedLM:()=>m.EsmForMaskedLM,EsmForSequenceClassification:()=>m.EsmForSequenceClassification,EsmForTokenClassification:()=>m.EsmForTokenClassification,EsmModel:()=>m.EsmModel,EsmPreTrainedModel:()=>m.EsmPreTrainedModel,EsmTokenizer:()=>_.EsmTokenizer,FFT:()=>w.FFT,FalconForCausalLM:()=>m.FalconForCausalLM,FalconModel:()=>m.FalconModel,FalconPreTrainedModel:()=>m.FalconPreTrainedModel,FalconTokenizer:()=>_.FalconTokenizer,FastViTForImageClassification:()=>m.FastViTForImageClassification,FastViTModel:()=>m.FastViTModel,FastViTPreTrainedModel:()=>m.FastViTPreTrainedModel,FeatureExtractionPipeline:()=>p.FeatureExtractionPipeline,FeatureExtractor:()=>T.FeatureExtractor,FillMaskPipeline:()=>p.FillMaskPipeline,Florence2ForConditionalGeneration:()=>m.Florence2ForConditionalGeneration,Florence2PreTrainedModel:()=>m.Florence2PreTrainedModel,Florence2Processor:()=>v.Florence2Processor,ForcedBOSTokenLogitsProcessor:()=>E.ForcedBOSTokenLogitsProcessor,ForcedEOSTokenLogitsProcessor:()=>E.ForcedEOSTokenLogitsProcessor,GLPNFeatureExtractor:()=>k.GLPNFeatureExtractor,GLPNForDepthEstimation:()=>m.GLPNForDepthEstimation,GLPNModel:()=>m.GLPNModel,GLPNPreTrainedModel:()=>m.GLPNPreTrainedModel,GPT2LMHeadModel:()=>m.GPT2LMHeadModel,GPT2Model:()=>m.GPT2Model,GPT2PreTrainedModel:()=>m.GPT2PreTrainedModel,GPT2Tokenizer:()=>_.GPT2Tokenizer,GPTBigCodeForCausalLM:()=>m.GPTBigCodeForCausalLM,GPTBigCodeModel:()=>m.GPTBigCodeModel,GPTBigCodePreTrainedModel:()=>m.GPTBigCodePreTrainedModel,GPTJForCausalLM:()=>m.GPTJForCausalLM,GPTJModel:()=>m.GPTJModel,GPTJPreTrainedModel:()=>m.GPTJPreTrainedModel,GPTNeoForCausalLM:()=>m.GPTNeoForCausalLM,GPTNeoModel:()=>m.GPTNeoModel,GPTNeoPreTrainedModel:()=>m.GPTNeoPreTrainedModel,GPT
NeoXForCausalLM:()=>m.GPTNeoXForCausalLM,GPTNeoXModel:()=>m.GPTNeoXModel,GPTNeoXPreTrainedModel:()=>m.GPTNeoXPreTrainedModel,GPTNeoXTokenizer:()=>_.GPTNeoXTokenizer,Gemma2ForCausalLM:()=>m.Gemma2ForCausalLM,Gemma2Model:()=>m.Gemma2Model,Gemma2PreTrainedModel:()=>m.Gemma2PreTrainedModel,GemmaForCausalLM:()=>m.GemmaForCausalLM,GemmaModel:()=>m.GemmaModel,GemmaPreTrainedModel:()=>m.GemmaPreTrainedModel,GemmaTokenizer:()=>_.GemmaTokenizer,GraniteForCausalLM:()=>m.GraniteForCausalLM,GraniteModel:()=>m.GraniteModel,GranitePreTrainedModel:()=>m.GranitePreTrainedModel,Grok1Tokenizer:()=>_.Grok1Tokenizer,GroupViTModel:()=>m.GroupViTModel,GroupViTPreTrainedModel:()=>m.GroupViTPreTrainedModel,HerbertTokenizer:()=>_.HerbertTokenizer,HieraForImageClassification:()=>m.HieraForImageClassification,HieraModel:()=>m.HieraModel,HieraPreTrainedModel:()=>m.HieraPreTrainedModel,HubertForCTC:()=>m.HubertForCTC,HubertForSequenceClassification:()=>m.HubertForSequenceClassification,HubertModel:()=>m.HubertModel,HubertPreTrainedModel:()=>m.HubertPreTrainedModel,Idefics3ForConditionalGeneration:()=>m.Idefics3ForConditionalGeneration,Idefics3ImageProcessor:()=>k.Idefics3ImageProcessor,Idefics3PreTrainedModel:()=>m.Idefics3PreTrainedModel,Idefics3Processor:()=>v.Idefics3Processor,ImageClassificationPipeline:()=>p.ImageClassificationPipeline,ImageFeatureExtractionPipeline:()=>p.ImageFeatureExtractionPipeline,ImageFeatureExtractor:()=>b.ImageFeatureExtractor,ImageMattingOutput:()=>m.ImageMattingOutput,ImageProcessor:()=>P.ImageProcessor,ImageSegmentationPipeline:()=>p.ImageSegmentationPipeline,ImageToImagePipeline:()=>p.ImageToImagePipeline,ImageToTextPipeline:()=>p.ImageToTextPipeline,InterruptableStoppingCriteria:()=>A.InterruptableStoppingCriteria,JAISLMHeadModel:()=>m.JAISLMHeadModel,JAISModel:()=>m.JAISModel,JAISPreTrainedModel:()=>m.JAISPreTrainedModel,JinaCLIPImageProcessor:()=>k.JinaCLIPImageProcessor,JinaCLIPModel:()=>m.JinaCLIPModel,JinaCLIPPreTrainedModel:()=>m.JinaCLIPPreTrainedModel,J
inaCLIPProcessor:()=>v.JinaCLIPProcessor,JinaCLIPTextModel:()=>m.JinaCLIPTextModel,JinaCLIPVisionModel:()=>m.JinaCLIPVisionModel,LlamaForCausalLM:()=>m.LlamaForCausalLM,LlamaModel:()=>m.LlamaModel,LlamaPreTrainedModel:()=>m.LlamaPreTrainedModel,LlamaTokenizer:()=>_.LlamaTokenizer,LlavaForConditionalGeneration:()=>m.LlavaForConditionalGeneration,LlavaOnevisionForConditionalGeneration:()=>m.LlavaOnevisionForConditionalGeneration,LlavaOnevisionImageProcessor:()=>k.LlavaOnevisionImageProcessor,LlavaPreTrainedModel:()=>m.LlavaPreTrainedModel,LogitsProcessor:()=>E.LogitsProcessor,LogitsProcessorList:()=>E.LogitsProcessorList,LogitsWarper:()=>E.LogitsWarper,LongT5ForConditionalGeneration:()=>m.LongT5ForConditionalGeneration,LongT5Model:()=>m.LongT5Model,LongT5PreTrainedModel:()=>m.LongT5PreTrainedModel,M2M100ForConditionalGeneration:()=>m.M2M100ForConditionalGeneration,M2M100Model:()=>m.M2M100Model,M2M100PreTrainedModel:()=>m.M2M100PreTrainedModel,M2M100Tokenizer:()=>_.M2M100Tokenizer,MBart50Tokenizer:()=>_.MBart50Tokenizer,MBartForCausalLM:()=>m.MBartForCausalLM,MBartForConditionalGeneration:()=>m.MBartForConditionalGeneration,MBartForSequenceClassification:()=>m.MBartForSequenceClassification,MBartModel:()=>m.MBartModel,MBartPreTrainedModel:()=>m.MBartPreTrainedModel,MBartTokenizer:()=>_.MBartTokenizer,MPNetForMaskedLM:()=>m.MPNetForMaskedLM,MPNetForQuestionAnswering:()=>m.MPNetForQuestionAnswering,MPNetForSequenceClassification:()=>m.MPNetForSequenceClassification,MPNetForTokenClassification:()=>m.MPNetForTokenClassification,MPNetModel:()=>m.MPNetModel,MPNetPreTrainedModel:()=>m.MPNetPreTrainedModel,MPNetTokenizer:()=>_.MPNetTokenizer,MT5ForConditionalGeneration:()=>m.MT5ForConditionalGeneration,MT5Model:()=>m.MT5Model,MT5PreTrainedModel:()=>m.MT5PreTrainedModel,MarianMTModel:()=>m.MarianMTModel,MarianModel:()=>m.MarianModel,MarianPreTrainedModel:()=>m.MarianPreTrainedModel,MarianTokenizer:()=>_.MarianTokenizer,Mask2FormerImageProcessor:()=>k.Mask2FormerImageProcessor,M
askFormerFeatureExtractor:()=>k.MaskFormerFeatureExtractor,MaskFormerForInstanceSegmentation:()=>m.MaskFormerForInstanceSegmentation,MaskFormerImageProcessor:()=>k.MaskFormerImageProcessor,MaskFormerModel:()=>m.MaskFormerModel,MaskFormerPreTrainedModel:()=>m.MaskFormerPreTrainedModel,MaskedLMOutput:()=>m.MaskedLMOutput,MaxLengthCriteria:()=>A.MaxLengthCriteria,MgpstrForSceneTextRecognition:()=>m.MgpstrForSceneTextRecognition,MgpstrModelOutput:()=>m.MgpstrModelOutput,MgpstrPreTrainedModel:()=>m.MgpstrPreTrainedModel,MgpstrProcessor:()=>v.MgpstrProcessor,MgpstrTokenizer:()=>_.MgpstrTokenizer,MinLengthLogitsProcessor:()=>E.MinLengthLogitsProcessor,MinNewTokensLengthLogitsProcessor:()=>E.MinNewTokensLengthLogitsProcessor,MistralForCausalLM:()=>m.MistralForCausalLM,MistralModel:()=>m.MistralModel,MistralPreTrainedModel:()=>m.MistralPreTrainedModel,MobileBertForMaskedLM:()=>m.MobileBertForMaskedLM,MobileBertForQuestionAnswering:()=>m.MobileBertForQuestionAnswering,MobileBertForSequenceClassification:()=>m.MobileBertForSequenceClassification,MobileBertModel:()=>m.MobileBertModel,MobileBertPreTrainedModel:()=>m.MobileBertPreTrainedModel,MobileBertTokenizer:()=>_.MobileBertTokenizer,MobileLLMForCausalLM:()=>m.MobileLLMForCausalLM,MobileLLMModel:()=>m.MobileLLMModel,MobileLLMPreTrainedModel:()=>m.MobileLLMPreTrainedModel,MobileNetV1FeatureExtractor:()=>k.MobileNetV1FeatureExtractor,MobileNetV1ForImageClassification:()=>m.MobileNetV1ForImageClassification,MobileNetV1ImageProcessor:()=>k.MobileNetV1ImageProcessor,MobileNetV1Model:()=>m.MobileNetV1Model,MobileNetV1PreTrainedModel:()=>m.MobileNetV1PreTrainedModel,MobileNetV2FeatureExtractor:()=>k.MobileNetV2FeatureExtractor,MobileNetV2ForImageClassification:()=>m.MobileNetV2ForImageClassification,MobileNetV2ImageProcessor:()=>k.MobileNetV2ImageProcessor,MobileNetV2Model:()=>m.MobileNetV2Model,MobileNetV2PreTrainedModel:()=>m.MobileNetV2PreTrainedModel,MobileNetV3FeatureExtractor:()=>k.MobileNetV3FeatureExtractor,MobileNetV3ForIma
geClassification:()=>m.MobileNetV3ForImageClassification,MobileNetV3ImageProcessor:()=>k.MobileNetV3ImageProcessor,MobileNetV3Model:()=>m.MobileNetV3Model,MobileNetV3PreTrainedModel:()=>m.MobileNetV3PreTrainedModel,MobileNetV4FeatureExtractor:()=>k.MobileNetV4FeatureExtractor,MobileNetV4ForImageClassification:()=>m.MobileNetV4ForImageClassification,MobileNetV4ImageProcessor:()=>k.MobileNetV4ImageProcessor,MobileNetV4Model:()=>m.MobileNetV4Model,MobileNetV4PreTrainedModel:()=>m.MobileNetV4PreTrainedModel,MobileViTFeatureExtractor:()=>k.MobileViTFeatureExtractor,MobileViTForImageClassification:()=>m.MobileViTForImageClassification,MobileViTImageProcessor:()=>k.MobileViTImageProcessor,MobileViTModel:()=>m.MobileViTModel,MobileViTPreTrainedModel:()=>m.MobileViTPreTrainedModel,MobileViTV2ForImageClassification:()=>m.MobileViTV2ForImageClassification,MobileViTV2Model:()=>m.MobileViTV2Model,MobileViTV2PreTrainedModel:()=>m.MobileViTV2PreTrainedModel,ModelOutput:()=>m.ModelOutput,Moondream1ForConditionalGeneration:()=>m.Moondream1ForConditionalGeneration,MptForCausalLM:()=>m.MptForCausalLM,MptModel:()=>m.MptModel,MptPreTrainedModel:()=>m.MptPreTrainedModel,MultiModalityCausalLM:()=>m.MultiModalityCausalLM,MultiModalityPreTrainedModel:()=>m.MultiModalityPreTrainedModel,MusicgenForCausalLM:()=>m.MusicgenForCausalLM,MusicgenForConditionalGeneration:()=>m.MusicgenForConditionalGeneration,MusicgenModel:()=>m.MusicgenModel,MusicgenPreTrainedModel:()=>m.MusicgenPreTrainedModel,NllbTokenizer:()=>_.NllbTokenizer,NoBadWordsLogitsProcessor:()=>E.NoBadWordsLogitsProcessor,NoRepeatNGramLogitsProcessor:()=>E.NoRepeatNGramLogitsProcessor,NomicBertModel:()=>m.NomicBertModel,NomicBertPreTrainedModel:()=>m.NomicBertPreTrainedModel,NougatImageProcessor:()=>k.NougatImageProcessor,NougatTokenizer:()=>_.NougatTokenizer,OPTForCausalLM:()=>m.OPTForCausalLM,OPTModel:()=>m.OPTModel,OPTPreTrainedModel:()=>m.OPTPreTrainedModel,ObjectDetectionPipeline:()=>p.ObjectDetectionPipeline,OlmoForCausalLM:()=>m
.OlmoForCausalLM,OlmoModel:()=>m.OlmoModel,OlmoPreTrainedModel:()=>m.OlmoPreTrainedModel,OpenELMForCausalLM:()=>m.OpenELMForCausalLM,OpenELMModel:()=>m.OpenELMModel,OpenELMPreTrainedModel:()=>m.OpenELMPreTrainedModel,OwlViTFeatureExtractor:()=>k.OwlViTFeatureExtractor,OwlViTForObjectDetection:()=>m.OwlViTForObjectDetection,OwlViTImageProcessor:()=>k.OwlViTImageProcessor,OwlViTModel:()=>m.OwlViTModel,OwlViTPreTrainedModel:()=>m.OwlViTPreTrainedModel,OwlViTProcessor:()=>v.OwlViTProcessor,Owlv2ForObjectDetection:()=>m.Owlv2ForObjectDetection,Owlv2ImageProcessor:()=>k.Owlv2ImageProcessor,Owlv2Model:()=>m.Owlv2Model,Owlv2PreTrainedModel:()=>m.Owlv2PreTrainedModel,PatchTSMixerForPrediction:()=>m.PatchTSMixerForPrediction,PatchTSMixerModel:()=>m.PatchTSMixerModel,PatchTSMixerPreTrainedModel:()=>m.PatchTSMixerPreTrainedModel,PatchTSTForPrediction:()=>m.PatchTSTForPrediction,PatchTSTModel:()=>m.PatchTSTModel,PatchTSTPreTrainedModel:()=>m.PatchTSTPreTrainedModel,Phi3ForCausalLM:()=>m.Phi3ForCausalLM,Phi3Model:()=>m.Phi3Model,Phi3PreTrainedModel:()=>m.Phi3PreTrainedModel,PhiForCausalLM:()=>m.PhiForCausalLM,PhiModel:()=>m.PhiModel,PhiPreTrainedModel:()=>m.PhiPreTrainedModel,Pipeline:()=>p.Pipeline,PreTrainedModel:()=>m.PreTrainedModel,PreTrainedTokenizer:()=>_.PreTrainedTokenizer,PretrainedConfig:()=>h.PretrainedConfig,PretrainedMixin:()=>m.PretrainedMixin,Processor:()=>F.Processor,PvtForImageClassification:()=>m.PvtForImageClassification,PvtImageProcessor:()=>k.PvtImageProcessor,PvtModel:()=>m.PvtModel,PvtPreTrainedModel:()=>m.PvtPreTrainedModel,PyAnnoteFeatureExtractor:()=>b.PyAnnoteFeatureExtractor,PyAnnoteForAudioFrameClassification:()=>m.PyAnnoteForAudioFrameClassification,PyAnnoteModel:()=>m.PyAnnoteModel,PyAnnotePreTrainedModel:()=>m.PyAnnotePreTrainedModel,PyAnnoteProcessor:()=>v.PyAnnoteProcessor,QuestionAnsweringModelOutput:()=>m.QuestionAnsweringModelOutput,QuestionAnsweringPipeline:()=>p.QuestionAnsweringPipeline,Qwen2ForCausalLM:()=>m.Qwen2ForCausalLM,Qwen2Model:()
=>m.Qwen2Model,Qwen2PreTrainedModel:()=>m.Qwen2PreTrainedModel,Qwen2Tokenizer:()=>_.Qwen2Tokenizer,Qwen2VLForConditionalGeneration:()=>m.Qwen2VLForConditionalGeneration,Qwen2VLImageProcessor:()=>k.Qwen2VLImageProcessor,Qwen2VLPreTrainedModel:()=>m.Qwen2VLPreTrainedModel,Qwen2VLProcessor:()=>v.Qwen2VLProcessor,RTDetrForObjectDetection:()=>m.RTDetrForObjectDetection,RTDetrImageProcessor:()=>k.RTDetrImageProcessor,RTDetrModel:()=>m.RTDetrModel,RTDetrObjectDetectionOutput:()=>m.RTDetrObjectDetectionOutput,RTDetrPreTrainedModel:()=>m.RTDetrPreTrainedModel,RawImage:()=>g.RawImage,RepetitionPenaltyLogitsProcessor:()=>E.RepetitionPenaltyLogitsProcessor,ResNetForImageClassification:()=>m.ResNetForImageClassification,ResNetModel:()=>m.ResNetModel,ResNetPreTrainedModel:()=>m.ResNetPreTrainedModel,RoFormerForMaskedLM:()=>m.RoFormerForMaskedLM,RoFormerForQuestionAnswering:()=>m.RoFormerForQuestionAnswering,RoFormerForSequenceClassification:()=>m.RoFormerForSequenceClassification,RoFormerForTokenClassification:()=>m.RoFormerForTokenClassification,RoFormerModel:()=>m.RoFormerModel,RoFormerPreTrainedModel:()=>m.RoFormerPreTrainedModel,RoFormerTokenizer:()=>_.RoFormerTokenizer,RobertaForMaskedLM:()=>m.RobertaForMaskedLM,RobertaForQuestionAnswering:()=>m.RobertaForQuestionAnswering,RobertaForSequenceClassification:()=>m.RobertaForSequenceClassification,RobertaForTokenClassification:()=>m.RobertaForTokenClassification,RobertaModel:()=>m.RobertaModel,RobertaPreTrainedModel:()=>m.RobertaPreTrainedModel,RobertaTokenizer:()=>_.RobertaTokenizer,SamImageProcessor:()=>k.SamImageProcessor,SamImageSegmentationOutput:()=>m.SamImageSegmentationOutput,SamModel:()=>m.SamModel,SamPreTrainedModel:()=>m.SamPreTrainedModel,SamProcessor:()=>v.SamProcessor,SapiensForDepthEstimation:()=>m.SapiensForDepthEstimation,SapiensForNormalEstimation:()=>m.SapiensForNormalEstimation,SapiensForSemanticSegmentation:()=>m.SapiensForSemanticSegmentation,SapiensPreTrainedModel:()=>m.SapiensPreTrainedModel,SeamlessM4TFe
atureExtractor:()=>b.SeamlessM4TFeatureExtractor,SegformerFeatureExtractor:()=>k.SegformerFeatureExtractor,SegformerForImageClassification:()=>m.SegformerForImageClassification,SegformerForSemanticSegmentation:()=>m.SegformerForSemanticSegmentation,SegformerImageProcessor:()=>k.SegformerImageProcessor,SegformerModel:()=>m.SegformerModel,SegformerPreTrainedModel:()=>m.SegformerPreTrainedModel,Seq2SeqLMOutput:()=>m.Seq2SeqLMOutput,SequenceClassifierOutput:()=>m.SequenceClassifierOutput,SiglipImageProcessor:()=>k.SiglipImageProcessor,SiglipModel:()=>m.SiglipModel,SiglipPreTrainedModel:()=>m.SiglipPreTrainedModel,SiglipTextModel:()=>m.SiglipTextModel,SiglipTokenizer:()=>_.SiglipTokenizer,SiglipVisionModel:()=>m.SiglipVisionModel,SpeechT5FeatureExtractor:()=>b.SpeechT5FeatureExtractor,SpeechT5ForSpeechToText:()=>m.SpeechT5ForSpeechToText,SpeechT5ForTextToSpeech:()=>m.SpeechT5ForTextToSpeech,SpeechT5HifiGan:()=>m.SpeechT5HifiGan,SpeechT5Model:()=>m.SpeechT5Model,SpeechT5PreTrainedModel:()=>m.SpeechT5PreTrainedModel,SpeechT5Processor:()=>v.SpeechT5Processor,SpeechT5Tokenizer:()=>_.SpeechT5Tokenizer,SqueezeBertForMaskedLM:()=>m.SqueezeBertForMaskedLM,SqueezeBertForQuestionAnswering:()=>m.SqueezeBertForQuestionAnswering,SqueezeBertForSequenceClassification:()=>m.SqueezeBertForSequenceClassification,SqueezeBertModel:()=>m.SqueezeBertModel,SqueezeBertPreTrainedModel:()=>m.SqueezeBertPreTrainedModel,SqueezeBertTokenizer:()=>_.SqueezeBertTokenizer,StableLmForCausalLM:()=>m.StableLmForCausalLM,StableLmModel:()=>m.StableLmModel,StableLmPreTrainedModel:()=>m.StableLmPreTrainedModel,Starcoder2ForCausalLM:()=>m.Starcoder2ForCausalLM,Starcoder2Model:()=>m.Starcoder2Model,Starcoder2PreTrainedModel:()=>m.Starcoder2PreTrainedModel,StoppingCriteria:()=>A.StoppingCriteria,StoppingCriteriaList:()=>A.StoppingCriteriaList,SummarizationPipeline:()=>p.SummarizationPipeline,SuppressTokensAtBeginLogitsProcessor:()=>E.SuppressTokensAtBeginLogitsProcessor,Swin2SRForImageSuperResolution:()=>m.Swin2S
RForImageSuperResolution,Swin2SRImageProcessor:()=>k.Swin2SRImageProcessor,Swin2SRModel:()=>m.Swin2SRModel,Swin2SRPreTrainedModel:()=>m.Swin2SRPreTrainedModel,SwinForImageClassification:()=>m.SwinForImageClassification,SwinModel:()=>m.SwinModel,SwinPreTrainedModel:()=>m.SwinPreTrainedModel,T5ForConditionalGeneration:()=>m.T5ForConditionalGeneration,T5Model:()=>m.T5Model,T5PreTrainedModel:()=>m.T5PreTrainedModel,T5Tokenizer:()=>_.T5Tokenizer,TableTransformerForObjectDetection:()=>m.TableTransformerForObjectDetection,TableTransformerModel:()=>m.TableTransformerModel,TableTransformerObjectDetectionOutput:()=>m.TableTransformerObjectDetectionOutput,TableTransformerPreTrainedModel:()=>m.TableTransformerPreTrainedModel,TemperatureLogitsWarper:()=>E.TemperatureLogitsWarper,Tensor:()=>M.Tensor,Text2TextGenerationPipeline:()=>p.Text2TextGenerationPipeline,TextClassificationPipeline:()=>p.TextClassificationPipeline,TextGenerationPipeline:()=>p.TextGenerationPipeline,TextStreamer:()=>S.TextStreamer,TextToAudioPipeline:()=>p.TextToAudioPipeline,TokenClassificationPipeline:()=>p.TokenClassificationPipeline,TokenClassifierOutput:()=>m.TokenClassifierOutput,TokenizerModel:()=>_.TokenizerModel,TopKLogitsWarper:()=>E.TopKLogitsWarper,TopPLogitsWarper:()=>E.TopPLogitsWarper,TrOCRForCausalLM:()=>m.TrOCRForCausalLM,TrOCRPreTrainedModel:()=>m.TrOCRPreTrainedModel,TranslationPipeline:()=>p.TranslationPipeline,UniSpeechForCTC:()=>m.UniSpeechForCTC,UniSpeechForSequenceClassification:()=>m.UniSpeechForSequenceClassification,UniSpeechModel:()=>m.UniSpeechModel,UniSpeechPreTrainedModel:()=>m.UniSpeechPreTrainedModel,UniSpeechSatForAudioFrameClassification:()=>m.UniSpeechSatForAudioFrameClassification,UniSpeechSatForCTC:()=>m.UniSpeechSatForCTC,UniSpeechSatForSequenceClassification:()=>m.UniSpeechSatForSequenceClassification,UniSpeechSatModel:()=>m.UniSpeechSatModel,UniSpeechSatPreTrainedModel:()=>m.UniSpeechSatPreTrainedModel,VLChatProcessor:()=>v.VLChatProcessor,VLMImageProcessor:()=>k.VLMIm
ageProcessor,ViTFeatureExtractor:()=>k.ViTFeatureExtractor,ViTForImageClassification:()=>m.ViTForImageClassification,ViTImageProcessor:()=>k.ViTImageProcessor,ViTMAEModel:()=>m.ViTMAEModel,ViTMAEPreTrainedModel:()=>m.ViTMAEPreTrainedModel,ViTMSNForImageClassification:()=>m.ViTMSNForImageClassification,ViTMSNModel:()=>m.ViTMSNModel,ViTMSNPreTrainedModel:()=>m.ViTMSNPreTrainedModel,ViTModel:()=>m.ViTModel,ViTPreTrainedModel:()=>m.ViTPreTrainedModel,VisionEncoderDecoderModel:()=>m.VisionEncoderDecoderModel,VitMatteForImageMatting:()=>m.VitMatteForImageMatting,VitMatteImageProcessor:()=>k.VitMatteImageProcessor,VitMattePreTrainedModel:()=>m.VitMattePreTrainedModel,VitPoseForPoseEstimation:()=>m.VitPoseForPoseEstimation,VitPoseImageProcessor:()=>k.VitPoseImageProcessor,VitPosePreTrainedModel:()=>m.VitPosePreTrainedModel,VitsModel:()=>m.VitsModel,VitsModelOutput:()=>m.VitsModelOutput,VitsPreTrainedModel:()=>m.VitsPreTrainedModel,VitsTokenizer:()=>_.VitsTokenizer,Wav2Vec2BertForCTC:()=>m.Wav2Vec2BertForCTC,Wav2Vec2BertForSequenceClassification:()=>m.Wav2Vec2BertForSequenceClassification,Wav2Vec2BertModel:()=>m.Wav2Vec2BertModel,Wav2Vec2BertPreTrainedModel:()=>m.Wav2Vec2BertPreTrainedModel,Wav2Vec2CTCTokenizer:()=>_.Wav2Vec2CTCTokenizer,Wav2Vec2FeatureExtractor:()=>b.Wav2Vec2FeatureExtractor,Wav2Vec2ForAudioFrameClassification:()=>m.Wav2Vec2ForAudioFrameClassification,Wav2Vec2ForCTC:()=>m.Wav2Vec2ForCTC,Wav2Vec2ForSequenceClassification:()=>m.Wav2Vec2ForSequenceClassification,Wav2Vec2Model:()=>m.Wav2Vec2Model,Wav2Vec2PreTrainedModel:()=>m.Wav2Vec2PreTrainedModel,Wav2Vec2ProcessorWithLM:()=>v.Wav2Vec2ProcessorWithLM,WavLMForAudioFrameClassification:()=>m.WavLMForAudioFrameClassification,WavLMForCTC:()=>m.WavLMForCTC,WavLMForSequenceClassification:()=>m.WavLMForSequenceClassification,WavLMForXVector:()=>m.WavLMForXVector,WavLMModel:()=>m.WavLMModel,WavLMPreTrainedModel:()=>m.WavLMPreTrainedModel,WeSpeakerFeatureExtractor:()=>b.WeSpeakerFeatureExtractor,WeSpeakerResNetModel:()
=>m.WeSpeakerResNetModel,WeSpeakerResNetPreTrainedModel:()=>m.WeSpeakerResNetPreTrainedModel,WhisperFeatureExtractor:()=>b.WhisperFeatureExtractor,WhisperForConditionalGeneration:()=>m.WhisperForConditionalGeneration,WhisperModel:()=>m.WhisperModel,WhisperPreTrainedModel:()=>m.WhisperPreTrainedModel,WhisperProcessor:()=>v.WhisperProcessor,WhisperTextStreamer:()=>S.WhisperTextStreamer,WhisperTimeStampLogitsProcessor:()=>E.WhisperTimeStampLogitsProcessor,WhisperTokenizer:()=>_.WhisperTokenizer,XLMForQuestionAnswering:()=>m.XLMForQuestionAnswering,XLMForSequenceClassification:()=>m.XLMForSequenceClassification,XLMForTokenClassification:()=>m.XLMForTokenClassification,XLMModel:()=>m.XLMModel,XLMPreTrainedModel:()=>m.XLMPreTrainedModel,XLMRobertaForMaskedLM:()=>m.XLMRobertaForMaskedLM,XLMRobertaForQuestionAnswering:()=>m.XLMRobertaForQuestionAnswering,XLMRobertaForSequenceClassification:()=>m.XLMRobertaForSequenceClassification,XLMRobertaForTokenClassification:()=>m.XLMRobertaForTokenClassification,XLMRobertaModel:()=>m.XLMRobertaModel,XLMRobertaPreTrainedModel:()=>m.XLMRobertaPreTrainedModel,XLMRobertaTokenizer:()=>_.XLMRobertaTokenizer,XLMTokenizer:()=>_.XLMTokenizer,XLMWithLMHeadModel:()=>m.XLMWithLMHeadModel,XVectorOutput:()=>m.XVectorOutput,YolosFeatureExtractor:()=>k.YolosFeatureExtractor,YolosForObjectDetection:()=>m.YolosForObjectDetection,YolosImageProcessor:()=>k.YolosImageProcessor,YolosModel:()=>m.YolosModel,YolosObjectDetectionOutput:()=>m.YolosObjectDetectionOutput,YolosPreTrainedModel:()=>m.YolosPreTrainedModel,ZeroShotAudioClassificationPipeline:()=>p.ZeroShotAudioClassificationPipeline,ZeroShotClassificationPipeline:()=>p.ZeroShotClassificationPipeline,ZeroShotImageClassificationPipeline:()=>p.ZeroShotImageClassificationPipeline,ZeroShotObjectDetectionPipeline:()=>p.ZeroShotObjectDetectionPipeline,bankers_round:()=>w.bankers_round,cat:()=>M.cat,cos_sim:()=>w.cos_sim,dot:()=>w.dot,dynamic_time_warping:()=>w.dynamic_time_warping,env:()=>u.env,full:()=>M.fu
ll,full_like:()=>M.full_like,getKeyValueShapes:()=>h.getKeyValueShapes,hamming:()=>f.hamming,hanning:()=>f.hanning,interpolate:()=>M.interpolate,interpolate_4d:()=>M.interpolate_4d,interpolate_data:()=>w.interpolate_data,is_chinese_char:()=>_.is_chinese_char,layer_norm:()=>M.layer_norm,load_image:()=>g.load_image,log_softmax:()=>w.log_softmax,magnitude:()=>w.magnitude,matmul:()=>M.matmul,max:()=>w.max,mean:()=>M.mean,mean_pooling:()=>M.mean_pooling,medianFilter:()=>w.medianFilter,mel_filter_bank:()=>f.mel_filter_bank,min:()=>w.min,ones:()=>M.ones,ones_like:()=>M.ones_like,permute:()=>M.permute,permute_data:()=>w.permute_data,pipeline:()=>p.pipeline,quantize_embeddings:()=>M.quantize_embeddings,read_audio:()=>f.read_audio,rfft:()=>M.rfft,round:()=>w.round,softmax:()=>w.softmax,spectrogram:()=>f.spectrogram,stack:()=>M.stack,std_mean:()=>M.std_mean,topk:()=>M.topk,window_function:()=>f.window_function,zeros:()=>M.zeros,zeros_like:()=>M.zeros_like});var u=c(/*! ./env.js */"./src/env.js"),p=c(/*! ./pipelines.js */"./src/pipelines.js"),m=c(/*! ./models.js */"./src/models.js"),_=c(/*! ./tokenizers.js */"./src/tokenizers.js"),h=c(/*! ./configs.js */"./src/configs.js"),f=c(/*! ./utils/audio.js */"./src/utils/audio.js"),g=c(/*! ./utils/image.js */"./src/utils/image.js"),M=c(/*! ./utils/tensor.js */"./src/utils/tensor.js"),w=c(/*! ./utils/maths.js */"./src/utils/maths.js"),T=c(/*! ./base/feature_extraction_utils.js */"./src/base/feature_extraction_utils.js"),b=c(/*! ./models/feature_extractors.js */"./src/models/feature_extractors.js"),x=c(/*! ./models/auto/feature_extraction_auto.js */"./src/models/auto/feature_extraction_auto.js"),P=c(/*! ./base/image_processors_utils.js */"./src/base/image_processors_utils.js"),k=c(/*! ./models/image_processors.js */"./src/models/image_processors.js"),y=c(/*! ./models/auto/image_processing_auto.js */"./src/models/auto/image_processing_auto.js"),F=c(/*! ./base/processing_utils.js */"./src/base/processing_utils.js"),v=c(/*! 
./models/processors.js */"./src/models/processors.js"),C=c(/*! ./models/auto/processing_auto.js */"./src/models/auto/processing_auto.js"),S=c(/*! ./generation/streamers.js */"./src/generation/streamers.js"),A=c(/*! ./generation/stopping_criteria.js */"./src/generation/stopping_criteria.js"),E=c(/*! ./generation/logits_process.js */"./src/generation/logits_process.js"),L=d.ASTFeatureExtractor,I=d.ASTForAudioClassification,z=d.ASTModel,j=d.ASTPreTrainedModel,N=d.AlbertForMaskedLM,V=d.AlbertForQuestionAnswering,B=d.AlbertForSequenceClassification,O=d.AlbertModel,D=d.AlbertPreTrainedModel,R=d.AlbertTokenizer,G=d.AudioClassificationPipeline,q=d.AutoConfig,W=d.AutoFeatureExtractor,$=d.AutoImageProcessor,U=d.AutoModel,Q=d.AutoModelForAudioClassification,X=d.AutoModelForAudioFrameClassification,H=d.AutoModelForCTC,J=d.AutoModelForCausalLM,Y=d.AutoModelForDepthEstimation,K=d.AutoModelForDocumentQuestionAnswering,Z=d.AutoModelForImageClassification,ee=d.AutoModelForImageFeatureExtraction,te=d.AutoModelForImageMatting,se=d.AutoModelForImageSegmentation,re=d.AutoModelForImageToImage,oe=d.AutoModelForMaskGeneration,ne=d.AutoModelForMaskedLM,ae=d.AutoModelForNormalEstimation,ie=d.AutoModelForObjectDetection,le=d.AutoModelForPoseEstimation,ce=d.AutoModelForQuestionAnswering,de=d.AutoModelForSemanticSegmentation,ue=d.AutoModelForSeq2SeqLM,pe=d.AutoModelForSequenceClassification,me=d.AutoModelForSpeechSeq2Seq,_e=d.AutoModelForTextToSpectrogram,he=d.AutoModelForTextToWaveform,fe=d.AutoModelForTokenClassification,ge=d.AutoModelForUniversalSegmentation,Me=d.AutoModelForVision2Seq,we=d.AutoModelForXVector,Te=d.AutoModelForZeroShotObjectDetection,be=d.AutoProcessor,xe=d.AutoTokenizer,Pe=d.AutomaticSpeechRecognitionPipeline,ke=d.BartForConditionalGeneration,ye=d.BartForSequenceClassification,Fe=d.BartModel,ve=d.BartPretrainedModel,Ce=d.BartTokenizer,Se=d.BaseModelOutput,Ae=d.BaseStreamer,Ee=d.BeitFeatureExtractor,Le=d.BeitForImageClassification,Ie=d.BeitModel,ze=d.BeitPreTrainedModel,je=d
.BertForMaskedLM,Ne=d.BertForQuestionAnswering,Ve=d.BertForSequenceClassification,Be=d.BertForTokenClassification,Oe=d.BertModel,De=d.BertPreTrainedModel,Re=d.BertTokenizer,Ge=d.BitImageProcessor,qe=d.BlenderbotForConditionalGeneration,We=d.BlenderbotModel,$e=d.BlenderbotPreTrainedModel,Ue=d.BlenderbotSmallForConditionalGeneration,Qe=d.BlenderbotSmallModel,Xe=d.BlenderbotSmallPreTrainedModel,He=d.BlenderbotSmallTokenizer,Je=d.BlenderbotTokenizer,Ye=d.BloomForCausalLM,Ke=d.BloomModel,Ze=d.BloomPreTrainedModel,et=d.BloomTokenizer,tt=d.CLIPFeatureExtractor,st=d.CLIPImageProcessor,rt=d.CLIPModel,ot=d.CLIPPreTrainedModel,nt=d.CLIPSegForImageSegmentation,at=d.CLIPSegModel,it=d.CLIPSegPreTrainedModel,lt=d.CLIPTextModel,ct=d.CLIPTextModelWithProjection,dt=d.CLIPTokenizer,ut=d.CLIPVisionModel,pt=d.CLIPVisionModelWithProjection,mt=d.CamembertForMaskedLM,_t=d.CamembertForQuestionAnswering,ht=d.CamembertForSequenceClassification,ft=d.CamembertForTokenClassification,gt=d.CamembertModel,Mt=d.CamembertPreTrainedModel,wt=d.CamembertTokenizer,Tt=d.CausalLMOutput,bt=d.CausalLMOutputWithPast,xt=d.ChineseCLIPFeatureExtractor,Pt=d.ChineseCLIPModel,kt=d.ChineseCLIPPreTrainedModel,yt=d.ClapAudioModelWithProjection,Ft=d.ClapFeatureExtractor,vt=d.ClapModel,Ct=d.ClapPreTrainedModel,St=d.ClapTextModelWithProjection,At=d.ClassifierFreeGuidanceLogitsProcessor,Et=d.CodeGenForCausalLM,Lt=d.CodeGenModel,It=d.CodeGenPreTrainedModel,zt=d.CodeGenTokenizer,jt=d.CodeLlamaTokenizer,Nt=d.CohereForCausalLM,Vt=d.CohereModel,Bt=d.CoherePreTrainedModel,Ot=d.CohereTokenizer,Dt=d.ConvBertForMaskedLM,Rt=d.ConvBertForQuestionAnswering,Gt=d.ConvBertForSequenceClassification,qt=d.ConvBertForTokenClassification,Wt=d.ConvBertModel,$t=d.ConvBertPreTrainedModel,Ut=d.ConvBertTokenizer,Qt=d.ConvNextFeatureExtractor,Xt=d.ConvNextForImageClassification,Ht=d.ConvNextImageProcessor,Jt=d.ConvNextModel,Yt=d.ConvNextPreTrainedModel,Kt=d.ConvNextV2ForImageClassification,Zt=d.ConvNextV2Model,es=d.ConvNextV2PreTrainedModel,ts=d.D
PTFeatureExtractor,ss=d.DPTForDepthEstimation,rs=d.DPTImageProcessor,os=d.DPTModel,ns=d.DPTPreTrainedModel,as=d.DebertaForMaskedLM,is=d.DebertaForQuestionAnswering,ls=d.DebertaForSequenceClassification,cs=d.DebertaForTokenClassification,ds=d.DebertaModel,us=d.DebertaPreTrainedModel,ps=d.DebertaTokenizer,ms=d.DebertaV2ForMaskedLM,_s=d.DebertaV2ForQuestionAnswering,hs=d.DebertaV2ForSequenceClassification,fs=d.DebertaV2ForTokenClassification,gs=d.DebertaV2Model,Ms=d.DebertaV2PreTrainedModel,ws=d.DebertaV2Tokenizer,Ts=d.DecisionTransformerModel,bs=d.DecisionTransformerPreTrainedModel,xs=d.DeiTFeatureExtractor,Ps=d.DeiTForImageClassification,ks=d.DeiTImageProcessor,ys=d.DeiTModel,Fs=d.DeiTPreTrainedModel,vs=d.DepthAnythingForDepthEstimation,Cs=d.DepthAnythingPreTrainedModel,Ss=d.DepthEstimationPipeline,As=d.DepthProForDepthEstimation,Es=d.DepthProPreTrainedModel,Ls=d.DetrFeatureExtractor,Is=d.DetrForObjectDetection,zs=d.DetrForSegmentation,js=d.DetrImageProcessor,Ns=d.DetrModel,Vs=d.DetrObjectDetectionOutput,Bs=d.DetrPreTrainedModel,Os=d.DetrSegmentationOutput,Ds=d.Dinov2ForImageClassification,Rs=d.Dinov2Model,Gs=d.Dinov2PreTrainedModel,qs=d.DistilBertForMaskedLM,Ws=d.DistilBertForQuestionAnswering,$s=d.DistilBertForSequenceClassification,Us=d.DistilBertForTokenClassification,Qs=d.DistilBertModel,Xs=d.DistilBertPreTrainedModel,Hs=d.DistilBertTokenizer,Js=d.DocumentQuestionAnsweringPipeline,Ys=d.DonutFeatureExtractor,Ks=d.DonutImageProcessor,Zs=d.DonutSwinModel,er=d.DonutSwinPreTrainedModel,tr=d.EfficientNetForImageClassification,sr=d.EfficientNetImageProcessor,rr=d.EfficientNetModel,or=d.EfficientNetPreTrainedModel,nr=d.ElectraForMaskedLM,ar=d.ElectraForQuestionAnswering,ir=d.ElectraForSequenceClassification,lr=d.ElectraForTokenClassification,cr=d.ElectraModel,dr=d.ElectraPreTrainedModel,ur=d.ElectraTokenizer,pr=d.EosTokenCriteria,mr=d.EsmForMaskedLM,_r=d.EsmForSequenceClassification,hr=d.EsmForTokenClassification,fr=d.EsmModel,gr=d.EsmPreTrainedModel,Mr=d.EsmTokenizer,w
r=d.FFT,Tr=d.FalconForCausalLM,br=d.FalconModel,xr=d.FalconPreTrainedModel,Pr=d.FalconTokenizer,kr=d.FastViTForImageClassification,yr=d.FastViTModel,Fr=d.FastViTPreTrainedModel,vr=d.FeatureExtractionPipeline,Cr=d.FeatureExtractor,Sr=d.FillMaskPipeline,Ar=d.Florence2ForConditionalGeneration,Er=d.Florence2PreTrainedModel,Lr=d.Florence2Processor,Ir=d.ForcedBOSTokenLogitsProcessor,zr=d.ForcedEOSTokenLogitsProcessor,jr=d.GLPNFeatureExtractor,Nr=d.GLPNForDepthEstimation,Vr=d.GLPNModel,Br=d.GLPNPreTrainedModel,Or=d.GPT2LMHeadModel,Dr=d.GPT2Model,Rr=d.GPT2PreTrainedModel,Gr=d.GPT2Tokenizer,qr=d.GPTBigCodeForCausalLM,Wr=d.GPTBigCodeModel,$r=d.GPTBigCodePreTrainedModel,Ur=d.GPTJForCausalLM,Qr=d.GPTJModel,Xr=d.GPTJPreTrainedModel,Hr=d.GPTNeoForCausalLM,Jr=d.GPTNeoModel,Yr=d.GPTNeoPreTrainedModel,Kr=d.GPTNeoXForCausalLM,Zr=d.GPTNeoXModel,eo=d.GPTNeoXPreTrainedModel,to=d.GPTNeoXTokenizer,so=d.Gemma2ForCausalLM,ro=d.Gemma2Model,oo=d.Gemma2PreTrainedModel,no=d.GemmaForCausalLM,ao=d.GemmaModel,io=d.GemmaPreTrainedModel,lo=d.GemmaTokenizer,co=d.GraniteForCausalLM,uo=d.GraniteModel,po=d.GranitePreTrainedModel,mo=d.Grok1Tokenizer,_o=d.GroupViTModel,ho=d.GroupViTPreTrainedModel,fo=d.HerbertTokenizer,go=d.HieraForImageClassification,Mo=d.HieraModel,wo=d.HieraPreTrainedModel,To=d.HubertForCTC,bo=d.HubertForSequenceClassification,xo=d.HubertModel,Po=d.HubertPreTrainedModel,ko=d.Idefics3ForConditionalGeneration,yo=d.Idefics3ImageProcessor,Fo=d.Idefics3PreTrainedModel,vo=d.Idefics3Processor,Co=d.ImageClassificationPipeline,So=d.ImageFeatureExtractionPipeline,Ao=d.ImageFeatureExtractor,Eo=d.ImageMattingOutput,Lo=d.ImageProcessor,Io=d.ImageSegmentationPipeline,zo=d.ImageToImagePipeline,jo=d.ImageToTextPipeline,No=d.InterruptableStoppingCriteria,Vo=d.JAISLMHeadModel,Bo=d.JAISModel,Oo=d.JAISPreTrainedModel,Do=d.JinaCLIPImageProcessor,Ro=d.JinaCLIPModel,Go=d.JinaCLIPPreTrainedModel,qo=d.JinaCLIPProcessor,Wo=d.JinaCLIPTextModel,$o=d.JinaCLIPVisionModel,Uo=d.LlamaForCausalLM,Qo=d.LlamaModel,Xo=d.L
lamaPreTrainedModel,Ho=d.LlamaTokenizer,Jo=d.LlavaForConditionalGeneration,Yo=d.LlavaOnevisionForConditionalGeneration,Ko=d.LlavaOnevisionImageProcessor,Zo=d.LlavaPreTrainedModel,en=d.LogitsProcessor,tn=d.LogitsProcessorList,sn=d.LogitsWarper,rn=d.LongT5ForConditionalGeneration,on=d.LongT5Model,nn=d.LongT5PreTrainedModel,an=d.M2M100ForConditionalGeneration,ln=d.M2M100Model,cn=d.M2M100PreTrainedModel,dn=d.M2M100Tokenizer,un=d.MBart50Tokenizer,pn=d.MBartForCausalLM,mn=d.MBartForConditionalGeneration,_n=d.MBartForSequenceClassification,hn=d.MBartModel,fn=d.MBartPreTrainedModel,gn=d.MBartTokenizer,Mn=d.MPNetForMaskedLM,wn=d.MPNetForQuestionAnswering,Tn=d.MPNetForSequenceClassification,bn=d.MPNetForTokenClassification,xn=d.MPNetModel,Pn=d.MPNetPreTrainedModel,kn=d.MPNetTokenizer,yn=d.MT5ForConditionalGeneration,Fn=d.MT5Model,vn=d.MT5PreTrainedModel,Cn=d.MarianMTModel,Sn=d.MarianModel,An=d.MarianPreTrainedModel,En=d.MarianTokenizer,Ln=d.Mask2FormerImageProcessor,In=d.MaskFormerFeatureExtractor,zn=d.MaskFormerForInstanceSegmentation,jn=d.MaskFormerImageProcessor,Nn=d.MaskFormerModel,Vn=d.MaskFormerPreTrainedModel,Bn=d.MaskedLMOutput,On=d.MaxLengthCriteria,Dn=d.MgpstrForSceneTextRecognition,Rn=d.MgpstrModelOutput,Gn=d.MgpstrPreTrainedModel,qn=d.MgpstrProcessor,Wn=d.MgpstrTokenizer,$n=d.MinLengthLogitsProcessor,Un=d.MinNewTokensLengthLogitsProcessor,Qn=d.MistralForCausalLM,Xn=d.MistralModel,Hn=d.MistralPreTrainedModel,Jn=d.MobileBertForMaskedLM,Yn=d.MobileBertForQuestionAnswering,Kn=d.MobileBertForSequenceClassification,Zn=d.MobileBertModel,ea=d.MobileBertPreTrainedModel,ta=d.MobileBertTokenizer,sa=d.MobileLLMForCausalLM,ra=d.MobileLLMModel,oa=d.MobileLLMPreTrainedModel,na=d.MobileNetV1FeatureExtractor,aa=d.MobileNetV1ForImageClassification,ia=d.MobileNetV1ImageProcessor,la=d.MobileNetV1Model,ca=d.MobileNetV1PreTrainedModel,da=d.MobileNetV2FeatureExtractor,ua=d.MobileNetV2ForImageClassification,pa=d.MobileNetV2ImageProcessor,ma=d.MobileNetV2Model,_a=d.MobileNetV2PreTrainedMo
del,ha=d.MobileNetV3FeatureExtractor,fa=d.MobileNetV3ForImageClassification,ga=d.MobileNetV3ImageProcessor,Ma=d.MobileNetV3Model,wa=d.MobileNetV3PreTrainedModel,Ta=d.MobileNetV4FeatureExtractor,ba=d.MobileNetV4ForImageClassification,xa=d.MobileNetV4ImageProcessor,Pa=d.MobileNetV4Model,ka=d.MobileNetV4PreTrainedModel,ya=d.MobileViTFeatureExtractor,Fa=d.MobileViTForImageClassification,va=d.MobileViTImageProcessor,Ca=d.MobileViTModel,Sa=d.MobileViTPreTrainedModel,Aa=d.MobileViTV2ForImageClassification,Ea=d.MobileViTV2Model,La=d.MobileViTV2PreTrainedModel,Ia=d.ModelOutput,za=d.Moondream1ForConditionalGeneration,ja=d.MptForCausalLM,Na=d.MptModel,Va=d.MptPreTrainedModel,Ba=d.MultiModalityCausalLM,Oa=d.MultiModalityPreTrainedModel,Da=d.MusicgenForCausalLM,Ra=d.MusicgenForConditionalGeneration,Ga=d.MusicgenModel,qa=d.MusicgenPreTrainedModel,Wa=d.NllbTokenizer,$a=d.NoBadWordsLogitsProcessor,Ua=d.NoRepeatNGramLogitsProcessor,Qa=d.NomicBertModel,Xa=d.NomicBertPreTrainedModel,Ha=d.NougatImageProcessor,Ja=d.NougatTokenizer,Ya=d.OPTForCausalLM,Ka=d.OPTModel,Za=d.OPTPreTrainedModel,ei=d.ObjectDetectionPipeline,ti=d.OlmoForCausalLM,si=d.OlmoModel,ri=d.OlmoPreTrainedModel,oi=d.OpenELMForCausalLM,ni=d.OpenELMModel,ai=d.OpenELMPreTrainedModel,ii=d.OwlViTFeatureExtractor,li=d.OwlViTForObjectDetection,ci=d.OwlViTImageProcessor,di=d.OwlViTModel,ui=d.OwlViTPreTrainedModel,pi=d.OwlViTProcessor,mi=d.Owlv2ForObjectDetection,_i=d.Owlv2ImageProcessor,hi=d.Owlv2Model,fi=d.Owlv2PreTrainedModel,gi=d.PatchTSMixerForPrediction,Mi=d.PatchTSMixerModel,wi=d.PatchTSMixerPreTrainedModel,Ti=d.PatchTSTForPrediction,bi=d.PatchTSTModel,xi=d.PatchTSTPreTrainedModel,Pi=d.Phi3ForCausalLM,ki=d.Phi3Model,yi=d.Phi3PreTrainedModel,Fi=d.PhiForCausalLM,vi=d.PhiModel,Ci=d.PhiPreTrainedModel,Si=d.Pipeline,Ai=d.PreTrainedModel,Ei=d.PreTrainedTokenizer,Li=d.PretrainedConfig,Ii=d.PretrainedMixin,zi=d.Processor,ji=d.PvtForImageClassification,Ni=d.PvtImageProcessor,Vi=d.PvtModel,Bi=d.PvtPreTrainedModel,Oi=d.PyAnnoteFeature
Extractor,Di=d.PyAnnoteForAudioFrameClassification,Ri=d.PyAnnoteModel,Gi=d.PyAnnotePreTrainedModel,qi=d.PyAnnoteProcessor,Wi=d.QuestionAnsweringModelOutput,$i=d.QuestionAnsweringPipeline,Ui=d.Qwen2ForCausalLM,Qi=d.Qwen2Model,Xi=d.Qwen2PreTrainedModel,Hi=d.Qwen2Tokenizer,Ji=d.Qwen2VLForConditionalGeneration,Yi=d.Qwen2VLImageProcessor,Ki=d.Qwen2VLPreTrainedModel,Zi=d.Qwen2VLProcessor,el=d.RTDetrForObjectDetection,tl=d.RTDetrImageProcessor,sl=d.RTDetrModel,rl=d.RTDetrObjectDetectionOutput,ol=d.RTDetrPreTrainedModel,nl=d.RawImage,al=d.RepetitionPenaltyLogitsProcessor,il=d.ResNetForImageClassification,ll=d.ResNetModel,cl=d.ResNetPreTrainedModel,dl=d.RoFormerForMaskedLM,ul=d.RoFormerForQuestionAnswering,pl=d.RoFormerForSequenceClassification,ml=d.RoFormerForTokenClassification,_l=d.RoFormerModel,hl=d.RoFormerPreTrainedModel,fl=d.RoFormerTokenizer,gl=d.RobertaForMaskedLM,Ml=d.RobertaForQuestionAnswering,wl=d.RobertaForSequenceClassification,Tl=d.RobertaForTokenClassification,bl=d.RobertaModel,xl=d.RobertaPreTrainedModel,Pl=d.RobertaTokenizer,kl=d.SamImageProcessor,yl=d.SamImageSegmentationOutput,Fl=d.SamModel,vl=d.SamPreTrainedModel,Cl=d.SamProcessor,Sl=d.SapiensForDepthEstimation,Al=d.SapiensForNormalEstimation,El=d.SapiensForSemanticSegmentation,Ll=d.SapiensPreTrainedModel,Il=d.SeamlessM4TFeatureExtractor,zl=d.SegformerFeatureExtractor,jl=d.SegformerForImageClassification,Nl=d.SegformerForSemanticSegmentation,Vl=d.SegformerImageProcessor,Bl=d.SegformerModel,Ol=d.SegformerPreTrainedModel,Dl=d.Seq2SeqLMOutput,Rl=d.SequenceClassifierOutput,Gl=d.SiglipImageProcessor,ql=d.SiglipModel,Wl=d.SiglipPreTrainedModel,$l=d.SiglipTextModel,Ul=d.SiglipTokenizer,Ql=d.SiglipVisionModel,Xl=d.SpeechT5FeatureExtractor,Hl=d.SpeechT5ForSpeechToText,Jl=d.SpeechT5ForTextToSpeech,Yl=d.SpeechT5HifiGan,Kl=d.SpeechT5Model,Zl=d.SpeechT5PreTrainedModel,ec=d.SpeechT5Processor,tc=d.SpeechT5Tokenizer,sc=d.SqueezeBertForMaskedLM,rc=d.SqueezeBertForQuestionAnswering,oc=d.SqueezeBertForSequenceClassificati
on,nc=d.SqueezeBertModel,ac=d.SqueezeBertPreTrainedModel,ic=d.SqueezeBertTokenizer,lc=d.StableLmForCausalLM,cc=d.StableLmModel,dc=d.StableLmPreTrainedModel,uc=d.Starcoder2ForCausalLM,pc=d.Starcoder2Model,mc=d.Starcoder2PreTrainedModel,_c=d.StoppingCriteria,hc=d.StoppingCriteriaList,fc=d.SummarizationPipeline,gc=d.SuppressTokensAtBeginLogitsProcessor,Mc=d.Swin2SRForImageSuperResolution,wc=d.Swin2SRImageProcessor,Tc=d.Swin2SRModel,bc=d.Swin2SRPreTrainedModel,xc=d.SwinForImageClassification,Pc=d.SwinModel,kc=d.SwinPreTrainedModel,yc=d.T5ForConditionalGeneration,Fc=d.T5Model,vc=d.T5PreTrainedModel,Cc=d.T5Tokenizer,Sc=d.TableTransformerForObjectDetection,Ac=d.TableTransformerModel,Ec=d.TableTransformerObjectDetectionOutput,Lc=d.TableTransformerPreTrainedModel,Ic=d.TemperatureLogitsWarper,zc=d.Tensor,jc=d.Text2TextGenerationPipeline,Nc=d.TextClassificationPipeline,Vc=d.TextGenerationPipeline,Bc=d.TextStreamer,Oc=d.TextToAudioPipeline,Dc=d.TokenClassificationPipeline,Rc=d.TokenClassifierOutput,Gc=d.TokenizerModel,qc=d.TopKLogitsWarper,Wc=d.TopPLogitsWarper,$c=d.TrOCRForCausalLM,Uc=d.TrOCRPreTrainedModel,Qc=d.TranslationPipeline,Xc=d.UniSpeechForCTC,Hc=d.UniSpeechForSequenceClassification,Jc=d.UniSpeechModel,Yc=d.UniSpeechPreTrainedModel,Kc=d.UniSpeechSatForAudioFrameClassification,Zc=d.UniSpeechSatForCTC,ed=d.UniSpeechSatForSequenceClassification,td=d.UniSpeechSatModel,sd=d.UniSpeechSatPreTrainedModel,rd=d.VLChatProcessor,od=d.VLMImageProcessor,nd=d.ViTFeatureExtractor,ad=d.ViTForImageClassification,id=d.ViTImageProcessor,ld=d.ViTMAEModel,cd=d.ViTMAEPreTrainedModel,dd=d.ViTMSNForImageClassification,ud=d.ViTMSNModel,pd=d.ViTMSNPreTrainedModel,md=d.ViTModel,_d=d.ViTPreTrainedModel,hd=d.VisionEncoderDecoderModel,fd=d.VitMatteForImageMatting,gd=d.VitMatteImageProcessor,Md=d.VitMattePreTrainedModel,wd=d.VitPoseForPoseEstimation,Td=d.VitPoseImageProcessor,bd=d.VitPosePreTrainedModel,xd=d.VitsModel,Pd=d.VitsModelOutput,kd=d.VitsPreTrainedModel,yd=d.VitsTokenizer,Fd=d.Wav2Vec2Bert
ForCTC,vd=d.Wav2Vec2BertForSequenceClassification,Cd=d.Wav2Vec2BertModel,Sd=d.Wav2Vec2BertPreTrainedModel,Ad=d.Wav2Vec2CTCTokenizer,Ed=d.Wav2Vec2FeatureExtractor,Ld=d.Wav2Vec2ForAudioFrameClassification,Id=d.Wav2Vec2ForCTC,zd=d.Wav2Vec2ForSequenceClassification,jd=d.Wav2Vec2Model,Nd=d.Wav2Vec2PreTrainedModel,Vd=d.Wav2Vec2ProcessorWithLM,Bd=d.WavLMForAudioFrameClassification,Od=d.WavLMForCTC,Dd=d.WavLMForSequenceClassification,Rd=d.WavLMForXVector,Gd=d.WavLMModel,qd=d.WavLMPreTrainedModel,Wd=d.WeSpeakerFeatureExtractor,$d=d.WeSpeakerResNetModel,Ud=d.WeSpeakerResNetPreTrainedModel,Qd=d.WhisperFeatureExtractor,Xd=d.WhisperForConditionalGeneration,Hd=d.WhisperModel,Jd=d.WhisperPreTrainedModel,Yd=d.WhisperProcessor,Kd=d.WhisperTextStreamer,Zd=d.WhisperTimeStampLogitsProcessor,eu=d.WhisperTokenizer,tu=d.XLMForQuestionAnswering,su=d.XLMForSequenceClassification,ru=d.XLMForTokenClassification,ou=d.XLMModel,nu=d.XLMPreTrainedModel,au=d.XLMRobertaForMaskedLM,iu=d.XLMRobertaForQuestionAnswering,lu=d.XLMRobertaForSequenceClassification,cu=d.XLMRobertaForTokenClassification,du=d.XLMRobertaModel,uu=d.XLMRobertaPreTrainedModel,pu=d.XLMRobertaTokenizer,mu=d.XLMTokenizer,_u=d.XLMWithLMHeadModel,hu=d.XVectorOutput,fu=d.YolosFeatureExtractor,gu=d.YolosForObjectDetection,Mu=d.YolosImageProcessor,wu=d.YolosModel,Tu=d.YolosObjectDetectionOutput,bu=d.YolosPreTrainedModel,xu=d.ZeroShotAudioClassificationPipeline,Pu=d.ZeroShotClassificationPipeline,ku=d.ZeroShotImageClassificationPipeline,yu=d.ZeroShotObjectDetectionPipeline,Fu=d.bankers_round,vu=d.cat,Cu=d.cos_sim,Su=d.dot,Au=d.dynamic_time_warping,Eu=d.env,Lu=d.full,Iu=d.full_like,zu=d.getKeyValueShapes,ju=d.hamming,Nu=d.hanning,Vu=d.interpolate,Bu=d.interpolate_4d,Ou=d.interpolate_data,Du=d.is_chinese_char,Ru=d.layer_norm,Gu=d.load_image,qu=d.log_softmax,Wu=d.magnitude,$u=d.matmul,Uu=d.max,Qu=d.mean,Xu=d.mean_pooling,Hu=d.medianFilter,Ju=d.mel_filter_bank,Yu=d.min,Ku=d.ones,Zu=d.ones_like,ep=d.permute,tp=d.permute_data,sp=d.pipeline,rp=d
.quantize_embeddings,op=d.read_audio,np=d.rfft,ap=d.round,ip=d.softmax,lp=d.spectrogram,cp=d.stack,dp=d.std_mean,up=d.topk,pp=d.window_function,mp=d.zeros,_p=d.zeros_like;export{L as ASTFeatureExtractor,I as ASTForAudioClassification,z as ASTModel,j as ASTPreTrainedModel,N as AlbertForMaskedLM,V as AlbertForQuestionAnswering,B as AlbertForSequenceClassification,O as AlbertModel,D as AlbertPreTrainedModel,R as AlbertTokenizer,G as AudioClassificationPipeline,q as AutoConfig,W as AutoFeatureExtractor,$ as AutoImageProcessor,U as AutoModel,Q as AutoModelForAudioClassification,X as AutoModelForAudioFrameClassification,H as AutoModelForCTC,J as AutoModelForCausalLM,Y as AutoModelForDepthEstimation,K as AutoModelForDocumentQuestionAnswering,Z as AutoModelForImageClassification,ee as AutoModelForImageFeatureExtraction,te as AutoModelForImageMatting,se as AutoModelForImageSegmentation,re as AutoModelForImageToImage,oe as AutoModelForMaskGeneration,ne as AutoModelForMaskedLM,ae as AutoModelForNormalEstimation,ie as AutoModelForObjectDetection,le as AutoModelForPoseEstimation,ce as AutoModelForQuestionAnswering,de as AutoModelForSemanticSegmentation,ue as AutoModelForSeq2SeqLM,pe as AutoModelForSequenceClassification,me as AutoModelForSpeechSeq2Seq,_e as AutoModelForTextToSpectrogram,he as AutoModelForTextToWaveform,fe as AutoModelForTokenClassification,ge as AutoModelForUniversalSegmentation,Me as AutoModelForVision2Seq,we as AutoModelForXVector,Te as AutoModelForZeroShotObjectDetection,be as AutoProcessor,xe as AutoTokenizer,Pe as AutomaticSpeechRecognitionPipeline,ke as BartForConditionalGeneration,ye as BartForSequenceClassification,Fe as BartModel,ve as BartPretrainedModel,Ce as BartTokenizer,Se as BaseModelOutput,Ae as BaseStreamer,Ee as BeitFeatureExtractor,Le as BeitForImageClassification,Ie as BeitModel,ze as BeitPreTrainedModel,je as BertForMaskedLM,Ne as BertForQuestionAnswering,Ve as BertForSequenceClassification,Be as BertForTokenClassification,Oe as 
BertModel,De as BertPreTrainedModel,Re as BertTokenizer,Ge as BitImageProcessor,qe as BlenderbotForConditionalGeneration,We as BlenderbotModel,$e as BlenderbotPreTrainedModel,Ue as BlenderbotSmallForConditionalGeneration,Qe as BlenderbotSmallModel,Xe as BlenderbotSmallPreTrainedModel,He as BlenderbotSmallTokenizer,Je as BlenderbotTokenizer,Ye as BloomForCausalLM,Ke as BloomModel,Ze as BloomPreTrainedModel,et as BloomTokenizer,tt as CLIPFeatureExtractor,st as CLIPImageProcessor,rt as CLIPModel,ot as CLIPPreTrainedModel,nt as CLIPSegForImageSegmentation,at as CLIPSegModel,it as CLIPSegPreTrainedModel,lt as CLIPTextModel,ct as CLIPTextModelWithProjection,dt as CLIPTokenizer,ut as CLIPVisionModel,pt as CLIPVisionModelWithProjection,mt as CamembertForMaskedLM,_t as CamembertForQuestionAnswering,ht as CamembertForSequenceClassification,ft as CamembertForTokenClassification,gt as CamembertModel,Mt as CamembertPreTrainedModel,wt as CamembertTokenizer,Tt as CausalLMOutput,bt as CausalLMOutputWithPast,xt as ChineseCLIPFeatureExtractor,Pt as ChineseCLIPModel,kt as ChineseCLIPPreTrainedModel,yt as ClapAudioModelWithProjection,Ft as ClapFeatureExtractor,vt as ClapModel,Ct as ClapPreTrainedModel,St as ClapTextModelWithProjection,At as ClassifierFreeGuidanceLogitsProcessor,Et as CodeGenForCausalLM,Lt as CodeGenModel,It as CodeGenPreTrainedModel,zt as CodeGenTokenizer,jt as CodeLlamaTokenizer,Nt as CohereForCausalLM,Vt as CohereModel,Bt as CoherePreTrainedModel,Ot as CohereTokenizer,Dt as ConvBertForMaskedLM,Rt as ConvBertForQuestionAnswering,Gt as ConvBertForSequenceClassification,qt as ConvBertForTokenClassification,Wt as ConvBertModel,$t as ConvBertPreTrainedModel,Ut as ConvBertTokenizer,Qt as ConvNextFeatureExtractor,Xt as ConvNextForImageClassification,Ht as ConvNextImageProcessor,Jt as ConvNextModel,Yt as ConvNextPreTrainedModel,Kt as ConvNextV2ForImageClassification,Zt as ConvNextV2Model,es as ConvNextV2PreTrainedModel,ts as DPTFeatureExtractor,ss as 
DPTForDepthEstimation,rs as DPTImageProcessor,os as DPTModel,ns as DPTPreTrainedModel,as as DebertaForMaskedLM,is as DebertaForQuestionAnswering,ls as DebertaForSequenceClassification,cs as DebertaForTokenClassification,ds as DebertaModel,us as DebertaPreTrainedModel,ps as DebertaTokenizer,ms as DebertaV2ForMaskedLM,_s as DebertaV2ForQuestionAnswering,hs as DebertaV2ForSequenceClassification,fs as DebertaV2ForTokenClassification,gs as DebertaV2Model,Ms as DebertaV2PreTrainedModel,ws as DebertaV2Tokenizer,Ts as DecisionTransformerModel,bs as DecisionTransformerPreTrainedModel,xs as DeiTFeatureExtractor,Ps as DeiTForImageClassification,ks as DeiTImageProcessor,ys as DeiTModel,Fs as DeiTPreTrainedModel,vs as DepthAnythingForDepthEstimation,Cs as DepthAnythingPreTrainedModel,Ss as DepthEstimationPipeline,As as DepthProForDepthEstimation,Es as DepthProPreTrainedModel,Ls as DetrFeatureExtractor,Is as DetrForObjectDetection,zs as DetrForSegmentation,js as DetrImageProcessor,Ns as DetrModel,Vs as DetrObjectDetectionOutput,Bs as DetrPreTrainedModel,Os as DetrSegmentationOutput,Ds as Dinov2ForImageClassification,Rs as Dinov2Model,Gs as Dinov2PreTrainedModel,qs as DistilBertForMaskedLM,Ws as DistilBertForQuestionAnswering,$s as DistilBertForSequenceClassification,Us as DistilBertForTokenClassification,Qs as DistilBertModel,Xs as DistilBertPreTrainedModel,Hs as DistilBertTokenizer,Js as DocumentQuestionAnsweringPipeline,Ys as DonutFeatureExtractor,Ks as DonutImageProcessor,Zs as DonutSwinModel,er as DonutSwinPreTrainedModel,tr as EfficientNetForImageClassification,sr as EfficientNetImageProcessor,rr as EfficientNetModel,or as EfficientNetPreTrainedModel,nr as ElectraForMaskedLM,ar as ElectraForQuestionAnswering,ir as ElectraForSequenceClassification,lr as ElectraForTokenClassification,cr as ElectraModel,dr as ElectraPreTrainedModel,ur as ElectraTokenizer,pr as EosTokenCriteria,mr as EsmForMaskedLM,_r as EsmForSequenceClassification,hr as EsmForTokenClassification,fr as 
EsmModel,gr as EsmPreTrainedModel,Mr as EsmTokenizer,wr as FFT,Tr as FalconForCausalLM,br as FalconModel,xr as FalconPreTrainedModel,Pr as FalconTokenizer,kr as FastViTForImageClassification,yr as FastViTModel,Fr as FastViTPreTrainedModel,vr as FeatureExtractionPipeline,Cr as FeatureExtractor,Sr as FillMaskPipeline,Ar as Florence2ForConditionalGeneration,Er as Florence2PreTrainedModel,Lr as Florence2Processor,Ir as ForcedBOSTokenLogitsProcessor,zr as ForcedEOSTokenLogitsProcessor,jr as GLPNFeatureExtractor,Nr as GLPNForDepthEstimation,Vr as GLPNModel,Br as GLPNPreTrainedModel,Or as GPT2LMHeadModel,Dr as GPT2Model,Rr as GPT2PreTrainedModel,Gr as GPT2Tokenizer,qr as GPTBigCodeForCausalLM,Wr as GPTBigCodeModel,$r as GPTBigCodePreTrainedModel,Ur as GPTJForCausalLM,Qr as GPTJModel,Xr as GPTJPreTrainedModel,Hr as GPTNeoForCausalLM,Jr as GPTNeoModel,Yr as GPTNeoPreTrainedModel,Kr as GPTNeoXForCausalLM,Zr as GPTNeoXModel,eo as GPTNeoXPreTrainedModel,to as GPTNeoXTokenizer,so as Gemma2ForCausalLM,ro as Gemma2Model,oo as Gemma2PreTrainedModel,no as GemmaForCausalLM,ao as GemmaModel,io as GemmaPreTrainedModel,lo as GemmaTokenizer,co as GraniteForCausalLM,uo as GraniteModel,po as GranitePreTrainedModel,mo as Grok1Tokenizer,_o as GroupViTModel,ho as GroupViTPreTrainedModel,fo as HerbertTokenizer,go as HieraForImageClassification,Mo as HieraModel,wo as HieraPreTrainedModel,To as HubertForCTC,bo as HubertForSequenceClassification,xo as HubertModel,Po as HubertPreTrainedModel,ko as Idefics3ForConditionalGeneration,yo as Idefics3ImageProcessor,Fo as Idefics3PreTrainedModel,vo as Idefics3Processor,Co as ImageClassificationPipeline,So as ImageFeatureExtractionPipeline,Ao as ImageFeatureExtractor,Eo as ImageMattingOutput,Lo as ImageProcessor,Io as ImageSegmentationPipeline,zo as ImageToImagePipeline,jo as ImageToTextPipeline,No as InterruptableStoppingCriteria,Vo as JAISLMHeadModel,Bo as JAISModel,Oo as JAISPreTrainedModel,Do as JinaCLIPImageProcessor,Ro as JinaCLIPModel,Go as 
JinaCLIPPreTrainedModel,qo as JinaCLIPProcessor,Wo as JinaCLIPTextModel,$o as JinaCLIPVisionModel,Uo as LlamaForCausalLM,Qo as LlamaModel,Xo as LlamaPreTrainedModel,Ho as LlamaTokenizer,Jo as LlavaForConditionalGeneration,Yo as LlavaOnevisionForConditionalGeneration,Ko as LlavaOnevisionImageProcessor,Zo as LlavaPreTrainedModel,en as LogitsProcessor,tn as LogitsProcessorList,sn as LogitsWarper,rn as LongT5ForConditionalGeneration,on as LongT5Model,nn as LongT5PreTrainedModel,an as M2M100ForConditionalGeneration,ln as M2M100Model,cn as M2M100PreTrainedModel,dn as M2M100Tokenizer,un as MBart50Tokenizer,pn as MBartForCausalLM,mn as MBartForConditionalGeneration,_n as MBartForSequenceClassification,hn as MBartModel,fn as MBartPreTrainedModel,gn as MBartTokenizer,Mn as MPNetForMaskedLM,wn as MPNetForQuestionAnswering,Tn as MPNetForSequenceClassification,bn as MPNetForTokenClassification,xn as MPNetModel,Pn as MPNetPreTrainedModel,kn as MPNetTokenizer,yn as MT5ForConditionalGeneration,Fn as MT5Model,vn as MT5PreTrainedModel,Cn as MarianMTModel,Sn as MarianModel,An as MarianPreTrainedModel,En as MarianTokenizer,Ln as Mask2FormerImageProcessor,In as MaskFormerFeatureExtractor,zn as MaskFormerForInstanceSegmentation,jn as MaskFormerImageProcessor,Nn as MaskFormerModel,Vn as MaskFormerPreTrainedModel,Bn as MaskedLMOutput,On as MaxLengthCriteria,Dn as MgpstrForSceneTextRecognition,Rn as MgpstrModelOutput,Gn as MgpstrPreTrainedModel,qn as MgpstrProcessor,Wn as MgpstrTokenizer,$n as MinLengthLogitsProcessor,Un as MinNewTokensLengthLogitsProcessor,Qn as MistralForCausalLM,Xn as MistralModel,Hn as MistralPreTrainedModel,Jn as MobileBertForMaskedLM,Yn as MobileBertForQuestionAnswering,Kn as MobileBertForSequenceClassification,Zn as MobileBertModel,ea as MobileBertPreTrainedModel,ta as MobileBertTokenizer,sa as MobileLLMForCausalLM,ra as MobileLLMModel,oa as MobileLLMPreTrainedModel,na as MobileNetV1FeatureExtractor,aa as MobileNetV1ForImageClassification,ia as 
MobileNetV1ImageProcessor,la as MobileNetV1Model,ca as MobileNetV1PreTrainedModel,da as MobileNetV2FeatureExtractor,ua as MobileNetV2ForImageClassification,pa as MobileNetV2ImageProcessor,ma as MobileNetV2Model,_a as MobileNetV2PreTrainedModel,ha as MobileNetV3FeatureExtractor,fa as MobileNetV3ForImageClassification,ga as MobileNetV3ImageProcessor,Ma as MobileNetV3Model,wa as MobileNetV3PreTrainedModel,Ta as MobileNetV4FeatureExtractor,ba as MobileNetV4ForImageClassification,xa as MobileNetV4ImageProcessor,Pa as MobileNetV4Model,ka as MobileNetV4PreTrainedModel,ya as MobileViTFeatureExtractor,Fa as MobileViTForImageClassification,va as MobileViTImageProcessor,Ca as MobileViTModel,Sa as MobileViTPreTrainedModel,Aa as MobileViTV2ForImageClassification,Ea as MobileViTV2Model,La as MobileViTV2PreTrainedModel,Ia as ModelOutput,za as Moondream1ForConditionalGeneration,ja as MptForCausalLM,Na as MptModel,Va as MptPreTrainedModel,Ba as MultiModalityCausalLM,Oa as MultiModalityPreTrainedModel,Da as MusicgenForCausalLM,Ra as MusicgenForConditionalGeneration,Ga as MusicgenModel,qa as MusicgenPreTrainedModel,Wa as NllbTokenizer,$a as NoBadWordsLogitsProcessor,Ua as NoRepeatNGramLogitsProcessor,Qa as NomicBertModel,Xa as NomicBertPreTrainedModel,Ha as NougatImageProcessor,Ja as NougatTokenizer,Ya as OPTForCausalLM,Ka as OPTModel,Za as OPTPreTrainedModel,ei as ObjectDetectionPipeline,ti as OlmoForCausalLM,si as OlmoModel,ri as OlmoPreTrainedModel,oi as OpenELMForCausalLM,ni as OpenELMModel,ai as OpenELMPreTrainedModel,ii as OwlViTFeatureExtractor,li as OwlViTForObjectDetection,ci as OwlViTImageProcessor,di as OwlViTModel,ui as OwlViTPreTrainedModel,pi as OwlViTProcessor,mi as Owlv2ForObjectDetection,_i as Owlv2ImageProcessor,hi as Owlv2Model,fi as Owlv2PreTrainedModel,gi as PatchTSMixerForPrediction,Mi as PatchTSMixerModel,wi as PatchTSMixerPreTrainedModel,Ti as PatchTSTForPrediction,bi as PatchTSTModel,xi as PatchTSTPreTrainedModel,Pi as Phi3ForCausalLM,ki as Phi3Model,yi as 
Phi3PreTrainedModel,Fi as PhiForCausalLM,vi as PhiModel,Ci as PhiPreTrainedModel,Si as Pipeline,Ai as PreTrainedModel,Ei as PreTrainedTokenizer,Li as PretrainedConfig,Ii as PretrainedMixin,zi as Processor,ji as PvtForImageClassification,Ni as PvtImageProcessor,Vi as PvtModel,Bi as PvtPreTrainedModel,Oi as PyAnnoteFeatureExtractor,Di as PyAnnoteForAudioFrameClassification,Ri as PyAnnoteModel,Gi as PyAnnotePreTrainedModel,qi as PyAnnoteProcessor,Wi as QuestionAnsweringModelOutput,$i as QuestionAnsweringPipeline,Ui as Qwen2ForCausalLM,Qi as Qwen2Model,Xi as Qwen2PreTrainedModel,Hi as Qwen2Tokenizer,Ji as Qwen2VLForConditionalGeneration,Yi as Qwen2VLImageProcessor,Ki as Qwen2VLPreTrainedModel,Zi as Qwen2VLProcessor,el as RTDetrForObjectDetection,tl as RTDetrImageProcessor,sl as RTDetrModel,rl as RTDetrObjectDetectionOutput,ol as RTDetrPreTrainedModel,nl as RawImage,al as RepetitionPenaltyLogitsProcessor,il as ResNetForImageClassification,ll as ResNetModel,cl as ResNetPreTrainedModel,dl as RoFormerForMaskedLM,ul as RoFormerForQuestionAnswering,pl as RoFormerForSequenceClassification,ml as RoFormerForTokenClassification,_l as RoFormerModel,hl as RoFormerPreTrainedModel,fl as RoFormerTokenizer,gl as RobertaForMaskedLM,Ml as RobertaForQuestionAnswering,wl as RobertaForSequenceClassification,Tl as RobertaForTokenClassification,bl as RobertaModel,xl as RobertaPreTrainedModel,Pl as RobertaTokenizer,kl as SamImageProcessor,yl as SamImageSegmentationOutput,Fl as SamModel,vl as SamPreTrainedModel,Cl as SamProcessor,Sl as SapiensForDepthEstimation,Al as SapiensForNormalEstimation,El as SapiensForSemanticSegmentation,Ll as SapiensPreTrainedModel,Il as SeamlessM4TFeatureExtractor,zl as SegformerFeatureExtractor,jl as SegformerForImageClassification,Nl as SegformerForSemanticSegmentation,Vl as SegformerImageProcessor,Bl as SegformerModel,Ol as SegformerPreTrainedModel,Dl as Seq2SeqLMOutput,Rl as SequenceClassifierOutput,Gl as SiglipImageProcessor,ql as SiglipModel,Wl as 
SiglipPreTrainedModel,$l as SiglipTextModel,Ul as SiglipTokenizer,Ql as SiglipVisionModel,Xl as SpeechT5FeatureExtractor,Hl as SpeechT5ForSpeechToText,Jl as SpeechT5ForTextToSpeech,Yl as SpeechT5HifiGan,Kl as SpeechT5Model,Zl as SpeechT5PreTrainedModel,ec as SpeechT5Processor,tc as SpeechT5Tokenizer,sc as SqueezeBertForMaskedLM,rc as SqueezeBertForQuestionAnswering,oc as SqueezeBertForSequenceClassification,nc as SqueezeBertModel,ac as SqueezeBertPreTrainedModel,ic as SqueezeBertTokenizer,lc as StableLmForCausalLM,cc as StableLmModel,dc as StableLmPreTrainedModel,uc as Starcoder2ForCausalLM,pc as Starcoder2Model,mc as Starcoder2PreTrainedModel,_c as StoppingCriteria,hc as StoppingCriteriaList,fc as SummarizationPipeline,gc as SuppressTokensAtBeginLogitsProcessor,Mc as Swin2SRForImageSuperResolution,wc as Swin2SRImageProcessor,Tc as Swin2SRModel,bc as Swin2SRPreTrainedModel,xc as SwinForImageClassification,Pc as SwinModel,kc as SwinPreTrainedModel,yc as T5ForConditionalGeneration,Fc as T5Model,vc as T5PreTrainedModel,Cc as T5Tokenizer,Sc as TableTransformerForObjectDetection,Ac as TableTransformerModel,Ec as TableTransformerObjectDetectionOutput,Lc as TableTransformerPreTrainedModel,Ic as TemperatureLogitsWarper,zc as Tensor,jc as Text2TextGenerationPipeline,Nc as TextClassificationPipeline,Vc as TextGenerationPipeline,Bc as TextStreamer,Oc as TextToAudioPipeline,Dc as TokenClassificationPipeline,Rc as TokenClassifierOutput,Gc as TokenizerModel,qc as TopKLogitsWarper,Wc as TopPLogitsWarper,$c as TrOCRForCausalLM,Uc as TrOCRPreTrainedModel,Qc as TranslationPipeline,Xc as UniSpeechForCTC,Hc as UniSpeechForSequenceClassification,Jc as UniSpeechModel,Yc as UniSpeechPreTrainedModel,Kc as UniSpeechSatForAudioFrameClassification,Zc as UniSpeechSatForCTC,ed as UniSpeechSatForSequenceClassification,td as UniSpeechSatModel,sd as UniSpeechSatPreTrainedModel,rd as VLChatProcessor,od as VLMImageProcessor,nd as ViTFeatureExtractor,ad as ViTForImageClassification,id as 
ViTImageProcessor,ld as ViTMAEModel,cd as ViTMAEPreTrainedModel,dd as ViTMSNForImageClassification,ud as ViTMSNModel,pd as ViTMSNPreTrainedModel,md as ViTModel,_d as ViTPreTrainedModel,hd as VisionEncoderDecoderModel,fd as VitMatteForImageMatting,gd as VitMatteImageProcessor,Md as VitMattePreTrainedModel,wd as VitPoseForPoseEstimation,Td as VitPoseImageProcessor,bd as VitPosePreTrainedModel,xd as VitsModel,Pd as VitsModelOutput,kd as VitsPreTrainedModel,yd as VitsTokenizer,Fd as Wav2Vec2BertForCTC,vd as Wav2Vec2BertForSequenceClassification,Cd as Wav2Vec2BertModel,Sd as Wav2Vec2BertPreTrainedModel,Ad as Wav2Vec2CTCTokenizer,Ed as Wav2Vec2FeatureExtractor,Ld as Wav2Vec2ForAudioFrameClassification,Id as Wav2Vec2ForCTC,zd as Wav2Vec2ForSequenceClassification,jd as Wav2Vec2Model,Nd as Wav2Vec2PreTrainedModel,Vd as Wav2Vec2ProcessorWithLM,Bd as WavLMForAudioFrameClassification,Od as WavLMForCTC,Dd as WavLMForSequenceClassification,Rd as WavLMForXVector,Gd as WavLMModel,qd as WavLMPreTrainedModel,Wd as WeSpeakerFeatureExtractor,$d as WeSpeakerResNetModel,Ud as WeSpeakerResNetPreTrainedModel,Qd as WhisperFeatureExtractor,Xd as WhisperForConditionalGeneration,Hd as WhisperModel,Jd as WhisperPreTrainedModel,Yd as WhisperProcessor,Kd as WhisperTextStreamer,Zd as WhisperTimeStampLogitsProcessor,eu as WhisperTokenizer,tu as XLMForQuestionAnswering,su as XLMForSequenceClassification,ru as XLMForTokenClassification,ou as XLMModel,nu as XLMPreTrainedModel,au as XLMRobertaForMaskedLM,iu as XLMRobertaForQuestionAnswering,lu as XLMRobertaForSequenceClassification,cu as XLMRobertaForTokenClassification,du as XLMRobertaModel,uu as XLMRobertaPreTrainedModel,pu as XLMRobertaTokenizer,mu as XLMTokenizer,_u as XLMWithLMHeadModel,hu as XVectorOutput,fu as YolosFeatureExtractor,gu as YolosForObjectDetection,Mu as YolosImageProcessor,wu as YolosModel,Tu as YolosObjectDetectionOutput,bu as YolosPreTrainedModel,xu as ZeroShotAudioClassificationPipeline,Pu as ZeroShotClassificationPipeline,ku 
as ZeroShotImageClassificationPipeline,yu as ZeroShotObjectDetectionPipeline,Fu as bankers_round,vu as cat,Cu as cos_sim,Su as dot,Au as dynamic_time_warping,Eu as env,Lu as full,Iu as full_like,zu as getKeyValueShapes,ju as hamming,Nu as hanning,Vu as interpolate,Bu as interpolate_4d,Ou as interpolate_data,Du as is_chinese_char,Ru as layer_norm,Gu as load_image,qu as log_softmax,Wu as magnitude,$u as matmul,Uu as max,Qu as mean,Xu as mean_pooling,Hu as medianFilter,Ju as mel_filter_bank,Yu as min,Ku as ones,Zu as ones_like,ep as permute,tp as permute_data,sp as pipeline,rp as quantize_embeddings,op as read_audio,np as rfft,ap as round,ip as softmax,lp as spectrogram,cp as stack,dp as std_mean,up as topk,pp as window_function,mp as zeros,_p as zeros_like};
1
+ import*as e from"fs";import*as t from"onnxruntime-node";import*as s from"path";import*as r from"sharp";import*as o from"url";var n,a,i={fs:t=>{t.exports=e},"onnxruntime-node":e=>{e.exports=t},path:e=>{e.exports=s},sharp:e=>{e.exports=r},url:e=>{e.exports=o},"?8b6b":()=>{},"./node_modules/@huggingface/jinja/dist/index.js":(e,t,s)=>{s.r(t),s.d(t,{Environment:()=>Y,Interpreter:()=>K,Template:()=>te,parse:()=>N,tokenize:()=>d});var r=Object.freeze({Text:"Text",NumericLiteral:"NumericLiteral",BooleanLiteral:"BooleanLiteral",NullLiteral:"NullLiteral",StringLiteral:"StringLiteral",Identifier:"Identifier",Equals:"Equals",OpenParen:"OpenParen",CloseParen:"CloseParen",OpenStatement:"OpenStatement",CloseStatement:"CloseStatement",OpenExpression:"OpenExpression",CloseExpression:"CloseExpression",OpenSquareBracket:"OpenSquareBracket",CloseSquareBracket:"CloseSquareBracket",OpenCurlyBracket:"OpenCurlyBracket",CloseCurlyBracket:"CloseCurlyBracket",Comma:"Comma",Dot:"Dot",Colon:"Colon",Pipe:"Pipe",CallOperator:"CallOperator",AdditiveBinaryOperator:"AdditiveBinaryOperator",MultiplicativeBinaryOperator:"MultiplicativeBinaryOperator",ComparisonBinaryOperator:"ComparisonBinaryOperator",UnaryOperator:"UnaryOperator",Set:"Set",If:"If",For:"For",In:"In",Is:"Is",NotIn:"NotIn",Else:"Else",EndIf:"EndIf",ElseIf:"ElseIf",EndFor:"EndFor",And:"And",Or:"Or",Not:"UnaryOperator",Macro:"Macro",EndMacro:"EndMacro"}),o=Object.freeze({set:r.Set,for:r.For,in:r.In,is:r.Is,if:r.If,else:r.Else,endif:r.EndIf,elif:r.ElseIf,endfor:r.EndFor,and:r.And,or:r.Or,not:r.Not,"not in":r.NotIn,macro:r.Macro,endmacro:r.EndMacro,true:r.BooleanLiteral,false:r.BooleanLiteral,none:r.NullLiteral,True:r.BooleanLiteral,False:r.BooleanLiteral,None:r.NullLiteral}),n=class{constructor(e,t){this.value=e,this.type=t}};function a(e){return/\w/.test(e)}function i(e){return/[0-9]/.test(e)}var 
l=[["{%",r.OpenStatement],["%}",r.CloseStatement],["{{",r.OpenExpression],["}}",r.CloseExpression],["(",r.OpenParen],[")",r.CloseParen],["{",r.OpenCurlyBracket],["}",r.CloseCurlyBracket],["[",r.OpenSquareBracket],["]",r.CloseSquareBracket],[",",r.Comma],[".",r.Dot],[":",r.Colon],["|",r.Pipe],["<=",r.ComparisonBinaryOperator],[">=",r.ComparisonBinaryOperator],["==",r.ComparisonBinaryOperator],["!=",r.ComparisonBinaryOperator],["<",r.ComparisonBinaryOperator],[">",r.ComparisonBinaryOperator],["+",r.AdditiveBinaryOperator],["-",r.AdditiveBinaryOperator],["*",r.MultiplicativeBinaryOperator],["/",r.MultiplicativeBinaryOperator],["%",r.MultiplicativeBinaryOperator],["=",r.Equals]],c=new Map([["n","\n"],["t","\t"],["r","\r"],["b","\b"],["f","\f"],["v","\v"],["'","'"],['"','"'],["\\","\\"]]);function d(e,t={}){const s=[],d=function(e,t={}){return e.endsWith("\n")&&(e=e.slice(0,-1)),e=e.replace(/{#.*?#}/gs,"{##}"),t.lstrip_blocks&&(e=e.replace(/^[ \t]*({[#%])/gm,"$1")),t.trim_blocks&&(e=e.replace(/([#%]})\n/g,"$1")),e.replace(/{##}/g,"").replace(/-%}\s*/g,"%}").replace(/\s*{%-/g,"{%").replace(/-}}\s*/g,"}}").replace(/\s*{{-/g,"{{")}(e,t);let u=0;const p=e=>{let t="";for(;e(d[u]);)if("\\"!==d[u]){if(t+=d[u++],u>=d.length)throw new SyntaxError("Unexpected end of input")}else{if(++u,u>=d.length)throw new SyntaxError("Unexpected end of input");const e=d[u++],s=c.get(e);if(void 0===s)throw new SyntaxError(`Unexpected escaped character: ${e}`);t+=s}return t};e:for(;u<d.length;){const e=s.at(-1)?.type;if(void 0===e||e===r.CloseStatement||e===r.CloseExpression){let e="";for(;u<d.length&&("{"!==d[u]||"%"!==d[u+1]&&"{"!==d[u+1]);)e+=d[u++];if(e.length>0){s.push(new n(e,r.Text));continue}}p((e=>/\s/.test(e)));const t=d[u];if("-"===t||"+"===t){const e=s.at(-1)?.type;if(e===r.Text||void 0===e)throw new SyntaxError(`Unexpected character: ${t}`);switch(e){case r.Identifier:case r.NumericLiteral:case r.BooleanLiteral:case r.NullLiteral:case r.StringLiteral:case r.CloseParen:case 
r.CloseSquareBracket:break;default:{++u;const e=p(i);s.push(new n(`${t}${e}`,e.length>0?r.NumericLiteral:r.UnaryOperator));continue}}}for(const[e,t]of l){if(d.slice(u,u+e.length)===e){s.push(new n(e,t)),u+=e.length;continue e}}if("'"!==t&&'"'!==t)if(i(t)){const e=p(i);s.push(new n(e,r.NumericLiteral))}else{if(!a(t))throw new SyntaxError(`Unexpected character: ${t}`);{const e=p(a),t=Object.hasOwn(o,e)?o[e]:r.Identifier;t===r.In&&s.at(-1)?.type===r.Not?(s.pop(),s.push(new n("not in",r.NotIn))):s.push(new n(e,t))}}else{++u;const e=p((e=>e!==t));s.push(new n(e,r.StringLiteral)),++u}}return s}var u=class{type="Statement"},p=class extends u{constructor(e){super(),this.body=e}type="Program"},m=class extends u{constructor(e,t,s){super(),this.test=e,this.body=t,this.alternate=s}type="If"},_=class extends u{constructor(e,t,s,r){super(),this.loopvar=e,this.iterable=t,this.body=s,this.defaultBlock=r}type="For"},h=class extends u{constructor(e,t){super(),this.assignee=e,this.value=t}type="Set"},f=class extends u{constructor(e,t,s){super(),this.name=e,this.args=t,this.body=s}type="Macro"},g=class extends u{type="Expression"},M=class extends g{constructor(e,t,s){super(),this.object=e,this.property=t,this.computed=s}type="MemberExpression"},w=class extends g{constructor(e,t){super(),this.callee=e,this.args=t}type="CallExpression"},T=class extends g{constructor(e){super(),this.value=e}type="Identifier"},b=class extends g{constructor(e){super(),this.value=e}type="Literal"},P=class extends b{type="NumericLiteral"},x=class extends b{type="StringLiteral"},k=class extends b{type="BooleanLiteral"},y=class extends b{type="NullLiteral"},F=class extends b{type="ArrayLiteral"},v=class extends b{type="TupleLiteral"},C=class extends b{type="ObjectLiteral"},S=class extends g{constructor(e,t,s){super(),this.operator=e,this.left=t,this.right=s}type="BinaryExpression"},A=class extends g{constructor(e,t){super(),this.operand=e,this.filter=t}type="FilterExpression"},E=class extends 
g{constructor(e,t){super(),this.iterable=e,this.test=t}type="SelectExpression"},L=class extends g{constructor(e,t,s){super(),this.operand=e,this.negate=t,this.test=s}type="TestExpression"},I=class extends g{constructor(e,t){super(),this.operator=e,this.argument=t}type="UnaryExpression"},z=class extends g{constructor(e=void 0,t=void 0,s=void 0){super(),this.start=e,this.stop=t,this.step=s}type="SliceExpression"},j=class extends g{constructor(e,t){super(),this.key=e,this.value=t}type="KeywordArgumentExpression"};function N(e){const t=new p([]);let s=0;function o(t,r){const o=e[s++];if(!o||o.type!==t)throw new Error(`Parser Error: ${r}. ${o.type} !== ${t}.`);return o}function n(){switch(e[s].type){case r.Text:return new x(o(r.Text,"Expected text token").value);case r.OpenStatement:return function(){let t;switch(o(r.OpenStatement,"Expected opening statement token"),e[s].type){case r.Set:++s,t=l(),o(r.CloseStatement,"Expected closing statement token");break;case r.If:++s,t=c(),o(r.OpenStatement,"Expected {% token"),o(r.EndIf,"Expected endif token"),o(r.CloseStatement,"Expected %} token");break;case r.Macro:++s,t=function(){const e=$();if("Identifier"!==e.type)throw new SyntaxError("Expected identifier following macro statement");const t=D();o(r.CloseStatement,"Expected closing statement token");const s=[];for(;a(r.OpenStatement,r.EndMacro);)s.push(n());return new f(e,t,s)}(),o(r.OpenStatement,"Expected {% token"),o(r.EndMacro,"Expected endmacro token"),o(r.CloseStatement,"Expected %} token");break;case r.For:++s,t=function(){const e=d(!0);if(!(e instanceof T||e instanceof v))throw new SyntaxError(`Expected identifier/tuple for the loop variable, got ${e.type} instead`);o(r.In,"Expected `in` keyword following loop variable");const t=u();o(r.CloseStatement,"Expected closing statement token");const l=[];for(;a(r.OpenStatement,r.EndFor)&&a(r.OpenStatement,r.Else);)l.push(n());const c=[];if(i(r.OpenStatement,r.Else))for(++s,++s,o(r.CloseStatement,"Expected closing statement 
token");a(r.OpenStatement,r.EndFor);)c.push(n());return new _(e,t,l,c)}(),o(r.OpenStatement,"Expected {% token"),o(r.EndFor,"Expected endfor token"),o(r.CloseStatement,"Expected %} token");break;default:throw new SyntaxError(`Unknown statement type: ${e[s].type}`)}return t}();case r.OpenExpression:return function(){o(r.OpenExpression,"Expected opening expression token");const e=u();return o(r.CloseExpression,"Expected closing expression token"),e}();default:throw new SyntaxError(`Unexpected token type: ${e[s].type}`)}}function a(...t){return s+t.length<=e.length&&t.some(((t,r)=>t!==e[s+r].type))}function i(...t){return s+t.length<=e.length&&t.every(((t,r)=>t===e[s+r].type))}function l(){const e=u();if(i(r.Equals)){++s;const t=l();return new h(e,t)}return e}function c(){const t=u();o(r.CloseStatement,"Expected closing statement token");const a=[],l=[];for(;e[s]?.type!==r.OpenStatement||e[s+1]?.type!==r.ElseIf&&e[s+1]?.type!==r.Else&&e[s+1]?.type!==r.EndIf;)a.push(n());if(e[s]?.type===r.OpenStatement&&e[s+1]?.type!==r.EndIf)if(++s,i(r.ElseIf))o(r.ElseIf,"Expected elseif token"),l.push(c());else for(o(r.Else,"Expected else token"),o(r.CloseStatement,"Expected closing statement token");e[s]?.type!==r.OpenStatement||e[s+1]?.type!==r.EndIf;)l.push(n());return new m(t,a,l)}function d(e=!1){const t=e?$:u,o=[t()],n=i(r.Comma);for(;n&&(++s,o.push(t()),i(r.Comma)););return n?new v(o):o[0]}function u(){return function(){const e=g();if(i(r.If)){++s;const t=g();if(i(r.Else)){++s;const r=g();return new m(t,[e],[r])}return new E(e,t)}return e}()}function g(){let t=b();for(;i(r.Or);){const r=e[s];++s;const o=b();t=new S(r,t,o)}return t}function b(){let t=N();for(;i(r.And);){const r=e[s];++s;const o=N();t=new S(r,t,o)}return t}function N(){let t;for(;i(r.Not);){const r=e[s];++s;const o=N();t=new I(r,o)}return t??function(){let t=V();for(;i(r.ComparisonBinaryOperator)||i(r.In)||i(r.NotIn);){const r=e[s];++s;const o=V();t=new S(r,t,o)}return t}()}function V(){let 
t=R();for(;i(r.AdditiveBinaryOperator);){const r=e[s];++s;const o=R();t=new S(r,t,o)}return t}function O(){const t=function(){let t=$();for(;i(r.Dot)||i(r.OpenSquareBracket);){const n=e[s];let a;++s;const i=n.type!==r.Dot;if(i)a=G(),o(r.CloseSquareBracket,"Expected closing square bracket");else if(a=$(),"Identifier"!==a.type)throw new SyntaxError("Expected identifier following dot operator");t=new M(t,a,i)}return t}();return i(r.OpenParen)?B(t):t}function B(e){let t=new w(e,D());return i(r.OpenParen)&&(t=B(t)),t}function D(){o(r.OpenParen,"Expected opening parenthesis for arguments list");const e=function(){const e=[];for(;!i(r.CloseParen);){let t=u();if(i(r.Equals)){if(++s,!(t instanceof T))throw new SyntaxError("Expected identifier for keyword argument");const e=u();t=new j(t,e)}e.push(t),i(r.Comma)&&++s}return e}();return o(r.CloseParen,"Expected closing parenthesis for arguments list"),e}function G(){const e=[];let t=!1;for(;!i(r.CloseSquareBracket);)i(r.Colon)?(e.push(void 0),++s,t=!0):(e.push(u()),i(r.Colon)&&(++s,t=!0));if(0===e.length)throw new SyntaxError("Expected at least one argument for member/slice expression");if(t){if(e.length>3)throw new SyntaxError("Expected 0-3 arguments for slice expression");return new z(...e)}return e[0]}function R(){let t=q();for(;i(r.MultiplicativeBinaryOperator);){const r=e[s];++s;const o=q();t=new S(r,t,o)}return t}function q(){let e=function(){let e=O();for(;i(r.Pipe);){++s;let t=$();if(!(t instanceof T))throw new SyntaxError("Expected identifier for the filter");i(r.OpenParen)&&(t=B(t)),e=new A(e,t)}return e}();for(;i(r.Is);){++s;const t=i(r.Not);t&&++s;let o=$();if(o instanceof k?o=new T(o.value.toString()):o instanceof y&&(o=new T("none")),!(o instanceof T))throw new SyntaxError("Expected identifier for the test");e=new L(e,t,o)}return e}function $(){const t=e[s];switch(t.type){case r.NumericLiteral:return++s,new P(Number(t.value));case r.StringLiteral:return++s,new x(t.value);case r.BooleanLiteral:return++s,new 
k("true"===t.value.toLowerCase());case r.NullLiteral:return++s,new y(null);case r.Identifier:return++s,new T(t.value);case r.OpenParen:{++s;const t=d();if(e[s].type!==r.CloseParen)throw new SyntaxError(`Expected closing parenthesis, got ${e[s].type} instead`);return++s,t}case r.OpenSquareBracket:{++s;const e=[];for(;!i(r.CloseSquareBracket);)e.push(u()),i(r.Comma)&&++s;return++s,new F(e)}case r.OpenCurlyBracket:{++s;const e=new Map;for(;!i(r.CloseCurlyBracket);){const t=u();o(r.Colon,"Expected colon between key and value in object literal");const n=u();e.set(t,n),i(r.Comma)&&++s}return++s,new C(e)}default:throw new SyntaxError(`Unexpected token: ${t.type}`)}}for(;s<e.length;)t.body.push(n());return t}function V(e,t,s=1){void 0===t&&(t=e,e=0);const r=[];for(let o=e;o<t;o+=s)r.push(o);return r}function O(e,t,s,r=1){const o=Math.sign(r);o>=0?(t=(t??=0)<0?Math.max(e.length+t,0):Math.min(t,e.length),s=(s??=e.length)<0?Math.max(e.length+s,0):Math.min(s,e.length)):(t=(t??=e.length-1)<0?Math.max(e.length+t,-1):Math.min(t,e.length-1),s=(s??=-1)<-1?Math.max(e.length+s,-1):Math.min(s,e.length-1));const n=[];for(let a=t;o*a<o*s;a+=r)n.push(e[a]);return n}function B(e){return e.replace(/\b\w/g,(e=>e.toUpperCase()))}var D=class{type="RuntimeValue";value;builtins=new Map;constructor(e=void 0){this.value=e}__bool__(){return new q(!!this.value)}},G=class extends D{type="NumericValue"},R=class extends D{type="StringValue";builtins=new Map([["upper",new X((()=>new R(this.value.toUpperCase())))],["lower",new X((()=>new R(this.value.toLowerCase())))],["strip",new X((()=>new R(this.value.trim())))],["title",new X((()=>new R(B(this.value))))],["length",new G(this.value.length)],["rstrip",new X((()=>new R(this.value.trimEnd())))],["lstrip",new X((()=>new R(this.value.trimStart())))]])},q=class extends D{type="BooleanValue"},$=class extends D{type="ObjectValue";__bool__(){return new q(this.value.size>0)}builtins=new Map([["get",new X((([e,t])=>{if(!(e instanceof R))throw new Error(`Object 
key must be a string: got ${e.type}`);return this.value.get(e.value)??t??new H}))],["items",new X((()=>new U(Array.from(this.value.entries()).map((([e,t])=>new U([new R(e),t]))))))]])},W=class extends ${type="KeywordArgumentsValue"},U=class extends D{type="ArrayValue";builtins=new Map([["length",new G(this.value.length)]]);__bool__(){return new q(this.value.length>0)}},Q=class extends U{type="TupleValue"},X=class extends D{type="FunctionValue"},H=class extends D{type="NullValue"},J=class extends D{type="UndefinedValue"},Y=class{constructor(e){this.parent=e}variables=new Map([["namespace",new X((e=>{if(0===e.length)return new $(new Map);if(1!==e.length||!(e[0]instanceof $))throw new Error("`namespace` expects either zero arguments or a single object argument");return e[0]}))]]);tests=new Map([["boolean",e=>"BooleanValue"===e.type],["callable",e=>e instanceof X],["odd",e=>{if("NumericValue"!==e.type)throw new Error(`Cannot apply test "odd" to type: ${e.type}`);return e.value%2!=0}],["even",e=>{if("NumericValue"!==e.type)throw new Error(`Cannot apply test "even" to type: ${e.type}`);return e.value%2==0}],["false",e=>"BooleanValue"===e.type&&!e.value],["true",e=>"BooleanValue"===e.type&&e.value],["none",e=>"NullValue"===e.type],["string",e=>"StringValue"===e.type],["number",e=>"NumericValue"===e.type],["integer",e=>"NumericValue"===e.type&&Number.isInteger(e.value)],["iterable",e=>"ArrayValue"===e.type||"StringValue"===e.type],["mapping",e=>"ObjectValue"===e.type],["lower",e=>{const t=e.value;return"StringValue"===e.type&&t===t.toLowerCase()}],["upper",e=>{const t=e.value;return"StringValue"===e.type&&t===t.toUpperCase()}],["none",e=>"NullValue"===e.type],["defined",e=>"UndefinedValue"!==e.type],["undefined",e=>"UndefinedValue"===e.type],["equalto",(e,t)=>e.value===t.value],["eq",(e,t)=>e.value===t.value]]);set(e,t){return this.declareVariable(e,Z(t))}declareVariable(e,t){if(this.variables.has(e))throw new SyntaxError(`Variable already declared: ${e}`);return 
this.variables.set(e,t),t}setVariable(e,t){return this.variables.set(e,t),t}resolve(e){if(this.variables.has(e))return this;if(this.parent)return this.parent.resolve(e);throw new Error(`Unknown variable: ${e}`)}lookupVariable(e){try{return this.resolve(e).variables.get(e)??new J}catch{return new J}}},K=class{global;constructor(e){this.global=e??new Y}run(e){return this.evaluate(e,this.global)}evaluateBinaryExpression(e,t){const s=this.evaluate(e.left,t);switch(e.operator.value){case"and":return s.__bool__().value?this.evaluate(e.right,t):s;case"or":return s.__bool__().value?s:this.evaluate(e.right,t)}const r=this.evaluate(e.right,t);switch(e.operator.value){case"==":return new q(s.value==r.value);case"!=":return new q(s.value!=r.value)}if(s instanceof J||r instanceof J)throw new Error("Cannot perform operation on undefined values");if(s instanceof H||r instanceof H)throw new Error("Cannot perform operation on null values");if(s instanceof G&&r instanceof G)switch(e.operator.value){case"+":return new G(s.value+r.value);case"-":return new G(s.value-r.value);case"*":return new G(s.value*r.value);case"/":return new G(s.value/r.value);case"%":return new G(s.value%r.value);case"<":return new q(s.value<r.value);case">":return new q(s.value>r.value);case">=":return new q(s.value>=r.value);case"<=":return new q(s.value<=r.value)}else if(s instanceof U&&r instanceof U){if("+"===e.operator.value)return new U(s.value.concat(r.value))}else if(r instanceof U){const t=void 0!==r.value.find((e=>e.value===s.value));switch(e.operator.value){case"in":return new q(t);case"not in":return new q(!t)}}if((s instanceof R||r instanceof R)&&"+"===e.operator.value)return new R(s.value.toString()+r.value.toString());if(s instanceof R&&r instanceof R)switch(e.operator.value){case"in":return new q(r.value.includes(s.value));case"not in":return new q(!r.value.includes(s.value))}if(s instanceof R&&r instanceof $)switch(e.operator.value){case"in":return new q(r.value.has(s.value));case"not 
in":return new q(!r.value.has(s.value))}throw new SyntaxError(`Unknown operator "${e.operator.value}" between ${s.type} and ${r.type}`)}evaluateArguments(e,t){const s=[],r=new Map;for(const o of e)if("KeywordArgumentExpression"===o.type){const e=o;r.set(e.key.value,this.evaluate(e.value,t))}else{if(r.size>0)throw new Error("Positional arguments must come before keyword arguments");s.push(this.evaluate(o,t))}return[s,r]}evaluateFilterExpression(e,t){const s=this.evaluate(e.operand,t);if("Identifier"===e.filter.type){const t=e.filter;if("tojson"===t.value)return new R(ee(s));if(s instanceof U)switch(t.value){case"list":return s;case"first":return s.value[0];case"last":return s.value[s.value.length-1];case"length":return new G(s.value.length);case"reverse":return new U(s.value.reverse());case"sort":return new U(s.value.sort(((e,t)=>{if(e.type!==t.type)throw new Error(`Cannot compare different types: ${e.type} and ${t.type}`);switch(e.type){case"NumericValue":return e.value-t.value;case"StringValue":return e.value.localeCompare(t.value);default:throw new Error(`Cannot compare type: ${e.type}`)}})));default:throw new Error(`Unknown ArrayValue filter: ${t.value}`)}else if(s instanceof R)switch(t.value){case"length":return new G(s.value.length);case"upper":return new R(s.value.toUpperCase());case"lower":return new R(s.value.toLowerCase());case"title":return new R(B(s.value));case"capitalize":return new R(s.value.charAt(0).toUpperCase()+s.value.slice(1));case"trim":return new R(s.value.trim());case"indent":return new R(s.value.split("\n").map(((e,t)=>0===t||0===e.length?e:" "+e)).join("\n"));case"string":return s;default:throw new Error(`Unknown StringValue filter: ${t.value}`)}else{if(s instanceof G){if("abs"===t.value)return new G(Math.abs(s.value));throw new Error(`Unknown NumericValue filter: ${t.value}`)}if(s instanceof $)switch(t.value){case"items":return new U(Array.from(s.value.entries()).map((([e,t])=>new U([new R(e),t]))));case"length":return new 
G(s.value.size);default:throw new Error(`Unknown ObjectValue filter: ${t.value}`)}}throw new Error(`Cannot apply filter "${t.value}" to type: ${s.type}`)}if("CallExpression"===e.filter.type){const r=e.filter;if("Identifier"!==r.callee.type)throw new Error(`Unknown filter: ${r.callee.type}`);const o=r.callee.value;if("tojson"===o){const[,e]=this.evaluateArguments(r.args,t),o=e.get("indent")??new H;if(!(o instanceof G||o instanceof H))throw new Error("If set, indent must be a number");return new R(ee(s,o.value))}if(s instanceof U){switch(o){case"selectattr":case"rejectattr":{const e="selectattr"===o;if(s.value.some((e=>!(e instanceof $))))throw new Error(`\`${o}\` can only be applied to array of objects`);if(r.args.some((e=>"StringLiteral"!==e.type)))throw new Error(`arguments of \`${o}\` must be strings`);const[n,a,i]=r.args.map((e=>this.evaluate(e,t)));let l;if(a){const e=t.tests.get(a.value);if(!e)throw new Error(`Unknown test: ${a.value}`);l=e}else l=(...e)=>e[0].__bool__().value;const c=s.value.filter((t=>{const s=t.value.get(n.value),r=!!s&&l(s,i);return e?r:!r}));return new U(c)}case"map":{const[,e]=this.evaluateArguments(r.args,t);if(e.has("attribute")){const t=e.get("attribute");if(!(t instanceof R))throw new Error("attribute must be a string");const r=e.get("default"),o=s.value.map((e=>{if(!(e instanceof $))throw new Error("items in map must be an object");return e.value.get(t.value)??r??new J}));return new U(o)}throw new Error("`map` expressions without `attribute` set are not currently supported.")}}throw new Error(`Unknown ArrayValue filter: ${o}`)}if(s instanceof R){if("indent"===o){const[e,o]=this.evaluateArguments(r.args,t),n=e.at(0)??o.get("width")??new G(4);if(!(n instanceof G))throw new Error("width must be a number");const a=e.at(1)??o.get("first")??new q(!1),i=e.at(2)??o.get("blank")??new q(!1),l=s.value.split("\n"),c=" ".repeat(n.value),d=l.map(((e,t)=>!a.value&&0===t||!i.value&&0===e.length?e:c+e));return new R(d.join("\n"))}throw new 
Error(`Unknown StringValue filter: ${o}`)}throw new Error(`Cannot apply filter "${o}" to type: ${s.type}`)}throw new Error(`Unknown filter: ${e.filter.type}`)}evaluateTestExpression(e,t){const s=this.evaluate(e.operand,t),r=t.tests.get(e.test.value);if(!r)throw new Error(`Unknown test: ${e.test.value}`);const o=r(s);return new q(e.negate?!o:o)}evaluateUnaryExpression(e,t){const s=this.evaluate(e.argument,t);if("not"===e.operator.value)return new q(!s.value);throw new SyntaxError(`Unknown operator: ${e.operator.value}`)}evalProgram(e,t){return this.evaluateBlock(e.body,t)}evaluateBlock(e,t){let s="";for(const r of e){const e=this.evaluate(r,t);"NullValue"!==e.type&&"UndefinedValue"!==e.type&&(s+=e.value)}return new R(s)}evaluateIdentifier(e,t){return t.lookupVariable(e.value)}evaluateCallExpression(e,t){const[s,r]=this.evaluateArguments(e.args,t);r.size>0&&s.push(new W(r));const o=this.evaluate(e.callee,t);if("FunctionValue"!==o.type)throw new Error(`Cannot call something that is not a function: got ${o.type}`);return o.value(s,t)}evaluateSliceExpression(e,t,s){if(!(e instanceof U||e instanceof R))throw new Error("Slice object must be an array or string");const r=this.evaluate(t.start,s),o=this.evaluate(t.stop,s),n=this.evaluate(t.step,s);if(!(r instanceof G||r instanceof J))throw new Error("Slice start must be numeric or undefined");if(!(o instanceof G||o instanceof J))throw new Error("Slice stop must be numeric or undefined");if(!(n instanceof G||n instanceof J))throw new Error("Slice step must be numeric or undefined");return e instanceof U?new U(O(e.value,r.value,o.value,n.value)):new R(O(Array.from(e.value),r.value,o.value,n.value).join(""))}evaluateMemberExpression(e,t){const s=this.evaluate(e.object,t);let r,o;if(e.computed){if("SliceExpression"===e.property.type)return this.evaluateSliceExpression(s,e.property,t);r=this.evaluate(e.property,t)}else r=new R(e.property.value);if(s instanceof $){if(!(r instanceof R))throw new Error(`Cannot access property with 
non-string: got ${r.type}`);o=s.value.get(r.value)??s.builtins.get(r.value)}else if(s instanceof U||s instanceof R)if(r instanceof G)o=s.value.at(r.value),s instanceof R&&(o=new R(s.value.at(r.value)));else{if(!(r instanceof R))throw new Error(`Cannot access property with non-string/non-number: got ${r.type}`);o=s.builtins.get(r.value)}else{if(!(r instanceof R))throw new Error(`Cannot access property with non-string: got ${r.type}`);o=s.builtins.get(r.value)}return o instanceof D?o:new J}evaluateSet(e,t){const s=this.evaluate(e.value,t);if("Identifier"===e.assignee.type){const r=e.assignee.value;t.setVariable(r,s)}else{if("MemberExpression"!==e.assignee.type)throw new Error(`Invalid LHS inside assignment expression: ${JSON.stringify(e.assignee)}`);{const r=e.assignee,o=this.evaluate(r.object,t);if(!(o instanceof $))throw new Error("Cannot assign to member of non-object");if("Identifier"!==r.property.type)throw new Error("Cannot assign to member with non-identifier property");o.value.set(r.property.value,s)}}return new H}evaluateIf(e,t){const s=this.evaluate(e.test,t);return this.evaluateBlock(s.__bool__().value?e.body:e.alternate,t)}evaluateFor(e,t){const s=new Y(t);let r,o;if("SelectExpression"===e.iterable.type){const t=e.iterable;o=this.evaluate(t.iterable,s),r=t.test}else o=this.evaluate(e.iterable,s);if(!(o instanceof U))throw new Error(`Expected iterable type in for loop: got ${o.type}`);const n=[],a=[];for(let t=0;t<o.value.length;++t){const i=new Y(s),l=o.value[t];let c;if("Identifier"===e.loopvar.type)c=t=>t.setVariable(e.loopvar.value,l);else{if("TupleLiteral"!==e.loopvar.type)throw new Error(`Invalid loop variable(s): ${e.loopvar.type}`);{const t=e.loopvar;if("ArrayValue"!==l.type)throw new Error(`Cannot unpack non-iterable type: ${l.type}`);const s=l;if(t.value.length!==s.value.length)throw new Error(`Too ${t.value.length>s.value.length?"few":"many"} items to unpack`);c=e=>{for(let r=0;r<t.value.length;++r){if("Identifier"!==t.value[r].type)throw new 
Error(`Cannot unpack non-identifier type: ${t.value[r].type}`);e.setVariable(t.value[r].value,s.value[r])}}}}if(r){c(i);if(!this.evaluate(r,i).__bool__().value)continue}n.push(l),a.push(c)}let i="",l=!0;for(let t=0;t<n.length;++t){const r=new Map([["index",new G(t+1)],["index0",new G(t)],["revindex",new G(n.length-t)],["revindex0",new G(n.length-t-1)],["first",new q(0===t)],["last",new q(t===n.length-1)],["length",new G(n.length)],["previtem",t>0?n[t-1]:new J],["nextitem",t<n.length-1?n[t+1]:new J]]);s.setVariable("loop",new $(r)),a[t](s);i+=this.evaluateBlock(e.body,s).value,l=!1}if(l){i+=this.evaluateBlock(e.defaultBlock,s).value}return new R(i)}evaluateMacro(e,t){return t.setVariable(e.name.value,new X(((t,s)=>{const r=new Y(s);let o;t=t.slice(),"KeywordArgumentsValue"===t.at(-1)?.type&&(o=t.pop());for(let s=0;s<e.args.length;++s){const n=e.args[s],a=t[s];if("Identifier"===n.type){const e=n;if(!a)throw new Error(`Missing positional argument: ${e.value}`);r.setVariable(e.value,a)}else{if("KeywordArgumentExpression"!==n.type)throw new Error(`Unknown argument type: ${n.type}`);{const e=n,t=a??o?.value.get(e.key.value)??this.evaluate(e.value,r);r.setVariable(e.key.value,t)}}}return this.evaluateBlock(e.body,r)}))),new H}evaluate(e,t){if(void 0===e)return new J;switch(e.type){case"Program":return this.evalProgram(e,t);case"Set":return this.evaluateSet(e,t);case"If":return this.evaluateIf(e,t);case"For":return this.evaluateFor(e,t);case"Macro":return this.evaluateMacro(e,t);case"NumericLiteral":return new G(Number(e.value));case"StringLiteral":return new R(e.value);case"BooleanLiteral":return new q(e.value);case"NullLiteral":return new H(e.value);case"ArrayLiteral":return new U(e.value.map((e=>this.evaluate(e,t))));case"TupleLiteral":return new Q(e.value.map((e=>this.evaluate(e,t))));case"ObjectLiteral":{const s=new Map;for(const[r,o]of e.value){const e=this.evaluate(r,t);if(!(e instanceof R))throw new Error(`Object keys must be strings: got 
${e.type}`);s.set(e.value,this.evaluate(o,t))}return new $(s)}case"Identifier":return this.evaluateIdentifier(e,t);case"CallExpression":return this.evaluateCallExpression(e,t);case"MemberExpression":return this.evaluateMemberExpression(e,t);case"UnaryExpression":return this.evaluateUnaryExpression(e,t);case"BinaryExpression":return this.evaluateBinaryExpression(e,t);case"FilterExpression":return this.evaluateFilterExpression(e,t);case"TestExpression":return this.evaluateTestExpression(e,t);default:throw new SyntaxError(`Unknown node type: ${e.type}`)}}};function Z(e){switch(typeof e){case"number":return new G(e);case"string":return new R(e);case"boolean":return new q(e);case"undefined":return new J;case"object":return null===e?new H:Array.isArray(e)?new U(e.map(Z)):new $(new Map(Object.entries(e).map((([e,t])=>[e,Z(t)]))));case"function":return new X(((t,s)=>Z(e(...t.map((e=>e.value)))??null)));default:throw new Error(`Cannot convert to runtime value: ${e}`)}}function ee(e,t,s){const r=s??0;switch(e.type){case"NullValue":case"UndefinedValue":return"null";case"NumericValue":case"StringValue":case"BooleanValue":return JSON.stringify(e.value);case"ArrayValue":case"ObjectValue":{const s=t?" 
".repeat(t):"",o="\n"+s.repeat(r),n=o+s;if("ArrayValue"===e.type){const s=e.value.map((e=>ee(e,t,r+1)));return t?`[${n}${s.join(`,${n}`)}${o}]`:`[${s.join(", ")}]`}{const s=Array.from(e.value.entries()).map((([e,s])=>{const o=`"${e}": ${ee(s,t,r+1)}`;return t?`${n}${o}`:o}));return t?`{${s.join(",")}${o}}`:`{${s.join(", ")}}`}}default:throw new Error(`Cannot convert to JSON: ${e.type}`)}}var te=class{parsed;constructor(e){const t=d(e,{lstrip_blocks:!0,trim_blocks:!0});this.parsed=N(t)}render(e){const t=new Y;t.set("false",!1),t.set("true",!0),t.set("raise_exception",(e=>{throw new Error(e)})),t.set("range",V);for(const[s,r]of Object.entries(e))t.set(s,r);return new K(t).run(this.parsed).value}}},"./node_modules/onnxruntime-common/dist/esm/backend-impl.js":(e,t,s)=>{s.r(t),s.d(t,{registerBackend:()=>n,resolveBackendAndExecutionProviders:()=>i});const r=new Map,o=[],n=(e,t,s)=>{if(!t||"function"!=typeof t.init||"function"!=typeof t.createInferenceSessionHandler)throw new TypeError("not a valid backend");{const n=r.get(e);if(void 0===n)r.set(e,{backend:t,priority:s});else{if(n.priority>s)return;if(n.priority===s&&n.backend!==t)throw new Error(`cannot register backend "${e}" using priority ${s}`)}if(s>=0){const t=o.indexOf(e);-1!==t&&o.splice(t,1);for(let t=0;t<o.length;t++)if(r.get(o[t]).priority<=s)return void o.splice(t,0,e);o.push(e)}}},a=async e=>{const t=r.get(e);if(!t)return"backend not found.";if(t.initialized)return t.backend;if(t.aborted)return t.error;{const s=!!t.initPromise;try{return s||(t.initPromise=t.backend.init(e)),await t.initPromise,t.initialized=!0,t.backend}catch(e){return s||(t.error=`${e}`,t.aborted=!0),t.error}finally{delete t.initPromise}}},i=async e=>{const t=e.executionProviders||[],s=t.map((e=>"string"==typeof e?e:e.name)),r=0===s.length?o:s;let n;const i=[],l=new Set;for(const e of r){const t=await a(e);"string"==typeof t?i.push({name:e,err:t}):(n||(n=t),n===t&&l.add(e))}if(!n)throw new Error(`no available backend found. 
ERR: ${i.map((e=>`[${e.name}] ${e.err}`)).join(", ")}`);for(const{name:e,err:t}of i)s.includes(e)&&console.warn(`removing requested execution provider "${e}" from session options because it is not available: ${t}`);const c=t.filter((e=>l.has("string"==typeof e?e:e.name)));return[n,new Proxy(e,{get:(e,t)=>"executionProviders"===t?c:Reflect.get(e,t)})]}},"./node_modules/onnxruntime-common/dist/esm/backend.js":(e,t,s)=>{s.r(t),s.d(t,{registerBackend:()=>r.registerBackend});var r=s("./node_modules/onnxruntime-common/dist/esm/backend-impl.js")},"./node_modules/onnxruntime-common/dist/esm/env-impl.js":(e,t,s)=>{s.r(t),s.d(t,{env:()=>n});var r=s("./node_modules/onnxruntime-common/dist/esm/version.js");let o="warning";const n={wasm:{},webgl:{},webgpu:{},versions:{common:r.version},set logLevel(e){if(void 0!==e){if("string"!=typeof e||-1===["verbose","info","warning","error","fatal"].indexOf(e))throw new Error(`Unsupported logging level: ${e}`);o=e}},get logLevel(){return o}};Object.defineProperty(n,"logLevel",{enumerable:!0})},"./node_modules/onnxruntime-common/dist/esm/env.js":(e,t,s)=>{s.r(t),s.d(t,{env:()=>r});const r=s("./node_modules/onnxruntime-common/dist/esm/env-impl.js").env},"./node_modules/onnxruntime-common/dist/esm/index.js":(e,t,s)=>{s.r(t),s.d(t,{InferenceSession:()=>n.InferenceSession,TRACE:()=>i.TRACE,TRACE_FUNC_BEGIN:()=>i.TRACE_FUNC_BEGIN,TRACE_FUNC_END:()=>i.TRACE_FUNC_END,Tensor:()=>a.Tensor,TrainingSession:()=>l.TrainingSession,env:()=>o.env,registerBackend:()=>r.registerBackend});var 
r=s("./node_modules/onnxruntime-common/dist/esm/backend.js"),o=s("./node_modules/onnxruntime-common/dist/esm/env.js"),n=s("./node_modules/onnxruntime-common/dist/esm/inference-session.js"),a=s("./node_modules/onnxruntime-common/dist/esm/tensor.js"),i=(s("./node_modules/onnxruntime-common/dist/esm/tensor-conversion.js"),s("./node_modules/onnxruntime-common/dist/esm/tensor-factory.js"),s("./node_modules/onnxruntime-common/dist/esm/trace.js")),l=(s("./node_modules/onnxruntime-common/dist/esm/onnx-model.js"),s("./node_modules/onnxruntime-common/dist/esm/onnx-value.js"),s("./node_modules/onnxruntime-common/dist/esm/training-session.js"))},"./node_modules/onnxruntime-common/dist/esm/inference-session-impl.js":(e,t,s)=>{s.r(t),s.d(t,{InferenceSession:()=>a});var r=s("./node_modules/onnxruntime-common/dist/esm/backend-impl.js"),o=s("./node_modules/onnxruntime-common/dist/esm/tensor.js"),n=s("./node_modules/onnxruntime-common/dist/esm/trace.js");class a{constructor(e){this.handler=e}async run(e,t,s){(0,n.TRACE_FUNC_BEGIN)();const r={};let a={};if("object"!=typeof e||null===e||e instanceof o.Tensor||Array.isArray(e))throw new TypeError("'feeds' must be an object that use input names as keys and OnnxValue as corresponding values.");let i=!0;if("object"==typeof t){if(null===t)throw new TypeError("Unexpected argument[1]: cannot be null.");if(t instanceof o.Tensor)throw new TypeError("'fetches' cannot be a Tensor");if(Array.isArray(t)){if(0===t.length)throw new TypeError("'fetches' cannot be an empty array.");i=!1;for(const e of t){if("string"!=typeof e)throw new TypeError("'fetches' must be a string array or an object.");if(-1===this.outputNames.indexOf(e))throw new RangeError(`'fetches' contains invalid output name: ${e}.`);r[e]=null}if("object"==typeof s&&null!==s)a=s;else if(void 0!==s)throw new TypeError("'options' must be an object.")}else{let e=!1;const n=Object.getOwnPropertyNames(t);for(const s of this.outputNames)if(-1!==n.indexOf(s)){const n=t[s];(null===n||n 
instanceof o.Tensor)&&(e=!0,i=!1,r[s]=n)}if(e){if("object"==typeof s&&null!==s)a=s;else if(void 0!==s)throw new TypeError("'options' must be an object.")}else a=t}}else if(void 0!==t)throw new TypeError("Unexpected argument[1]: must be 'fetches' or 'options'.");for(const t of this.inputNames)if(void 0===e[t])throw new Error(`input '${t}' is missing in 'feeds'.`);if(i)for(const e of this.outputNames)r[e]=null;const l=await this.handler.run(e,r,a),c={};for(const e in l)if(Object.hasOwnProperty.call(l,e)){const t=l[e];t instanceof o.Tensor?c[e]=t:c[e]=new o.Tensor(t.type,t.data,t.dims)}return(0,n.TRACE_FUNC_END)(),c}async release(){return this.handler.dispose()}static async create(e,t,s,o){let i;(0,n.TRACE_FUNC_BEGIN)();let l={};if("string"==typeof e){if(i=e,"object"==typeof t&&null!==t)l=t;else if(void 0!==t)throw new TypeError("'options' must be an object.")}else if(e instanceof Uint8Array){if(i=e,"object"==typeof t&&null!==t)l=t;else if(void 0!==t)throw new TypeError("'options' must be an object.")}else{if(!(e instanceof ArrayBuffer||"undefined"!=typeof SharedArrayBuffer&&e instanceof SharedArrayBuffer))throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'.");{const r=e;let n=0,a=e.byteLength;if("object"==typeof t&&null!==t)l=t;else if("number"==typeof t){if(n=t,!Number.isSafeInteger(n))throw new RangeError("'byteOffset' must be an integer.");if(n<0||n>=r.byteLength)throw new RangeError(`'byteOffset' is out of range [0, ${r.byteLength}).`);if(a=e.byteLength-n,"number"==typeof s){if(a=s,!Number.isSafeInteger(a))throw new RangeError("'byteLength' must be an integer.");if(a<=0||n+a>r.byteLength)throw new RangeError(`'byteLength' is out of range (0, ${r.byteLength-n}].`);if("object"==typeof o&&null!==o)l=o;else if(void 0!==o)throw new TypeError("'options' must be an object.")}else if(void 0!==s)throw new TypeError("'byteLength' must be a number.")}else if(void 0!==t)throw new TypeError("'options' must be an object.");i=new 
Uint8Array(r,n,a)}}const[c,d]=await(0,r.resolveBackendAndExecutionProviders)(l),u=await c.createInferenceSessionHandler(i,d);return(0,n.TRACE_FUNC_END)(),new a(u)}startProfiling(){this.handler.startProfiling()}endProfiling(){this.handler.endProfiling()}get inputNames(){return this.handler.inputNames}get outputNames(){return this.handler.outputNames}}},"./node_modules/onnxruntime-common/dist/esm/inference-session.js":(e,t,s)=>{s.r(t),s.d(t,{InferenceSession:()=>r});const r=s("./node_modules/onnxruntime-common/dist/esm/inference-session-impl.js").InferenceSession},"./node_modules/onnxruntime-common/dist/esm/onnx-model.js":(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/onnx-value.js":(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js":(e,t,s)=>{s.r(t),s.d(t,{tensorToDataURL:()=>r,tensorToImageData:()=>o});const r=(e,t)=>{const s="undefined"!=typeof document?document.createElement("canvas"):new OffscreenCanvas(1,1);s.width=e.dims[3],s.height=e.dims[2];const r=s.getContext("2d");if(null!=r){let o,n;void 0!==t?.tensorLayout&&"NHWC"===t.tensorLayout?(o=e.dims[2],n=e.dims[3]):(o=e.dims[3],n=e.dims[2]);const a=void 0!==t?.format?t.format:"RGB",i=t?.norm;let l,c;void 0===i||void 0===i.mean?l=[255,255,255,255]:"number"==typeof i.mean?l=[i.mean,i.mean,i.mean,i.mean]:(l=[i.mean[0],i.mean[1],i.mean[2],0],void 0!==i.mean[3]&&(l[3]=i.mean[3])),void 0===i||void 0===i.bias?c=[0,0,0,0]:"number"==typeof i.bias?c=[i.bias,i.bias,i.bias,i.bias]:(c=[i.bias[0],i.bias[1],i.bias[2],0],void 0!==i.bias[3]&&(c[3]=i.bias[3]));const d=n*o;let u=0,p=d,m=2*d,_=-1;"RGBA"===a?(u=0,p=d,m=2*d,_=3*d):"RGB"===a?(u=0,p=d,m=2*d):"RBG"===a&&(u=0,m=d,p=2*d);for(let t=0;t<n;t++)for(let s=0;s<o;s++){const o=(e.data[u++]-c[0])*l[0],n=(e.data[p++]-c[1])*l[1],a=(e.data[m++]-c[2])*l[2],i=-1===_?255:(e.data[_++]-c[3])*l[3];r.fillStyle="rgba("+o+","+n+","+a+","+i+")",r.fillRect(s,t,1,1)}if("toDataURL"in s)return s.toDataURL();throw new Error("toDataURL is not 
supported")}throw new Error("Can not access image data")},o=(e,t)=>{const s="undefined"!=typeof document?document.createElement("canvas").getContext("2d"):new OffscreenCanvas(1,1).getContext("2d");let r;if(null==s)throw new Error("Can not access image data");{let o,n,a;void 0!==t?.tensorLayout&&"NHWC"===t.tensorLayout?(o=e.dims[2],n=e.dims[1],a=e.dims[3]):(o=e.dims[3],n=e.dims[2],a=e.dims[1]);const i=void 0!==t&&void 0!==t.format?t.format:"RGB",l=t?.norm;let c,d;void 0===l||void 0===l.mean?c=[255,255,255,255]:"number"==typeof l.mean?c=[l.mean,l.mean,l.mean,l.mean]:(c=[l.mean[0],l.mean[1],l.mean[2],255],void 0!==l.mean[3]&&(c[3]=l.mean[3])),void 0===l||void 0===l.bias?d=[0,0,0,0]:"number"==typeof l.bias?d=[l.bias,l.bias,l.bias,l.bias]:(d=[l.bias[0],l.bias[1],l.bias[2],0],void 0!==l.bias[3]&&(d[3]=l.bias[3]));const u=n*o;if(void 0!==t&&(void 0!==t.format&&4===a&&"RGBA"!==t.format||3===a&&"RGB"!==t.format&&"BGR"!==t.format))throw new Error("Tensor format doesn't match input tensor dims");const p=4;let m=0,_=1,h=2,f=3,g=0,M=u,w=2*u,T=-1;"RGBA"===i?(g=0,M=u,w=2*u,T=3*u):"RGB"===i?(g=0,M=u,w=2*u):"RBG"===i&&(g=0,w=u,M=2*u),r=s.createImageData(o,n);for(let t=0;t<n*o;m+=p,_+=p,h+=p,f+=p,t++)r.data[m]=(e.data[g++]-d[0])*c[0],r.data[_]=(e.data[M++]-d[1])*c[1],r.data[h]=(e.data[w++]-d[2])*c[2],r.data[f]=-1===T?255:(e.data[T++]-d[3])*c[3]}return r}},"./node_modules/onnxruntime-common/dist/esm/tensor-conversion.js":(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js":(e,t,s)=>{s.r(t),s.d(t,{bufferToTensor:()=>o,tensorFromGpuBuffer:()=>i,tensorFromImage:()=>n,tensorFromMLTensor:()=>l,tensorFromPinnedBuffer:()=>c,tensorFromTexture:()=>a});var r=s("./node_modules/onnxruntime-common/dist/esm/tensor-impl.js");const o=(e,t)=>{if(void 0===e)throw new Error("Image buffer must be defined");if(void 0===t.height||void 0===t.width)throw new Error("Image height and width must be defined");if("NHWC"===t.tensorLayout)throw new Error("NHWC Tensor layout is not 
supported yet");const{height:s,width:o}=t,n=t.norm??{mean:255,bias:0};let a,i;a="number"==typeof n.mean?[n.mean,n.mean,n.mean,n.mean]:[n.mean[0],n.mean[1],n.mean[2],n.mean[3]??255],i="number"==typeof n.bias?[n.bias,n.bias,n.bias,n.bias]:[n.bias[0],n.bias[1],n.bias[2],n.bias[3]??0];const l=void 0!==t.format?t.format:"RGBA",c=void 0!==t.tensorFormat&&void 0!==t.tensorFormat?t.tensorFormat:"RGB",d=s*o,u="RGBA"===c?new Float32Array(4*d):new Float32Array(3*d);let p=4,m=0,_=1,h=2,f=3,g=0,M=d,w=2*d,T=-1;"RGB"===l&&(p=3,m=0,_=1,h=2,f=-1),"RGBA"===c?T=3*d:"RBG"===c?(g=0,w=d,M=2*d):"BGR"===c&&(w=0,M=d,g=2*d);for(let t=0;t<d;t++,m+=p,h+=p,_+=p,f+=p)u[g++]=(e[m]+i[0])/a[0],u[M++]=(e[_]+i[1])/a[1],u[w++]=(e[h]+i[2])/a[2],-1!==T&&-1!==f&&(u[T++]=(e[f]+i[3])/a[3]);return"RGBA"===c?new r.Tensor("float32",u,[1,4,s,o]):new r.Tensor("float32",u,[1,3,s,o])},n=async(e,t)=>{const s="undefined"!=typeof HTMLImageElement&&e instanceof HTMLImageElement,r="undefined"!=typeof ImageData&&e instanceof ImageData,n="undefined"!=typeof ImageBitmap&&e instanceof ImageBitmap,a="string"==typeof e;let i,l=t??{};const c=()=>{if("undefined"!=typeof document)return document.createElement("canvas");if("undefined"!=typeof OffscreenCanvas)return new OffscreenCanvas(1,1);throw new Error("Canvas is not supported")},d=e=>"undefined"!=typeof HTMLCanvasElement&&e instanceof HTMLCanvasElement||e instanceof OffscreenCanvas?e.getContext("2d"):null;if(s){const s=c();s.width=e.width,s.height=e.height;const r=d(s);if(null==r)throw new Error("Can not access image data");{let s=e.height,o=e.width;if(void 0!==t&&void 0!==t.resizedHeight&&void 0!==t.resizedWidth&&(s=t.resizedHeight,o=t.resizedWidth),void 0!==t){if(l=t,void 0!==t.tensorFormat)throw new Error("Image input config format must be RGBA for HTMLImageElement");l.tensorFormat="RGBA",l.height=s,l.width=o}else l.tensorFormat="RGBA",l.height=s,l.width=o;r.drawImage(e,0,0),i=r.getImageData(0,0,o,s).data}}else{if(!r){if(n){if(void 0===t)throw new Error("Please provide 
image config with format for Imagebitmap");const s=c();s.width=e.width,s.height=e.height;const r=d(s);if(null!=r){const t=e.height,s=e.width;return r.drawImage(e,0,0,s,t),i=r.getImageData(0,0,s,t).data,l.height=t,l.width=s,o(i,l)}throw new Error("Can not access image data")}if(a)return new Promise(((t,s)=>{const r=c(),n=d(r);if(!e||!n)return s();const a=new Image;a.crossOrigin="Anonymous",a.src=e,a.onload=()=>{r.width=a.width,r.height=a.height,n.drawImage(a,0,0,r.width,r.height);const e=n.getImageData(0,0,r.width,r.height);l.height=r.height,l.width=r.width,t(o(e.data,l))}}));throw new Error("Input data provided is not supported - aborted tensor creation")}{let s,r;if(void 0!==t&&void 0!==t.resizedWidth&&void 0!==t.resizedHeight?(s=t.resizedHeight,r=t.resizedWidth):(s=e.height,r=e.width),void 0!==t&&(l=t),l.format="RGBA",l.height=s,l.width=r,void 0!==t){const t=c();t.width=r,t.height=s;const o=d(t);if(null==o)throw new Error("Can not access image data");o.putImageData(e,0,0),i=o.getImageData(0,0,r,s).data}else i=e.data}}if(void 0!==i)return o(i,l);throw new Error("Input data provided is not supported - aborted tensor creation")},a=(e,t)=>{const{width:s,height:o,download:n,dispose:a}=t,i=[1,o,s,4];return new r.Tensor({location:"texture",type:"float32",texture:e,dims:i,download:n,dispose:a})},i=(e,t)=>{const{dataType:s,dims:o,download:n,dispose:a}=t;return new r.Tensor({location:"gpu-buffer",type:s??"float32",gpuBuffer:e,dims:o,download:n,dispose:a})},l=(e,t)=>{const{dataType:s,dims:o,download:n,dispose:a}=t;return new r.Tensor({location:"ml-tensor",type:s??"float32",mlTensor:e,dims:o,download:n,dispose:a})},c=(e,t,s)=>new 
r.Tensor({location:"cpu-pinned",type:e,data:t,dims:s??[t.length]})},"./node_modules/onnxruntime-common/dist/esm/tensor-factory.js":(e,t,s)=>{s.r(t)},"./node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js":(e,t,s)=>{s.r(t),s.d(t,{NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP:()=>o,NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP:()=>r,checkTypedArray:()=>a});const r=new Map([["float32",Float32Array],["uint8",Uint8Array],["int8",Int8Array],["uint16",Uint16Array],["int16",Int16Array],["int32",Int32Array],["bool",Uint8Array],["float64",Float64Array],["uint32",Uint32Array],["int4",Uint8Array],["uint4",Uint8Array]]),o=new Map([[Float32Array,"float32"],[Uint8Array,"uint8"],[Int8Array,"int8"],[Uint16Array,"uint16"],[Int16Array,"int16"],[Int32Array,"int32"],[Float64Array,"float64"],[Uint32Array,"uint32"]]);let n=!1;const a=()=>{if(!n){n=!0;const e="undefined"!=typeof BigInt64Array&&BigInt64Array.from,t="undefined"!=typeof BigUint64Array&&BigUint64Array.from,s="undefined"!=typeof Float16Array&&Float16Array.from;e&&(r.set("int64",BigInt64Array),o.set(BigInt64Array,"int64")),t&&(r.set("uint64",BigUint64Array),o.set(BigUint64Array,"uint64")),s?(r.set("float16",Float16Array),o.set(Float16Array,"float16")):r.set("float16",Uint16Array)}}},"./node_modules/onnxruntime-common/dist/esm/tensor-impl.js":(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>i});var r=s("./node_modules/onnxruntime-common/dist/esm/tensor-conversion-impl.js"),o=s("./node_modules/onnxruntime-common/dist/esm/tensor-factory-impl.js"),n=s("./node_modules/onnxruntime-common/dist/esm/tensor-impl-type-mapping.js"),a=s("./node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js");class i{constructor(e,t,s){let r,o;if((0,n.checkTypedArray)(),"object"==typeof e&&"location"in e)switch(this.dataLocation=e.location,r=e.type,o=e.dims,e.location){case"cpu-pinned":{const t=n.NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(r);if(!t)throw new TypeError(`unsupported type "${r}" to create tensor from pinned buffer`);if(!(e.data instanceof t))throw 
new TypeError(`buffer should be of type ${t.name}`);this.cpuData=e.data;break}case"texture":if("float32"!==r)throw new TypeError(`unsupported type "${r}" to create tensor from texture`);this.gpuTextureData=e.texture,this.downloader=e.download,this.disposer=e.dispose;break;case"gpu-buffer":if("float32"!==r&&"float16"!==r&&"int32"!==r&&"int64"!==r&&"uint32"!==r&&"uint8"!==r&&"bool"!==r&&"uint4"!==r&&"int4"!==r)throw new TypeError(`unsupported type "${r}" to create tensor from gpu buffer`);this.gpuBufferData=e.gpuBuffer,this.downloader=e.download,this.disposer=e.dispose;break;case"ml-tensor":if("float32"!==r&&"float16"!==r&&"int32"!==r&&"int64"!==r&&"uint32"!==r&&"uint64"!==r&&"int8"!==r&&"uint8"!==r&&"bool"!==r)throw new TypeError(`unsupported type "${r}" to create tensor from MLTensor`);this.mlTensorData=e.mlTensor,this.downloader=e.download,this.disposer=e.dispose;break;default:throw new Error(`Tensor constructor: unsupported location '${this.dataLocation}'`)}else{let a,i;if("string"==typeof e)if(r=e,i=s,"string"===e){if(!Array.isArray(t))throw new TypeError("A string tensor's data must be a string array.");a=t}else{const s=n.NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(e);if(void 0===s)throw new TypeError(`Unsupported tensor type: ${e}.`);if(Array.isArray(t)){if("float16"===e&&s===Uint16Array||"uint4"===e||"int4"===e)throw new TypeError(`Creating a ${e} tensor from number array is not supported. 
Please use ${s.name} as data.`);a="uint64"===e||"int64"===e?s.from(t,BigInt):s.from(t)}else if(t instanceof s)a=t;else{if(!(t instanceof Uint8ClampedArray))throw new TypeError(`A ${r} tensor's data must be type of ${s}`);if("uint8"!==e)throw new TypeError("A Uint8ClampedArray tensor's data must be type of uint8");a=Uint8Array.from(t)}}else if(i=t,Array.isArray(e)){if(0===e.length)throw new TypeError("Tensor type cannot be inferred from an empty array.");const t=typeof e[0];if("string"===t)r="string",a=e;else{if("boolean"!==t)throw new TypeError(`Invalid element type of data array: ${t}.`);r="bool",a=Uint8Array.from(e)}}else if(e instanceof Uint8ClampedArray)r="uint8",a=Uint8Array.from(e);else{const t=n.NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.get(e.constructor);if(void 0===t)throw new TypeError(`Unsupported type for tensor data: ${e.constructor}.`);r=t,a=e}if(void 0===i)i=[a.length];else if(!Array.isArray(i))throw new TypeError("A tensor's dims must be a number array");o=i,this.cpuData=a,this.dataLocation="cpu"}const i=(0,a.calculateSize)(o);if(this.cpuData&&i!==this.cpuData.length&&("uint4"!==r&&"int4"!==r||Math.ceil(i/2)!==this.cpuData.length))throw new Error(`Tensor's size(${i}) does not match data length(${this.cpuData.length}).`);this.type=r,this.dims=o,this.size=i}static async fromImage(e,t){return(0,o.tensorFromImage)(e,t)}static fromTexture(e,t){return(0,o.tensorFromTexture)(e,t)}static fromGpuBuffer(e,t){return(0,o.tensorFromGpuBuffer)(e,t)}static fromMLTensor(e,t){return(0,o.tensorFromMLTensor)(e,t)}static fromPinnedBuffer(e,t,s){return(0,o.tensorFromPinnedBuffer)(e,t,s)}toDataURL(e){return(0,r.tensorToDataURL)(this,e)}toImageData(e){return(0,r.tensorToImageData)(this,e)}get data(){if(this.ensureValid(),!this.cpuData)throw new Error("The data is not on CPU. 
Use `getData()` to download GPU data to CPU, or use `texture` or `gpuBuffer` property to access the GPU data directly.");return this.cpuData}get location(){return this.dataLocation}get texture(){if(this.ensureValid(),!this.gpuTextureData)throw new Error("The data is not stored as a WebGL texture.");return this.gpuTextureData}get gpuBuffer(){if(this.ensureValid(),!this.gpuBufferData)throw new Error("The data is not stored as a WebGPU buffer.");return this.gpuBufferData}get mlTensor(){if(this.ensureValid(),!this.mlTensorData)throw new Error("The data is not stored as a WebNN MLTensor.");return this.mlTensorData}async getData(e){switch(this.ensureValid(),this.dataLocation){case"cpu":case"cpu-pinned":return this.data;case"texture":case"gpu-buffer":case"ml-tensor":if(!this.downloader)throw new Error("The current tensor is not created with a specified data downloader.");if(this.isDownloading)throw new Error("The current tensor is being downloaded.");try{this.isDownloading=!0;const t=await this.downloader();return this.downloader=void 0,this.dataLocation="cpu",this.cpuData=t,e&&this.disposer&&(this.disposer(),this.disposer=void 0),t}finally{this.isDownloading=!1}default:throw new Error(`cannot get data from location: ${this.dataLocation}`)}}dispose(){if(this.isDownloading)throw new Error("The current tensor is being downloaded.");this.disposer&&(this.disposer(),this.disposer=void 0),this.cpuData=void 0,this.gpuTextureData=void 0,this.gpuBufferData=void 0,this.mlTensorData=void 0,this.downloader=void 0,this.isDownloading=void 0,this.dataLocation="none"}ensureValid(){if("none"===this.dataLocation)throw new Error("The tensor is disposed.")}reshape(e){if(this.ensureValid(),this.downloader||this.disposer)throw new Error("Cannot reshape a tensor that owns GPU resource.");return(0,a.tensorReshape)(this,e)}}},"./node_modules/onnxruntime-common/dist/esm/tensor-utils-impl.js":(e,t,s)=>{s.r(t),s.d(t,{calculateSize:()=>o,tensorReshape:()=>n});var 
r=s("./node_modules/onnxruntime-common/dist/esm/tensor-impl.js");const o=e=>{let t=1;for(let s=0;s<e.length;s++){const r=e[s];if("number"!=typeof r||!Number.isSafeInteger(r))throw new TypeError(`dims[${s}] must be an integer, got: ${r}`);if(r<0)throw new RangeError(`dims[${s}] must be a non-negative integer, got: ${r}`);t*=r}return t},n=(e,t)=>{switch(e.location){case"cpu":return new r.Tensor(e.type,e.data,t);case"cpu-pinned":return new r.Tensor({location:"cpu-pinned",data:e.data,type:e.type,dims:t});case"texture":return new r.Tensor({location:"texture",texture:e.texture,type:e.type,dims:t});case"gpu-buffer":return new r.Tensor({location:"gpu-buffer",gpuBuffer:e.gpuBuffer,type:e.type,dims:t});case"ml-tensor":return new r.Tensor({location:"ml-tensor",mlTensor:e.mlTensor,type:e.type,dims:t});default:throw new Error(`tensorReshape: tensor location ${e.location} is not supported`)}}},"./node_modules/onnxruntime-common/dist/esm/tensor.js":(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>r});const r=s("./node_modules/onnxruntime-common/dist/esm/tensor-impl.js").Tensor},"./node_modules/onnxruntime-common/dist/esm/trace.js":(e,t,s)=>{s.r(t),s.d(t,{TRACE:()=>o,TRACE_FUNC_BEGIN:()=>a,TRACE_FUNC_END:()=>i});var r=s("./node_modules/onnxruntime-common/dist/esm/env-impl.js");const o=(e,t)=>{(void 0===r.env.trace?r.env.wasm.trace:r.env.trace)&&console.timeStamp(`${e}::ORT::${t}`)},n=(e,t)=>{const s=(new Error).stack?.split(/\r\n|\r|\n/g)||[];let r=!1;for(let n=0;n<s.length;n++){if(r&&!s[n].includes("TRACE_FUNC")){let r=`FUNC_${e}::${s[n].trim().split(" ")[1]}`;return t&&(r+=`::${t}`),void o("CPU",r)}s[n].includes("TRACE_FUNC")&&(r=!0)}},a=e=>{(void 0===r.env.trace?r.env.wasm.trace:r.env.trace)&&n("BEGIN",e)},i=e=>{(void 0===r.env.trace?r.env.wasm.trace:r.env.trace)&&n("END",e)}},"./node_modules/onnxruntime-common/dist/esm/training-session-impl.js":(e,t,s)=>{s.r(t),s.d(t,{TrainingSession:()=>n});var 
r=s("./node_modules/onnxruntime-common/dist/esm/backend-impl.js"),o=s("./node_modules/onnxruntime-common/dist/esm/tensor.js");class n{constructor(e,t,s){this.handler=e,this.hasOptimizerModel=t,this.hasEvalModel=s}get trainingInputNames(){return this.handler.inputNames}get trainingOutputNames(){return this.handler.outputNames}get evalInputNames(){if(this.hasEvalModel)return this.handler.evalInputNames;throw new Error("This training session has no evalModel loaded.")}get evalOutputNames(){if(this.hasEvalModel)return this.handler.evalOutputNames;throw new Error("This training session has no evalModel loaded.")}static async create(e,t){const s=e.evalModel||"",o=e.optimizerModel||"",a=t||{},[i,l]=await(0,r.resolveBackendAndExecutionProviders)(a);if(i.createTrainingSessionHandler){const t=await i.createTrainingSessionHandler(e.checkpointState,e.trainModel,s,o,l);return new n(t,!!e.optimizerModel,!!e.evalModel)}throw new Error("Training backend could not be resolved. Make sure you're using the correct configuration & WebAssembly files.")}typeNarrowingForRunStep(e,t,s,r,n){const a={};let i={};if("object"!=typeof s||null===s||s instanceof o.Tensor||Array.isArray(s))throw new TypeError("'feeds' must be an object that use input names as keys and OnnxValue as corresponding values.");let l=!0;if("object"==typeof r){if(null===r)throw new TypeError("Unexpected argument[1]: cannot be null.");if(r instanceof o.Tensor)throw new TypeError("'fetches' cannot be a Tensor");if(Array.isArray(r)){if(0===r.length)throw new TypeError("'fetches' cannot be an empty array.");l=!1;for(const e of r){if("string"!=typeof e)throw new TypeError("'fetches' must be a string array or an object.");if(-1===t.indexOf(e))throw new RangeError(`'fetches' contains invalid output name: ${e}.`);a[e]=null}if("object"==typeof n&&null!==n)i=n;else if(void 0!==n)throw new TypeError("'options' must be an object.")}else{let e=!1;const s=Object.getOwnPropertyNames(r);for(const n of t)if(-1!==s.indexOf(n)){const 
t=r[n];(null===t||t instanceof o.Tensor)&&(e=!0,l=!1,a[n]=t)}if(e){if("object"==typeof n&&null!==n)i=n;else if(void 0!==n)throw new TypeError("'options' must be an object.")}else i=r}}else if(void 0!==r)throw new TypeError("Unexpected argument[1]: must be 'fetches' or 'options'.");for(const t of e)if(void 0===s[t])throw new Error(`input '${t}' is missing in 'feeds'.`);if(l)for(const e of t)a[e]=null;return[a,i]}convertHandlerReturnTypeToMapOfTensors(e){const t={};for(const s in e)if(Object.hasOwnProperty.call(e,s)){const r=e[s];r instanceof o.Tensor?t[s]=r:t[s]=new o.Tensor(r.type,r.data,r.dims)}return t}async lazyResetGrad(){await this.handler.lazyResetGrad()}async runTrainStep(e,t,s){const[r,o]=this.typeNarrowingForRunStep(this.trainingInputNames,this.trainingOutputNames,e,t,s),n=await this.handler.runTrainStep(e,r,o);return this.convertHandlerReturnTypeToMapOfTensors(n)}async runOptimizerStep(e){if(!this.hasOptimizerModel)throw new Error("This TrainingSession has no OptimizerModel loaded.");await this.handler.runOptimizerStep(e||{})}async runEvalStep(e,t,s){if(this.hasEvalModel){const[r,o]=this.typeNarrowingForRunStep(this.evalInputNames,this.evalOutputNames,e,t,s),n=await this.handler.runEvalStep(e,r,o);return this.convertHandlerReturnTypeToMapOfTensors(n)}throw new Error("This TrainingSession has no EvalModel loaded.")}async getParametersSize(e=!0){return this.handler.getParametersSize(e)}async loadParametersBuffer(e,t=!0){const s=await this.getParametersSize(t);if(e.length!==4*s)throw new Error("Size of the buffer passed into loadParametersBuffer must match the number of parameters in the model. 
Please use getParametersSize method to check.");return this.handler.loadParametersBuffer(e,t)}async getContiguousParameters(e=!0){return this.handler.getContiguousParameters(e)}async release(){return this.handler.dispose()}}},"./node_modules/onnxruntime-common/dist/esm/training-session.js":(e,t,s)=>{s.r(t),s.d(t,{TrainingSession:()=>r});const r=s("./node_modules/onnxruntime-common/dist/esm/training-session-impl.js").TrainingSession},"./node_modules/onnxruntime-common/dist/esm/version.js":(e,t,s)=>{s.r(t),s.d(t,{version:()=>r});const r="1.20.1"},"./src/backends/onnx.js":(e,t,s)=>{var r;s.r(t),s.d(t,{Tensor:()=>i.Tensor,createInferenceSession:()=>f,deviceToExecutionProviders:()=>_,isONNXProxy:()=>w,isONNXTensor:()=>g});var o=s("./src/env.js"),n=s("onnxruntime-node"),a=s("?8b6b"),i=s("./node_modules/onnxruntime-common/dist/esm/index.js");const l=Object.freeze({auto:null,gpu:null,cpu:"cpu",wasm:"wasm",webgpu:"webgpu",cuda:"cuda",dml:"dml",webnn:{name:"webnn",deviceType:"cpu"},"webnn-npu":{name:"webnn",deviceType:"npu"},"webnn-gpu":{name:"webnn",deviceType:"gpu"},"webnn-cpu":{name:"webnn",deviceType:"cpu"}}),c=[];let d,u;const p=Symbol.for("onnxruntime");if(p in globalThis)u=globalThis[p];else if(o.apis.IS_NODE_ENV){switch(u=n.default??n,process.platform){case"win32":c.push("dml");break;case"linux":"x64"===process.arch&&c.push("cuda")}c.push("cpu"),d=["cpu"]}else u=r||(r=s.t(a,2)),o.apis.IS_WEBNN_AVAILABLE&&c.push("webnn-npu","webnn-gpu","webnn-cpu","webnn"),o.apis.IS_WEBGPU_AVAILABLE&&c.push("webgpu"),c.push("wasm"),d=["wasm"];const m=u.InferenceSession;function _(e=null){if(!e)return d;switch(e){case"auto":return c;case"gpu":return c.filter((e=>["webgpu","cuda","dml","webnn-gpu"].includes(e)))}if(c.includes(e))return[l[e]??e];throw new Error(`Unsupported device: "${e}". 
Should be one of: ${c.join(", ")}.`)}let h=null;async function f(e,t,s){h&&await h;const r=m.create(e,t);h??=r;const o=await r;return o.config=s,o}function g(e){return e instanceof u.Tensor}const M=u?.env;function w(){return M?.wasm?.proxy}M?.wasm&&(M.wasm.wasmPaths=`https://cdn.jsdelivr.net/npm/@huggingface/transformers@${o.env.version}/dist/`,M.wasm.proxy=!1,"undefined"!=typeof crossOriginIsolated&&crossOriginIsolated||(M.wasm.numThreads=1)),M?.webgpu&&(M.webgpu.powerPreference="high-performance"),o.env.backends.onnx=M},"./src/base/feature_extraction_utils.js":(e,t,s)=>{s.r(t),s.d(t,{FeatureExtractor:()=>a,validate_audio_inputs:()=>i});var r=s("./src/utils/constants.js"),o=s("./src/utils/generic.js"),n=s("./src/utils/hub.js");class a extends o.Callable{constructor(e){super(),this.config=e}static async from_pretrained(e,t){return new this(await(0,n.getModelJSON)(e,r.FEATURE_EXTRACTOR_NAME,!0,t))}}function i(e,t){if(!(e instanceof Float32Array||e instanceof Float64Array))throw new Error(`${t} expects input to be a Float32Array or a Float64Array, but got ${e?.constructor?.name??typeof e} instead. 
If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.`)}},"./src/base/image_processors_utils.js":(e,t,s)=>{s.r(t),s.d(t,{ImageProcessor:()=>w,post_process_instance_segmentation:()=>M,post_process_object_detection:()=>p,post_process_panoptic_segmentation:()=>g,post_process_semantic_segmentation:()=>m});var r=s("./src/utils/generic.js"),o=s("./src/utils/tensor.js"),n=s("./src/utils/maths.js"),a=(s("./src/utils/image.js"),s("./src/utils/core.js")),i=s("./src/utils/hub.js"),l=s("./src/utils/constants.js");function c(e,t,s=0,r=null){const o=e/t;let a=(0,n.bankers_round)(o)*t;return null!==r&&a>r&&(a=Math.floor(o)*t),a<s&&(a=Math.ceil(o)*t),a}function d([e,t],s){return[Math.max(Math.floor(e/s),1)*s,Math.max(Math.floor(t/s),1)*s]}function u([e,t,s,r]){return[e-s/2,t-r/2,e+s/2,t+r/2]}function p(e,t=.5,s=null,r=!1){const o=e.logits,a=e.pred_boxes,[i,l,c]=o.dims;if(null!==s&&s.length!==i)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let d=[];for(let e=0;e<i;++e){let i=null!==s?s[e]:null,p={boxes:[],classes:[],scores:[]},m=o[e],_=a[e];for(let e=0;e<l;++e){let s,o=m[e],a=[];if(r){s=o.sigmoid().data;for(let e=0;e<s.length;++e)s[e]>t&&a.push(e)}else{let e=(0,n.max)(o.data)[1];if(e===c-1)continue;if(s=(0,n.softmax)(o.data),s[e]<t)continue;a.push(e)}for(const t of a){let r=_[e].data;r=u(r),null!==i&&(r=r.map(((e,t)=>e*i[(t+1)%2]))),p.boxes.push(r),p.classes.push(t),p.scores.push(s[t])}}d.push(p)}return d}function m(e,t=null){const s=e.logits,r=s.dims[0];if(null!==t&&t.length!==r)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");const n=[];for(let e=0;e<r;++e){const r=null!==t?t[e]:null;let a=s[e];null!==r&&(a=(0,o.interpolate)(a,r,"bilinear",!1));const[i,l]=r??a.dims.slice(-2),c=new o.Tensor("int32",new Int32Array(i*l),[i,l]),d=a[0].data,u=c.data;for(let e=1;e<a.dims[0];++e){const 
t=a[e].data;for(let s=0;s<t.length;++s)t[s]>d[s]&&(d[s]=t[s],u[s]=e)}const p=new Array(a.dims[0]);for(let e=0;e<u.length;++e){const t=u[e];p[t]=t}const m=p.filter((e=>void 0!==e));n.push({segmentation:c,labels:m})}return n}function _(e,t,s,r){const o=[],a=[],i=[];for(let l=0;l<e.dims[0];++l){const c=e[l],d=t[l],u=(0,n.max)(c.data)[1];if(u===r)continue;const p=(0,n.softmax)(c.data)[u];p>s&&(o.push(d),a.push(p),i.push(u))}return[o,a,i]}function h(e,t,s,r=.5,o=.8){const n=[];let a=0,i=0;const l=t[s].data;for(let t=0;t<e.length;++t)e[t]===s&&(n.push(t),++a),l[t]>=r&&++i;let c=a>0&&i>0;if(c){c=a/i>o}return[c,n]}function f(e,t,s,r,n,a=null,i=null){const[l,c]=i??e[0].dims,d=new o.Tensor("int32",new Int32Array(l*c),[l,c]),u=[];if(null!==i)for(let t=0;t<e.length;++t)e[t]=(0,o.interpolate)(e[t],i,"bilinear",!1);const p=new Int32Array(e[0].data.length),m=new Float32Array(e[0].data.length);for(let s=0;s<e.length;++s){let r=t[s];const o=e[s].data;for(let e=0;e<o.length;++e)o[e]*=r,o[e]>m[e]&&(p[e]=s,m[e]=o[e])}let _=0;const f=d.data;for(let o=0;o<s.length;++o){const a=s[o],[i,l]=h(p,e,o,r,n);if(i){++_;for(const e of l)f[e]=_;u.push({id:_,label_id:a,score:t[o]})}}return[d,u]}function g(e,t=.5,s=.5,r=.8,n=null,a=null){null===n&&(console.warn("`label_ids_to_fuse` unset. 
No instance will be fused."),n=new Set);const i=e.class_queries_logits??e.logits,l=(e.masks_queries_logits??e.pred_masks).sigmoid();let[c,d,u]=i.dims;if(u-=1,null!==a&&a.length!==c)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let p=[];for(let e=0;e<c;++e){let c=null!==a?a[e]:null,d=i[e],m=l[e],[h,g,M]=_(d,m,t,u);if(0===M.length){let[e,t]=c??m.dims.slice(-2),s=new o.Tensor("int32",new Int32Array(e*t).fill(-1),[e,t]);p.push({segmentation:s,segments_info:[]});continue}let[w,T]=f(h,g,M,s,r,n,c);p.push({segmentation:w,segments_info:T})}return p}function M(e,t=.5,s=null){throw new Error("`post_process_instance_segmentation` is not yet implemented.")}class w extends r.Callable{constructor(e){super(),this.image_mean=e.image_mean??e.mean,this.image_std=e.image_std??e.std,this.resample=e.resample??2,this.do_rescale=e.do_rescale??!0,this.rescale_factor=e.rescale_factor??1/255,this.do_normalize=e.do_normalize,this.do_thumbnail=e.do_thumbnail,this.size=e.size??e.image_size,this.do_resize=e.do_resize??void 0!==this.size,this.size_divisibility=e.size_divisibility??e.size_divisor,this.do_center_crop=e.do_center_crop,this.crop_size=e.crop_size,this.do_convert_rgb=e.do_convert_rgb??!0,this.do_crop_margin=e.do_crop_margin,this.pad_size=e.pad_size,this.do_pad=e.do_pad,this.do_pad&&!this.pad_size&&this.size&&void 0!==this.size.width&&void 0!==this.size.height&&(this.pad_size=this.size),this.do_flip_channel_order=e.do_flip_channel_order??!1,this.config=e}async thumbnail(e,t,s=2){const r=e.height,o=e.width,n=t.height,a=t.width;let i=Math.min(r,n),l=Math.min(o,a);return i===r&&l===o?e:(r>o?l=Math.floor(o*i/r):o>r&&(i=Math.floor(r*l/o)),await e.resize(l,i,{resample:s}))}async crop_margin(e,t=200){const s=e.clone().grayscale(),r=(0,n.min)(s.data)[0],o=(0,n.max)(s.data)[0]-r;if(0===o)return e;const a=t/255;let i=s.width,l=s.height,c=0,d=0;const u=s.data;for(let e=0;e<s.height;++e){const t=e*s.width;for(let 
n=0;n<s.width;++n)(u[t+n]-r)/o<a&&(i=Math.min(i,n),l=Math.min(l,e),c=Math.max(c,n),d=Math.max(d,e))}return e=await e.crop([i,l,c,d])}pad_image(e,t,s,{mode:r="constant",center:o=!1,constant_values:n=0}={}){const[i,l,c]=t;let d,u;if("number"==typeof s?(d=s,u=s):(d=s.width,u=s.height),d!==l||u!==i){const s=new Float32Array(d*u*c);if(Array.isArray(n))for(let e=0;e<s.length;++e)s[e]=n[e%c];else 0!==n&&s.fill(n);const[p,m]=o?[Math.floor((d-l)/2),Math.floor((u-i)/2)]:[0,0];for(let t=0;t<i;++t){const r=(t+m)*d,o=t*l;for(let t=0;t<l;++t){const n=(r+t+p)*c,a=(o+t)*c;for(let t=0;t<c;++t)s[n+t]=e[a+t]}}if("symmetric"===r){if(o)throw new Error("`center` padding is not supported when `mode` is set to `symmetric`.");const t=i-1,r=l-1;for(let o=0;o<u;++o){const n=o*d,u=(0,a.calculateReflectOffset)(o,t)*l;for(let t=0;t<d;++t){if(o<i&&t<l)continue;const d=(n+t)*c,p=(u+(0,a.calculateReflectOffset)(t,r))*c;for(let t=0;t<c;++t)s[d+t]=e[p+t]}}}e=s,t=[u,d,c]}return[e,t]}rescale(e){for(let t=0;t<e.length;++t)e[t]=this.rescale_factor*e[t]}get_resize_output_image_size(e,t){const[s,r]=e.size;let o,n;if(this.do_thumbnail){const{height:e,width:s}=t;o=Math.min(e,s)}else Number.isInteger(t)?(o=t,n=this.config.max_size??o):void 0!==t&&(o=t.shortest_edge,n=t.longest_edge);if(void 0!==o||void 0!==n){const e=void 0===o?1:Math.max(o/s,o/r),t=s*e,a=r*e,i=void 0===n?1:Math.min(n/t,n/a);let l=Math.floor(Number((t*i).toFixed(2))),c=Math.floor(Number((a*i).toFixed(2)));return void 0!==this.size_divisibility&&([l,c]=d([l,c],this.size_divisibility)),[l,c]}if(void 0!==t&&void 0!==t.width&&void 0!==t.height){let e=t.width,o=t.height;if(this.config.keep_aspect_ratio&&this.config.ensure_multiple_of){let t=o/r,n=e/s;Math.abs(1-n)<Math.abs(1-t)?t=n:n=t,o=c(t*r,this.config.ensure_multiple_of),e=c(n*s,this.config.ensure_multiple_of)}return[e,o]}if(void 0!==this.size_divisibility)return d([s,r],this.size_divisibility);if(void 0!==t.min_pixels&&void 0!==t.max_pixels){const{min_pixels:e,max_pixels:o}=t;return 
function(e,t,s=28,r=3136,o=1003520){if(e<s||t<s)throw new Error(`height:${e} or width:${t} must be larger than factor:${s}`);if(Math.max(e,t)/Math.min(e,t)>200)throw new Error("absolute aspect ratio must be smaller than 200, got "+Math.max(e,t)/Math.min(e,t));let n=Math.round(e/s)*s,a=Math.round(t/s)*s;if(n*a>o){const r=Math.sqrt(e*t/o);n=Math.floor(e/r/s)*s,a=Math.floor(t/r/s)*s}else if(n*a<r){const o=Math.sqrt(r/(e*t));n=Math.ceil(e*o/s)*s,a=Math.ceil(t*o/s)*s}return[n,a]}(r,s,this.config.patch_size*this.config.merge_size,e,o)}throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(t)}`)}async resize(e){const[t,s]=this.get_resize_output_image_size(e,this.size);return await e.resize(t,s,{resample:this.resample})}async preprocess(e,{do_normalize:t=null,do_pad:s=null,do_convert_rgb:r=null,do_convert_grayscale:n=null,do_flip_channel_order:a=null}={}){this.do_crop_margin&&(e=await this.crop_margin(e));const[i,l]=e.size;if(r??this.do_convert_rgb?e=e.rgb():n&&(e=e.grayscale()),this.do_resize&&(e=await this.resize(e)),this.do_thumbnail&&(e=await this.thumbnail(e,this.size,this.resample)),this.do_center_crop){let t,s;Number.isInteger(this.crop_size)?(t=this.crop_size,s=this.crop_size):(t=this.crop_size.width,s=this.crop_size.height),e=await e.center_crop(t,s)}const c=[e.height,e.width];let u=Float32Array.from(e.data),p=[e.height,e.width,e.channels];if(this.do_rescale&&this.rescale(u),t??this.do_normalize){let t=this.image_mean;Array.isArray(this.image_mean)||(t=new Array(e.channels).fill(t));let s=this.image_std;if(Array.isArray(this.image_std)||(s=new Array(e.channels).fill(t)),t.length!==e.channels||s.length!==e.channels)throw new Error(`When set to arrays, the length of \`image_mean\` (${t.length}) and \`image_std\` (${s.length}) must match the number of channels in the image (${e.channels}).`);for(let r=0;r<u.length;r+=e.channels)for(let 
o=0;o<e.channels;++o)u[r+o]=(u[r+o]-t[o])/s[o]}if(s??this.do_pad)if(this.pad_size){const t=this.pad_image(u,[e.height,e.width,e.channels],this.pad_size);[u,p]=t}else if(this.size_divisibility){const[e,t]=d([p[1],p[0]],this.size_divisibility);[u,p]=this.pad_image(u,p,{width:e,height:t})}if(a??this.do_flip_channel_order){if(3!==p[2])throw new Error("Flipping channel order is only supported for RGB images.");for(let e=0;e<u.length;e+=3){const t=u[e];u[e]=u[e+2],u[e+2]=t}}return{original_size:[l,i],reshaped_input_size:c,pixel_values:new o.Tensor("float32",u,p).permute(2,0,1)}}async _call(e,...t){Array.isArray(e)||(e=[e]);const s=await Promise.all(e.map((e=>this.preprocess(e))));return{pixel_values:(0,o.stack)(s.map((e=>e.pixel_values)),0),original_sizes:s.map((e=>e.original_size)),reshaped_input_sizes:s.map((e=>e.reshaped_input_size))}}static async from_pretrained(e,t){return new this(await(0,i.getModelJSON)(e,l.IMAGE_PROCESSOR_NAME,!0,t))}}},"./src/base/processing_utils.js":(e,t,s)=>{s.r(t),s.d(t,{Processor:()=>a});var r=s("./src/utils/constants.js"),o=s("./src/utils/generic.js"),n=s("./src/utils/hub.js");class a extends o.Callable{static classes=["image_processor_class","tokenizer_class","feature_extractor_class"];static uses_processor_config=!1;constructor(e,t){super(),this.config=e,this.components=t}get image_processor(){return this.components.image_processor}get tokenizer(){return this.components.tokenizer}get feature_extractor(){return this.components.feature_extractor}apply_chat_template(e,t={}){if(!this.tokenizer)throw new Error("Unable to apply chat template without a tokenizer.");return this.tokenizer.apply_chat_template(e,{tokenize:!1,...t})}batch_decode(...e){if(!this.tokenizer)throw new Error("Unable to decode without a tokenizer.");return this.tokenizer.batch_decode(...e)}async _call(e,...t){for(const s of[this.image_processor,this.feature_extractor,this.tokenizer])if(s)return s(e,...t);throw new Error("No image processor, feature extractor, or tokenizer 
found.")}static async from_pretrained(e,t){const[s,o]=await Promise.all([this.uses_processor_config?(0,n.getModelJSON)(e,r.PROCESSOR_NAME,!0,t):{},Promise.all(this.classes.filter((e=>e in this)).map((async s=>{const r=await this[s].from_pretrained(e,t);return[s.replace(/_class$/,""),r]}))).then(Object.fromEntries)]);return new this(s,o)}}},"./src/configs.js":(e,t,s)=>{s.r(t),s.d(t,{AutoConfig:()=>l,PretrainedConfig:()=>i,getKeyValueShapes:()=>a});var r=s("./src/utils/core.js"),o=s("./src/utils/hub.js");function n(e){const t={};let s={};switch(e.model_type){case"llava":case"paligemma":case"florence2":case"llava_onevision":case"idefics3":s=n(e.text_config);break;case"moondream1":s=n(e.phi_config);break;case"musicgen":s=n(e.decoder);break;case"multi_modality":s=n(e.language_config);break;case"gpt2":case"gptj":case"jais":case"codegen":case"gpt_bigcode":t.num_heads="n_head",t.num_layers="n_layer",t.hidden_size="n_embd";break;case"gpt_neox":case"stablelm":case"opt":case"phi":case"phi3":case"falcon":t.num_heads="num_attention_heads",t.num_layers="num_hidden_layers",t.hidden_size="hidden_size";break;case"llama":case"olmo":case"olmo2":case"mobilellm":case"granite":case"cohere":case"mistral":case"starcoder2":case"qwen2":case"qwen2_vl":t.num_heads="num_key_value_heads",t.num_layers="num_hidden_layers",t.hidden_size="hidden_size",t.num_attention_heads="num_attention_heads";break;case"gemma":case"gemma2":t.num_heads="num_key_value_heads",t.num_layers="num_hidden_layers",t.dim_kv="head_dim";break;case"openelm":t.num_heads="num_kv_heads",t.num_layers="num_transformer_layers",t.dim_kv="head_dim";break;case"gpt_neo":case"donut-swin":t.num_heads="num_heads",t.num_layers="num_layers",t.hidden_size="hidden_size";break;case"bloom":t.num_heads="n_head",t.num_layers="n_layer",t.hidden_size="hidden_size";break;case"mpt":t.num_heads="n_heads",t.num_layers="n_layers",t.hidden_size="d_model";break;case"t5":case"mt5":case"longt5":t.num_decoder_layers="num_decoder_layers",t.num_decoder_heads="n
um_heads",t.decoder_dim_kv="d_kv",t.num_encoder_layers="num_layers",t.num_encoder_heads="num_heads",t.encoder_dim_kv="d_kv";break;case"bart":case"mbart":case"marian":case"whisper":case"m2m_100":case"blenderbot":case"blenderbot-small":case"florence2_language":t.num_decoder_layers="decoder_layers",t.num_decoder_heads="decoder_attention_heads",t.decoder_hidden_size="d_model",t.num_encoder_layers="encoder_layers",t.num_encoder_heads="encoder_attention_heads",t.encoder_hidden_size="d_model";break;case"speecht5":t.num_decoder_layers="decoder_layers",t.num_decoder_heads="decoder_attention_heads",t.decoder_hidden_size="hidden_size",t.num_encoder_layers="encoder_layers",t.num_encoder_heads="encoder_attention_heads",t.encoder_hidden_size="hidden_size";break;case"trocr":t.num_encoder_layers=t.num_decoder_layers="decoder_layers",t.num_encoder_heads=t.num_decoder_heads="decoder_attention_heads",t.encoder_hidden_size=t.decoder_hidden_size="d_model";break;case"musicgen_decoder":t.num_encoder_layers=t.num_decoder_layers="num_hidden_layers",t.num_encoder_heads=t.num_decoder_heads="num_attention_heads",t.encoder_hidden_size=t.decoder_hidden_size="hidden_size";break;case"vision-encoder-decoder":const o=n(e.decoder),a="num_decoder_layers"in o,i=(0,r.pick)(e,["model_type","is_encoder_decoder"]);return a?(i.num_decoder_layers=o.num_decoder_layers,i.num_decoder_heads=o.num_decoder_heads,i.decoder_hidden_size=o.decoder_hidden_size,i.num_encoder_layers=o.num_encoder_layers,i.num_encoder_heads=o.num_encoder_heads,i.encoder_hidden_size=o.encoder_hidden_size):(i.num_layers=o.num_layers,i.num_heads=o.num_heads,i.hidden_size=o.hidden_size),i}const o={...s,...(0,r.pick)(e,["model_type","multi_query","is_encoder_decoder"])};for(const s in t)o[s]=e[t[s]];return o}function a(e,{prefix:t="past_key_values",batch_size:s=1}={}){const r={},o=e.normalized_config;if(o.is_encoder_decoder&&"num_encoder_heads"in o&&"num_decoder_heads"in o){const 
e=o.encoder_dim_kv??o.encoder_hidden_size/o.num_encoder_heads,n=o.decoder_dim_kv??o.decoder_hidden_size/o.num_decoder_heads,a=[s,o.num_encoder_heads,0,e],i=[s,o.num_decoder_heads,0,n];for(let e=0;e<o.num_decoder_layers;++e)r[`${t}.${e}.encoder.key`]=a,r[`${t}.${e}.encoder.value`]=a,r[`${t}.${e}.decoder.key`]=i,r[`${t}.${e}.decoder.value`]=i}else{const e=o.num_heads,n=o.num_layers,a=o.dim_kv??o.hidden_size/(o.num_attention_heads??e);if("falcon"===o.model_type){const o=[s*e,0,a];for(let e=0;e<n;++e)r[`${t}.${e}.key`]=o,r[`${t}.${e}.value`]=o}else if(o.multi_query){const o=[s*e,0,2*a];for(let e=0;e<n;++e)r[`${t}.${e}.key_value`]=o}else if("bloom"===o.model_type){const o=[s*e,a,0],i=[s*e,0,a];for(let e=0;e<n;++e)r[`${t}.${e}.key`]=o,r[`${t}.${e}.value`]=i}else if("openelm"===o.model_type)for(let o=0;o<n;++o){const n=[s,e[o],0,a];r[`${t}.${o}.key`]=n,r[`${t}.${o}.value`]=n}else{const o=[s,e,0,a];for(let e=0;e<n;++e)r[`${t}.${e}.key`]=o,r[`${t}.${e}.value`]=o}}return r}class i{model_type=null;is_encoder_decoder=!1;max_position_embeddings;"transformers.js_config";constructor(e){Object.assign(this,e),this.normalized_config=n(this)}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:r=null,local_files_only:n=!1,revision:a="main"}={}){!s||s instanceof i||(s=new i(s));const l=s??await async function(e,t){return await(0,o.getModelJSON)(e,"config.json",!0,t)}(e,{progress_callback:t,config:s,cache_dir:r,local_files_only:n,revision:a});return new this(l)}}class l{static async from_pretrained(...e){return i.from_pretrained(...e)}}},"./src/env.js":(e,t,s)=>{s.r(t),s.d(t,{apis:()=>h,env:()=>T});var r=s("fs"),o=s("path"),n=s("url");const a="undefined"!=typeof window&&void 0!==window.document,i="undefined"!=typeof self&&"DedicatedWorkerGlobalScope"===self.constructor?.name,l="undefined"!=typeof self&&"caches"in self,c="undefined"!=typeof navigator&&"gpu"in navigator,d="undefined"!=typeof navigator&&"ml"in navigator,u="undefined"!=typeof 
process,p=u&&"node"===process?.release?.name,m=!b(r.default),_=!b(o.default),h=Object.freeze({IS_BROWSER_ENV:a,IS_WEBWORKER_ENV:i,IS_WEB_CACHE_AVAILABLE:l,IS_WEBGPU_AVAILABLE:c,IS_WEBNN_AVAILABLE:d,IS_PROCESS_AVAILABLE:u,IS_NODE_ENV:p,IS_FS_AVAILABLE:m,IS_PATH_AVAILABLE:_}),f=m&&_;let g="./";if(f){const e=Object(import.meta).url;e?g=o.default.dirname(o.default.dirname(n.default.fileURLToPath(e))):"undefined"!=typeof __dirname&&(g=o.default.dirname(__dirname))}const M=f?o.default.join(g,"/.cache/"):null,w="/models/",T={version:"3.1.2",backends:{onnx:{}},allowRemoteModels:!0,remoteHost:"https://huggingface.co/",remotePathTemplate:"{model}/resolve/{revision}/",allowLocalModels:!(a||i),localModelPath:f?o.default.join(g,w):w,useFS:m,useBrowserCache:l,useFSCache:m,cacheDir:M,useCustomCache:!1,customCache:null};function b(e){return 0===Object.keys(e).length}},"./src/generation/configuration_utils.js":(e,t,s)=>{s.r(t),s.d(t,{GenerationConfig:()=>o});var r=s("./src/utils/core.js");class o{max_length=20;max_new_tokens=null;min_length=0;min_new_tokens=null;early_stopping=!1;max_time=null;do_sample=!1;num_beams=1;num_beam_groups=1;penalty_alpha=null;use_cache=!0;temperature=1;top_k=50;top_p=1;typical_p=1;epsilon_cutoff=0;eta_cutoff=0;diversity_penalty=0;repetition_penalty=1;encoder_repetition_penalty=1;length_penalty=1;no_repeat_ngram_size=0;bad_words_ids=null;force_words_ids=null;renormalize_logits=!1;constraints=null;forced_bos_token_id=null;forced_eos_token_id=null;remove_invalid_values=!1;exponential_decay_length_penalty=null;suppress_tokens=null;streamer=null;begin_suppress_tokens=null;forced_decoder_ids=null;guidance_scale=null;num_return_sequences=1;output_attentions=!1;output_hidden_states=!1;output_scores=!1;return_dict_in_generate=!1;pad_token_id=null;bos_token_id=null;eos_token_id=null;encoder_no_repeat_ngram_size=0;decoder_start_token_id=null;generation_kwargs={};constructor(e){Object.assign(this,(0,r.pick)(e,Object.getOwnPropertyNames(this)))}}},"./src/generation/l
ogits_process.js":(e,t,s)=>{s.r(t),s.d(t,{ClassifierFreeGuidanceLogitsProcessor:()=>g,ForcedBOSTokenLogitsProcessor:()=>l,ForcedEOSTokenLogitsProcessor:()=>c,LogitsProcessor:()=>n,LogitsProcessorList:()=>i,LogitsWarper:()=>a,MinLengthLogitsProcessor:()=>_,MinNewTokensLengthLogitsProcessor:()=>h,NoBadWordsLogitsProcessor:()=>f,NoRepeatNGramLogitsProcessor:()=>p,RepetitionPenaltyLogitsProcessor:()=>m,SuppressTokensAtBeginLogitsProcessor:()=>d,TemperatureLogitsWarper:()=>M,TopKLogitsWarper:()=>T,TopPLogitsWarper:()=>w,WhisperTimeStampLogitsProcessor:()=>u});var r=s("./src/utils/generic.js"),o=(s("./src/utils/tensor.js"),s("./src/utils/maths.js"));class n extends r.Callable{_call(e,t){throw Error("`_call` should be implemented in a subclass")}}class a extends r.Callable{_call(e,t){throw Error("`_call` should be implemented in a subclass")}}class i extends r.Callable{constructor(){super(),this.processors=[]}push(e){this.processors.push(e)}extend(e){this.processors.push(...e)}_call(e,t){let s=t;for(const t of this.processors)s=t(e,s);return s}[Symbol.iterator](){return this.processors.values()}}class l extends n{constructor(e){super(),this.bos_token_id=e}_call(e,t){for(let s=0;s<e.length;++s)if(1===e[s].length){const e=t[s].data;e.fill(-1/0),e[this.bos_token_id]=0}return t}}class c extends n{constructor(e,t){super(),this.max_length=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(let s=0;s<e.length;++s)if(e[s].length===this.max_length-1){const e=t[s].data;e.fill(-1/0);for(const t of this.eos_token_id)e[t]=0}return t}}class d extends n{constructor(e,t){super(),this.begin_suppress_tokens=e,this.begin_index=t}_call(e,t){for(let s=0;s<e.length;++s)if(e[s].length===this.begin_index){const e=t[s].data;for(const t of this.begin_suppress_tokens)e[t]=-1/0}return t}}class u extends 
n{constructor(e,t){super(),this.eos_token_id=Array.isArray(e.eos_token_id)?e.eos_token_id[0]:e.eos_token_id,this.no_timestamps_token_id=e.no_timestamps_token_id,this.timestamp_begin=this.no_timestamps_token_id+1,this.begin_index=t.length,t.at(-1)===this.no_timestamps_token_id&&(this.begin_index-=1),this.max_initial_timestamp_index=e.max_initial_timestamp_index}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data;if(r[this.no_timestamps_token_id]=-1/0,e[s].length===this.begin_index-1){r.fill(-1/0),r[this.timestamp_begin]=0;continue}const n=e[s].slice(this.begin_index),a=n.length>=1&&n[n.length-1]>=this.timestamp_begin,i=n.length<2||n[n.length-2]>=this.timestamp_begin;if(a&&(i?r.subarray(this.timestamp_begin).fill(-1/0):r.subarray(0,this.eos_token_id).fill(-1/0)),e[s].length===this.begin_index&&null!==this.max_initial_timestamp_index){const e=this.timestamp_begin+this.max_initial_timestamp_index;r.subarray(e+1).fill(-1/0)}const l=(0,o.log_softmax)(r);Math.log(l.subarray(this.timestamp_begin).map(Math.exp).reduce(((e,t)=>e+t)))>(0,o.max)(l.subarray(0,this.timestamp_begin))[0]&&r.subarray(0,this.timestamp_begin).fill(-1/0)}return t}}class p extends n{constructor(e){super(),this.no_repeat_ngram_size=e}getNgrams(e){const t=e.length,s=[];for(let r=0;r<t+1-this.no_repeat_ngram_size;++r){const t=[];for(let s=0;s<this.no_repeat_ngram_size;++s)t.push(e[r+s]);s.push(t.map(Number))}const r=new Map;for(const e of s){const t=e.slice(0,e.length-1),s=JSON.stringify(t),o=r.get(s)??[];o.push(e[e.length-1]),r.set(s,o)}return r}getGeneratedNgrams(e,t){const s=t.slice(t.length+1-this.no_repeat_ngram_size,t.length);return e.get(JSON.stringify(s.map(Number)))??[]}calcBannedNgramTokens(e){const t=[];if(e.length+1<this.no_repeat_ngram_size)return t;{const t=this.getNgrams(e);return this.getGeneratedNgrams(t,e)}}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data,o=this.calcBannedNgramTokens(e[s]);for(const e of o)r[e]=-1/0}return t}}class m extends 
n{constructor(e){super(),this.penalty=e}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data;for(const t of new Set(e[s])){const e=Number(t);r[e]<0?r[e]*=this.penalty:r[e]/=this.penalty}}return t}}class _ extends n{constructor(e,t){super(),this.min_length=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(let s=0;s<e.length;++s)if(e[s].length<this.min_length){const e=t[s].data;for(const t of this.eos_token_id)e[t]=-1/0}return t}}class h extends n{constructor(e,t,s){super(),this.prompt_length_to_skip=e,this.min_new_tokens=t,this.eos_token_id=Array.isArray(s)?s:[s]}_call(e,t){for(let s=0;s<e.length;++s){if(e[s].length-this.prompt_length_to_skip<this.min_new_tokens){const e=t[s].data;for(const t of this.eos_token_id)e[t]=-1/0}}return t}}class f extends n{constructor(e,t){super(),this.bad_words_ids=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(let s=0;s<e.length;++s){const r=t[s].data,o=e[s];for(const e of this.bad_words_ids){let t=!0;for(let s=1;s<=e.length-1&&e.length<o.length;++s)if(e.at(-s-1)!=o.at(-s)){t=!1;break}t&&(r[e.at(-1)]=-1/0)}}return t}}class g extends n{constructor(e){if(super(),e<=1)throw new Error(`Require guidance scale >1 to use the classifier free guidance processor, got guidance scale ${e}.`);this.guidance_scale=e}_call(e,t){if(t.dims[0]!==2*e.length)throw new Error(`Logits should have twice the batch size of the input ids, the first half of batches corresponding to the conditional inputs, and the second half of batches corresponding to the unconditional inputs. 
Got batch size ${t.dims[0]} for the logits and ${e.length} for the input ids.`);const s=e.length,r=t.slice([0,s],null),o=t.slice([s,t.dims[0]],null);for(let e=0;e<o.data.length;++e)o.data[e]+=(r.data[e]-o.data[e])*this.guidance_scale;return o}}class M extends a{constructor(e){if(super(),"number"!=typeof e||e<=0){let t=`\`temperature\` (=${e}) must be a strictly positive float, otherwise your next token scores will be invalid.`;0===e&&(t+=" If you're looking for greedy decoding strategies, set `do_sample=false`.")}this.temperature=e}_call(e,t){const s=t.data;for(let e=0;e<s.length;++e)s[e]/=this.temperature;return t}}class w extends a{constructor(e,{filter_value:t=-1/0,min_tokens_to_keep:s=1}={}){if(super(),e<0||e>1)throw new Error(`\`top_p\` must be a float > 0 and < 1, but is ${e}`);if(!Number.isInteger(s)||s<1)throw new Error(`\`min_tokens_to_keep\` must be a positive integer, but is ${s}`);this.top_p=e,this.filter_value=t,this.min_tokens_to_keep=s}}class T extends a{constructor(e,{filter_value:t=-1/0,min_tokens_to_keep:s=1}={}){if(super(),!Number.isInteger(e)||e<0)throw new Error(`\`top_k\` must be a positive integer, but is ${e}`);this.top_k=Math.max(e,s),this.filter_value=t}}},"./src/generation/logits_sampler.js":(e,t,s)=>{s.r(t),s.d(t,{LogitsSampler:()=>a});var r=s("./src/utils/generic.js"),o=s("./src/utils/tensor.js"),n=s("./src/utils/maths.js");s("./src/generation/configuration_utils.js");class a extends r.Callable{constructor(e){super(),this.generation_config=e}async _call(e){return this.sample(e)}async sample(e){throw Error("sample should be implemented in subclasses.")}getLogits(e,t){let s=e.dims.at(-1),r=e.data;if(-1===t)r=r.slice(-s);else{let e=t*s;r=r.slice(e,e+s)}return r}randomSelect(e){let t=0;for(let s=0;s<e.length;++s)t+=e[s];let s=Math.random()*t;for(let t=0;t<e.length;++t)if(s-=e[t],s<=0)return t;return 0}static getSampler(e){if(e.do_sample)return new l(e);if(e.num_beams>1)return new c(e);if(e.num_return_sequences>1)throw 
Error(`num_return_sequences has to be 1 when doing greedy search, but is ${e.num_return_sequences}.`);return new i(e)}}class i extends a{async sample(e){const t=(0,n.max)(e.data)[1];return[[BigInt(t),0]]}}class l extends a{async sample(e){let t=e.dims.at(-1);this.generation_config.top_k>0&&(t=Math.min(this.generation_config.top_k,t));const[s,r]=await(0,o.topk)(e,t),a=(0,n.softmax)(s.data);return Array.from({length:this.generation_config.num_beams},(()=>{const e=this.randomSelect(a);return[r.data[e],Math.log(a[e])]}))}}class c extends a{async sample(e){let t=e.dims.at(-1);this.generation_config.top_k>0&&(t=Math.min(this.generation_config.top_k,t));const[s,r]=await(0,o.topk)(e,t),a=(0,n.softmax)(s.data);return Array.from({length:this.generation_config.num_beams},((e,t)=>[r.data[t],Math.log(a[t])]))}}},"./src/generation/stopping_criteria.js":(e,t,s)=>{s.r(t),s.d(t,{EosTokenCriteria:()=>i,InterruptableStoppingCriteria:()=>l,MaxLengthCriteria:()=>a,StoppingCriteria:()=>o,StoppingCriteriaList:()=>n});var r=s("./src/utils/generic.js");class o extends r.Callable{_call(e,t){throw Error("StoppingCriteria needs to be subclassed")}}class n extends r.Callable{constructor(){super(),this.criteria=[]}push(e){this.criteria.push(e)}extend(e){e instanceof n?e=e.criteria:e instanceof o&&(e=[e]),this.criteria.push(...e)}_call(e,t){const s=new Array(e.length).fill(!1);for(const r of this.criteria){const o=r(e,t);for(let e=0;e<s.length;++e)s[e]||=o[e]}return s}[Symbol.iterator](){return this.criteria.values()}}class a extends o{constructor(e,t=null){super(),this.max_length=e,this.max_position_embeddings=t}_call(e){return e.map((e=>e.length>=this.max_length))}}class i extends o{constructor(e){super(),Array.isArray(e)||(e=[e]),this.eos_token_id=e}_call(e,t){return e.map((e=>{const t=e.at(-1);return this.eos_token_id.some((e=>t==e))}))}}class l extends o{constructor(){super(),this.interrupted=!1}interrupt(){this.interrupted=!0}reset(){this.interrupted=!1}_call(e,t){return new 
Array(e.length).fill(this.interrupted)}}},"./src/generation/streamers.js":(e,t,s)=>{s.r(t),s.d(t,{BaseStreamer:()=>a,TextStreamer:()=>l,WhisperTextStreamer:()=>c});var r=s("./src/utils/core.js"),o=s("./src/tokenizers.js"),n=s("./src/env.js");class a{put(e){throw Error("Not implemented")}end(){throw Error("Not implemented")}}const i=n.apis.IS_PROCESS_AVAILABLE?e=>process.stdout.write(e):e=>console.log(e);class l extends a{constructor(e,{skip_prompt:t=!1,callback_function:s=null,token_callback_function:r=null,decode_kwargs:o={},...n}={}){super(),this.tokenizer=e,this.skip_prompt=t,this.callback_function=s??i,this.token_callback_function=r,this.decode_kwargs={...o,...n},this.token_cache=[],this.print_len=0,this.next_tokens_are_prompt=!0}put(e){if(e.length>1)throw Error("TextStreamer only supports batch size of 1");if(this.skip_prompt&&this.next_tokens_are_prompt)return void(this.next_tokens_are_prompt=!1);const t=e[0];this.token_callback_function?.(t),this.token_cache=(0,r.mergeArrays)(this.token_cache,t);const s=this.tokenizer.decode(this.token_cache,this.decode_kwargs);let n;s.endsWith("\n")?(n=s.slice(this.print_len),this.token_cache=[],this.print_len=0):s.length>0&&(0,o.is_chinese_char)(s.charCodeAt(s.length-1))?(n=s.slice(this.print_len),this.print_len+=n.length):(n=s.slice(this.print_len,s.lastIndexOf(" ")+1),this.print_len+=n.length),this.on_finalized_text(n,!1)}end(){let e;if(this.token_cache.length>0){e=this.tokenizer.decode(this.token_cache,this.decode_kwargs).slice(this.print_len),this.token_cache=[],this.print_len=0}else e="";this.next_tokens_are_prompt=!0,this.on_finalized_text(e,!0)}on_finalized_text(e,t){e.length>0&&this.callback_function?.(e),t&&this.callback_function===i&&n.apis.IS_PROCESS_AVAILABLE&&this.callback_function?.("\n")}}class c extends 
l{constructor(e,{skip_prompt:t=!1,callback_function:s=null,token_callback_function:r=null,on_chunk_start:o=null,on_chunk_end:n=null,on_finalize:a=null,time_precision:i=.02,skip_special_tokens:l=!0,decode_kwargs:c={}}={}){super(e,{skip_prompt:t,callback_function:s,token_callback_function:r,decode_kwargs:{skip_special_tokens:l,...c}}),this.timestamp_begin=e.timestamp_begin,this.on_chunk_start=o,this.on_chunk_end=n,this.on_finalize=a,this.time_precision=i,this.waiting_for_timestamp=!1}put(e){if(e.length>1)throw Error("WhisperTextStreamer only supports batch size of 1");const t=e[0];if(1===t.length){const s=Number(t[0])-this.timestamp_begin;if(s>=0){const t=s*this.time_precision;this.waiting_for_timestamp?this.on_chunk_end?.(t):this.on_chunk_start?.(t),this.waiting_for_timestamp=!this.waiting_for_timestamp,e=[[]]}}return super.put(e)}end(){super.end(),this.on_finalize?.()}}},"./src/models.js":(e,t,s)=>{s.r(t),s.d(t,{ASTForAudioClassification:()=>cs,ASTModel:()=>ls,ASTPreTrainedModel:()=>is,AlbertForMaskedLM:()=>Mt,AlbertForQuestionAnswering:()=>gt,AlbertForSequenceClassification:()=>ft,AlbertModel:()=>ht,AlbertPreTrainedModel:()=>_t,AutoModel:()=>Cl,AutoModelForAudioClassification:()=>Ql,AutoModelForAudioFrameClassification:()=>Hl,AutoModelForCTC:()=>Ul,AutoModelForCausalLM:()=>jl,AutoModelForDepthEstimation:()=>Zl,AutoModelForDocumentQuestionAnswering:()=>Jl,AutoModelForImageClassification:()=>Bl,AutoModelForImageFeatureExtraction:()=>sc,AutoModelForImageMatting:()=>Yl,AutoModelForImageSegmentation:()=>Dl,AutoModelForImageToImage:()=>Kl,AutoModelForMaskGeneration:()=>Wl,AutoModelForMaskedLM:()=>Nl,AutoModelForNormalEstimation:()=>ec,AutoModelForObjectDetection:()=>ql,AutoModelForPoseEstimation:()=>tc,AutoModelForQuestionAnswering:()=>Vl,AutoModelForSemanticSegmentation:()=>Gl,AutoModelForSeq2SeqLM:()=>El,AutoModelForSequenceClassification:()=>Sl,AutoModelForSpeechSeq2Seq:()=>Ll,AutoModelForTextToSpectrogram:()=>Il,AutoModelForTextToWaveform:()=>zl,AutoModelForTokenClas
sification:()=>Al,AutoModelForUniversalSegmentation:()=>Rl,AutoModelForVision2Seq:()=>Ol,AutoModelForXVector:()=>Xl,AutoModelForZeroShotObjectDetection:()=>$l,BartForConditionalGeneration:()=>At,BartForSequenceClassification:()=>Et,BartModel:()=>St,BartPretrainedModel:()=>Ct,BaseModelOutput:()=>J,BeitForImageClassification:()=>Go,BeitModel:()=>Do,BeitPreTrainedModel:()=>Bo,BertForMaskedLM:()=>Z,BertForQuestionAnswering:()=>se,BertForSequenceClassification:()=>ee,BertForTokenClassification:()=>te,BertModel:()=>K,BertPreTrainedModel:()=>Y,BlenderbotForConditionalGeneration:()=>Bt,BlenderbotModel:()=>Ot,BlenderbotPreTrainedModel:()=>Vt,BlenderbotSmallForConditionalGeneration:()=>Rt,BlenderbotSmallModel:()=>Gt,BlenderbotSmallPreTrainedModel:()=>Dt,BloomForCausalLM:()=>Jr,BloomModel:()=>Hr,BloomPreTrainedModel:()=>Xr,CLIPModel:()=>ys,CLIPPreTrainedModel:()=>ks,CLIPSegForImageSegmentation:()=>Rs,CLIPSegModel:()=>Gs,CLIPSegPreTrainedModel:()=>Ds,CLIPTextModel:()=>Fs,CLIPTextModelWithProjection:()=>vs,CLIPVisionModel:()=>Cs,CLIPVisionModelWithProjection:()=>Ss,CamembertForMaskedLM:()=>ye,CamembertForQuestionAnswering:()=>Ce,CamembertForSequenceClassification:()=>Fe,CamembertForTokenClassification:()=>ve,CamembertModel:()=>ke,CamembertPreTrainedModel:()=>xe,CausalLMOutput:()=>cc,CausalLMOutputWithPast:()=>dc,ChineseCLIPModel:()=>js,ChineseCLIPPreTrainedModel:()=>zs,ClapAudioModelWithProjection:()=>ri,ClapModel:()=>ti,ClapPreTrainedModel:()=>ei,ClapTextModelWithProjection:()=>si,CodeGenForCausalLM:()=>cr,CodeGenModel:()=>lr,CodeGenPreTrainedModel:()=>ir,CohereForCausalLM:()=>vr,CohereModel:()=>Fr,CoherePreTrainedModel:()=>yr,ConvBertForMaskedLM:()=>me,ConvBertForQuestionAnswering:()=>fe,ConvBertForSequenceClassification:()=>_e,ConvBertForTokenClassification:()=>he,ConvBertModel:()=>pe,ConvBertPreTrainedModel:()=>ue,ConvNextForImageClassification:()=>Bn,ConvNextModel:()=>On,ConvNextPreTrainedModel:()=>Vn,ConvNextV2ForImageClassification:()=>Rn,ConvNextV2Model:()=>Gn,ConvNextV2
PreTrainedModel:()=>Dn,DPTForDepthEstimation:()=>Tn,DPTModel:()=>wn,DPTPreTrainedModel:()=>Mn,DebertaForMaskedLM:()=>Ee,DebertaForQuestionAnswering:()=>ze,DebertaForSequenceClassification:()=>Le,DebertaForTokenClassification:()=>Ie,DebertaModel:()=>Ae,DebertaPreTrainedModel:()=>Se,DebertaV2ForMaskedLM:()=>Ve,DebertaV2ForQuestionAnswering:()=>De,DebertaV2ForSequenceClassification:()=>Oe,DebertaV2ForTokenClassification:()=>Be,DebertaV2Model:()=>Ne,DebertaV2PreTrainedModel:()=>je,DecisionTransformerModel:()=>Ii,DecisionTransformerPreTrainedModel:()=>Li,DeiTForImageClassification:()=>on,DeiTModel:()=>rn,DeiTPreTrainedModel:()=>sn,DepthAnythingForDepthEstimation:()=>Pn,DepthAnythingPreTrainedModel:()=>bn,DepthProForDepthEstimation:()=>Cn,DepthProPreTrainedModel:()=>vn,DetrForObjectDetection:()=>$o,DetrForSegmentation:()=>Wo,DetrModel:()=>qo,DetrObjectDetectionOutput:()=>Uo,DetrPreTrainedModel:()=>Ro,DetrSegmentationOutput:()=>Qo,Dinov2ForImageClassification:()=>Wn,Dinov2Model:()=>$n,Dinov2PreTrainedModel:()=>qn,DistilBertForMaskedLM:()=>Ue,DistilBertForQuestionAnswering:()=>We,DistilBertForSequenceClassification:()=>qe,DistilBertForTokenClassification:()=>$e,DistilBertModel:()=>Re,DistilBertPreTrainedModel:()=>Ge,DonutSwinModel:()=>Nn,DonutSwinPreTrainedModel:()=>jn,EfficientNetForImageClassification:()=>hi,EfficientNetModel:()=>_i,EfficientNetPreTrainedModel:()=>mi,ElectraForMaskedLM:()=>we,ElectraForQuestionAnswering:()=>Pe,ElectraForSequenceClassification:()=>Te,ElectraForTokenClassification:()=>be,ElectraModel:()=>Me,ElectraPreTrainedModel:()=>ge,EsmForMaskedLM:()=>He,EsmForSequenceClassification:()=>Je,EsmForTokenClassification:()=>Ye,EsmModel:()=>Xe,EsmPreTrainedModel:()=>Qe,FalconForCausalLM:()=>Za,FalconModel:()=>Ka,FalconPreTrainedModel:()=>Ya,FastViTForImageClassification:()=>ko,FastViTModel:()=>xo,FastViTPreTrainedModel:()=>Po,Florence2ForConditionalGeneration:()=>ws,Florence2PreTrainedModel:()=>Ms,GLPNForDepthEstimation:()=>zn,GLPNModel:()=>In,GLPNPreTrainedM
odel:()=>Ln,GPT2LMHeadModel:()=>Ws,GPT2Model:()=>$s,GPT2PreTrainedModel:()=>qs,GPTBigCodeForCausalLM:()=>ar,GPTBigCodeModel:()=>nr,GPTBigCodePreTrainedModel:()=>or,GPTJForCausalLM:()=>rr,GPTJModel:()=>sr,GPTJPreTrainedModel:()=>tr,GPTNeoForCausalLM:()=>Ys,GPTNeoModel:()=>Js,GPTNeoPreTrainedModel:()=>Hs,GPTNeoXForCausalLM:()=>er,GPTNeoXModel:()=>Zs,GPTNeoXPreTrainedModel:()=>Ks,Gemma2ForCausalLM:()=>Ir,Gemma2Model:()=>Lr,Gemma2PreTrainedModel:()=>Er,GemmaForCausalLM:()=>Ar,GemmaModel:()=>Sr,GemmaPreTrainedModel:()=>Cr,GraniteForCausalLM:()=>kr,GraniteModel:()=>xr,GranitePreTrainedModel:()=>Pr,GroupViTModel:()=>bo,GroupViTPreTrainedModel:()=>To,HieraForImageClassification:()=>ln,HieraModel:()=>an,HieraPreTrainedModel:()=>nn,HubertForCTC:()=>Aa,HubertForSequenceClassification:()=>Ea,HubertModel:()=>Sa,HubertPreTrainedModel:()=>Ca,IJepaForImageClassification:()=>lo,IJepaModel:()=>io,IJepaPreTrainedModel:()=>ao,Idefics3ForConditionalGeneration:()=>xs,Idefics3PreTrainedModel:()=>Ps,ImageMattingOutput:()=>uc,JAISLMHeadModel:()=>Xs,JAISModel:()=>Qs,JAISPreTrainedModel:()=>Us,JinaCLIPModel:()=>Vs,JinaCLIPPreTrainedModel:()=>Ns,JinaCLIPTextModel:()=>Os,JinaCLIPVisionModel:()=>Bs,LlamaForCausalLM:()=>pr,LlamaModel:()=>ur,LlamaPreTrainedModel:()=>dr,LlavaForConditionalGeneration:()=>hs,LlavaOnevisionForConditionalGeneration:()=>fs,LlavaPreTrainedModel:()=>_s,LongT5ForConditionalGeneration:()=>kt,LongT5Model:()=>xt,LongT5PreTrainedModel:()=>Pt,M2M100ForConditionalGeneration:()=>oa,M2M100Model:()=>ra,M2M100PreTrainedModel:()=>sa,MBartForCausalLM:()=>Nt,MBartForConditionalGeneration:()=>zt,MBartForSequenceClassification:()=>jt,MBartModel:()=>It,MBartPreTrainedModel:()=>Lt,MPNetForMaskedLM:()=>nt,MPNetForQuestionAnswering:()=>lt,MPNetForSequenceClassification:()=>at,MPNetForTokenClassification:()=>it,MPNetModel:()=>ot,MPNetPreTrainedModel:()=>rt,MT5ForConditionalGeneration:()=>vt,MT5Model:()=>Ft,MT5PreTrainedModel:()=>yt,MarianMTModel:()=>ta,MarianModel:()=>ea,MarianPreTrainedModel
:()=>Zn,MaskFormerForInstanceSegmentation:()=>En,MaskFormerModel:()=>An,MaskFormerPreTrainedModel:()=>Sn,MaskedLMOutput:()=>ic,MgpstrForSceneTextRecognition:()=>Oi,MgpstrModelOutput:()=>Ni,MgpstrPreTrainedModel:()=>Vi,MistralForCausalLM:()=>Qa,MistralModel:()=>Ua,MistralPreTrainedModel:()=>Wa,MobileBertForMaskedLM:()=>et,MobileBertForQuestionAnswering:()=>st,MobileBertForSequenceClassification:()=>tt,MobileBertModel:()=>Ze,MobileBertPreTrainedModel:()=>Ke,MobileLLMForCausalLM:()=>hr,MobileLLMModel:()=>_r,MobileLLMPreTrainedModel:()=>mr,MobileNetV1ForImageClassification:()=>Pi,MobileNetV1Model:()=>bi,MobileNetV1PreTrainedModel:()=>Ti,MobileNetV2ForImageClassification:()=>yi,MobileNetV2Model:()=>ki,MobileNetV2PreTrainedModel:()=>xi,MobileNetV3ForImageClassification:()=>Ci,MobileNetV3Model:()=>vi,MobileNetV3PreTrainedModel:()=>Fi,MobileNetV4ForImageClassification:()=>Ei,MobileNetV4Model:()=>Ai,MobileNetV4PreTrainedModel:()=>Si,MobileViTForImageClassification:()=>So,MobileViTModel:()=>Co,MobileViTPreTrainedModel:()=>vo,MobileViTV2ForImageClassification:()=>Lo,MobileViTV2Model:()=>Eo,MobileViTV2PreTrainedModel:()=>Ao,ModelOutput:()=>H,Moondream1ForConditionalGeneration:()=>gs,MptForCausalLM:()=>Zr,MptModel:()=>Kr,MptPreTrainedModel:()=>Yr,MultiModalityCausalLM:()=>ji,MultiModalityPreTrainedModel:()=>zi,MusicgenForCausalLM:()=>Mi,MusicgenForConditionalGeneration:()=>wi,MusicgenModel:()=>gi,MusicgenPreTrainedModel:()=>fi,NomicBertModel:()=>oe,NomicBertPreTrainedModel:()=>re,OPTForCausalLM:()=>so,OPTModel:()=>to,OPTPreTrainedModel:()=>eo,Olmo2ForCausalLM:()=>br,Olmo2Model:()=>Tr,Olmo2PreTrainedModel:()=>wr,OlmoForCausalLM:()=>Mr,OlmoModel:()=>gr,OlmoPreTrainedModel:()=>fr,OpenELMForCausalLM:()=>Nr,OpenELMModel:()=>jr,OpenELMPreTrainedModel:()=>zr,OwlViTForObjectDetection:()=>jo,OwlViTModel:()=>zo,OwlViTPreTrainedModel:()=>Io,Owlv2ForObjectDetection:()=>Oo,Owlv2Model:()=>Vo,Owlv2PreTrainedModel:()=>No,PaliGemmaForConditionalGeneration:()=>bs,PaliGemmaPreTrainedModel:()=>Ts,P
atchTSMixerForPrediction:()=>$i,PatchTSMixerModel:()=>qi,PatchTSMixerPreTrainedModel:()=>Ri,PatchTSTForPrediction:()=>Gi,PatchTSTModel:()=>Di,PatchTSTPreTrainedModel:()=>Bi,Phi3ForCausalLM:()=>Qr,Phi3Model:()=>Ur,Phi3PreTrainedModel:()=>Wr,PhiForCausalLM:()=>$r,PhiModel:()=>qr,PhiPreTrainedModel:()=>Rr,PreTrainedModel:()=>X,PretrainedMixin:()=>Wi,PvtForImageClassification:()=>_o,PvtModel:()=>mo,PvtPreTrainedModel:()=>po,PyAnnoteForAudioFrameClassification:()=>pa,PyAnnoteModel:()=>ua,PyAnnotePreTrainedModel:()=>da,QuestionAnsweringModelOutput:()=>lc,Qwen2ForCausalLM:()=>Br,Qwen2Model:()=>Or,Qwen2PreTrainedModel:()=>Vr,Qwen2VLForConditionalGeneration:()=>Gr,Qwen2VLPreTrainedModel:()=>Dr,RTDetrForObjectDetection:()=>Jo,RTDetrModel:()=>Ho,RTDetrObjectDetectionOutput:()=>Yo,RTDetrPreTrainedModel:()=>Xo,ResNetForImageClassification:()=>un,ResNetModel:()=>dn,ResNetPreTrainedModel:()=>cn,RoFormerForMaskedLM:()=>ie,RoFormerForQuestionAnswering:()=>de,RoFormerForSequenceClassification:()=>le,RoFormerForTokenClassification:()=>ce,RoFormerModel:()=>ae,RoFormerPreTrainedModel:()=>ne,RobertaForMaskedLM:()=>Wt,RobertaForQuestionAnswering:()=>Xt,RobertaForSequenceClassification:()=>Ut,RobertaForTokenClassification:()=>Qt,RobertaModel:()=>$t,RobertaPreTrainedModel:()=>qt,SamImageSegmentationOutput:()=>Kn,SamModel:()=>Yn,SamPreTrainedModel:()=>Jn,SapiensForDepthEstimation:()=>yn,SapiensForNormalEstimation:()=>Fn,SapiensForSemanticSegmentation:()=>kn,SapiensPreTrainedModel:()=>xn,SegformerForImageClassification:()=>li,SegformerForSemanticSegmentation:()=>ci,SegformerModel:()=>ii,SegformerPreTrainedModel:()=>ai,Seq2SeqLMOutput:()=>rc,SequenceClassifierOutput:()=>oc,SiglipModel:()=>Es,SiglipPreTrainedModel:()=>As,SiglipTextModel:()=>Ls,SiglipVisionModel:()=>Is,SpeechT5ForSpeechToText:()=>Da,SpeechT5ForTextToSpeech:()=>Ga,SpeechT5HifiGan:()=>Ra,SpeechT5Model:()=>Ba,SpeechT5PreTrainedModel:()=>Oa,SqueezeBertForMaskedLM:()=>ut,SqueezeBertForQuestionAnswering:()=>mt,SqueezeBertForSequenceCl
assification:()=>pt,SqueezeBertModel:()=>dt,SqueezeBertPreTrainedModel:()=>ct,StableLmForCausalLM:()=>pi,StableLmModel:()=>ui,StableLmPreTrainedModel:()=>di,Starcoder2ForCausalLM:()=>Ja,Starcoder2Model:()=>Ha,Starcoder2PreTrainedModel:()=>Xa,Swin2SRForImageSuperResolution:()=>gn,Swin2SRModel:()=>fn,Swin2SRPreTrainedModel:()=>hn,SwinForImageClassification:()=>_n,SwinModel:()=>mn,SwinPreTrainedModel:()=>pn,T5ForConditionalGeneration:()=>bt,T5Model:()=>Tt,T5PreTrainedModel:()=>wt,TableTransformerForObjectDetection:()=>en,TableTransformerModel:()=>Zo,TableTransformerObjectDetectionOutput:()=>tn,TableTransformerPreTrainedModel:()=>Ko,TokenClassifierOutput:()=>ac,TrOCRForCausalLM:()=>$a,TrOCRPreTrainedModel:()=>qa,UniSpeechForCTC:()=>ga,UniSpeechForSequenceClassification:()=>Ma,UniSpeechModel:()=>fa,UniSpeechPreTrainedModel:()=>ha,UniSpeechSatForAudioFrameClassification:()=>xa,UniSpeechSatForCTC:()=>ba,UniSpeechSatForSequenceClassification:()=>Pa,UniSpeechSatModel:()=>Ta,UniSpeechSatPreTrainedModel:()=>wa,ViTForImageClassification:()=>no,ViTMAEModel:()=>fo,ViTMAEPreTrainedModel:()=>ho,ViTMSNForImageClassification:()=>wo,ViTMSNModel:()=>Mo,ViTMSNPreTrainedModel:()=>go,ViTModel:()=>oo,ViTPreTrainedModel:()=>ro,VisionEncoderDecoderModel:()=>ms,VitMatteForImageMatting:()=>Fo,VitMattePreTrainedModel:()=>yo,VitPoseForPoseEstimation:()=>uo,VitPosePreTrainedModel:()=>co,VitsModel:()=>ni,VitsModelOutput:()=>pc,VitsPreTrainedModel:()=>oi,Wav2Vec2BertForCTC:()=>Fa,Wav2Vec2BertForSequenceClassification:()=>va,Wav2Vec2BertModel:()=>ya,Wav2Vec2BertPreTrainedModel:()=>ka,Wav2Vec2ForAudioFrameClassification:()=>ca,Wav2Vec2ForCTC:()=>ia,Wav2Vec2ForSequenceClassification:()=>la,Wav2Vec2Model:()=>aa,Wav2Vec2PreTrainedModel:()=>na,WavLMForAudioFrameClassification:()=>Va,WavLMForCTC:()=>za,WavLMForSequenceClassification:()=>ja,WavLMForXVector:()=>Na,WavLMModel:()=>Ia,WavLMPreTrainedModel:()=>La,WeSpeakerResNetModel:()=>_a,WeSpeakerResNetPreTrainedModel:()=>ma,WhisperForConditionalGeneration:(
)=>ps,WhisperModel:()=>us,WhisperPreTrainedModel:()=>ds,XLMForQuestionAnswering:()=>es,XLMForSequenceClassification:()=>Kt,XLMForTokenClassification:()=>Zt,XLMModel:()=>Jt,XLMPreTrainedModel:()=>Ht,XLMRobertaForMaskedLM:()=>rs,XLMRobertaForQuestionAnswering:()=>as,XLMRobertaForSequenceClassification:()=>os,XLMRobertaForTokenClassification:()=>ns,XLMRobertaModel:()=>ss,XLMRobertaPreTrainedModel:()=>ts,XLMWithLMHeadModel:()=>Yt,XVectorOutput:()=>nc,YolosForObjectDetection:()=>Xn,YolosModel:()=>Qn,YolosObjectDetectionOutput:()=>Hn,YolosPreTrainedModel:()=>Un});var r=s("./src/configs.js"),o=s("./src/backends/onnx.js"),n=s("./src/utils/dtypes.js"),a=s("./src/utils/generic.js"),i=s("./src/utils/core.js"),l=s("./src/utils/hub.js"),c=s("./src/utils/constants.js"),d=s("./src/generation/logits_process.js"),u=s("./src/generation/configuration_utils.js"),p=s("./src/utils/tensor.js"),m=s("./src/utils/image.js"),_=s("./src/utils/maths.js"),h=s("./src/generation/stopping_criteria.js"),f=s("./src/generation/logits_sampler.js"),g=s("./src/env.js"),M=s("./src/models/whisper/generation_whisper.js"),w=s("./src/models/whisper/common_whisper.js");const T=0,b=1,P=2,x=3,k=4,y=5,F=6,v=7,C=8,S=new Map,A=new Map,E=new Map;async function L(e,t,s){return Object.fromEntries(await Promise.all(Object.keys(t).map((async a=>{const{buffer:i,session_options:c,session_config:d}=await async function(e,t,s){const a=s.config?.["transformers.js_config"]??{};let i=s.device??a.device;i&&"string"!=typeof i&&(i.hasOwnProperty(t)?i=i[t]:(console.warn(`device not specified for "${t}". Using the default device.`),i=null));const c=i??(g.apis.IS_NODE_ENV?"cpu":"wasm"),d=(0,o.deviceToExecutionProviders)(c);let u=s.dtype??a.dtype;if("string"!=typeof u&&(u&&u.hasOwnProperty(t)?u=u[t]:(u=n.DEFAULT_DEVICE_DTYPE_MAPPING[c]??n.DATA_TYPES.fp32,console.warn(`dtype not specified for "${t}". 
Using the default dtype (${u}) for this device (${c}).`))),u===n.DATA_TYPES.auto){let e=a.dtype;"string"!=typeof e&&(e=e[t]),u=e&&e!==n.DATA_TYPES.auto&&n.DATA_TYPES.hasOwnProperty(e)?e:n.DEFAULT_DEVICE_DTYPE_MAPPING[c]??n.DATA_TYPES.fp32}const p=u;if(!n.DEFAULT_DTYPE_SUFFIX_MAPPING.hasOwnProperty(p))throw new Error(`Invalid dtype: ${p}. Should be one of: ${Object.keys(n.DATA_TYPES).join(", ")}`);if(p===n.DATA_TYPES.fp16&&"webgpu"===c&&!await(0,n.isWebGpuFp16Supported)())throw new Error(`The device (${c}) does not support fp16.`);const m=a.kv_cache_dtype?"string"==typeof a.kv_cache_dtype?a.kv_cache_dtype:a.kv_cache_dtype[p]??"float32":void 0;if(m&&!["float32","float16"].includes(m))throw new Error(`Invalid kv_cache_dtype: ${m}. Should be one of: float32, float16`);const _={dtype:p,kv_cache_dtype:m},h=n.DEFAULT_DTYPE_SUFFIX_MAPPING[p],f=`${s.subfolder??""}/${t}${h}.onnx`,M={...s.session_options};M.executionProviders??=d;const w=a.free_dimension_overrides;w?M.freeDimensionOverrides??=w:c.startsWith("webnn")&&!M.freeDimensionOverrides&&console.warn('WebNN does not currently support dynamic shapes and requires `free_dimension_overrides` to be set in config.json as a field within "transformers.js_config". 
When `free_dimension_overrides` is not set, you may experience significant performance degradation.');const T=(0,l.getModelFile)(e,f,!0,s),b=s.use_external_data_format??a.use_external_data_format;let P=[];if(b&&(!0===b||"object"==typeof b&&b.hasOwnProperty(t)&&!0===b[t])){if(g.apis.IS_NODE_ENV)throw new Error("External data format is not yet supported in Node.js");const r=`${t}${h}.onnx_data`,o=`${s.subfolder??""}/${r}`;P.push(new Promise((async(t,n)=>{const a=await(0,l.getModelFile)(e,o,!0,s);t({path:r,data:a})})))}else void 0!==M.externalData&&(P=M.externalData.map((async t=>{if("string"==typeof t.data){const r=await(0,l.getModelFile)(e,t.data,!0,s);return{...t,data:r}}return t})));if(P.length>0&&(M.externalData=await Promise.all(P)),"webgpu"===c){const e=(0,r.getKeyValueShapes)(s.config,{prefix:"present"});if(Object.keys(e).length>0&&!(0,o.isONNXProxy)()){const t={};for(const s in e)t[s]="gpu-buffer";M.preferredOutputLocation=t}}return{buffer:await T,session_options:M,session_config:_}}(e,t[a],s);return[a,await(0,o.createInferenceSession)(i,c,d)]}))))}async function I(e,t,s){return Object.fromEntries(await Promise.all(Object.keys(t).map((async r=>[r,await(0,l.getModelJSON)(e,t[r],!1,s)]))))}async function z(e,t){const s=function(e,t){const s=Object.create(null),r=[];for(const n of e.inputNames){const e=t[n];e instanceof p.Tensor?s[n]=(0,o.isONNXProxy)()?e.clone():e:r.push(n)}if(r.length>0)throw new Error(`An error occurred during model execution: "Missing the following inputs: ${r.join(", ")}.`);const n=Object.keys(t).length,a=e.inputNames.length;if(n>a){let s=Object.keys(t).filter((t=>!e.inputNames.includes(t)));console.warn(`WARNING: Too many inputs were provided (${n} > ${a}). 
The following inputs will be ignored: "${s.join(", ")}".`)}return s}(e,t);try{const t=Object.fromEntries(Object.entries(s).map((([e,t])=>[e,t.ort_tensor])));let r=await e.run(t);return r=j(r),r}catch(e){const t=Object.fromEntries(Object.entries(s).map((([e,{type:t,dims:s,data:r}])=>[e,{type:t,dims:s,data:r}])));throw console.error(`An error occurred during model execution: "${e}".`),console.error("Inputs given to model:",t),e}}function j(e){for(let t in e)(0,o.isONNXTensor)(e[t])?e[t]=new p.Tensor(e[t]):"object"==typeof e[t]&&j(e[t]);return e}function N(e){if(e instanceof p.Tensor)return e;if(0===e.length)throw Error("items must be non-empty");if(Array.isArray(e[0])){if(e.some((t=>t.length!==e[0].length)))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.");return new p.Tensor("int64",BigInt64Array.from(e.flat().map((e=>BigInt(e)))),[e.length,e[0].length])}return new p.Tensor("int64",BigInt64Array.from(e.map((e=>BigInt(e)))),[1,e.length])}function V(e){return new p.Tensor("bool",[e],[1])}async function O(e,t){let{encoder_outputs:s,input_ids:r,decoder_input_ids:o,...n}=t;if(!s){const r=(0,i.pick)(t,e.sessions.model.inputNames);s=(await B(e,r)).last_hidden_state}n.input_ids=o,n.encoder_hidden_states=s,e.sessions.decoder_model_merged.inputNames.includes("encoder_attention_mask")&&(n.encoder_attention_mask=t.attention_mask);return await D(e,n,!0)}async function B(e,t){const s=e.sessions.model,r=(0,i.pick)(t,s.inputNames);if(s.inputNames.includes("inputs_embeds")&&!r.inputs_embeds){if(!t.input_ids)throw new Error("Both `input_ids` and `inputs_embeds` are missing in the model inputs.");r.inputs_embeds=await e.encode_text({input_ids:t.input_ids})}return s.inputNames.includes("token_type_ids")&&!r.token_type_ids&&(r.token_type_ids=new p.Tensor("int64",new BigInt64Array(r.input_ids.data.length),r.input_ids.dims)),await z(s,r)}async function 
D(e,t,s=!1){const r=e.sessions[s?"decoder_model_merged":"model"],{past_key_values:o,...n}=t;if(r.inputNames.includes("use_cache_branch")&&(n.use_cache_branch=V(!!o)),r.inputNames.includes("position_ids")&&n.attention_mask&&!n.position_ids){const t="paligemma"===e.config.model_type?1:0;n.position_ids=function(e,t=null,s=0){const{input_ids:r,inputs_embeds:o,attention_mask:n}=e,{data:a,dims:i}=q(n,s);let l=new p.Tensor("int64",a,i);if(t){const e=-(r??o).dims.at(1);l=l.slice(null,[e,null])}return l}(n,o,t)}e.addPastKeyValues(n,o);const a=(0,i.pick)(n,r.inputNames);return await z(r,a)}function G({image_token_id:e,inputs_embeds:t,image_features:s,input_ids:r,attention_mask:o}){const n=r.tolist().map((t=>t.reduce(((t,s,r)=>(s==e&&t.push(r),t)),[]))),a=n.reduce(((e,t)=>e+t.length),0),i=s.dims[0];if(a!==i)throw new Error(`Image features and image tokens do not match: tokens: ${a}, features ${i}`);let l=0;for(let e=0;e<n.length;++e){const r=n[e],o=t[e];for(let e=0;e<r.length;++e)o[r[e]].data.set(s[l++].data)}return{inputs_embeds:t,attention_mask:o}}async function R(e,{input_ids:t=null,attention_mask:s=null,pixel_values:r=null,position_ids:o=null,inputs_embeds:n=null,past_key_values:a=null,generation_config:i=null,logits_processor:l=null,...c}){if(!n)if(n=await e.encode_text({input_ids:t,...c}),r&&1!==t.dims[1]){const o=await e.encode_image({pixel_values:r,...c});({inputs_embeds:n,attention_mask:s}=e._merge_input_ids_with_image_features({image_features:o,inputs_embeds:n,input_ids:t,attention_mask:s}))}else if(a&&r&&1===t.dims[1]){const e=t.dims[1],r=Object.values(a)[0].dims.at(-2);s=(0,p.cat)([(0,p.ones)([t.dims[0],r]),s.slice(null,[s.dims[1]-e,s.dims[1]])],1)}if(!o&&"qwen2_vl"===e.config.model_type){const{image_grid_thw:r,video_grid_thw:n}=c;[o]=e.get_rope_index(t,r,n,s)}return await D(e,{inputs_embeds:n,past_key_values:a,attention_mask:s,position_ids:o,generation_config:i,logits_processor:l},!0)}function q(e,t=0){const[s,r]=e.dims,o=e.data,n=new 
BigInt64Array(o.length);for(let e=0;e<s;++e){const s=e*r;let a=BigInt(t);for(let e=0;e<r;++e){const t=s+e;0n===o[t]?n[t]=BigInt(1):(n[t]=a,a+=o[t])}}return{data:n,dims:e.dims}}function $(e,t,s,r){if(s.past_key_values){const t=Object.values(s.past_key_values)[0].dims.at(-2),{input_ids:r,attention_mask:o}=s;if(o&&o.dims[1]>r.dims[1]);else if(t<r.dims[1])s.input_ids=r.slice(null,[t,null]);else if(null!=e.config.image_token_index&&r.data.some((t=>t==e.config.image_token_index))){const o=e.config.num_image_tokens;if(!o)throw new Error("`num_image_tokens` is missing in the model configuration.");const n=r.dims[1]-(t-o);s.input_ids=r.slice(null,[-n,null]),s.attention_mask=(0,p.ones)([1,t+n])}}return s}function W(e,t,s,r){return s.past_key_values&&(t=t.map((e=>[e.at(-1)]))),{...s,decoder_input_ids:N(t)}}function U(e,...t){return e.config.is_encoder_decoder?W(e,...t):$(e,...t)}function Q(e,t,s,r){const o=!!s.past_key_values;if(null!==r.guidance_scale&&r.guidance_scale>1&&(o?s.input_ids=(0,p.cat)([s.input_ids,s.input_ids],0):(s.input_ids=(0,p.cat)([s.input_ids,(0,p.full_like)(s.input_ids,BigInt(r.pad_token_id))],0),s.attention_mask=(0,p.cat)([s.attention_mask,(0,p.full_like)(s.attention_mask,0n)],0))),!o&&s.pixel_values||(s.pixel_values=(0,p.full)([0,0,3,384,384],1)),o){const e=0,t=1,r=e>0?1:0,o=1;s.images_seq_mask=new p.Tensor("bool",new Array(e+t).fill(!0).fill(!1,0,t),[o,e+t]),s.images_emb_mask=new p.Tensor("bool",new Array(e).fill(!!r),[o,1,e])}return s}class X extends a.Callable{main_input_name="input_ids";forward_params=["input_ids","attention_mask"];constructor(e,t,s){super(),this.config=e,this.sessions=t,this.configs=s;const r=E.get(this.constructor),o=S.get(r);switch(this.can_generate=!1,this._forward=null,this._prepare_inputs_for_generation=null,o){case k:this.can_generate=!0,this._forward=D,this._prepare_inputs_for_generation=$;break;case P:case x:case v:this.can_generate=!0,this._forward=O,this._prepare_inputs_for_generation=W;break;case 
b:this._forward=O;break;case F:this.can_generate=!0,this._forward=R,this._prepare_inputs_for_generation=U;break;case C:this.can_generate=!0,this._prepare_inputs_for_generation=Q;break;default:this._forward=B}this.can_generate&&this.forward_params.push("past_key_values"),this.custom_config=this.config["transformers.js_config"]??{}}/* Releases every ONNX session handler owned by this model. */async dispose(){const e=[];for(const t of Object.values(this.sessions))t?.handler?.dispose&&e.push(t.handler.dispose());return await Promise.all(e)}/* Loads config + the set of ONNX sessions appropriate for the model group (decoder-only, encoder-decoder, vision encoder + mask decoder, multi-part VLMs, musicgen-style, etc.); unknown types fall back to a single encoder-only session with a console warning. */static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:o=null,local_files_only:n=!1,revision:a="main",model_file_name:i=null,subfolder:l="onnx",device:d=null,dtype:u=null,use_external_data_format:p=null,session_options:m={}}={}){let _={progress_callback:t,config:s,cache_dir:o,local_files_only:n,revision:a,model_file_name:i,subfolder:l,device:d,dtype:u,use_external_data_format:p,session_options:m};const h=E.get(this),f=S.get(h);let g;if(s=_.config=await r.AutoConfig.from_pretrained(e,_),f===k)g=await Promise.all([L(e,{model:_.model_file_name??"model"},_),I(e,{generation_config:"generation_config.json"},_)]);else if(f===P||f===x)g=await Promise.all([L(e,{model:"encoder_model",decoder_model_merged:"decoder_model_merged"},_),I(e,{generation_config:"generation_config.json"},_)]);else if(f===y)g=await Promise.all([L(e,{model:"vision_encoder",prompt_encoder_mask_decoder:"prompt_encoder_mask_decoder"},_)]);else if(f===b)g=await Promise.all([L(e,{model:"encoder_model",decoder_model_merged:"decoder_model_merged"},_)]);else if(f===F){const t={embed_tokens:"embed_tokens",vision_encoder:"vision_encoder",decoder_model_merged:"decoder_model_merged"};s.is_encoder_decoder&&(t.model="encoder_model"),g=await Promise.all([L(e,t,_),I(e,{generation_config:"generation_config.json"},_)])}else if(f===v)g=await Promise.all([L(e,{model:"text_encoder",decoder_model_merged:"decoder_model_merged",encodec_decode:"encodec_decode"},_),I(e,{generation_config:"generation_config.json"},_)]);else 
if(f===C)g=await Promise.all([L(e,{prepare_inputs_embeds:"prepare_inputs_embeds",model:"language_model",lm_head:"lm_head",gen_head:"gen_head",gen_img_embeds:"gen_img_embeds",image_decode:"image_decode"},_),I(e,{generation_config:"generation_config.json"},_)]);else{if(f!==T){const e=h??s?.model_type;"custom"!==e&&console.warn(`Model type for '${e}' not found, assuming encoder-only architecture. Please report this at ${c.GITHUB_ISSUE_URL}.`)}g=await Promise.all([L(e,{model:_.model_file_name??"model"},_)])}return new this(s,...g)}async _call(e){return await this.forward(e)}async forward(e){return await this._forward(this,e)}get generation_config(){return this.configs?.generation_config??null}/* Builds the sampling warper list (temperature / top-k / top-p) from a generation config. */_get_logits_warper(e){const t=new d.LogitsProcessorList;return null!==e.temperature&&1!==e.temperature&&t.push(new d.TemperatureLogitsWarper(e.temperature)),null!==e.top_k&&0!==e.top_k&&t.push(new d.TopKLogitsWarper(e.top_k)),null!==e.top_p&&e.top_p<1&&t.push(new d.TopPLogitsWarper(e.top_p)),t}/* Builds the constraint processor list (repetition penalty, n-gram blocking, bad words, min length, forced BOS/EOS, begin-suppress, CFG) plus any caller-supplied extras. */_get_logits_processor(e,t,s=null){const r=new d.LogitsProcessorList;if(null!==e.repetition_penalty&&1!==e.repetition_penalty&&r.push(new d.RepetitionPenaltyLogitsProcessor(e.repetition_penalty)),null!==e.no_repeat_ngram_size&&e.no_repeat_ngram_size>0&&r.push(new d.NoRepeatNGramLogitsProcessor(e.no_repeat_ngram_size)),null!==e.bad_words_ids&&r.push(new d.NoBadWordsLogitsProcessor(e.bad_words_ids,e.eos_token_id)),null!==e.min_length&&null!==e.eos_token_id&&e.min_length>0&&r.push(new d.MinLengthLogitsProcessor(e.min_length,e.eos_token_id)),null!==e.min_new_tokens&&null!==e.eos_token_id&&e.min_new_tokens>0&&r.push(new d.MinNewTokensLengthLogitsProcessor(t,e.min_new_tokens,e.eos_token_id)),null!==e.forced_bos_token_id&&r.push(new d.ForcedBOSTokenLogitsProcessor(e.forced_bos_token_id)),null!==e.forced_eos_token_id&&r.push(new d.ForcedEOSTokenLogitsProcessor(e.max_length,e.forced_eos_token_id)),null!==e.begin_suppress_tokens){const s=t>1||null===e.forced_bos_token_id?t:t+1;r.push(new 
d.SuppressTokensAtBeginLogitsProcessor(e.begin_suppress_tokens,s))}return null!==e.guidance_scale&&e.guidance_scale>1&&r.push(new d.ClassifierFreeGuidanceLogitsProcessor(e.guidance_scale)),null!==s&&r.extend(s),r}/* Merges config defaults (flattening nested decoder/generator/text_config), the model's stored generation_config, an explicit config object, and matching loose kwargs — in that precedence order. */_prepare_generation_config(e,t,s=u.GenerationConfig){const r={...this.config};for(const e of["decoder","generator","text_config"])e in r&&Object.assign(r,r[e]);const o=new s(r);return Object.assign(o,this.generation_config??{}),e&&Object.assign(o,e),t&&Object.assign(o,(0,i.pick)(t,Object.getOwnPropertyNames(o))),o}_get_stopping_criteria(e,t=null){const s=new h.StoppingCriteriaList;return null!==e.max_length&&s.push(new h.MaxLengthCriteria(e.max_length,this.config.max_position_embeddings??null)),null!==e.eos_token_id&&s.push(new h.EosTokenCriteria(e.eos_token_id)),t&&s.extend(t),s}/* Throws a descriptive error when .generate() is called on a head-less model class, suggesting generation-capable classes registered for the same model_type. */_validate_model_class(){if(!this.can_generate){const e=[tl,nl,el,Hi],t=E.get(this.constructor),s=new Set,r=this.config.model_type;for(const t of e){const e=t.get(r);e&&s.add(e[0])}let o=`The current model class (${t}) is not compatible with \`.generate()\`, as it doesn't have a language model head.`;throw s.size>0&&(o+=` Please use the following class instead: ${[...s].join(", ")}`),Error(o)}}prepare_inputs_for_generation(...e){return this._prepare_inputs_for_generation(this,...e)}/* Advances per-step state: refreshed KV cache, last generated ids as next input, attention mask grown by one column (decoder-only), position_ids reset for recomputation. */_update_model_kwargs_for_generation({generated_input_ids:e,outputs:t,model_inputs:s,is_encoder_decoder:r}){return s.past_key_values=this.getPastKeyValues(t,s.past_key_values),s.input_ids=new p.Tensor("int64",e.flat(),[e.length,1]),r||(s.attention_mask=(0,p.cat)([s.attention_mask,(0,p.ones)([s.attention_mask.dims[0],1])],1)),s.position_ids=null,s}_prepare_model_inputs({inputs:e,bos_token_id:t,model_kwargs:s}){const r=(0,i.pick)(s,this.forward_params),o=this.main_input_name;if(o in r){if(e)throw new Error("`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. 
Make sure to either pass {inputs} or {input_name}=...")}else r[o]=e;return{inputs_tensor:r[o],model_inputs:r,model_input_name:o}}/* Runs the encoder once (optionally embedding text/image inputs first), duplicates outputs for classifier-free guidance or to match the decoder batch size, and stashes them as encoder_outputs. */async _prepare_encoder_decoder_kwargs_for_generation({inputs_tensor:e,model_inputs:t,model_input_name:s,generation_config:r}){if(this.sessions.model.inputNames.includes("inputs_embeds")&&!t.inputs_embeds&&"_prepare_inputs_embeds"in this){const{input_ids:e,pixel_values:s,attention_mask:r,...o}=t,n=await this._prepare_inputs_embeds(t);t={...o,...(0,i.pick)(n,["inputs_embeds","attention_mask"])}}let{last_hidden_state:o}=await B(this,t);if(null!==r.guidance_scale&&r.guidance_scale>1)o=(0,p.cat)([o,(0,p.full_like)(o,0)],0),"attention_mask"in t&&(t.attention_mask=(0,p.cat)([t.attention_mask,(0,p.zeros_like)(t.attention_mask)],0));else if(t.decoder_input_ids){const e=N(t.decoder_input_ids).dims[0];if(e!==o.dims[0]){if(1!==o.dims[0])throw new Error(`The encoder outputs have a different batch size (${o.dims[0]}) than the decoder inputs (${e}).`);o=(0,p.cat)(Array.from({length:e},(()=>o)),0)}}return t.encoder_outputs=o,t}/* Normalizes decoder_input_ids to a batched int64 tensor, seeding from decoder_start_token_id / bos_token_id (musicgen gets one row per codebook), and creates a matching decoder_attention_mask of ones. */_prepare_decoder_input_ids_for_generation({batch_size:e,model_input_name:t,model_kwargs:s,decoder_start_token_id:r,bos_token_id:o,generation_config:n}){let{decoder_input_ids:a,...i}=s;if(!(a instanceof p.Tensor)){if(a)Array.isArray(a[0])||(a=Array.from({length:e},(()=>a)));else if(r??=o,"musicgen"===this.config.model_type)a=Array.from({length:e*this.config.decoder.num_codebooks},(()=>[r]));else if(Array.isArray(r)){if(r.length!==e)throw new Error(`\`decoder_start_token_id\` expcted to have length ${e} but got ${r.length}`);a=r}else a=Array.from({length:e},(()=>[r]));a=N(a)}return s.decoder_attention_mask=(0,p.ones_like)(a),{input_ids:a,model_inputs:i}}async 
generate({inputs:e=null,generation_config:t=null,logits_processor:s=null,stopping_criteria:r=null,streamer:o=null,...n}){this._validate_model_class(),t=this._prepare_generation_config(t,n);let{inputs_tensor:a,model_inputs:i,model_input_name:l}=this._prepare_model_inputs({inputs:e,model_kwargs:n});const c=this.config.is_encoder_decoder;let d;c&&("encoder_outputs"in i||(i=await this._prepare_encoder_decoder_kwargs_for_generation({inputs_tensor:a,model_inputs:i,model_input_name:l,generation_config:t}))),c?({input_ids:d,model_inputs:i}=this._prepare_decoder_input_ids_for_generation({batch_size:i[l].dims.at(0),model_input_name:l,model_kwargs:i,decoder_start_token_id:t.decoder_start_token_id,bos_token_id:t.bos_token_id,generation_config:t})):d=i[l];let u=d.dims.at(-1);null!==t.max_new_tokens&&(t.max_length=u+t.max_new_tokens);const m=this._get_logits_processor(t,u,s),_=this._get_stopping_criteria(t,r),h=i[l].dims.at(0),g=f.LogitsSampler.getSampler(t),M=new Array(h).fill(0),w=d.tolist();let T;o&&o.put(w);let b={};for(;;){if(i=this.prepare_inputs_for_generation(w,i,t),T=await this.forward(i),t.output_attentions&&t.return_dict_in_generate){const e=this.getAttentions(T);for(const t in e)t in b||(b[t]=[]),b[t].push(e[t])}const e=m(w,T.logits.slice(null,-1,null)),s=[];for(let t=0;t<e.dims.at(0);++t){const r=e[t],o=await g(r);for(const[e,r]of o){const o=BigInt(e);M[t]+=r,w[t].push(o),s.push([o]);break}}o&&o.put(s);if(_(w).every((e=>e)))break;i=this._update_model_kwargs_for_generation({generated_input_ids:s,outputs:T,model_inputs:i,is_encoder_decoder:c})}o&&o.end();const P=this.getPastKeyValues(T,i.past_key_values,!0),x=new p.Tensor("int64",w.flat(),[w.length,w[0].length]);if(t.return_dict_in_generate)return{sequences:x,past_key_values:P,...b};for(const e of Object.values(T))"gpu-buffer"===e.location&&e.dispose();return x}/* Maps "present*" outputs to "past_key_values*" inputs for the next step; encoder-side entries are reused from the previous cache, and superseded GPU buffers are disposed. */getPastKeyValues(e,t,s=!1){const r=Object.create(null);for(const o in e)if(o.startsWith("present")){const 
n=o.replace("present","past_key_values"),a=o.includes("encoder");if(r[n]=a&&t?t[n]:e[o],t&&(!a||s)){const e=t[n];"gpu-buffer"===e.location&&e.dispose()}}return r}getAttentions(e){const t={};for(const s of["cross_attentions","encoder_attentions","decoder_attentions"])for(const r in e)r.startsWith(s)&&(s in t||(t[s]=[]),t[s].push(e[r]));return t}/* Injects the cache into the feed, or allocates empty per-layer tensors (shape from getKeyValueShapes, dtype from kv_cache_dtype) for the first step. */addPastKeyValues(e,t){if(t)Object.assign(e,t);else{const t=this.sessions.decoder_model_merged??this.sessions.model,s=t?.config?.kv_cache_dtype??"float32",o="float16"===s?new Uint16Array:[],n=(e[this.main_input_name]??e.attention_mask)?.dims?.[0]??1,a=(0,r.getKeyValueShapes)(this.config,{batch_size:n});for(const t in a)e[t]=new p.Tensor(s,o,a[t])}}async encode_image({pixel_values:e}){const t=(await z(this.sessions.vision_encoder,{pixel_values:e})).image_features;return this.config.num_image_tokens||(console.warn(`The number of image tokens was not set in the model configuration. Setting it to the number of features detected by the vision encoder (${t.dims[1]}).`),this.config.num_image_tokens=t.dims[1]),t}async encode_text({input_ids:e}){return(await z(this.sessions.embed_tokens,{input_ids:e})).inputs_embeds}}/* H is the base model-output holder; J carries last_hidden_state (+ optional hidden_states/attentions). What follows is the per-architecture class hierarchy: bare model, plus heads wrapping super._call in typed outputs (ic/oc/ac/lc appear to be masked-LM / sequence-, token-classification / QA outputs — mangled, verify in src). */class H{}class J extends H{constructor({last_hidden_state:e,hidden_states:t=null,attentions:s=null}){super(),this.last_hidden_state=e,this.hidden_states=t,this.attentions=s}}class Y extends X{}class K extends Y{}class Z extends Y{async _call(e){return new ic(await super._call(e))}}class ee extends Y{async _call(e){return new oc(await super._call(e))}}class te extends Y{async _call(e){return new ac(await super._call(e))}}class se extends Y{async _call(e){return new lc(await super._call(e))}}class re extends X{}class oe extends re{}class ne extends X{}class ae extends ne{}class ie extends ne{async _call(e){return new ic(await super._call(e))}}class le extends ne{async _call(e){return new oc(await super._call(e))}}class ce extends ne{async _call(e){return new ac(await super._call(e))}}class de extends ne{async _call(e){return 
new lc(await super._call(e))}}class ue extends X{}class pe extends ue{}class me extends ue{async _call(e){return new ic(await super._call(e))}}class _e extends ue{async _call(e){return new oc(await super._call(e))}}class he extends ue{async _call(e){return new ac(await super._call(e))}}class fe extends ue{async _call(e){return new lc(await super._call(e))}}class ge extends X{}class Me extends ge{}class we extends ge{async _call(e){return new ic(await super._call(e))}}class Te extends ge{async _call(e){return new oc(await super._call(e))}}class be extends ge{async _call(e){return new ac(await super._call(e))}}class Pe extends ge{async _call(e){return new lc(await super._call(e))}}class xe extends X{}class ke extends xe{}class ye extends xe{async _call(e){return new ic(await super._call(e))}}class Fe extends xe{async _call(e){return new oc(await super._call(e))}}class ve extends xe{async _call(e){return new ac(await super._call(e))}}class Ce extends xe{async _call(e){return new lc(await super._call(e))}}class Se extends X{}class Ae extends Se{}class Ee extends Se{async _call(e){return new ic(await super._call(e))}}class Le extends Se{async _call(e){return new oc(await super._call(e))}}class Ie extends Se{async _call(e){return new ac(await super._call(e))}}class ze extends Se{async _call(e){return new lc(await super._call(e))}}class je extends X{}class Ne extends je{}class Ve extends je{async _call(e){return new ic(await super._call(e))}}class Oe extends je{async _call(e){return new oc(await super._call(e))}}class Be extends je{async _call(e){return new ac(await super._call(e))}}class De extends je{async _call(e){return new lc(await super._call(e))}}class Ge extends X{}class Re extends Ge{}class qe extends Ge{async _call(e){return new oc(await super._call(e))}}class $e extends Ge{async _call(e){return new ac(await super._call(e))}}class We extends Ge{async _call(e){return new lc(await super._call(e))}}class Ue extends Ge{async _call(e){return new ic(await 
super._call(e))}}class Qe extends X{}class Xe extends Qe{}class He extends Qe{async _call(e){return new ic(await super._call(e))}}class Je extends Qe{async _call(e){return new oc(await super._call(e))}}class Ye extends Qe{async _call(e){return new ac(await super._call(e))}}class Ke extends X{}class Ze extends Ke{}class et extends Ke{async _call(e){return new ic(await super._call(e))}}class tt extends Ke{async _call(e){return new oc(await super._call(e))}}class st extends Ke{async _call(e){return new lc(await super._call(e))}}class rt extends X{}class ot extends rt{}class nt extends rt{async _call(e){return new ic(await super._call(e))}}class at extends rt{async _call(e){return new oc(await super._call(e))}}class it extends rt{async _call(e){return new ac(await super._call(e))}}class lt extends rt{async _call(e){return new lc(await super._call(e))}}class ct extends X{}class dt extends ct{}class ut extends ct{async _call(e){return new ic(await super._call(e))}}class pt extends ct{async _call(e){return new oc(await super._call(e))}}class mt extends ct{async _call(e){return new lc(await super._call(e))}}class _t extends X{}class ht extends _t{}class ft extends _t{async _call(e){return new oc(await super._call(e))}}class gt extends _t{async _call(e){return new lc(await super._call(e))}}class Mt extends _t{async _call(e){return new ic(await super._call(e))}}/* wt: seq2seq base — widens forward_params to the full encoder-decoder input set; subsequent families follow the same bare-model / head-wrapper pattern as above. */class wt extends X{forward_params=["input_ids","attention_mask","encoder_outputs","decoder_input_ids","decoder_attention_mask","past_key_values"]}class Tt extends wt{}class bt extends wt{}class Pt extends X{}class xt extends Pt{}class kt extends Pt{}class yt extends X{}class Ft extends yt{}class vt extends yt{}class Ct extends X{}class St extends Ct{}class At extends Ct{}class Et extends Ct{async _call(e){return new oc(await super._call(e))}}class Lt extends X{}class It extends Lt{}class zt extends Lt{}class jt extends Lt{async _call(e){return new oc(await super._call(e))}}class Nt extends Lt{}class Vt extends 
X{}class Ot extends Vt{}class Bt extends Vt{}class Dt extends X{}class Gt extends Dt{}class Rt extends Dt{}class qt extends X{}class $t extends qt{}class Wt extends qt{async _call(e){return new ic(await super._call(e))}}class Ut extends qt{async _call(e){return new oc(await super._call(e))}}class Qt extends qt{async _call(e){return new ac(await super._call(e))}}class Xt extends qt{async _call(e){return new lc(await super._call(e))}}class Ht extends X{}class Jt extends Ht{}class Yt extends Ht{async _call(e){return new ic(await super._call(e))}}class Kt extends Ht{async _call(e){return new oc(await super._call(e))}}class Zt extends Ht{async _call(e){return new ac(await super._call(e))}}class es extends Ht{async _call(e){return new lc(await super._call(e))}}class ts extends X{}class ss extends ts{}class rs extends ts{async _call(e){return new ic(await super._call(e))}}class os extends ts{async _call(e){return new oc(await super._call(e))}}class ns extends ts{async _call(e){return new ac(await super._call(e))}}class as extends ts{async _call(e){return new lc(await super._call(e))}}class is extends X{}class ls extends is{}class cs extends is{}/* ds: speech seq2seq base — takes input_features instead of input_ids and needs no attention mask. ps is the Whisper generation model: _retrieve_init_tokens builds the <|startoftranscript|>/language/task/no-timestamps prompt prefix from the generation config. */class ds extends X{requires_attention_mask=!1;main_input_name="input_features";forward_params=["input_features","attention_mask","decoder_input_ids","decoder_attention_mask","past_key_values"]}class us extends ds{}class ps extends ds{_prepare_generation_config(e,t){return super._prepare_generation_config(e,t,M.WhisperGenerationConfig)}_retrieve_init_tokens(e){const t=[e.decoder_start_token_id];let s=e.language;const r=e.task;if(e.is_multilingual){s||(console.warn("No language specified - defaulting to English (en)."),s="en");const o=`<|${(0,w.whisper_language_to_code)(s)}|>`;t.push(e.lang_to_id[o]),t.push(e.task_to_id[r??"transcribe"])}else if(s||r)throw new Error("Cannot specify `task` or `language` for an English-only model. 
If the model is intended to be multilingual, pass `is_multilingual=true` to generate, or update the generation config.");return!e.return_timestamps&&e.no_timestamps_token_id&&t.at(-1)!==e.no_timestamps_token_id?t.push(e.no_timestamps_token_id):e.return_timestamps&&t.at(-1)===e.no_timestamps_token_id&&(console.warn("<|notimestamps|> prompt token is removed from generation_config since `return_timestamps` is set to `true`."),t.pop()),t.filter((e=>null!=e))}/* Whisper generate: wires timestamp / begin-suppress logits processors, enables attentions when token-level timestamps are requested (requires alignment_heads), then defers to the base generate. */async generate({inputs:e=null,generation_config:t=null,logits_processor:s=null,stopping_criteria:r=null,...o}){t=this._prepare_generation_config(t,o);const n=o.decoder_input_ids??this._retrieve_init_tokens(t);if(t.return_timestamps&&(s??=new d.LogitsProcessorList,s.push(new d.WhisperTimeStampLogitsProcessor(t,n))),t.begin_suppress_tokens&&(s??=new d.LogitsProcessorList,s.push(new d.SuppressTokensAtBeginLogitsProcessor(t.begin_suppress_tokens,n.length))),t.return_token_timestamps){if(!t.alignment_heads)throw new Error("Model generation config has no `alignment_heads`, token-level timestamps not available. See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config.");"translate"===t.task&&console.warn("Token-level timestamps may not be reliable for task 'translate'."),t.output_attentions=!0,t.return_dict_in_generate=!0}const a=await super.generate({inputs:e,generation_config:t,logits_processor:s,decoder_input_ids:n,...o});return t.return_token_timestamps&&(a.token_timestamps=this._extract_token_timestamps(a,t.alignment_heads,t.num_frames)),a}_extract_token_timestamps(e,t,s=null,r=.02){if(!e.cross_attentions)throw new Error("Model outputs must contain cross attentions to extract timestamps. This is most likely because the model was not exported with `output_attentions=True`.");null==s&&console.warn("`num_frames` has not been set, meaning the entire audio will be analyzed. 
This may lead to inaccurate token-level timestamps for short audios (< 30 seconds).");let o=this.config.median_filter_width;void 0===o&&(console.warn("Model config has no `median_filter_width`, using default value of 7."),o=7);const n=e.cross_attentions,a=Array.from({length:this.config.decoder_layers},((e,t)=>(0,p.cat)(n.map((e=>e[t])),2))),l=(0,p.stack)(t.map((([e,t])=>{if(e>=a.length)throw new Error(`Layer index ${e} is out of bounds for cross attentions (length ${a.length}).`);return s?a[e].slice(null,t,null,[0,s]):a[e].slice(null,t)}))).transpose(1,0,2,3),[c,d]=(0,p.std_mean)(l,-2,0,!0),u=l.clone();for(let e=0;e<u.dims[0];++e){const t=u[e];for(let s=0;s<t.dims[0];++s){const r=t[s],n=c[e][s][0].data,a=d[e][s][0].data;for(let e=0;e<r.dims[0];++e){let t=r[e].data;for(let e=0;e<t.length;++e)t[e]=(t[e]-a[e])/n[e];t.set((0,_.medianFilter)(t,o))}}}const m=[(0,p.mean)(u,1)],h=e.sequences.dims,f=new p.Tensor("float32",new Float32Array(h[0]*h[1]),h);for(let e=0;e<h[0];++e){const t=m[e].neg().squeeze_(0),[s,o]=(0,_.dynamic_time_warping)(t.tolist()),n=Array.from({length:s.length-1},((e,t)=>s[t+1]-s[t])),a=(0,i.mergeArrays)([1],n).map((e=>!!e)),l=[];for(let e=0;e<a.length;++e)a[e]&&l.push(o[e]*r);f[e].data.set(l,1)}return f}}/* ms: vision-encoder-decoder base. _s: single-image VLM base. hs merges one image-placeholder token per sequence by splicing image features (and matching ones into the mask) at the placeholder position — all-or-nothing across the batch. */class ms extends X{main_input_name="pixel_values";forward_params=["pixel_values","decoder_input_ids","encoder_hidden_states","past_key_values"]}class _s extends X{forward_params=["input_ids","attention_mask","pixel_values","position_ids","past_key_values"]}class hs extends _s{_merge_input_ids_with_image_features({inputs_embeds:e,image_features:t,input_ids:s,attention_mask:r}){const o=this.config.image_token_index,n=s.tolist().map((e=>e.findIndex((e=>e==o)))),a=n.every((e=>-1===e)),i=n.every((e=>-1!==e));if(!a&&!i)throw new Error("Every input should contain either 0 or 1 image token.");if(a)return{inputs_embeds:e,attention_mask:r};const l=[],c=[];for(let s=0;s<n.length;++s){const 
o=n[s],a=e[s],i=t[s],d=r[s];l.push((0,p.cat)([a.slice([0,o]),i,a.slice([o+1,a.dims[0]])],0)),c.push((0,p.cat)([d.slice([0,o]),(0,p.ones)([i.dims[0]]),d.slice([o+1,d.dims[0]])],0))}return{inputs_embeds:(0,p.stack)(l,0),attention_mask:(0,p.stack)(c,0)}}}class fs extends hs{}class gs extends hs{}/* Ms/ws: multi-part multimodal encoder-decoder — prepends image features to text embeddings, builds inputs_embeds from text and/or image encoders, runs the encoder, embeds decoder ids, then calls the merged decoder via D. */class Ms extends X{forward_params=["input_ids","inputs_embeds","attention_mask","pixel_values","encoder_outputs","decoder_input_ids","decoder_inputs_embeds","decoder_attention_mask","past_key_values"];main_input_name="inputs_embeds"}class ws extends Ms{_merge_input_ids_with_image_features({inputs_embeds:e,image_features:t,input_ids:s,attention_mask:r}){return{inputs_embeds:(0,p.cat)([t,e],1),attention_mask:(0,p.cat)([(0,p.ones)(t.dims.slice(0,2)),r],1)}}async _prepare_inputs_embeds({input_ids:e,pixel_values:t,inputs_embeds:s,attention_mask:r}){if(!e&&!t)throw new Error("Either `input_ids` or `pixel_values` should be provided.");let o,n;return e&&(o=await this.encode_text({input_ids:e})),t&&(n=await this.encode_image({pixel_values:t})),o&&n?({inputs_embeds:s,attention_mask:r}=this._merge_input_ids_with_image_features({inputs_embeds:o,image_features:n,input_ids:e,attention_mask:r})):s=o||n,{inputs_embeds:s,attention_mask:r}}async forward({input_ids:e,pixel_values:t,attention_mask:s,decoder_input_ids:r,decoder_attention_mask:o,encoder_outputs:n,past_key_values:a,inputs_embeds:i,decoder_inputs_embeds:l}){if(i||({inputs_embeds:i,attention_mask:s}=await this._prepare_inputs_embeds({input_ids:e,pixel_values:t,inputs_embeds:i,attention_mask:s})),!n){let{last_hidden_state:e}=await B(this,{inputs_embeds:i,attention_mask:s});n=e}if(!l){if(!r)throw new Error("Either `decoder_input_ids` or `decoder_inputs_embeds` should be provided.");l=await this.encode_text({input_ids:r})}const c={inputs_embeds:l,attention_mask:o,encoder_attention_mask:s,encoder_hidden_states:n,past_key_values:a};return await D(this,c,!0)}}class Ts extends 
X{forward_params=["input_ids","attention_mask","pixel_values","position_ids","past_key_values"]}class bs extends Ts{_merge_input_ids_with_image_features(e){const t=e.image_features.dims.at(-1),s=e.image_features.view(-1,t);return G({image_token_id:this.config.image_token_index,...e,image_features:s})}}/* Ps/xs: VLM family with a pixel_attention_mask; flattens image features to (tokens, dim) and scatters them via G. Note bs keys off image_token_index while xs uses image_token_id — both read from config. */class Ps extends X{forward_params=["input_ids","attention_mask","pixel_values","pixel_attention_mask","position_ids","past_key_values"]}class xs extends Ps{async encode_image({pixel_values:e,pixel_attention_mask:t}){return(await z(this.sessions.vision_encoder,{pixel_values:e,pixel_attention_mask:t})).image_features}_merge_input_ids_with_image_features(e){const t=e.image_features.dims.at(-1),s=e.image_features.view(-1,t);return G({image_token_id:this.config.image_token_id,...e,image_features:s})}}/* ks…: CLIP-style dual-encoder families; the text/vision subclasses pin model_file_name so only the relevant tower is loaded. */class ks extends X{}class ys extends ks{}class Fs extends ks{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class vs extends ks{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Cs extends ks{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class Ss extends ks{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class As extends X{}class Es extends As{}class Ls extends As{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Is extends ks{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class zs extends X{}class js extends zs{}class Ns extends X{}/* Vs: dual-embedding forward that tolerates text-only or image-only calls by substituting dummy inputs, then returns only the embedding pairs that correspond to real inputs. */class Vs extends Ns{async forward(e){const t=!e.input_ids,s=!e.pixel_values;if(t&&s)throw new Error("Either `input_ids` or `pixel_values` should be 
provided.");if(t&&(e.input_ids=(0,p.ones)([e.pixel_values.dims[0],1])),s){const{image_size:t}=this.config.vision_config;e.pixel_values=(0,p.full)([0,3,t,t],0)}const{text_embeddings:r,image_embeddings:o,l2norm_text_embeddings:n,l2norm_image_embeddings:a}=await super.forward(e),i={};return t||(i.text_embeddings=r,i.l2norm_text_embeddings=n),s||(i.image_embeddings=o,i.l2norm_image_embeddings=a),i}}class Os extends Ns{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class Bs extends Ns{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class Ds extends X{}class Gs extends Ds{}class Rs extends Ds{}class qs extends X{}class $s extends qs{}class Ws extends qs{}class Us extends X{}class Qs extends Us{}class Xs extends Us{}class Hs extends X{}class Js extends Hs{}class Ys extends Hs{}class Ks extends X{}class Zs extends Ks{}class er extends Ks{}class tr extends X{}class sr extends tr{}class rr extends tr{}class or extends X{}class nr extends or{}class ar extends or{}class ir extends X{}class lr extends ir{}class cr extends ir{}class dr extends X{}class ur extends dr{}class pr extends dr{}class mr extends X{}class _r extends mr{}class hr extends mr{}class fr extends X{}class gr extends fr{}class Mr extends fr{}class wr extends X{}class Tr extends wr{}class br extends wr{}class Pr extends X{}class xr extends Pr{}class kr extends Pr{}class yr extends X{}class Fr extends yr{}class vr extends yr{}class Cr extends X{}class Sr extends Cr{}class Ar extends Cr{}class Er extends X{}class Lr extends Er{}class Ir extends Er{}class zr extends X{}class jr extends zr{}class Nr extends zr{}class Vr extends X{}class Or extends Vr{}class Br extends Vr{}/* Dr/Gr: qwen2_vl family — adds pixel_values and image_grid_thw to the decoder inputs. */class Dr extends X{forward_params=["input_ids","attention_mask","position_ids","past_key_values","pixel_values","image_grid_thw"]}class Gr extends 
Dr{get_rope_index(e,t,s,r){const{vision_config:o,image_token_id:n,video_token_id:a,vision_start_token_id:i}=this.config,l=o.spatial_merge_size??2,c=[];if(t||s){let o=e.tolist();r||(r=(0,p.ones_like)(e));const d=r.tolist(),u=Array.from({length:3},(t=>Array.from({length:e.dims[0]},(t=>Array.from({length:e.dims[1]},(e=>1)))))),m=t?t.tolist():[],h=s?s.tolist():[];let f=0,g=0;for(let e=0;e<o.length;++e){const t=o[e].filter(((t,s)=>1==d[e][s])),s=t.reduce(((e,t,s)=>(t==i&&e.push(s),e)),[]).map((e=>t[e+1])),r=s.filter((e=>e==n)).length,p=s.filter((e=>e==a)).length;let M=[],w=0,T=r,b=p;for(let e=0;e<s.length;++e){const e=t.findIndex(((e,t)=>t>w&&e==n)),s=t.findIndex(((e,t)=>t>w&&e==a)),r=T>0&&-1!==e?e:t.length+1,o=b>0&&-1!==s?s:t.length+1;let i,c,d,u;r<o?([c,d,u]=m[f],++f,--T,i=r):([c,d,u]=h[g],++g,--b,i=o);const[p,P,x]=[Number(c),Math.floor(Number(d)/l),Math.floor(Number(u)/l)],k=i-w,y=M.length>0?(0,_.max)(M.at(-1))[0]+1:0;M.push(Array.from({length:3*k},((e,t)=>y+t%k)));const F=k+y,v=p*P*x,C=Array.from({length:v},((e,t)=>F+Math.floor(t/(P*x)))),S=Array.from({length:v},((e,t)=>F+Math.floor(t/x)%P)),A=Array.from({length:v},((e,t)=>F+t%x));M.push([C,S,A].flat()),w=i+v}if(w<t.length){const e=M.length>0?(0,_.max)(M.at(-1))[0]+1:0,s=t.length-w;M.push(Array.from({length:3*s},((t,r)=>e+r%s)))}const P=M.reduce(((e,t)=>e+t.length),0),x=new Array(P);let k=0;for(let e=0;e<3;++e)for(let t=0;t<M.length;++t){const s=M[t],r=s.length/3;for(let t=e*r;t<(e+1)*r;++t)x[k++]=s[t]}let y=0;const F=d[e];for(let t=0;t<F.length;++t)if(1==F[t]){for(let s=0;s<3;++s)u[s][e][t]=x[s*P/3+y];++y}const v=(0,_.max)(x)[0];c.push(v+1-o[e].length)}return[new p.Tensor("int64",u.flat(1/0),[3,e.dims[0],e.dims[1]]),new p.Tensor("int64",c,[c.length,1])]}if(r){const{data:e,dims:t}=q(r),s=BigInt64Array.from({length:3*e.length},((t,s)=>e[s%e.length])),o=Array.from({length:t[0]},((s,r)=>(0,_.max)(e.subarray(t[1]*r,t[1]*(r+1)))[0]+1+t[1]));return[new p.Tensor("int64",s,[3,...t]),new 
p.Tensor("int64",o,[o.length,1])]}{const[t,s]=e.dims,r=BigInt64Array.from({length:3*t*s},((e,r)=>BigInt(Math.floor(r%s/t))));return[new p.Tensor("int64",r,[3,...e.dims]),(0,p.zeros)([t,1])]}}/* Above: qwen2_vl 3D (temporal/height/width) rope-index computation over interleaved text and image/video spans; returns [position_ids (3,B,T), rope_deltas (B,1)]. Below: image encoding via the vision session, G-based feature merge, and per-step position-id advancement using cached rope_deltas. */async encode_image({pixel_values:e,image_grid_thw:t}){return(await z(this.sessions.vision_encoder,{pixel_values:e,grid_thw:t})).image_features}_merge_input_ids_with_image_features(e){return G({image_token_id:this.config.image_token_id,...e})}prepare_inputs_for_generation(e,t,s){if(t.attention_mask&&!t.position_ids)if(t.past_key_values){t.pixel_values=null;const e=BigInt(Object.values(t.past_key_values)[0].dims.at(-2)),s=t.rope_deltas.map((t=>e+t));t.position_ids=(0,p.stack)([s,s,s],0)}else[t.position_ids,t.rope_deltas]=this.get_rope_index(t.input_ids,t.image_grid_thw,t.video_grid_thw,t.attention_mask);return t}}class Rr extends X{}class qr extends Rr{}class $r extends Rr{}class Wr extends X{}class Ur extends Wr{}class Qr extends Wr{}class Xr extends X{}class Hr extends Xr{}class Jr extends Xr{}class Yr extends X{}class Kr extends Yr{}class Zr extends Yr{}class eo extends X{}class to extends eo{}class so extends eo{}class ro extends X{}class oo extends ro{}class no extends ro{async _call(e){return new oc(await super._call(e))}}class ao extends X{}class io extends ao{}class lo extends ao{async _call(e){return new oc(await super._call(e))}}class co extends X{}class uo extends co{}class po extends X{}class mo extends po{}class _o extends po{async _call(e){return new oc(await super._call(e))}}class ho extends X{}class fo extends ho{}class go extends X{}class Mo extends go{}class wo extends go{async _call(e){return new oc(await super._call(e))}}class To extends X{}class bo extends To{}class Po extends X{}class xo extends Po{}class ko extends Po{async _call(e){return new oc(await super._call(e))}}class yo extends X{}class Fo extends yo{async _call(e){return new uc(await super._call(e))}}class vo extends X{}class Co extends vo{}class So extends vo{async _call(e){return new oc(await 
super._call(e))}}class Ao extends X{}class Eo extends Ao{}class Lo extends Ao{async _call(e){return new oc(await super._call(e))}}class Io extends X{}class zo extends Io{}class jo extends Io{}class No extends X{}class Vo extends No{}class Oo extends No{}class Bo extends X{}class Do extends Bo{}class Go extends Bo{async _call(e){return new oc(await super._call(e))}}/* Detection families: $o/Wo/Jo/en/Xn wrap outputs in logits+pred_boxes (and pred_masks for segmentation) holders Uo/Qo/Yo/tn/Hn. */class Ro extends X{}class qo extends Ro{}class $o extends Ro{async _call(e){return new Uo(await super._call(e))}}class Wo extends Ro{async _call(e){return new Qo(await super._call(e))}}class Uo extends H{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class Qo extends H{constructor({logits:e,pred_boxes:t,pred_masks:s}){super(),this.logits=e,this.pred_boxes=t,this.pred_masks=s}}class Xo extends X{}class Ho extends Xo{}class Jo extends Xo{async _call(e){return new Yo(await super._call(e))}}class Yo extends H{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class Ko extends X{}class Zo extends Ko{}class en extends Ko{async _call(e){return new tn(await super._call(e))}}class tn extends Uo{}class sn extends X{}class rn extends sn{}class on extends sn{async _call(e){return new oc(await super._call(e))}}class nn extends X{}class an extends nn{}class ln extends nn{async _call(e){return new oc(await super._call(e))}}class cn extends X{}class dn extends cn{}class un extends cn{async _call(e){return new oc(await super._call(e))}}class pn extends X{}class mn extends pn{}class _n extends pn{async _call(e){return new oc(await super._call(e))}}class hn extends X{}class fn extends hn{}class gn extends hn{}class Mn extends X{}class wn extends Mn{}class Tn extends Mn{}class bn extends X{}class Pn extends bn{}class xn extends X{}class kn extends xn{}class yn extends xn{}class Fn extends xn{}class vn extends X{}class Cn extends vn{}class Sn extends X{}class An extends Sn{}class En extends Sn{}class Ln extends X{}class In extends Ln{}class zn extends Ln{}class jn 
extends X{}class Nn extends jn{}class Vn extends X{}class On extends Vn{}class Bn extends Vn{async _call(e){return new oc(await super._call(e))}}class Dn extends X{}class Gn extends Dn{}class Rn extends Dn{async _call(e){return new oc(await super._call(e))}}class qn extends X{}class $n extends qn{}class Wn extends qn{async _call(e){return new oc(await super._call(e))}}class Un extends X{}class Qn extends Un{}class Xn extends Un{async _call(e){return new Hn(await super._call(e))}}class Hn extends H{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}/* Yn: SAM-style model (uses the prompt_encoder_mask_decoder session) — computes image embeddings once, defaults input_labels to 1s matching input_points, and returns iou_scores + pred_masks (Kn). */class Jn extends X{}class Yn extends Jn{async get_image_embeddings({pixel_values:e}){return await B(this,{pixel_values:e})}async forward(e){if(e.image_embeddings&&e.image_positional_embeddings||(e={...e,...await this.get_image_embeddings(e)}),!e.input_labels&&e.input_points){const t=e.input_points.dims.slice(0,-1),s=t.reduce(((e,t)=>e*t),1);e.input_labels=new p.Tensor("int64",new BigInt64Array(s).fill(1n),t)}const t={image_embeddings:e.image_embeddings,image_positional_embeddings:e.image_positional_embeddings};return e.input_points&&(t.input_points=e.input_points),e.input_labels&&(t.input_labels=e.input_labels),e.input_boxes&&(t.input_boxes=e.input_boxes),await z(this.sessions.prompt_encoder_mask_decoder,t)}async _call(e){return new Kn(await super._call(e))}}class Kn extends H{constructor({iou_scores:e,pred_masks:t}){super(),this.iou_scores=e,this.pred_masks=t}}class Zn extends X{}class ea extends Zn{}class ta extends Zn{}class sa extends X{}class ra extends sa{}class oa extends sa{}class na extends X{}class aa extends na{}class ia extends na{async _call(e){return new cc(await super._call(e))}}class la extends na{async _call(e){return new oc(await super._call(e))}}class ca extends na{async _call(e){return new ac(await super._call(e))}}class da extends X{}class ua extends da{}class pa extends da{async _call(e){return new ac(await super._call(e))}}class ma extends X{}class _a extends ma{}class ha 
extends X{}class fa extends ha{}class ga extends ha{async _call(e){return new cc(await super._call(e))}}class Ma extends ha{async _call(e){return new oc(await super._call(e))}}class wa extends X{}class Ta extends wa{}class ba extends wa{async _call(e){return new cc(await super._call(e))}}class Pa extends wa{async _call(e){return new oc(await super._call(e))}}class xa extends wa{async _call(e){return new ac(await super._call(e))}}class ka extends X{}class ya extends ka{}class Fa extends ka{async _call(e){return new cc(await super._call(e))}}class va extends ka{async _call(e){return new oc(await super._call(e))}}class Ca extends X{}class Sa extends na{}class Aa extends na{async _call(e){return new cc(await super._call(e))}}class Ea extends na{async _call(e){return new oc(await super._call(e))}}class La extends X{}class Ia extends La{}class za extends La{async _call(e){return new cc(await super._call(e))}}class ja extends La{async _call(e){return new oc(await super._call(e))}}class Na extends La{async _call(e){return new nc(await super._call(e))}}class Va extends La{async _call(e){return new ac(await super._call(e))}}class Oa extends X{}class Ba extends Oa{}class Da extends Oa{}class Ga extends Oa{async generate_speech(e,t,{threshold:s=.5,minlenratio:r=0,maxlenratio:o=20,vocoder:n=null}={}){const a={input_ids:e},{encoder_outputs:i,encoder_attention_mask:l}=await B(this,a),c=i.dims[1]/this.config.reduction_factor,d=Math.floor(c*o),u=Math.floor(c*r),m=this.config.num_mel_bins;let _=[],h=null,f=null,g=0;for(;;){++g;const e=V(!!f);let r;r=f?f.output_sequence_out:new p.Tensor("float32",new Float32Array(m),[1,1,m]);let o={use_cache_branch:e,output_sequence:r,encoder_attention_mask:l,speaker_embeddings:t,encoder_hidden_states:i};this.addPastKeyValues(o,h),f=await z(this.sessions.decoder_model_merged,o),h=this.getPastKeyValues(f,h);const{prob:n,spectrum:a}=f;if(_.push(a),g>=u&&(Array.from(n.data).filter((e=>e>=s)).length>0||g>=d))break}const M=(0,p.cat)(_),{waveform:w}=await 
z(n.sessions.model,{spectrogram:M});return{spectrogram:M,waveform:w}}}class Ra extends X{main_input_name="spectrogram"}class qa extends X{}class $a extends qa{}class Wa extends X{}class Ua extends Wa{}class Qa extends Wa{}class Xa extends X{}class Ha extends Xa{}class Ja extends Xa{}class Ya extends X{}class Ka extends Ya{}class Za extends Ya{}class ei extends X{}class ti extends ei{}class si extends ei{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class ri extends ei{static async from_pretrained(e,t={}){return t.model_file_name??="audio_model",super.from_pretrained(e,t)}}class oi extends X{}class ni extends oi{async _call(e){return new pc(await super._call(e))}}class ai extends X{}class ii extends ai{}class li extends ai{}class ci extends ai{}class di extends X{}class ui extends di{}class pi extends di{}class mi extends X{}class _i extends mi{}class hi extends mi{async _call(e){return new oc(await super._call(e))}}class fi extends X{}class gi extends fi{}class Mi extends fi{}class wi extends X{forward_params=["input_ids","attention_mask","encoder_outputs","decoder_input_ids","decoder_attention_mask","past_key_values"];_apply_and_filter_by_delay_pattern_mask(e){const[t,s]=e.dims,r=this.config.decoder.num_codebooks,o=s-r;let n=0;for(let t=0;t<e.size;++t){if(e.data[t]===this.config.decoder.pad_token_id)continue;const a=t%s-Math.floor(t/s)%r;a>0&&a<=o&&(e.data[n++]=e.data[t])}const a=Math.floor(t/r),i=n/(a*r);return new p.Tensor(e.type,e.data.slice(0,n),[a,r,i])}prepare_inputs_for_generation(e,t,s){let r=structuredClone(e);for(let e=0;e<r.length;++e)for(let t=0;t<r[e].length;++t)e%this.config.decoder.num_codebooks>=t&&(r[e][t]=BigInt(this.config.decoder.pad_token_id));null!==s.guidance_scale&&s.guidance_scale>1&&(r=r.concat(r));return super.prepare_inputs_for_generation(r,t,s)}async generate(e){const t=await 
super.generate(e),s=this._apply_and_filter_by_delay_pattern_mask(t).unsqueeze_(0),{audio_values:r}=await z(this.sessions.encodec_decode,{audio_codes:s});return r}}class Ti extends X{}class bi extends Ti{}class Pi extends Ti{async _call(e){return new oc(await super._call(e))}}class xi extends X{}class ki extends xi{}class yi extends xi{async _call(e){return new oc(await super._call(e))}}class Fi extends X{}class vi extends Fi{}class Ci extends Fi{async _call(e){return new oc(await super._call(e))}}class Si extends X{}class Ai extends Si{}class Ei extends Si{async _call(e){return new oc(await super._call(e))}}class Li extends X{}class Ii extends Li{}class zi extends X{}class ji extends zi{forward_params=["input_ids","pixel_values","images_seq_mask","images_emb_mask","attention_mask","position_ids","past_key_values"];constructor(...e){super(...e),this._generation_mode="text"}async forward(e){const t=this._generation_mode??"text";let s;if("text"!==t&&e.past_key_values){const t=this.sessions.gen_img_embeds,r=(0,i.pick)({image_ids:e.input_ids},t.inputNames);s=await z(t,r)}else{const t=this.sessions.prepare_inputs_embeds,r=(0,i.pick)(e,t.inputNames);s=await z(t,r)}const r={...e,...s},o=await D(this,r),n=this.sessions["text"===t?"lm_head":"gen_head"];if(!n)throw new Error(`Unable to find "${n}" generation head`);const a=await z(n,(0,i.pick)(o,n.inputNames));return{...s,...o,...a}}async generate(e){return this._generation_mode="text",super.generate(e)}async generate_images(e){this._generation_mode="image";const t=(e.inputs??e[this.main_input_name]).dims[1],s=(await super.generate(e)).slice(null,[t,null]),r=this.sessions.image_decode,{decoded_image:o}=await z(r,{generated_tokens:s}),n=o.add_(1).mul_(127.5).clamp_(0,255).to("uint8"),a=[];for(const e of n){const t=m.RawImage.fromTensor(e);a.push(t)}return a}}class Ni extends H{constructor({char_logits:e,bpe_logits:t,wp_logits:s}){super(),this.char_logits=e,this.bpe_logits=t,this.wp_logits=s}get 
logits(){return[this.char_logits,this.bpe_logits,this.wp_logits]}}class Vi extends X{}class Oi extends Vi{async _call(e){return new Ni(await super._call(e))}}class Bi extends X{}class Di extends Bi{}class Gi extends Bi{}class Ri extends X{}class qi extends Ri{}class $i extends Ri{}class Wi{static MODEL_CLASS_MAPPINGS=null;static BASE_IF_FAIL=!1;static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:o=null,local_files_only:n=!1,revision:a="main",model_file_name:i=null,subfolder:l="onnx",device:c=null,dtype:d=null,use_external_data_format:u=null,session_options:p={}}={}){const m={progress_callback:t,config:s,cache_dir:o,local_files_only:n,revision:a,model_file_name:i,subfolder:l,device:c,dtype:d,use_external_data_format:u,session_options:p};if(m.config=await r.AutoConfig.from_pretrained(e,m),!this.MODEL_CLASS_MAPPINGS)throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: "+this.name);for(const t of this.MODEL_CLASS_MAPPINGS){const s=t.get(m.config.model_type);if(s)return await s[1].from_pretrained(e,m)}if(this.BASE_IF_FAIL)return console.warn(`Unknown model class "${m.config.model_type}", attempting to construct from base class.`),await X.from_pretrained(e,m);throw Error(`Unsupported model type: ${m.config.model_type}`)}}const Ui=new 
Map([["bert",["BertModel",K]],["nomic_bert",["NomicBertModel",oe]],["roformer",["RoFormerModel",ae]],["electra",["ElectraModel",Me]],["esm",["EsmModel",Xe]],["convbert",["ConvBertModel",pe]],["camembert",["CamembertModel",ke]],["deberta",["DebertaModel",Ae]],["deberta-v2",["DebertaV2Model",Ne]],["mpnet",["MPNetModel",ot]],["albert",["AlbertModel",ht]],["distilbert",["DistilBertModel",Re]],["roberta",["RobertaModel",$t]],["xlm",["XLMModel",Jt]],["xlm-roberta",["XLMRobertaModel",ss]],["clap",["ClapModel",ti]],["clip",["CLIPModel",ys]],["clipseg",["CLIPSegModel",Gs]],["chinese_clip",["ChineseCLIPModel",js]],["siglip",["SiglipModel",Es]],["jina_clip",["JinaCLIPModel",Vs]],["mobilebert",["MobileBertModel",Ze]],["squeezebert",["SqueezeBertModel",dt]],["wav2vec2",["Wav2Vec2Model",aa]],["wav2vec2-bert",["Wav2Vec2BertModel",ya]],["unispeech",["UniSpeechModel",fa]],["unispeech-sat",["UniSpeechSatModel",Ta]],["hubert",["HubertModel",Sa]],["wavlm",["WavLMModel",Ia]],["audio-spectrogram-transformer",["ASTModel",ls]],["vits",["VitsModel",ni]],["pyannote",["PyAnnoteModel",ua]],["wespeaker-resnet",["WeSpeakerResNetModel",_a]],["detr",["DetrModel",qo]],["rt_detr",["RTDetrModel",Ho]],["table-transformer",["TableTransformerModel",Zo]],["vit",["ViTModel",oo]],["ijepa",["IJepaModel",io]],["pvt",["PvtModel",mo]],["vit_msn",["ViTMSNModel",Mo]],["vit_mae",["ViTMAEModel",fo]],["groupvit",["GroupViTModel",bo]],["fastvit",["FastViTModel",xo]],["mobilevit",["MobileViTModel",Co]],["mobilevitv2",["MobileViTV2Model",Eo]],["owlvit",["OwlViTModel",zo]],["owlv2",["Owlv2Model",Vo]],["beit",["BeitModel",Do]],["deit",["DeiTModel",rn]],["hiera",["HieraModel",an]],["convnext",["ConvNextModel",On]],["convnextv2",["ConvNextV2Model",Gn]],["dinov2",["Dinov2Model",$n]],["resnet",["ResNetModel",dn]],["swin",["SwinModel",mn]],["swin2sr",["Swin2SRModel",fn]],["donut-swin",["DonutSwinModel",Nn]],["yolos",["YolosModel",Qn]],["dpt",["DPTModel",wn]],["glpn",["GLPNModel",In]],["hifigan",["SpeechT5HifiGan",Ra]],["effi
cientnet",["EfficientNetModel",_i]],["decision_transformer",["DecisionTransformerModel",Ii]],["patchtst",["PatchTSTForPrediction",Di]],["patchtsmixer",["PatchTSMixerForPrediction",qi]],["mobilenet_v1",["MobileNetV1Model",bi]],["mobilenet_v2",["MobileNetV2Model",ki]],["mobilenet_v3",["MobileNetV3Model",vi]],["mobilenet_v4",["MobileNetV4Model",Ai]],["maskformer",["MaskFormerModel",An]],["mgp-str",["MgpstrForSceneTextRecognition",Oi]]]),Qi=new Map([["t5",["T5Model",Tt]],["longt5",["LongT5Model",xt]],["mt5",["MT5Model",Ft]],["bart",["BartModel",St]],["mbart",["MBartModel",It]],["marian",["MarianModel",ea]],["whisper",["WhisperModel",us]],["m2m_100",["M2M100Model",ra]],["blenderbot",["BlenderbotModel",Ot]],["blenderbot-small",["BlenderbotSmallModel",Gt]]]),Xi=new Map([["bloom",["BloomModel",Hr]],["jais",["JAISModel",Qs]],["gpt2",["GPT2Model",$s]],["gptj",["GPTJModel",sr]],["gpt_bigcode",["GPTBigCodeModel",nr]],["gpt_neo",["GPTNeoModel",Js]],["gpt_neox",["GPTNeoXModel",Zs]],["codegen",["CodeGenModel",lr]],["llama",["LlamaModel",ur]],["olmo",["OlmoModel",gr]],["olmo2",["Olmo2Model",Tr]],["mobilellm",["MobileLLMModel",_r]],["granite",["GraniteModel",xr]],["cohere",["CohereModel",Fr]],["gemma",["GemmaModel",Sr]],["gemma2",["Gemma2Model",Lr]],["openelm",["OpenELMModel",jr]],["qwen2",["Qwen2Model",Or]],["phi",["PhiModel",qr]],["phi3",["Phi3Model",Ur]],["mpt",["MptModel",Kr]],["opt",["OPTModel",to]],["mistral",["MistralModel",Ua]],["starcoder2",["Starcoder2Model",Ha]],["falcon",["FalconModel",Ka]],["stablelm",["StableLmModel",ui]]]),Hi=new Map([["speecht5",["SpeechT5ForSpeechToText",Da]],["whisper",["WhisperForConditionalGeneration",ps]]]),Ji=new Map([["speecht5",["SpeechT5ForTextToSpeech",Ga]]]),Yi=new Map([["vits",["VitsModel",ni]],["musicgen",["MusicgenForConditionalGeneration",wi]]]),Ki=new 
Map([["bert",["BertForSequenceClassification",ee]],["roformer",["RoFormerForSequenceClassification",le]],["electra",["ElectraForSequenceClassification",Te]],["esm",["EsmForSequenceClassification",Je]],["convbert",["ConvBertForSequenceClassification",_e]],["camembert",["CamembertForSequenceClassification",Fe]],["deberta",["DebertaForSequenceClassification",Le]],["deberta-v2",["DebertaV2ForSequenceClassification",Oe]],["mpnet",["MPNetForSequenceClassification",at]],["albert",["AlbertForSequenceClassification",ft]],["distilbert",["DistilBertForSequenceClassification",qe]],["roberta",["RobertaForSequenceClassification",Ut]],["xlm",["XLMForSequenceClassification",Kt]],["xlm-roberta",["XLMRobertaForSequenceClassification",os]],["bart",["BartForSequenceClassification",Et]],["mbart",["MBartForSequenceClassification",jt]],["mobilebert",["MobileBertForSequenceClassification",tt]],["squeezebert",["SqueezeBertForSequenceClassification",pt]]]),Zi=new Map([["bert",["BertForTokenClassification",te]],["roformer",["RoFormerForTokenClassification",ce]],["electra",["ElectraForTokenClassification",be]],["esm",["EsmForTokenClassification",Ye]],["convbert",["ConvBertForTokenClassification",he]],["camembert",["CamembertForTokenClassification",ve]],["deberta",["DebertaForTokenClassification",Ie]],["deberta-v2",["DebertaV2ForTokenClassification",Be]],["mpnet",["MPNetForTokenClassification",it]],["distilbert",["DistilBertForTokenClassification",$e]],["roberta",["RobertaForTokenClassification",Qt]],["xlm",["XLMForTokenClassification",Zt]],["xlm-roberta",["XLMRobertaForTokenClassification",ns]]]),el=new 
Map([["t5",["T5ForConditionalGeneration",bt]],["longt5",["LongT5ForConditionalGeneration",kt]],["mt5",["MT5ForConditionalGeneration",vt]],["bart",["BartForConditionalGeneration",At]],["mbart",["MBartForConditionalGeneration",zt]],["marian",["MarianMTModel",ta]],["m2m_100",["M2M100ForConditionalGeneration",oa]],["blenderbot",["BlenderbotForConditionalGeneration",Bt]],["blenderbot-small",["BlenderbotSmallForConditionalGeneration",Rt]]]),tl=new Map([["bloom",["BloomForCausalLM",Jr]],["gpt2",["GPT2LMHeadModel",Ws]],["jais",["JAISLMHeadModel",Xs]],["gptj",["GPTJForCausalLM",rr]],["gpt_bigcode",["GPTBigCodeForCausalLM",ar]],["gpt_neo",["GPTNeoForCausalLM",Ys]],["gpt_neox",["GPTNeoXForCausalLM",er]],["codegen",["CodeGenForCausalLM",cr]],["llama",["LlamaForCausalLM",pr]],["olmo",["OlmoForCausalLM",Mr]],["olmo2",["Olmo2ForCausalLM",br]],["mobilellm",["MobileLLMForCausalLM",hr]],["granite",["GraniteForCausalLM",kr]],["cohere",["CohereForCausalLM",vr]],["gemma",["GemmaForCausalLM",Ar]],["gemma2",["Gemma2ForCausalLM",Ir]],["openelm",["OpenELMForCausalLM",Nr]],["qwen2",["Qwen2ForCausalLM",Br]],["phi",["PhiForCausalLM",$r]],["phi3",["Phi3ForCausalLM",Qr]],["mpt",["MptForCausalLM",Zr]],["opt",["OPTForCausalLM",so]],["mbart",["MBartForCausalLM",Nt]],["mistral",["MistralForCausalLM",Qa]],["starcoder2",["Starcoder2ForCausalLM",Ja]],["falcon",["FalconForCausalLM",Za]],["trocr",["TrOCRForCausalLM",$a]],["stablelm",["StableLmForCausalLM",pi]]]),sl=new Map([["multi_modality",["MultiModalityCausalLM",ji]]]),rl=new 
Map([["bert",["BertForMaskedLM",Z]],["roformer",["RoFormerForMaskedLM",ie]],["electra",["ElectraForMaskedLM",we]],["esm",["EsmForMaskedLM",He]],["convbert",["ConvBertForMaskedLM",me]],["camembert",["CamembertForMaskedLM",ye]],["deberta",["DebertaForMaskedLM",Ee]],["deberta-v2",["DebertaV2ForMaskedLM",Ve]],["mpnet",["MPNetForMaskedLM",nt]],["albert",["AlbertForMaskedLM",Mt]],["distilbert",["DistilBertForMaskedLM",Ue]],["roberta",["RobertaForMaskedLM",Wt]],["xlm",["XLMWithLMHeadModel",Yt]],["xlm-roberta",["XLMRobertaForMaskedLM",rs]],["mobilebert",["MobileBertForMaskedLM",et]],["squeezebert",["SqueezeBertForMaskedLM",ut]]]),ol=new Map([["bert",["BertForQuestionAnswering",se]],["roformer",["RoFormerForQuestionAnswering",de]],["electra",["ElectraForQuestionAnswering",Pe]],["convbert",["ConvBertForQuestionAnswering",fe]],["camembert",["CamembertForQuestionAnswering",Ce]],["deberta",["DebertaForQuestionAnswering",ze]],["deberta-v2",["DebertaV2ForQuestionAnswering",De]],["mpnet",["MPNetForQuestionAnswering",lt]],["albert",["AlbertForQuestionAnswering",gt]],["distilbert",["DistilBertForQuestionAnswering",We]],["roberta",["RobertaForQuestionAnswering",Xt]],["xlm",["XLMForQuestionAnswering",es]],["xlm-roberta",["XLMRobertaForQuestionAnswering",as]],["mobilebert",["MobileBertForQuestionAnswering",st]],["squeezebert",["SqueezeBertForQuestionAnswering",mt]]]),nl=new Map([["vision-encoder-decoder",["VisionEncoderDecoderModel",ms]],["idefics3",["Idefics3ForConditionalGeneration",xs]]]),al=new Map([["llava",["LlavaForConditionalGeneration",hs]],["llava_onevision",["LlavaOnevisionForConditionalGeneration",fs]],["moondream1",["Moondream1ForConditionalGeneration",gs]],["florence2",["Florence2ForConditionalGeneration",ws]],["qwen2-vl",["Qwen2VLForConditionalGeneration",Gr]],["idefics3",["Idefics3ForConditionalGeneration",xs]],["paligemma",["PaliGemmaForConditionalGeneration",bs]]]),il=new Map([["vision-encoder-decoder",["VisionEncoderDecoderModel",ms]]]),ll=new 
Map([["vit",["ViTForImageClassification",no]],["ijepa",["IJepaForImageClassification",lo]],["pvt",["PvtForImageClassification",_o]],["vit_msn",["ViTMSNForImageClassification",wo]],["fastvit",["FastViTForImageClassification",ko]],["mobilevit",["MobileViTForImageClassification",So]],["mobilevitv2",["MobileViTV2ForImageClassification",Lo]],["beit",["BeitForImageClassification",Go]],["deit",["DeiTForImageClassification",on]],["hiera",["HieraForImageClassification",ln]],["convnext",["ConvNextForImageClassification",Bn]],["convnextv2",["ConvNextV2ForImageClassification",Rn]],["dinov2",["Dinov2ForImageClassification",Wn]],["resnet",["ResNetForImageClassification",un]],["swin",["SwinForImageClassification",_n]],["segformer",["SegformerForImageClassification",li]],["efficientnet",["EfficientNetForImageClassification",hi]],["mobilenet_v1",["MobileNetV1ForImageClassification",Pi]],["mobilenet_v2",["MobileNetV2ForImageClassification",yi]],["mobilenet_v3",["MobileNetV3ForImageClassification",Ci]],["mobilenet_v4",["MobileNetV4ForImageClassification",Ei]]]),cl=new Map([["detr",["DetrForObjectDetection",$o]],["rt_detr",["RTDetrForObjectDetection",Jo]],["table-transformer",["TableTransformerForObjectDetection",en]],["yolos",["YolosForObjectDetection",Xn]]]),dl=new Map([["owlvit",["OwlViTForObjectDetection",jo]],["owlv2",["Owlv2ForObjectDetection",Oo]]]),ul=new Map([["detr",["DetrForSegmentation",Wo]],["clipseg",["CLIPSegForImageSegmentation",Rs]]]),pl=new Map([["segformer",["SegformerForSemanticSegmentation",ci]],["sapiens",["SapiensForSemanticSegmentation",kn]]]),ml=new Map([["detr",["DetrForSegmentation",Wo]],["maskformer",["MaskFormerForInstanceSegmentation",En]]]),_l=new Map([["sam",["SamModel",Yn]]]),hl=new Map([["wav2vec2",["Wav2Vec2ForCTC",ia]],["wav2vec2-bert",["Wav2Vec2BertForCTC",Fa]],["unispeech",["UniSpeechForCTC",ga]],["unispeech-sat",["UniSpeechSatForCTC",ba]],["wavlm",["WavLMForCTC",za]],["hubert",["HubertForCTC",Aa]]]),fl=new 
Map([["wav2vec2",["Wav2Vec2ForSequenceClassification",la]],["wav2vec2-bert",["Wav2Vec2BertForSequenceClassification",va]],["unispeech",["UniSpeechForSequenceClassification",Ma]],["unispeech-sat",["UniSpeechSatForSequenceClassification",Pa]],["wavlm",["WavLMForSequenceClassification",ja]],["hubert",["HubertForSequenceClassification",Ea]],["audio-spectrogram-transformer",["ASTForAudioClassification",cs]]]),gl=new Map([["wavlm",["WavLMForXVector",Na]]]),Ml=new Map([["unispeech-sat",["UniSpeechSatForAudioFrameClassification",xa]],["wavlm",["WavLMForAudioFrameClassification",Va]],["wav2vec2",["Wav2Vec2ForAudioFrameClassification",ca]],["pyannote",["PyAnnoteForAudioFrameClassification",pa]]]),wl=new Map([["vitmatte",["VitMatteForImageMatting",Fo]]]),Tl=new Map([["patchtst",["PatchTSTForPrediction",Gi]],["patchtsmixer",["PatchTSMixerForPrediction",$i]]]),bl=new Map([["swin2sr",["Swin2SRForImageSuperResolution",gn]]]),Pl=new Map([["dpt",["DPTForDepthEstimation",Tn]],["depth_anything",["DepthAnythingForDepthEstimation",Pn]],["glpn",["GLPNForDepthEstimation",zn]],["sapiens",["SapiensForDepthEstimation",yn]],["depth_pro",["DepthProForDepthEstimation",Cn]]]),xl=new Map([["sapiens",["SapiensForNormalEstimation",Fn]]]),kl=new Map([["vitpose",["VitPoseForPoseEstimation",uo]]]),yl=new Map([["clip",["CLIPVisionModelWithProjection",Ss]],["siglip",["SiglipVisionModel",Is]],["jina_clip",["JinaCLIPVisionModel",Bs]]]),Fl=[[Ui,T],[Qi,b],[Xi,k],[Ki,T],[Zi,T],[el,P],[Hi,P],[tl,k],[sl,C],[rl,T],[ol,T],[nl,x],[al,F],[ll,T],[ul,T],[ml,T],[pl,T],[wl,T],[Tl,T],[bl,T],[Pl,T],[xl,T],[kl,T],[cl,T],[dl,T],[_l,y],[hl,T],[fl,T],[Ji,P],[Yi,T],[gl,T],[Ml,T],[yl,T]];for(const[e,t]of Fl)for(const[s,r]of e.values())S.set(s,t),E.set(r,s),A.set(s,r);const vl=[["MusicgenForConditionalGeneration",wi,v],["CLIPTextModelWithProjection",vs,T],["SiglipTextModel",Ls,T],["JinaCLIPTextModel",Os,T],["ClapTextModelWithProjection",si,T],["ClapAudioModelWithProjection",ri,T]];for(const[e,t,s]of 
vl)S.set(e,s),E.set(t,e),A.set(e,t);class Cl extends Wi{static MODEL_CLASS_MAPPINGS=Fl.map((e=>e[0]));static BASE_IF_FAIL=!0}class Sl extends Wi{static MODEL_CLASS_MAPPINGS=[Ki]}class Al extends Wi{static MODEL_CLASS_MAPPINGS=[Zi]}class El extends Wi{static MODEL_CLASS_MAPPINGS=[el]}class Ll extends Wi{static MODEL_CLASS_MAPPINGS=[Hi]}class Il extends Wi{static MODEL_CLASS_MAPPINGS=[Ji]}class zl extends Wi{static MODEL_CLASS_MAPPINGS=[Yi]}class jl extends Wi{static MODEL_CLASS_MAPPINGS=[tl]}class Nl extends Wi{static MODEL_CLASS_MAPPINGS=[rl]}class Vl extends Wi{static MODEL_CLASS_MAPPINGS=[ol]}class Ol extends Wi{static MODEL_CLASS_MAPPINGS=[nl]}class Bl extends Wi{static MODEL_CLASS_MAPPINGS=[ll]}class Dl extends Wi{static MODEL_CLASS_MAPPINGS=[ul]}class Gl extends Wi{static MODEL_CLASS_MAPPINGS=[pl]}class Rl extends Wi{static MODEL_CLASS_MAPPINGS=[ml]}class ql extends Wi{static MODEL_CLASS_MAPPINGS=[cl]}class $l extends Wi{static MODEL_CLASS_MAPPINGS=[dl]}class Wl extends Wi{static MODEL_CLASS_MAPPINGS=[_l]}class Ul extends Wi{static MODEL_CLASS_MAPPINGS=[hl]}class Ql extends Wi{static MODEL_CLASS_MAPPINGS=[fl]}class Xl extends Wi{static MODEL_CLASS_MAPPINGS=[gl]}class Hl extends Wi{static MODEL_CLASS_MAPPINGS=[Ml]}class Jl extends Wi{static MODEL_CLASS_MAPPINGS=[il]}class Yl extends Wi{static MODEL_CLASS_MAPPINGS=[wl]}class Kl extends Wi{static MODEL_CLASS_MAPPINGS=[bl]}class Zl extends Wi{static MODEL_CLASS_MAPPINGS=[Pl]}class ec extends Wi{static MODEL_CLASS_MAPPINGS=[xl]}class tc extends Wi{static MODEL_CLASS_MAPPINGS=[kl]}class sc extends Wi{static MODEL_CLASS_MAPPINGS=[yl]}class rc extends H{constructor({logits:e,past_key_values:t,encoder_outputs:s,decoder_attentions:r=null,cross_attentions:o=null}){super(),this.logits=e,this.past_key_values=t,this.encoder_outputs=s,this.decoder_attentions=r,this.cross_attentions=o}}class oc extends H{constructor({logits:e}){super(),this.logits=e}}class nc extends 
H{constructor({logits:e,embeddings:t}){super(),this.logits=e,this.embeddings=t}}class ac extends H{constructor({logits:e}){super(),this.logits=e}}class ic extends H{constructor({logits:e}){super(),this.logits=e}}class lc extends H{constructor({start_logits:e,end_logits:t}){super(),this.start_logits=e,this.end_logits=t}}class cc extends H{constructor({logits:e}){super(),this.logits=e}}class dc extends H{constructor({logits:e,past_key_values:t}){super(),this.logits=e,this.past_key_values=t}}class uc extends H{constructor({alphas:e}){super(),this.alphas=e}}class pc extends H{constructor({waveform:e,spectrogram:t}){super(),this.waveform=e,this.spectrogram=t}}},"./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js":(e,t,s)=>{s.r(t),s.d(t,{ASTFeatureExtractor:()=>n});var r=s("./src/base/feature_extraction_utils.js"),o=(s("./src/utils/tensor.js"),s("./src/utils/audio.js"));class n extends r.FeatureExtractor{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,o.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,o.window_function)(400,"hann",{periodic:!1}),this.mean=this.config.mean,this.std=this.config.std}async _extract_fbank_features(e,t){return(0,o.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,max_num_frames:t,transpose:!0})}async _call(e){(0,r.validate_audio_inputs)(e,"ASTFeatureExtractor");const t=await this._extract_fbank_features(e,this.config.max_length);if(this.config.do_normalize){const e=2*this.std,s=t.data;for(let t=0;t<s.length;++t)s[t]=(s[t]-this.mean)/e}return{input_values:t.unsqueeze_(0)}}}},"./src/models/auto/feature_extraction_auto.js":(e,t,s)=>{s.r(t),s.d(t,{AutoFeatureExtractor:()=>a});var 
r=s("./src/utils/constants.js"),o=s("./src/utils/hub.js"),n=(s("./src/base/feature_extraction_utils.js"),s("./src/models/feature_extractors.js"));class a{static async from_pretrained(e,t={}){const s=await(0,o.getModelJSON)(e,r.FEATURE_EXTRACTOR_NAME,!0,t),a=s.feature_extractor_type,i=n[a];if(!i)throw new Error(`Unknown feature_extractor_type: '${a}'. Please report this at ${r.GITHUB_ISSUE_URL}.`);return new i(s)}}},"./src/models/auto/image_processing_auto.js":(e,t,s)=>{s.r(t),s.d(t,{AutoImageProcessor:()=>i});var r=s("./src/utils/constants.js"),o=s("./src/utils/hub.js"),n=s("./src/base/image_processors_utils.js"),a=s("./src/models/image_processors.js");class i{static async from_pretrained(e,t={}){const s=await(0,o.getModelJSON)(e,r.IMAGE_PROCESSOR_NAME,!0,t),i=s.image_processor_type??s.feature_extractor_type;let l=a[i];return l||(void 0!==i&&console.warn(`Image processor type '${i}' not found, assuming base ImageProcessor. Please report this at ${r.GITHUB_ISSUE_URL}.`),l=n.ImageProcessor),new l(s)}}},"./src/models/auto/processing_auto.js":(e,t,s)=>{s.r(t),s.d(t,{AutoProcessor:()=>c});var r=s("./src/utils/constants.js"),o=s("./src/utils/hub.js"),n=s("./src/base/processing_utils.js"),a=s("./src/models/processors.js"),i=s("./src/models/image_processors.js"),l=s("./src/models/feature_extractors.js");class c{static async from_pretrained(e,t={}){const s=await(0,o.getModelJSON)(e,r.IMAGE_PROCESSOR_NAME,!0,t),{image_processor_type:c,feature_extractor_type:d,processor_class:u}=s;if(u&&a[u])return a[u].from_pretrained(e,t);if(!c&&!d)throw new Error("No `image_processor_type` or `feature_extractor_type` found in the config.");const p={};if(c){const e=i[c];if(!e)throw new Error(`Unknown image_processor_type: '${c}'.`);p.image_processor=new e(s)}if(d){const e=i[d];if(e)p.image_processor=new e(s);else{const e=l[d];if(!e)throw new Error(`Unknown feature_extractor_type: '${d}'.`);p.feature_extractor=new e(s)}}return new 
n.Processor({},p)}}},"./src/models/beit/image_processing_beit.js":(e,t,s)=>{s.r(t),s.d(t,{BeitFeatureExtractor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/bit/image_processing_bit.js":(e,t,s)=>{s.r(t),s.d(t,{BitImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/chinese_clip/image_processing_chinese_clip.js":(e,t,s)=>{s.r(t),s.d(t,{ChineseCLIPFeatureExtractor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/clap/feature_extraction_clap.js":(e,t,s)=>{s.r(t),s.d(t,{ClapFeatureExtractor:()=>n});var r=s("./src/base/feature_extraction_utils.js"),o=(s("./src/utils/tensor.js"),s("./src/utils/audio.js"));class n extends r.FeatureExtractor{constructor(e){super(e),this.mel_filters=(0,o.mel_filter_bank)(this.config.nb_frequency_bins,this.config.feature_size,this.config.frequency_min,this.config.frequency_max,this.config.sampling_rate,null,"htk"),this.mel_filters_slaney=(0,o.mel_filter_bank)(this.config.nb_frequency_bins,this.config.feature_size,this.config.frequency_min,this.config.frequency_max,this.config.sampling_rate,"slaney","slaney"),this.window=(0,o.window_function)(this.config.fft_window_size,"hann")}async _get_input_mel(e,t,s,r){let o,n=!1;const a=e.length-t;if(a>0){if("rand_trunc"!==s)throw new Error(`Truncation strategy "${s}" not implemented`);{n=!0;const s=Math.floor(Math.random()*(a+1));e=e.subarray(s,s+t),o=await this._extract_fbank_features(e,this.mel_filters_slaney,this.config.nb_max_samples)}}else{if(a<0){let s=new Float64Array(t);if(s.set(e),"repeat"===r)for(let r=e.length;r<t;r+=e.length)s.set(e.subarray(0,Math.min(e.length,t-r)),r);else if("repeatpad"===r)for(let t=e.length;t<-a;t+=e.length)s.set(e,t);e=s}if("fusion"===s)throw new Error(`Truncation strategy "${s}" not implemented`);o=await 
this._extract_fbank_features(e,this.mel_filters_slaney,this.config.nb_max_samples)}return o.unsqueeze_(0)}async _extract_fbank_features(e,t,s=null){return(0,o.spectrogram)(e,this.window,this.config.fft_window_size,this.config.hop_length,{power:2,mel_filters:t,log_mel:"dB",max_num_frames:s,do_pad:!1,transpose:!0})}async _call(e,{max_length:t=null}={}){(0,r.validate_audio_inputs)(e,"ClapFeatureExtractor");return{input_features:(await this._get_input_mel(e,t??this.config.nb_max_samples,this.config.truncation,this.config.padding)).unsqueeze_(0)}}}},"./src/models/clip/image_processing_clip.js":(e,t,s)=>{s.r(t),s.d(t,{CLIPFeatureExtractor:()=>n,CLIPImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/convnext/image_processing_convnext.js":(e,t,s)=>{s.r(t),s.d(t,{ConvNextFeatureExtractor:()=>n,ConvNextImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){super(e),this.crop_pct=this.config.crop_pct??.875}async resize(e){const t=this.size?.shortest_edge;if(void 0===t)throw new Error("Size dictionary must contain 'shortest_edge' key.");if(t<384){const s=Math.floor(t/this.crop_pct),[r,o]=this.get_resize_output_image_size(e,{shortest_edge:s});e=await e.resize(r,o,{resample:this.resample}),e=await e.center_crop(t,t)}else e=await e.resize(t,t,{resample:this.resample});return e}}class n extends o{}},"./src/models/deit/image_processing_deit.js":(e,t,s)=>{s.r(t),s.d(t,{DeiTFeatureExtractor:()=>n,DeiTImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/detr/image_processing_detr.js":(e,t,s)=>{s.r(t),s.d(t,{DetrFeatureExtractor:()=>a,DetrImageProcessor:()=>n});var r=s("./src/base/image_processors_utils.js"),o=s("./src/utils/tensor.js");class n extends r.ImageProcessor{async _call(e){const t=await 
super._call(e),s=[t.pixel_values.dims[0],64,64],r=(0,o.full)(s,1n);return{...t,pixel_mask:r}}post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}post_process_panoptic_segmentation(...e){return(0,r.post_process_panoptic_segmentation)(...e)}post_process_instance_segmentation(...e){return(0,r.post_process_instance_segmentation)(...e)}}class a extends n{}},"./src/models/donut/image_processing_donut.js":(e,t,s)=>{s.r(t),s.d(t,{DonutFeatureExtractor:()=>n,DonutImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{pad_image(e,t,s,r={}){const[o,n,a]=t;let i=this.image_mean;Array.isArray(this.image_mean)||(i=new Array(a).fill(i));let l=this.image_std;Array.isArray(l)||(l=new Array(a).fill(i));const c=i.map(((e,t)=>-e/l[t]));return super.pad_image(e,t,s,{center:!0,constant_values:c,...r})}}class n extends o{}},"./src/models/dpt/image_processing_dpt.js":(e,t,s)=>{s.r(t),s.d(t,{DPTFeatureExtractor:()=>n,DPTImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/efficientnet/image_processing_efficientnet.js":(e,t,s)=>{s.r(t),s.d(t,{EfficientNetImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){super(e),this.include_top=this.config.include_top??!0,this.include_top&&(this.image_std=this.image_std.map((e=>e*e)))}}},"./src/models/feature_extractors.js":(e,t,s)=>{s.r(t),s.d(t,{ASTFeatureExtractor:()=>r.ASTFeatureExtractor,ClapFeatureExtractor:()=>o.ClapFeatureExtractor,ImageFeatureExtractor:()=>u.ImageProcessor,PyAnnoteFeatureExtractor:()=>n.PyAnnoteFeatureExtractor,SeamlessM4TFeatureExtractor:()=>a.SeamlessM4TFeatureExtractor,SpeechT5FeatureExtractor:()=>i.SpeechT5FeatureExtractor,Wav2Vec2FeatureExtractor:()=>l.Wav2Vec2FeatureExtractor,WeSpeakerFeatureExtractor:()=>c.WeSpeakerFeatureExtractor,WhisperFeatureExtractor:()=>d.WhisperFeatureExtractor});var 
r=s("./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js"),o=s("./src/models/clap/feature_extraction_clap.js"),n=s("./src/models/pyannote/feature_extraction_pyannote.js"),a=s("./src/models/seamless_m4t/feature_extraction_seamless_m4t.js"),i=s("./src/models/speecht5/feature_extraction_speecht5.js"),l=s("./src/models/wav2vec2/feature_extraction_wav2vec2.js"),c=s("./src/models/wespeaker/feature_extraction_wespeaker.js"),d=s("./src/models/whisper/feature_extraction_whisper.js"),u=s("./src/base/image_processors_utils.js")},"./src/models/florence2/processing_florence2.js":(e,t,s)=>{s.r(t),s.d(t,{Florence2Processor:()=>a});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js");class a extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;constructor(e,t){super(e,t);const{tasks_answer_post_processing_type:s,task_prompts_without_inputs:r,task_prompts_with_input:o}=this.image_processor.config;this.tasks_answer_post_processing_type=new Map(Object.entries(s??{})),this.task_prompts_without_inputs=new Map(Object.entries(r??{})),this.task_prompts_with_input=new Map(Object.entries(o??{})),this.regexes={quad_boxes:/(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm,bboxes:/([^<]+)?<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm},this.size_per_bin=1e3}construct_prompts(e){"string"==typeof e&&(e=[e]);const t=[];for(const s of e)if(this.task_prompts_without_inputs.has(s))t.push(this.task_prompts_without_inputs.get(s));else{for(const[e,r]of this.task_prompts_with_input)if(s.includes(e)){t.push(r.replaceAll("{input}",s).replaceAll(e,""));break}t.length!==e.length&&t.push(s)}return t}post_process_generation(e,t,s){const r=this.tasks_answer_post_processing_type.get(t)??"pure_text";let 
o;switch(e=e.replaceAll("<s>","").replaceAll("</s>",""),r){case"pure_text":o=e;break;case"description_with_bboxes":case"bboxes":case"phrase_grounding":case"ocr":const n="ocr"===r?"quad_boxes":"bboxes",a=e.matchAll(this.regexes[n]),i=[],l=[];for(const[e,t,...r]of a)i.push(t?t.trim():i.at(-1)??""),l.push(r.map(((e,t)=>(Number(e)+.5)/this.size_per_bin*s[t%2])));o={labels:i,[n]:l};break;default:throw new Error(`Task "${t}" (of type "${r}") not yet implemented.`)}return{[t]:o}}async _call(e,t=null,s={}){if(!e&&!t)throw new Error("Either text or images must be provided");return{...await this.image_processor(e,s),...t?this.tokenizer(t,s):{}}}}},"./src/models/glpn/image_processing_glpn.js":(e,t,s)=>{s.r(t),s.d(t,{GLPNFeatureExtractor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/idefics3/image_processing_idefics3.js":(e,t,s)=>{s.r(t),s.d(t,{Idefics3ImageProcessor:()=>n});var r=s("./src/base/image_processors_utils.js"),o=s("./src/utils/tensor.js");class n extends r.ImageProcessor{constructor(e){super(e),this.do_image_splitting=e.do_image_splitting??!0,this.max_image_size=e.max_image_size}get_resize_for_vision_encoder(e,t){let[s,r]=e.dims.slice(-2);const o=r/s;return r>=s?(r=Math.ceil(r/t)*t,s=Math.floor(r/o),s=Math.ceil(s/t)*t):(s=Math.ceil(s/t)*t,r=Math.floor(s*o),r=Math.ceil(r/t)*t),{height:s,width:r}}async _call(e,{do_image_splitting:t=null,return_row_col_info:s=!1}={}){let r;if(Array.isArray(e)){if(0===e.length||!e[0])throw new Error("No images provided.");r=Array.isArray(e[0])?e:[e]}else r=[[e]];let n=[],a=[],i=[];const l=[],c=[];for(const e of r){let s=await Promise.all(e.map((e=>this.preprocess(e))));l.push(...s.map((e=>e.original_size))),c.push(...s.map((e=>e.reshaped_input_size))),s.forEach((e=>e.pixel_values.unsqueeze_(0)));const{longest_edge:r}=this.max_image_size;let d;if(t??this.do_image_splitting){let e=new Array(s.length),t=new Array(s.length);d=await Promise.all(s.map((async(s,n)=>{const 
a=this.get_resize_for_vision_encoder(s.pixel_values,r),i=await(0,o.interpolate_4d)(s.pixel_values,{size:[a.height,a.width]}),{frames:l,num_splits_h:c,num_splits_w:d}=await this.split_image(i,this.max_image_size);return e[n]=c,t[n]=d,(0,o.cat)(l,0)}))),a.push(e),i.push(t)}else{const e=[r,r];d=await Promise.all(s.map((t=>(0,o.interpolate_4d)(t.pixel_values,{size:e})))),a.push(new Array(s.length).fill(0)),i.push(new Array(s.length).fill(0))}n.push((0,o.cat)(d,0))}const d=n.length,[u,p,m,_]=n[0].dims;let h,f;if(1===d)h=n[0].unsqueeze_(0),f=(0,o.full)([d,u,m,_],!0);else{const e=Math.max(...n.map((e=>e.dims.at(0))));f=(0,o.full)([d,e,m,_],!0);const t=f.data,s=e*m*_;for(let r=0;r<d;++r){const a=n[r].dims[0];if(a<e){n[r]=(0,o.cat)([n[r],(0,o.full)([e-a,p,m,_],0)],0);const i=r*s+a*m*_,l=(r+1)*s;t.fill(!1,i,l)}}h=(0,o.stack)(n,0)}return{pixel_values:h,pixel_attention_mask:f,original_sizes:l,reshaped_input_sizes:c,...s?{rows:a,cols:i}:{}}}async split_image(e,{longest_edge:t}){const s=t,r=t,n=[],[a,i]=e.dims.slice(-2);let l=0,c=0;if(a>s||i>r){l=Math.ceil(a/s),c=Math.ceil(i/r);const t=Math.ceil(a/l),d=Math.ceil(i/c);for(let s=0;s<l;s++)for(let r=0;r<c;r++){const o=r*d,l=s*t,c=Math.min(o+d,i),u=Math.min(l+t,a);n.push(e.slice(null,null,[l,u],[o,c]))}const u=s,p=r;a===u&&i===p||(e=await(0,o.interpolate_4d)(e,{size:[u,p]}))}return n.push(e),{frames:n,num_splits_h:l,num_splits_w:c}}}},"./src/models/idefics3/processing_idefics3.js":(e,t,s)=>{s.r(t),s.d(t,{Idefics3Processor:()=>l});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js"),a=(s("./src/utils/image.js"),s("./src/utils/core.js"));function i(e,t,s,r,o,n){return 0===e&&0===t?function(e,t,s,r){return`${t}${r}`+s.repeat(e)+`${t}`}(s,r,o,n):function(e,t,s,r,o,n){let a="";for(let n=0;n<t;++n){for(let t=0;t<s;++t)a+=r+`<row_${n+1}_col_${t+1}>`+o.repeat(e);a+="\n"}return a+=`\n${r}${n}`+o.repeat(e)+`${r}`,a}(s,e,t,r,o,n)}class l extends r.Processor{static 
image_processor_class=o.AutoImageProcessor;static tokenizer_class=n.AutoTokenizer;static uses_processor_config=!0;fake_image_token="<fake_token_around_image>";image_token="<image>";global_img_token="<global-img>";async _call(e,t=null,s={}){let r;s.return_row_col_info??=!0,t&&(r=await this.image_processor(t,s)),Array.isArray(e)||(e=[e]);const o=r.rows??[new Array(e.length).fill(0)],n=r.cols??[new Array(e.length).fill(0)],l=this.config.image_seq_len,c=[],d=[];for(let t=0;t<e.length;++t){const s=e[t],r=o[t],u=n[t];c.push((0,a.count)(s,this.image_token));const p=r.map(((e,t)=>i(e,u[t],l,this.fake_image_token,this.image_token,this.global_img_token))),m=s.split(this.image_token);if(0===m.length)throw new Error("The image token should be present in the text.");let _=m[0];for(let e=0;e<p.length;++e)_+=p[e]+m[e+1];d.push(_)}return{...this.tokenizer(d),...r}}}},"./src/models/image_processors.js":(e,t,s)=>{s.r(t),s.d(t,{BeitFeatureExtractor:()=>r.BeitFeatureExtractor,BitImageProcessor:()=>o.BitImageProcessor,CLIPFeatureExtractor:()=>a.CLIPFeatureExtractor,CLIPImageProcessor:()=>a.CLIPImageProcessor,ChineseCLIPFeatureExtractor:()=>n.ChineseCLIPFeatureExtractor,ConvNextFeatureExtractor:()=>i.ConvNextFeatureExtractor,ConvNextImageProcessor:()=>i.ConvNextImageProcessor,DPTFeatureExtractor:()=>u.DPTFeatureExtractor,DPTImageProcessor:()=>u.DPTImageProcessor,DeiTFeatureExtractor:()=>l.DeiTFeatureExtractor,DeiTImageProcessor:()=>l.DeiTImageProcessor,DetrFeatureExtractor:()=>c.DetrFeatureExtractor,DetrImageProcessor:()=>c.DetrImageProcessor,DonutFeatureExtractor:()=>d.DonutFeatureExtractor,DonutImageProcessor:()=>d.DonutImageProcessor,EfficientNetImageProcessor:()=>p.EfficientNetImageProcessor,GLPNFeatureExtractor:()=>m.GLPNFeatureExtractor,Idefics3ImageProcessor:()=>_.Idefics3ImageProcessor,JinaCLIPImageProcessor:()=>f.JinaCLIPImageProcessor,LlavaOnevisionImageProcessor:()=>g.LlavaOnevisionImageProcessor,Mask2FormerImageProcessor:()=>M.Mask2FormerImageProcessor,MaskFormerFeatureExtrac
tor:()=>w.MaskFormerFeatureExtractor,MaskFormerImageProcessor:()=>w.MaskFormerImageProcessor,MobileNetV1FeatureExtractor:()=>T.MobileNetV1FeatureExtractor,MobileNetV1ImageProcessor:()=>T.MobileNetV1ImageProcessor,MobileNetV2FeatureExtractor:()=>b.MobileNetV2FeatureExtractor,MobileNetV2ImageProcessor:()=>b.MobileNetV2ImageProcessor,MobileNetV3FeatureExtractor:()=>P.MobileNetV3FeatureExtractor,MobileNetV3ImageProcessor:()=>P.MobileNetV3ImageProcessor,MobileNetV4FeatureExtractor:()=>x.MobileNetV4FeatureExtractor,MobileNetV4ImageProcessor:()=>x.MobileNetV4ImageProcessor,MobileViTFeatureExtractor:()=>k.MobileViTFeatureExtractor,MobileViTImageProcessor:()=>k.MobileViTImageProcessor,NougatImageProcessor:()=>y.NougatImageProcessor,OwlViTFeatureExtractor:()=>v.OwlViTFeatureExtractor,OwlViTImageProcessor:()=>v.OwlViTImageProcessor,Owlv2ImageProcessor:()=>F.Owlv2ImageProcessor,PvtImageProcessor:()=>C.PvtImageProcessor,Qwen2VLImageProcessor:()=>S.Qwen2VLImageProcessor,RTDetrImageProcessor:()=>A.RTDetrImageProcessor,SamImageProcessor:()=>E.SamImageProcessor,SegformerFeatureExtractor:()=>L.SegformerFeatureExtractor,SegformerImageProcessor:()=>L.SegformerImageProcessor,SiglipImageProcessor:()=>I.SiglipImageProcessor,Swin2SRImageProcessor:()=>z.Swin2SRImageProcessor,VLMImageProcessor:()=>h.VLMImageProcessor,ViTFeatureExtractor:()=>j.ViTFeatureExtractor,ViTImageProcessor:()=>j.ViTImageProcessor,VitMatteImageProcessor:()=>N.VitMatteImageProcessor,VitPoseImageProcessor:()=>V.VitPoseImageProcessor,YolosFeatureExtractor:()=>O.YolosFeatureExtractor,YolosImageProcessor:()=>O.YolosImageProcessor});var 
r=s("./src/models/beit/image_processing_beit.js"),o=s("./src/models/bit/image_processing_bit.js"),n=s("./src/models/chinese_clip/image_processing_chinese_clip.js"),a=s("./src/models/clip/image_processing_clip.js"),i=s("./src/models/convnext/image_processing_convnext.js"),l=s("./src/models/deit/image_processing_deit.js"),c=s("./src/models/detr/image_processing_detr.js"),d=s("./src/models/donut/image_processing_donut.js"),u=s("./src/models/dpt/image_processing_dpt.js"),p=s("./src/models/efficientnet/image_processing_efficientnet.js"),m=s("./src/models/glpn/image_processing_glpn.js"),_=s("./src/models/idefics3/image_processing_idefics3.js"),h=s("./src/models/janus/image_processing_janus.js"),f=s("./src/models/jina_clip/image_processing_jina_clip.js"),g=s("./src/models/llava_onevision/image_processing_llava_onevision.js"),M=s("./src/models/mask2former/image_processing_mask2former.js"),w=s("./src/models/maskformer/image_processing_maskformer.js"),T=s("./src/models/mobilenet_v1/image_processing_mobilenet_v1.js"),b=s("./src/models/mobilenet_v2/image_processing_mobilenet_v2.js"),P=s("./src/models/mobilenet_v3/image_processing_mobilenet_v3.js"),x=s("./src/models/mobilenet_v4/image_processing_mobilenet_v4.js"),k=s("./src/models/mobilevit/image_processing_mobilevit.js"),y=s("./src/models/nougat/image_processing_nougat.js"),F=s("./src/models/owlv2/image_processing_owlv2.js"),v=s("./src/models/owlvit/image_processing_owlvit.js"),C=s("./src/models/pvt/image_processing_pvt.js"),S=s("./src/models/qwen2_vl/image_processing_qwen2_vl.js"),A=s("./src/models/rt_detr/image_processing_rt_detr.js"),E=s("./src/models/sam/image_processing_sam.js"),L=s("./src/models/segformer/image_processing_segformer.js"),I=s("./src/models/siglip/image_processing_siglip.js"),z=s("./src/models/swin2sr/image_processing_swin2sr.js"),j=s("./src/models/vit/image_processing_vit.js"),N=s("./src/models/vitmatte/image_processing_vitmatte.js"),V=s("./src/models/vitpose/image_processing_vitpose.js"),O=s("./src/models/
yolos/image_processing_yolos.js")},"./src/models/janus/image_processing_janus.js":(e,t,s)=>{s.r(t),s.d(t,{VLMImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){super({do_pad:!0,pad_size:{width:e.image_size,height:e.image_size},...e}),this.constant_values=this.config.background_color.map((e=>e*this.rescale_factor))}pad_image(e,t,s,r){return super.pad_image(e,t,s,{constant_values:this.constant_values,center:!0,...r})}}},"./src/models/janus/processing_janus.js":(e,t,s)=>{s.r(t),s.d(t,{VLChatProcessor:()=>c});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js"),a=s("./src/utils/core.js"),i=s("./src/utils/tensor.js"),l=s("./src/utils/image.js");class c extends r.Processor{static image_processor_class=o.AutoImageProcessor;static tokenizer_class=n.AutoTokenizer;static uses_processor_config=!0;constructor(e,t){super(e,t),this.image_tag=this.config.image_tag,this.image_start_tag=this.config.image_start_tag,this.image_end_tag=this.config.image_end_tag,this.num_image_tokens=this.config.num_image_tokens}async _call(e,{images:t=null,chat_template:s="default"}={}){t?Array.isArray(t)||(t=[t]):t=await Promise.all(e.filter((e=>e.images)).flatMap((e=>e.images)).map((e=>l.RawImage.read(e))));const r=this.tokenizer,o=e=>r.encode(e,{add_special_tokens:!1}),n=r.apply_chat_template(e,{tokenize:!1,add_generation_prompt:!0,chat_template:s}).split(this.image_tag),c=n.length-1;if(t.length!==c)throw new Error(`Number of images provided (${t.length}) does not match number of "${this.image_tag}" image tags (${c})`);const[d,u,p]=r.model.convert_tokens_to_ids([this.image_tag,this.image_start_tag,this.image_end_tag]);let m=o(n[0]),_=new Array(m.length).fill(!1);for(let e=1;e<n.length;++e){const t=new Array(this.num_image_tokens).fill(d),s=o(n[e]);m=(0,a.mergeArrays)(m,[u],t,[p],s);const r=new 
Array(this.num_image_tokens).fill(!0);_=(0,a.mergeArrays)(_,[!1],r,[!1],new Array(s.length).fill(!1))}const h=[1,m.length],f={input_ids:new i.Tensor("int64",m,h),attention_mask:new i.Tensor("int64",new Array(m.length).fill(1),h),images_seq_mask:new i.Tensor("bool",_,h),images_emb_mask:new i.Tensor("bool",new Array(c*this.num_image_tokens).fill(!0),[1,c,this.num_image_tokens])};if(t&&t.length>0){const e=await this.image_processor(t);return e.pixel_values.unsqueeze_(0),{...f,...e}}return f}}},"./src/models/jina_clip/image_processing_jina_clip.js":(e,t,s)=>{s.r(t),s.d(t,{JinaCLIPImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{constructor(e){const{resize_mode:t,fill_color:s,interpolation:r,size:o,...n}=e;super({...n,size:"squash"===t?{width:o,height:o}:"shortest"===t?{shortest_edge:o}:{longest_edge:o},resample:"bicubic"===r?3:2,do_center_crop:!0,crop_size:o,do_normalize:!0})}}},"./src/models/jina_clip/processing_jina_clip.js":(e,t,s)=>{s.r(t),s.d(t,{JinaCLIPProcessor:()=>a});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js");class a extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;async _call(e=null,t=null,s={}){if(!e&&!t)throw new Error("Either text or images must be provided");return{...e?this.tokenizer(e,s):{},...t?await this.image_processor(t,s):{}}}}},"./src/models/llava_onevision/image_processing_llava_onevision.js":(e,t,s)=>{s.r(t),s.d(t,{LlavaOnevisionImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/mask2former/image_processing_mask2former.js":(e,t,s)=>{s.r(t),s.d(t,{Mask2FormerImageProcessor:()=>o});var r=s("./src/models/maskformer/image_processing_maskformer.js");class o extends 
r.MaskFormerImageProcessor{}},"./src/models/maskformer/image_processing_maskformer.js":(e,t,s)=>{s.r(t),s.d(t,{MaskFormerFeatureExtractor:()=>n,MaskFormerImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_panoptic_segmentation(...e){return(0,r.post_process_panoptic_segmentation)(...e)}post_process_instance_segmentation(...e){return(0,r.post_process_instance_segmentation)(...e)}}class n extends o{}},"./src/models/mgp_str/processing_mgp_str.js":(e,t,s)=>{s.r(t),s.d(t,{MgpstrProcessor:()=>l});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js"),a=s("./src/utils/maths.js");const i={char:["char_decode",1],bpe:["bpe_decode",2],wp:["wp_decode",102]};class l extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;get char_tokenizer(){return this.components.char_tokenizer}get bpe_tokenizer(){return this.components.bpe_tokenizer}get wp_tokenizer(){return this.components.wp_tokenizer}_decode_helper(e,t){if(!i.hasOwnProperty(t))throw new Error(`Format ${t} is not supported.`);const[s,r]=i[t],o=this[s].bind(this),[n,l]=e.dims,c=[],d=[],u=e.tolist();for(let e=0;e<n;++e){const t=u[e],s=[],o=[];for(let e=1;e<l;++e){const[n,i]=(0,a.max)((0,a.softmax)(t[e]));if(o.push(n),i==r)break;s.push(i)}const n=o.length>0?o.reduce(((e,t)=>e*t),1):0;d.push(s),c.push(n)}return[o(d),c]}char_decode(e){return this.char_tokenizer.batch_decode(e).map((e=>e.replaceAll(" ","")))}bpe_decode(e){return this.bpe_tokenizer.batch_decode(e)}wp_decode(e){return this.wp_tokenizer.batch_decode(e).map((e=>e.replaceAll(" ","")))}batch_decode([e,t,s]){const[r,o]=this._decode_helper(e,"char"),[n,i]=this._decode_helper(t,"bpe"),[l,c]=this._decode_helper(s,"wp"),d=[],u=[];for(let 
e=0;e<r.length;++e){const[t,s]=(0,a.max)([o[e],i[e],c[e]]);d.push([r[e],n[e],l[e]][s]),u.push(t)}return{generated_text:d,scores:u,char_preds:r,bpe_preds:n,wp_preds:l}}static async from_pretrained(...e){const t=await super.from_pretrained(...e),s=await n.AutoTokenizer.from_pretrained("Xenova/gpt2"),r=await n.AutoTokenizer.from_pretrained("Xenova/bert-base-uncased");return t.components={image_processor:t.image_processor,char_tokenizer:t.tokenizer,bpe_tokenizer:s,wp_tokenizer:r},t}async _call(e,t=null){const s=await this.image_processor(e);return t&&(s.labels=this.tokenizer(t).input_ids),s}}},"./src/models/mobilenet_v1/image_processing_mobilenet_v1.js":(e,t,s)=>{s.r(t),s.d(t,{MobileNetV1FeatureExtractor:()=>n,MobileNetV1ImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilenet_v2/image_processing_mobilenet_v2.js":(e,t,s)=>{s.r(t),s.d(t,{MobileNetV2FeatureExtractor:()=>n,MobileNetV2ImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilenet_v3/image_processing_mobilenet_v3.js":(e,t,s)=>{s.r(t),s.d(t,{MobileNetV3FeatureExtractor:()=>n,MobileNetV3ImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilenet_v4/image_processing_mobilenet_v4.js":(e,t,s)=>{s.r(t),s.d(t,{MobileNetV4FeatureExtractor:()=>n,MobileNetV4ImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/mobilevit/image_processing_mobilevit.js":(e,t,s)=>{s.r(t),s.d(t,{MobileViTFeatureExtractor:()=>n,MobileViTImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/nougat/image_processing_nougat.js":(e,t,s)=>{s.r(t),s.d(t,{NougatImageProcessor:()=>o});var 
r=s("./src/models/donut/image_processing_donut.js");class o extends r.DonutImageProcessor{}},"./src/models/owlv2/image_processing_owlv2.js":(e,t,s)=>{s.r(t),s.d(t,{Owlv2ImageProcessor:()=>o});var r=s("./src/models/owlvit/image_processing_owlvit.js");class o extends r.OwlViTImageProcessor{}},"./src/models/owlvit/image_processing_owlvit.js":(e,t,s)=>{s.r(t),s.d(t,{OwlViTFeatureExtractor:()=>n,OwlViTImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}}class n extends o{}},"./src/models/owlvit/processing_owlvit.js":(e,t,s)=>{s.r(t),s.d(t,{OwlViTProcessor:()=>a});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js");class a extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor}},"./src/models/paligemma/processing_paligemma.js":(e,t,s)=>{s.r(t),s.d(t,{PaliGemmaProcessor:()=>i});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js");const a="<image>";class i extends r.Processor{static tokenizer_class=n.AutoTokenizer;static image_processor_class=o.AutoImageProcessor;static uses_processor_config=!1;async _call(e,t=null,s={}){t||(console.warn("You are using PaliGemma without a text prefix. It will perform as a picture-captioning model."),t=""),Array.isArray(e)||(e=[e]),Array.isArray(t)||(t=[t]);const r=this.tokenizer.bos_token,o=this.image_processor.config.image_seq_length;let n;t.some((e=>e.includes(a)))?n=t.map((e=>{const t=e.replaceAll(a,a.repeat(o)),s=t.lastIndexOf(a),n=-1===s?0:s+7;return t.slice(0,n)+r+t.slice(n)+"\n"})):(console.warn("You are passing both `text` and `images` to `PaliGemmaProcessor`. The processor expects special image tokens in the text, as many tokens as there are images per each text. 
It is recommended to add `<image>` tokens in the very beginning of your text. For this call, we will infer how many images each text has and add special tokens."),n=t.map((t=>function(e,t,s,r,o){return`${r.repeat(s*o)}${t}${e}\n`}(t,r,o,a,e.length))));const i=this.tokenizer(n,s);return{...await this.image_processor(e,s),...i}}}},"./src/models/processors.js":(e,t,s)=>{s.r(t),s.d(t,{Florence2Processor:()=>r.Florence2Processor,Idefics3Processor:()=>n.Idefics3Processor,JinaCLIPProcessor:()=>i.JinaCLIPProcessor,MgpstrProcessor:()=>o.MgpstrProcessor,OwlViTProcessor:()=>l.OwlViTProcessor,PaliGemmaProcessor:()=>c.PaliGemmaProcessor,PyAnnoteProcessor:()=>d.PyAnnoteProcessor,Qwen2VLProcessor:()=>u.Qwen2VLProcessor,SamProcessor:()=>p.SamProcessor,SpeechT5Processor:()=>m.SpeechT5Processor,VLChatProcessor:()=>a.VLChatProcessor,Wav2Vec2ProcessorWithLM:()=>_.Wav2Vec2ProcessorWithLM,WhisperProcessor:()=>h.WhisperProcessor});var r=s("./src/models/florence2/processing_florence2.js"),o=s("./src/models/mgp_str/processing_mgp_str.js"),n=s("./src/models/idefics3/processing_idefics3.js"),a=s("./src/models/janus/processing_janus.js"),i=s("./src/models/jina_clip/processing_jina_clip.js"),l=s("./src/models/owlvit/processing_owlvit.js"),c=s("./src/models/paligemma/processing_paligemma.js"),d=s("./src/models/pyannote/processing_pyannote.js"),u=s("./src/models/qwen2_vl/processing_qwen2_vl.js"),p=s("./src/models/sam/processing_sam.js"),m=s("./src/models/speecht5/processing_speecht5.js"),_=s("./src/models/wav2vec2/processing_wav2vec2.js"),h=s("./src/models/whisper/processing_whisper.js")},"./src/models/pvt/image_processing_pvt.js":(e,t,s)=>{s.r(t),s.d(t,{PvtImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/pyannote/feature_extraction_pyannote.js":(e,t,s)=>{s.r(t),s.d(t,{PyAnnoteFeatureExtractor:()=>n});var r=s("./src/base/feature_extraction_utils.js"),o=s("./src/utils/tensor.js");class n extends r.FeatureExtractor{async 
_call(e){(0,r.validate_audio_inputs)(e,"PyAnnoteFeatureExtractor"),e instanceof Float64Array&&(e=new Float32Array(e));const t=[1,1,e.length];return{input_values:new o.Tensor("float32",e,t)}}}},"./src/models/pyannote/processing_pyannote.js":(e,t,s)=>{s.r(t),s.d(t,{PyAnnoteProcessor:()=>a});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/feature_extraction_auto.js"),n=s("./src/utils/maths.js");class a extends r.Processor{static feature_extractor_class=o.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}samples_to_frames(e){return(e-this.config.offset)/this.config.step}post_process_speaker_diarization(e,t){const s=t/this.samples_to_frames(t)/this.config.sampling_rate,r=[];for(const t of e.tolist()){const e=[];let o=-1;for(let s=0;s<t.length;++s){const r=(0,n.softmax)(t[s]),[a,i]=(0,n.max)(r),[l,c]=[s,s+1];i!==o?(o=i,e.push({id:i,start:l,end:c,score:a})):(e.at(-1).end=c,e.at(-1).score+=a)}r.push(e.map((({id:e,start:t,end:r,score:o})=>({id:e,start:t*s,end:r*s,confidence:o/(r-t)}))))}return r}}},"./src/models/qwen2_vl/image_processing_qwen2_vl.js":(e,t,s)=>{s.r(t),s.d(t,{Qwen2VLImageProcessor:()=>n});var r=s("./src/base/image_processors_utils.js"),o=s("./src/utils/tensor.js");class n extends r.ImageProcessor{async _call(e,...t){const{pixel_values:s,original_sizes:r,reshaped_input_sizes:n}=await super._call(e,...t);let a=s;const{temporal_patch_size:i,merge_size:l,patch_size:c}=this.config;1===a.dims[0]&&(a=(0,o.cat)(Array.from({length:i},(()=>a)),0));const d=a.dims[0]/i,u=a.dims[1],p=Math.floor(a.dims[2]/c),m=Math.floor(a.dims[3]/c);return{pixel_values:a.view(d,i,u,Math.floor(p/l),l,c,Math.floor(m/l),l,c).permute(0,3,6,4,7,2,1,5,8).view(d*p*m,u*i*c*c),image_grid_thw:new o.Tensor("int64",[d,p,m],[1,3]),original_sizes:r,reshaped_input_sizes:n}}}},"./src/models/qwen2_vl/processing_qwen2_vl.js":(e,t,s)=>{s.r(t),s.d(t,{Qwen2VLProcessor:()=>a});var 
r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js"),n=s("./src/tokenizers.js");s("./src/utils/image.js");class a extends r.Processor{static image_processor_class=o.AutoImageProcessor;static tokenizer_class=n.AutoTokenizer;async _call(e,t=null,...s){let r,o;if(Array.isArray(e)||(e=[e]),t&&(r=await this.image_processor(t),o=r.image_grid_thw),o){let t=this.image_processor.config.merge_size**2,s=0;const r=o.tolist();e=e.map((e=>{for(;e.includes("<|image_pad|>");){const o=Number(r[s++].reduce(((e,t)=>e*t),1n));e=e.replace("<|image_pad|>","<|placeholder|>".repeat(Math.floor(o/t)))}return e.replaceAll("<|placeholder|>","<|image_pad|>")}))}return{...this.tokenizer(e),...r}}}},"./src/models/rt_detr/image_processing_rt_detr.js":(e,t,s)=>{s.r(t),s.d(t,{RTDetrImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}}},"./src/models/sam/image_processing_sam.js":(e,t,s)=>{s.r(t),s.d(t,{SamImageProcessor:()=>a});var r=s("./src/base/image_processors_utils.js"),o=s("./src/utils/core.js"),n=s("./src/utils/tensor.js");class a extends r.ImageProcessor{reshape_input_points(e,t,s,r=!1){e=structuredClone(e);let a=(0,o.calculateDimensions)(e);if(3===a.length)r||(a=[1,...a]),e=[e];else if(4!==a.length)throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.");for(let r=0;r<e.length;++r){let o=t[r],n=s[r],a=[n[0]/o[0],n[1]/o[1]];for(let t=0;t<e[r].length;++t)for(let s=0;s<e[r][t].length;++s)for(let o=0;o<e[r][t][s].length;++o)e[r][t][s][o]*=a[o%2]}return new n.Tensor("float32",Float32Array.from(e.flat(1/0)),a)}add_input_labels(e,t){let s=(0,o.calculateDimensions)(e);if(2===s.length)s=[1,...s],e=[e];else if(3!==s.length)throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, 
`2`.");if(s.some(((e,s)=>e!==t.dims[s])))throw Error(`The first ${s.length} dimensions of 'input_points' and 'input_labels' must be the same.`);return new n.Tensor("int64",e.flat(1/0).map(BigInt),s)}async _call(e,{input_points:t=null,input_labels:s=null,input_boxes:r=null}={}){const o=await super._call(e);if(t&&(o.input_points=this.reshape_input_points(t,o.original_sizes,o.reshaped_input_sizes)),s){if(!o.input_points)throw Error("`input_points` must be provided if `input_labels` are provided.");o.input_labels=this.add_input_labels(s,o.input_points)}return r&&(o.input_boxes=this.reshape_input_points(r,o.original_sizes,o.reshaped_input_sizes,!0)),o}async post_process_masks(e,t,s,{mask_threshold:r=0,binarize:o=!0,pad_size:a=null}={}){const i=[],l=[(a=a??this.pad_size).height,a.width];for(let a=0;a<t.length;++a){const c=t[a],d=s[a];let u=await(0,n.interpolate_4d)(e[a],{mode:"bilinear",size:l});if(u=u.slice(null,null,[0,d[0]],[0,d[1]]),u=await(0,n.interpolate_4d)(u,{mode:"bilinear",size:c}),o){const e=u.data,t=new Uint8Array(e.length);for(let s=0;s<e.length;++s)e[s]>r&&(t[s]=1);u=new n.Tensor("bool",t,u.dims)}i.push(u)}return i}generate_crop_boxes(e,t,{crop_n_layers:s=0,overlap_ratio:r=512/1500,points_per_crop:o=32,crop_n_points_downscale_factor:n=1}={}){}}},"./src/models/sam/processing_sam.js":(e,t,s)=>{s.r(t),s.d(t,{SamProcessor:()=>n});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/image_processing_auto.js");class n extends r.Processor{static image_processor_class=o.AutoImageProcessor;async _call(...e){return await this.image_processor(...e)}post_process_masks(...e){return this.image_processor.post_process_masks(...e)}reshape_input_points(...e){return this.image_processor.reshape_input_points(...e)}}},"./src/models/seamless_m4t/feature_extraction_seamless_m4t.js":(e,t,s)=>{s.r(t),s.d(t,{SeamlessM4TFeatureExtractor:()=>a});var r=s("./src/base/feature_extraction_utils.js"),o=s("./src/utils/tensor.js"),n=s("./src/utils/audio.js");class a extends 
r.FeatureExtractor{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,n.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,n.window_function)(400,"povey",{periodic:!1})}async _extract_fbank_features(e,t){return e=e.map((e=>32768*e)),(0,n.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,max_num_frames:t,transpose:!0})}async _call(e,{padding:t=!0,pad_to_multiple_of:s=2,do_normalize_per_mel_bins:n=!0,return_attention_mask:a=!0}={}){(0,r.validate_audio_inputs)(e,"SeamlessM4TFeatureExtractor");let i,l=await this._extract_fbank_features(e,this.config.max_length);if(n){const[e,t]=l.dims,s=l.data;for(let r=0;r<t;++r){let o=0;for(let n=0;n<e;++n)o+=s[n*t+r];const n=o/e;let a=0;for(let o=0;o<e;++o)a+=(s[o*t+r]-n)**2;a/=e-1;const i=Math.sqrt(a+1e-7);for(let o=0;o<e;++o){const e=o*t+r;s[e]=(s[e]-n)/i}}}if(t){const[e,t]=l.dims,r=l.data,n=e%s;if(n>0){const s=new Float32Array(t*(e+n));s.set(r),s.fill(this.config.padding_value,r.length);const c=e+n;l=new o.Tensor(l.type,s,[c,t]),a&&(i=new o.Tensor("int64",new BigInt64Array(c),[1,c]),i.data.fill(1n,0,e))}}const[c,d]=l.dims,u=this.config.stride;if(0!==c%u)throw new Error(`The number of frames (${c}) must be a multiple of the stride (${u}).`);const p=l.view(1,Math.floor(c/u),d*u),m={input_features:p};if(a){const e=p.dims[1],t=new BigInt64Array(e);if(i){const e=i.data;for(let s=1,r=0;s<c;s+=u,++r)t[r]=e[s]}else t.fill(1n);m.attention_mask=new o.Tensor("int64",t,[1,e])}return m}}},"./src/models/segformer/image_processing_segformer.js":(e,t,s)=>{s.r(t),s.d(t,{SegformerFeatureExtractor:()=>n,SegformerImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends 
r.ImageProcessor{post_process_semantic_segmentation(...e){return(0,r.post_process_semantic_segmentation)(...e)}}class n extends o{}},"./src/models/siglip/image_processing_siglip.js":(e,t,s)=>{s.r(t),s.d(t,{SiglipImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}},"./src/models/speecht5/feature_extraction_speecht5.js":(e,t,s)=>{s.r(t),s.d(t,{SpeechT5FeatureExtractor:()=>o});var r=s("./src/base/feature_extraction_utils.js");class o extends r.FeatureExtractor{}},"./src/models/speecht5/processing_speecht5.js":(e,t,s)=>{s.r(t),s.d(t,{SpeechT5Processor:()=>a});var r=s("./src/base/processing_utils.js"),o=s("./src/tokenizers.js"),n=s("./src/models/auto/feature_extraction_auto.js");class a extends r.Processor{static tokenizer_class=o.AutoTokenizer;static feature_extractor_class=n.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}}},"./src/models/swin2sr/image_processing_swin2sr.js":(e,t,s)=>{s.r(t),s.d(t,{Swin2SRImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{pad_image(e,t,s,r={}){const[o,n,a]=t;return super.pad_image(e,t,{width:n+(s-n%s)%s,height:o+(s-o%s)%s},{mode:"symmetric",center:!1,constant_values:-1,...r})}}},"./src/models/vit/image_processing_vit.js":(e,t,s)=>{s.r(t),s.d(t,{ViTFeatureExtractor:()=>n,ViTImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{}class n extends o{}},"./src/models/vitmatte/image_processing_vitmatte.js":(e,t,s)=>{s.r(t),s.d(t,{VitMatteImageProcessor:()=>n});var r=s("./src/base/image_processors_utils.js"),o=s("./src/utils/tensor.js");class n extends r.ImageProcessor{async _call(e,t){Array.isArray(e)||(e=[e]),Array.isArray(t)||(t=[t]);const s=await Promise.all(e.map((e=>this.preprocess(e)))),r=await 
Promise.all(t.map((e=>this.preprocess(e,{do_normalize:!1,do_convert_rgb:!1,do_convert_grayscale:!0}))));return{pixel_values:(0,o.stack)(s.map(((e,t)=>(0,o.cat)([e.pixel_values,r[t].pixel_values],0))),0),original_sizes:s.map((e=>e.original_size)),reshaped_input_sizes:s.map((e=>e.reshaped_input_size))}}}},"./src/models/vitpose/image_processing_vitpose.js":(e,t,s)=>{s.r(t),s.d(t,{VitPoseImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_pose_estimation(e,t,{threshold:s=null}={}){const r=e.tolist(),[o,n,a,i]=e.dims,l=[];for(let e=0;e<o;++e){const o=r[e],n=t[e],c=[];for(let e=0;e<n.length;++e){const t=n[e],r=[],l=[],d=[],u=t.at(-2)/i,p=t.at(-1)/a;for(let e=0;e<o.length;++e){let[t,n]=[0,0],a=0,i=-1/0;const c=o[e];for(let e=0;e<c.length;++e){const s=c[e];for(let r=0;r<s.length;++r){const o=s[r];a+=o,i=Math.max(i,o),t+=(r+.5)*o,n+=e*o}}if(null!=s&&i<s)continue;const m=[u*t/a,p*n/a];r.push(m),d.push(e),l.push(i)}c.push({bbox:t,scores:l,labels:d,keypoints:r})}l.push(c)}return l}}},"./src/models/wav2vec2/feature_extraction_wav2vec2.js":(e,t,s)=>{s.r(t),s.d(t,{Wav2Vec2FeatureExtractor:()=>n});var r=s("./src/base/feature_extraction_utils.js"),o=s("./src/utils/tensor.js");class n extends r.FeatureExtractor{_zero_mean_unit_var_norm(e){const t=e.reduce(((e,t)=>e+t),0)/e.length,s=e.reduce(((e,s)=>e+(s-t)**2),0)/e.length;return e.map((e=>(e-t)/Math.sqrt(s+1e-7)))}async _call(e){(0,r.validate_audio_inputs)(e,"Wav2Vec2FeatureExtractor"),e instanceof Float64Array&&(e=new Float32Array(e));let t=e;this.config.do_normalize&&(t=this._zero_mean_unit_var_norm(t));const s=[1,t.length];return{input_values:new o.Tensor("float32",t,s),attention_mask:new o.Tensor("int64",new BigInt64Array(t.length).fill(1n),s)}}}},"./src/models/wav2vec2/processing_wav2vec2.js":(e,t,s)=>{s.r(t),s.d(t,{Wav2Vec2ProcessorWithLM:()=>n});var r=s("./src/base/processing_utils.js"),o=s("./src/models/auto/feature_extraction_auto.js");class n extends 
r.Processor{static feature_extractor_class=o.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}}},"./src/models/wespeaker/feature_extraction_wespeaker.js":(e,t,s)=>{s.r(t),s.d(t,{WeSpeakerFeatureExtractor:()=>n});var r=s("./src/base/feature_extraction_utils.js"),o=(s("./src/utils/tensor.js"),s("./src/utils/audio.js"));class n extends r.FeatureExtractor{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,o.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,o.window_function)(400,"hamming",{periodic:!1}),this.min_num_frames=this.config.min_num_frames}async _extract_fbank_features(e){return e=e.map((e=>32768*e)),(0,o.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,transpose:!0,min_num_frames:this.min_num_frames})}async _call(e){(0,r.validate_audio_inputs)(e,"WeSpeakerFeatureExtractor");const t=(await this._extract_fbank_features(e)).unsqueeze_(0);if(null===this.config.fbank_centering_span){const e=t.mean(1).data,s=t.data,[r,o,n]=t.dims;for(let t=0;t<r;++t){const r=t*o*n,a=t*n;for(let t=0;t<o;++t){const o=r+t*n;for(let t=0;t<n;++t)s[o+t]-=e[a+t]}}}return{input_features:t}}}},"./src/models/whisper/common_whisper.js":(e,t,s)=>{s.r(t),s.d(t,{WHISPER_LANGUAGE_MAPPING:()=>o,WHISPER_TO_LANGUAGE_CODE_MAPPING:()=>n,whisper_language_to_code:()=>a});const 
r=[["en","english"],["zh","chinese"],["de","german"],["es","spanish"],["ru","russian"],["ko","korean"],["fr","french"],["ja","japanese"],["pt","portuguese"],["tr","turkish"],["pl","polish"],["ca","catalan"],["nl","dutch"],["ar","arabic"],["sv","swedish"],["it","italian"],["id","indonesian"],["hi","hindi"],["fi","finnish"],["vi","vietnamese"],["he","hebrew"],["uk","ukrainian"],["el","greek"],["ms","malay"],["cs","czech"],["ro","romanian"],["da","danish"],["hu","hungarian"],["ta","tamil"],["no","norwegian"],["th","thai"],["ur","urdu"],["hr","croatian"],["bg","bulgarian"],["lt","lithuanian"],["la","latin"],["mi","maori"],["ml","malayalam"],["cy","welsh"],["sk","slovak"],["te","telugu"],["fa","persian"],["lv","latvian"],["bn","bengali"],["sr","serbian"],["az","azerbaijani"],["sl","slovenian"],["kn","kannada"],["et","estonian"],["mk","macedonian"],["br","breton"],["eu","basque"],["is","icelandic"],["hy","armenian"],["ne","nepali"],["mn","mongolian"],["bs","bosnian"],["kk","kazakh"],["sq","albanian"],["sw","swahili"],["gl","galician"],["mr","marathi"],["pa","punjabi"],["si","sinhala"],["km","khmer"],["sn","shona"],["yo","yoruba"],["so","somali"],["af","afrikaans"],["oc","occitan"],["ka","georgian"],["be","belarusian"],["tg","tajik"],["sd","sindhi"],["gu","gujarati"],["am","amharic"],["yi","yiddish"],["lo","lao"],["uz","uzbek"],["fo","faroese"],["ht","haitian creole"],["ps","pashto"],["tk","turkmen"],["nn","nynorsk"],["mt","maltese"],["sa","sanskrit"],["lb","luxembourgish"],["my","myanmar"],["bo","tibetan"],["tl","tagalog"],["mg","malagasy"],["as","assamese"],["tt","tatar"],["haw","hawaiian"],["ln","lingala"],["ha","hausa"],["ba","bashkir"],["jw","javanese"],["su","sundanese"]],o=new Map(r),n=new Map([...r.map((([e,t])=>[t,e])),["burmese","my"],["valencian","ca"],["flemish","nl"],["haitian","ht"],["letzeburgesch","lb"],["pushto","ps"],["panjabi","pa"],["moldavian","ro"],["moldovan","ro"],["sinhalese","si"],["castilian","es"]]);function a(e){e=e.toLowerCase();let 
t=n.get(e);if(void 0===t){if(!o.has(e)){const t=2===e.length?o.keys():o.values();throw new Error(`Language "${e}" is not supported. Must be one of: ${JSON.stringify(t)}`)}t=e}return t}},"./src/models/whisper/feature_extraction_whisper.js":(e,t,s)=>{s.r(t),s.d(t,{WhisperFeatureExtractor:()=>a});var r=s("./src/base/feature_extraction_utils.js"),o=(s("./src/utils/tensor.js"),s("./src/utils/audio.js")),n=s("./src/utils/maths.js");class a extends r.FeatureExtractor{constructor(e){super(e),this.config.mel_filters??=(0,o.mel_filter_bank)(Math.floor(1+this.config.n_fft/2),this.config.feature_size,0,8e3,this.config.sampling_rate,"slaney","slaney"),this.window=(0,o.window_function)(this.config.n_fft,"hann")}async _extract_fbank_features(e){const t=await(0,o.spectrogram)(e,this.window,this.config.n_fft,this.config.hop_length,{power:2,mel_filters:this.config.mel_filters,log_mel:"log10",max_num_frames:this.config.nb_max_frames}),s=t.data,r=(0,n.max)(s)[0];for(let e=0;e<s.length;++e)s[e]=(Math.max(s[e],r-8)+4)/4;return t}async _call(e){let t;(0,r.validate_audio_inputs)(e,"WhisperFeatureExtractor"),e.length>this.config.n_samples?(console.warn("Attempting to extract features for audio longer than 30 seconds. 
If using a pipeline to extract transcript from a long audio clip, remember to specify `chunk_length_s` and/or `stride_length_s`."),t=e.slice(0,this.config.n_samples)):(t=new Float32Array(this.config.n_samples),t.set(e));return{input_features:(await this._extract_fbank_features(t)).unsqueeze_(0)}}}},"./src/models/whisper/generation_whisper.js":(e,t,s)=>{s.r(t),s.d(t,{WhisperGenerationConfig:()=>o});var r=s("./src/generation/configuration_utils.js");class o extends r.GenerationConfig{return_timestamps=null;return_token_timestamps=null;num_frames=null;alignment_heads=null;task=null;language=null;no_timestamps_token_id=null;prompt_ids=null;is_multilingual=null;lang_to_id=null;task_to_id=null;max_initial_timestamp_index=1}},"./src/models/whisper/processing_whisper.js":(e,t,s)=>{s.r(t),s.d(t,{WhisperProcessor:()=>a});var r=s("./src/models/auto/feature_extraction_auto.js"),o=s("./src/tokenizers.js"),n=s("./src/base/processing_utils.js");class a extends n.Processor{static tokenizer_class=o.AutoTokenizer;static feature_extractor_class=r.AutoFeatureExtractor;async _call(e){return await this.feature_extractor(e)}}},"./src/models/yolos/image_processing_yolos.js":(e,t,s)=>{s.r(t),s.d(t,{YolosFeatureExtractor:()=>n,YolosImageProcessor:()=>o});var r=s("./src/base/image_processors_utils.js");class o extends r.ImageProcessor{post_process_object_detection(...e){return(0,r.post_process_object_detection)(...e)}}class n extends o{}},"./src/ops/registry.js":(e,t,s)=>{s.r(t),s.d(t,{TensorOpRegistry:()=>a});var r=s("./src/backends/onnx.js"),o=s("./src/utils/tensor.js");const n=async(e,t,s)=>{const n=await(0,r.createInferenceSession)(new Uint8Array(e),t);return async e=>{const t=Object.fromEntries(Object.entries(e).map((([e,t])=>[e,t.ort_tensor]))),r=await n.run(t);return Array.isArray(s)?s.map((e=>new o.Tensor(r[e]))):new o.Tensor(r[s])}};class a{static session_options={};static get bilinear_interpolate_4d(){return 
this._bilinear_interpolate_4d||(this._bilinear_interpolate_4d=n([8,9,18,0,58,128,1,10,40,10,1,120,10,0,10,0,10,1,115,18,1,121,34,6,82,101,115,105,122,101,42,17,10,4,109,111,100,101,34,6,108,105,110,101,97,114,160,1,3,18,1,114,90,31,10,1,120,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,90,15,10,1,115,18,10,10,8,8,7,18,4,10,2,8,4,98,31,10,1,121,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,66,2,16,20],this.session_options,"y")),this._bilinear_interpolate_4d}static get bicubic_interpolate_4d(){return this._bicubic_interpolate_4d||(this._bicubic_interpolate_4d=n([8,9,18,0,58,127,10,39,10,1,120,10,0,10,0,10,1,115,18,1,121,34,6,82,101,115,105,122,101,42,16,10,4,109,111,100,101,34,5,99,117,98,105,99,160,1,3,18,1,114,90,31,10,1,120,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,90,15,10,1,115,18,10,10,8,8,7,18,4,10,2,8,4,98,31,10,1,121,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,99,10,3,18,1,104,10,3,18,1,119,66,2,16,20],this.session_options,"y")),this._bicubic_interpolate_4d}static get matmul(){return this._matmul||(this._matmul=n([8,9,18,0,58,55,10,17,10,1,97,10,1,98,18,1,99,34,6,77,97,116,77,117,108,18,1,114,90,9,10,1,97,18,4,10,2,8,1,90,9,10,1,98,18,4,10,2,8,1,98,9,10,1,99,18,4,10,2,8,1,66,2,16,20],this.session_options,"c")),this._matmul}static get stft(){return this._stft||(this._stft=n([8,7,18,0,58,148,1,10,38,10,1,115,10,1,106,10,1,119,10,1,108,18,1,111,34,4,83,84,70,84,42,15,10,8,111,110,101,115,105,100,101,100,24,1,160,1,2,18,1,115,90,26,10,1,115,18,21,10,19,8,1,18,15,10,3,18,1,98,10,3,18,1,115,10,3,18,1,99,90,11,10,1,106,18,6,10,4,8,7,18,0,90,16,10,1,119,18,11,10,9,8,1,18,5,10,3,18,1,119,90,11,10,1,108,18,6,10,4,8,7,18,0,98,31,10,1,111,18,26,10,24,8,1,18,20,10,3,18,1,98,10,3,18,1,102,10,3,18,1,100,10,3,18,1,99,66,2,16,17],this.session_options,"o")),this._stft}static get rfft(){return 
this._rfft||(this._rfft=n([8,9,18,0,58,97,10,33,10,1,120,10,0,10,1,97,18,1,121,34,3,68,70,84,42,15,10,8,111,110,101,115,105,100,101,100,24,1,160,1,2,18,1,100,90,21,10,1,120,18,16,10,14,8,1,18,10,10,3,18,1,115,10,3,18,1,99,90,11,10,1,97,18,6,10,4,8,7,18,0,98,21,10,1,121,18,16,10,14,8,1,18,10,10,3,18,1,115,10,3,18,1,99,66,2,16,20],this.session_options,"y")),this._rfft}static get top_k(){return this._top_k||(this._top_k=n([8,10,18,0,58,73,10,18,10,1,120,10,1,107,18,1,118,18,1,105,34,4,84,111,112,75,18,1,116,90,9,10,1,120,18,4,10,2,8,1,90,15,10,1,107,18,10,10,8,8,7,18,4,10,2,8,1,98,9,10,1,118,18,4,10,2,8,1,98,9,10,1,105,18,4,10,2,8,7,66,2,16,21],this.session_options,["v","i"])),this._top_k}}},"./src/pipelines.js":(e,t,s)=>{s.r(t),s.d(t,{AudioClassificationPipeline:()=>C,AutomaticSpeechRecognitionPipeline:()=>A,DepthEstimationPipeline:()=>D,DocumentQuestionAnsweringPipeline:()=>V,FeatureExtractionPipeline:()=>F,FillMaskPipeline:()=>w,ImageClassificationPipeline:()=>L,ImageFeatureExtractionPipeline:()=>v,ImageSegmentationPipeline:()=>I,ImageToImagePipeline:()=>B,ImageToTextPipeline:()=>E,ObjectDetectionPipeline:()=>j,Pipeline:()=>h,QuestionAnsweringPipeline:()=>M,SummarizationPipeline:()=>b,Text2TextGenerationPipeline:()=>T,TextClassificationPipeline:()=>f,TextGenerationPipeline:()=>k,TextToAudioPipeline:()=>O,TokenClassificationPipeline:()=>g,TranslationPipeline:()=>P,ZeroShotAudioClassificationPipeline:()=>S,ZeroShotClassificationPipeline:()=>y,ZeroShotImageClassificationPipeline:()=>z,ZeroShotObjectDetectionPipeline:()=>N,pipeline:()=>q});var r=s("./src/tokenizers.js"),o=s("./src/models.js"),n=s("./src/models/auto/processing_auto.js"),a=(s("./src/base/processing_utils.js"),s("./src/utils/generic.js")),i=s("./src/utils/core.js"),l=s("./src/utils/maths.js"),c=s("./src/utils/audio.js"),d=s("./src/utils/tensor.js"),u=s("./src/utils/image.js");async function p(e){return Array.isArray(e)||(e=[e]),await Promise.all(e.map((e=>u.RawImage.read(e))))}async function m(e,t){return 
Array.isArray(e)||(e=[e]),await Promise.all(e.map((e=>"string"==typeof e||e instanceof URL?(0,c.read_audio)(e,t):e instanceof Float64Array?new Float32Array(e):e)))}function _(e,t){t&&(e=e.map((e=>0|e)));const[s,r,o,n]=e;return{xmin:s,ymin:r,xmax:o,ymax:n}}class h extends a.Callable{constructor({task:e,model:t,tokenizer:s=null,processor:r=null}){super(),this.task=e,this.model=t,this.tokenizer=s,this.processor=r}async dispose(){await this.model.dispose()}}class f extends h{constructor(e){super(e)}async _call(e,{top_k:t=1}={}){const s=this.tokenizer(e,{padding:!0,truncation:!0}),r=await this.model(s),o="multi_label_classification"===this.model.config.problem_type?e=>e.sigmoid():e=>new d.Tensor("float32",(0,l.softmax)(e.data),e.dims),n=this.model.config.id2label,a=[];for(const e of r.logits){const s=o(e),r=await(0,d.topk)(s,t),i=r[0].tolist(),l=r[1].tolist().map(((e,t)=>({label:n?n[e]:`LABEL_${e}`,score:i[t]})));1===t?a.push(...l):a.push(l)}return Array.isArray(e)||1===t?a:a[0]}}class g extends h{constructor(e){super(e)}async _call(e,{ignore_labels:t=["O"]}={}){const s=Array.isArray(e),r=this.tokenizer(s?e:[e],{padding:!0,truncation:!0}),o=(await this.model(r)).logits,n=this.model.config.id2label,a=[];for(let e=0;e<o.dims[0];++e){const s=r.input_ids[e],i=o[e],c=[];for(let e=0;e<i.dims[0];++e){const r=i[e],o=(0,l.max)(r.data)[1],a=n?n[o]:`LABEL_${o}`;if(t.includes(a))continue;const d=this.tokenizer.decode([s[e].item()],{skip_special_tokens:!0});if(""===d)continue;const u=(0,l.softmax)(r.data);c.push({entity:a,score:u[o],index:e,word:d})}a.push(c)}return s?a:a[0]}}class M extends h{constructor(e){super(e)}async _call(e,t,{top_k:s=1}={}){const r=this.tokenizer(e,{text_pair:t,padding:!0,truncation:!0}),{start_logits:o,end_logits:n}=await this.model(r),a=r.input_ids.tolist(),c=r.attention_mask.tolist(),d=this.tokenizer.all_special_ids,u=[];for(let e=0;e<o.dims[0];++e){const 
t=a[e],r=t.findIndex((e=>e==this.tokenizer.sep_token_id)),p=(c[e].map(((e,s)=>1==e&&(0===s||s>r&&-1===d.findIndex((e=>e==t[s]))))),o[e].tolist()),m=n[e].tolist();for(let s=1;s<p.length;++s)(0==c[e]||s<=r||-1!==d.findIndex((e=>e==t[s])))&&(p[s]=-1/0,m[s]=-1/0);const _=(0,l.softmax)(p).map(((e,t)=>[e,t])),h=(0,l.softmax)(m).map(((e,t)=>[e,t]));_[0][0]=0,h[0][0]=0;const f=(0,i.product)(_,h).filter((e=>e[0][1]<=e[1][1])).map((e=>[e[0][1],e[1][1],e[0][0]*e[1][0]])).sort(((e,t)=>t[2]-e[2]));for(let e=0;e<Math.min(f.length,s);++e){const[s,r,o]=f[e],n=t.slice(s,r+1),a=this.tokenizer.decode(n,{skip_special_tokens:!0});u.push({answer:a,score:o})}}return 1===s?u[0]:u}}class w extends h{constructor(e){super(e)}async _call(e,{top_k:t=5}={}){const s=this.tokenizer(e,{padding:!0,truncation:!0}),{logits:r}=await this.model(s),o=[],n=s.input_ids.tolist();for(let e=0;e<n.length;++e){const s=n[e],a=s.findIndex((e=>e==this.tokenizer.mask_token_id));if(-1===a)throw Error(`Mask token (${this.tokenizer.mask_token}) not found in text.`);const i=r[e][a],c=await(0,d.topk)(new d.Tensor("float32",(0,l.softmax)(i.data),i.dims),t),u=c[0].tolist(),p=c[1].tolist();o.push(p.map(((e,t)=>{const r=s.slice();return r[a]=e,{score:u[t],token:Number(e),token_str:this.tokenizer.model.vocab[e],sequence:this.tokenizer.decode(r,{skip_special_tokens:!0})}})))}return Array.isArray(e)?o:o[0]}}class T extends h{_key="generated_text";constructor(e){super(e)}async _call(e,t={}){Array.isArray(e)||(e=[e]),this.model.config.prefix&&(e=e.map((e=>this.model.config.prefix+e)));const s=this.model.config.task_specific_params;s&&s[this.task]&&s[this.task].prefix&&(e=e.map((e=>s[this.task].prefix+e)));const r=this.tokenizer,o={padding:!0,truncation:!0};let n;n=this instanceof P&&"_build_translation_inputs"in r?r._build_translation_inputs(e,o,t):r(e,o);const a=await this.model.generate({...n,...t});return r.batch_decode(a,{skip_special_tokens:!0}).map((e=>({[this._key]:e})))}}class b extends 
T{_key="summary_text";constructor(e){super(e)}}class P extends T{_key="translation_text";constructor(e){super(e)}}function x(e){return Array.isArray(e)&&e.every((e=>"role"in e&&"content"in e))}class k extends h{constructor(e){super(e)}async _call(e,t={}){let s,r=!1,o=!1;if("string"==typeof e)s=e=[e];else if(Array.isArray(e)&&e.every((e=>"string"==typeof e)))r=!0,s=e;else{if(x(e))e=[e];else{if(!Array.isArray(e)||!e.every(x))throw new Error("Input must be a string, an array of strings, a Chat, or an array of Chats");r=!0}o=!0,s=e.map((e=>this.tokenizer.apply_chat_template(e,{tokenize:!1,add_generation_prompt:!0})))}const n=t.add_special_tokens??!1,a=!o&&(t.return_full_text??!0);this.tokenizer.padding_side="left";const i=this.tokenizer(s,{add_special_tokens:n,padding:!0,truncation:!0}),l=await this.model.generate({...i,...t}),c=this.tokenizer.batch_decode(l,{skip_special_tokens:!0});let d;!a&&i.input_ids.dims.at(-1)>0&&(d=this.tokenizer.batch_decode(i.input_ids,{skip_special_tokens:!0}).map((e=>e.length)));const u=Array.from({length:e.length},(e=>[]));for(let t=0;t<c.length;++t){const s=Math.floor(t/l.dims[0]*e.length);d&&(c[t]=c[t].slice(d[s])),u[s].push({generated_text:o?[...e[s],{role:"assistant",content:c[t]}]:c[t]})}return r||1!==u.length?u:u[0]}}class y extends h{constructor(e){super(e),this.label2id=Object.fromEntries(Object.entries(this.model.config.label2id).map((([e,t])=>[e.toLowerCase(),t]))),this.entailment_id=this.label2id.entailment,void 0===this.entailment_id&&(console.warn("Could not find 'entailment' in label2id mapping. Using 2 as entailment_id."),this.entailment_id=2),this.contradiction_id=this.label2id.contradiction??this.label2id.not_entailment,void 0===this.contradiction_id&&(console.warn("Could not find 'contradiction' in label2id mapping. 
Using 0 as contradiction_id."),this.contradiction_id=0)}async _call(e,t,{hypothesis_template:s="This example is {}.",multi_label:r=!1}={}){const o=Array.isArray(e);o||(e=[e]),Array.isArray(t)||(t=[t]);const n=t.map((e=>s.replace("{}",e))),a=r||1===t.length,i=[];for(const s of e){const e=[];for(const t of n){const r=this.tokenizer(s,{text_pair:t,padding:!0,truncation:!0}),o=await this.model(r);a?e.push([o.logits.data[this.contradiction_id],o.logits.data[this.entailment_id]]):e.push(o.logits.data[this.entailment_id])}const r=(a?e.map((e=>(0,l.softmax)(e)[1])):(0,l.softmax)(e)).map(((e,t)=>[e,t])).sort(((e,t)=>t[0]-e[0]));i.push({sequence:s,labels:r.map((e=>t[e[1]])),scores:r.map((e=>e[0]))})}return o?i:i[0]}}class F extends h{constructor(e){super(e)}async _call(e,{pooling:t="none",normalize:s=!1,quantize:r=!1,precision:o="binary"}={}){const n=this.tokenizer(e,{padding:!0,truncation:!0}),a=await this.model(n);let i=a.last_hidden_state??a.logits??a.token_embeddings;if("none"===t);else if("mean"===t)i=(0,d.mean_pooling)(i,n.attention_mask);else{if("cls"!==t)throw Error(`Pooling method '${t}' not supported.`);i=i.slice(null,0)}return s&&(i=i.normalize(2,-1)),r&&(i=(0,d.quantize_embeddings)(i,o)),i}}class v extends h{constructor(e){super(e)}async _call(e,{pool:t=null}={}){const s=await p(e),{pixel_values:r}=await this.processor(s),o=await this.model({pixel_values:r});let n;if(t){if(!("pooler_output"in o))throw Error("No pooled output was returned. 
Make sure the model has a 'pooler' layer when using the 'pool' option.");n=o.pooler_output}else n=o.last_hidden_state??o.logits??o.image_embeds;return n}}class C extends h{constructor(e){super(e)}async _call(e,{top_k:t=5}={}){const s=this.processor.feature_extractor.config.sampling_rate,r=await m(e,s),o=this.model.config.id2label,n=[];for(const e of r){const s=await this.processor(e),r=(await this.model(s)).logits[0],a=await(0,d.topk)(new d.Tensor("float32",(0,l.softmax)(r.data),r.dims),t),i=a[0].tolist(),c=a[1].tolist().map(((e,t)=>({label:o?o[e]:`LABEL_${e}`,score:i[t]})));n.push(c)}return Array.isArray(e)?n:n[0]}}class S extends h{constructor(e){super(e)}async _call(e,t,{hypothesis_template:s="This is a sound of {}."}={}){const r=!Array.isArray(e);r&&(e=[e]);const o=t.map((e=>s.replace("{}",e))),n=this.tokenizer(o,{padding:!0,truncation:!0}),a=this.processor.feature_extractor.config.sampling_rate,i=await m(e,a),c=[];for(const e of i){const s=await this.processor(e),r=await this.model({...n,...s}),o=(0,l.softmax)(r.logits_per_audio.data);c.push([...o].map(((e,s)=>({score:e,label:t[s]}))))}return r?c[0]:c}}class A extends h{constructor(e){super(e)}async _call(e,t={}){switch(this.model.config.model_type){case"whisper":return this._call_whisper(e,t);case"wav2vec2":case"wav2vec2-bert":case"unispeech":case"unispeech-sat":case"hubert":return this._call_wav2vec2(e,t);default:throw new Error(`AutomaticSpeechRecognitionPipeline does not support model type '${this.model.config.model_type}'.`)}}async _call_wav2vec2(e,t){t.language&&console.warn('`language` parameter is not yet supported for `wav2vec2` models, defaulting to "English".'),t.task&&console.warn('`task` parameter is not yet supported for `wav2vec2` models, defaulting to "transcribe".');const s=!Array.isArray(e);s&&(e=[e]);const r=this.processor.feature_extractor.config.sampling_rate,o=await m(e,r),n=[];for(const e of o){const t=await this.processor(e),s=(await this.model(t)).logits[0],r=[];for(const e of 
s)r.push((0,l.max)(e.data)[1]);const o=this.tokenizer.decode(r);n.push({text:o})}return s?n[0]:n}async _call_whisper(e,t){const s=t.return_timestamps??!1,r=t.chunk_length_s??0,o=t.force_full_sequences??!1;let n=t.stride_length_s??null;const a={...t};"word"===s&&(a.return_token_timestamps=!0,a.return_timestamps=!1);const i=!Array.isArray(e);i&&(e=[e]);const c=this.processor.feature_extractor.config.chunk_length/this.model.config.max_source_positions,d=this.processor.feature_extractor.config.hop_length,u=this.processor.feature_extractor.config.sampling_rate,p=await m(e,u),_=[];for(const e of p){let t=[];if(r>0){if(null===n)n=r/6;else if(r<=n)throw Error("`chunk_length_s` must be larger than `stride_length_s`.");const s=u*r,o=u*n,a=s-2*o;let i=0;for(;;){const r=i+s,n=e.subarray(i,r),l=await this.processor(n),c=0===i,d=r>=e.length;if(t.push({stride:[n.length,c?0:o,d?0:o],input_features:l.input_features,is_last:d}),d)break;i+=a}}else t=[{stride:[e.length,0,0],input_features:(await this.processor(e)).input_features,is_last:!0}];for(const e of t){a.num_frames=Math.floor(e.stride[0]/d);const t=await this.model.generate({inputs:e.input_features,...a});"word"===s?(e.tokens=t.sequences.tolist()[0],e.token_timestamps=t.token_timestamps.tolist()[0].map((e=>(0,l.round)(e,2)))):e.tokens=t[0].tolist(),e.stride=e.stride.map((e=>e/u))}const[i,p]=this.tokenizer._decode_asr(t,{time_precision:c,return_timestamps:s,force_full_sequences:o});_.push({text:i,...p})}return i?_[0]:_}}class E extends h{constructor(e){super(e)}async _call(e,t={}){const s=Array.isArray(e),r=await p(e),{pixel_values:o}=await this.processor(r),n=[];for(const e of o){e.dims=[1,...e.dims];const s=await this.model.generate({inputs:e,...t}),r=this.tokenizer.batch_decode(s,{skip_special_tokens:!0}).map((e=>({generated_text:e.trim()})));n.push(r)}return s?n:n[0]}}class L extends h{constructor(e){super(e)}async _call(e,{top_k:t=5}={}){const s=await p(e),{pixel_values:r}=await this.processor(s),o=await 
this.model({pixel_values:r}),n=this.model.config.id2label,a=[];for(const e of o.logits){const s=await(0,d.topk)(new d.Tensor("float32",(0,l.softmax)(e.data),e.dims),t),r=s[0].tolist(),o=s[1].tolist().map(((e,t)=>({label:n?n[e]:`LABEL_${e}`,score:r[t]})));a.push(o)}return Array.isArray(e)?a:a[0]}}class I extends h{constructor(e){super(e),this.subtasks_mapping={panoptic:"post_process_panoptic_segmentation",instance:"post_process_instance_segmentation",semantic:"post_process_semantic_segmentation"}}async _call(e,{threshold:t=.5,mask_threshold:s=.5,overlap_mask_area_threshold:r=.8,label_ids_to_fuse:o=null,target_sizes:n=null,subtask:a=null}={}){if(Array.isArray(e)&&1!==e.length)throw Error("Image segmentation pipeline currently only supports a batch size of 1.");const i=await p(e),l=i.map((e=>[e.height,e.width])),{pixel_values:c,pixel_mask:d}=await this.processor(i),m=await this.model({pixel_values:c,pixel_mask:d});let _=null;if(null!==a)_=this.subtasks_mapping[a];else for(let[e,t]of Object.entries(this.subtasks_mapping))if(t in this.processor.image_processor){_=this.processor.image_processor[t].bind(this.processor.image_processor),a=e;break}const h=this.model.config.id2label,f=[];if("panoptic"===a||"instance"===a){const e=_(m,t,s,r,o,n??l)[0],a=e.segmentation;for(const t of e.segments_info){const e=new Uint8ClampedArray(a.data.length);for(let s=0;s<a.data.length;++s)a.data[s]===t.id&&(e[s]=255);const s=new u.RawImage(e,a.dims[1],a.dims[0],1);f.push({score:t.score,label:h[t.label_id],mask:s})}}else{if("semantic"!==a)throw Error(`Subtask ${a} not supported.`);{const{segmentation:e,labels:t}=_(m,n??l)[0];for(const s of t){const t=new Uint8ClampedArray(e.data.length);for(let r=0;r<e.data.length;++r)e.data[r]===s&&(t[r]=255);const r=new u.RawImage(t,e.dims[1],e.dims[0],1);f.push({score:null,label:h[s],mask:r})}}}return f}}class z extends h{constructor(e){super(e)}async _call(e,t,{hypothesis_template:s="This is a photo of {}"}={}){const r=Array.isArray(e),o=await 
p(e),n=t.map((e=>s.replace("{}",e))),a=this.tokenizer(n,{padding:"siglip"!==this.model.config.model_type||"max_length",truncation:!0}),{pixel_values:i}=await this.processor(o),c=await this.model({...a,pixel_values:i}),d="siglip"===this.model.config.model_type?e=>e.sigmoid().data:e=>(0,l.softmax)(e.data),u=[];for(const e of c.logits_per_image){const s=[...d(e)].map(((e,s)=>({score:e,label:t[s]})));s.sort(((e,t)=>t.score-e.score)),u.push(s)}return r?u:u[0]}}class j extends h{constructor(e){super(e)}async _call(e,{threshold:t=.9,percentage:s=!1}={}){const r=Array.isArray(e);if(r&&1!==e.length)throw Error("Object detection pipeline currently only supports a batch size of 1.");const o=await p(e),n=s?null:o.map((e=>[e.height,e.width])),{pixel_values:a,pixel_mask:i}=await this.processor(o),l=await this.model({pixel_values:a,pixel_mask:i}),c=this.processor.image_processor.post_process_object_detection(l,t,n),d=this.model.config.id2label,u=c.map((e=>e.boxes.map(((t,r)=>({score:e.scores[r],label:d[e.classes[r]],box:_(t,!s)})))));return r?u:u[0]}}class N extends h{constructor(e){super(e)}async _call(e,t,{threshold:s=.1,top_k:r=null,percentage:o=!1}={}){const n=Array.isArray(e),a=await p(e),i=this.tokenizer(t,{padding:!0,truncation:!0}),l=await this.processor(a),c=[];for(let e=0;e<a.length;++e){const n=a[e],d=o?null:[[n.height,n.width]],u=l.pixel_values[e].unsqueeze_(0),p=await this.model({...i,pixel_values:u}),m=this.processor.image_processor.post_process_object_detection(p,s,d,!0)[0];let h=m.boxes.map(((e,s)=>({score:m.scores[s],label:t[m.classes[s]],box:_(e,!o)}))).sort(((e,t)=>t.score-e.score));null!==r&&(h=h.slice(0,r)),c.push(h)}return n?c:c[0]}}class V extends h{constructor(e){super(e)}async _call(e,t,s={}){const r=(await p(e))[0],{pixel_values:o}=await this.processor(r),n=`<s_docvqa><s_question>${t}</s_question><s_answer>`,a=this.tokenizer(n,{add_special_tokens:!1,padding:!0,truncation:!0}).input_ids,i=await 
this.model.generate({inputs:o,max_length:this.model.config.decoder.max_position_embeddings,decoder_input_ids:a,...s}),l=this.tokenizer.batch_decode(i)[0].match(/<s_answer>(.*?)<\/s_answer>/);let c=null;return l&&l.length>=2&&(c=l[1].trim()),[{answer:c}]}}class O extends h{DEFAULT_VOCODER_ID="Xenova/speecht5_hifigan";constructor(e){super(e),this.vocoder=e.vocoder??null}async _call(e,{speaker_embeddings:t=null}={}){return this.processor?this._call_text_to_spectrogram(e,{speaker_embeddings:t}):this._call_text_to_waveform(e)}async _call_text_to_waveform(e){const t=this.tokenizer(e,{padding:!0,truncation:!0}),{waveform:s}=await this.model(t),r=this.model.config.sampling_rate;return{audio:s.data,sampling_rate:r}}async _call_text_to_spectrogram(e,{speaker_embeddings:t}){if(this.vocoder||(console.log("No vocoder specified, using default HifiGan vocoder."),this.vocoder=await o.AutoModel.from_pretrained(this.DEFAULT_VOCODER_ID,{dtype:"fp32"})),("string"==typeof t||t instanceof URL)&&(t=new Float32Array(await(await fetch(t)).arrayBuffer())),t instanceof Float32Array)t=new d.Tensor("float32",t,[1,t.length]);else if(!(t instanceof d.Tensor))throw new Error("Speaker embeddings must be a `Tensor`, `Float32Array`, `string`, or `URL`.");const{input_ids:s}=this.tokenizer(e,{padding:!0,truncation:!0}),{waveform:r}=await this.model.generate_speech(s,t,{vocoder:this.vocoder}),n=this.processor.feature_extractor.config.sampling_rate;return{audio:r.data,sampling_rate:n}}}class B extends h{constructor(e){super(e)}async _call(e){const t=await p(e),s=await this.processor(t),r=await this.model(s),o=[];for(const e of r.reconstruction){const t=e.squeeze().clamp_(0,1).mul_(255).round_().to("uint8");o.push(u.RawImage.fromTensor(t))}return o.length>1?o:o[0]}}class D extends h{constructor(e){super(e)}async _call(e){const t=await p(e),s=await this.processor(t),{predicted_depth:r}=await this.model(s),o=[];for(let e=0;e<t.length;++e){const 
s=(0,d.interpolate)(r[e],t[e].size.reverse(),"bilinear",!1),n=s.mul_(255/(0,l.max)(s.data)[0]).to("uint8");o.push({predicted_depth:r[e],depth:u.RawImage.fromTensor(n)})}return o.length>1?o:o[0]}}const G=Object.freeze({"text-classification":{tokenizer:r.AutoTokenizer,pipeline:f,model:o.AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-finetuned-sst-2-english"},type:"text"},"token-classification":{tokenizer:r.AutoTokenizer,pipeline:g,model:o.AutoModelForTokenClassification,default:{model:"Xenova/bert-base-multilingual-cased-ner-hrl"},type:"text"},"question-answering":{tokenizer:r.AutoTokenizer,pipeline:M,model:o.AutoModelForQuestionAnswering,default:{model:"Xenova/distilbert-base-cased-distilled-squad"},type:"text"},"fill-mask":{tokenizer:r.AutoTokenizer,pipeline:w,model:o.AutoModelForMaskedLM,default:{model:"Xenova/bert-base-uncased"},type:"text"},summarization:{tokenizer:r.AutoTokenizer,pipeline:b,model:o.AutoModelForSeq2SeqLM,default:{model:"Xenova/distilbart-cnn-6-6"},type:"text"},translation:{tokenizer:r.AutoTokenizer,pipeline:P,model:o.AutoModelForSeq2SeqLM,default:{model:"Xenova/t5-small"},type:"text"},"text2text-generation":{tokenizer:r.AutoTokenizer,pipeline:T,model:o.AutoModelForSeq2SeqLM,default:{model:"Xenova/flan-t5-small"},type:"text"},"text-generation":{tokenizer:r.AutoTokenizer,pipeline:k,model:o.AutoModelForCausalLM,default:{model:"Xenova/gpt2"},type:"text"},"zero-shot-classification":{tokenizer:r.AutoTokenizer,pipeline:y,model:o.AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-mnli"},type:"text"},"audio-classification":{pipeline:C,model:o.AutoModelForAudioClassification,processor:n.AutoProcessor,default:{model:"Xenova/wav2vec2-base-superb-ks"},type:"audio"},"zero-shot-audio-classification":{tokenizer:r.AutoTokenizer,pipeline:S,model:o.AutoModel,processor:n.AutoProcessor,default:{model:"Xenova/clap-htsat-unfused"},type:"multimodal"},"automatic-speech-recognition":{tokenizer:r.AutoTokeniz
er,pipeline:A,model:[o.AutoModelForSpeechSeq2Seq,o.AutoModelForCTC],processor:n.AutoProcessor,default:{model:"Xenova/whisper-tiny.en"},type:"multimodal"},"text-to-audio":{tokenizer:r.AutoTokenizer,pipeline:O,model:[o.AutoModelForTextToWaveform,o.AutoModelForTextToSpectrogram],processor:[n.AutoProcessor,null],default:{model:"Xenova/speecht5_tts"},type:"text"},"image-to-text":{tokenizer:r.AutoTokenizer,pipeline:E,model:o.AutoModelForVision2Seq,processor:n.AutoProcessor,default:{model:"Xenova/vit-gpt2-image-captioning"},type:"multimodal"},"image-classification":{pipeline:L,model:o.AutoModelForImageClassification,processor:n.AutoProcessor,default:{model:"Xenova/vit-base-patch16-224"},type:"multimodal"},"image-segmentation":{pipeline:I,model:[o.AutoModelForImageSegmentation,o.AutoModelForSemanticSegmentation,o.AutoModelForUniversalSegmentation],processor:n.AutoProcessor,default:{model:"Xenova/detr-resnet-50-panoptic"},type:"multimodal"},"zero-shot-image-classification":{tokenizer:r.AutoTokenizer,pipeline:z,model:o.AutoModel,processor:n.AutoProcessor,default:{model:"Xenova/clip-vit-base-patch32"},type:"multimodal"},"object-detection":{pipeline:j,model:o.AutoModelForObjectDetection,processor:n.AutoProcessor,default:{model:"Xenova/detr-resnet-50"},type:"multimodal"},"zero-shot-object-detection":{tokenizer:r.AutoTokenizer,pipeline:N,model:o.AutoModelForZeroShotObjectDetection,processor:n.AutoProcessor,default:{model:"Xenova/owlvit-base-patch32"},type:"multimodal"},"document-question-answering":{tokenizer:r.AutoTokenizer,pipeline:V,model:o.AutoModelForDocumentQuestionAnswering,processor:n.AutoProcessor,default:{model:"Xenova/donut-base-finetuned-docvqa"},type:"multimodal"},"image-to-image":{pipeline:B,model:o.AutoModelForImageToImage,processor:n.AutoProcessor,default:{model:"Xenova/swin2SR-classical-sr-x2-64"},type:"image"},"depth-estimation":{pipeline:D,model:o.AutoModelForDepthEstimation,processor:n.AutoProcessor,default:{model:"Xenova/dpt-large"},type:"image"},"feature-ext
raction":{tokenizer:r.AutoTokenizer,pipeline:F,model:o.AutoModel,default:{model:"Xenova/all-MiniLM-L6-v2"},type:"text"},"image-feature-extraction":{processor:n.AutoProcessor,pipeline:v,model:[o.AutoModelForImageFeatureExtraction,o.AutoModel],default:{model:"Xenova/vit-base-patch16-224-in21k"},type:"image"}}),R=Object.freeze({"sentiment-analysis":"text-classification",ner:"token-classification",asr:"automatic-speech-recognition","text-to-speech":"text-to-audio",embeddings:"feature-extraction"});async function q(e,t=null,{progress_callback:s=null,config:r=null,cache_dir:o=null,local_files_only:n=!1,revision:a="main",device:l=null,dtype:c=null,model_file_name:d=null,session_options:u={}}={}){e=R[e]??e;const p=G[e.split("_",1)[0]];if(!p)throw Error(`Unsupported pipeline: ${e}. Must be one of [${Object.keys(G)}]`);t||(t=p.default.model,console.log(`No model specified. Using default model: "${t}".`));const m={progress_callback:s,config:r,cache_dir:o,local_files_only:n,revision:a,device:l,dtype:c,model_file_name:d,session_options:u},_=new Map([["tokenizer",p.tokenizer],["model",p.model],["processor",p.processor]]),h=await async function(e,t,s){const r=Object.create(null),o=[];for(const[n,a]of e.entries()){if(!a)continue;let e;e=Array.isArray(a)?new Promise((async(e,r)=>{let o;for(const n of a){if(null===n)return void e(null);try{return void e(await n.from_pretrained(t,s))}catch(e){if(e.message?.includes("Unsupported model type"))o=e;else{if(!e.message?.includes("Could not locate file"))return void r(e);o=e}}}r(o)})):a.from_pretrained(t,s),r[n]=e,o.push(e)}await Promise.all(o);for(const[e,t]of Object.entries(r))r[e]=await t;return r}(_,t,m);h.task=e,(0,i.dispatchCallback)(s,{status:"ready",task:e,model:t});return 
new(0,p.pipeline)(h)}},"./src/tokenizers.js":(e,t,s)=>{s.r(t),s.d(t,{AlbertTokenizer:()=>Pe,AutoTokenizer:()=>_t,BartTokenizer:()=>Ne,BertTokenizer:()=>be,BlenderbotSmallTokenizer:()=>lt,BlenderbotTokenizer:()=>it,BloomTokenizer:()=>De,CLIPTokenizer:()=>rt,CamembertTokenizer:()=>Ee,CodeGenTokenizer:()=>st,CodeLlamaTokenizer:()=>qe,CohereTokenizer:()=>pt,ConvBertTokenizer:()=>Ce,DebertaTokenizer:()=>ye,DebertaV2Tokenizer:()=>Fe,DistilBertTokenizer:()=>Ae,ElectraTokenizer:()=>Ie,EsmTokenizer:()=>Xe,FalconTokenizer:()=>Ue,GPT2Tokenizer:()=>je,GPTNeoXTokenizer:()=>Qe,GemmaTokenizer:()=>Je,Grok1Tokenizer:()=>Ye,HerbertTokenizer:()=>ve,LlamaTokenizer:()=>Re,M2M100Tokenizer:()=>et,MBart50Tokenizer:()=>Oe,MBartTokenizer:()=>Ve,MPNetTokenizer:()=>We,MarianTokenizer:()=>nt,MgpstrTokenizer:()=>mt,MobileBertTokenizer:()=>xe,NllbTokenizer:()=>Ze,NougatTokenizer:()=>dt,PreTrainedTokenizer:()=>Te,Qwen2Tokenizer:()=>He,RoFormerTokenizer:()=>Se,RobertaTokenizer:()=>Be,SiglipTokenizer:()=>ot,SpeechT5Tokenizer:()=>ct,SqueezeBertTokenizer:()=>ke,T5Tokenizer:()=>ze,TokenizerModel:()=>x,VitsTokenizer:()=>ut,Wav2Vec2CTCTokenizer:()=>at,WhisperTokenizer:()=>tt,XLMRobertaTokenizer:()=>$e,XLMTokenizer:()=>Le,is_chinese_char:()=>g});var r=s("./src/utils/generic.js"),o=s("./src/utils/core.js"),n=s("./src/utils/hub.js"),a=s("./src/utils/maths.js"),i=s("./src/utils/tensor.js"),l=s("./src/utils/data-structures.js"),c=s("./node_modules/@huggingface/jinja/dist/index.js"),d=s("./src/models/whisper/common_whisper.js");s("./src/utils/constants.js");async function u(e,t){const s=await Promise.all([(0,n.getModelJSON)(e,"tokenizer.json",!0,t),(0,n.getModelJSON)(e,"tokenizer_config.json",!0,t)]);return null!==t.legacy&&(s[1].legacy=t.legacy),s}function p(e,t=!0){if(void 0!==e.Regex){let t=e.Regex.replace(/\\([#&~])/g,"$1");for(const[e,s]of b)t=t.replaceAll(e,s);return new RegExp(t,"gu")}if(void 0!==e.String){const s=(0,o.escapeRegExp)(e.String);return new RegExp(t?s:`(${s})`,"gu")}return 
console.warn("Unknown pattern type:",e),null}function m(e){return new Map(Object.entries(e))}function _(e){const t=e.dims;switch(t.length){case 1:return e.tolist();case 2:if(1!==t[0])throw new Error("Unable to decode tensor with `batch size !== 1`. Use `tokenizer.batch_decode(...)` for batched inputs.");return e.tolist()[0];default:throw new Error(`Expected tensor to have 1-2 dimensions, got ${t.length}.`)}}function h(e){return e.replace(/ \./g,".").replace(/ \?/g,"?").replace(/ \!/g,"!").replace(/ ,/g,",").replace(/ \' /g,"'").replace(/ n\'t/g,"n't").replace(/ \'m/g,"'m").replace(/ \'s/g,"'s").replace(/ \'ve/g,"'ve").replace(/ \'re/g,"'re")}function f(e){return e.replace(/\p{M}/gu,"")}function g(e){return e>=19968&&e<=40959||e>=13312&&e<=19903||e>=131072&&e<=173791||e>=173824&&e<=177983||e>=177984&&e<=178207||e>=178208&&e<=183983||e>=63744&&e<=64255||e>=194560&&e<=195103}const M="\\p{P}\\u0021-\\u002F\\u003A-\\u0040\\u005B-\\u0060\\u007B-\\u007E",w=new RegExp(`^[${M}]+$`,"gu"),T=".,!?…。,、।۔،",b=new Map([["(?i:'s|'t|'re|'ve|'m|'ll|'d)","(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))"],[` ?[^(\\s|[${T}])]+`,` ?[^\\s${T}]+`]]);class P{constructor(e){this.content=e.content,this.id=e.id,this.single_word=e.single_word??!1,this.lstrip=e.lstrip??!1,this.rstrip=e.rstrip??!1,this.special=e.special??!1,this.normalized=e.normalized??null}}class x extends r.Callable{constructor(e){super(),this.config=e,this.vocab=[],this.tokens_to_ids=new Map,this.unk_token_id=void 0,this.unk_token=void 0,this.end_of_word_suffix=void 0,this.fuse_unk=this.config.fuse_unk??!1}static fromConfig(e,...t){switch(e.type){case"WordPiece":return new k(e);case"Unigram":return new y(e,...t);case"BPE":return new C(e);default:if(e.vocab)return Array.isArray(e.vocab)?new y(e,...t):new S(e,...t);throw new Error(`Unknown TokenizerModel type: ${e.type}`)}}_call(e){return e=this.encode(e),this.fuse_unk&&(e=function(e,t,s){const r=[];let 
o=0;for(;o<e.length;)if(r.push(e[o]),(t.get(e[o])??s)===s)for(;++o<e.length&&(t.get(e[o])??s)===s;)t.get(r.at(-1))!==s&&(r[r.length-1]+=e[o]);else++o;return r}(e,this.tokens_to_ids,this.unk_token_id)),e}encode(e){throw Error("encode should be implemented in subclass.")}convert_tokens_to_ids(e){return e.map((e=>this.tokens_to_ids.get(e)??this.unk_token_id))}convert_ids_to_tokens(e){return e.map((e=>this.vocab[e]??this.unk_token))}}class k extends x{constructor(e){super(e),this.tokens_to_ids=m(e.vocab),this.unk_token_id=this.tokens_to_ids.get(e.unk_token),this.unk_token=e.unk_token,this.max_input_chars_per_word=e.max_input_chars_per_word??100,this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e}encode(e){const t=[];for(const s of e){const e=[...s];if(e.length>this.max_input_chars_per_word){t.push(this.unk_token);continue}let r=!1,o=0;const n=[];for(;o<e.length;){let t=e.length,s=null;for(;o<t;){let r=e.slice(o,t).join("");if(o>0&&(r=this.config.continuing_subword_prefix+r),this.tokens_to_ids.has(r)){s=r;break}--t}if(null===s){r=!0;break}n.push(s),o=t}r?t.push(this.unk_token):t.push(...n)}return t}}class y extends x{constructor(e,t){super(e);const s=e.vocab.length;this.vocab=new Array(s),this.scores=new Array(s);for(let t=0;t<s;++t){const s=e.vocab[t];this.vocab[t]=s[0],this.scores[t]=s[1]}this.unk_token_id=e.unk_id,this.unk_token=this.vocab[e.unk_id],this.tokens_to_ids=new Map(this.vocab.map(((e,t)=>[e,t]))),this.bos_token=" ",this.bos_token_id=this.tokens_to_ids.get(this.bos_token),this.eos_token=t.eos_token,this.eos_token_id=this.tokens_to_ids.get(this.eos_token),this.unk_token=this.vocab[this.unk_token_id],this.minScore=(0,a.min)(this.scores)[0],this.unk_score=this.minScore-10,this.scores[this.unk_token_id]=this.unk_score,this.trie=new l.CharTrie,this.trie.extend(this.vocab),this.fuse_unk=!0}populateNodes(e){const t=e.chars;let s=0;for(;s<t.length;){let r=!1;const 
n=[],a=t.slice(s).join(""),i=this.trie.commonPrefixSearch(a);for(const t of i){n.push(t);const a=this.tokens_to_ids.get(t),i=this.scores[a],l=(0,o.len)(t);e.insert(s,l,i,a),r||1!==l||(r=!0)}r||e.insert(s,1,this.unk_score,this.unk_token_id),s+=1}}tokenize(e){const t=new l.TokenLattice(e,this.bos_token_id,this.eos_token_id);return this.populateNodes(t),t.tokens()}encode(e){const t=[];for(const s of e){const e=this.tokenize(s);t.push(...e)}return t}}const F=(()=>{const e=[...Array.from({length:"~".charCodeAt(0)-"!".charCodeAt(0)+1},((e,t)=>t+"!".charCodeAt(0))),...Array.from({length:"¬".charCodeAt(0)-"¡".charCodeAt(0)+1},((e,t)=>t+"¡".charCodeAt(0))),...Array.from({length:"ÿ".charCodeAt(0)-"®".charCodeAt(0)+1},((e,t)=>t+"®".charCodeAt(0)))],t=e.slice();let s=0;for(let r=0;r<256;++r)e.includes(r)||(e.push(r),t.push(256+s),s+=1);const r=t.map((e=>String.fromCharCode(e)));return Object.fromEntries(e.map(((e,t)=>[e,r[t]])))})(),v=(0,o.reverseDictionary)(F);class C extends x{constructor(e){super(e),this.tokens_to_ids=m(e.vocab),this.unk_token_id=this.tokens_to_ids.get(e.unk_token),this.unk_token=e.unk_token,this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e;const t=Array.isArray(e.merges[0]);this.merges=t?e.merges:e.merges.map((e=>e.split(" ",2))),this.bpe_ranks=new Map(this.merges.map(((e,t)=>[JSON.stringify(e),t]))),this.end_of_word_suffix=e.end_of_word_suffix,this.continuing_subword_suffix=e.continuing_subword_suffix??null,this.byte_fallback=this.config.byte_fallback??!1,this.byte_fallback&&(this.text_encoder=new TextEncoder),this.ignore_merges=this.config.ignore_merges??!1,this.cache=new Map}bpe(e){if(0===e.length)return[];const t=this.cache.get(e);if(void 0!==t)return t;const s=Array.from(e);this.end_of_word_suffix&&(s[s.length-1]+=this.end_of_word_suffix);let r=[];if(s.length>1){const e=new l.PriorityQueue(((e,t)=>e.score<t.score));let t={token:s[0],bias:0,prev:null,next:null},o=t;for(let t=1;t<s.length;++t){const 
r={bias:t/s.length,token:s[t],prev:o,next:null};o.next=r,this._add_node(e,o),o=r}for(;!e.isEmpty();){const s=e.pop();if(s.deleted||!s.next||s.next.deleted)continue;if(s.deleted=!0,s.next.deleted=!0,s.prev){const e={...s.prev};s.prev.deleted=!0,s.prev=e,e.prev?e.prev.next=e:t=e}const r={token:s.token+s.next.token,bias:s.bias,prev:s.prev,next:s.next.next};r.prev?(r.prev.next=r,this._add_node(e,r.prev)):t=r,r.next&&(r.next.prev=r,this._add_node(e,r))}for(let e=t;null!==e;e=e.next)r.push(e.token)}else r=s;if(this.continuing_subword_suffix)for(let e=0;e<r.length-1;++e)r[e]+=this.continuing_subword_suffix;return this.cache.set(e,r),r}_add_node(e,t){const s=this.bpe_ranks.get(JSON.stringify([t.token,t.next.token]));void 0!==s&&(t.score=s+t.bias,e.push(t))}encode(e){const t=[];for(const s of e){if(this.ignore_merges&&this.tokens_to_ids.has(s)){t.push(s);continue}const e=this.bpe(s);for(const s of e)if(this.tokens_to_ids.has(s))t.push(s);else if(this.byte_fallback){const e=Array.from(this.text_encoder.encode(s)).map((e=>`<0x${e.toString(16).toUpperCase().padStart(2,"0")}>`));e.every((e=>this.tokens_to_ids.has(e)))?t.push(...e):t.push(this.unk_token)}else t.push(this.unk_token)}return t}}class S extends x{constructor(e,t){super(e),this.tokens_to_ids=m(t.target_lang?e.vocab[t.target_lang]:e.vocab),this.bos_token=t.bos_token,this.bos_token_id=this.tokens_to_ids.get(this.bos_token),this.eos_token=t.eos_token,this.eos_token_id=this.tokens_to_ids.get(this.eos_token),this.pad_token=t.pad_token,this.pad_token_id=this.tokens_to_ids.get(this.pad_token),this.unk_token=t.unk_token,this.unk_token_id=this.tokens_to_ids.get(this.unk_token),this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e}encode(e){return e}}class A extends r.Callable{constructor(e){super(),this.config=e}static fromConfig(e){if(null===e)return null;switch(e.type){case"BertNormalizer":return new D(e);case"Precompiled":return new pe(e);case"Sequence":return new 
B(e);case"Replace":return new E(e);case"NFC":return new L(e);case"NFKC":return new I(e);case"NFKD":return new z(e);case"Strip":return new j(e);case"StripAccents":return new N(e);case"Lowercase":return new V(e);case"Prepend":return new O(e);default:throw new Error(`Unknown Normalizer type: ${e.type}`)}}normalize(e){throw Error("normalize should be implemented in subclass.")}_call(e){return this.normalize(e)}}class E extends A{normalize(e){const t=p(this.config.pattern);return null===t?e:e.replaceAll(t,this.config.content)}}class L extends A{normalize(e){return e=e.normalize("NFC")}}class I extends A{normalize(e){return e=e.normalize("NFKC")}}class z extends A{normalize(e){return e=e.normalize("NFKD")}}class j extends A{normalize(e){return this.config.strip_left&&this.config.strip_right?e=e.trim():(this.config.strip_left&&(e=e.trimStart()),this.config.strip_right&&(e=e.trimEnd())),e}}class N extends A{normalize(e){return e=f(e)}}class V extends A{normalize(e){return e=e.toLowerCase()}}class O extends A{normalize(e){return e=this.config.prepend+e}}class B extends A{constructor(e){super(e),this.normalizers=e.normalizers.map((e=>A.fromConfig(e)))}normalize(e){return this.normalizers.reduce(((e,t)=>t.normalize(e)),e)}}class D extends A{_tokenize_chinese_chars(e){const t=[];for(let s=0;s<e.length;++s){const r=e[s];g(r.charCodeAt(0))?(t.push(" "),t.push(r),t.push(" ")):t.push(r)}return t.join("")}stripAccents(e){return e.normalize("NFD").replace(/\p{Mn}/gu,"")}_is_control(e){switch(e){case"\t":case"\n":case"\r":return!1;default:return/^\p{Cc}|\p{Cf}|\p{Co}|\p{Cs}$/u.test(e)}}_clean_text(e){const t=[];for(const s of e){const e=s.charCodeAt(0);0===e||65533===e||this._is_control(s)||(/^\s$/.test(s)?t.push(" "):t.push(s))}return t.join("")}normalize(e){return 
this.config.clean_text&&(e=this._clean_text(e)),this.config.handle_chinese_chars&&(e=this._tokenize_chinese_chars(e)),this.config.lowercase?(e=e.toLowerCase(),!1!==this.config.strip_accents&&(e=this.stripAccents(e))):this.config.strip_accents&&(e=this.stripAccents(e)),e}}class G extends r.Callable{static fromConfig(e){if(null===e)return null;switch(e.type){case"BertPreTokenizer":return new R(e);case"Sequence":return new me(e);case"Whitespace":return new _e(e);case"WhitespaceSplit":return new he(e);case"Metaspace":return new de(e);case"ByteLevel":return new q(e);case"Split":return new $(e);case"Punctuation":return new W(e);case"Digits":return new U(e);case"Replace":return new fe(e);default:throw new Error(`Unknown PreTokenizer type: ${e.type}`)}}pre_tokenize_text(e,t){throw Error("pre_tokenize_text should be implemented in subclass.")}pre_tokenize(e,t){return(Array.isArray(e)?e.map((e=>this.pre_tokenize_text(e,t))):this.pre_tokenize_text(e,t)).flat()}_call(e,t){return this.pre_tokenize(e,t)}}class R extends G{constructor(e){super(),this.pattern=new RegExp(`[^\\s${M}]+|[${M}]`,"gu")}pre_tokenize_text(e,t){return e.trim().match(this.pattern)||[]}}class q extends G{constructor(e){super(),this.config=e,this.add_prefix_space=this.config.add_prefix_space,this.trim_offsets=this.config.trim_offsets,this.use_regex=this.config.use_regex??!0,this.pattern=/'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+/gu,this.byte_encoder=F,this.text_encoder=new TextEncoder}pre_tokenize_text(e,t){this.add_prefix_space&&!e.startsWith(" ")&&(e=" "+e);return(this.use_regex?e.match(this.pattern)||[]:[e]).map((e=>Array.from(this.text_encoder.encode(e),(e=>this.byte_encoder[e])).join("")))}}class $ extends G{constructor(e){super(),this.config=e,this.pattern=p(this.config.pattern,this.config.invert)}pre_tokenize_text(e,t){return 
null===this.pattern?[]:this.config.invert?e.match(this.pattern)||[]:"removed"===this.config.behavior?.toLowerCase()?e.split(this.pattern).filter((e=>e)):function(e,t){const s=[];let r=0;for(const o of e.matchAll(t)){const t=o[0];r<o.index&&s.push(e.slice(r,o.index)),t.length>0&&s.push(t),r=o.index+t.length}return r<e.length&&s.push(e.slice(r)),s}(e,this.pattern)}}class W extends G{constructor(e){super(),this.config=e,this.pattern=new RegExp(`[^${M}]+|[${M}]+`,"gu")}pre_tokenize_text(e,t){return e.match(this.pattern)||[]}}class U extends G{constructor(e){super(),this.config=e;const t="[^\\d]+|\\d"+(this.config.individual_digits?"":"+");this.pattern=new RegExp(t,"gu")}pre_tokenize_text(e,t){return e.match(this.pattern)||[]}}class Q extends r.Callable{constructor(e){super(),this.config=e}static fromConfig(e){if(null===e)return null;switch(e.type){case"TemplateProcessing":return new J(e);case"ByteLevel":return new Y(e);case"RobertaProcessing":return new H(e);case"BertProcessing":return new X(e);case"Sequence":return new K(e);default:throw new Error(`Unknown PostProcessor type: ${e.type}`)}}post_process(e,...t){throw Error("post_process should be implemented in subclass.")}_call(e,...t){return this.post_process(e,...t)}}class X extends Q{constructor(e){super(e),this.cls=e.cls[0],this.sep=e.sep[0]}post_process(e,t=null,{add_special_tokens:s=!0}={}){s&&(e=(0,o.mergeArrays)([this.cls],e,[this.sep]));let r=new Array(e.length).fill(0);if(null!==t){const n=s&&this instanceof H?[this.sep]:[],a=s?[this.sep]:[];e=(0,o.mergeArrays)(e,n,t,a),r=(0,o.mergeArrays)(r,new Array(t.length+n.length+a.length).fill(1))}return{tokens:e,token_type_ids:r}}}class H extends X{}class J extends Q{constructor(e){super(e),this.single=e.single,this.pair=e.pair}post_process(e,t=null,{add_special_tokens:s=!0}={}){const r=null===t?this.single:this.pair;let n=[],a=[];for(const i of r)"SpecialToken"in i?s&&(n.push(i.SpecialToken.id),a.push(i.SpecialToken.type_id)):"Sequence"in 
i&&("A"===i.Sequence.id?(n=(0,o.mergeArrays)(n,e),a=(0,o.mergeArrays)(a,new Array(e.length).fill(i.Sequence.type_id))):"B"===i.Sequence.id&&(n=(0,o.mergeArrays)(n,t),a=(0,o.mergeArrays)(a,new Array(t.length).fill(i.Sequence.type_id))));return{tokens:n,token_type_ids:a}}}class Y extends Q{post_process(e,t=null){return t&&(e=(0,o.mergeArrays)(e,t)),{tokens:e}}}class K extends Q{constructor(e){super(e),this.processors=e.processors.map((e=>Q.fromConfig(e)))}post_process(e,t=null,s={}){let r;for(const o of this.processors)if(o instanceof Y){if(e=o.post_process(e).tokens,t){t=o.post_process(t).tokens}}else{const n=o.post_process(e,t,s);e=n.tokens,r=n.token_type_ids}return{tokens:e,token_type_ids:r}}}class Z extends r.Callable{constructor(e){super(),this.config=e,this.added_tokens=[],this.end_of_word_suffix=null,this.trim_offsets=e.trim_offsets}static fromConfig(e){if(null===e)return null;switch(e.type){case"WordPiece":return new oe(e);case"Metaspace":return new ue(e);case"ByteLevel":return new ne(e);case"Replace":return new ee(e);case"ByteFallback":return new te(e);case"Fuse":return new se(e);case"Strip":return new re(e);case"Sequence":return new ie(e);case"CTC":return new ae(e);case"BPEDecoder":return new le(e);default:throw new Error(`Unknown Decoder type: ${e.type}`)}}_call(e){return this.decode(e)}decode(e){return this.decode_chain(e).join("")}decode_chain(e){throw Error("`decode_chain` should be implemented in subclass.")}}class ee extends Z{decode_chain(e){const t=p(this.config.pattern);return null===t?e:e.map((e=>e.replaceAll(t,this.config.content)))}}class te extends Z{constructor(e){super(e),this.text_decoder=new TextDecoder}decode_chain(e){const t=[];let s=[];for(const r of e){let e=null;if(6===r.length&&r.startsWith("<0x")&&r.endsWith(">")){const t=parseInt(r.slice(3,5),16);isNaN(t)||(e=t)}if(null!==e)s.push(e);else{if(s.length>0){const e=this.text_decoder.decode(Uint8Array.from(s));t.push(e),s=[]}t.push(r)}}if(s.length>0){const 
e=this.text_decoder.decode(Uint8Array.from(s));t.push(e),s=[]}return t}}class se extends Z{decode_chain(e){return[e.join("")]}}class re extends Z{constructor(e){super(e),this.content=this.config.content,this.start=this.config.start,this.stop=this.config.stop}decode_chain(e){return e.map((e=>{let t=0;for(let s=0;s<this.start&&e[s]===this.content;++s)t=s+1;let s=e.length;for(let t=0;t<this.stop;++t){const r=e.length-t-1;if(e[r]!==this.content)break;s=r}return e.slice(t,s)}))}}class oe extends Z{constructor(e){super(e),this.cleanup=e.cleanup}decode_chain(e){return e.map(((e,t)=>(0!==t&&(e=e.startsWith(this.config.prefix)?e.replace(this.config.prefix,""):" "+e),this.cleanup&&(e=h(e)),e)))}}class ne extends Z{constructor(e){super(e),this.byte_decoder=v,this.text_decoder=new TextDecoder("utf-8",{fatal:!1,ignoreBOM:!0}),this.end_of_word_suffix=null}convert_tokens_to_string(e){const t=e.join(""),s=new Uint8Array([...t].map((e=>this.byte_decoder[e])));return this.text_decoder.decode(s)}decode_chain(e){const t=[];let s=[];for(const r of e)void 0!==this.added_tokens.find((e=>e.content===r))?(s.length>0&&(t.push(this.convert_tokens_to_string(s)),s=[]),t.push(r)):s.push(r);return s.length>0&&t.push(this.convert_tokens_to_string(s)),t}}class ae extends Z{constructor(e){super(e),this.pad_token=this.config.pad_token,this.word_delimiter_token=this.config.word_delimiter_token,this.cleanup=this.config.cleanup}convert_tokens_to_string(e){if(0===e.length)return"";const t=[e[0]];for(let s=1;s<e.length;++s)e[s]!==t.at(-1)&&t.push(e[s]);let s=t.filter((e=>e!==this.pad_token)).join("");return this.cleanup&&(s=h(s).replaceAll(this.word_delimiter_token," ").trim()),s}decode_chain(e){return[this.convert_tokens_to_string(e)]}}class ie extends Z{constructor(e){super(e),this.decoders=e.decoders.map((e=>Z.fromConfig(e)))}decode_chain(e){return this.decoders.reduce(((e,t)=>t.decode_chain(e)),e)}}class le extends Z{constructor(e){super(e),this.suffix=this.config.suffix}decode_chain(e){return 
e.map(((t,s)=>t.replaceAll(this.suffix,s===e.length-1?"":" ")))}}class ce extends Z{decode_chain(e){let t="";for(let s=1;s<e.length;s+=2)t+=e[s];return[t]}}class de extends G{constructor(e){super(),this.addPrefixSpace=e.add_prefix_space,this.replacement=e.replacement,this.strRep=e.str_rep||this.replacement,this.prepend_scheme=e.prepend_scheme??"always"}pre_tokenize_text(e,{section_index:t}={}){let s=e.replaceAll(" ",this.strRep);return this.addPrefixSpace&&!s.startsWith(this.replacement)&&("always"===this.prepend_scheme||"first"===this.prepend_scheme&&0===t)&&(s=this.strRep+s),[s]}}class ue extends Z{constructor(e){super(e),this.addPrefixSpace=e.add_prefix_space,this.replacement=e.replacement}decode_chain(e){const t=[];for(let s=0;s<e.length;++s){let r=e[s].replaceAll(this.replacement," ");this.addPrefixSpace&&0==s&&r.startsWith(" ")&&(r=r.substring(1)),t.push(r)}return t}}class pe extends A{constructor(e){super(e),this.charsmap=e.precompiled_charsmap}normalize(e){if((e=(e=e.replace(/[\u0001-\u0008\u000B\u000E-\u001F\u007F\u008F\u009F]/gm,"")).replace(/[\u0009\u000A\u000C\u000D\u00A0\u1680\u2000-\u200F\u2028\u2029\u202F\u205F\u2581\u3000\uFEFF\uFFFD]/gm," ")).includes("~")){const t=e.split("~");e=t.map((e=>e.normalize("NFKC"))).join("~")}else e=e.normalize("NFKC");return e}}class me extends G{constructor(e){super(),this.tokenizers=e.pretokenizers.map((e=>G.fromConfig(e)))}pre_tokenize_text(e,t){return this.tokenizers.reduce(((e,s)=>s.pre_tokenize(e,t)),[e])}}class _e extends G{constructor(e){super()}pre_tokenize_text(e,t){return e.match(/\w+|[^\w\s]+/g)||[]}}class he extends G{constructor(e){super()}pre_tokenize_text(e,t){return function(e){return e.match(/\S+/g)||[]}(e)}}class fe extends G{constructor(e){super(),this.config=e,this.pattern=p(this.config.pattern),this.content=this.config.content}pre_tokenize_text(e,t){return null===this.pattern?[e]:[e.replaceAll(this.pattern,this.config.content)]}}const 
ge=["bos_token","eos_token","unk_token","sep_token","pad_token","cls_token","mask_token"];function Me(e,t,s,r){for(const n of Object.keys(e)){const a=t-e[n].length,i=s(n),l=new Array(a).fill(i);e[n]="right"===r?(0,o.mergeArrays)(e[n],l):(0,o.mergeArrays)(l,e[n])}}function we(e,t){for(const s of Object.keys(e))e[s].length=t}class Te extends r.Callable{return_token_type_ids=!1;padding_side="right";constructor(e,t){super(),this._tokenizer_config=t,this.normalizer=A.fromConfig(e.normalizer),this.pre_tokenizer=G.fromConfig(e.pre_tokenizer),this.model=x.fromConfig(e.model,t),this.post_processor=Q.fromConfig(e.post_processor),this.decoder=Z.fromConfig(e.decoder),this.special_tokens=[],this.all_special_ids=[],this.added_tokens=[];for(const t of e.added_tokens){const e=new P(t);this.added_tokens.push(e),this.model.tokens_to_ids.set(e.content,e.id),this.model.vocab[e.id]=e.content,e.special&&(this.special_tokens.push(e.content),this.all_special_ids.push(e.id))}if(this.additional_special_tokens=t.additional_special_tokens??[],this.special_tokens.push(...this.additional_special_tokens),this.special_tokens=[...new Set(this.special_tokens)],this.decoder&&(this.decoder.added_tokens=this.added_tokens,this.decoder.end_of_word_suffix=this.model.end_of_word_suffix),this.added_tokens_regex=this.added_tokens.length>0?new 
RegExp(this.added_tokens.slice().sort(((e,t)=>t.content.length-e.content.length)).map((e=>`${e.lstrip?"\\s*":""}(${(0,o.escapeRegExp)(e.content)})${e.rstrip?"\\s*":""}`)).join("|")):null,this.mask_token=this.getToken("mask_token"),this.mask_token_id=this.model.tokens_to_ids.get(this.mask_token),this.pad_token=this.getToken("pad_token","eos_token"),this.pad_token_id=this.model.tokens_to_ids.get(this.pad_token),this.sep_token=this.getToken("sep_token"),this.sep_token_id=this.model.tokens_to_ids.get(this.sep_token),this.unk_token=this.getToken("unk_token"),this.unk_token_id=this.model.tokens_to_ids.get(this.unk_token),this.bos_token=this.getToken("bos_token"),this.bos_token_id=this.model.tokens_to_ids.get(this.bos_token),this.eos_token=this.getToken("eos_token"),this.eos_token_id=this.model.tokens_to_ids.get(this.eos_token),this.model_max_length=t.model_max_length,this.remove_space=t.remove_space,this.clean_up_tokenization_spaces=t.clean_up_tokenization_spaces??!0,this.do_lowercase_and_remove_accent=t.do_lowercase_and_remove_accent??!1,t.padding_side&&(this.padding_side=t.padding_side),this.legacy=!1,this.chat_template=t.chat_template??null,Array.isArray(this.chat_template)){const e=Object.create(null);for(const{name:t,template:s}of this.chat_template){if("string"!=typeof t||"string"!=typeof s)throw new Error('Chat template must be a list of objects with "name" and "template" properties');e[t]=s}this.chat_template=e}this._compiled_template_cache=new Map}getToken(...e){for(const t of e){const e=this._tokenizer_config[t];if(e){if("object"==typeof e){if("AddedToken"===e.__type)return e.content;throw Error(`Unknown token: ${e}`)}return e}}return null}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:r=null,local_files_only:o=!1,revision:n="main",legacy:a=null}={}){return new this(...await 
u(e,{progress_callback:t,config:s,cache_dir:r,local_files_only:o,revision:n,legacy:a}))}_call(e,{text_pair:t=null,add_special_tokens:s=!0,padding:r=!1,truncation:o=null,max_length:n=null,return_tensor:l=!0,return_token_type_ids:c=null}={}){const d=Array.isArray(e);let u;if(d){if(0===e.length)throw Error("text array must be non-empty");if(null!==t){if(!Array.isArray(t))throw Error("text_pair must also be an array");if(e.length!==t.length)throw Error("text and text_pair must have the same length");u=e.map(((e,r)=>this._encode_plus(e,{text_pair:t[r],add_special_tokens:s,return_token_type_ids:c})))}else u=e.map((e=>this._encode_plus(e,{add_special_tokens:s,return_token_type_ids:c})))}else{if(null==e)throw Error("text may not be null or undefined");if(Array.isArray(t))throw Error("When specifying `text_pair`, since `text` is a string, `text_pair` must also be a string (i.e., not an array).");u=[this._encode_plus(e,{text_pair:t,add_special_tokens:s,return_token_type_ids:c})]}if(null===n?n="max_length"===r?this.model_max_length:(0,a.max)(u.map((e=>e.input_ids.length)))[0]:o||console.warn("Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=true` to explicitly truncate examples to max length."),n=Math.min(n,this.model_max_length??1/0),r||o)for(let e=0;e<u.length;++e)u[e].input_ids.length!==n&&(u[e].input_ids.length>n?o&&we(u[e],n):r&&Me(u[e],n,(e=>"input_ids"===e?this.pad_token_id:0),this.padding_side));const p={};if(l){if((!r||!o)&&u.some((e=>{for(const t of Object.keys(e))if(e[t].length!==u[0][t]?.length)return!0;return!1})))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=true' and 'truncation=true' to have batched tensors with the same length.");const e=[u.length,u[0].input_ids.length];for(const t of Object.keys(u[0]))p[t]=new i.Tensor("int64",BigInt64Array.from(u.flatMap((e=>e[t])).map(BigInt)),e)}else{for(const e of 
Object.keys(u[0]))p[e]=u.map((t=>t[e]));if(!d)for(const e of Object.keys(p))p[e]=p[e][0]}return p}_encode_text(e){if(null===e)return null;const t=(this.added_tokens_regex?e.split(this.added_tokens_regex).filter((e=>e)):[e]).map(((e,t)=>{if(void 0!==this.added_tokens.find((t=>t.content===e)))return e;{if(!0===this.remove_space&&(e=e.trim().split(/\s+/).join(" ")),this.do_lowercase_and_remove_accent&&(e=function(e){return f(e.toLowerCase())}(e)),null!==this.normalizer&&(e=this.normalizer(e)),0===e.length)return[];const s=null!==this.pre_tokenizer?this.pre_tokenizer(e,{section_index:t}):[e];return this.model(s)}})).flat();return t}_encode_plus(e,{text_pair:t=null,add_special_tokens:s=!0,return_token_type_ids:r=null}={}){const{tokens:o,token_type_ids:n}=this._tokenize_helper(e,{pair:t,add_special_tokens:s}),a=this.model.convert_tokens_to_ids(o),i={input_ids:a,attention_mask:new Array(a.length).fill(1)};return(r??this.return_token_type_ids)&&n&&(i.token_type_ids=n),i}_tokenize_helper(e,{pair:t=null,add_special_tokens:s=!1}={}){const r=this._encode_text(e),n=this._encode_text(t);return this.post_processor?this.post_processor(r,n,{add_special_tokens:s}):{tokens:(0,o.mergeArrays)(r??[],n??[])}}tokenize(e,{pair:t=null,add_special_tokens:s=!1}={}){return this._tokenize_helper(e,{pair:t,add_special_tokens:s}).tokens}encode(e,{text_pair:t=null,add_special_tokens:s=!0,return_token_type_ids:r=null}={}){return this._encode_plus(e,{text_pair:t,add_special_tokens:s,return_token_type_ids:r}).input_ids}batch_decode(e,t={}){return e instanceof i.Tensor&&(e=e.tolist()),e.map((e=>this.decode(e,t)))}decode(e,t={}){if(e instanceof i.Tensor&&(e=_(e)),!Array.isArray(e)||0===e.length||!(0,o.isIntegralNumber)(e[0]))throw Error("token_ids must be a non-empty array of integers.");return this.decode_single(e,t)}decode_single(e,{skip_special_tokens:t=!1,clean_up_tokenization_spaces:s=null}){let r=this.model.convert_ids_to_tokens(e);t&&(r=r.filter((e=>!this.special_tokens.includes(e))));let 
o=this.decoder?this.decoder(r):r.join(" ");return this.decoder&&this.decoder.end_of_word_suffix&&(o=o.replaceAll(this.decoder.end_of_word_suffix," "),t&&(o=o.trim())),(s??this.clean_up_tokenization_spaces)&&(o=h(o)),o}get_chat_template({chat_template:e=null,tools:t=null}={}){if(this.chat_template&&"object"==typeof this.chat_template){const s=this.chat_template;if(null!==e&&Object.hasOwn(s,e))e=s[e];else if(null===e)if(null!==t&&"tool_use"in s)e=s.tool_use;else{if(!("default"in s))throw Error(`This model has multiple chat templates with no default specified! Please either pass a chat template or the name of the template you wish to use to the 'chat_template' argument. Available template names are ${Object.keys(s).sort()}.`);e=s.default}}else if(null===e){if(!this.chat_template)throw Error("Cannot use apply_chat_template() because tokenizer.chat_template is not set and no template argument was passed! For information about writing templates and setting the tokenizer.chat_template attribute, please see the documentation at https://huggingface.co/docs/transformers/main/en/chat_templating");e=this.chat_template}return e}apply_chat_template(e,{tools:t=null,documents:s=null,chat_template:r=null,add_generation_prompt:o=!1,tokenize:n=!0,padding:a=!1,truncation:i=!1,max_length:l=null,return_tensor:d=!0,return_dict:u=!1,tokenizer_kwargs:p={},...m}={}){if("string"!=typeof(r=this.get_chat_template({chat_template:r,tools:t})))throw Error("chat_template must be a string, but got "+typeof r);let _=this._compiled_template_cache.get(r);void 0===_&&(_=new c.Template(r),this._compiled_template_cache.set(r,_));const h=Object.create(null);for(const e of ge){const t=this.getToken(e);t&&(h[e]=t)}const f=_.render({messages:e,add_generation_prompt:o,tools:t,documents:s,...h,...m});if(n){const e=this._call(f,{add_special_tokens:!1,padding:a,truncation:i,max_length:l,return_tensor:d,...p});return u?e:e.input_ids}return f}}class be extends Te{return_token_type_ids=!0}class Pe extends 
Te{return_token_type_ids=!0}class xe extends Te{return_token_type_ids=!0}class ke extends Te{return_token_type_ids=!0}class ye extends Te{return_token_type_ids=!0}class Fe extends Te{return_token_type_ids=!0}class ve extends Te{return_token_type_ids=!0}class Ce extends Te{return_token_type_ids=!0}class Se extends Te{return_token_type_ids=!0}class Ae extends Te{}class Ee extends Te{}class Le extends Te{return_token_type_ids=!0;constructor(e,t){super(e,t),console.warn('WARNING: `XLMTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')}}class Ie extends Te{return_token_type_ids=!0}class ze extends Te{}class je extends Te{}class Ne extends Te{}class Ve extends Te{constructor(e,t){super(e,t),this.languageRegex=/^[a-z]{2}_[A-Z]{2}$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))),this.lang_to_token=e=>e}_build_translation_inputs(e,t,s){return Ke(this,e,t,s)}}class Oe extends Ve{}class Be extends Te{}class De extends Te{}const Ge="▁";class Re extends Te{padding_side="left";constructor(e,t){super(e,t),this.legacy=t.legacy??!0,this.legacy||(this.normalizer=null,this.pre_tokenizer=new de({replacement:Ge,add_prefix_space:!0,prepend_scheme:"first"}))}_encode_text(e){if(null===e)return null;if(this.legacy||0===e.length)return super._encode_text(e);let t=super._encode_text(Ge+e.replaceAll(Ge," "));return t.length>1&&t[0]===Ge&&this.special_tokens.includes(t[1])&&(t=t.slice(1)),t}}class qe extends Te{}class $e extends Te{}class We extends Te{}class Ue extends Te{}class Qe extends Te{}class Xe extends Te{}class He extends Te{}class Je extends Te{}class Ye extends Te{}function Ke(e,t,s,r){if(!("language_codes"in e)||!Array.isArray(e.language_codes))throw new Error("Tokenizer must have `language_codes` attribute set and it should be an array of language ids.");if(!("languageRegex"in e&&e.languageRegex instanceof RegExp))throw new Error("Tokenizer must have 
`languageRegex` attribute set and it should be a regular expression.");if(!("lang_to_token"in e)||"function"!=typeof e.lang_to_token)throw new Error("Tokenizer must have `lang_to_token` attribute set and it should be a function.");const o=r.src_lang,n=r.tgt_lang;if(!e.language_codes.includes(n))throw new Error(`Target language code "${n}" is not valid. Must be one of: {${e.language_codes.join(", ")}}`);if(void 0!==o){if(!e.language_codes.includes(o))throw new Error(`Source language code "${o}" is not valid. Must be one of: {${e.language_codes.join(", ")}}`);for(const t of e.post_processor.config.single)if("SpecialToken"in t&&e.languageRegex.test(t.SpecialToken.id)){t.SpecialToken.id=e.lang_to_token(o);break}}return r.forced_bos_token_id=e.model.convert_tokens_to_ids([e.lang_to_token(n)])[0],e._call(t,s)}class Ze extends Te{constructor(e,t){super(e,t),this.languageRegex=/^[a-z]{3}_[A-Z][a-z]{3}$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))),this.lang_to_token=e=>e}_build_translation_inputs(e,t,s){return Ke(this,e,t,s)}}class et extends Te{constructor(e,t){super(e,t),this.languageRegex=/^__[a-z]{2,3}__$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))).map((e=>e.slice(2,-2))),this.lang_to_token=e=>`__${e}__`}_build_translation_inputs(e,t,s){return Ke(this,e,t,s)}}class tt extends Te{get timestamp_begin(){return this.model.convert_tokens_to_ids(["<|notimestamps|>"])[0]+1}_decode_asr(e,{return_timestamps:t=!1,return_language:s=!1,time_precision:r=null,force_full_sequences:o=!0}={}){if(null===r)throw Error("Must specify time_precision");let n=null;const i="word"===t;function l(){return{language:n,timestamp:[null,null],text:""}}const c=[];let u=l(),p=0;const m=this.timestamp_begin,_=m+1500;let h=[],f=[],g=!1,M=null;const T=new Set(this.all_special_ids);for(const s of e){const e=s.tokens,o=i?s.token_timestamps:null;let b=null,P=m;if("stride"in s){const[t,o,n]=s.stride;if(p-=o,M=t-n,o&&(P=o/r+m),n)for(let 
t=e.length-1;t>=0;--t){const s=Number(e[t]);if(s>=m){if(null!==b&&(s-m)*r<M)break;b=s}}}let x=[],k=[];for(let s=0;s<e.length;++s){const M=Number(e[s]);if(T.has(M)){const e=this.decode([M]),s=d.WHISPER_LANGUAGE_MAPPING.get(e.slice(2,-2));if(void 0!==s){if(null!==n&&s!==n&&!t){h.push(x);const e=this.findLongestCommonSequence(h)[0],t=this.decode(e);u.text=t,c.push(u),h=[],x=[],u=l()}n=u.language=s}}else if(M>=m&&M<=_){const e=(M-m)*r+p,t=(0,a.round)(e,2);if(null!==b&&M>=b)g=!0;else if(g||h.length>0&&M<P)g=!1;else if(null===u.timestamp[0])u.timestamp[0]=t;else if(t===u.timestamp[0]);else{u.timestamp[1]=t,h.push(x),i&&f.push(k);const[e,s]=this.findLongestCommonSequence(h,f),r=this.decode(e);u.text=r,i&&(u.words=this.collateWordTimestamps(e,s,n)),c.push(u),h=[],x=[],f=[],k=[],u=l()}}else if(x.push(M),i){let e,t=(0,a.round)(o[s]+p,2);if(s+1<o.length){e=(0,a.round)(o[s+1]+p,2);const n=this.decode([M]);w.test(n)&&(e=(0,a.round)(Math.min(t+r,e),2))}else e=null;k.push([t,e])}}if("stride"in s){const[e,t,r]=s.stride;p+=e-r}x.length>0?(h.push(x),i&&f.push(k)):h.every((e=>0===e.length))&&(u=l(),h=[],x=[],f=[],k=[])}if(h.length>0){if(o&&t)throw new Error("Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. 
Also make sure WhisperTimeStampLogitsProcessor was used during generation.");const[e,s]=this.findLongestCommonSequence(h,f),r=this.decode(e);u.text=r,i&&(u.words=this.collateWordTimestamps(e,s,n)),c.push(u)}let b=Object.create(null);const P=c.map((e=>e.text)).join("");if(t||s){for(let e=0;e<c.length;++e){const r=c[e];t||delete r.timestamp,s||delete r.language}if(i){const e=[];for(const t of c)for(const s of t.words)e.push(s);b={chunks:e}}else b={chunks:c}}return[P,b]}findLongestCommonSequence(e,t=null){let s=e[0],r=s.length,o=[];const n=Array.isArray(t)&&t.length>0;let a=n?[]:null,i=n?t[0]:null;for(let l=1;l<e.length;++l){const c=e[l];let d=0,u=[r,r,0,0];const p=c.length;for(let e=1;e<r+p;++e){const o=Math.max(0,r-e),a=Math.min(r,r+p-e),m=s.slice(o,a),_=Math.max(0,e-r),h=Math.min(p,e),f=c.slice(_,h);if(m.length!==f.length)throw new Error("There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference.");let g;g=n?m.filter(((e,s)=>e===f[s]&&i[o+s]<=t[l][_+s])).length:m.filter(((e,t)=>e===f[t])).length;const M=g/e+e/1e4;g>1&&M>d&&(d=M,u=[o,a,_,h])}const[m,_,h,f]=u,g=Math.floor((_+m)/2),M=Math.floor((f+h)/2);o.push(...s.slice(0,g)),s=c.slice(M),r=s.length,n&&(a.push(...i.slice(0,g)),i=t[l].slice(M))}return o.push(...s),n?(a.push(...i),[o,a]):[o,[]]}collateWordTimestamps(e,t,s){const[r,o,n]=this.combineTokensIntoWords(e,s),a=[];for(let e=0;e<r.length;++e){const s=n[e];a.push({text:r[e],timestamp:[t[s.at(0)][0],t[s.at(-1)][1]]})}return a}combineTokensIntoWords(e,t,s="\"'“¡¿([{-",r="\"'.。,,!!??::”)]}、"){let o,n,a;return["chinese","japanese","thai","lao","myanmar"].includes(t=t??"english")?[o,n,a]=this.splitTokensOnUnicode(e):[o,n,a]=this.splitTokensOnSpaces(e),this.mergePunctuations(o,n,a,s,r)}decode(e,t){let s;return t?.decode_with_timestamps?(e instanceof i.Tensor&&(e=_(e)),s=this.decodeWithTimestamps(e,t)):s=super.decode(e,t),s}decodeWithTimestamps(e,t){const 
s=t?.time_precision??.02,r=Array.from(this.all_special_ids).at(-1)+1;let o=[[]];for(let t of e)if(t=Number(t),t>=r){const e=((t-r)*s).toFixed(2);o.push(`<|${e}|>`),o.push([])}else o[o.length-1].push(t);return o=o.map((e=>"string"==typeof e?e:super.decode(e,t))),o.join("")}splitTokensOnUnicode(e){const t=this.decode(e,{decode_with_timestamps:!0}),s=[],r=[],o=[];let n=[],a=[],i=0;for(let l=0;l<e.length;++l){const c=e[l];n.push(c),a.push(l);const d=this.decode(n,{decode_with_timestamps:!0});d.includes("�")&&"�"!==t[i+d.indexOf("�")]||(s.push(d),r.push(n),o.push(a),n=[],a=[],i+=d.length)}return[s,r,o]}splitTokensOnSpaces(e){const[t,s,r]=this.splitTokensOnUnicode(e),o=[],n=[],a=[],i=new RegExp(`^[${M}]$`,"gu");for(let e=0;e<t.length;++e){const l=t[e],c=s[e],d=r[e],u=c[0]>=this.model.tokens_to_ids.get("<|endoftext|>"),p=l.startsWith(" "),m=l.trim(),_=i.test(m);if(u||p||_||0===o.length)o.push(l),n.push(c),a.push(d);else{const e=o.length-1;o[e]+=l,n[e].push(...c),a[e].push(...d)}}return[o,n,a]}mergePunctuations(e,t,s,r,n){const a=structuredClone(e),i=structuredClone(t),l=structuredClone(s);let c=a.length-2,d=a.length-1;for(;c>=0;)a[c].startsWith(" ")&&r.includes(a[c].trim())?(a[d]=a[c]+a[d],i[d]=(0,o.mergeArrays)(i[c],i[d]),l[d]=(0,o.mergeArrays)(l[c],l[d]),a[c]="",i[c]=[],l[c]=[]):d=c,--c;for(c=0,d=1;d<a.length;)!a[c].endsWith(" ")&&n.includes(a[d])?(a[c]+=a[d],i[c]=(0,o.mergeArrays)(i[c],i[d]),l[c]=(0,o.mergeArrays)(l[c],l[d]),a[d]="",i[d]=[],l[d]=[]):c=d,++d;return[a.filter((e=>e)),i.filter((e=>e.length>0)),l.filter((e=>e.length>0))]}}class st extends Te{}class rt extends Te{}class ot extends Te{}class nt extends Te{constructor(e,t){super(e,t),this.languageRegex=/^(>>\w+<<)\s*/g,this.supported_language_codes=this.model.vocab.filter((e=>this.languageRegex.test(e))),console.warn('WARNING: `MarianTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. 
Therefore, you may experience slightly inaccurate results.')}_encode_text(e){if(null===e)return null;const[t,...s]=e.trim().split(this.languageRegex);if(0===s.length)return super._encode_text(t);if(2===s.length){const[e,t]=s;return this.supported_language_codes.includes(e)||console.warn(`Unsupported language code "${e}" detected, which may lead to unexpected behavior. Should be one of: ${JSON.stringify(this.supported_language_codes)}`),(0,o.mergeArrays)([e],super._encode_text(t))}}}class at extends Te{}class it extends Te{}class lt extends Te{}class ct extends Te{}class dt extends Te{}class ut extends Te{constructor(e,t){super(e,t),this.decoder=new ce({})}}class pt extends Te{}class mt extends Te{}class _t{static TOKENIZER_CLASS_MAPPING={T5Tokenizer:ze,DistilBertTokenizer:Ae,CamembertTokenizer:Ee,DebertaTokenizer:ye,DebertaV2Tokenizer:Fe,BertTokenizer:be,HerbertTokenizer:ve,ConvBertTokenizer:Ce,RoFormerTokenizer:Se,XLMTokenizer:Le,ElectraTokenizer:Ie,MobileBertTokenizer:xe,SqueezeBertTokenizer:ke,AlbertTokenizer:Pe,GPT2Tokenizer:je,BartTokenizer:Ne,MBartTokenizer:Ve,MBart50Tokenizer:Oe,RobertaTokenizer:Be,WhisperTokenizer:tt,CodeGenTokenizer:st,CLIPTokenizer:rt,SiglipTokenizer:ot,MarianTokenizer:nt,BloomTokenizer:De,NllbTokenizer:Ze,M2M100Tokenizer:et,LlamaTokenizer:Re,CodeLlamaTokenizer:qe,XLMRobertaTokenizer:$e,MPNetTokenizer:We,FalconTokenizer:Ue,GPTNeoXTokenizer:Qe,EsmTokenizer:Xe,Wav2Vec2CTCTokenizer:at,BlenderbotTokenizer:it,BlenderbotSmallTokenizer:lt,SpeechT5Tokenizer:ct,NougatTokenizer:dt,VitsTokenizer:ut,Qwen2Tokenizer:He,GemmaTokenizer:Je,Grok1Tokenizer:Ye,CohereTokenizer:pt,MgpstrTokenizer:mt,PreTrainedTokenizer:Te};static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:r=null,local_files_only:o=!1,revision:n="main",legacy:a=null}={}){const[i,l]=await u(e,{progress_callback:t,config:s,cache_dir:r,local_files_only:o,revision:n,legacy:a}),c=l.tokenizer_class?.replace(/Fast$/,"")??"PreTrainedTokenizer";let 
d=this.TOKENIZER_CLASS_MAPPING[c];return d||(console.warn(`Unknown tokenizer class "${c}", attempting to construct from base class.`),d=Te),new d(i,l)}}},"./src/utils/audio.js":(e,t,s)=>{s.r(t),s.d(t,{hamming:()=>d,hanning:()=>c,mel_filter_bank:()=>h,read_audio:()=>i,spectrogram:()=>g,window_function:()=>M});var r=s("./src/utils/hub.js"),o=s("./src/utils/maths.js"),n=s("./src/utils/core.js"),a=s("./src/utils/tensor.js");async function i(e,t){if("undefined"==typeof AudioContext)throw Error("Unable to load audio from path/URL since `AudioContext` is not available in your environment. Instead, audio data should be passed directly to the pipeline/processor. For more information and some example code, see https://huggingface.co/docs/transformers.js/guides/node-audio-processing.");const s=await(await(0,r.getFile)(e)).arrayBuffer(),o=new AudioContext({sampleRate:t});void 0===t&&console.warn(`No sampling rate provided, using default of ${o.sampleRate}Hz.`);const n=await o.decodeAudioData(s);let a;if(2===n.numberOfChannels){const e=Math.sqrt(2),t=n.getChannelData(0),s=n.getChannelData(1);a=new Float32Array(t.length);for(let r=0;r<n.length;++r)a[r]=e*(t[r]+s[r])/2}else a=n.getChannelData(0);return a}function l(e,t){if(e<1)return new Float64Array;if(1===e)return new Float64Array([1]);const s=1-t,r=2*Math.PI/(e-1),o=new Float64Array(e);for(let n=0;n<e;++n)o[n]=t-s*Math.cos(n*r);return o}function c(e){return l(e,.5)}function d(e){return l(e,.54)}const u={htk:e=>2595*Math.log10(1+e/700),kaldi:e=>1127*Math.log(1+e/700),slaney:(e,t=1e3,s=15,r=27/Math.log(6.4))=>e>=t?s+Math.log(e/t)*r:3*e/200};function p(e,t="htk"){const s=u[t];if(!s)throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".');return"number"==typeof e?s(e):e.map((e=>s(e)))}const m={htk:e=>700*(10**(e/2595)-1),kaldi:e=>700*(Math.exp(e/1127)-1),slaney:(e,t=1e3,s=15,r=Math.log(6.4)/27)=>e>=s?t*Math.exp(r*(e-s)):200*e/3};function _(e,t,s){const r=(t-e)/(s-1);return 
Float64Array.from({length:s},((t,s)=>e+r*s))}function h(e,t,s,r,o,n=null,a="htk",i=!1){if(null!==n&&"slaney"!==n)throw new Error('norm must be one of null or "slaney"');const l=_(p(s,a),p(r,a),t+2);let c,d=function(e,t="htk"){const s=m[t];if(!s)throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".');return"number"==typeof e?s(e):e.map((e=>s(e)))}(l,a);if(i){const t=o/(2*e);c=p(Float64Array.from({length:e},((e,s)=>s*t)),a),d=l}else c=_(0,Math.floor(o/2),e);const u=function(e,t){const s=Float64Array.from({length:t.length-1},((e,s)=>t[s+1]-t[s])),r=Array.from({length:e.length},(()=>new Array(t.length)));for(let s=0;s<e.length;++s){const o=r[s];for(let r=0;r<t.length;++r)o[r]=t[r]-e[s]}const o=t.length-2,n=Array.from({length:o},(()=>new Array(e.length)));for(let t=0;t<e.length;++t){const e=r[t];for(let r=0;r<o;++r){const o=-e[r]/s[r],a=e[r+2]/s[r+1];n[r][t]=Math.max(0,Math.min(o,a))}}return n}(c,d);if(null!==n&&"slaney"===n)for(let s=0;s<t;++s){const t=u[s],r=2/(d[s+2]-d[s]);for(let s=0;s<e;++s)t[s]*=r}return u}function f(e,t,s,r,n){if(s<=0)throw new Error("reference must be greater than zero");if(r<=0)throw new Error("min_value must be greater than zero");s=Math.max(r,s);const a=Math.log10(s);for(let s=0;s<e.length;++s)e[s]=t*Math.log10(Math.max(r,e[s])-a);if(null!==n){if(n<=0)throw new Error("db_range must be greater than zero");const t=(0,o.max)(e)[0]-n;for(let s=0;s<e.length;++s)e[s]=Math.max(e[s],t)}return e}async function g(e,t,s,r,{fft_length:i=null,power:l=1,center:c=!0,pad_mode:d="reflect",onesided:u=!0,preemphasis:p=null,mel_filters:m=null,mel_floor:_=1e-10,log_mel:h=null,reference:g=1,min_value:M=1e-10,db_range:w=null,remove_dc_offset:T=null,min_num_frames:b=null,max_num_frames:P=null,do_pad:x=!0,transpose:k=!1}={}){const y=t.length;if(null===i&&(i=s),s>i)throw Error(`frame_length (${s}) may not be larger than fft_length (${i})`);if(y!==s)throw new Error(`Length of the window (${y}) must equal frame_length (${s})`);if(r<=0)throw new 
Error("hop_length must be greater than zero");if(null===l&&null!==m)throw new Error("You have provided `mel_filters` but `power` is `None`. Mel spectrogram computation is not yet supported for complex-valued spectrogram. Specify `power` to fix this issue.");if(c){if("reflect"!==d)throw new Error(`pad_mode="${d}" not implemented yet.`);const t=Math.floor((i-1)/2)+1;e=function(e,t,s){const r=new e.constructor(e.length+t+s),o=e.length-1;for(let s=0;s<e.length;++s)r[t+s]=e[s];for(let s=1;s<=t;++s)r[t-s]=e[(0,n.calculateReflectOffset)(s,o)];for(let a=1;a<=s;++a)r[o+t+a]=e[(0,n.calculateReflectOffset)(o-a,o)];return r}(e,t,t)}let F=Math.floor(1+Math.floor((e.length-s)/r));null!==b&&F<b&&(F=b);const v=u?Math.floor(i/2)+1:i;let C=F,S=F;null!==P&&(P>F?x&&(S=P):S=C=P);const A=new o.FFT(i),E=new Float64Array(i),L=new Float64Array(A.outputBufferSize),I=new Float32Array(v*S);for(let o=0;o<C;++o){const n=o*r,a=Math.min(e.length-n,s);a!==s&&E.fill(0,0,s);for(let t=0;t<a;++t)E[t]=e[n+t];if(T){let e=0;for(let t=0;t<a;++t)e+=E[t];const t=e/a;for(let e=0;e<a;++e)E[e]-=t}if(null!==p){for(let e=a-1;e>=1;--e)E[e]-=p*E[e-1];E[0]*=1-p}for(let e=0;e<t.length;++e)E[e]*=t[e];A.realTransform(L,E);for(let e=0;e<v;++e){const t=e<<1;I[e*S+o]=L[t]**2+L[t+1]**2}}if(null!==l&&2!==l){const e=2/l;for(let t=0;t<I.length;++t)I[t]**=e}const z=m.length;let j=await(0,a.matmul)(new a.Tensor("float32",m.flat(),[z,v]),new a.Tensor("float32",I,[v,S]));k&&(j=j.transpose(1,0));const N=j.data;for(let e=0;e<N.length;++e)N[e]=Math.max(_,N[e]);if(null!==l&&null!==h){const e=Math.min(N.length,C*z);switch(h){case"log":for(let t=0;t<e;++t)N[t]=Math.log(N[t]);break;case"log10":for(let t=0;t<e;++t)N[t]=Math.log10(N[t]);break;case"dB":if(1===l)!function(e,t=1,s=1e-5,r=null){f(e,20,t,s,r)}(N,g,M,w);else{if(2!==l)throw new Error(`Cannot use log_mel option '${h}' with power ${l}`);!function(e,t=1,s=1e-10,r=null){f(e,10,t,s,r)}(N,g,M,w)}break;default:throw new Error(`log_mel must be one of null, 'log', 'log10' or 'dB'. 
Got '${h}'`)}}return j}function M(e,t,{periodic:s=!0,frame_length:r=null,center:o=!0}={}){const n=s?e+1:e;let a;switch(t){case"boxcar":a=new Float64Array(n).fill(1);break;case"hann":case"hann_window":a=c(n);break;case"hamming":a=d(n);break;case"povey":a=c(n).map((e=>Math.pow(e,.85)));break;default:throw new Error(`Unknown window type ${t}.`)}if(s&&(a=a.subarray(0,e)),null===r)return a;if(e>r)throw new Error(`Length of the window (${e}) may not be larger than frame_length (${r})`);return a}},"./src/utils/constants.js":(e,t,s)=>{s.r(t),s.d(t,{CHAT_TEMPLATE_NAME:()=>l,CONFIG_NAME:()=>o,FEATURE_EXTRACTOR_NAME:()=>n,GENERATION_CONFIG_NAME:()=>c,GITHUB_ISSUE_URL:()=>r,IMAGE_PROCESSOR_NAME:()=>a,PROCESSOR_NAME:()=>i});const r="https://github.com/huggingface/transformers.js/issues/new/choose",o="config.json",n="preprocessor_config.json",a=n,i="processor_config.json",l="chat_template.json",c="generation_config.json"},"./src/utils/core.js":(e,t,s)=>{function r(e,t){e&&e(t)}function o(e){return Object.fromEntries(Object.entries(e).map((([e,t])=>[t,e])))}function n(e){return e.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}function a(e){return"TypedArray"===e?.prototype?.__proto__?.constructor?.name}function i(e){return Number.isInteger(e)||"bigint"==typeof e}function l(e){return null==e||-1===e}function c(e){const t=[];let s=e;for(;Array.isArray(s);)t.push(s.length),s=s[0];return t}function d(e,t,s=void 0){const r=e[t];if(void 0!==r)return delete e[t],r;if(void 0===s)throw Error(`Key ${t} does not exist in object.`);return s}function u(...e){return Array.prototype.concat.apply([],e)}function p(...e){return e.reduce(((e,t)=>e.flatMap((e=>t.map((t=>[e,t]))))))}function m(e,t){return Math.abs((e+t)%(2*t)-t)}function _(e,t){return Object.assign({},...t.map((t=>{if(void 0!==e[t])return{[t]:e[t]}})))}function h(e){let t=0;for(const s of e)++t;return t}function f(e,t){let s=0;for(const r of e)r===t&&++s;return 
s}s.r(t),s.d(t,{calculateDimensions:()=>c,calculateReflectOffset:()=>m,count:()=>f,dispatchCallback:()=>r,escapeRegExp:()=>n,isIntegralNumber:()=>i,isNullishDimension:()=>l,isTypedArray:()=>a,len:()=>h,mergeArrays:()=>u,pick:()=>_,pop:()=>d,product:()=>p,reverseDictionary:()=>o})},"./src/utils/data-structures.js":(e,t,s)=>{s.r(t),s.d(t,{CharTrie:()=>o,PriorityQueue:()=>r,TokenLattice:()=>a});class r{constructor(e=(e,t)=>e>t,t=1/0){this._heap=[],this._comparator=e,this._maxSize=t}get size(){return this._heap.length}isEmpty(){return 0===this.size}peek(){return this._heap[0]}push(...e){return this.extend(e)}extend(e){for(const t of e)if(this.size<this._maxSize)this._heap.push(t),this._siftUp();else{const e=this._smallest();this._comparator(t,this._heap[e])&&(this._heap[e]=t,this._siftUpFrom(e))}return this.size}pop(){const e=this.peek(),t=this.size-1;return t>0&&this._swap(0,t),this._heap.pop(),this._siftDown(),e}replace(e){const t=this.peek();return this._heap[0]=e,this._siftDown(),t}_parent(e){return(e+1>>>1)-1}_left(e){return 1+(e<<1)}_right(e){return e+1<<1}_greater(e,t){return this._comparator(this._heap[e],this._heap[t])}_swap(e,t){const s=this._heap[e];this._heap[e]=this._heap[t],this._heap[t]=s}_siftUp(){this._siftUpFrom(this.size-1)}_siftUpFrom(e){for(;e>0&&this._greater(e,this._parent(e));)this._swap(e,this._parent(e)),e=this._parent(e)}_siftDown(){let e=0;for(;this._left(e)<this.size&&this._greater(this._left(e),e)||this._right(e)<this.size&&this._greater(this._right(e),e);){const t=this._right(e)<this.size&&this._greater(this._right(e),this._left(e))?this._right(e):this._left(e);this._swap(e,t),e=t}}_smallest(){return 2**Math.floor(Math.log2(this.size))-1}}class o{constructor(){this.root=n.default()}extend(e){for(const t of e)this.push(t)}push(e){let t=this.root;for(const s of e){let e=t.children.get(s);void 0===e&&(e=n.default(),t.children.set(s,e)),t=e}t.isLeaf=!0}*commonPrefixSearch(e){let t=this.root;if(void 0===t)return;let s="";for(const r of 
e){if(s+=r,t=t.children.get(r),void 0===t)return;t.isLeaf&&(yield s)}}}class n{constructor(e,t){this.isLeaf=e,this.children=t}static default(){return new n(!1,new Map)}}class a{constructor(e,t,s){this.chars=Array.from(e),this.len=this.chars.length,this.bosTokenId=t,this.eosTokenId=s,this.nodes=[],this.beginNodes=Array.from({length:this.len+1},(()=>[])),this.endNodes=Array.from({length:this.len+1},(()=>[]));const r=new i(this.bosTokenId,0,0,0,0),o=new i(this.eosTokenId,1,this.len,0,0);this.nodes.push(r.clone()),this.nodes.push(o.clone()),this.beginNodes[this.len].push(o),this.endNodes[0].push(r)}insert(e,t,s,r){const o=this.nodes.length,n=new i(r,o,e,t,s);this.beginNodes[e].push(n),this.endNodes[e+t].push(n),this.nodes.push(n)}viterbi(){const e=this.len;let t=0;for(;t<=e;){if(0==this.beginNodes[t].length)return[];for(let e of this.beginNodes[t]){e.prev=null;let s=0,r=null;for(let o of this.endNodes[t]){const t=o.backtraceScore+e.score;(null===r||t>s)&&(r=o.clone(),s=t)}if(null===r)return[];e.prev=r,e.backtraceScore=s}++t}const s=[],r=this.beginNodes[e][0].prev;if(null===r)return[];let o=r.clone();for(;null!==o.prev;){s.push(o.clone());const e=o.clone();o=e.prev.clone()}return s.reverse(),s}piece(e){return this.chars.slice(e.pos,e.pos+e.length).join("")}tokens(){return this.viterbi().map((e=>this.piece(e)))}tokenIds(){return this.viterbi().map((e=>e.tokenId))}}class i{constructor(e,t,s,r,o){this.tokenId=e,this.nodeId=t,this.pos=s,this.length=r,this.score=o,this.prev=null,this.backtraceScore=0}clone(){const e=new i(this.tokenId,this.nodeId,this.pos,this.length,this.score);return e.prev=this.prev,e.backtraceScore=this.backtraceScore,e}}},"./src/utils/devices.js":(e,t,s)=>{s.r(t),s.d(t,{DEVICE_TYPES:()=>r});const 
r=Object.freeze({auto:"auto",gpu:"gpu",cpu:"cpu",wasm:"wasm",webgpu:"webgpu",cuda:"cuda",dml:"dml",webnn:"webnn","webnn-npu":"webnn-npu","webnn-gpu":"webnn-gpu","webnn-cpu":"webnn-cpu"})},"./src/utils/dtypes.js":(e,t,s)=>{s.r(t),s.d(t,{DATA_TYPES:()=>a,DEFAULT_DEVICE_DTYPE_MAPPING:()=>i,DEFAULT_DTYPE_SUFFIX_MAPPING:()=>l,isWebGpuFp16Supported:()=>n});var r=s("./src/env.js"),o=s("./src/utils/devices.js");const n=function(){let e;return async function(){if(void 0===e)if(r.apis.IS_WEBGPU_AVAILABLE)try{const t=await navigator.gpu.requestAdapter();e=t.features.has("shader-f16")}catch(t){e=!1}else e=!1;return e}}(),a=Object.freeze({auto:"auto",fp32:"fp32",fp16:"fp16",q8:"q8",int8:"int8",uint8:"uint8",q4:"q4",bnb4:"bnb4",q4f16:"q4f16"}),i=Object.freeze({[o.DEVICE_TYPES.wasm]:a.q8}),l=Object.freeze({[a.fp32]:"",[a.fp16]:"_fp16",[a.int8]:"_int8",[a.uint8]:"_uint8",[a.q8]:"_quantized",[a.q4]:"_q4",[a.q4f16]:"_q4f16",[a.bnb4]:"_bnb4"})},"./src/utils/generic.js":(e,t,s)=>{s.r(t),s.d(t,{Callable:()=>r});const r=class{constructor(){let e=function(...t){return e._call(...t)};return Object.setPrototypeOf(e,new.target.prototype)}_call(...e){throw Error("Must implement _call method in subclass")}}},"./src/utils/hub.js":(e,t,s)=>{s.r(t),s.d(t,{getFile:()=>d,getModelFile:()=>m,getModelJSON:()=>_});var r=s("fs"),o=s("path"),n=s("./src/env.js"),a=s("./src/utils/core.js");const i={txt:"text/plain",html:"text/html",css:"text/css",js:"text/javascript",json:"application/json",png:"image/png",jpg:"image/jpeg",jpeg:"image/jpeg",gif:"image/gif"};class l{constructor(e){if(this.filePath=e,this.headers=new Headers,this.exists=r.default.existsSync(e),this.exists){this.status=200,this.statusText="OK";let t=r.default.statSync(e);this.headers.set("content-length",t.size.toString()),this.updateContentType();let s=this;this.body=new ReadableStream({start(e){s.arrayBuffer().then((t=>{e.enqueue(new Uint8Array(t)),e.close()}))}})}else this.status=404,this.statusText="Not 
Found",this.body=null}updateContentType(){const e=this.filePath.toString().split(".").pop().toLowerCase();this.headers.set("content-type",i[e]??"application/octet-stream")}clone(){let e=new l(this.filePath);return e.exists=this.exists,e.status=this.status,e.statusText=this.statusText,e.headers=new Headers(this.headers),e}async arrayBuffer(){return(await r.default.promises.readFile(this.filePath)).buffer}async blob(){const e=await r.default.promises.readFile(this.filePath);return new Blob([e],{type:this.headers.get("content-type")})}async text(){return await r.default.promises.readFile(this.filePath,"utf8")}async json(){return JSON.parse(await this.text())}}function c(e,t=null,s=null){let r;try{r=new URL(e)}catch(e){return!1}return!(t&&!t.includes(r.protocol))&&!(s&&!s.includes(r.hostname))}async function d(e){if(n.env.useFS&&!c(e,["http:","https:","blob:"]))return new l(e);if("undefined"!=typeof process&&"node"===process?.release?.name){const t=!!process.env?.TESTING_REMOTELY,s=n.env.version,r=new Headers;r.set("User-Agent",`transformers.js/${s}; is_ci/${t};`);if(c(e,["http:","https:"],["huggingface.co","hf.co"])){const e=process.env?.HF_TOKEN??process.env?.HF_ACCESS_TOKEN;e&&r.set("Authorization",`Bearer ${e}`)}return fetch(e,{headers:r})}return fetch(e)}const u={400:"Bad request error occurred while trying to load file",401:"Unauthorized access to file",403:"Forbidden access to file",404:"Could not locate file",408:"Request timeout error occurred while trying to load file",500:"Internal server error error occurred while trying to load file",502:"Bad gateway error occurred while trying to load file",503:"Service unavailable error occurred while trying to load file",504:"Gateway timeout error occurred while trying to load file"};class p{constructor(e){this.path=e}async match(e){let t=o.default.join(this.path,e),s=new l(t);return s.exists?s:void 0}async put(e,t){const s=Buffer.from(await t.arrayBuffer());let n=o.default.join(this.path,e);try{await 
r.default.promises.mkdir(o.default.dirname(n),{recursive:!0}),await r.default.promises.writeFile(n,s)}catch(e){console.warn("An error occurred while writing the file to cache:",e)}}}async function m(e,t,s=!0,r={}){if(!n.env.allowLocalModels){if(r.local_files_only)throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).");if(!n.env.allowRemoteModels)throw Error("Invalid configuration detected: both local and remote models are disabled. Fix by setting `env.allowLocalModels` or `env.allowRemoteModels` to `true`.")}let o;if((0,a.dispatchCallback)(r.progress_callback,{status:"initiate",name:e,file:t}),!o&&n.env.useBrowserCache){if("undefined"==typeof caches)throw Error("Browser cache is not available in this environment.");try{o=await caches.open("transformers-cache")}catch(e){console.warn("An error occurred while opening the browser cache:",e)}}if(!o&&n.env.useFSCache&&(o=new p(r.cache_dir??n.env.cacheDir)),!o&&n.env.useCustomCache){if(!n.env.customCache)throw Error("`env.useCustomCache=true`, but `env.customCache` is not defined.");if(!n.env.customCache.match||!n.env.customCache.put)throw new Error("`env.customCache` must be an object which implements the `match` and `put` functions of the Web Cache API. 
For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache");o=n.env.customCache}const i=r.revision??"main";let l,m,_=h(e,t),f=h(n.env.localModelPath,_),g=h(n.env.remoteHost,n.env.remotePathTemplate.replaceAll("{model}",e).replaceAll("{revision}",encodeURIComponent(i)),t),M="main"===i?_:h(e,i,t),w=o instanceof p?M:g,T=!1;o&&(m=await async function(e,...t){for(let s of t)try{let t=await e.match(s);if(t)return t}catch(e){continue}}(o,f,w));const b=void 0!==m;if(void 0===m){if(n.env.allowLocalModels){if(c(_,["http:","https:"])){if(r.local_files_only)throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${_}.`);if(!n.env.allowRemoteModels)throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${_}.`)}else try{m=await d(f),l=f}catch(e){console.warn(`Unable to load from local path "${f}": "${e}"`)}}if(void 0===m||404===m.status){if(r.local_files_only||!n.env.allowRemoteModels){if(s)throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${f}".`);return null}if(m=await d(g),200!==m.status)return function(e,t,s){if(!s)return null;const r=u[e]??`Error (${e}) occurred while trying to load file`;throw Error(`${r}: "${t}".`)}(m.status,g,s);l=w}T=o&&"undefined"!=typeof Response&&m instanceof Response&&200===m.status}let P;return(0,a.dispatchCallback)(r.progress_callback,{status:"download",name:e,file:t}),r.progress_callback?b&&"undefined"!=typeof navigator&&/firefox/i.test(navigator.userAgent)?(P=new Uint8Array(await m.arrayBuffer()),(0,a.dispatchCallback)(r.progress_callback,{status:"progress",name:e,file:t,progress:100,loaded:P.length,total:P.length})):P=await async function(e,t){const s=e.headers.get("Content-Length");null===s&&console.warn("Unable to determine content-length from response headers. 
Will expand buffer when needed.");let r=parseInt(s??"0"),o=new Uint8Array(r),n=0;const a=e.body.getReader();async function i(){const{done:e,value:s}=await a.read();if(e)return;let l=n+s.length;if(l>r){r=l;let e=new Uint8Array(r);e.set(o),o=e}o.set(s,n),n=l;return t({progress:n/r*100,loaded:n,total:r}),i()}return await i(),o}(m,(s=>{(0,a.dispatchCallback)(r.progress_callback,{status:"progress",name:e,file:t,...s})})):P=new Uint8Array(await m.arrayBuffer()),T&&l&&void 0===await o.match(l)&&await o.put(l,new Response(P,{headers:m.headers})).catch((e=>{console.warn(`Unable to add response to browser cache: ${e}.`)})),(0,a.dispatchCallback)(r.progress_callback,{status:"done",name:e,file:t}),P}async function _(e,t,s=!0,r={}){let o=await m(e,t,s,r);if(null===o)return{};let n=new TextDecoder("utf-8").decode(o);return JSON.parse(n)}function h(...e){return(e=e.map(((t,s)=>(s&&(t=t.replace(new RegExp("^/"),"")),s!==e.length-1&&(t=t.replace(new RegExp("/$"),"")),t)))).join("/")}},"./src/utils/image.js":(e,t,s)=>{s.r(t),s.d(t,{RawImage:()=>_,load_image:()=>h});var r=s("./src/utils/core.js"),o=s("./src/utils/hub.js"),n=s("./src/env.js"),a=s("./src/utils/tensor.js"),i=s("sharp");let l,c,d;const u=n.apis.IS_BROWSER_ENV||n.apis.IS_WEBWORKER_ENV;if(u)l=(e,t)=>{if(!self.OffscreenCanvas)throw new Error("OffscreenCanvas not supported by this browser.");return new self.OffscreenCanvas(e,t)},d=self.createImageBitmap,c=self.ImageData;else{if(!i.default)throw new Error("Unable to load image processing library.");d=async e=>{const t=(await e.metadata()).channels,{data:s,info:r}=await e.rotate().raw().toBuffer({resolveWithObject:!0}),o=new _(new Uint8ClampedArray(s),r.width,r.height,r.channels);return void 0!==t&&t!==r.channels&&o.convert(t),o}}const p={0:"nearest",1:"lanczos",2:"bilinear",3:"bicubic",4:"box",5:"hamming"},m=new Map([["png","image/png"],["jpg","image/jpeg"],["jpeg","image/jpeg"],["gif","image/gif"]]);class 
_{constructor(e,t,s,r){this.data=e,this.width=t,this.height=s,this.channels=r}get size(){return[this.width,this.height]}static async read(e){if(e instanceof _)return e;if("string"==typeof e||e instanceof URL)return await this.fromURL(e);throw new Error("Unsupported input type: "+typeof e)}static fromCanvas(e){if(!u)throw new Error("fromCanvas() is only supported in browser environments.");const t=e.getContext("2d").getImageData(0,0,e.width,e.height).data;return new _(t,e.width,e.height,4)}static async fromURL(e){const t=await(0,o.getFile)(e);if(200!==t.status)throw new Error(`Unable to read image from "${e}" (${t.status} ${t.statusText})`);const s=await t.blob();return this.fromBlob(s)}static async fromBlob(e){if(u){const t=await d(e),s=l(t.width,t.height).getContext("2d");return s.drawImage(t,0,0),new this(s.getImageData(0,0,t.width,t.height).data,t.width,t.height,4)}{const t=(0,i.default)(await e.arrayBuffer());return await d(t)}}static fromTensor(e,t="CHW"){if(3!==e.dims.length)throw new Error(`Tensor should have 3 dimensions, but has ${e.dims.length} dimensions.`);if("CHW"===t)e=e.transpose(1,2,0);else if("HWC"!==t)throw new Error(`Unsupported channel format: ${t}`);if(!(e.data instanceof Uint8ClampedArray||e.data instanceof Uint8Array))throw new Error(`Unsupported tensor type: ${e.type}`);switch(e.dims[2]){case 1:case 2:case 3:case 4:return new _(e.data,e.dims[1],e.dims[0],e.dims[2]);default:throw new Error(`Unsupported number of channels: ${e.dims[2]}`)}}grayscale(){if(1===this.channels)return this;const e=new Uint8ClampedArray(this.width*this.height*1);switch(this.channels){case 3:case 4:for(let t=0,s=0;t<this.data.length;t+=this.channels){const r=this.data[t],o=this.data[t+1],n=this.data[t+2];e[s++]=Math.round(.2989*r+.587*o+.114*n)}break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,1)}rgb(){if(3===this.channels)return this;const e=new 
Uint8ClampedArray(this.width*this.height*3);switch(this.channels){case 1:for(let t=0,s=0;t<this.data.length;++t)e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=this.data[t];break;case 4:for(let t=0,s=0;t<this.data.length;t+=4)e[s++]=this.data[t],e[s++]=this.data[t+1],e[s++]=this.data[t+2];break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,3)}rgba(){if(4===this.channels)return this;const e=new Uint8ClampedArray(this.width*this.height*4);switch(this.channels){case 1:for(let t=0,s=0;t<this.data.length;++t)e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=255;break;case 3:for(let t=0,s=0;t<this.data.length;t+=3)e[s++]=this.data[t],e[s++]=this.data[t+1],e[s++]=this.data[t+2],e[s++]=255;break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,4)}putAlpha(e){if(e.width!==this.width||e.height!==this.height)throw new Error(`Expected mask size to be ${this.width}x${this.height}, but got ${e.width}x${e.height}`);if(1!==e.channels)throw new Error(`Expected mask to have 1 channel, but got ${e.channels}`);const t=this.data,s=e.data,r=this.width*this.height;if(3===this.channels){const e=new Uint8ClampedArray(4*r);for(let o=0,n=0,a=0;o<r;++o)e[a++]=t[n++],e[a++]=t[n++],e[a++]=t[n++],e[a++]=s[o];return this._update(e,this.width,this.height,4)}if(4===this.channels){for(let e=0;e<r;++e)t[4*e+3]=s[e];return this}throw new Error(`Expected image to have 3 or 4 channels, but got ${this.channels}`)}async resize(e,t,{resample:s=2}={}){if(this.width===e&&this.height===t)return this;let o=p[s]??s;const n=(0,r.isNullishDimension)(e),a=(0,r.isNullishDimension)(t);if(n&&a)return this;if(n?e=t/this.height*this.width:a&&(t=e/this.width*this.height),u){const s=this.channels,r=this.toCanvas(),o=l(e,t).getContext("2d");o.drawImage(r,0,0,e,t);return new 
_(o.getImageData(0,0,e,t).data,e,t,4).convert(s)}{let s=this.toSharp();switch(o){case"box":case"hamming":"box"!==o&&"hamming"!==o||(console.warn(`Resampling method ${o} is not yet supported. Using bilinear instead.`),o="bilinear");case"nearest":case"bilinear":case"bicubic":s=s.affine([e/this.width,0,0,t/this.height],{interpolator:o});break;case"lanczos":s=s.resize({width:e,height:t,fit:"fill",kernel:"lanczos3"});break;default:throw new Error(`Resampling method ${o} is not supported.`)}return await d(s)}}async pad([e,t,s,r]){if(e=Math.max(e,0),t=Math.max(t,0),s=Math.max(s,0),r=Math.max(r,0),0===e&&0===t&&0===s&&0===r)return this;if(u){const o=this.channels,n=this.toCanvas(),a=this.width+e+t,i=this.height+s+r,c=l(a,i).getContext("2d");c.drawImage(n,0,0,this.width,this.height,e,s,this.width,this.height);return new _(c.getImageData(0,0,a,i).data,a,i,4).convert(o)}{const o=this.toSharp().extend({left:e,right:t,top:s,bottom:r});return await d(o)}}async crop([e,t,s,r]){if(e=Math.max(e,0),t=Math.max(t,0),s=Math.min(s,this.width-1),r=Math.min(r,this.height-1),0===e&&0===t&&s===this.width-1&&r===this.height-1)return this;const o=s-e+1,n=r-t+1;if(u){const s=this.channels,r=this.toCanvas(),a=l(o,n).getContext("2d");a.drawImage(r,e,t,o,n,0,0,o,n);return new _(a.getImageData(0,0,o,n).data,o,n,4).convert(s)}{const s=this.toSharp().extract({left:e,top:t,width:o,height:n});return await d(s)}}async center_crop(e,t){if(this.width===e&&this.height===t)return this;const s=(this.width-e)/2,r=(this.height-t)/2;if(u){const o=this.channels,n=this.toCanvas(),a=l(e,t).getContext("2d");let i=0,c=0,d=0,u=0;s>=0?i=s:d=-s,r>=0?c=r:u=-r,a.drawImage(n,i,c,e,t,d,u,e,t);return new _(a.getImageData(0,0,e,t).data,e,t,4).convert(o)}{let o=this.toSharp();if(s>=0&&r>=0)o=o.extract({left:Math.floor(s),top:Math.floor(r),width:e,height:t});else if(s<=0&&r<=0){const n=Math.floor(-r),a=Math.floor(-s);o=o.extend({top:n,left:a,right:e-this.width-a,bottom:t-this.height-n})}else{let 
n=[0,0],a=0;r<0?(n[0]=Math.floor(-r),n[1]=t-this.height-n[0]):a=Math.floor(r);let i=[0,0],l=0;s<0?(i[0]=Math.floor(-s),i[1]=e-this.width-i[0]):l=Math.floor(s),o=o.extend({top:n[0],bottom:n[1],left:i[0],right:i[1]}).extract({left:l,top:a,width:e,height:t})}return await d(o)}}async toBlob(e="image/png",t=1){if(!u)throw new Error("toBlob() is only supported in browser environments.");const s=this.toCanvas();return await s.convertToBlob({type:e,quality:t})}toTensor(e="CHW"){let t=new a.Tensor("uint8",new Uint8Array(this.data),[this.height,this.width,this.channels]);if("HWC"===e);else{if("CHW"!==e)throw new Error(`Unsupported channel format: ${e}`);t=t.permute(2,0,1)}return t}toCanvas(){if(!u)throw new Error("toCanvas() is only supported in browser environments.");const e=this.clone().rgba(),t=l(e.width,e.height),s=new c(e.data,e.width,e.height);return t.getContext("2d").putImageData(s,0,0),t}split(){const{data:e,width:t,height:s,channels:r}=this,o=e.constructor,n=e.length/r,a=Array.from({length:r},(()=>new o(n)));for(let t=0;t<n;++t){const s=r*t;for(let o=0;o<r;++o)a[o][t]=e[s+o]}return a.map((e=>new _(e,t,s,1)))}_update(e,t,s,r=null){return this.data=e,this.width=t,this.height=s,null!==r&&(this.channels=r),this}clone(){return new _(this.data.slice(),this.width,this.height,this.channels)}convert(e){if(this.channels===e)return this;switch(e){case 1:this.grayscale();break;case 3:this.rgb();break;case 4:this.rgba();break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this}async save(e){if(!u){if(n.env.useFS){const t=this.toSharp();return await t.toFile(e)}throw new Error("Unable to save the image because filesystem is disabled in this environment.")}{if(n.apis.IS_WEBWORKER_ENV)throw new Error("Unable to save an image from a Web Worker.");const t=e.split(".").pop().toLowerCase(),s=m.get(t)??"image/png",r=await 
this.toBlob(s),o=URL.createObjectURL(r),a=document.createElement("a");a.href=o,a.download=e,a.click(),a.remove()}}toSharp(){if(u)throw new Error("toSharp() is only supported in server-side environments.");return(0,i.default)(this.data,{raw:{width:this.width,height:this.height,channels:this.channels}})}}const h=_.read.bind(_)},"./src/utils/maths.js":(e,t,s)=>{function r(e,[t,s,r],[o,n],a="bilinear",i=!1){const l=n/r,c=o/s,d=new e.constructor(o*n*t),u=s*r,p=o*n;for(let a=0;a<o;++a)for(let o=0;o<n;++o){const i=a*n+o,m=(o+.5)/l-.5,_=(a+.5)/c-.5;let h=Math.floor(m),f=Math.floor(_);const g=Math.min(h+1,r-1),M=Math.min(f+1,s-1);h=Math.max(h,0),f=Math.max(f,0);const w=m-h,T=_-f,b=(1-w)*(1-T),P=w*(1-T),x=(1-w)*T,k=w*T,y=f*r,F=M*r,v=y+h,C=y+g,S=F+h,A=F+g;for(let s=0;s<t;++s){const t=s*u;d[s*p+i]=b*e[t+v]+P*e[t+C]+x*e[t+S]+k*e[t+A]}}return d}function o(e,t,s){const r=new Array(s.length),o=new Array(s.length);for(let e=s.length-1,n=1;e>=0;--e)o[e]=n,r[e]=t[s[e]],n*=r[e];const n=s.map(((e,t)=>o[s.indexOf(t)])),a=new e.constructor(e.length);for(let s=0;s<e.length;++s){let r=0;for(let e=t.length-1,o=s;e>=0;--e)r+=o%t[e]*n[e],o=Math.floor(o/t[e]);a[r]=e[s]}return[a,r]}function n(e){const t=u(e)[0],s=e.map((e=>Math.exp(e-t))),r=s.reduce(((e,t)=>e+t),0);return s.map((e=>e/r))}function a(e){const t=u(e)[0];let s=0;for(let r=0;r<e.length;++r)s+=Math.exp(e[r]-t);const r=Math.log(s);return e.map((e=>e-t-r))}function i(e,t){let s=0;for(let r=0;r<e.length;++r)s+=e[r]*t[r];return s}function l(e,t){return i(e,t)/(c(e)*c(t))}function c(e){return Math.sqrt(e.reduce(((e,t)=>e+t*t),0))}function d(e){if(0===e.length)throw Error("Array must not be empty");let t=e[0],s=0;for(let r=1;r<e.length;++r)e[r]<t&&(t=e[r],s=r);return[t,s]}function u(e){if(0===e.length)throw Error("Array must not be empty");let t=e[0],s=0;for(let r=1;r<e.length;++r)e[r]>t&&(t=e[r],s=r);return[Number(t),s]}function p(e){return 
e>0&&!(e&e-1)}s.r(t),s.d(t,{FFT:()=>h,bankers_round:()=>M,cos_sim:()=>l,dot:()=>i,dynamic_time_warping:()=>w,interpolate_data:()=>r,log_softmax:()=>a,magnitude:()=>c,max:()=>u,medianFilter:()=>f,min:()=>d,permute_data:()=>o,round:()=>g,softmax:()=>n});class m{constructor(e){if(this.size=0|e,this.size<=1||!p(this.size))throw new Error("FFT size must be a power of two larger than 1");this._csize=e<<1,this.table=new Float64Array(2*this.size);for(let e=0;e<this.table.length;e+=2){const t=Math.PI*e/this.size;this.table[e]=Math.cos(t),this.table[e+1]=-Math.sin(t)}let t=0;for(let e=1;this.size>e;e<<=1)++t;this._width=t%2==0?t-1:t,this._bitrev=new Int32Array(1<<this._width);for(let e=0;e<this._bitrev.length;++e){this._bitrev[e]=0;for(let t=0;t<this._width;t+=2){const s=this._width-t-2;this._bitrev[e]|=(e>>>t&3)<<s}}}createComplexArray(){return new Float64Array(this._csize)}fromComplexArray(e,t){const s=t||new Array(e.length>>>1);for(let t=0;t<e.length;t+=2)s[t>>>1]=e[t];return s}toComplexArray(e,t){const s=t||this.createComplexArray();for(let t=0;t<s.length;t+=2)s[t]=e[t>>>1],s[t+1]=0;return s}transform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._transform4(e,t,1)}realTransform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._realTransform4(e,t,1)}inverseTransform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._transform4(e,t,-1);for(let t=0;t<e.length;++t)e[t]/=this.size}_transform4(e,t,s){const r=this._csize;let o,n,a=1<<this._width,i=r/a<<1;const l=this._bitrev;if(4===i)for(o=0,n=0;o<r;o+=i,++n){const s=l[n];this._singleTransform2(t,e,o,s,a)}else for(o=0,n=0;o<r;o+=i,++n){const r=l[n];this._singleTransform4(t,e,o,r,a,s)}const c=this.table;for(a>>=2;a>=2;a>>=2){i=r/a<<1;const t=i>>>2;for(o=0;o<r;o+=i){const r=o+t-1;for(let n=o,i=0;n<r;n+=2,i+=a){const 
r=n,o=r+t,a=o+t,l=a+t,d=e[r],u=e[r+1],p=e[o],m=e[o+1],_=e[a],h=e[a+1],f=e[l],g=e[l+1],M=c[i],w=s*c[i+1],T=p*M-m*w,b=p*w+m*M,P=c[2*i],x=s*c[2*i+1],k=_*P-h*x,y=_*x+h*P,F=c[3*i],v=s*c[3*i+1],C=f*F-g*v,S=f*v+g*F,A=d+k,E=u+y,L=d-k,I=u-y,z=T+C,j=b+S,N=s*(T-C),V=s*(b-S);e[r]=A+z,e[r+1]=E+j,e[o]=L+V,e[o+1]=I-N,e[a]=A-z,e[a+1]=E-j,e[l]=L-V,e[l+1]=I+N}}}}_singleTransform2(e,t,s,r,o){const n=e[r],a=e[r+1],i=e[r+o],l=e[r+o+1];t[s]=n+i,t[s+1]=a+l,t[s+2]=n-i,t[s+3]=a-l}_singleTransform4(e,t,s,r,o,n){const a=2*o,i=3*o,l=e[r],c=e[r+1],d=e[r+o],u=e[r+o+1],p=e[r+a],m=e[r+a+1],_=e[r+i],h=e[r+i+1],f=l+p,g=c+m,M=l-p,w=c-m,T=d+_,b=u+h,P=n*(d-_),x=n*(u-h);t[s]=f+T,t[s+1]=g+b,t[s+2]=M+x,t[s+3]=w-P,t[s+4]=f-T,t[s+5]=g-b,t[s+6]=M-x,t[s+7]=w+P}_realTransform4(e,t,s){const r=this._csize;let o,n,a=1<<this._width,i=r/a<<1;const l=this._bitrev;if(4===i)for(o=0,n=0;o<r;o+=i,++n){const s=l[n];this._singleRealTransform2(t,e,o,s>>>1,a>>>1)}else for(o=0,n=0;o<r;o+=i,++n){const r=l[n];this._singleRealTransform4(t,e,o,r>>>1,a>>>1,s)}const c=this.table;for(a>>=2;a>=2;a>>=2){i=r/a<<1;const t=i>>>1,n=t>>>1,l=n>>>1;for(o=0;o<r;o+=i)for(let r=0,i=0;r<=l;r+=2,i+=a){const a=o+r,d=a+n,u=d+n,p=u+n,m=e[a],_=e[a+1],h=e[d],f=e[d+1],g=e[u],M=e[u+1],w=e[p],T=e[p+1],b=m,P=_,x=c[i],k=s*c[i+1],y=h*x-f*k,F=h*k+f*x,v=c[2*i],C=s*c[2*i+1],S=g*v-M*C,A=g*C+M*v,E=c[3*i],L=s*c[3*i+1],I=w*E-T*L,z=w*L+T*E,j=b+S,N=P+A,V=b-S,O=P-A,B=y+I,D=F+z,G=s*(y-I),R=s*(F-z);if(e[a]=j+B,e[a+1]=N+D,e[d]=V+R,e[d+1]=O-G,0===r){e[u]=j-B,e[u+1]=N-D;continue}if(r===l)continue;const q=o+n-r,$=o+t-r;e[q]=V-s*R,e[q+1]=-O-s*G,e[$]=j-s*B,e[$+1]=s*D-N}}const d=r>>>1;for(let t=2;t<d;t+=2)e[r-t]=e[t],e[r-t+1]=-e[t+1]}_singleRealTransform2(e,t,s,r,o){const n=e[r],a=e[r+o];t[s]=n+a,t[s+1]=0,t[s+2]=n-a,t[s+3]=0}_singleRealTransform4(e,t,s,r,o,n){const a=2*o,i=3*o,l=e[r],c=e[r+o],d=e[r+a],u=e[r+i],p=l+d,m=l-d,_=c+u,h=n*(c-u);t[s]=p+_,t[s+1]=0,t[s+2]=m,t[s+3]=-h,t[s+4]=p-_,t[s+5]=0,t[s+6]=m,t[s+7]=h}}class _{constructor(e){const 
t=2*(e-1),s=2*(2*e-1),r=2**Math.ceil(Math.log2(s));this.bufferSize=r,this._a=t;const o=new Float64Array(s),n=new Float64Array(r);this._chirpBuffer=new Float64Array(r),this._buffer1=new Float64Array(r),this._buffer2=new Float64Array(r),this._outBuffer1=new Float64Array(r),this._outBuffer2=new Float64Array(r);const a=-2*Math.PI/e,i=Math.cos(a),l=Math.sin(a);for(let t=0;t<s>>1;++t){const s=(t+1-e)**2/2,r=Math.sqrt(i**2+l**2)**s,a=s*Math.atan2(l,i),c=2*t;o[c]=r*Math.cos(a),o[c+1]=r*Math.sin(a),n[c]=o[c],n[c+1]=-o[c+1]}this._slicedChirpBuffer=o.subarray(t,s),this._f=new m(r>>1),this._f.transform(this._chirpBuffer,n)}_transform(e,t,s){const r=this._buffer1,o=this._buffer2,n=this._outBuffer1,a=this._outBuffer2,i=this._chirpBuffer,l=this._slicedChirpBuffer,c=this._a;if(s)for(let e=0;e<l.length;e+=2){const s=e+1,o=t[e>>1];r[e]=o*l[e],r[s]=o*l[s]}else for(let e=0;e<l.length;e+=2){const s=e+1;r[e]=t[e]*l[e]-t[s]*l[s],r[s]=t[e]*l[s]+t[s]*l[e]}this._f.transform(n,r);for(let e=0;e<i.length;e+=2){const t=e+1;o[e]=n[e]*i[e]-n[t]*i[t],o[t]=n[e]*i[t]+n[t]*i[e]}this._f.inverseTransform(a,o);for(let t=0;t<a.length;t+=2){const s=a[t+c],r=a[t+c+1],o=l[t],n=l[t+1];e[t]=s*o-r*n,e[t+1]=s*n+r*o}}transform(e,t){this._transform(e,t,!1)}realTransform(e,t){this._transform(e,t,!0)}}class h{constructor(e){this.fft_length=e,this.isPowerOfTwo=p(e),this.isPowerOfTwo?(this.fft=new m(e),this.outputBufferSize=2*e):(this.fft=new _(e),this.outputBufferSize=this.fft.bufferSize)}realTransform(e,t){this.fft.realTransform(e,t)}transform(e,t){this.fft.transform(e,t)}}function f(e,t){if(t%2==0||t<=0)throw new Error("Window size must be a positive odd number");const s=new e.constructor(e.length),r=new e.constructor(t),o=Math.floor(t/2);for(let t=0;t<e.length;++t){let n=0;for(let s=-o;s<=o;++s){let o=t+s;o<0?o=Math.abs(o):o>=e.length&&(o=2*(e.length-1)-o),r[n++]=e[o]}r.sort(),s[t]=r[o]}return s}function g(e,t){const s=Math.pow(10,t);return Math.round(e*s)/s}function M(e){const t=Math.round(e);return 
Math.abs(e)%1==.5?t%2==0?t:t-1:t}function w(e){const t=e.length,s=e[0].length,r=[t+1,s+1],o=Array.from({length:r[0]},(()=>Array(r[1]).fill(1/0)));o[0][0]=0;const n=Array.from({length:r[0]},(()=>Array(r[1]).fill(-1)));for(let t=1;t<r[1];++t)for(let s=1;s<r[0];++s){const r=o[s-1][t-1],a=o[s-1][t],i=o[s][t-1];let l,c;r<a&&r<i?(l=r,c=0):a<r&&a<i?(l=a,c=1):(l=i,c=2),o[s][t]=e[s-1][t-1]+l,n[s][t]=c}for(let e=0;e<r[1];++e)n[0][e]=2;for(let e=0;e<r[0];++e)n[e][0]=1;let a=t,i=s,l=[],c=[];for(;a>0||i>0;)switch(l.push(a-1),c.push(i-1),n[a][i]){case 0:--a,--i;break;case 1:--a;break;case 2:--i;break;default:throw new Error(`Internal error in dynamic time warping. Unexpected trace[${a}, ${i}]. Please file a bug report.`)}return l.reverse(),c.reverse(),[l,c]}},"./src/utils/tensor.js":(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>i,cat:()=>w,full:()=>k,full_like:()=>y,interpolate:()=>c,interpolate_4d:()=>d,layer_norm:()=>h,matmul:()=>u,mean:()=>P,mean_pooling:()=>_,ones:()=>F,ones_like:()=>v,permute:()=>l,quantize_embeddings:()=>A,rfft:()=>p,stack:()=>T,std_mean:()=>b,topk:()=>m,zeros:()=>C,zeros_like:()=>S});var r=s("./src/utils/maths.js"),o=s("./src/backends/onnx.js"),n=s("./src/ops/registry.js");const a=Object.freeze({float32:Float32Array,float16:Uint16Array,float64:Float64Array,string:Array,int8:Int8Array,uint8:Uint8Array,int16:Int16Array,uint16:Uint16Array,int32:Int32Array,uint32:Uint32Array,int64:BigInt64Array,uint64:BigUint64Array,bool:Uint8Array,uint4:Uint8Array,int4:Int8Array});class i{get dims(){return this.ort_tensor.dims}set dims(e){this.ort_tensor.dims=e}get type(){return this.ort_tensor.type}get data(){return this.ort_tensor.data}get size(){return this.ort_tensor.size}get location(){return this.ort_tensor.location}ort_tensor;constructor(...e){return(0,o.isONNXTensor)(e[0])?this.ort_tensor=e[0]:this.ort_tensor=new o.Tensor(e[0],e[1],e[2]),new Proxy(this,{get:(e,t)=>{if("string"==typeof t){let s=Number(t);if(Number.isInteger(s))return e._getitem(s)}return 
e[t]},set:(e,t,s)=>e[t]=s})}dispose(){this.ort_tensor.dispose()}*[Symbol.iterator](){const[e,...t]=this.dims;if(t.length>0){const s=t.reduce(((e,t)=>e*t));for(let r=0;r<e;++r)yield this._subarray(r,s,t)}else yield*this.data}_getitem(e){const[t,...s]=this.dims;if(e=M(e,t),s.length>0){const t=s.reduce(((e,t)=>e*t));return this._subarray(e,t,s)}return new i(this.type,[this.data[e]],s)}indexOf(e){const t=this.data;for(let s=0;s<t.length;++s)if(t[s]==e)return s;return-1}_subarray(e,t,s){const r=e*t,o=(e+1)*t,n="subarray"in this.data?this.data.subarray(r,o):this.data.slice(r,o);return new i(this.type,n,s)}item(){const e=this.data;if(1!==e.length)throw new Error(`a Tensor with ${e.length} elements cannot be converted to Scalar`);return e[0]}tolist(){return function(e,t){const s=e.length,r=t.reduce(((e,t)=>e*t));if(s!==r)throw Error(`cannot reshape array of size ${s} into shape (${t})`);let o=e;for(let e=t.length-1;e>=0;e--)o=o.reduce(((s,r)=>{let o=s[s.length-1];return o.length<t[e]?o.push(r):s.push([r]),s}),[[]]);return o[0]}(this.data,this.dims)}sigmoid(){return this.clone().sigmoid_()}sigmoid_(){const e=this.data;for(let t=0;t<e.length;++t)e[t]=1/(1+Math.exp(-e[t]));return this}map(e){return this.clone().map_(e)}map_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]=e(t[s],s,t);return this}mul(e){return this.clone().mul_(e)}mul_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]*=e;return this}div(e){return this.clone().div_(e)}div_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]/=e;return this}add(e){return this.clone().add_(e)}add_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]+=e;return this}sub(e){return this.clone().sub_(e)}sub_(e){const t=this.data;for(let s=0;s<t.length;++s)t[s]-=e;return this}clone(){return new i(this.type,this.data.slice(),this.dims.slice())}slice(...e){const t=[],s=[];for(let r=0;r<this.dims.length;++r){let o=e[r];if(null==o)s.push([0,this.dims[r]]),t.push(this.dims[r]);else if("number"==typeof 
o)o=M(o,this.dims[r],r),s.push([o,o+1]);else{if(!Array.isArray(o)||2!==o.length)throw new Error(`Invalid slice: ${o}`);{let[e,n]=o;if(e=null===e?0:M(e,this.dims[r],r,!1),n=null===n?this.dims[r]:M(n,this.dims[r],r,!1),e>n)throw new Error(`Invalid slice: ${o}`);const a=[Math.max(e,0),Math.min(n,this.dims[r])];s.push(a),t.push(a[1]-a[0])}}}const r=s.map((([e,t])=>t-e)),o=r.reduce(((e,t)=>e*t)),n=this.data,a=new n.constructor(o),l=this.stride();for(let e=0;e<o;++e){let t=0;for(let o=r.length-1,n=e;o>=0;--o){const e=r[o];t+=(n%e+s[o][0])*l[o],n=Math.floor(n/e)}a[e]=n[t]}return new i(this.type,a,t)}permute(...e){return l(this,e)}transpose(...e){return this.permute(...e)}sum(e=null,t=!1){return this.norm(1,e,t)}norm(e="fro",t=null,s=!1){if("fro"===e)e=2;else if("string"==typeof e)throw Error(`Unsupported norm: ${e}`);const r=this.data;if(null===t){let t=r.reduce(((t,s)=>t+s**e),0)**(1/e);return new i(this.type,[t],[])}t=M(t,this.dims.length);const o=this.dims.slice();o[t]=1;const n=new r.constructor(r.length/this.dims[t]);for(let s=0;s<r.length;++s){let a=0;for(let e=this.dims.length-1,r=s,n=1;e>=0;--e){const s=this.dims[e];if(e!==t){a+=r%s*n,n*=o[e]}r=Math.floor(r/s)}n[a]+=r[s]**e}if(1!==e)for(let t=0;t<n.length;++t)n[t]=n[t]**(1/e);return s||o.splice(t,1),new i(this.type,n,o)}normalize_(e=2,t=1){t=M(t,this.dims.length);const s=this.norm(e,t,!0),r=this.data,o=s.data;for(let e=0;e<r.length;++e){let s=0;for(let r=this.dims.length-1,o=e,n=1;r>=0;--r){const e=this.dims[r];if(r!==t){s+=o%e*n,n*=this.dims[r]}o=Math.floor(o/e)}r[e]/=o[s]}return this}normalize(e=2,t=1){return this.clone().normalize_(e,t)}stride(){return function(e){const t=new Array(e.length);for(let s=e.length-1,r=1;s>=0;--s)t[s]=r,r*=e[s];return t}(this.dims)}squeeze(e=null){return new i(this.type,this.data,f(this.dims,e))}squeeze_(e=null){return this.dims=f(this.dims,e),this}unsqueeze(e=null){return new i(this.type,this.data,g(this.dims,e))}unsqueeze_(e=null){return 
this.dims=g(this.dims,e),this}flatten_(e=0,t=-1){t=(t+this.dims.length)%this.dims.length;let s=this.dims.slice(0,e),r=this.dims.slice(e,t+1),o=this.dims.slice(t+1);return this.dims=[...s,r.reduce(((e,t)=>e*t),1),...o],this}flatten(e=0,t=-1){return this.clone().flatten_(e,t)}view(...e){let t=-1;for(let s=0;s<e.length;++s)if(-1===e[s]){if(-1!==t)throw new Error("Only one dimension can be inferred");t=s}const s=this.data;if(-1!==t){const r=e.reduce(((e,s,r)=>r!==t?e*s:e),1);e[t]=s.length/r}return new i(this.type,s,e)}neg_(){const e=this.data;for(let t=0;t<e.length;++t)e[t]=-e[t];return this}neg(){return this.clone().neg_()}clamp_(e,t){const s=this.data;for(let r=0;r<s.length;++r)s[r]=Math.min(Math.max(s[r],e),t);return this}clamp(e,t){return this.clone().clamp_(e,t)}round_(){const e=this.data;for(let t=0;t<e.length;++t)e[t]=Math.round(e[t]);return this}round(){return this.clone().round_()}mean(e=null,t=!1){return P(this,e,t)}to(e){if(this.type===e)return this;if(!a.hasOwnProperty(e))throw new Error(`Unsupported type: ${e}`);return new i(e,a[e].from(this.data),this.dims)}}function l(e,t){const[s,o]=(0,r.permute_data)(e.data,e.dims,t);return new i(e.type,s,o)}function c(e,[t,s],o="bilinear",n=!1){const a=e.dims.at(-3)??1,l=e.dims.at(-2),c=e.dims.at(-1);let d=(0,r.interpolate_data)(e.data,[a,l,c],[t,s],o,n);return new i(e.type,d,[a,t,s])}async function d(e,{size:t=null,mode:s="bilinear"}={}){if(4!==e.dims.length)throw new Error("`interpolate_4d` currently only supports 4D input.");if(!t)throw new Error("`interpolate_4d` requires a `size` argument.");let r,o;if(2===t.length)r=[...e.dims.slice(0,2),...t];else if(3===t.length)r=[e.dims[0],...t];else{if(4!==t.length)throw new Error("`size` must be of length 2, 3, or 4.");r=t}if("bilinear"===s)o=await n.TensorOpRegistry.bilinear_interpolate_4d;else{if("bicubic"!==s)throw new Error(`Unsupported mode: ${s}`);o=await n.TensorOpRegistry.bicubic_interpolate_4d}const a=new i("int64",new 
BigInt64Array(r.map(BigInt)),[r.length]);return await o({x:e,s:a})}async function u(e,t){const s=await n.TensorOpRegistry.matmul;return await s({a:e,b:t})}async function p(e,t){const s=await n.TensorOpRegistry.rfft;return await s({x:e,a:t})}async function m(e,t){const s=await n.TensorOpRegistry.top_k;return t=null===t?e.dims.at(-1):Math.min(t,e.dims.at(-1)),await s({x:e,k:new i("int64",[BigInt(t)],[1])})}function _(e,t){const s=e.data,r=t.data,o=[e.dims[0],e.dims[2]],n=new s.constructor(o[0]*o[1]),[a,l,c]=e.dims;let d=0;for(let e=0;e<a;++e){const t=e*c*l;for(let o=0;o<c;++o){let a=0,i=0;const u=e*l,p=t+o;for(let e=0;e<l;++e){const t=Number(r[u+e]);i+=t,a+=s[p+e*c]*t}const m=a/i;n[d++]=m}}return new i(e.type,n,o)}function h(e,t,{eps:s=1e-5}={}){if(2!==e.dims.length)throw new Error("`layer_norm` currently only supports 2D input.");const[r,o]=e.dims;if(1!==t.length&&t[0]!==o)throw new Error("`normalized_shape` must be a 1D array with shape `[input.dims[1]]`.");const[n,a]=b(e,1,0,!0),l=n.data,c=a.data,d=e.data,u=new d.constructor(d.length);for(let e=0;e<r;++e){const t=e*o;for(let r=0;r<o;++r){const o=t+r;u[o]=(d[o]-c[e])/(l[e]+s)}}return new i(e.type,u,e.dims)}function f(e,t){return e=e.slice(),null===t?e=e.filter((e=>1!==e)):"number"==typeof t?1===e[t]&&e.splice(t,1):Array.isArray(t)&&(e=e.filter(((e,s)=>1!==e||!t.includes(s)))),e}function g(e,t){return t=M(t,e.length+1),(e=e.slice()).splice(t,0,1),e}function M(e,t,s=null,r=!0){if(r&&(e<-t||e>=t))throw new Error(`IndexError: index ${e} is out of bounds for dimension${null===s?"":" "+s} with size ${t}`);return e<0&&(e=(e%t+t)%t),e}function w(e,t=0){t=M(t,e[0].dims.length);const s=e[0].dims.slice();s[t]=e.reduce(((e,s)=>e+s.dims[t]),0);const r=s.reduce(((e,t)=>e*t),1),o=new e[0].data.constructor(r),n=e[0].type;if(0===t){let t=0;for(const s of e){const e=s.data;o.set(e,t),t+=e.length}}else{let r=0;for(let n=0;n<e.length;++n){const{data:a,dims:i}=e[n];for(let e=0;e<a.length;++e){let n=0;for(let 
o=i.length-1,a=e,l=1;o>=0;--o){const e=i[o];let c=a%e;o===t&&(c+=r),n+=c*l,l*=s[o],a=Math.floor(a/e)}o[n]=a[e]}r+=i[t]}}return new i(n,o,s)}function T(e,t=0){return w(e.map((e=>e.unsqueeze(t))),t)}function b(e,t=null,s=1,r=!1){const o=e.data,n=e.dims;if(null===t){const t=o.reduce(((e,t)=>e+t),0)/o.length,r=Math.sqrt(o.reduce(((e,s)=>e+(s-t)**2),0)/(o.length-s)),n=new i(e.type,[t],[]);return[new i(e.type,[r],[]),n]}const a=P(e,t=M(t,n.length),r),l=a.data,c=n.slice();c[t]=1;const d=new o.constructor(o.length/n[t]);for(let e=0;e<o.length;++e){let s=0;for(let r=n.length-1,o=e,a=1;r>=0;--r){const e=n[r];if(r!==t){s+=o%e*a,a*=c[r]}o=Math.floor(o/e)}d[s]+=(o[e]-l[s])**2}for(let e=0;e<d.length;++e)d[e]=Math.sqrt(d[e]/(n[t]-s));r||c.splice(t,1);return[new i(e.type,d,c),a]}function P(e,t=null,s=!1){const r=e.data;if(null===t){const t=r.reduce(((e,t)=>e+t),0);return new i(e.type,[t/r.length],[])}const o=e.dims;t=M(t,o.length);const n=o.slice();n[t]=1;const a=new r.constructor(r.length/o[t]);for(let e=0;e<r.length;++e){let s=0;for(let r=o.length-1,a=e,i=1;r>=0;--r){const e=o[r];if(r!==t){s+=a%e*i,i*=n[r]}a=Math.floor(a/e)}a[s]+=r[e]}if(1!==o[t])for(let e=0;e<a.length;++e)a[e]=a[e]/o[t];return s||n.splice(t,1),new i(e.type,a,n)}function x(e,t,s,r){const o=e.reduce(((e,t)=>e*t),1);return new i(s,new r(o).fill(t),e)}function k(e,t){let s,r;if("number"==typeof t)s="float32",r=Float32Array;else if("bigint"==typeof t)s="int64",r=BigInt64Array;else{if("boolean"!=typeof t)throw new Error("Unsupported data type: "+typeof t);s="bool",r=Uint8Array}return x(e,t,s,r)}function y(e,t){return k(e.dims,t)}function F(e){return x(e,1n,"int64",BigInt64Array)}function v(e){return F(e.dims)}function C(e){return x(e,0n,"int64",BigInt64Array)}function S(e){return C(e.dims)}function A(e,t){if(2!==e.dims.length)throw new Error("The tensor must have 2 dimensions");if(e.dims.at(-1)%8!=0)throw new Error("The last dimension of the tensor must be a multiple of 8");if(!["binary","ubinary"].includes(t))throw 
new Error("The precision must be either 'binary' or 'ubinary'");const s="binary"===t,r=s?"int8":"uint8",o=s?Int8Array:Uint8Array,n=e.data,a=new o(n.length/8);for(let e=0;e<n.length;++e){const t=n[e]>0?1:0,r=Math.floor(e/8),o=e%8;a[r]|=t<<7-o,s&&0===o&&(a[r]-=128)}return new i(r,a,[e.dims[0],e.dims[1]/8])}}},l={};function c(e){var t=l[e];if(void 0!==t)return t.exports;var s=l[e]={exports:{}};return i[e](s,s.exports,c),s.exports}a=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,c.t=function(e,t){if(1&t&&(e=this(e)),8&t)return e;if("object"==typeof e&&e){if(4&t&&e.__esModule)return e;if(16&t&&"function"==typeof e.then)return e}var s=Object.create(null);c.r(s);var r={};n=n||[null,a({}),a([]),a(a)];for(var o=2&t&&e;"object"==typeof o&&!~n.indexOf(o);o=a(o))Object.getOwnPropertyNames(o).forEach((t=>r[t]=()=>e[t]));return r.default=()=>e,c.d(s,r),s},c.d=(e,t)=>{for(var s in t)c.o(t,s)&&!c.o(e,s)&&Object.defineProperty(e,s,{enumerable:!0,get:t[s]})},c.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),c.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})};var 
d={};(()=>{c.r(d),c.d(d,{ASTFeatureExtractor:()=>p.ASTFeatureExtractor,ASTForAudioClassification:()=>s.ASTForAudioClassification,ASTModel:()=>s.ASTModel,ASTPreTrainedModel:()=>s.ASTPreTrainedModel,AlbertForMaskedLM:()=>s.AlbertForMaskedLM,AlbertForQuestionAnswering:()=>s.AlbertForQuestionAnswering,AlbertForSequenceClassification:()=>s.AlbertForSequenceClassification,AlbertModel:()=>s.AlbertModel,AlbertPreTrainedModel:()=>s.AlbertPreTrainedModel,AlbertTokenizer:()=>r.AlbertTokenizer,AudioClassificationPipeline:()=>t.AudioClassificationPipeline,AutoConfig:()=>o.AutoConfig,AutoFeatureExtractor:()=>m.AutoFeatureExtractor,AutoImageProcessor:()=>f.AutoImageProcessor,AutoModel:()=>s.AutoModel,AutoModelForAudioClassification:()=>s.AutoModelForAudioClassification,AutoModelForAudioFrameClassification:()=>s.AutoModelForAudioFrameClassification,AutoModelForCTC:()=>s.AutoModelForCTC,AutoModelForCausalLM:()=>s.AutoModelForCausalLM,AutoModelForDepthEstimation:()=>s.AutoModelForDepthEstimation,AutoModelForDocumentQuestionAnswering:()=>s.AutoModelForDocumentQuestionAnswering,AutoModelForImageClassification:()=>s.AutoModelForImageClassification,AutoModelForImageFeatureExtraction:()=>s.AutoModelForImageFeatureExtraction,AutoModelForImageMatting:()=>s.AutoModelForImageMatting,AutoModelForImageSegmentation:()=>s.AutoModelForImageSegmentation,AutoModelForImageToImage:()=>s.AutoModelForImageToImage,AutoModelForMaskGeneration:()=>s.AutoModelForMaskGeneration,AutoModelForMaskedLM:()=>s.AutoModelForMaskedLM,AutoModelForNormalEstimation:()=>s.AutoModelForNormalEstimation,AutoModelForObjectDetection:()=>s.AutoModelForObjectDetection,AutoModelForPoseEstimation:()=>s.AutoModelForPoseEstimation,AutoModelForQuestionAnswering:()=>s.AutoModelForQuestionAnswering,AutoModelForSemanticSegmentation:()=>s.AutoModelForSemanticSegmentation,AutoModelForSeq2SeqLM:()=>s.AutoModelForSeq2SeqLM,AutoModelForSequenceClassification:()=>s.AutoModelForSequenceClassification,AutoModelForSpeechSeq2Seq:()=>s.AutoModelFo
rSpeechSeq2Seq,AutoModelForTextToSpectrogram:()=>s.AutoModelForTextToSpectrogram,AutoModelForTextToWaveform:()=>s.AutoModelForTextToWaveform,AutoModelForTokenClassification:()=>s.AutoModelForTokenClassification,AutoModelForUniversalSegmentation:()=>s.AutoModelForUniversalSegmentation,AutoModelForVision2Seq:()=>s.AutoModelForVision2Seq,AutoModelForXVector:()=>s.AutoModelForXVector,AutoModelForZeroShotObjectDetection:()=>s.AutoModelForZeroShotObjectDetection,AutoProcessor:()=>w.AutoProcessor,AutoTokenizer:()=>r.AutoTokenizer,AutomaticSpeechRecognitionPipeline:()=>t.AutomaticSpeechRecognitionPipeline,BartForConditionalGeneration:()=>s.BartForConditionalGeneration,BartForSequenceClassification:()=>s.BartForSequenceClassification,BartModel:()=>s.BartModel,BartPretrainedModel:()=>s.BartPretrainedModel,BartTokenizer:()=>r.BartTokenizer,BaseModelOutput:()=>s.BaseModelOutput,BaseStreamer:()=>T.BaseStreamer,BeitFeatureExtractor:()=>h.BeitFeatureExtractor,BeitForImageClassification:()=>s.BeitForImageClassification,BeitModel:()=>s.BeitModel,BeitPreTrainedModel:()=>s.BeitPreTrainedModel,BertForMaskedLM:()=>s.BertForMaskedLM,BertForQuestionAnswering:()=>s.BertForQuestionAnswering,BertForSequenceClassification:()=>s.BertForSequenceClassification,BertForTokenClassification:()=>s.BertForTokenClassification,BertModel:()=>s.BertModel,BertPreTrainedModel:()=>s.BertPreTrainedModel,BertTokenizer:()=>r.BertTokenizer,BitImageProcessor:()=>h.BitImageProcessor,BlenderbotForConditionalGeneration:()=>s.BlenderbotForConditionalGeneration,BlenderbotModel:()=>s.BlenderbotModel,BlenderbotPreTrainedModel:()=>s.BlenderbotPreTrainedModel,BlenderbotSmallForConditionalGeneration:()=>s.BlenderbotSmallForConditionalGeneration,BlenderbotSmallModel:()=>s.BlenderbotSmallModel,BlenderbotSmallPreTrainedModel:()=>s.BlenderbotSmallPreTrainedModel,BlenderbotSmallTokenizer:()=>r.BlenderbotSmallTokenizer,BlenderbotTokenizer:()=>r.BlenderbotTokenizer,BloomForCausalLM:()=>s.BloomForCausalLM,BloomModel:()=>s.BloomMod
el,BloomPreTrainedModel:()=>s.BloomPreTrainedModel,BloomTokenizer:()=>r.BloomTokenizer,CLIPFeatureExtractor:()=>h.CLIPFeatureExtractor,CLIPImageProcessor:()=>h.CLIPImageProcessor,CLIPModel:()=>s.CLIPModel,CLIPPreTrainedModel:()=>s.CLIPPreTrainedModel,CLIPSegForImageSegmentation:()=>s.CLIPSegForImageSegmentation,CLIPSegModel:()=>s.CLIPSegModel,CLIPSegPreTrainedModel:()=>s.CLIPSegPreTrainedModel,CLIPTextModel:()=>s.CLIPTextModel,CLIPTextModelWithProjection:()=>s.CLIPTextModelWithProjection,CLIPTokenizer:()=>r.CLIPTokenizer,CLIPVisionModel:()=>s.CLIPVisionModel,CLIPVisionModelWithProjection:()=>s.CLIPVisionModelWithProjection,CamembertForMaskedLM:()=>s.CamembertForMaskedLM,CamembertForQuestionAnswering:()=>s.CamembertForQuestionAnswering,CamembertForSequenceClassification:()=>s.CamembertForSequenceClassification,CamembertForTokenClassification:()=>s.CamembertForTokenClassification,CamembertModel:()=>s.CamembertModel,CamembertPreTrainedModel:()=>s.CamembertPreTrainedModel,CamembertTokenizer:()=>r.CamembertTokenizer,CausalLMOutput:()=>s.CausalLMOutput,CausalLMOutputWithPast:()=>s.CausalLMOutputWithPast,ChineseCLIPFeatureExtractor:()=>h.ChineseCLIPFeatureExtractor,ChineseCLIPModel:()=>s.ChineseCLIPModel,ChineseCLIPPreTrainedModel:()=>s.ChineseCLIPPreTrainedModel,ClapAudioModelWithProjection:()=>s.ClapAudioModelWithProjection,ClapFeatureExtractor:()=>p.ClapFeatureExtractor,ClapModel:()=>s.ClapModel,ClapPreTrainedModel:()=>s.ClapPreTrainedModel,ClapTextModelWithProjection:()=>s.ClapTextModelWithProjection,ClassifierFreeGuidanceLogitsProcessor:()=>P.ClassifierFreeGuidanceLogitsProcessor,CodeGenForCausalLM:()=>s.CodeGenForCausalLM,CodeGenModel:()=>s.CodeGenModel,CodeGenPreTrainedModel:()=>s.CodeGenPreTrainedModel,CodeGenTokenizer:()=>r.CodeGenTokenizer,CodeLlamaTokenizer:()=>r.CodeLlamaTokenizer,CohereForCausalLM:()=>s.CohereForCausalLM,CohereModel:()=>s.CohereModel,CoherePreTrainedModel:()=>s.CoherePreTrainedModel,CohereTokenizer:()=>r.CohereTokenizer,ConvBertForMaskedLM:()=
>s.ConvBertForMaskedLM,ConvBertForQuestionAnswering:()=>s.ConvBertForQuestionAnswering,ConvBertForSequenceClassification:()=>s.ConvBertForSequenceClassification,ConvBertForTokenClassification:()=>s.ConvBertForTokenClassification,ConvBertModel:()=>s.ConvBertModel,ConvBertPreTrainedModel:()=>s.ConvBertPreTrainedModel,ConvBertTokenizer:()=>r.ConvBertTokenizer,ConvNextFeatureExtractor:()=>h.ConvNextFeatureExtractor,ConvNextForImageClassification:()=>s.ConvNextForImageClassification,ConvNextImageProcessor:()=>h.ConvNextImageProcessor,ConvNextModel:()=>s.ConvNextModel,ConvNextPreTrainedModel:()=>s.ConvNextPreTrainedModel,ConvNextV2ForImageClassification:()=>s.ConvNextV2ForImageClassification,ConvNextV2Model:()=>s.ConvNextV2Model,ConvNextV2PreTrainedModel:()=>s.ConvNextV2PreTrainedModel,DPTFeatureExtractor:()=>h.DPTFeatureExtractor,DPTForDepthEstimation:()=>s.DPTForDepthEstimation,DPTImageProcessor:()=>h.DPTImageProcessor,DPTModel:()=>s.DPTModel,DPTPreTrainedModel:()=>s.DPTPreTrainedModel,DebertaForMaskedLM:()=>s.DebertaForMaskedLM,DebertaForQuestionAnswering:()=>s.DebertaForQuestionAnswering,DebertaForSequenceClassification:()=>s.DebertaForSequenceClassification,DebertaForTokenClassification:()=>s.DebertaForTokenClassification,DebertaModel:()=>s.DebertaModel,DebertaPreTrainedModel:()=>s.DebertaPreTrainedModel,DebertaTokenizer:()=>r.DebertaTokenizer,DebertaV2ForMaskedLM:()=>s.DebertaV2ForMaskedLM,DebertaV2ForQuestionAnswering:()=>s.DebertaV2ForQuestionAnswering,DebertaV2ForSequenceClassification:()=>s.DebertaV2ForSequenceClassification,DebertaV2ForTokenClassification:()=>s.DebertaV2ForTokenClassification,DebertaV2Model:()=>s.DebertaV2Model,DebertaV2PreTrainedModel:()=>s.DebertaV2PreTrainedModel,DebertaV2Tokenizer:()=>r.DebertaV2Tokenizer,DecisionTransformerModel:()=>s.DecisionTransformerModel,DecisionTransformerPreTrainedModel:()=>s.DecisionTransformerPreTrainedModel,DeiTFeatureExtractor:()=>h.DeiTFeatureExtractor,DeiTForImageClassification:()=>s.DeiTForImageClassification
,DeiTImageProcessor:()=>h.DeiTImageProcessor,DeiTModel:()=>s.DeiTModel,DeiTPreTrainedModel:()=>s.DeiTPreTrainedModel,DepthAnythingForDepthEstimation:()=>s.DepthAnythingForDepthEstimation,DepthAnythingPreTrainedModel:()=>s.DepthAnythingPreTrainedModel,DepthEstimationPipeline:()=>t.DepthEstimationPipeline,DepthProForDepthEstimation:()=>s.DepthProForDepthEstimation,DepthProPreTrainedModel:()=>s.DepthProPreTrainedModel,DetrFeatureExtractor:()=>h.DetrFeatureExtractor,DetrForObjectDetection:()=>s.DetrForObjectDetection,DetrForSegmentation:()=>s.DetrForSegmentation,DetrImageProcessor:()=>h.DetrImageProcessor,DetrModel:()=>s.DetrModel,DetrObjectDetectionOutput:()=>s.DetrObjectDetectionOutput,DetrPreTrainedModel:()=>s.DetrPreTrainedModel,DetrSegmentationOutput:()=>s.DetrSegmentationOutput,Dinov2ForImageClassification:()=>s.Dinov2ForImageClassification,Dinov2Model:()=>s.Dinov2Model,Dinov2PreTrainedModel:()=>s.Dinov2PreTrainedModel,DistilBertForMaskedLM:()=>s.DistilBertForMaskedLM,DistilBertForQuestionAnswering:()=>s.DistilBertForQuestionAnswering,DistilBertForSequenceClassification:()=>s.DistilBertForSequenceClassification,DistilBertForTokenClassification:()=>s.DistilBertForTokenClassification,DistilBertModel:()=>s.DistilBertModel,DistilBertPreTrainedModel:()=>s.DistilBertPreTrainedModel,DistilBertTokenizer:()=>r.DistilBertTokenizer,DocumentQuestionAnsweringPipeline:()=>t.DocumentQuestionAnsweringPipeline,DonutFeatureExtractor:()=>h.DonutFeatureExtractor,DonutImageProcessor:()=>h.DonutImageProcessor,DonutSwinModel:()=>s.DonutSwinModel,DonutSwinPreTrainedModel:()=>s.DonutSwinPreTrainedModel,EfficientNetForImageClassification:()=>s.EfficientNetForImageClassification,EfficientNetImageProcessor:()=>h.EfficientNetImageProcessor,EfficientNetModel:()=>s.EfficientNetModel,EfficientNetPreTrainedModel:()=>s.EfficientNetPreTrainedModel,ElectraForMaskedLM:()=>s.ElectraForMaskedLM,ElectraForQuestionAnswering:()=>s.ElectraForQuestionAnswering,ElectraForSequenceClassification:()=>s.ElectraF
orSequenceClassification,ElectraForTokenClassification:()=>s.ElectraForTokenClassification,ElectraModel:()=>s.ElectraModel,ElectraPreTrainedModel:()=>s.ElectraPreTrainedModel,ElectraTokenizer:()=>r.ElectraTokenizer,EosTokenCriteria:()=>b.EosTokenCriteria,EsmForMaskedLM:()=>s.EsmForMaskedLM,EsmForSequenceClassification:()=>s.EsmForSequenceClassification,EsmForTokenClassification:()=>s.EsmForTokenClassification,EsmModel:()=>s.EsmModel,EsmPreTrainedModel:()=>s.EsmPreTrainedModel,EsmTokenizer:()=>r.EsmTokenizer,FFT:()=>l.FFT,FalconForCausalLM:()=>s.FalconForCausalLM,FalconModel:()=>s.FalconModel,FalconPreTrainedModel:()=>s.FalconPreTrainedModel,FalconTokenizer:()=>r.FalconTokenizer,FastViTForImageClassification:()=>s.FastViTForImageClassification,FastViTModel:()=>s.FastViTModel,FastViTPreTrainedModel:()=>s.FastViTPreTrainedModel,FeatureExtractionPipeline:()=>t.FeatureExtractionPipeline,FeatureExtractor:()=>u.FeatureExtractor,FillMaskPipeline:()=>t.FillMaskPipeline,Florence2ForConditionalGeneration:()=>s.Florence2ForConditionalGeneration,Florence2PreTrainedModel:()=>s.Florence2PreTrainedModel,Florence2Processor:()=>M.Florence2Processor,ForcedBOSTokenLogitsProcessor:()=>P.ForcedBOSTokenLogitsProcessor,ForcedEOSTokenLogitsProcessor:()=>P.ForcedEOSTokenLogitsProcessor,GLPNFeatureExtractor:()=>h.GLPNFeatureExtractor,GLPNForDepthEstimation:()=>s.GLPNForDepthEstimation,GLPNModel:()=>s.GLPNModel,GLPNPreTrainedModel:()=>s.GLPNPreTrainedModel,GPT2LMHeadModel:()=>s.GPT2LMHeadModel,GPT2Model:()=>s.GPT2Model,GPT2PreTrainedModel:()=>s.GPT2PreTrainedModel,GPT2Tokenizer:()=>r.GPT2Tokenizer,GPTBigCodeForCausalLM:()=>s.GPTBigCodeForCausalLM,GPTBigCodeModel:()=>s.GPTBigCodeModel,GPTBigCodePreTrainedModel:()=>s.GPTBigCodePreTrainedModel,GPTJForCausalLM:()=>s.GPTJForCausalLM,GPTJModel:()=>s.GPTJModel,GPTJPreTrainedModel:()=>s.GPTJPreTrainedModel,GPTNeoForCausalLM:()=>s.GPTNeoForCausalLM,GPTNeoModel:()=>s.GPTNeoModel,GPTNeoPreTrainedModel:()=>s.GPTNeoPreTrainedModel,GPTNeoXForCausalLM:()=>s.
GPTNeoXForCausalLM,GPTNeoXModel:()=>s.GPTNeoXModel,GPTNeoXPreTrainedModel:()=>s.GPTNeoXPreTrainedModel,GPTNeoXTokenizer:()=>r.GPTNeoXTokenizer,Gemma2ForCausalLM:()=>s.Gemma2ForCausalLM,Gemma2Model:()=>s.Gemma2Model,Gemma2PreTrainedModel:()=>s.Gemma2PreTrainedModel,GemmaForCausalLM:()=>s.GemmaForCausalLM,GemmaModel:()=>s.GemmaModel,GemmaPreTrainedModel:()=>s.GemmaPreTrainedModel,GemmaTokenizer:()=>r.GemmaTokenizer,GraniteForCausalLM:()=>s.GraniteForCausalLM,GraniteModel:()=>s.GraniteModel,GranitePreTrainedModel:()=>s.GranitePreTrainedModel,Grok1Tokenizer:()=>r.Grok1Tokenizer,GroupViTModel:()=>s.GroupViTModel,GroupViTPreTrainedModel:()=>s.GroupViTPreTrainedModel,HerbertTokenizer:()=>r.HerbertTokenizer,HieraForImageClassification:()=>s.HieraForImageClassification,HieraModel:()=>s.HieraModel,HieraPreTrainedModel:()=>s.HieraPreTrainedModel,HubertForCTC:()=>s.HubertForCTC,HubertForSequenceClassification:()=>s.HubertForSequenceClassification,HubertModel:()=>s.HubertModel,HubertPreTrainedModel:()=>s.HubertPreTrainedModel,IJepaForImageClassification:()=>s.IJepaForImageClassification,IJepaModel:()=>s.IJepaModel,IJepaPreTrainedModel:()=>s.IJepaPreTrainedModel,Idefics3ForConditionalGeneration:()=>s.Idefics3ForConditionalGeneration,Idefics3ImageProcessor:()=>h.Idefics3ImageProcessor,Idefics3PreTrainedModel:()=>s.Idefics3PreTrainedModel,Idefics3Processor:()=>M.Idefics3Processor,ImageClassificationPipeline:()=>t.ImageClassificationPipeline,ImageFeatureExtractionPipeline:()=>t.ImageFeatureExtractionPipeline,ImageFeatureExtractor:()=>p.ImageFeatureExtractor,ImageMattingOutput:()=>s.ImageMattingOutput,ImageProcessor:()=>_.ImageProcessor,ImageSegmentationPipeline:()=>t.ImageSegmentationPipeline,ImageToImagePipeline:()=>t.ImageToImagePipeline,ImageToTextPipeline:()=>t.ImageToTextPipeline,InterruptableStoppingCriteria:()=>b.InterruptableStoppingCriteria,JAISLMHeadModel:()=>s.JAISLMHeadModel,JAISModel:()=>s.JAISModel,JAISPreTrainedModel:()=>s.JAISPreTrainedModel,JinaCLIPImageProcessor:()
=>h.JinaCLIPImageProcessor,JinaCLIPModel:()=>s.JinaCLIPModel,JinaCLIPPreTrainedModel:()=>s.JinaCLIPPreTrainedModel,JinaCLIPProcessor:()=>M.JinaCLIPProcessor,JinaCLIPTextModel:()=>s.JinaCLIPTextModel,JinaCLIPVisionModel:()=>s.JinaCLIPVisionModel,LlamaForCausalLM:()=>s.LlamaForCausalLM,LlamaModel:()=>s.LlamaModel,LlamaPreTrainedModel:()=>s.LlamaPreTrainedModel,LlamaTokenizer:()=>r.LlamaTokenizer,LlavaForConditionalGeneration:()=>s.LlavaForConditionalGeneration,LlavaOnevisionForConditionalGeneration:()=>s.LlavaOnevisionForConditionalGeneration,LlavaOnevisionImageProcessor:()=>h.LlavaOnevisionImageProcessor,LlavaPreTrainedModel:()=>s.LlavaPreTrainedModel,LogitsProcessor:()=>P.LogitsProcessor,LogitsProcessorList:()=>P.LogitsProcessorList,LogitsWarper:()=>P.LogitsWarper,LongT5ForConditionalGeneration:()=>s.LongT5ForConditionalGeneration,LongT5Model:()=>s.LongT5Model,LongT5PreTrainedModel:()=>s.LongT5PreTrainedModel,M2M100ForConditionalGeneration:()=>s.M2M100ForConditionalGeneration,M2M100Model:()=>s.M2M100Model,M2M100PreTrainedModel:()=>s.M2M100PreTrainedModel,M2M100Tokenizer:()=>r.M2M100Tokenizer,MBart50Tokenizer:()=>r.MBart50Tokenizer,MBartForCausalLM:()=>s.MBartForCausalLM,MBartForConditionalGeneration:()=>s.MBartForConditionalGeneration,MBartForSequenceClassification:()=>s.MBartForSequenceClassification,MBartModel:()=>s.MBartModel,MBartPreTrainedModel:()=>s.MBartPreTrainedModel,MBartTokenizer:()=>r.MBartTokenizer,MPNetForMaskedLM:()=>s.MPNetForMaskedLM,MPNetForQuestionAnswering:()=>s.MPNetForQuestionAnswering,MPNetForSequenceClassification:()=>s.MPNetForSequenceClassification,MPNetForTokenClassification:()=>s.MPNetForTokenClassification,MPNetModel:()=>s.MPNetModel,MPNetPreTrainedModel:()=>s.MPNetPreTrainedModel,MPNetTokenizer:()=>r.MPNetTokenizer,MT5ForConditionalGeneration:()=>s.MT5ForConditionalGeneration,MT5Model:()=>s.MT5Model,MT5PreTrainedModel:()=>s.MT5PreTrainedModel,MarianMTModel:()=>s.MarianMTModel,MarianModel:()=>s.MarianModel,MarianPreTrainedModel:()=>s.Mar
ianPreTrainedModel,MarianTokenizer:()=>r.MarianTokenizer,Mask2FormerImageProcessor:()=>h.Mask2FormerImageProcessor,MaskFormerFeatureExtractor:()=>h.MaskFormerFeatureExtractor,MaskFormerForInstanceSegmentation:()=>s.MaskFormerForInstanceSegmentation,MaskFormerImageProcessor:()=>h.MaskFormerImageProcessor,MaskFormerModel:()=>s.MaskFormerModel,MaskFormerPreTrainedModel:()=>s.MaskFormerPreTrainedModel,MaskedLMOutput:()=>s.MaskedLMOutput,MaxLengthCriteria:()=>b.MaxLengthCriteria,MgpstrForSceneTextRecognition:()=>s.MgpstrForSceneTextRecognition,MgpstrModelOutput:()=>s.MgpstrModelOutput,MgpstrPreTrainedModel:()=>s.MgpstrPreTrainedModel,MgpstrProcessor:()=>M.MgpstrProcessor,MgpstrTokenizer:()=>r.MgpstrTokenizer,MinLengthLogitsProcessor:()=>P.MinLengthLogitsProcessor,MinNewTokensLengthLogitsProcessor:()=>P.MinNewTokensLengthLogitsProcessor,MistralForCausalLM:()=>s.MistralForCausalLM,MistralModel:()=>s.MistralModel,MistralPreTrainedModel:()=>s.MistralPreTrainedModel,MobileBertForMaskedLM:()=>s.MobileBertForMaskedLM,MobileBertForQuestionAnswering:()=>s.MobileBertForQuestionAnswering,MobileBertForSequenceClassification:()=>s.MobileBertForSequenceClassification,MobileBertModel:()=>s.MobileBertModel,MobileBertPreTrainedModel:()=>s.MobileBertPreTrainedModel,MobileBertTokenizer:()=>r.MobileBertTokenizer,MobileLLMForCausalLM:()=>s.MobileLLMForCausalLM,MobileLLMModel:()=>s.MobileLLMModel,MobileLLMPreTrainedModel:()=>s.MobileLLMPreTrainedModel,MobileNetV1FeatureExtractor:()=>h.MobileNetV1FeatureExtractor,MobileNetV1ForImageClassification:()=>s.MobileNetV1ForImageClassification,MobileNetV1ImageProcessor:()=>h.MobileNetV1ImageProcessor,MobileNetV1Model:()=>s.MobileNetV1Model,MobileNetV1PreTrainedModel:()=>s.MobileNetV1PreTrainedModel,MobileNetV2FeatureExtractor:()=>h.MobileNetV2FeatureExtractor,MobileNetV2ForImageClassification:()=>s.MobileNetV2ForImageClassification,MobileNetV2ImageProcessor:()=>h.MobileNetV2ImageProcessor,MobileNetV2Model:()=>s.MobileNetV2Model,MobileNetV2PreTrainedMo
del:()=>s.MobileNetV2PreTrainedModel,MobileNetV3FeatureExtractor:()=>h.MobileNetV3FeatureExtractor,MobileNetV3ForImageClassification:()=>s.MobileNetV3ForImageClassification,MobileNetV3ImageProcessor:()=>h.MobileNetV3ImageProcessor,MobileNetV3Model:()=>s.MobileNetV3Model,MobileNetV3PreTrainedModel:()=>s.MobileNetV3PreTrainedModel,MobileNetV4FeatureExtractor:()=>h.MobileNetV4FeatureExtractor,MobileNetV4ForImageClassification:()=>s.MobileNetV4ForImageClassification,MobileNetV4ImageProcessor:()=>h.MobileNetV4ImageProcessor,MobileNetV4Model:()=>s.MobileNetV4Model,MobileNetV4PreTrainedModel:()=>s.MobileNetV4PreTrainedModel,MobileViTFeatureExtractor:()=>h.MobileViTFeatureExtractor,MobileViTForImageClassification:()=>s.MobileViTForImageClassification,MobileViTImageProcessor:()=>h.MobileViTImageProcessor,MobileViTModel:()=>s.MobileViTModel,MobileViTPreTrainedModel:()=>s.MobileViTPreTrainedModel,MobileViTV2ForImageClassification:()=>s.MobileViTV2ForImageClassification,MobileViTV2Model:()=>s.MobileViTV2Model,MobileViTV2PreTrainedModel:()=>s.MobileViTV2PreTrainedModel,ModelOutput:()=>s.ModelOutput,Moondream1ForConditionalGeneration:()=>s.Moondream1ForConditionalGeneration,MptForCausalLM:()=>s.MptForCausalLM,MptModel:()=>s.MptModel,MptPreTrainedModel:()=>s.MptPreTrainedModel,MultiModalityCausalLM:()=>s.MultiModalityCausalLM,MultiModalityPreTrainedModel:()=>s.MultiModalityPreTrainedModel,MusicgenForCausalLM:()=>s.MusicgenForCausalLM,MusicgenForConditionalGeneration:()=>s.MusicgenForConditionalGeneration,MusicgenModel:()=>s.MusicgenModel,MusicgenPreTrainedModel:()=>s.MusicgenPreTrainedModel,NllbTokenizer:()=>r.NllbTokenizer,NoBadWordsLogitsProcessor:()=>P.NoBadWordsLogitsProcessor,NoRepeatNGramLogitsProcessor:()=>P.NoRepeatNGramLogitsProcessor,NomicBertModel:()=>s.NomicBertModel,NomicBertPreTrainedModel:()=>s.NomicBertPreTrainedModel,NougatImageProcessor:()=>h.NougatImageProcessor,NougatTokenizer:()=>r.NougatTokenizer,OPTForCausalLM:()=>s.OPTForCausalLM,OPTModel:()=>s.OPTModel,OPT
PreTrainedModel:()=>s.OPTPreTrainedModel,ObjectDetectionPipeline:()=>t.ObjectDetectionPipeline,Olmo2ForCausalLM:()=>s.Olmo2ForCausalLM,Olmo2Model:()=>s.Olmo2Model,Olmo2PreTrainedModel:()=>s.Olmo2PreTrainedModel,OlmoForCausalLM:()=>s.OlmoForCausalLM,OlmoModel:()=>s.OlmoModel,OlmoPreTrainedModel:()=>s.OlmoPreTrainedModel,OpenELMForCausalLM:()=>s.OpenELMForCausalLM,OpenELMModel:()=>s.OpenELMModel,OpenELMPreTrainedModel:()=>s.OpenELMPreTrainedModel,OwlViTFeatureExtractor:()=>h.OwlViTFeatureExtractor,OwlViTForObjectDetection:()=>s.OwlViTForObjectDetection,OwlViTImageProcessor:()=>h.OwlViTImageProcessor,OwlViTModel:()=>s.OwlViTModel,OwlViTPreTrainedModel:()=>s.OwlViTPreTrainedModel,OwlViTProcessor:()=>M.OwlViTProcessor,Owlv2ForObjectDetection:()=>s.Owlv2ForObjectDetection,Owlv2ImageProcessor:()=>h.Owlv2ImageProcessor,Owlv2Model:()=>s.Owlv2Model,Owlv2PreTrainedModel:()=>s.Owlv2PreTrainedModel,PaliGemmaForConditionalGeneration:()=>s.PaliGemmaForConditionalGeneration,PaliGemmaPreTrainedModel:()=>s.PaliGemmaPreTrainedModel,PaliGemmaProcessor:()=>M.PaliGemmaProcessor,PatchTSMixerForPrediction:()=>s.PatchTSMixerForPrediction,PatchTSMixerModel:()=>s.PatchTSMixerModel,PatchTSMixerPreTrainedModel:()=>s.PatchTSMixerPreTrainedModel,PatchTSTForPrediction:()=>s.PatchTSTForPrediction,PatchTSTModel:()=>s.PatchTSTModel,PatchTSTPreTrainedModel:()=>s.PatchTSTPreTrainedModel,Phi3ForCausalLM:()=>s.Phi3ForCausalLM,Phi3Model:()=>s.Phi3Model,Phi3PreTrainedModel:()=>s.Phi3PreTrainedModel,PhiForCausalLM:()=>s.PhiForCausalLM,PhiModel:()=>s.PhiModel,PhiPreTrainedModel:()=>s.PhiPreTrainedModel,Pipeline:()=>t.Pipeline,PreTrainedModel:()=>s.PreTrainedModel,PreTrainedTokenizer:()=>r.PreTrainedTokenizer,PretrainedConfig:()=>o.PretrainedConfig,PretrainedMixin:()=>s.PretrainedMixin,Processor:()=>g.Processor,PvtForImageClassification:()=>s.PvtForImageClassification,PvtImageProcessor:()=>h.PvtImageProcessor,PvtModel:()=>s.PvtModel,PvtPreTrainedModel:()=>s.PvtPreTrainedModel,PyAnnoteFeatureExtractor:()=>p.Py
AnnoteFeatureExtractor,PyAnnoteForAudioFrameClassification:()=>s.PyAnnoteForAudioFrameClassification,PyAnnoteModel:()=>s.PyAnnoteModel,PyAnnotePreTrainedModel:()=>s.PyAnnotePreTrainedModel,PyAnnoteProcessor:()=>M.PyAnnoteProcessor,QuestionAnsweringModelOutput:()=>s.QuestionAnsweringModelOutput,QuestionAnsweringPipeline:()=>t.QuestionAnsweringPipeline,Qwen2ForCausalLM:()=>s.Qwen2ForCausalLM,Qwen2Model:()=>s.Qwen2Model,Qwen2PreTrainedModel:()=>s.Qwen2PreTrainedModel,Qwen2Tokenizer:()=>r.Qwen2Tokenizer,Qwen2VLForConditionalGeneration:()=>s.Qwen2VLForConditionalGeneration,Qwen2VLImageProcessor:()=>h.Qwen2VLImageProcessor,Qwen2VLPreTrainedModel:()=>s.Qwen2VLPreTrainedModel,Qwen2VLProcessor:()=>M.Qwen2VLProcessor,RTDetrForObjectDetection:()=>s.RTDetrForObjectDetection,RTDetrImageProcessor:()=>h.RTDetrImageProcessor,RTDetrModel:()=>s.RTDetrModel,RTDetrObjectDetectionOutput:()=>s.RTDetrObjectDetectionOutput,RTDetrPreTrainedModel:()=>s.RTDetrPreTrainedModel,RawImage:()=>a.RawImage,RepetitionPenaltyLogitsProcessor:()=>P.RepetitionPenaltyLogitsProcessor,ResNetForImageClassification:()=>s.ResNetForImageClassification,ResNetModel:()=>s.ResNetModel,ResNetPreTrainedModel:()=>s.ResNetPreTrainedModel,RoFormerForMaskedLM:()=>s.RoFormerForMaskedLM,RoFormerForQuestionAnswering:()=>s.RoFormerForQuestionAnswering,RoFormerForSequenceClassification:()=>s.RoFormerForSequenceClassification,RoFormerForTokenClassification:()=>s.RoFormerForTokenClassification,RoFormerModel:()=>s.RoFormerModel,RoFormerPreTrainedModel:()=>s.RoFormerPreTrainedModel,RoFormerTokenizer:()=>r.RoFormerTokenizer,RobertaForMaskedLM:()=>s.RobertaForMaskedLM,RobertaForQuestionAnswering:()=>s.RobertaForQuestionAnswering,RobertaForSequenceClassification:()=>s.RobertaForSequenceClassification,RobertaForTokenClassification:()=>s.RobertaForTokenClassification,RobertaModel:()=>s.RobertaModel,RobertaPreTrainedModel:()=>s.RobertaPreTrainedModel,RobertaTokenizer:()=>r.RobertaTokenizer,SamImageProcessor:()=>h.SamImageProcessor,SamIm
ageSegmentationOutput:()=>s.SamImageSegmentationOutput,SamModel:()=>s.SamModel,SamPreTrainedModel:()=>s.SamPreTrainedModel,SamProcessor:()=>M.SamProcessor,SapiensForDepthEstimation:()=>s.SapiensForDepthEstimation,SapiensForNormalEstimation:()=>s.SapiensForNormalEstimation,SapiensForSemanticSegmentation:()=>s.SapiensForSemanticSegmentation,SapiensPreTrainedModel:()=>s.SapiensPreTrainedModel,SeamlessM4TFeatureExtractor:()=>p.SeamlessM4TFeatureExtractor,SegformerFeatureExtractor:()=>h.SegformerFeatureExtractor,SegformerForImageClassification:()=>s.SegformerForImageClassification,SegformerForSemanticSegmentation:()=>s.SegformerForSemanticSegmentation,SegformerImageProcessor:()=>h.SegformerImageProcessor,SegformerModel:()=>s.SegformerModel,SegformerPreTrainedModel:()=>s.SegformerPreTrainedModel,Seq2SeqLMOutput:()=>s.Seq2SeqLMOutput,SequenceClassifierOutput:()=>s.SequenceClassifierOutput,SiglipImageProcessor:()=>h.SiglipImageProcessor,SiglipModel:()=>s.SiglipModel,SiglipPreTrainedModel:()=>s.SiglipPreTrainedModel,SiglipTextModel:()=>s.SiglipTextModel,SiglipTokenizer:()=>r.SiglipTokenizer,SiglipVisionModel:()=>s.SiglipVisionModel,SpeechT5FeatureExtractor:()=>p.SpeechT5FeatureExtractor,SpeechT5ForSpeechToText:()=>s.SpeechT5ForSpeechToText,SpeechT5ForTextToSpeech:()=>s.SpeechT5ForTextToSpeech,SpeechT5HifiGan:()=>s.SpeechT5HifiGan,SpeechT5Model:()=>s.SpeechT5Model,SpeechT5PreTrainedModel:()=>s.SpeechT5PreTrainedModel,SpeechT5Processor:()=>M.SpeechT5Processor,SpeechT5Tokenizer:()=>r.SpeechT5Tokenizer,SqueezeBertForMaskedLM:()=>s.SqueezeBertForMaskedLM,SqueezeBertForQuestionAnswering:()=>s.SqueezeBertForQuestionAnswering,SqueezeBertForSequenceClassification:()=>s.SqueezeBertForSequenceClassification,SqueezeBertModel:()=>s.SqueezeBertModel,SqueezeBertPreTrainedModel:()=>s.SqueezeBertPreTrainedModel,SqueezeBertTokenizer:()=>r.SqueezeBertTokenizer,StableLmForCausalLM:()=>s.StableLmForCausalLM,StableLmModel:()=>s.StableLmModel,StableLmPreTrainedModel:()=>s.StableLmPreTrainedModel,S
tarcoder2ForCausalLM:()=>s.Starcoder2ForCausalLM,Starcoder2Model:()=>s.Starcoder2Model,Starcoder2PreTrainedModel:()=>s.Starcoder2PreTrainedModel,StoppingCriteria:()=>b.StoppingCriteria,StoppingCriteriaList:()=>b.StoppingCriteriaList,SummarizationPipeline:()=>t.SummarizationPipeline,SuppressTokensAtBeginLogitsProcessor:()=>P.SuppressTokensAtBeginLogitsProcessor,Swin2SRForImageSuperResolution:()=>s.Swin2SRForImageSuperResolution,Swin2SRImageProcessor:()=>h.Swin2SRImageProcessor,Swin2SRModel:()=>s.Swin2SRModel,Swin2SRPreTrainedModel:()=>s.Swin2SRPreTrainedModel,SwinForImageClassification:()=>s.SwinForImageClassification,SwinModel:()=>s.SwinModel,SwinPreTrainedModel:()=>s.SwinPreTrainedModel,T5ForConditionalGeneration:()=>s.T5ForConditionalGeneration,T5Model:()=>s.T5Model,T5PreTrainedModel:()=>s.T5PreTrainedModel,T5Tokenizer:()=>r.T5Tokenizer,TableTransformerForObjectDetection:()=>s.TableTransformerForObjectDetection,TableTransformerModel:()=>s.TableTransformerModel,TableTransformerObjectDetectionOutput:()=>s.TableTransformerObjectDetectionOutput,TableTransformerPreTrainedModel:()=>s.TableTransformerPreTrainedModel,TemperatureLogitsWarper:()=>P.TemperatureLogitsWarper,Tensor:()=>i.Tensor,Text2TextGenerationPipeline:()=>t.Text2TextGenerationPipeline,TextClassificationPipeline:()=>t.TextClassificationPipeline,TextGenerationPipeline:()=>t.TextGenerationPipeline,TextStreamer:()=>T.TextStreamer,TextToAudioPipeline:()=>t.TextToAudioPipeline,TokenClassificationPipeline:()=>t.TokenClassificationPipeline,TokenClassifierOutput:()=>s.TokenClassifierOutput,TokenizerModel:()=>r.TokenizerModel,TopKLogitsWarper:()=>P.TopKLogitsWarper,TopPLogitsWarper:()=>P.TopPLogitsWarper,TrOCRForCausalLM:()=>s.TrOCRForCausalLM,TrOCRPreTrainedModel:()=>s.TrOCRPreTrainedModel,TranslationPipeline:()=>t.TranslationPipeline,UniSpeechForCTC:()=>s.UniSpeechForCTC,UniSpeechForSequenceClassification:()=>s.UniSpeechForSequenceClassification,UniSpeechModel:()=>s.UniSpeechModel,UniSpeechPreTrainedModel:()=>s.Un
iSpeechPreTrainedModel,UniSpeechSatForAudioFrameClassification:()=>s.UniSpeechSatForAudioFrameClassification,UniSpeechSatForCTC:()=>s.UniSpeechSatForCTC,UniSpeechSatForSequenceClassification:()=>s.UniSpeechSatForSequenceClassification,UniSpeechSatModel:()=>s.UniSpeechSatModel,UniSpeechSatPreTrainedModel:()=>s.UniSpeechSatPreTrainedModel,VLChatProcessor:()=>M.VLChatProcessor,VLMImageProcessor:()=>h.VLMImageProcessor,ViTFeatureExtractor:()=>h.ViTFeatureExtractor,ViTForImageClassification:()=>s.ViTForImageClassification,ViTImageProcessor:()=>h.ViTImageProcessor,ViTMAEModel:()=>s.ViTMAEModel,ViTMAEPreTrainedModel:()=>s.ViTMAEPreTrainedModel,ViTMSNForImageClassification:()=>s.ViTMSNForImageClassification,ViTMSNModel:()=>s.ViTMSNModel,ViTMSNPreTrainedModel:()=>s.ViTMSNPreTrainedModel,ViTModel:()=>s.ViTModel,ViTPreTrainedModel:()=>s.ViTPreTrainedModel,VisionEncoderDecoderModel:()=>s.VisionEncoderDecoderModel,VitMatteForImageMatting:()=>s.VitMatteForImageMatting,VitMatteImageProcessor:()=>h.VitMatteImageProcessor,VitMattePreTrainedModel:()=>s.VitMattePreTrainedModel,VitPoseForPoseEstimation:()=>s.VitPoseForPoseEstimation,VitPoseImageProcessor:()=>h.VitPoseImageProcessor,VitPosePreTrainedModel:()=>s.VitPosePreTrainedModel,VitsModel:()=>s.VitsModel,VitsModelOutput:()=>s.VitsModelOutput,VitsPreTrainedModel:()=>s.VitsPreTrainedModel,VitsTokenizer:()=>r.VitsTokenizer,Wav2Vec2BertForCTC:()=>s.Wav2Vec2BertForCTC,Wav2Vec2BertForSequenceClassification:()=>s.Wav2Vec2BertForSequenceClassification,Wav2Vec2BertModel:()=>s.Wav2Vec2BertModel,Wav2Vec2BertPreTrainedModel:()=>s.Wav2Vec2BertPreTrainedModel,Wav2Vec2CTCTokenizer:()=>r.Wav2Vec2CTCTokenizer,Wav2Vec2FeatureExtractor:()=>p.Wav2Vec2FeatureExtractor,Wav2Vec2ForAudioFrameClassification:()=>s.Wav2Vec2ForAudioFrameClassification,Wav2Vec2ForCTC:()=>s.Wav2Vec2ForCTC,Wav2Vec2ForSequenceClassification:()=>s.Wav2Vec2ForSequenceClassification,Wav2Vec2Model:()=>s.Wav2Vec2Model,Wav2Vec2PreTrainedModel:()=>s.Wav2Vec2PreTrainedModel,Wav2Vec2Proce
ssorWithLM:()=>M.Wav2Vec2ProcessorWithLM,WavLMForAudioFrameClassification:()=>s.WavLMForAudioFrameClassification,WavLMForCTC:()=>s.WavLMForCTC,WavLMForSequenceClassification:()=>s.WavLMForSequenceClassification,WavLMForXVector:()=>s.WavLMForXVector,WavLMModel:()=>s.WavLMModel,WavLMPreTrainedModel:()=>s.WavLMPreTrainedModel,WeSpeakerFeatureExtractor:()=>p.WeSpeakerFeatureExtractor,WeSpeakerResNetModel:()=>s.WeSpeakerResNetModel,WeSpeakerResNetPreTrainedModel:()=>s.WeSpeakerResNetPreTrainedModel,WhisperFeatureExtractor:()=>p.WhisperFeatureExtractor,WhisperForConditionalGeneration:()=>s.WhisperForConditionalGeneration,WhisperModel:()=>s.WhisperModel,WhisperPreTrainedModel:()=>s.WhisperPreTrainedModel,WhisperProcessor:()=>M.WhisperProcessor,WhisperTextStreamer:()=>T.WhisperTextStreamer,WhisperTimeStampLogitsProcessor:()=>P.WhisperTimeStampLogitsProcessor,WhisperTokenizer:()=>r.WhisperTokenizer,XLMForQuestionAnswering:()=>s.XLMForQuestionAnswering,XLMForSequenceClassification:()=>s.XLMForSequenceClassification,XLMForTokenClassification:()=>s.XLMForTokenClassification,XLMModel:()=>s.XLMModel,XLMPreTrainedModel:()=>s.XLMPreTrainedModel,XLMRobertaForMaskedLM:()=>s.XLMRobertaForMaskedLM,XLMRobertaForQuestionAnswering:()=>s.XLMRobertaForQuestionAnswering,XLMRobertaForSequenceClassification:()=>s.XLMRobertaForSequenceClassification,XLMRobertaForTokenClassification:()=>s.XLMRobertaForTokenClassification,XLMRobertaModel:()=>s.XLMRobertaModel,XLMRobertaPreTrainedModel:()=>s.XLMRobertaPreTrainedModel,XLMRobertaTokenizer:()=>r.XLMRobertaTokenizer,XLMTokenizer:()=>r.XLMTokenizer,XLMWithLMHeadModel:()=>s.XLMWithLMHeadModel,XVectorOutput:()=>s.XVectorOutput,YolosFeatureExtractor:()=>h.YolosFeatureExtractor,YolosForObjectDetection:()=>s.YolosForObjectDetection,YolosImageProcessor:()=>h.YolosImageProcessor,YolosModel:()=>s.YolosModel,YolosObjectDetectionOutput:()=>s.YolosObjectDetectionOutput,YolosPreTrainedModel:()=>s.YolosPreTrainedModel,ZeroShotAudioClassificationPipeline:()=>t.ZeroS
hotAudioClassificationPipeline,ZeroShotClassificationPipeline:()=>t.ZeroShotClassificationPipeline,ZeroShotImageClassificationPipeline:()=>t.ZeroShotImageClassificationPipeline,ZeroShotObjectDetectionPipeline:()=>t.ZeroShotObjectDetectionPipeline,bankers_round:()=>l.bankers_round,cat:()=>i.cat,cos_sim:()=>l.cos_sim,dot:()=>l.dot,dynamic_time_warping:()=>l.dynamic_time_warping,env:()=>e.env,full:()=>i.full,full_like:()=>i.full_like,getKeyValueShapes:()=>o.getKeyValueShapes,hamming:()=>n.hamming,hanning:()=>n.hanning,interpolate:()=>i.interpolate,interpolate_4d:()=>i.interpolate_4d,interpolate_data:()=>l.interpolate_data,is_chinese_char:()=>r.is_chinese_char,layer_norm:()=>i.layer_norm,load_image:()=>a.load_image,log_softmax:()=>l.log_softmax,magnitude:()=>l.magnitude,matmul:()=>i.matmul,max:()=>l.max,mean:()=>i.mean,mean_pooling:()=>i.mean_pooling,medianFilter:()=>l.medianFilter,mel_filter_bank:()=>n.mel_filter_bank,min:()=>l.min,ones:()=>i.ones,ones_like:()=>i.ones_like,permute:()=>i.permute,permute_data:()=>l.permute_data,pipeline:()=>t.pipeline,quantize_embeddings:()=>i.quantize_embeddings,read_audio:()=>n.read_audio,rfft:()=>i.rfft,round:()=>l.round,softmax:()=>l.softmax,spectrogram:()=>n.spectrogram,stack:()=>i.stack,std_mean:()=>i.std_mean,topk:()=>i.topk,window_function:()=>n.window_function,zeros:()=>i.zeros,zeros_like:()=>i.zeros_like});var 
e=c("./src/env.js"),t=c("./src/pipelines.js"),s=c("./src/models.js"),r=c("./src/tokenizers.js"),o=c("./src/configs.js"),n=c("./src/utils/audio.js"),a=c("./src/utils/image.js"),i=c("./src/utils/tensor.js"),l=c("./src/utils/maths.js"),u=c("./src/base/feature_extraction_utils.js"),p=c("./src/models/feature_extractors.js"),m=c("./src/models/auto/feature_extraction_auto.js"),_=c("./src/base/image_processors_utils.js"),h=c("./src/models/image_processors.js"),f=c("./src/models/auto/image_processing_auto.js"),g=c("./src/base/processing_utils.js"),M=c("./src/models/processors.js"),w=c("./src/models/auto/processing_auto.js"),T=c("./src/generation/streamers.js"),b=c("./src/generation/stopping_criteria.js"),P=c("./src/generation/logits_process.js")})();var u=d.ASTFeatureExtractor,p=d.ASTForAudioClassification,m=d.ASTModel,_=d.ASTPreTrainedModel,h=d.AlbertForMaskedLM,f=d.AlbertForQuestionAnswering,g=d.AlbertForSequenceClassification,M=d.AlbertModel,w=d.AlbertPreTrainedModel,T=d.AlbertTokenizer,b=d.AudioClassificationPipeline,P=d.AutoConfig,x=d.AutoFeatureExtractor,k=d.AutoImageProcessor,y=d.AutoModel,F=d.AutoModelForAudioClassification,v=d.AutoModelForAudioFrameClassification,C=d.AutoModelForCTC,S=d.AutoModelForCausalLM,A=d.AutoModelForDepthEstimation,E=d.AutoModelForDocumentQuestionAnswering,L=d.AutoModelForImageClassification,I=d.AutoModelForImageFeatureExtraction,z=d.AutoModelForImageMatting,j=d.AutoModelForImageSegmentation,N=d.AutoModelForImageToImage,V=d.AutoModelForMaskGeneration,O=d.AutoModelForMaskedLM,B=d.AutoModelForNormalEstimation,D=d.AutoModelForObjectDetection,G=d.AutoModelForPoseEstimation,R=d.AutoModelForQuestionAnswering,q=d.AutoModelForSemanticSegmentation,$=d.AutoModelForSeq2SeqLM,W=d.AutoModelForSequenceClassification,U=d.AutoModelForSpeechSeq2Seq,Q=d.AutoModelForTextToSpectrogram,X=d.AutoModelForTextToWaveform,H=d.AutoModelForTokenClassification,J=d.AutoModelForUniversalSegmentation,Y=d.AutoModelForVision2Seq,K=d.AutoModelForXVector,Z=d.AutoModelForZeroShot
ObjectDetection,ee=d.AutoProcessor,te=d.AutoTokenizer,se=d.AutomaticSpeechRecognitionPipeline,re=d.BartForConditionalGeneration,oe=d.BartForSequenceClassification,ne=d.BartModel,ae=d.BartPretrainedModel,ie=d.BartTokenizer,le=d.BaseModelOutput,ce=d.BaseStreamer,de=d.BeitFeatureExtractor,ue=d.BeitForImageClassification,pe=d.BeitModel,me=d.BeitPreTrainedModel,_e=d.BertForMaskedLM,he=d.BertForQuestionAnswering,fe=d.BertForSequenceClassification,ge=d.BertForTokenClassification,Me=d.BertModel,we=d.BertPreTrainedModel,Te=d.BertTokenizer,be=d.BitImageProcessor,Pe=d.BlenderbotForConditionalGeneration,xe=d.BlenderbotModel,ke=d.BlenderbotPreTrainedModel,ye=d.BlenderbotSmallForConditionalGeneration,Fe=d.BlenderbotSmallModel,ve=d.BlenderbotSmallPreTrainedModel,Ce=d.BlenderbotSmallTokenizer,Se=d.BlenderbotTokenizer,Ae=d.BloomForCausalLM,Ee=d.BloomModel,Le=d.BloomPreTrainedModel,Ie=d.BloomTokenizer,ze=d.CLIPFeatureExtractor,je=d.CLIPImageProcessor,Ne=d.CLIPModel,Ve=d.CLIPPreTrainedModel,Oe=d.CLIPSegForImageSegmentation,Be=d.CLIPSegModel,De=d.CLIPSegPreTrainedModel,Ge=d.CLIPTextModel,Re=d.CLIPTextModelWithProjection,qe=d.CLIPTokenizer,$e=d.CLIPVisionModel,We=d.CLIPVisionModelWithProjection,Ue=d.CamembertForMaskedLM,Qe=d.CamembertForQuestionAnswering,Xe=d.CamembertForSequenceClassification,He=d.CamembertForTokenClassification,Je=d.CamembertModel,Ye=d.CamembertPreTrainedModel,Ke=d.CamembertTokenizer,Ze=d.CausalLMOutput,et=d.CausalLMOutputWithPast,tt=d.ChineseCLIPFeatureExtractor,st=d.ChineseCLIPModel,rt=d.ChineseCLIPPreTrainedModel,ot=d.ClapAudioModelWithProjection,nt=d.ClapFeatureExtractor,at=d.ClapModel,it=d.ClapPreTrainedModel,lt=d.ClapTextModelWithProjection,ct=d.ClassifierFreeGuidanceLogitsProcessor,dt=d.CodeGenForCausalLM,ut=d.CodeGenModel,pt=d.CodeGenPreTrainedModel,mt=d.CodeGenTokenizer,_t=d.CodeLlamaTokenizer,ht=d.CohereForCausalLM,ft=d.CohereModel,gt=d.CoherePreTrainedModel,Mt=d.CohereTokenizer,wt=d.ConvBertForMaskedLM,Tt=d.ConvBertForQuestionAnswering,bt=d.ConvBertForSeque
nceClassification,Pt=d.ConvBertForTokenClassification,xt=d.ConvBertModel,kt=d.ConvBertPreTrainedModel,yt=d.ConvBertTokenizer,Ft=d.ConvNextFeatureExtractor,vt=d.ConvNextForImageClassification,Ct=d.ConvNextImageProcessor,St=d.ConvNextModel,At=d.ConvNextPreTrainedModel,Et=d.ConvNextV2ForImageClassification,Lt=d.ConvNextV2Model,It=d.ConvNextV2PreTrainedModel,zt=d.DPTFeatureExtractor,jt=d.DPTForDepthEstimation,Nt=d.DPTImageProcessor,Vt=d.DPTModel,Ot=d.DPTPreTrainedModel,Bt=d.DebertaForMaskedLM,Dt=d.DebertaForQuestionAnswering,Gt=d.DebertaForSequenceClassification,Rt=d.DebertaForTokenClassification,qt=d.DebertaModel,$t=d.DebertaPreTrainedModel,Wt=d.DebertaTokenizer,Ut=d.DebertaV2ForMaskedLM,Qt=d.DebertaV2ForQuestionAnswering,Xt=d.DebertaV2ForSequenceClassification,Ht=d.DebertaV2ForTokenClassification,Jt=d.DebertaV2Model,Yt=d.DebertaV2PreTrainedModel,Kt=d.DebertaV2Tokenizer,Zt=d.DecisionTransformerModel,es=d.DecisionTransformerPreTrainedModel,ts=d.DeiTFeatureExtractor,ss=d.DeiTForImageClassification,rs=d.DeiTImageProcessor,os=d.DeiTModel,ns=d.DeiTPreTrainedModel,as=d.DepthAnythingForDepthEstimation,is=d.DepthAnythingPreTrainedModel,ls=d.DepthEstimationPipeline,cs=d.DepthProForDepthEstimation,ds=d.DepthProPreTrainedModel,us=d.DetrFeatureExtractor,ps=d.DetrForObjectDetection,ms=d.DetrForSegmentation,_s=d.DetrImageProcessor,hs=d.DetrModel,fs=d.DetrObjectDetectionOutput,gs=d.DetrPreTrainedModel,Ms=d.DetrSegmentationOutput,ws=d.Dinov2ForImageClassification,Ts=d.Dinov2Model,bs=d.Dinov2PreTrainedModel,Ps=d.DistilBertForMaskedLM,xs=d.DistilBertForQuestionAnswering,ks=d.DistilBertForSequenceClassification,ys=d.DistilBertForTokenClassification,Fs=d.DistilBertModel,vs=d.DistilBertPreTrainedModel,Cs=d.DistilBertTokenizer,Ss=d.DocumentQuestionAnsweringPipeline,As=d.DonutFeatureExtractor,Es=d.DonutImageProcessor,Ls=d.DonutSwinModel,Is=d.DonutSwinPreTrainedModel,zs=d.EfficientNetForImageClassification,js=d.EfficientNetImageProcessor,Ns=d.EfficientNetModel,Vs=d.EfficientNetPreTrainedModel
,Os=d.ElectraForMaskedLM,Bs=d.ElectraForQuestionAnswering,Ds=d.ElectraForSequenceClassification,Gs=d.ElectraForTokenClassification,Rs=d.ElectraModel,qs=d.ElectraPreTrainedModel,$s=d.ElectraTokenizer,Ws=d.EosTokenCriteria,Us=d.EsmForMaskedLM,Qs=d.EsmForSequenceClassification,Xs=d.EsmForTokenClassification,Hs=d.EsmModel,Js=d.EsmPreTrainedModel,Ys=d.EsmTokenizer,Ks=d.FFT,Zs=d.FalconForCausalLM,er=d.FalconModel,tr=d.FalconPreTrainedModel,sr=d.FalconTokenizer,rr=d.FastViTForImageClassification,or=d.FastViTModel,nr=d.FastViTPreTrainedModel,ar=d.FeatureExtractionPipeline,ir=d.FeatureExtractor,lr=d.FillMaskPipeline,cr=d.Florence2ForConditionalGeneration,dr=d.Florence2PreTrainedModel,ur=d.Florence2Processor,pr=d.ForcedBOSTokenLogitsProcessor,mr=d.ForcedEOSTokenLogitsProcessor,_r=d.GLPNFeatureExtractor,hr=d.GLPNForDepthEstimation,fr=d.GLPNModel,gr=d.GLPNPreTrainedModel,Mr=d.GPT2LMHeadModel,wr=d.GPT2Model,Tr=d.GPT2PreTrainedModel,br=d.GPT2Tokenizer,Pr=d.GPTBigCodeForCausalLM,xr=d.GPTBigCodeModel,kr=d.GPTBigCodePreTrainedModel,yr=d.GPTJForCausalLM,Fr=d.GPTJModel,vr=d.GPTJPreTrainedModel,Cr=d.GPTNeoForCausalLM,Sr=d.GPTNeoModel,Ar=d.GPTNeoPreTrainedModel,Er=d.GPTNeoXForCausalLM,Lr=d.GPTNeoXModel,Ir=d.GPTNeoXPreTrainedModel,zr=d.GPTNeoXTokenizer,jr=d.Gemma2ForCausalLM,Nr=d.Gemma2Model,Vr=d.Gemma2PreTrainedModel,Or=d.GemmaForCausalLM,Br=d.GemmaModel,Dr=d.GemmaPreTrainedModel,Gr=d.GemmaTokenizer,Rr=d.GraniteForCausalLM,qr=d.GraniteModel,$r=d.GranitePreTrainedModel,Wr=d.Grok1Tokenizer,Ur=d.GroupViTModel,Qr=d.GroupViTPreTrainedModel,Xr=d.HerbertTokenizer,Hr=d.HieraForImageClassification,Jr=d.HieraModel,Yr=d.HieraPreTrainedModel,Kr=d.HubertForCTC,Zr=d.HubertForSequenceClassification,eo=d.HubertModel,to=d.HubertPreTrainedModel,so=d.IJepaForImageClassification,ro=d.IJepaModel,oo=d.IJepaPreTrainedModel,no=d.Idefics3ForConditionalGeneration,ao=d.Idefics3ImageProcessor,io=d.Idefics3PreTrainedModel,lo=d.Idefics3Processor,co=d.ImageClassificationPipeline,uo=d.ImageFeatureExtractionPipeline,po
=d.ImageFeatureExtractor,mo=d.ImageMattingOutput,_o=d.ImageProcessor,ho=d.ImageSegmentationPipeline,fo=d.ImageToImagePipeline,go=d.ImageToTextPipeline,Mo=d.InterruptableStoppingCriteria,wo=d.JAISLMHeadModel,To=d.JAISModel,bo=d.JAISPreTrainedModel,Po=d.JinaCLIPImageProcessor,xo=d.JinaCLIPModel,ko=d.JinaCLIPPreTrainedModel,yo=d.JinaCLIPProcessor,Fo=d.JinaCLIPTextModel,vo=d.JinaCLIPVisionModel,Co=d.LlamaForCausalLM,So=d.LlamaModel,Ao=d.LlamaPreTrainedModel,Eo=d.LlamaTokenizer,Lo=d.LlavaForConditionalGeneration,Io=d.LlavaOnevisionForConditionalGeneration,zo=d.LlavaOnevisionImageProcessor,jo=d.LlavaPreTrainedModel,No=d.LogitsProcessor,Vo=d.LogitsProcessorList,Oo=d.LogitsWarper,Bo=d.LongT5ForConditionalGeneration,Do=d.LongT5Model,Go=d.LongT5PreTrainedModel,Ro=d.M2M100ForConditionalGeneration,qo=d.M2M100Model,$o=d.M2M100PreTrainedModel,Wo=d.M2M100Tokenizer,Uo=d.MBart50Tokenizer,Qo=d.MBartForCausalLM,Xo=d.MBartForConditionalGeneration,Ho=d.MBartForSequenceClassification,Jo=d.MBartModel,Yo=d.MBartPreTrainedModel,Ko=d.MBartTokenizer,Zo=d.MPNetForMaskedLM,en=d.MPNetForQuestionAnswering,tn=d.MPNetForSequenceClassification,sn=d.MPNetForTokenClassification,rn=d.MPNetModel,on=d.MPNetPreTrainedModel,nn=d.MPNetTokenizer,an=d.MT5ForConditionalGeneration,ln=d.MT5Model,cn=d.MT5PreTrainedModel,dn=d.MarianMTModel,un=d.MarianModel,pn=d.MarianPreTrainedModel,mn=d.MarianTokenizer,_n=d.Mask2FormerImageProcessor,hn=d.MaskFormerFeatureExtractor,fn=d.MaskFormerForInstanceSegmentation,gn=d.MaskFormerImageProcessor,Mn=d.MaskFormerModel,wn=d.MaskFormerPreTrainedModel,Tn=d.MaskedLMOutput,bn=d.MaxLengthCriteria,Pn=d.MgpstrForSceneTextRecognition,xn=d.MgpstrModelOutput,kn=d.MgpstrPreTrainedModel,yn=d.MgpstrProcessor,Fn=d.MgpstrTokenizer,vn=d.MinLengthLogitsProcessor,Cn=d.MinNewTokensLengthLogitsProcessor,Sn=d.MistralForCausalLM,An=d.MistralModel,En=d.MistralPreTrainedModel,Ln=d.MobileBertForMaskedLM,In=d.MobileBertForQuestionAnswering,zn=d.MobileBertForSequenceClassification,jn=d.MobileBertModel,Nn=d
.MobileBertPreTrainedModel,Vn=d.MobileBertTokenizer,On=d.MobileLLMForCausalLM,Bn=d.MobileLLMModel,Dn=d.MobileLLMPreTrainedModel,Gn=d.MobileNetV1FeatureExtractor,Rn=d.MobileNetV1ForImageClassification,qn=d.MobileNetV1ImageProcessor,$n=d.MobileNetV1Model,Wn=d.MobileNetV1PreTrainedModel,Un=d.MobileNetV2FeatureExtractor,Qn=d.MobileNetV2ForImageClassification,Xn=d.MobileNetV2ImageProcessor,Hn=d.MobileNetV2Model,Jn=d.MobileNetV2PreTrainedModel,Yn=d.MobileNetV3FeatureExtractor,Kn=d.MobileNetV3ForImageClassification,Zn=d.MobileNetV3ImageProcessor,ea=d.MobileNetV3Model,ta=d.MobileNetV3PreTrainedModel,sa=d.MobileNetV4FeatureExtractor,ra=d.MobileNetV4ForImageClassification,oa=d.MobileNetV4ImageProcessor,na=d.MobileNetV4Model,aa=d.MobileNetV4PreTrainedModel,ia=d.MobileViTFeatureExtractor,la=d.MobileViTForImageClassification,ca=d.MobileViTImageProcessor,da=d.MobileViTModel,ua=d.MobileViTPreTrainedModel,pa=d.MobileViTV2ForImageClassification,ma=d.MobileViTV2Model,_a=d.MobileViTV2PreTrainedModel,ha=d.ModelOutput,fa=d.Moondream1ForConditionalGeneration,ga=d.MptForCausalLM,Ma=d.MptModel,wa=d.MptPreTrainedModel,Ta=d.MultiModalityCausalLM,ba=d.MultiModalityPreTrainedModel,Pa=d.MusicgenForCausalLM,xa=d.MusicgenForConditionalGeneration,ka=d.MusicgenModel,ya=d.MusicgenPreTrainedModel,Fa=d.NllbTokenizer,va=d.NoBadWordsLogitsProcessor,Ca=d.NoRepeatNGramLogitsProcessor,Sa=d.NomicBertModel,Aa=d.NomicBertPreTrainedModel,Ea=d.NougatImageProcessor,La=d.NougatTokenizer,Ia=d.OPTForCausalLM,za=d.OPTModel,ja=d.OPTPreTrainedModel,Na=d.ObjectDetectionPipeline,Va=d.Olmo2ForCausalLM,Oa=d.Olmo2Model,Ba=d.Olmo2PreTrainedModel,Da=d.OlmoForCausalLM,Ga=d.OlmoModel,Ra=d.OlmoPreTrainedModel,qa=d.OpenELMForCausalLM,$a=d.OpenELMModel,Wa=d.OpenELMPreTrainedModel,Ua=d.OwlViTFeatureExtractor,Qa=d.OwlViTForObjectDetection,Xa=d.OwlViTImageProcessor,Ha=d.OwlViTModel,Ja=d.OwlViTPreTrainedModel,Ya=d.OwlViTProcessor,Ka=d.Owlv2ForObjectDetection,Za=d.Owlv2ImageProcessor,ei=d.Owlv2Model,ti=d.Owlv2PreTrainedModel,si=d.Pali
GemmaForConditionalGeneration,ri=d.PaliGemmaPreTrainedModel,oi=d.PaliGemmaProcessor,ni=d.PatchTSMixerForPrediction,ai=d.PatchTSMixerModel,ii=d.PatchTSMixerPreTrainedModel,li=d.PatchTSTForPrediction,ci=d.PatchTSTModel,di=d.PatchTSTPreTrainedModel,ui=d.Phi3ForCausalLM,pi=d.Phi3Model,mi=d.Phi3PreTrainedModel,_i=d.PhiForCausalLM,hi=d.PhiModel,fi=d.PhiPreTrainedModel,gi=d.Pipeline,Mi=d.PreTrainedModel,wi=d.PreTrainedTokenizer,Ti=d.PretrainedConfig,bi=d.PretrainedMixin,Pi=d.Processor,xi=d.PvtForImageClassification,ki=d.PvtImageProcessor,yi=d.PvtModel,Fi=d.PvtPreTrainedModel,vi=d.PyAnnoteFeatureExtractor,Ci=d.PyAnnoteForAudioFrameClassification,Si=d.PyAnnoteModel,Ai=d.PyAnnotePreTrainedModel,Ei=d.PyAnnoteProcessor,Li=d.QuestionAnsweringModelOutput,Ii=d.QuestionAnsweringPipeline,zi=d.Qwen2ForCausalLM,ji=d.Qwen2Model,Ni=d.Qwen2PreTrainedModel,Vi=d.Qwen2Tokenizer,Oi=d.Qwen2VLForConditionalGeneration,Bi=d.Qwen2VLImageProcessor,Di=d.Qwen2VLPreTrainedModel,Gi=d.Qwen2VLProcessor,Ri=d.RTDetrForObjectDetection,qi=d.RTDetrImageProcessor,$i=d.RTDetrModel,Wi=d.RTDetrObjectDetectionOutput,Ui=d.RTDetrPreTrainedModel,Qi=d.RawImage,Xi=d.RepetitionPenaltyLogitsProcessor,Hi=d.ResNetForImageClassification,Ji=d.ResNetModel,Yi=d.ResNetPreTrainedModel,Ki=d.RoFormerForMaskedLM,Zi=d.RoFormerForQuestionAnswering,el=d.RoFormerForSequenceClassification,tl=d.RoFormerForTokenClassification,sl=d.RoFormerModel,rl=d.RoFormerPreTrainedModel,ol=d.RoFormerTokenizer,nl=d.RobertaForMaskedLM,al=d.RobertaForQuestionAnswering,il=d.RobertaForSequenceClassification,ll=d.RobertaForTokenClassification,cl=d.RobertaModel,dl=d.RobertaPreTrainedModel,ul=d.RobertaTokenizer,pl=d.SamImageProcessor,ml=d.SamImageSegmentationOutput,_l=d.SamModel,hl=d.SamPreTrainedModel,fl=d.SamProcessor,gl=d.SapiensForDepthEstimation,Ml=d.SapiensForNormalEstimation,wl=d.SapiensForSemanticSegmentation,Tl=d.SapiensPreTrainedModel,bl=d.SeamlessM4TFeatureExtractor,Pl=d.SegformerFeatureExtractor,xl=d.SegformerForImageClassification,kl=d.SegformerF
orSemanticSegmentation,yl=d.SegformerImageProcessor,Fl=d.SegformerModel,vl=d.SegformerPreTrainedModel,Cl=d.Seq2SeqLMOutput,Sl=d.SequenceClassifierOutput,Al=d.SiglipImageProcessor,El=d.SiglipModel,Ll=d.SiglipPreTrainedModel,Il=d.SiglipTextModel,zl=d.SiglipTokenizer,jl=d.SiglipVisionModel,Nl=d.SpeechT5FeatureExtractor,Vl=d.SpeechT5ForSpeechToText,Ol=d.SpeechT5ForTextToSpeech,Bl=d.SpeechT5HifiGan,Dl=d.SpeechT5Model,Gl=d.SpeechT5PreTrainedModel,Rl=d.SpeechT5Processor,ql=d.SpeechT5Tokenizer,$l=d.SqueezeBertForMaskedLM,Wl=d.SqueezeBertForQuestionAnswering,Ul=d.SqueezeBertForSequenceClassification,Ql=d.SqueezeBertModel,Xl=d.SqueezeBertPreTrainedModel,Hl=d.SqueezeBertTokenizer,Jl=d.StableLmForCausalLM,Yl=d.StableLmModel,Kl=d.StableLmPreTrainedModel,Zl=d.Starcoder2ForCausalLM,ec=d.Starcoder2Model,tc=d.Starcoder2PreTrainedModel,sc=d.StoppingCriteria,rc=d.StoppingCriteriaList,oc=d.SummarizationPipeline,nc=d.SuppressTokensAtBeginLogitsProcessor,ac=d.Swin2SRForImageSuperResolution,ic=d.Swin2SRImageProcessor,lc=d.Swin2SRModel,cc=d.Swin2SRPreTrainedModel,dc=d.SwinForImageClassification,uc=d.SwinModel,pc=d.SwinPreTrainedModel,mc=d.T5ForConditionalGeneration,_c=d.T5Model,hc=d.T5PreTrainedModel,fc=d.T5Tokenizer,gc=d.TableTransformerForObjectDetection,Mc=d.TableTransformerModel,wc=d.TableTransformerObjectDetectionOutput,Tc=d.TableTransformerPreTrainedModel,bc=d.TemperatureLogitsWarper,Pc=d.Tensor,xc=d.Text2TextGenerationPipeline,kc=d.TextClassificationPipeline,yc=d.TextGenerationPipeline,Fc=d.TextStreamer,vc=d.TextToAudioPipeline,Cc=d.TokenClassificationPipeline,Sc=d.TokenClassifierOutput,Ac=d.TokenizerModel,Ec=d.TopKLogitsWarper,Lc=d.TopPLogitsWarper,Ic=d.TrOCRForCausalLM,zc=d.TrOCRPreTrainedModel,jc=d.TranslationPipeline,Nc=d.UniSpeechForCTC,Vc=d.UniSpeechForSequenceClassification,Oc=d.UniSpeechModel,Bc=d.UniSpeechPreTrainedModel,Dc=d.UniSpeechSatForAudioFrameClassification,Gc=d.UniSpeechSatForCTC,Rc=d.UniSpeechSatForSequenceClassification,qc=d.UniSpeechSatModel,$c=d.UniSpeechSatPre
TrainedModel,Wc=d.VLChatProcessor,Uc=d.VLMImageProcessor,Qc=d.ViTFeatureExtractor,Xc=d.ViTForImageClassification,Hc=d.ViTImageProcessor,Jc=d.ViTMAEModel,Yc=d.ViTMAEPreTrainedModel,Kc=d.ViTMSNForImageClassification,Zc=d.ViTMSNModel,ed=d.ViTMSNPreTrainedModel,td=d.ViTModel,sd=d.ViTPreTrainedModel,rd=d.VisionEncoderDecoderModel,od=d.VitMatteForImageMatting,nd=d.VitMatteImageProcessor,ad=d.VitMattePreTrainedModel,id=d.VitPoseForPoseEstimation,ld=d.VitPoseImageProcessor,cd=d.VitPosePreTrainedModel,dd=d.VitsModel,ud=d.VitsModelOutput,pd=d.VitsPreTrainedModel,md=d.VitsTokenizer,_d=d.Wav2Vec2BertForCTC,hd=d.Wav2Vec2BertForSequenceClassification,fd=d.Wav2Vec2BertModel,gd=d.Wav2Vec2BertPreTrainedModel,Md=d.Wav2Vec2CTCTokenizer,wd=d.Wav2Vec2FeatureExtractor,Td=d.Wav2Vec2ForAudioFrameClassification,bd=d.Wav2Vec2ForCTC,Pd=d.Wav2Vec2ForSequenceClassification,xd=d.Wav2Vec2Model,kd=d.Wav2Vec2PreTrainedModel,yd=d.Wav2Vec2ProcessorWithLM,Fd=d.WavLMForAudioFrameClassification,vd=d.WavLMForCTC,Cd=d.WavLMForSequenceClassification,Sd=d.WavLMForXVector,Ad=d.WavLMModel,Ed=d.WavLMPreTrainedModel,Ld=d.WeSpeakerFeatureExtractor,Id=d.WeSpeakerResNetModel,zd=d.WeSpeakerResNetPreTrainedModel,jd=d.WhisperFeatureExtractor,Nd=d.WhisperForConditionalGeneration,Vd=d.WhisperModel,Od=d.WhisperPreTrainedModel,Bd=d.WhisperProcessor,Dd=d.WhisperTextStreamer,Gd=d.WhisperTimeStampLogitsProcessor,Rd=d.WhisperTokenizer,qd=d.XLMForQuestionAnswering,$d=d.XLMForSequenceClassification,Wd=d.XLMForTokenClassification,Ud=d.XLMModel,Qd=d.XLMPreTrainedModel,Xd=d.XLMRobertaForMaskedLM,Hd=d.XLMRobertaForQuestionAnswering,Jd=d.XLMRobertaForSequenceClassification,Yd=d.XLMRobertaForTokenClassification,Kd=d.XLMRobertaModel,Zd=d.XLMRobertaPreTrainedModel,eu=d.XLMRobertaTokenizer,tu=d.XLMTokenizer,su=d.XLMWithLMHeadModel,ru=d.XVectorOutput,ou=d.YolosFeatureExtractor,nu=d.YolosForObjectDetection,au=d.YolosImageProcessor,iu=d.YolosModel,lu=d.YolosObjectDetectionOutput,cu=d.YolosPreTrainedModel,du=d.ZeroShotAudioClassificationPi
peline,uu=d.ZeroShotClassificationPipeline,pu=d.ZeroShotImageClassificationPipeline,mu=d.ZeroShotObjectDetectionPipeline,_u=d.bankers_round,hu=d.cat,fu=d.cos_sim,gu=d.dot,Mu=d.dynamic_time_warping,wu=d.env,Tu=d.full,bu=d.full_like,Pu=d.getKeyValueShapes,xu=d.hamming,ku=d.hanning,yu=d.interpolate,Fu=d.interpolate_4d,vu=d.interpolate_data,Cu=d.is_chinese_char,Su=d.layer_norm,Au=d.load_image,Eu=d.log_softmax,Lu=d.magnitude,Iu=d.matmul,zu=d.max,ju=d.mean,Nu=d.mean_pooling,Vu=d.medianFilter,Ou=d.mel_filter_bank,Bu=d.min,Du=d.ones,Gu=d.ones_like,Ru=d.permute,qu=d.permute_data,$u=d.pipeline,Wu=d.quantize_embeddings,Uu=d.read_audio,Qu=d.rfft,Xu=d.round,Hu=d.softmax,Ju=d.spectrogram,Yu=d.stack,Ku=d.std_mean,Zu=d.topk,ep=d.window_function,tp=d.zeros,sp=d.zeros_like;export{u as ASTFeatureExtractor,p as ASTForAudioClassification,m as ASTModel,_ as ASTPreTrainedModel,h as AlbertForMaskedLM,f as AlbertForQuestionAnswering,g as AlbertForSequenceClassification,M as AlbertModel,w as AlbertPreTrainedModel,T as AlbertTokenizer,b as AudioClassificationPipeline,P as AutoConfig,x as AutoFeatureExtractor,k as AutoImageProcessor,y as AutoModel,F as AutoModelForAudioClassification,v as AutoModelForAudioFrameClassification,C as AutoModelForCTC,S as AutoModelForCausalLM,A as AutoModelForDepthEstimation,E as AutoModelForDocumentQuestionAnswering,L as AutoModelForImageClassification,I as AutoModelForImageFeatureExtraction,z as AutoModelForImageMatting,j as AutoModelForImageSegmentation,N as AutoModelForImageToImage,V as AutoModelForMaskGeneration,O as AutoModelForMaskedLM,B as AutoModelForNormalEstimation,D as AutoModelForObjectDetection,G as AutoModelForPoseEstimation,R as AutoModelForQuestionAnswering,q as AutoModelForSemanticSegmentation,$ as AutoModelForSeq2SeqLM,W as AutoModelForSequenceClassification,U as AutoModelForSpeechSeq2Seq,Q as AutoModelForTextToSpectrogram,X as AutoModelForTextToWaveform,H as AutoModelForTokenClassification,J as AutoModelForUniversalSegmentation,Y as 
AutoModelForVision2Seq,K as AutoModelForXVector,Z as AutoModelForZeroShotObjectDetection,ee as AutoProcessor,te as AutoTokenizer,se as AutomaticSpeechRecognitionPipeline,re as BartForConditionalGeneration,oe as BartForSequenceClassification,ne as BartModel,ae as BartPretrainedModel,ie as BartTokenizer,le as BaseModelOutput,ce as BaseStreamer,de as BeitFeatureExtractor,ue as BeitForImageClassification,pe as BeitModel,me as BeitPreTrainedModel,_e as BertForMaskedLM,he as BertForQuestionAnswering,fe as BertForSequenceClassification,ge as BertForTokenClassification,Me as BertModel,we as BertPreTrainedModel,Te as BertTokenizer,be as BitImageProcessor,Pe as BlenderbotForConditionalGeneration,xe as BlenderbotModel,ke as BlenderbotPreTrainedModel,ye as BlenderbotSmallForConditionalGeneration,Fe as BlenderbotSmallModel,ve as BlenderbotSmallPreTrainedModel,Ce as BlenderbotSmallTokenizer,Se as BlenderbotTokenizer,Ae as BloomForCausalLM,Ee as BloomModel,Le as BloomPreTrainedModel,Ie as BloomTokenizer,ze as CLIPFeatureExtractor,je as CLIPImageProcessor,Ne as CLIPModel,Ve as CLIPPreTrainedModel,Oe as CLIPSegForImageSegmentation,Be as CLIPSegModel,De as CLIPSegPreTrainedModel,Ge as CLIPTextModel,Re as CLIPTextModelWithProjection,qe as CLIPTokenizer,$e as CLIPVisionModel,We as CLIPVisionModelWithProjection,Ue as CamembertForMaskedLM,Qe as CamembertForQuestionAnswering,Xe as CamembertForSequenceClassification,He as CamembertForTokenClassification,Je as CamembertModel,Ye as CamembertPreTrainedModel,Ke as CamembertTokenizer,Ze as CausalLMOutput,et as CausalLMOutputWithPast,tt as ChineseCLIPFeatureExtractor,st as ChineseCLIPModel,rt as ChineseCLIPPreTrainedModel,ot as ClapAudioModelWithProjection,nt as ClapFeatureExtractor,at as ClapModel,it as ClapPreTrainedModel,lt as ClapTextModelWithProjection,ct as ClassifierFreeGuidanceLogitsProcessor,dt as CodeGenForCausalLM,ut as CodeGenModel,pt as CodeGenPreTrainedModel,mt as CodeGenTokenizer,_t as CodeLlamaTokenizer,ht as 
CohereForCausalLM,ft as CohereModel,gt as CoherePreTrainedModel,Mt as CohereTokenizer,wt as ConvBertForMaskedLM,Tt as ConvBertForQuestionAnswering,bt as ConvBertForSequenceClassification,Pt as ConvBertForTokenClassification,xt as ConvBertModel,kt as ConvBertPreTrainedModel,yt as ConvBertTokenizer,Ft as ConvNextFeatureExtractor,vt as ConvNextForImageClassification,Ct as ConvNextImageProcessor,St as ConvNextModel,At as ConvNextPreTrainedModel,Et as ConvNextV2ForImageClassification,Lt as ConvNextV2Model,It as ConvNextV2PreTrainedModel,zt as DPTFeatureExtractor,jt as DPTForDepthEstimation,Nt as DPTImageProcessor,Vt as DPTModel,Ot as DPTPreTrainedModel,Bt as DebertaForMaskedLM,Dt as DebertaForQuestionAnswering,Gt as DebertaForSequenceClassification,Rt as DebertaForTokenClassification,qt as DebertaModel,$t as DebertaPreTrainedModel,Wt as DebertaTokenizer,Ut as DebertaV2ForMaskedLM,Qt as DebertaV2ForQuestionAnswering,Xt as DebertaV2ForSequenceClassification,Ht as DebertaV2ForTokenClassification,Jt as DebertaV2Model,Yt as DebertaV2PreTrainedModel,Kt as DebertaV2Tokenizer,Zt as DecisionTransformerModel,es as DecisionTransformerPreTrainedModel,ts as DeiTFeatureExtractor,ss as DeiTForImageClassification,rs as DeiTImageProcessor,os as DeiTModel,ns as DeiTPreTrainedModel,as as DepthAnythingForDepthEstimation,is as DepthAnythingPreTrainedModel,ls as DepthEstimationPipeline,cs as DepthProForDepthEstimation,ds as DepthProPreTrainedModel,us as DetrFeatureExtractor,ps as DetrForObjectDetection,ms as DetrForSegmentation,_s as DetrImageProcessor,hs as DetrModel,fs as DetrObjectDetectionOutput,gs as DetrPreTrainedModel,Ms as DetrSegmentationOutput,ws as Dinov2ForImageClassification,Ts as Dinov2Model,bs as Dinov2PreTrainedModel,Ps as DistilBertForMaskedLM,xs as DistilBertForQuestionAnswering,ks as DistilBertForSequenceClassification,ys as DistilBertForTokenClassification,Fs as DistilBertModel,vs as DistilBertPreTrainedModel,Cs as DistilBertTokenizer,Ss as 
DocumentQuestionAnsweringPipeline,As as DonutFeatureExtractor,Es as DonutImageProcessor,Ls as DonutSwinModel,Is as DonutSwinPreTrainedModel,zs as EfficientNetForImageClassification,js as EfficientNetImageProcessor,Ns as EfficientNetModel,Vs as EfficientNetPreTrainedModel,Os as ElectraForMaskedLM,Bs as ElectraForQuestionAnswering,Ds as ElectraForSequenceClassification,Gs as ElectraForTokenClassification,Rs as ElectraModel,qs as ElectraPreTrainedModel,$s as ElectraTokenizer,Ws as EosTokenCriteria,Us as EsmForMaskedLM,Qs as EsmForSequenceClassification,Xs as EsmForTokenClassification,Hs as EsmModel,Js as EsmPreTrainedModel,Ys as EsmTokenizer,Ks as FFT,Zs as FalconForCausalLM,er as FalconModel,tr as FalconPreTrainedModel,sr as FalconTokenizer,rr as FastViTForImageClassification,or as FastViTModel,nr as FastViTPreTrainedModel,ar as FeatureExtractionPipeline,ir as FeatureExtractor,lr as FillMaskPipeline,cr as Florence2ForConditionalGeneration,dr as Florence2PreTrainedModel,ur as Florence2Processor,pr as ForcedBOSTokenLogitsProcessor,mr as ForcedEOSTokenLogitsProcessor,_r as GLPNFeatureExtractor,hr as GLPNForDepthEstimation,fr as GLPNModel,gr as GLPNPreTrainedModel,Mr as GPT2LMHeadModel,wr as GPT2Model,Tr as GPT2PreTrainedModel,br as GPT2Tokenizer,Pr as GPTBigCodeForCausalLM,xr as GPTBigCodeModel,kr as GPTBigCodePreTrainedModel,yr as GPTJForCausalLM,Fr as GPTJModel,vr as GPTJPreTrainedModel,Cr as GPTNeoForCausalLM,Sr as GPTNeoModel,Ar as GPTNeoPreTrainedModel,Er as GPTNeoXForCausalLM,Lr as GPTNeoXModel,Ir as GPTNeoXPreTrainedModel,zr as GPTNeoXTokenizer,jr as Gemma2ForCausalLM,Nr as Gemma2Model,Vr as Gemma2PreTrainedModel,Or as GemmaForCausalLM,Br as GemmaModel,Dr as GemmaPreTrainedModel,Gr as GemmaTokenizer,Rr as GraniteForCausalLM,qr as GraniteModel,$r as GranitePreTrainedModel,Wr as Grok1Tokenizer,Ur as GroupViTModel,Qr as GroupViTPreTrainedModel,Xr as HerbertTokenizer,Hr as HieraForImageClassification,Jr as HieraModel,Yr as HieraPreTrainedModel,Kr as HubertForCTC,Zr 
as HubertForSequenceClassification,eo as HubertModel,to as HubertPreTrainedModel,so as IJepaForImageClassification,ro as IJepaModel,oo as IJepaPreTrainedModel,no as Idefics3ForConditionalGeneration,ao as Idefics3ImageProcessor,io as Idefics3PreTrainedModel,lo as Idefics3Processor,co as ImageClassificationPipeline,uo as ImageFeatureExtractionPipeline,po as ImageFeatureExtractor,mo as ImageMattingOutput,_o as ImageProcessor,ho as ImageSegmentationPipeline,fo as ImageToImagePipeline,go as ImageToTextPipeline,Mo as InterruptableStoppingCriteria,wo as JAISLMHeadModel,To as JAISModel,bo as JAISPreTrainedModel,Po as JinaCLIPImageProcessor,xo as JinaCLIPModel,ko as JinaCLIPPreTrainedModel,yo as JinaCLIPProcessor,Fo as JinaCLIPTextModel,vo as JinaCLIPVisionModel,Co as LlamaForCausalLM,So as LlamaModel,Ao as LlamaPreTrainedModel,Eo as LlamaTokenizer,Lo as LlavaForConditionalGeneration,Io as LlavaOnevisionForConditionalGeneration,zo as LlavaOnevisionImageProcessor,jo as LlavaPreTrainedModel,No as LogitsProcessor,Vo as LogitsProcessorList,Oo as LogitsWarper,Bo as LongT5ForConditionalGeneration,Do as LongT5Model,Go as LongT5PreTrainedModel,Ro as M2M100ForConditionalGeneration,qo as M2M100Model,$o as M2M100PreTrainedModel,Wo as M2M100Tokenizer,Uo as MBart50Tokenizer,Qo as MBartForCausalLM,Xo as MBartForConditionalGeneration,Ho as MBartForSequenceClassification,Jo as MBartModel,Yo as MBartPreTrainedModel,Ko as MBartTokenizer,Zo as MPNetForMaskedLM,en as MPNetForQuestionAnswering,tn as MPNetForSequenceClassification,sn as MPNetForTokenClassification,rn as MPNetModel,on as MPNetPreTrainedModel,nn as MPNetTokenizer,an as MT5ForConditionalGeneration,ln as MT5Model,cn as MT5PreTrainedModel,dn as MarianMTModel,un as MarianModel,pn as MarianPreTrainedModel,mn as MarianTokenizer,_n as Mask2FormerImageProcessor,hn as MaskFormerFeatureExtractor,fn as MaskFormerForInstanceSegmentation,gn as MaskFormerImageProcessor,Mn as MaskFormerModel,wn as MaskFormerPreTrainedModel,Tn as 
MaskedLMOutput,bn as MaxLengthCriteria,Pn as MgpstrForSceneTextRecognition,xn as MgpstrModelOutput,kn as MgpstrPreTrainedModel,yn as MgpstrProcessor,Fn as MgpstrTokenizer,vn as MinLengthLogitsProcessor,Cn as MinNewTokensLengthLogitsProcessor,Sn as MistralForCausalLM,An as MistralModel,En as MistralPreTrainedModel,Ln as MobileBertForMaskedLM,In as MobileBertForQuestionAnswering,zn as MobileBertForSequenceClassification,jn as MobileBertModel,Nn as MobileBertPreTrainedModel,Vn as MobileBertTokenizer,On as MobileLLMForCausalLM,Bn as MobileLLMModel,Dn as MobileLLMPreTrainedModel,Gn as MobileNetV1FeatureExtractor,Rn as MobileNetV1ForImageClassification,qn as MobileNetV1ImageProcessor,$n as MobileNetV1Model,Wn as MobileNetV1PreTrainedModel,Un as MobileNetV2FeatureExtractor,Qn as MobileNetV2ForImageClassification,Xn as MobileNetV2ImageProcessor,Hn as MobileNetV2Model,Jn as MobileNetV2PreTrainedModel,Yn as MobileNetV3FeatureExtractor,Kn as MobileNetV3ForImageClassification,Zn as MobileNetV3ImageProcessor,ea as MobileNetV3Model,ta as MobileNetV3PreTrainedModel,sa as MobileNetV4FeatureExtractor,ra as MobileNetV4ForImageClassification,oa as MobileNetV4ImageProcessor,na as MobileNetV4Model,aa as MobileNetV4PreTrainedModel,ia as MobileViTFeatureExtractor,la as MobileViTForImageClassification,ca as MobileViTImageProcessor,da as MobileViTModel,ua as MobileViTPreTrainedModel,pa as MobileViTV2ForImageClassification,ma as MobileViTV2Model,_a as MobileViTV2PreTrainedModel,ha as ModelOutput,fa as Moondream1ForConditionalGeneration,ga as MptForCausalLM,Ma as MptModel,wa as MptPreTrainedModel,Ta as MultiModalityCausalLM,ba as MultiModalityPreTrainedModel,Pa as MusicgenForCausalLM,xa as MusicgenForConditionalGeneration,ka as MusicgenModel,ya as MusicgenPreTrainedModel,Fa as NllbTokenizer,va as NoBadWordsLogitsProcessor,Ca as NoRepeatNGramLogitsProcessor,Sa as NomicBertModel,Aa as NomicBertPreTrainedModel,Ea as NougatImageProcessor,La as NougatTokenizer,Ia as OPTForCausalLM,za as 
OPTModel,ja as OPTPreTrainedModel,Na as ObjectDetectionPipeline,Va as Olmo2ForCausalLM,Oa as Olmo2Model,Ba as Olmo2PreTrainedModel,Da as OlmoForCausalLM,Ga as OlmoModel,Ra as OlmoPreTrainedModel,qa as OpenELMForCausalLM,$a as OpenELMModel,Wa as OpenELMPreTrainedModel,Ua as OwlViTFeatureExtractor,Qa as OwlViTForObjectDetection,Xa as OwlViTImageProcessor,Ha as OwlViTModel,Ja as OwlViTPreTrainedModel,Ya as OwlViTProcessor,Ka as Owlv2ForObjectDetection,Za as Owlv2ImageProcessor,ei as Owlv2Model,ti as Owlv2PreTrainedModel,si as PaliGemmaForConditionalGeneration,ri as PaliGemmaPreTrainedModel,oi as PaliGemmaProcessor,ni as PatchTSMixerForPrediction,ai as PatchTSMixerModel,ii as PatchTSMixerPreTrainedModel,li as PatchTSTForPrediction,ci as PatchTSTModel,di as PatchTSTPreTrainedModel,ui as Phi3ForCausalLM,pi as Phi3Model,mi as Phi3PreTrainedModel,_i as PhiForCausalLM,hi as PhiModel,fi as PhiPreTrainedModel,gi as Pipeline,Mi as PreTrainedModel,wi as PreTrainedTokenizer,Ti as PretrainedConfig,bi as PretrainedMixin,Pi as Processor,xi as PvtForImageClassification,ki as PvtImageProcessor,yi as PvtModel,Fi as PvtPreTrainedModel,vi as PyAnnoteFeatureExtractor,Ci as PyAnnoteForAudioFrameClassification,Si as PyAnnoteModel,Ai as PyAnnotePreTrainedModel,Ei as PyAnnoteProcessor,Li as QuestionAnsweringModelOutput,Ii as QuestionAnsweringPipeline,zi as Qwen2ForCausalLM,ji as Qwen2Model,Ni as Qwen2PreTrainedModel,Vi as Qwen2Tokenizer,Oi as Qwen2VLForConditionalGeneration,Bi as Qwen2VLImageProcessor,Di as Qwen2VLPreTrainedModel,Gi as Qwen2VLProcessor,Ri as RTDetrForObjectDetection,qi as RTDetrImageProcessor,$i as RTDetrModel,Wi as RTDetrObjectDetectionOutput,Ui as RTDetrPreTrainedModel,Qi as RawImage,Xi as RepetitionPenaltyLogitsProcessor,Hi as ResNetForImageClassification,Ji as ResNetModel,Yi as ResNetPreTrainedModel,Ki as RoFormerForMaskedLM,Zi as RoFormerForQuestionAnswering,el as RoFormerForSequenceClassification,tl as RoFormerForTokenClassification,sl as RoFormerModel,rl as 
RoFormerPreTrainedModel,ol as RoFormerTokenizer,nl as RobertaForMaskedLM,al as RobertaForQuestionAnswering,il as RobertaForSequenceClassification,ll as RobertaForTokenClassification,cl as RobertaModel,dl as RobertaPreTrainedModel,ul as RobertaTokenizer,pl as SamImageProcessor,ml as SamImageSegmentationOutput,_l as SamModel,hl as SamPreTrainedModel,fl as SamProcessor,gl as SapiensForDepthEstimation,Ml as SapiensForNormalEstimation,wl as SapiensForSemanticSegmentation,Tl as SapiensPreTrainedModel,bl as SeamlessM4TFeatureExtractor,Pl as SegformerFeatureExtractor,xl as SegformerForImageClassification,kl as SegformerForSemanticSegmentation,yl as SegformerImageProcessor,Fl as SegformerModel,vl as SegformerPreTrainedModel,Cl as Seq2SeqLMOutput,Sl as SequenceClassifierOutput,Al as SiglipImageProcessor,El as SiglipModel,Ll as SiglipPreTrainedModel,Il as SiglipTextModel,zl as SiglipTokenizer,jl as SiglipVisionModel,Nl as SpeechT5FeatureExtractor,Vl as SpeechT5ForSpeechToText,Ol as SpeechT5ForTextToSpeech,Bl as SpeechT5HifiGan,Dl as SpeechT5Model,Gl as SpeechT5PreTrainedModel,Rl as SpeechT5Processor,ql as SpeechT5Tokenizer,$l as SqueezeBertForMaskedLM,Wl as SqueezeBertForQuestionAnswering,Ul as SqueezeBertForSequenceClassification,Ql as SqueezeBertModel,Xl as SqueezeBertPreTrainedModel,Hl as SqueezeBertTokenizer,Jl as StableLmForCausalLM,Yl as StableLmModel,Kl as StableLmPreTrainedModel,Zl as Starcoder2ForCausalLM,ec as Starcoder2Model,tc as Starcoder2PreTrainedModel,sc as StoppingCriteria,rc as StoppingCriteriaList,oc as SummarizationPipeline,nc as SuppressTokensAtBeginLogitsProcessor,ac as Swin2SRForImageSuperResolution,ic as Swin2SRImageProcessor,lc as Swin2SRModel,cc as Swin2SRPreTrainedModel,dc as SwinForImageClassification,uc as SwinModel,pc as SwinPreTrainedModel,mc as T5ForConditionalGeneration,_c as T5Model,hc as T5PreTrainedModel,fc as T5Tokenizer,gc as TableTransformerForObjectDetection,Mc as TableTransformerModel,wc as TableTransformerObjectDetectionOutput,Tc as 
TableTransformerPreTrainedModel,bc as TemperatureLogitsWarper,Pc as Tensor,xc as Text2TextGenerationPipeline,kc as TextClassificationPipeline,yc as TextGenerationPipeline,Fc as TextStreamer,vc as TextToAudioPipeline,Cc as TokenClassificationPipeline,Sc as TokenClassifierOutput,Ac as TokenizerModel,Ec as TopKLogitsWarper,Lc as TopPLogitsWarper,Ic as TrOCRForCausalLM,zc as TrOCRPreTrainedModel,jc as TranslationPipeline,Nc as UniSpeechForCTC,Vc as UniSpeechForSequenceClassification,Oc as UniSpeechModel,Bc as UniSpeechPreTrainedModel,Dc as UniSpeechSatForAudioFrameClassification,Gc as UniSpeechSatForCTC,Rc as UniSpeechSatForSequenceClassification,qc as UniSpeechSatModel,$c as UniSpeechSatPreTrainedModel,Wc as VLChatProcessor,Uc as VLMImageProcessor,Qc as ViTFeatureExtractor,Xc as ViTForImageClassification,Hc as ViTImageProcessor,Jc as ViTMAEModel,Yc as ViTMAEPreTrainedModel,Kc as ViTMSNForImageClassification,Zc as ViTMSNModel,ed as ViTMSNPreTrainedModel,td as ViTModel,sd as ViTPreTrainedModel,rd as VisionEncoderDecoderModel,od as VitMatteForImageMatting,nd as VitMatteImageProcessor,ad as VitMattePreTrainedModel,id as VitPoseForPoseEstimation,ld as VitPoseImageProcessor,cd as VitPosePreTrainedModel,dd as VitsModel,ud as VitsModelOutput,pd as VitsPreTrainedModel,md as VitsTokenizer,_d as Wav2Vec2BertForCTC,hd as Wav2Vec2BertForSequenceClassification,fd as Wav2Vec2BertModel,gd as Wav2Vec2BertPreTrainedModel,Md as Wav2Vec2CTCTokenizer,wd as Wav2Vec2FeatureExtractor,Td as Wav2Vec2ForAudioFrameClassification,bd as Wav2Vec2ForCTC,Pd as Wav2Vec2ForSequenceClassification,xd as Wav2Vec2Model,kd as Wav2Vec2PreTrainedModel,yd as Wav2Vec2ProcessorWithLM,Fd as WavLMForAudioFrameClassification,vd as WavLMForCTC,Cd as WavLMForSequenceClassification,Sd as WavLMForXVector,Ad as WavLMModel,Ed as WavLMPreTrainedModel,Ld as WeSpeakerFeatureExtractor,Id as WeSpeakerResNetModel,zd as WeSpeakerResNetPreTrainedModel,jd as WhisperFeatureExtractor,Nd as WhisperForConditionalGeneration,Vd as 
WhisperModel,Od as WhisperPreTrainedModel,Bd as WhisperProcessor,Dd as WhisperTextStreamer,Gd as WhisperTimeStampLogitsProcessor,Rd as WhisperTokenizer,qd as XLMForQuestionAnswering,$d as XLMForSequenceClassification,Wd as XLMForTokenClassification,Ud as XLMModel,Qd as XLMPreTrainedModel,Xd as XLMRobertaForMaskedLM,Hd as XLMRobertaForQuestionAnswering,Jd as XLMRobertaForSequenceClassification,Yd as XLMRobertaForTokenClassification,Kd as XLMRobertaModel,Zd as XLMRobertaPreTrainedModel,eu as XLMRobertaTokenizer,tu as XLMTokenizer,su as XLMWithLMHeadModel,ru as XVectorOutput,ou as YolosFeatureExtractor,nu as YolosForObjectDetection,au as YolosImageProcessor,iu as YolosModel,lu as YolosObjectDetectionOutput,cu as YolosPreTrainedModel,du as ZeroShotAudioClassificationPipeline,uu as ZeroShotClassificationPipeline,pu as ZeroShotImageClassificationPipeline,mu as ZeroShotObjectDetectionPipeline,_u as bankers_round,hu as cat,fu as cos_sim,gu as dot,Mu as dynamic_time_warping,wu as env,Tu as full,bu as full_like,Pu as getKeyValueShapes,xu as hamming,ku as hanning,yu as interpolate,Fu as interpolate_4d,vu as interpolate_data,Cu as is_chinese_char,Su as layer_norm,Au as load_image,Eu as log_softmax,Lu as magnitude,Iu as matmul,zu as max,ju as mean,Nu as mean_pooling,Vu as medianFilter,Ou as mel_filter_bank,Bu as min,Du as ones,Gu as ones_like,Ru as permute,qu as permute_data,$u as pipeline,Wu as quantize_embeddings,Uu as read_audio,Qu as rfft,Xu as round,Hu as softmax,Ju as spectrogram,Yu as stack,Ku as std_mean,Zu as topk,ep as window_function,tp as zeros,sp as zeros_like};
359
2
  //# sourceMappingURL=transformers.min.mjs.map