@huggingface/tasks 0.13.4 → 0.13.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (221)
  1. package/dist/commonjs/hardware.d.ts +12 -0
  2. package/dist/commonjs/hardware.d.ts.map +1 -1
  3. package/dist/commonjs/hardware.js +15 -3
  4. package/dist/commonjs/local-apps.d.ts.map +1 -1
  5. package/dist/commonjs/local-apps.js +40 -21
  6. package/dist/commonjs/local-apps.spec.d.ts +2 -0
  7. package/dist/commonjs/local-apps.spec.d.ts.map +1 -0
  8. package/dist/commonjs/local-apps.spec.js +114 -0
  9. package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -1
  10. package/dist/commonjs/model-libraries-snippets.js +23 -12
  11. package/dist/commonjs/model-libraries-snippets.spec.d.ts +2 -0
  12. package/dist/commonjs/model-libraries-snippets.spec.d.ts.map +1 -0
  13. package/dist/commonjs/model-libraries-snippets.spec.js +51 -0
  14. package/dist/commonjs/snippets/curl.js +4 -4
  15. package/dist/commonjs/snippets/js.js +5 -5
  16. package/dist/commonjs/tasks/audio-classification/inference.d.ts +4 -3
  17. package/dist/commonjs/tasks/audio-classification/inference.d.ts.map +1 -1
  18. package/dist/commonjs/tasks/automatic-speech-recognition/inference.d.ts +1 -5
  19. package/dist/commonjs/tasks/automatic-speech-recognition/inference.d.ts.map +1 -1
  20. package/dist/commonjs/tasks/chat-completion/inference.d.ts +18 -6
  21. package/dist/commonjs/tasks/chat-completion/inference.d.ts.map +1 -1
  22. package/dist/commonjs/tasks/depth-estimation/inference.d.ts +1 -1
  23. package/dist/commonjs/tasks/document-question-answering/inference.d.ts +1 -3
  24. package/dist/commonjs/tasks/document-question-answering/inference.d.ts.map +1 -1
  25. package/dist/commonjs/tasks/feature-extraction/inference.d.ts +1 -1
  26. package/dist/commonjs/tasks/fill-mask/inference.d.ts +1 -3
  27. package/dist/commonjs/tasks/fill-mask/inference.d.ts.map +1 -1
  28. package/dist/commonjs/tasks/image-classification/inference.d.ts +4 -3
  29. package/dist/commonjs/tasks/image-classification/inference.d.ts.map +1 -1
  30. package/dist/commonjs/tasks/image-segmentation/inference.d.ts +1 -3
  31. package/dist/commonjs/tasks/image-segmentation/inference.d.ts.map +1 -1
  32. package/dist/commonjs/tasks/image-to-image/inference.d.ts +1 -3
  33. package/dist/commonjs/tasks/image-to-image/inference.d.ts.map +1 -1
  34. package/dist/commonjs/tasks/image-to-text/inference.d.ts +1 -5
  35. package/dist/commonjs/tasks/image-to-text/inference.d.ts.map +1 -1
  36. package/dist/commonjs/tasks/object-detection/inference.d.ts +1 -3
  37. package/dist/commonjs/tasks/object-detection/inference.d.ts.map +1 -1
  38. package/dist/commonjs/tasks/question-answering/inference.d.ts +1 -3
  39. package/dist/commonjs/tasks/question-answering/inference.d.ts.map +1 -1
  40. package/dist/commonjs/tasks/sentence-similarity/inference.d.ts +1 -1
  41. package/dist/commonjs/tasks/summarization/inference.d.ts +1 -3
  42. package/dist/commonjs/tasks/summarization/inference.d.ts.map +1 -1
  43. package/dist/commonjs/tasks/table-question-answering/inference.d.ts +1 -1
  44. package/dist/commonjs/tasks/text-classification/inference.d.ts +4 -3
  45. package/dist/commonjs/tasks/text-classification/inference.d.ts.map +1 -1
  46. package/dist/commonjs/tasks/text-to-audio/inference.d.ts +1 -5
  47. package/dist/commonjs/tasks/text-to-audio/inference.d.ts.map +1 -1
  48. package/dist/commonjs/tasks/text-to-image/inference.d.ts +1 -3
  49. package/dist/commonjs/tasks/text-to-image/inference.d.ts.map +1 -1
  50. package/dist/commonjs/tasks/text-to-speech/inference.d.ts +1 -5
  51. package/dist/commonjs/tasks/text-to-speech/inference.d.ts.map +1 -1
  52. package/dist/commonjs/tasks/text2text-generation/inference.d.ts +1 -3
  53. package/dist/commonjs/tasks/text2text-generation/inference.d.ts.map +1 -1
  54. package/dist/commonjs/tasks/token-classification/inference.d.ts +1 -3
  55. package/dist/commonjs/tasks/token-classification/inference.d.ts.map +1 -1
  56. package/dist/commonjs/tasks/translation/inference.d.ts +1 -3
  57. package/dist/commonjs/tasks/translation/inference.d.ts.map +1 -1
  58. package/dist/commonjs/tasks/video-classification/inference.d.ts +4 -3
  59. package/dist/commonjs/tasks/video-classification/inference.d.ts.map +1 -1
  60. package/dist/commonjs/tasks/visual-question-answering/inference.d.ts +1 -3
  61. package/dist/commonjs/tasks/visual-question-answering/inference.d.ts.map +1 -1
  62. package/dist/commonjs/tasks/zero-shot-classification/inference.d.ts +1 -3
  63. package/dist/commonjs/tasks/zero-shot-classification/inference.d.ts.map +1 -1
  64. package/dist/commonjs/tasks/zero-shot-image-classification/inference.d.ts +1 -3
  65. package/dist/commonjs/tasks/zero-shot-image-classification/inference.d.ts.map +1 -1
  66. package/dist/commonjs/tasks/zero-shot-object-detection/inference.d.ts +1 -3
  67. package/dist/commonjs/tasks/zero-shot-object-detection/inference.d.ts.map +1 -1
  68. package/dist/esm/hardware.d.ts +12 -0
  69. package/dist/esm/hardware.d.ts.map +1 -1
  70. package/dist/esm/hardware.js +15 -3
  71. package/dist/esm/local-apps.d.ts.map +1 -1
  72. package/dist/esm/local-apps.js +40 -21
  73. package/dist/esm/local-apps.spec.d.ts +2 -0
  74. package/dist/esm/local-apps.spec.d.ts.map +1 -0
  75. package/dist/esm/local-apps.spec.js +112 -0
  76. package/dist/esm/model-libraries-snippets.d.ts.map +1 -1
  77. package/dist/esm/model-libraries-snippets.js +23 -12
  78. package/dist/esm/model-libraries-snippets.spec.d.ts +2 -0
  79. package/dist/esm/model-libraries-snippets.spec.d.ts.map +1 -0
  80. package/dist/esm/model-libraries-snippets.spec.js +49 -0
  81. package/dist/esm/snippets/curl.js +4 -4
  82. package/dist/esm/snippets/js.js +5 -5
  83. package/dist/esm/tasks/audio-classification/inference.d.ts +4 -3
  84. package/dist/esm/tasks/audio-classification/inference.d.ts.map +1 -1
  85. package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts +1 -5
  86. package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts.map +1 -1
  87. package/dist/esm/tasks/chat-completion/inference.d.ts +18 -6
  88. package/dist/esm/tasks/chat-completion/inference.d.ts.map +1 -1
  89. package/dist/esm/tasks/depth-estimation/inference.d.ts +1 -1
  90. package/dist/esm/tasks/document-question-answering/inference.d.ts +1 -3
  91. package/dist/esm/tasks/document-question-answering/inference.d.ts.map +1 -1
  92. package/dist/esm/tasks/feature-extraction/inference.d.ts +1 -1
  93. package/dist/esm/tasks/fill-mask/inference.d.ts +1 -3
  94. package/dist/esm/tasks/fill-mask/inference.d.ts.map +1 -1
  95. package/dist/esm/tasks/image-classification/inference.d.ts +4 -3
  96. package/dist/esm/tasks/image-classification/inference.d.ts.map +1 -1
  97. package/dist/esm/tasks/image-segmentation/inference.d.ts +1 -3
  98. package/dist/esm/tasks/image-segmentation/inference.d.ts.map +1 -1
  99. package/dist/esm/tasks/image-to-image/inference.d.ts +1 -3
  100. package/dist/esm/tasks/image-to-image/inference.d.ts.map +1 -1
  101. package/dist/esm/tasks/image-to-text/inference.d.ts +1 -5
  102. package/dist/esm/tasks/image-to-text/inference.d.ts.map +1 -1
  103. package/dist/esm/tasks/object-detection/inference.d.ts +1 -3
  104. package/dist/esm/tasks/object-detection/inference.d.ts.map +1 -1
  105. package/dist/esm/tasks/question-answering/inference.d.ts +1 -3
  106. package/dist/esm/tasks/question-answering/inference.d.ts.map +1 -1
  107. package/dist/esm/tasks/sentence-similarity/inference.d.ts +1 -1
  108. package/dist/esm/tasks/summarization/inference.d.ts +1 -3
  109. package/dist/esm/tasks/summarization/inference.d.ts.map +1 -1
  110. package/dist/esm/tasks/table-question-answering/inference.d.ts +1 -1
  111. package/dist/esm/tasks/text-classification/inference.d.ts +4 -3
  112. package/dist/esm/tasks/text-classification/inference.d.ts.map +1 -1
  113. package/dist/esm/tasks/text-to-audio/inference.d.ts +1 -5
  114. package/dist/esm/tasks/text-to-audio/inference.d.ts.map +1 -1
  115. package/dist/esm/tasks/text-to-image/inference.d.ts +1 -3
  116. package/dist/esm/tasks/text-to-image/inference.d.ts.map +1 -1
  117. package/dist/esm/tasks/text-to-speech/inference.d.ts +1 -5
  118. package/dist/esm/tasks/text-to-speech/inference.d.ts.map +1 -1
  119. package/dist/esm/tasks/text2text-generation/inference.d.ts +1 -3
  120. package/dist/esm/tasks/text2text-generation/inference.d.ts.map +1 -1
  121. package/dist/esm/tasks/token-classification/inference.d.ts +1 -3
  122. package/dist/esm/tasks/token-classification/inference.d.ts.map +1 -1
  123. package/dist/esm/tasks/translation/inference.d.ts +1 -3
  124. package/dist/esm/tasks/translation/inference.d.ts.map +1 -1
  125. package/dist/esm/tasks/video-classification/inference.d.ts +4 -3
  126. package/dist/esm/tasks/video-classification/inference.d.ts.map +1 -1
  127. package/dist/esm/tasks/visual-question-answering/inference.d.ts +1 -3
  128. package/dist/esm/tasks/visual-question-answering/inference.d.ts.map +1 -1
  129. package/dist/esm/tasks/zero-shot-classification/inference.d.ts +1 -3
  130. package/dist/esm/tasks/zero-shot-classification/inference.d.ts.map +1 -1
  131. package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts +1 -3
  132. package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts.map +1 -1
  133. package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts +1 -3
  134. package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts.map +1 -1
  135. package/package.json +1 -1
  136. package/src/hardware.ts +15 -3
  137. package/src/local-apps.spec.ts +123 -0
  138. package/src/local-apps.ts +37 -18
  139. package/src/model-libraries-snippets.spec.ts +54 -0
  140. package/src/model-libraries-snippets.ts +24 -11
  141. package/src/snippets/curl.ts +4 -4
  142. package/src/snippets/js.ts +5 -5
  143. package/src/tasks/audio-classification/inference.ts +4 -3
  144. package/src/tasks/audio-classification/spec/input.json +3 -3
  145. package/src/tasks/automatic-speech-recognition/inference.ts +1 -5
  146. package/src/tasks/automatic-speech-recognition/spec/input.json +1 -2
  147. package/src/tasks/chat-completion/inference.ts +19 -6
  148. package/src/tasks/chat-completion/spec/input.json +14 -19
  149. package/src/tasks/common-definitions.json +0 -1
  150. package/src/tasks/depth-estimation/inference.ts +1 -1
  151. package/src/tasks/depth-estimation/spec/input.json +1 -2
  152. package/src/tasks/document-question-answering/inference.ts +1 -3
  153. package/src/tasks/document-question-answering/spec/input.json +1 -2
  154. package/src/tasks/feature-extraction/inference.ts +1 -1
  155. package/src/tasks/feature-extraction/spec/input.json +1 -1
  156. package/src/tasks/fill-mask/inference.ts +1 -3
  157. package/src/tasks/fill-mask/spec/input.json +1 -2
  158. package/src/tasks/image-classification/inference.ts +4 -3
  159. package/src/tasks/image-classification/spec/input.json +3 -3
  160. package/src/tasks/image-segmentation/inference.ts +1 -3
  161. package/src/tasks/image-segmentation/spec/input.json +1 -2
  162. package/src/tasks/image-to-image/inference.ts +1 -3
  163. package/src/tasks/image-to-image/spec/input.json +1 -2
  164. package/src/tasks/image-to-text/inference.ts +1 -5
  165. package/src/tasks/image-to-text/spec/input.json +1 -2
  166. package/src/tasks/object-detection/inference.ts +1 -3
  167. package/src/tasks/object-detection/spec/input.json +1 -2
  168. package/src/tasks/placeholder/spec/input.json +1 -2
  169. package/src/tasks/question-answering/inference.ts +1 -3
  170. package/src/tasks/question-answering/spec/input.json +1 -2
  171. package/src/tasks/sentence-similarity/inference.ts +1 -1
  172. package/src/tasks/sentence-similarity/spec/input.json +1 -2
  173. package/src/tasks/summarization/inference.ts +1 -3
  174. package/src/tasks/summarization/spec/input.json +1 -2
  175. package/src/tasks/table-question-answering/inference.ts +1 -1
  176. package/src/tasks/table-question-answering/spec/input.json +1 -2
  177. package/src/tasks/text-classification/inference.ts +4 -3
  178. package/src/tasks/text-classification/spec/input.json +3 -3
  179. package/src/tasks/text-to-audio/inference.ts +1 -5
  180. package/src/tasks/text-to-audio/spec/input.json +1 -2
  181. package/src/tasks/text-to-image/inference.ts +1 -3
  182. package/src/tasks/text-to-image/spec/input.json +1 -2
  183. package/src/tasks/text-to-speech/inference.ts +1 -5
  184. package/src/tasks/text-to-speech/spec/input.json +1 -2
  185. package/src/tasks/text2text-generation/inference.ts +1 -3
  186. package/src/tasks/text2text-generation/spec/input.json +1 -2
  187. package/src/tasks/token-classification/inference.ts +1 -3
  188. package/src/tasks/token-classification/spec/input.json +1 -2
  189. package/src/tasks/translation/inference.ts +1 -3
  190. package/src/tasks/translation/spec/input.json +1 -2
  191. package/src/tasks/video-classification/inference.ts +4 -3
  192. package/src/tasks/video-classification/spec/input.json +3 -3
  193. package/src/tasks/visual-question-answering/inference.ts +1 -3
  194. package/src/tasks/visual-question-answering/spec/input.json +1 -2
  195. package/src/tasks/zero-shot-classification/inference.ts +1 -3
  196. package/src/tasks/zero-shot-classification/spec/input.json +1 -2
  197. package/src/tasks/zero-shot-image-classification/inference.ts +1 -3
  198. package/src/tasks/zero-shot-image-classification/spec/input.json +1 -2
  199. package/src/tasks/zero-shot-object-detection/inference.ts +1 -3
  200. package/src/tasks/zero-shot-object-detection/spec/input.json +1 -2
  201. package/dist/commonjs/snippets/curl.spec.d.ts +0 -2
  202. package/dist/commonjs/snippets/curl.spec.d.ts.map +0 -1
  203. package/dist/commonjs/snippets/curl.spec.js +0 -89
  204. package/dist/commonjs/snippets/js.spec.d.ts +0 -2
  205. package/dist/commonjs/snippets/js.spec.d.ts.map +0 -1
  206. package/dist/commonjs/snippets/js.spec.js +0 -141
  207. package/dist/commonjs/snippets/python.spec.d.ts +0 -2
  208. package/dist/commonjs/snippets/python.spec.d.ts.map +0 -1
  209. package/dist/commonjs/snippets/python.spec.js +0 -135
  210. package/dist/esm/snippets/curl.spec.d.ts +0 -2
  211. package/dist/esm/snippets/curl.spec.d.ts.map +0 -1
  212. package/dist/esm/snippets/curl.spec.js +0 -87
  213. package/dist/esm/snippets/js.spec.d.ts +0 -2
  214. package/dist/esm/snippets/js.spec.d.ts.map +0 -1
  215. package/dist/esm/snippets/js.spec.js +0 -139
  216. package/dist/esm/snippets/python.spec.d.ts +0 -2
  217. package/dist/esm/snippets/python.spec.d.ts.map +0 -1
  218. package/dist/esm/snippets/python.spec.js +0 -133
  219. package/src/snippets/curl.spec.ts +0 -94
  220. package/src/snippets/js.spec.ts +0 -148
  221. package/src/snippets/python.spec.ts +0 -144
@@ -1 +1 @@
1
- {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-image-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,EAAE,qCAAqC,CAAC;IAClD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,qCAAqC;IACrD;;OAEG;IACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;IAC3B;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,iCAAiC,GAAG,wCAAwC,EAAE,CAAC;AAC3F;;GAEG;AACH,MAAM,WAAW,wCAAwC;IACxD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-image-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,EAAE,qCAAqC,CAAC;IAClD,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,qCAAqC;IACrD;;OAEG;IACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;IAC3B;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,iCAAiC,GAAG,wCAAwC,EAAE,CAAC;AAC3F;;GAEG;AACH,MAAM,WAAW,wCAAwC;IACxD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
@@ -12,14 +12,12 @@ export interface ZeroShotObjectDetectionInput {
12
12
  */
13
13
  inputs: string;
14
14
  /**
15
- * Additional inference parameters
15
+ * Additional inference parameters for Zero Shot Object Detection
16
16
  */
17
17
  parameters: ZeroShotObjectDetectionParameters;
18
18
  [property: string]: unknown;
19
19
  }
20
20
  /**
21
- * Additional inference parameters
22
- *
23
21
  * Additional inference parameters for Zero Shot Object Detection
24
22
  */
25
23
  export interface ZeroShotObjectDetectionParameters {
@@ -1 +1 @@
1
- {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-object-detection/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,EAAE,iCAAiC,CAAC;IAC9C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,iCAAiC;IACjD;;OAEG;IACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;IAC3B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;GAGG;AACH,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,6BAA6B,GAAG,oCAAoC,EAAE,CAAC;AACnF;;GAEG;AACH,MAAM,WAAW,oCAAoC;IACpD;;;OAGG;IACH,GAAG,EAAE,WAAW,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-object-detection/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,UAAU,EAAE,iCAAiC,CAAC;IAC9C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,iCAAiC;IACjD;;OAEG;IACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;IAC3B,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;GAGG;AACH,MAAM,WAAW,WAAW;IAC3B,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD,MAAM,MAAM,6BAA6B,GAAG,oCAAoC,EAAE,CAAC;AACnF;;GAEG;AACH,MAAM,WAAW,oCAAoC;IACpD;;;OAGG;IACH,GAAG,EAAE,WAAW,CAAC;IACjB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@huggingface/tasks",
3
3
  "packageManager": "pnpm@8.10.5",
4
- "version": "0.13.4",
4
+ "version": "0.13.6",
5
5
  "description": "List of ML tasks for huggingface.co/tasks",
6
6
  "repository": "https://github.com/huggingface/huggingface.js.git",
7
7
  "publishConfig": {
package/src/hardware.ts CHANGED
@@ -452,7 +452,7 @@ export const SKUS = {
452
452
  memory: [8, 16, 24],
453
453
  },
454
454
  "Apple M2 Pro": {
455
- tflops: 13.6,
455
+ tflops: 6.8,
456
456
  memory: [16, 24, 32],
457
457
  },
458
458
  "Apple M2 Max": {
@@ -464,17 +464,29 @@ export const SKUS = {
464
464
  memory: [64, 96, 128, 192],
465
465
  },
466
466
  "Apple M3": {
467
- tflops: 2.84,
467
+ tflops: 4.1,
468
468
  memory: [8, 16, 24],
469
469
  },
470
470
  "Apple M3 Pro": {
471
- tflops: 14,
471
+ tflops: 7.4,
472
472
  memory: [18, 36],
473
473
  },
474
474
  "Apple M3 Max": {
475
475
  tflops: 14.2,
476
476
  memory: [36, 48, 64, 96, 128],
477
477
  },
478
+ "Apple M4": {
479
+ tflops: 4.6,
480
+ memory: [16, 24, 32],
481
+ },
482
+ "Apple M4 Pro": {
483
+ tflops: 9.2,
484
+ memory: [24, 48],
485
+ },
486
+ "Apple M4 Max": {
487
+ tflops: 18.4,
488
+ memory: [36, 48, 64, 128],
489
+ },
478
490
  },
479
491
  },
480
492
  } satisfies Record<string, Record<string, Record<string, HardwareSpec>>>;
@@ -0,0 +1,123 @@
1
+ import { describe, expect, it } from "vitest";
2
+ import { LOCAL_APPS } from "./local-apps.js";
3
+ import type { ModelData } from "./model-data.js";
4
+
5
+ describe("local-apps", () => {
6
+ it("llama.cpp conversational", async () => {
7
+ const { snippet: snippetFunc } = LOCAL_APPS["llama.cpp"];
8
+ const model: ModelData = {
9
+ id: "bartowski/Llama-3.2-3B-Instruct-GGUF",
10
+ tags: ["conversational"],
11
+ inference: "",
12
+ };
13
+ const snippet = snippetFunc(model);
14
+
15
+ expect(snippet[0].content).toEqual(`# Load and run the model:
16
+ llama-cli \\
17
+ --hf-repo "bartowski/Llama-3.2-3B-Instruct-GGUF" \\
18
+ --hf-file {{GGUF_FILE}} \\
19
+ -p "You are a helpful assistant" \\
20
+ --conversation`);
21
+ });
22
+
23
+ it("llama.cpp non-conversational", async () => {
24
+ const { snippet: snippetFunc } = LOCAL_APPS["llama.cpp"];
25
+ const model: ModelData = {
26
+ id: "mlabonne/gemma-2b-GGUF",
27
+ tags: [],
28
+ inference: "",
29
+ };
30
+ const snippet = snippetFunc(model);
31
+
32
+ expect(snippet[0].content).toEqual(`# Load and run the model:
33
+ llama-cli \\
34
+ --hf-repo "mlabonne/gemma-2b-GGUF" \\
35
+ --hf-file {{GGUF_FILE}} \\
36
+ -p "Once upon a time,"`);
37
+ });
38
+
39
+ it("vLLM conversational llm", async () => {
40
+ const { snippet: snippetFunc } = LOCAL_APPS["vllm"];
41
+ const model: ModelData = {
42
+ id: "meta-llama/Llama-3.2-3B-Instruct",
43
+ pipeline_tag: "text-generation",
44
+ tags: ["conversational"],
45
+ inference: "",
46
+ };
47
+ const snippet = snippetFunc(model);
48
+
49
+ expect((snippet[0].content as string[]).join("\n")).toEqual(`# Load and run the model:
50
+ vllm serve "meta-llama/Llama-3.2-3B-Instruct"
51
+ # Call the server using curl:
52
+ curl -X POST "http://localhost:8000/v1/chat/completions" \\
53
+ -H "Content-Type: application/json" \\
54
+ --data '{
55
+ "model": "meta-llama/Llama-3.2-3B-Instruct",
56
+ "messages": [
57
+ {
58
+ "role": "user",
59
+ "content": "What is the capital of France?"
60
+ }
61
+ ]
62
+ }'`);
63
+ });
64
+
65
+ it("vLLM non-conversational llm", async () => {
66
+ const { snippet: snippetFunc } = LOCAL_APPS["vllm"];
67
+ const model: ModelData = {
68
+ id: "meta-llama/Llama-3.2-3B",
69
+ tags: [""],
70
+ inference: "",
71
+ };
72
+ const snippet = snippetFunc(model);
73
+
74
+ expect((snippet[0].content as string[]).join("\n")).toEqual(`# Load and run the model:
75
+ vllm serve "meta-llama/Llama-3.2-3B"
76
+ # Call the server using curl:
77
+ curl -X POST "http://localhost:8000/v1/completions" \\
78
+ -H "Content-Type: application/json" \\
79
+ --data '{
80
+ "model": "meta-llama/Llama-3.2-3B",
81
+ "prompt": "Once upon a time,",
82
+ "max_tokens": 512,
83
+ "temperature": 0.5
84
+ }'`);
85
+ });
86
+
87
+ it("vLLM conversational vlm", async () => {
88
+ const { snippet: snippetFunc } = LOCAL_APPS["vllm"];
89
+ const model: ModelData = {
90
+ id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
91
+ pipeline_tag: "image-text-to-text",
92
+ tags: ["conversational"],
93
+ inference: "",
94
+ };
95
+ const snippet = snippetFunc(model);
96
+
97
+ expect((snippet[0].content as string[]).join("\n")).toEqual(`# Load and run the model:
98
+ vllm serve "meta-llama/Llama-3.2-11B-Vision-Instruct"
99
+ # Call the server using curl:
100
+ curl -X POST "http://localhost:8000/v1/chat/completions" \\
101
+ -H "Content-Type: application/json" \\
102
+ --data '{
103
+ "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
104
+ "messages": [
105
+ {
106
+ "role": "user",
107
+ "content": [
108
+ {
109
+ "type": "text",
110
+ "text": "Describe this image in one sentence."
111
+ },
112
+ {
113
+ "type": "image_url",
114
+ "image_url": {
115
+ "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
116
+ }
117
+ }
118
+ ]
119
+ }
120
+ ]
121
+ }'`);
122
+ });
123
+ });
package/src/local-apps.ts CHANGED
@@ -1,6 +1,9 @@
1
1
  import { parseGGUFQuantLabel } from "./gguf.js";
2
2
  import type { ModelData } from "./model-data.js";
3
3
  import type { PipelineType } from "./pipelines.js";
4
+ import { stringifyMessages } from "./snippets/common.js";
5
+ import { getModelInputSnippet } from "./snippets/inputs.js";
6
+ import type { ChatCompletionInputMessage } from "./tasks/index.js";
4
7
 
5
8
  export interface LocalAppSnippet {
6
9
  /**
@@ -92,15 +95,20 @@ function isMlxModel(model: ModelData) {
92
95
  }
93
96
 
94
97
  const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
95
- const command = (binary: string) =>
96
- [
98
+ const command = (binary: string) => {
99
+ const snippet = [
97
100
  "# Load and run the model:",
98
101
  `${binary} \\`,
99
102
  ` --hf-repo "${model.id}" \\`,
100
103
  ` --hf-file ${filepath ?? "{{GGUF_FILE}}"} \\`,
101
- ' -p "You are a helpful assistant" \\',
102
- " --conversation",
103
- ].join("\n");
104
+ ` -p "${model.tags.includes("conversational") ? "You are a helpful assistant" : "Once upon a time,"}"`,
105
+ ];
106
+ if (model.tags.includes("conversational")) {
107
+ snippet[snippet.length - 1] += " \\";
108
+ snippet.push(" --conversation");
109
+ }
110
+ return snippet.join("\n");
111
+ };
104
112
  return [
105
113
  {
106
114
  title: "Install from brew",
@@ -178,22 +186,33 @@ const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[]
178
186
  };
179
187
 
180
188
  const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
181
- const runCommand = [
182
- "# Call the server using curl:",
183
- `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
184
- ` -H "Content-Type: application/json" \\`,
185
- ` --data '{`,
186
- ` "model": "${model.id}",`,
187
- ` "messages": [`,
188
- ` {"role": "user", "content": "Hello!"}`,
189
- ` ]`,
190
- ` }'`,
191
- ];
189
+ const messages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
190
+ const runCommandInstruct = `# Call the server using curl:
191
+ curl -X POST "http://localhost:8000/v1/chat/completions" \\
192
+ -H "Content-Type: application/json" \\
193
+ --data '{
194
+ "model": "${model.id}",
195
+ "messages": ${stringifyMessages(messages, {
196
+ indent: "\t\t",
197
+ attributeKeyQuotes: true,
198
+ customContentEscaper: (str) => str.replace(/'/g, "'\\''"),
199
+ })}
200
+ }'`;
201
+ const runCommandNonInstruct = `# Call the server using curl:
202
+ curl -X POST "http://localhost:8000/v1/completions" \\
203
+ -H "Content-Type: application/json" \\
204
+ --data '{
205
+ "model": "${model.id}",
206
+ "prompt": "Once upon a time,",
207
+ "max_tokens": 512,
208
+ "temperature": 0.5
209
+ }'`;
210
+ const runCommand = model.tags.includes("conversational") ? runCommandInstruct : runCommandNonInstruct;
192
211
  return [
193
212
  {
194
213
  title: "Install from pip",
195
214
  setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
196
- content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand.join("\n")],
215
+ content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand],
197
216
  },
198
217
  {
199
218
  title: "Use Docker images",
@@ -210,7 +229,7 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
210
229
  ].join("\n"),
211
230
  content: [
212
231
  `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
213
- runCommand.join("\n"),
232
+ runCommand,
214
233
  ],
215
234
  },
216
235
  ];
@@ -0,0 +1,54 @@
1
+ import { describe, expect, it } from "vitest";
2
+ import type { ModelData } from "./model-data.js";
3
+ import { llama_cpp_python } from "./model-libraries-snippets.js";
4
+
5
+ describe("model-libraries-snippets", () => {
6
+ it("llama_cpp_python conversational", async () => {
7
+ const model: ModelData = {
8
+ id: "bartowski/Llama-3.2-3B-Instruct-GGUF",
9
+ pipeline_tag: "text-generation",
10
+ tags: ["conversational"],
11
+ inference: "",
12
+ };
13
+ const snippet = llama_cpp_python(model);
14
+
15
+ expect(snippet.join("\n")).toEqual(`from llama_cpp import Llama
16
+
17
+ llm = Llama.from_pretrained(
18
+ repo_id="bartowski/Llama-3.2-3B-Instruct-GGUF",
19
+ filename="{{GGUF_FILE}}",
20
+ )
21
+
22
+ llm.create_chat_completion(
23
+ messages = [
24
+ {
25
+ "role": "user",
26
+ "content": "What is the capital of France?"
27
+ }
28
+ ]
29
+ )`);
30
+ });
31
+
32
+ it("llama_cpp_python non-conversational", async () => {
33
+ const model: ModelData = {
34
+ id: "mlabonne/gemma-2b-GGUF",
35
+ tags: [""],
36
+ inference: "",
37
+ };
38
+ const snippet = llama_cpp_python(model);
39
+
40
+ expect(snippet.join("\n")).toEqual(`from llama_cpp import Llama
41
+
42
+ llm = Llama.from_pretrained(
43
+ repo_id="mlabonne/gemma-2b-GGUF",
44
+ filename="{{GGUF_FILE}}",
45
+ )
46
+
47
+ output = llm(
48
+ "Once upon a time,",
49
+ max_tokens=512,
50
+ echo=True
51
+ )
52
+ print(output)`);
53
+ });
54
+ });
@@ -1,6 +1,9 @@
1
1
  import type { ModelData } from "./model-data.js";
2
2
  import type { WidgetExampleTextInput, WidgetExampleSentenceSimilarityInput } from "./widget-example.js";
3
3
  import { LIBRARY_TASK_MAPPING } from "./library-to-tasks.js";
4
+ import { getModelInputSnippet } from "./snippets/inputs.js";
5
+ import type { ChatCompletionInputMessage } from "./tasks/index.js";
6
+ import { stringifyMessages } from "./snippets/common.js";
4
7
 
5
8
  const TAG_CUSTOM_CODE = "custom_code";
6
9
 
@@ -418,23 +421,33 @@ model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat1
418
421
  `,
419
422
  ];
420
423
 
421
- export const llama_cpp_python = (model: ModelData): string[] => [
422
- `from llama_cpp import Llama
424
+ export const llama_cpp_python = (model: ModelData): string[] => {
425
+ const snippets = [
426
+ `from llama_cpp import Llama
423
427
 
424
428
  llm = Llama.from_pretrained(
425
429
  repo_id="${model.id}",
426
430
  filename="{{GGUF_FILE}}",
427
431
  )
432
+ `,
433
+ ];
428
434
 
429
- llm.create_chat_completion(
430
- messages = [
431
- {
432
- "role": "user",
433
- "content": "What is the capital of France?"
434
- }
435
- ]
436
- )`,
437
- ];
435
+ if (model.tags.includes("conversational")) {
436
+ const messages = getModelInputSnippet(model) as ChatCompletionInputMessage[];
437
+ snippets.push(`llm.create_chat_completion(
438
+ messages = ${stringifyMessages(messages, { attributeKeyQuotes: true, indent: "\t" })}
439
+ )`);
440
+ } else {
441
+ snippets.push(`output = llm(
442
+ "Once upon a time,",
443
+ max_tokens=512,
444
+ echo=True
445
+ )
446
+ print(output)`);
447
+ }
448
+
449
+ return snippets;
450
+ };
438
451
 
439
452
  export const tf_keras = (model: ModelData): string[] => [
440
453
  `# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy)
@@ -9,7 +9,7 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): Infe
9
9
  -X POST \\
10
10
  -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
11
11
  -H 'Content-Type: application/json' \\
12
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
12
+ -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
13
13
  });
14
14
 
15
15
  export const snippetTextGeneration = (
@@ -36,7 +36,7 @@ export const snippetTextGeneration = (
36
36
  };
37
37
  return {
38
38
  content: `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
39
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
39
+ -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}' \\
40
40
  -H 'Content-Type: application/json' \\
41
41
  --data '{
42
42
  "model": "${model.id}",
@@ -63,14 +63,14 @@ export const snippetZeroShotClassification = (model: ModelDataMinimal, accessTok
63
63
  -X POST \\
64
64
  -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
65
65
  -H 'Content-Type: application/json' \\
66
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
66
+ -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
67
67
  });
68
68
 
69
69
  export const snippetFile = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
70
70
  content: `curl https://api-inference.huggingface.co/models/${model.id} \\
71
71
  -X POST \\
72
72
  --data-binary '@${getModelInputSnippet(model, true, true)}' \\
73
- -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`,
73
+ -H 'Authorization: Bearer ${accessToken || `{API_TOKEN}`}'`,
74
74
  });
75
75
 
76
76
  export const curlSnippets: Partial<
@@ -10,7 +10,7 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): Infe
10
10
  "https://api-inference.huggingface.co/models/${model.id}",
11
11
  {
12
12
  headers: {
13
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
13
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
14
14
  "Content-Type": "application/json",
15
15
  },
16
16
  method: "POST",
@@ -151,7 +151,7 @@ export const snippetZeroShotClassification = (model: ModelDataMinimal, accessTok
151
151
  "https://api-inference.huggingface.co/models/${model.id}",
152
152
  {
153
153
  headers: {
154
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
154
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
155
155
  "Content-Type": "application/json",
156
156
  },
157
157
  method: "POST",
@@ -175,7 +175,7 @@ export const snippetTextToImage = (model: ModelDataMinimal, accessToken: string)
175
175
  "https://api-inference.huggingface.co/models/${model.id}",
176
176
  {
177
177
  headers: {
178
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
178
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
179
179
  "Content-Type": "application/json",
180
180
  },
181
181
  method: "POST",
@@ -196,7 +196,7 @@ export const snippetTextToAudio = (model: ModelDataMinimal, accessToken: string)
196
196
  "https://api-inference.huggingface.co/models/${model.id}",
197
197
  {
198
198
  headers: {
199
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
199
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
200
200
  "Content-Type": "application/json",
201
201
  },
202
202
  method: "POST",
@@ -238,7 +238,7 @@ export const snippetFile = (model: ModelDataMinimal, accessToken: string): Infer
238
238
  "https://api-inference.huggingface.co/models/${model.id}",
239
239
  {
240
240
  headers: {
241
- Authorization: "Bearer ${accessToken || `{API_TOKEN}`}"
241
+ Authorization: "Bearer ${accessToken || `{API_TOKEN}`}",
242
242
  "Content-Type": "application/json",
243
243
  },
244
244
  method: "POST",
@@ -13,17 +13,18 @@ export interface AudioClassificationInput {
13
13
  */
14
14
  inputs: string;
15
15
  /**
16
- * Additional inference parameters
16
+ * Additional inference parameters for Audio Classification
17
17
  */
18
18
  parameters?: AudioClassificationParameters;
19
19
  [property: string]: unknown;
20
20
  }
21
21
  /**
22
- * Additional inference parameters
23
- *
24
22
  * Additional inference parameters for Audio Classification
25
23
  */
26
24
  export interface AudioClassificationParameters {
25
+ /**
26
+ * The function to apply to the model outputs in order to retrieve the scores.
27
+ */
27
28
  function_to_apply?: ClassificationOutputTransform;
28
29
  /**
29
30
  * When specified, limits the output to the top K most probable classes.
@@ -10,19 +10,19 @@
10
10
  "type": "string"
11
11
  },
12
12
  "parameters": {
13
- "description": "Additional inference parameters",
13
+ "description": "Additional inference parameters for Audio Classification",
14
14
  "$ref": "#/$defs/AudioClassificationParameters"
15
15
  }
16
16
  },
17
17
  "$defs": {
18
18
  "AudioClassificationParameters": {
19
19
  "title": "AudioClassificationParameters",
20
- "description": "Additional inference parameters for Audio Classification",
21
20
  "type": "object",
22
21
  "properties": {
23
22
  "function_to_apply": {
24
23
  "title": "AudioClassificationOutputTransform",
25
- "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform"
24
+ "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform",
25
+ "description": "The function to apply to the model outputs in order to retrieve the scores."
26
26
  },
27
27
  "top_k": {
28
28
  "type": "integer",
@@ -14,15 +14,13 @@ export interface AutomaticSpeechRecognitionInput {
14
14
  */
15
15
  inputs: string;
16
16
  /**
17
- * Additional inference parameters
17
+ * Additional inference parameters for Automatic Speech Recognition
18
18
  */
19
19
  parameters?: AutomaticSpeechRecognitionParameters;
20
20
  [property: string]: unknown;
21
21
  }
22
22
 
23
23
  /**
24
- * Additional inference parameters
25
- *
26
24
  * Additional inference parameters for Automatic Speech Recognition
27
25
  */
28
26
  export interface AutomaticSpeechRecognitionParameters {
@@ -39,8 +37,6 @@ export interface AutomaticSpeechRecognitionParameters {
39
37
 
40
38
  /**
41
39
  * Parametrization of the text generation process
42
- *
43
- * Ad-hoc parametrization of the text generation process
44
40
  */
45
41
  export interface GenerationParameters {
46
42
  /**
@@ -10,14 +10,13 @@
10
10
  "type": "string"
11
11
  },
12
12
  "parameters": {
13
- "description": "Additional inference parameters",
13
+ "description": "Additional inference parameters for Automatic Speech Recognition",
14
14
  "$ref": "#/$defs/AutomaticSpeechRecognitionParameters"
15
15
  }
16
16
  },
17
17
  "$defs": {
18
18
  "AutomaticSpeechRecognitionParameters": {
19
19
  "title": "AutomaticSpeechRecognitionParameters",
20
- "description": "Additional inference parameters for Automatic Speech Recognition",
21
20
  "type": "object",
22
21
  "properties": {
23
22
  "return_timestamps": {
@@ -79,7 +79,7 @@ export interface ChatCompletionInput {
79
79
  * We generally recommend altering this or `top_p` but not both.
80
80
  */
81
81
  temperature?: number;
82
- tool_choice?: ChatCompletionInputTool;
82
+ tool_choice?: ChatCompletionInputToolChoice;
83
83
  /**
84
84
  * A prompt to be appended before the tools
85
85
  */
@@ -89,7 +89,7 @@ export interface ChatCompletionInput {
89
89
  * Use this to provide a list of
90
90
  * functions the model may generate JSON inputs for.
91
91
  */
92
- tools?: ToolElement[];
92
+ tools?: ChatCompletionInputTool[];
93
93
  /**
94
94
  * An integer between 0 and 5 specifying the number of most likely tokens to return at each
95
95
  * token position, each with
@@ -154,10 +154,23 @@ export interface ChatCompletionInputStreamOptions {
154
154
  [property: string]: unknown;
155
155
  }
156
156
 
157
- export type ChatCompletionInputTool = ChatCompletionInputToolType | string;
157
+ /**
158
+ *
159
+ * <https://platform.openai.com/docs/guides/function-calling/configuring-function-calling-behavior-using-the-tool_choice-parameter>
160
+ */
161
+ export type ChatCompletionInputToolChoice = ChatCompletionInputToolChoiceEnum | ChatCompletionInputToolChoiceObject;
162
+
163
+ /**
164
+ * Means the model can pick between generating a message or calling one or more tools.
165
+ *
166
+ * Means the model will not call any tool and instead generates a message.
167
+ *
168
+ * Means the model must call one or more tools.
169
+ */
170
+ export type ChatCompletionInputToolChoiceEnum = "auto" | "none" | "required";
158
171
 
159
- export interface ChatCompletionInputToolType {
160
- function?: ChatCompletionInputFunctionName;
172
+ export interface ChatCompletionInputToolChoiceObject {
173
+ function: ChatCompletionInputFunctionName;
161
174
  [property: string]: unknown;
162
175
  }
163
176
 
@@ -166,7 +179,7 @@ export interface ChatCompletionInputFunctionName {
166
179
  [property: string]: unknown;
167
180
  }
168
181
 
169
- export interface ToolElement {
182
+ export interface ChatCompletionInputTool {
170
183
  function: ChatCompletionInputFunctionDefinition;
171
184
  type: string;
172
185
  [property: string]: unknown;