@huggingface/tasks 0.10.13 → 0.10.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -5937,7 +5937,7 @@ brew install llama.cpp
  # Load and run the model
  llama \\
  --hf-repo "${model.id}" \\
- --hf-file file.gguf \\
+ --hf-file {{GGUF_FILE}} \\
  -p "I believe the meaning of life is" \\
  -n 128`,
  `# Option 2: build llama.cpp from source with curl support
package/dist/index.js CHANGED
@@ -5898,7 +5898,7 @@ brew install llama.cpp
  # Load and run the model
  llama \\
  --hf-repo "${model.id}" \\
- --hf-file file.gguf \\
+ --hf-file {{GGUF_FILE}} \\
  -p "I believe the meaning of life is" \\
  -n 128`,
  `# Option 2: build llama.cpp from source with curl support
package/dist/local-apps.d.ts CHANGED
@@ -33,6 +33,7 @@ export type LocalApp = {
  } | {
  /**
  * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
+ * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
  */
  snippet: (model: ModelData) => string | string[];
  });
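The added JSDoc line documents a small templating contract: snippet() now emits the literal placeholder {{GGUF_FILE}}, and the caller (e.g. the Hub UI) substitutes a concrete file path, or the list of available GGUF files, before display. A minimal sketch of such a substitution in TypeScript; renderSnippet and its arguments are illustrative and not part of @huggingface/tasks:

// Hypothetical helper, not part of @huggingface/tasks: fill in the
// {{GGUF_FILE}} placeholder emitted by a LocalApp snippet.
function renderSnippet(snippet: string, ggufFiles: string[]): string {
	// One file: use it directly. Several files (e.g. multiple
	// quantizations): show the list so the user can pick one.
	const replacement =
		ggufFiles.length === 1 ? ggufFiles[0] : `<${ggufFiles.join(" | ")}>`;
	return snippet.split("{{GGUF_FILE}}").join(replacement);
}

// Example with two quantizations of the same repo:
console.log(
	renderSnippet('--hf-file {{GGUF_FILE}} \\', ["model.Q4_K_M.gguf", "model.Q8_0.gguf"])
);
// → --hf-file <model.Q4_K_M.gguf | model.Q8_0.gguf> \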
package/dist/local-apps.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,GAAG,CAAC;CACnC,GACD;IACA;;OAEG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,GAAG,MAAM,EAAE,CAAC;CAChD,CACH,CAAC;AAEF,iBAAS,WAAW,CAAC,KAAK,EAAE,SAAS,WAEpC;AA2BD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBApCS,SAAS,KAAG,MAAM,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAkIhB,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
+ {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,GAAG,CAAC;CACnC,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,GAAG,MAAM,EAAE,CAAC;CAChD,CACH,CAAC;AAEF,iBAAS,WAAW,CAAC,KAAK,EAAE,SAAS,WAEpC;AA2BD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBApCS,SAAS,KAAG,MAAM,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAkIhB,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.10.13",
+ "version": "0.10.14",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
package/src/local-apps.ts CHANGED
@@ -37,6 +37,7 @@ export type LocalApp = {
  | {
  /**
  * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
+ * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
  */
  snippet: (model: ModelData) => string | string[];
  }
@@ -54,7 +55,7 @@ brew install llama.cpp
  # Load and run the model
  llama \\
  --hf-repo "${model.id}" \\
- --hf-file file.gguf \\
+ --hf-file {{GGUF_FILE}} \\
  -p "I believe the meaning of life is" \\
  -n 128`,
  `# Option 2: build llama.cpp from source with curl support
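At the type level nothing changes for consumers: snippet still returns string | string[]; only the returned text now carries the raw placeholder. A sketch of how a caller might observe this, assuming LOCAL_APPS and ModelData are exported from the package (as defined in src/local-apps.ts) and that LOCAL_APPS contains a "llama.cpp" entry; the model stub is illustrative:

import { LOCAL_APPS, type ModelData } from "@huggingface/tasks";

// Assumption: LOCAL_APPS has a "llama.cpp" entry of the snippet-bearing
// variant of the LocalApp union shown in the diff above.
const llamaCpp = LOCAL_APPS["llama.cpp"];

// Minimal stub for illustration; a real ModelData comes from the Hub API.
const model = { id: "TheBloke/Llama-2-7B-GGUF" } as ModelData;

if ("snippet" in llamaCpp) {
	// As of 0.10.14 the snippets contain the literal {{GGUF_FILE}}
	// placeholder, to be substituted by the caller (see sketch above).
	console.log(llamaCpp.snippet(model));
}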