@huggingface/tasks 0.10.10 → 0.10.11

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.cjs CHANGED
@@ -5862,20 +5862,25 @@ function isGgufModel(model) {
 }
 var snippetLlamacpp = (model) => {
   return [
-    `## Install llama.cpp via brew
+    `# Option 1: use llama.cpp with brew
 brew install llama.cpp
 
-## or from source with curl support
-## see llama.cpp README for compilation flags to optimize for your hardware
-git clone https://github.com/ggerganov/llama.cpp
-cd llama.cpp
-LLAMA_CURL=1 make
-`,
-    `## Load and run the model
+# Load and run the model
 llama \\
 --hf-repo "${model.id}" \\
 --hf-file file.gguf \\
 -p "I believe the meaning of life is" \\
+-n 128`,
+    `# Option 2: build llama.cpp from source with curl support
+git clone https://github.com/ggerganov/llama.cpp.git
+cd llama.cpp
+LLAMA_CURL=1 make
+
+# Load and run the model
+./main \\
+--hf-repo "${model.id}" \\
+-m file.gguf \\
+-p "I believe the meaning of life is" \\
 -n 128`
   ];
 };
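
For context, a minimal sketch of how a consumer might render the updated snippets through the package's local-apps registry. It assumes `LOCAL_APPS` and the `ModelData` type are re-exported from the package root and that the `"llama.cpp"` entry carries the `snippetLlamacpp` shown above; the repo id is hypothetical.

```ts
import { LOCAL_APPS, type ModelData } from "@huggingface/tasks";

// Hypothetical GGUF repo; only `id` is interpolated by the snippet.
const model = { id: "user/model-GGUF", tags: ["gguf"] } as ModelData;

const llamaCpp = LOCAL_APPS["llama.cpp"];
if ("snippet" in llamaCpp) {
	// As of 0.10.11 this yields two self-contained shell snippets:
	// a brew-install path and a build-from-source path.
	const rendered = llamaCpp.snippet(model);
	for (const s of Array.isArray(rendered) ? rendered : [rendered]) {
		console.log(s + "\n");
	}
}
```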
package/dist/index.js CHANGED
@@ -5824,20 +5824,25 @@ function isGgufModel(model) {
 }
 var snippetLlamacpp = (model) => {
   return [
-    `## Install llama.cpp via brew
+    `# Option 1: use llama.cpp with brew
 brew install llama.cpp
 
-## or from source with curl support
-## see llama.cpp README for compilation flags to optimize for your hardware
-git clone https://github.com/ggerganov/llama.cpp
-cd llama.cpp
-LLAMA_CURL=1 make
-`,
-    `## Load and run the model
+# Load and run the model
 llama \\
 --hf-repo "${model.id}" \\
 --hf-file file.gguf \\
 -p "I believe the meaning of life is" \\
+-n 128`,
+    `# Option 2: build llama.cpp from source with curl support
+git clone https://github.com/ggerganov/llama.cpp.git
+cd llama.cpp
+LLAMA_CURL=1 make
+
+# Load and run the model
+./main \\
+--hf-repo "${model.id}" \\
+-m file.gguf \\
+-p "I believe the meaning of life is" \\
 -n 128`
   ];
 };
package/dist/src/local-apps.d.ts.map CHANGED
@@ -1 +1 @@
-{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,GAAG,CAAC;CACnC,GACD;IACA;;OAEG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,GAAG,MAAM,EAAE,CAAC;CAChD,CACH,CAAC;AAEF,iBAAS,WAAW,CAAC,KAAK,EAAE,SAAS,WAEpC;AAsBD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBA/BS,SAAS,KAAG,MAAM,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA2FhB,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
+{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,GAAG,CAAC;CACnC,GACD;IACA;;OAEG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,GAAG,MAAM,EAAE,CAAC;CAChD,CACH,CAAC;AAEF,iBAAS,WAAW,CAAC,KAAK,EAAE,SAAS,WAEpC;AA2BD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBApCS,SAAS,KAAG,MAAM,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAgGhB,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
 	"name": "@huggingface/tasks",
 	"packageManager": "pnpm@8.10.5",
-	"version": "0.10.10",
+	"version": "0.10.11",
 	"description": "List of ML tasks for huggingface.co/tasks",
 	"repository": "https://github.com/huggingface/huggingface.js.git",
 	"publishConfig": {
package/src/local-apps.ts CHANGED
@@ -48,20 +48,25 @@ function isGgufModel(model: ModelData) {
 
 const snippetLlamacpp = (model: ModelData): string[] => {
 	return [
-		`## Install llama.cpp via brew
+		`# Option 1: use llama.cpp with brew
 brew install llama.cpp
 
-## or from source with curl support
-## see llama.cpp README for compilation flags to optimize for your hardware
-git clone https://github.com/ggerganov/llama.cpp
-cd llama.cpp
-LLAMA_CURL=1 make
-`,
-		`## Load and run the model
+# Load and run the model
 llama \\
 --hf-repo "${model.id}" \\
 --hf-file file.gguf \\
 -p "I believe the meaning of life is" \\
+-n 128`,
+		`# Option 2: build llama.cpp from source with curl support
+git clone https://github.com/ggerganov/llama.cpp.git
+cd llama.cpp
+LLAMA_CURL=1 make
+
+# Load and run the model
+./main \\
+--hf-repo "${model.id}" \\
+-m file.gguf \\
+-p "I believe the meaning of life is" \\
 -n 128`,
 	];
 };
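
The net effect of the change: each element of the returned array is now a complete, standalone option (setup and run together), where the old version split installation and execution across elements, and the source-build path now invokes `./main` with `-m` instead of the `llama` binary with `--hf-file`. A compact sketch of the new shape, with abbreviated snippet bodies (`MinimalModel` is a hypothetical stand-in for `ModelData`):

```ts
// Abbreviated re-declaration; see the diff above for the full templates.
type MinimalModel = { id: string }; // hypothetical stand-in for ModelData

const snippetLlamacpp = (model: MinimalModel): string[] => [
	// Option 1: install via brew, then run with the `llama` binary
	`# Option 1: use llama.cpp with brew\n…\nllama --hf-repo "${model.id}" …`,
	// Option 2: build from source with curl support, then run with ./main
	`# Option 2: build llama.cpp from source with curl support\n…\n./main --hf-repo "${model.id}" -m file.gguf …`,
];

// Each element stands alone, so a UI can offer the two options as
// independent tabs without stitching snippets together.
const [brewOption, sourceOption] = snippetLlamacpp({ id: "user/model-GGUF" });
console.log(brewOption.startsWith("# Option 1")); // true
console.log(sourceOption.includes("./main")); // true
```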