@huggingface/tasks 0.12.15 → 0.12.17

This diff compares publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes between those versions as published.
package/dist/index.cjs CHANGED
@@ -7140,9 +7140,6 @@ var SKUS = {
 };
 
 // src/local-apps.ts
-function isGgufModel(model) {
-  return model.tags.includes("gguf");
-}
 function isAwqModel(model) {
   return model.config?.quantization_config?.quant_method === "awq";
 }
@@ -7219,12 +7216,11 @@ var snippetLocalAI = (model, filepath) => {
 };
 var snippetVllm = (model) => {
   const runCommand = [
-    "",
     "# Call the server using curl:",
     `curl -X POST "http://localhost:8000/v1/chat/completions" \\ `,
     ` -H "Content-Type: application/json" \\ `,
     ` --data '{`,
-    ` "model": "${model.id}"`,
+    ` "model": "${model.id}",`,
     ` "messages": [`,
     ` {"role": "user", "content": "Hello!"}`,
     ` ]`,
@@ -7234,7 +7230,8 @@ var snippetVllm = (model) => {
     {
       title: "Install from pip",
       setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
-      content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n")
+      content: [`# Load and run the model:
+vllm serve "${model.id}"`, runCommand.join("\n")]
     },
     {
       title: "Use Docker images",
@@ -7250,10 +7247,10 @@ var snippetVllm = (model) => {
         ` --model ${model.id}`
       ].join("\n"),
       content: [
-        "# Load and run the model:",
-        `docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
-        ...runCommand
-      ].join("\n")
+        `# Load and run the model:
+docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+        runCommand.join("\n")
+      ]
     }
   ];
 };
@@ -7269,7 +7266,7 @@ var LOCAL_APPS = {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
     mainTask: "text-generation",
-    displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isGgufModel(model) || isTransformersModel(model),
+    displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model),
     snippet: snippetVllm
   },
   lmstudio: {
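One detail in the `runCommand` hunk above is easy to miss: the trailing comma added after the `"model"` field is a functional fix, not a style tweak. Without it, the JSON body that the generated curl command sends to the vLLM server is invalid. A quick illustration of the payload before and after the change (the model id is a hypothetical placeholder):

```ts
// --data payload emitted by the old snippet: missing comma after "model".
const before = `{
  "model": "mistralai/Mistral-7B-Instruct-v0.2"
  "messages": [{ "role": "user", "content": "Hello!" }]
}`;

// Payload emitted after this release: valid JSON.
const after = `{
  "model": "mistralai/Mistral-7B-Instruct-v0.2",
  "messages": [{ "role": "user", "content": "Hello!" }]
}`;

// JSON.parse(before) throws a SyntaxError; the fixed payload parses cleanly.
JSON.parse(after);
```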
package/dist/index.js CHANGED
@@ -7102,9 +7102,6 @@ var SKUS = {
 };
 
 // src/local-apps.ts
-function isGgufModel(model) {
-  return model.tags.includes("gguf");
-}
 function isAwqModel(model) {
   return model.config?.quantization_config?.quant_method === "awq";
 }
@@ -7181,12 +7178,11 @@ var snippetLocalAI = (model, filepath) => {
 };
 var snippetVllm = (model) => {
   const runCommand = [
-    "",
     "# Call the server using curl:",
     `curl -X POST "http://localhost:8000/v1/chat/completions" \\ `,
     ` -H "Content-Type: application/json" \\ `,
     ` --data '{`,
-    ` "model": "${model.id}"`,
+    ` "model": "${model.id}",`,
     ` "messages": [`,
     ` {"role": "user", "content": "Hello!"}`,
     ` ]`,
@@ -7196,7 +7192,8 @@ var snippetVllm = (model) => {
     {
       title: "Install from pip",
       setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
-      content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n")
+      content: [`# Load and run the model:
+vllm serve "${model.id}"`, runCommand.join("\n")]
     },
     {
       title: "Use Docker images",
@@ -7212,10 +7209,10 @@ var snippetVllm = (model) => {
         ` --model ${model.id}`
      ].join("\n"),
       content: [
-        "# Load and run the model:",
-        `docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
-        ...runCommand
-      ].join("\n")
+        `# Load and run the model:
+docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+        runCommand.join("\n")
+      ]
     }
   ];
 };
@@ -7231,7 +7228,7 @@ var LOCAL_APPS = {
     prettyLabel: "vLLM",
     docsUrl: "https://docs.vllm.ai",
     mainTask: "text-generation",
-    displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isGgufModel(model) || isTransformersModel(model),
+    displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model),
     snippet: snippetVllm
   },
   lmstudio: {
package/dist/src/local-apps.d.ts CHANGED
@@ -12,7 +12,7 @@ export interface LocalAppSnippet {
     /**
      * Content (or command) to be run
      */
-    content: string;
+    content: string | string[];
 }
 /**
  * Elements configurable by a local app.
package/dist/src/local-apps.d.ts.map CHANGED
@@ -1 +1 @@
-{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;CAChB;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AA0BF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAuGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBAhHS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA4HlD,SAAS;yBAhEX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA6M3C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
+{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAsBF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAqGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBA9GS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA0HlD,SAAS;yBA9DX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA2M3C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.12.15",
+  "version": "0.12.17",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
package/src/local-apps.ts CHANGED
@@ -13,7 +13,7 @@ export interface LocalAppSnippet {
   /**
    * Content (or command) to be run
    */
-  content: string;
+  content: string | string[];
 }
 
 /**
@@ -58,10 +58,6 @@ export type LocalApp = {
   }
 );
 
-function isGgufModel(model: ModelData): boolean {
-  return model.tags.includes("gguf");
-}
-
 function isAwqModel(model: ModelData): boolean {
   return model.config?.quantization_config?.quant_method === "awq";
 }
@@ -148,12 +144,11 @@ const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[]
 
 const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
   const runCommand = [
-    "",
     "# Call the server using curl:",
    `curl -X POST "http://localhost:8000/v1/chat/completions" \\ `,
     ` -H "Content-Type: application/json" \\ `,
     ` --data '{`,
-    ` "model": "${model.id}"`,
+    ` "model": "${model.id}",`,
     ` "messages": [`,
     ` {"role": "user", "content": "Hello!"}`,
     ` ]`,
@@ -163,7 +158,7 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
     {
       title: "Install from pip",
       setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
-      content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n"),
+      content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand.join("\n")],
     },
     {
       title: "Use Docker images",
@@ -179,10 +174,9 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
         ` --model ${model.id}`,
       ].join("\n"),
       content: [
-        "# Load and run the model:",
-        `docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
-        ...runCommand,
-      ].join("\n"),
+        `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+        runCommand.join("\n"),
+      ],
     },
   ];
 };
@@ -215,7 +209,7 @@ export const LOCAL_APPS = {
       isGptqModel(model) ||
       isAqlmModel(model) ||
       isMarlinModel(model) ||
-      isGgufModel(model) ||
+      isLlamaCppGgufModel(model) ||
       isTransformersModel(model),
     snippet: snippetVllm,
   },
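The `content: string | string[]` widening in `LocalAppSnippet` is what the snippet restructuring above builds on: the vLLM snippets now return two entries, the serve command and the follow-up curl call, instead of one pre-joined string. Consumers that treated `content` as a plain string need a small normalization step. A minimal sketch, assuming `LocalAppSnippet` is re-exported from the package root (the `renderCodeBlock` callback is hypothetical):

```ts
import type { LocalAppSnippet } from "@huggingface/tasks";

// Normalize `content` to one string per code block, covering both shapes.
function contentBlocks(snippet: LocalAppSnippet): string[] {
  return typeof snippet.content === "string" ? [snippet.content] : snippet.content;
}

// Hypothetical usage: render each entry as its own block, so the serve
// command and the curl call no longer collapse into one copy-paste blob.
// for (const block of contentBlocks(snippet)) renderCodeBlock(block);
```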