@huggingface/tasks 0.12.15 → 0.12.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -7140,9 +7140,6 @@ var SKUS = {
  };

  // src/local-apps.ts
- function isGgufModel(model) {
- return model.tags.includes("gguf");
- }
  function isAwqModel(model) {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -7224,7 +7221,7 @@ var snippetVllm = (model) => {
  `curl -X POST "http://localhost:8000/v1/chat/completions" \\ `,
  ` -H "Content-Type: application/json" \\ `,
  ` --data '{`,
- ` "model": "${model.id}"`,
+ ` "model": "${model.id}",`,
  ` "messages": [`,
  ` {"role": "user", "content": "Hello!"}`,
  ` ]`,
@@ -7234,7 +7231,8 @@ var snippetVllm = (model) => {
  {
  title: "Install from pip",
  setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
- content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n")
+ content: [`# Load and run the model:
+ vllm serve "${model.id}"`, runCommand.join("\n")]
  },
  {
  title: "Use Docker images",
@@ -7250,10 +7248,10 @@ var snippetVllm = (model) => {
  ` --model ${model.id}`
  ].join("\n"),
  content: [
- "# Load and run the model:",
- `docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
- ...runCommand
- ].join("\n")
+ `# Load and run the model:
+ docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+ runCommand.join("\n")
+ ]
  }
  ];
  };
@@ -7269,7 +7267,7 @@ var LOCAL_APPS = {
  prettyLabel: "vLLM",
  docsUrl: "https://docs.vllm.ai",
  mainTask: "text-generation",
- displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isGgufModel(model) || isTransformersModel(model),
+ displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model),
  snippet: snippetVllm
  },
  lmstudio: {
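The one-character fix in the curl snippet above matters more than it looks: without the comma after the `"model"` field, the rendered `--data` payload was not valid JSON. A minimal TypeScript check (the model id is hypothetical) showing that the fixed payload parses:

```ts
// Hypothetical model id, for illustration only.
const modelId = "org/model";

// The JSON body the fixed snippet renders; deleting the comma after the
// "model" line reproduces the pre-0.12.16 bug (JSON.parse throws).
const body = [
	`{`,
	`  "model": "${modelId}",`,
	`  "messages": [`,
	`    {"role": "user", "content": "Hello!"}`,
	`  ]`,
	`}`,
].join("\n");

console.log(JSON.parse(body)); // parses cleanly with the comma in place
```

The identical change lands in the ESM build below.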
package/dist/index.js CHANGED
@@ -7102,9 +7102,6 @@ var SKUS = {
  };

  // src/local-apps.ts
- function isGgufModel(model) {
- return model.tags.includes("gguf");
- }
  function isAwqModel(model) {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -7186,7 +7183,7 @@ var snippetVllm = (model) => {
  `curl -X POST "http://localhost:8000/v1/chat/completions" \\ `,
  ` -H "Content-Type: application/json" \\ `,
  ` --data '{`,
- ` "model": "${model.id}"`,
+ ` "model": "${model.id}",`,
  ` "messages": [`,
  ` {"role": "user", "content": "Hello!"}`,
  ` ]`,
@@ -7196,7 +7193,8 @@ var snippetVllm = (model) => {
  {
  title: "Install from pip",
  setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
- content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n")
+ content: [`# Load and run the model:
+ vllm serve "${model.id}"`, runCommand.join("\n")]
  },
  {
  title: "Use Docker images",
@@ -7212,10 +7210,10 @@ var snippetVllm = (model) => {
  ` --model ${model.id}`
  ].join("\n"),
  content: [
- "# Load and run the model:",
- `docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
- ...runCommand
- ].join("\n")
+ `# Load and run the model:
+ docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+ runCommand.join("\n")
+ ]
  }
  ];
  };
@@ -7231,7 +7229,7 @@ var LOCAL_APPS = {
  prettyLabel: "vLLM",
  docsUrl: "https://docs.vllm.ai",
  mainTask: "text-generation",
- displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isGgufModel(model) || isTransformersModel(model),
+ displayOnModelPage: (model) => isAwqModel(model) || isGptqModel(model) || isAqlmModel(model) || isMarlinModel(model) || isLlamaCppGgufModel(model) || isTransformersModel(model),
  snippet: snippetVllm
  },
  lmstudio: {
package/dist/local-apps.d.ts CHANGED
@@ -12,7 +12,7 @@ export interface LocalAppSnippet {
  /**
  * Content (or command) to be run
  */
- content: string;
+ content: string | string[];
  }
  /**
  * Elements configurable by a local app.
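This type change is what allows `snippetVllm` to return its pip and Docker snippets as arrays: each entry becomes a separate copy-pasteable block instead of one `\n`-joined string. A minimal sketch of how a consumer might normalize the widened field; the interface mirror (with the fields `snippetVllm` populates) and the helper name are ours, not part of the package:

```ts
// Local mirror of LocalAppSnippet, limited to the fields used by snippetVllm.
interface LocalAppSnippet {
	title: string;
	setup?: string;
	content: string | string[]; // widened in 0.12.16
}

// Treat a plain string as a single block; an array is already split.
function contentBlocks(snippet: LocalAppSnippet): string[] {
	return typeof snippet.content === "string" ? [snippet.content] : snippet.content;
}

const example: LocalAppSnippet = {
	title: "Install from pip",
	content: ['# Load and run the model:\nvllm serve "org/model"', "curl ..."],
};
console.log(contentBlocks(example).length); // 2 separate blocks
```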
package/dist/local-apps.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;CAChB;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AA0BF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAuGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBAhHS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA4HlD,SAAS;yBAhEX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA6M3C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
+ {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;OAGG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAsBF,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAsGD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBA/GS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCA2HlD,SAAS;yBA/DX,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAvB1B,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA4M3C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@huggingface/tasks",
  "packageManager": "pnpm@8.10.5",
- "version": "0.12.15",
+ "version": "0.12.16",
  "description": "List of ML tasks for huggingface.co/tasks",
  "repository": "https://github.com/huggingface/huggingface.js.git",
  "publishConfig": {
package/src/local-apps.ts CHANGED
@@ -13,7 +13,7 @@ export interface LocalAppSnippet {
  /**
  * Content (or command) to be run
  */
- content: string;
+ content: string | string[];
  }

  /**
@@ -58,10 +58,6 @@ export type LocalApp = {
  }
  );

- function isGgufModel(model: ModelData): boolean {
- return model.tags.includes("gguf");
- }
-
  function isAwqModel(model: ModelData): boolean {
  return model.config?.quantization_config?.quant_method === "awq";
  }
@@ -153,7 +149,7 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
  `curl -X POST "http://localhost:8000/v1/chat/completions" \\ `,
  ` -H "Content-Type: application/json" \\ `,
  ` --data '{`,
- ` "model": "${model.id}"`,
+ ` "model": "${model.id}",`,
  ` "messages": [`,
  ` {"role": "user", "content": "Hello!"}`,
  ` ]`,
@@ -163,7 +159,7 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
  {
  title: "Install from pip",
  setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
- content: ["# Load and run the model:", `vllm serve "${model.id}"`, ...runCommand].join("\n"),
+ content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand.join("\n")],
  },
  {
  title: "Use Docker images",
@@ -179,10 +175,9 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
  ` --model ${model.id}`,
  ].join("\n"),
  content: [
- "# Load and run the model:",
- `docker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
- ...runCommand,
- ].join("\n"),
+ `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
+ runCommand.join("\n"),
+ ],
  },
  ];
  };
@@ -215,7 +210,7 @@ export const LOCAL_APPS = {
  isGptqModel(model) ||
  isAqlmModel(model) ||
  isMarlinModel(model) ||
- isGgufModel(model) ||
+ isLlamaCppGgufModel(model) ||
  isTransformersModel(model),
  snippet: snippetVllm,
  },
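Finally, the vLLM `displayOnModelPage` gate now calls `isLlamaCppGgufModel` instead of the removed local `isGgufModel`; the renamed helper is defined elsewhere in local-apps.ts and is not part of this diff. A sketch of the gate's shape, under the assumption that the renamed helper keeps the same tag-based GGUF check the removed one used:

```ts
// Stand-in for the package's ModelData: only the field used here.
type ModelData = { tags: string[] };

// Assumption: same "gguf" tag check as the removed isGgufModel;
// the real isLlamaCppGgufModel lives outside this diff.
function isLlamaCppGgufModel(model: ModelData): boolean {
	return model.tags.includes("gguf");
}

// The vLLM gate as changed above, with the quantization checks elided:
const showVllmOnModelPage = (model: ModelData): boolean => isLlamaCppGgufModel(model);
```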