vec-inf 0.6.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vec_inf/cli/_cli.py CHANGED
@@ -18,6 +18,7 @@ metrics
     Stream real-time performance metrics
 """
 
+import json
 import time
 from typing import Optional, Union
 
@@ -72,6 +73,21 @@ def cli() -> None:
     type=str,
     help="Quality of service",
 )
+@click.option(
+    "--exclude",
+    type=str,
+    help="Exclude certain nodes from the resources granted to the job",
+)
+@click.option(
+    "--node-list",
+    type=str,
+    help="Request a specific list of nodes for deployment",
+)
+@click.option(
+    "--bind",
+    type=str,
+    help="Additional binds for the singularity container as a comma separated list of bind paths",
+)
 @click.option(
     "--time",
     type=str,
@@ -124,8 +140,16 @@ def launch(
         Number of nodes to use
     - gpus_per_node : int, optional
         Number of GPUs per node
+    - account : str, optional
+        Charge resources used by this job to specified account
     - qos : str, optional
         Quality of service tier
+    - exclude : str, optional
+        Exclude certain nodes from the resources granted to the job
+    - node_list : str, optional
+        Request a specific list of nodes for deployment
+    - bind : str, optional
+        Additional binds for the singularity container
     - time : str, optional
         Time limit for job
     - venv : str, optional
@@ -157,8 +181,9 @@ def launch(
 
     # Display launch information
     launch_formatter = LaunchResponseFormatter(model_name, launch_response.config)
+
     if json_mode:
-        click.echo(launch_response.config)
+        click.echo(json.dumps(launch_response.config))
     else:
         launch_info_table = launch_formatter.format_table_output()
         CONSOLE.print(launch_info_table)
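The `json.dumps` change above matters because `click.echo` on a raw dict prints Python's `repr`, which is not valid JSON. A minimal sketch of the difference, using a hypothetical config dict in place of `launch_response.config`:

```python
import json

# Hypothetical stand-in for launch_response.config.
config = {"model_name": "Meta-Llama-3.1-8B-Instruct", "num_nodes": 1}

print(config)              # {'model_name': ...} -- Python repr, not valid JSON
print(json.dumps(config))  # {"model_name": ...} -- parseable by jq and other tools
```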
vec_inf/client/_client_vars.py CHANGED
@@ -21,7 +21,12 @@ SLURM_JOB_CONFIG_ARGS : dict
 from pathlib import Path
 from typing import TypedDict
 
-from vec_inf.client.slurm_vars import SINGULARITY_LOAD_CMD
+from vec_inf.client.slurm_vars import (
+    LD_LIBRARY_PATH,
+    SINGULARITY_IMAGE,
+    SINGULARITY_LOAD_CMD,
+    VLLM_NCCL_SO_PATH,
+)
 
 
 MODEL_READY_SIGNATURE = "INFO: Application startup complete."
@@ -60,6 +65,8 @@ SLURM_JOB_CONFIG_ARGS = {
     "qos": "qos",
     "time": "time",
     "nodes": "num_nodes",
+    "exclude": "exclude",
+    "nodelist": "node_list",
     "gpus-per-node": "gpus_per_node",
     "cpus-per-task": "cpus_per_task",
     "mem": "mem_per_node",
@@ -71,7 +78,12 @@ SLURM_JOB_CONFIG_ARGS = {
 VLLM_SHORT_TO_LONG_MAP = {
     "-tp": "--tensor-parallel-size",
     "-pp": "--pipeline-parallel-size",
+    "-dp": "--data-parallel-size",
+    "-dpl": "--data-parallel-size-local",
+    "-dpa": "--data-parallel-address",
+    "-dpp": "--data-parallel-rpc-port",
     "-O": "--compilation-config",
+    "-q": "--quantization",
 }
 
 
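The expanded map lets configs use vLLM's short flags interchangeably with the long forms. A small sketch of how such a lookup normalizes flags, passing long forms through unchanged (the helper name is hypothetical):

```python
VLLM_SHORT_TO_LONG_MAP = {
    "-tp": "--tensor-parallel-size",
    "-dp": "--data-parallel-size",
    "-q": "--quantization",
    "-O": "--compilation-config",
}

def normalize_vllm_flag(flag: str) -> str:
    """Expand a short vLLM flag to its long form; leave long forms as-is."""
    return VLLM_SHORT_TO_LONG_MAP.get(flag, flag)

assert normalize_vllm_flag("-tp") == "--tensor-parallel-size"
assert normalize_vllm_flag("--max-model-len") == "--max-model-len"
```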
@@ -117,6 +129,8 @@ class SlurmScriptTemplate(TypedDict):
         Commands for Singularity container setup
     imports : str
         Import statements and source commands
+    env_vars : list[str]
+        Environment variables to set
     singularity_command : str
         Template for Singularity execution command
     activate_venv : str
@@ -134,6 +148,7 @@ class SlurmScriptTemplate(TypedDict):
     shebang: ShebangConfig
     singularity_setup: list[str]
     imports: str
+    env_vars: list[str]
     singularity_command: str
     activate_venv: str
     server_setup: ServerSetupConfig
@@ -152,10 +167,14 @@ SLURM_SCRIPT_TEMPLATE: SlurmScriptTemplate = {
     },
     "singularity_setup": [
        SINGULARITY_LOAD_CMD,
-        "singularity exec {singularity_image} ray stop",
+        f"singularity exec {SINGULARITY_IMAGE} ray stop",
     ],
     "imports": "source {src_dir}/find_port.sh",
-    "singularity_command": "singularity exec --nv --bind {model_weights_path}:{model_weights_path} --containall {singularity_image}",
+    "env_vars": [
+        f"export LD_LIBRARY_PATH={LD_LIBRARY_PATH}",
+        f"export VLLM_NCCL_SO_PATH={VLLM_NCCL_SO_PATH}",
+    ],
+    "singularity_command": f"singularity exec --nv --bind {{model_weights_path}}{{additional_binds}} --containall {SINGULARITY_IMAGE}",
     "activate_venv": "source {venv}/bin/activate",
     "server_setup": {
         "single_node": [
@@ -203,8 +222,7 @@ SLURM_SCRIPT_TEMPLATE: SlurmScriptTemplate = {
         ' && mv temp.json "$json_path"',
     ],
     "launch_cmd": [
-        "python3.10 -m vllm.entrypoints.openai.api_server \\",
-        "    --model {model_weights_path} \\",
+        "vllm serve {model_weights_path} \\",
         "    --served-model-name {model_name} \\",
         '    --host "0.0.0.0" \\',
         "    --port $vllm_port_number \\",
vec_inf/client/_helper.py CHANGED
@@ -5,7 +5,6 @@ metrics collection, and model registry operations.
 """
 
 import json
-import os
 import time
 import warnings
 from pathlib import Path
@@ -36,10 +35,6 @@ from vec_inf.client.models import (
     ModelType,
     StatusResponse,
 )
-from vec_inf.client.slurm_vars import (
-    LD_LIBRARY_PATH,
-    VLLM_NCCL_SO_PATH,
-)
 
 
 class ModelLauncher:
@@ -230,11 +225,6 @@ class ModelLauncher:
 
         return params
 
-    def _set_env_vars(self) -> None:
-        """Set environment variables for the launch command."""
-        os.environ["LD_LIBRARY_PATH"] = LD_LIBRARY_PATH
-        os.environ["VLLM_NCCL_SO_PATH"] = VLLM_NCCL_SO_PATH
-
     def _build_launch_command(self) -> str:
         """Generate the slurm script and construct the launch command.
 
@@ -259,9 +249,6 @@
         SlurmJobError
             If SLURM job submission fails
         """
-        # Set environment variables
-        self._set_env_vars()
-
         # Build and execute the launch command
         command_output, stderr = utils.run_bash_command(self._build_launch_command())
 
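These removals pair with the new `env_vars` block in `_client_vars.py` above: `LD_LIBRARY_PATH` and `VLLM_NCCL_SO_PATH` are now exported inside the generated Slurm script rather than set on the client process via `os.environ`, so `_set_env_vars` and its call site are no longer needed.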
vec_inf/client/_slurm_script_generator.py CHANGED
@@ -12,7 +12,6 @@ from vec_inf.client._client_vars import (
     SLURM_JOB_CONFIG_ARGS,
     SLURM_SCRIPT_TEMPLATE,
 )
-from vec_inf.client.slurm_vars import SINGULARITY_IMAGE
 
 
 class SlurmScriptGenerator:
@@ -40,6 +39,9 @@ class SlurmScriptGenerator:
         self.params = params
         self.is_multinode = int(self.params["num_nodes"]) > 1
         self.use_singularity = self.params["venv"] == "singularity"
+        self.additional_binds = self.params.get("bind", "")
+        if self.additional_binds:
+            self.additional_binds = f" --bind {self.additional_binds}"
         self.model_weights_path = str(
             Path(params["model_weights_parent_dir"], params["model_name"])
         )
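The `bind` parameter carries the comma-separated list from the new `--bind` CLI option; when present it becomes a ` --bind …` fragment that fills the `{additional_binds}` placeholder in the singularity command. A sketch with a hypothetical value:

```python
# Mirrors the logic above; the bind value is hypothetical.
params = {"bind": "/scratch:/scratch,/datasets"}

additional_binds = params.get("bind", "")
if additional_binds:
    additional_binds = f" --bind {additional_binds}"

print(repr(additional_binds))  # ' --bind /scratch:/scratch,/datasets'
```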
@@ -87,11 +89,8 @@ class SlurmScriptGenerator:
         """
         server_script = ["\n"]
         if self.use_singularity:
-            server_script.append(
-                "\n".join(SLURM_SCRIPT_TEMPLATE["singularity_setup"]).format(
-                    singularity_image=SINGULARITY_IMAGE,
-                )
-            )
+            server_script.append("\n".join(SLURM_SCRIPT_TEMPLATE["singularity_setup"]))
+            server_script.append("\n".join(SLURM_SCRIPT_TEMPLATE["env_vars"]))
         server_script.append(
             SLURM_SCRIPT_TEMPLATE["imports"].format(src_dir=self.params["src_dir"])
         )
@@ -104,7 +103,7 @@
                 "SINGULARITY_PLACEHOLDER",
                 SLURM_SCRIPT_TEMPLATE["singularity_command"].format(
                     model_weights_path=self.model_weights_path,
-                    singularity_image=SINGULARITY_IMAGE,
+                    additional_binds=self.additional_binds,
                 ),
             )
         else:
@@ -136,7 +135,7 @@
             launcher_script.append(
                 SLURM_SCRIPT_TEMPLATE["singularity_command"].format(
                     model_weights_path=self.model_weights_path,
-                    singularity_image=SINGULARITY_IMAGE,
+                    additional_binds=self.additional_binds,
                 )
                 + " \\"
             )
vec_inf/client/config.py CHANGED
@@ -108,6 +108,16 @@ class ModelConfig(BaseModel):
     partition: Union[PARTITION, str] = Field(
         default=cast(str, DEFAULT_ARGS["partition"]), description="GPU partition type"
     )
+    exclude: Optional[str] = Field(
+        default=None,
+        description="Exclude certain nodes from the resources granted to the job",
+    )
+    node_list: Optional[str] = Field(
+        default=None, description="Request a specific list of nodes for deployment"
+    )
+    bind: Optional[str] = Field(
+        default=None, description="Additional binds for the singularity container"
+    )
     venv: str = Field(
         default="singularity", description="Virtual environment/container system"
     )
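A reduced sketch of the three new optional fields in isolation, assuming Pydantic v2 (`model_dump` is a v2 method); the model name and values here are illustrative, not part of vec-inf:

```python
from typing import Optional

from pydantic import BaseModel, Field

class SlurmExtras(BaseModel):
    """Reduced stand-in for ModelConfig showing only the new fields."""

    exclude: Optional[str] = Field(default=None, description="Nodes to exclude")
    node_list: Optional[str] = Field(default=None, description="Specific nodes to request")
    bind: Optional[str] = Field(default=None, description="Extra singularity binds")

cfg = SlurmExtras(exclude="gpu[001-004]", bind="/scratch:/scratch")
print(cfg.model_dump(exclude_none=True))  # {'exclude': 'gpu[001-004]', 'bind': '/scratch:/scratch'}
```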
vec_inf/client/models.py CHANGED
@@ -170,6 +170,12 @@ class LaunchOptions:
         Quality of Service level
     time : str, optional
         Time limit for the job
+    exclude : str, optional
+        Exclude certain nodes from the resources granted to the job
+    node_list : str, optional
+        Request a specific list of nodes for deployment
+    bind : str, optional
+        Additional binds for the singularity container
     vocab_size : int, optional
         Size of model vocabulary
     data_type : str, optional
@@ -191,6 +197,9 @@
     gpus_per_node: Optional[int] = None
     account: Optional[str] = None
     qos: Optional[str] = None
+    exclude: Optional[str] = None
+    node_list: Optional[str] = None
+    bind: Optional[str] = None
     time: Optional[str] = None
     vocab_size: Optional[int] = None
     data_type: Optional[str] = None
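`LaunchOptions` collects per-launch overrides as optional fields, so the new scheduling knobs sit alongside the existing ones. A usage sketch with hypothetical values, assuming keyword construction as for a dataclass:

```python
from vec_inf.client.models import LaunchOptions

options = LaunchOptions(
    account="my-account",      # hypothetical Slurm account
    qos="m2",                  # hypothetical QoS tier
    exclude="gpu001,gpu002",   # keep these nodes out of the allocation
    node_list="gpu[010-013]",  # or pin the job to specific nodes
    bind="/scratch:/scratch",  # extra singularity bind paths
)
```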
vec_inf/config/models.yaml CHANGED
@@ -14,7 +14,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 8192
       --max-num-seqs: 256
-      --compilation-config: 3
   c4ai-command-r-plus-08-2024:
     model_family: c4ai-command-r
     model_variant: plus-08-2024
@@ -30,7 +29,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 65536
       --max-num-seqs: 256
-      --compilation-config: 3
   c4ai-command-r-08-2024:
     model_family: c4ai-command-r
     model_variant: 08-2024
@@ -494,7 +492,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 16384
       --max-num-seqs: 256
-      --compilation-config: 3
   Mistral-7B-Instruct-v0.1:
     model_family: Mistral
     model_variant: 7B-Instruct-v0.1
@@ -566,7 +563,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 32768
       --max-num-seqs: 256
-      --compilation-config: 3
   Mistral-Large-Instruct-2411:
     model_family: Mistral
     model_variant: Large-Instruct-2411
@@ -582,7 +578,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 32768
       --max-num-seqs: 256
-      --compilation-config: 3
   Mixtral-8x7B-Instruct-v0.1:
     model_family: Mixtral
     model_variant: 8x7B-Instruct-v0.1
@@ -613,7 +608,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 65536
       --max-num-seqs: 256
-      --compilation-config: 3
   Mixtral-8x22B-Instruct-v0.1:
     model_family: Mixtral
     model_variant: 8x22B-Instruct-v0.1
@@ -629,7 +623,6 @@
       --tensor-parallel-size: 4
       --max-model-len: 65536
       --max-num-seqs: 256
-      --compilation-config: 3
   Phi-3-medium-128k-instruct:
     model_family: Phi-3
     model_variant: medium-128k-instruct
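All of the `--compilation-config: 3` removals above target large multi-GPU models; they line up with the note added to the README below: setting `--compilation-config` to `3` currently breaks multi-node model launches, so it is no longer set for models that span multiple nodes.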
vec_inf-0.6.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vec-inf
-Version: 0.6.0
+Version: 0.6.1
 Summary: Efficient LLM inference on Slurm clusters using vLLM.
 Author-email: Marshall Wang <marshall.wang@vectorinstitute.ai>
 License-Expression: MIT
@@ -29,6 +29,7 @@ Description-Content-Type: text/markdown
 [![code checks](https://github.com/VectorInstitute/vector-inference/actions/workflows/code_checks.yml/badge.svg)](https://github.com/VectorInstitute/vector-inference/actions/workflows/code_checks.yml)
 [![docs](https://github.com/VectorInstitute/vector-inference/actions/workflows/docs.yml/badge.svg)](https://github.com/VectorInstitute/vector-inference/actions/workflows/docs.yml)
 [![codecov](https://codecov.io/github/VectorInstitute/vector-inference/branch/main/graph/badge.svg?token=NI88QSIGAC)](https://app.codecov.io/github/VectorInstitute/vector-inference/tree/main)
+[![vLLM](https://img.shields.io/badge/vllm-0.8.5.post1-blue)](https://docs.vllm.ai/en/v0.8.5.post1/index.html)
 ![GitHub License](https://img.shields.io/github/license/VectorInstitute/vector-inference)
 
 This repository provides an easy-to-use solution to run inference servers on [Slurm](https://slurm.schedmd.com/overview.html)-managed computing clusters using [vLLM](https://docs.vllm.ai/en/latest/). **All scripts in this repository run natively on the Vector Institute cluster environment.** To adapt to other environments, update the environment variables in [`vec_inf/client/slurm_vars.py`](vec_inf/client/slurm_vars.py) and the model config for cached model weights in [`vec_inf/config/models.yaml`](vec_inf/config/models.yaml) accordingly.
@@ -39,7 +40,7 @@ If you are using the Vector cluster environment, and you don't need any customization
 ```bash
 pip install vec-inf
 ```
-Otherwise, we recommend using the provided [`Dockerfile`](Dockerfile) to set up your own environment with the package
+Otherwise, we recommend using the provided [`Dockerfile`](Dockerfile) to set up your own environment with the package. The latest image has `vLLM` version `0.8.5.post1`.
 
 ## Usage
 
@@ -107,7 +108,7 @@ models:
     vllm_args:
       --max-model-len: 1010000
       --max-num-seqs: 256
-      --compilation-confi: 3
+      --compilation-config: 3
 ```
 
 You would then set the `VEC_INF_CONFIG` path using:
@@ -116,7 +117,11 @@ You would then set the `VEC_INF_CONFIG` path using:
 export VEC_INF_CONFIG=/h/<username>/my-model-config.yaml
 ```
 
-Note that there are other parameters that can also be added to the config but not shown in this example, check the [`ModelConfig`](vec_inf/client/config.py) for details.
+**NOTE**
+* There are other parameters that can also be added to the config but are not shown in this example; check [`ModelConfig`](vec_inf/client/config.py) for details.
+* Check [vLLM Engine Arguments](https://docs.vllm.ai/en/stable/serving/engine_args.html) for the full list of available vLLM engine arguments; the default size for any parallelization dimension is 1, so none of the sizes are set explicitly in this example.
+* For GPU partitions with non-Ampere architectures, e.g. `rtx6000` and `t4v2`, BF16 isn't supported. For models that default to BF16, use FP16 on these GPUs instead, i.e. `--dtype: float16`.
+* Setting `--compilation-config` to `3` currently breaks multi-node model launches, so we don't set it for models that require multiple nodes of GPUs.
 
 #### Other commands
 
@@ -182,8 +187,9 @@ Once the inference server is ready, you can start sending in inference requests.
     },
     "prompt_logprobs":null
 }
+
 ```
-**NOTE**: For multimodal models, currently only `ChatCompletion` is available, and only one image can be provided for each prompt.
+**NOTE**: Certain models don't adhere to OpenAI's chat template, e.g. the Mistral family. For these models, you can either change your prompt to follow the model's default chat template or provide your own chat template via `--chat-template: TEMPLATE_PATH`.
 
 ## SSH tunnel from your local device
 If you want to run inference from your local device, you can open an SSH tunnel to your cluster environment like the following:
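Since the server exposes an OpenAI-compatible API via vLLM, the same completion request can be sent with the OpenAI Python client. A minimal sketch, assuming the `openai` v1 package; the base URL and model name are placeholders for your deployment:

```python
from openai import OpenAI

# Point the client at the node and port where your server is running;
# the URL and model name below are placeholders.
client = OpenAI(base_url="http://gpu123:8080/v1", api_key="EMPTY")

response = client.completions.create(
    model="Meta-Llama-3.1-8B-Instruct",
    prompt="Where is the capital of Canada?",
    max_tokens=20,
)
print(response.choices[0].text)
```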
vec_inf-0.6.1.dist-info/RECORD CHANGED
@@ -2,24 +2,24 @@ vec_inf/README.md,sha256=3ocJHfV3kRftXFUCdHw3B-p4QQlXuNqkHnjPPNkCgfM,543
 vec_inf/__init__.py,sha256=bHwSIz9lebYuxIemni-lP0h3gwJHVbJnwExQKGJWw_Q,23
 vec_inf/find_port.sh,sha256=bGQ6LYSFVSsfDIGatrSg5YvddbZfaPL0R-Bjo4KYD6I,1088
 vec_inf/cli/__init__.py,sha256=5XIvGQCOnaGl73XMkwetjC-Ul3xuXGrWDXdYJ3aUzvU,27
-vec_inf/cli/_cli.py,sha256=bqyLvFK4Vqoh-wAaUPg50_qYbrW-c9Cl_-YySgVk5_M,9871
+vec_inf/cli/_cli.py,sha256=pqZeQr5WxAsV7KSYcUnx_mRL7RnHWk1zf9CcW_ct5uI,10663
 vec_inf/cli/_helper.py,sha256=i1QvJeIT3z7me6bv2Vot5c3NY555Dgo3q8iRlxhOlZ4,13047
 vec_inf/cli/_utils.py,sha256=23vSbmvNOWY1-W1aOAwYqNDkDDmx-5UVlCiXAtxUZ8A,1057
 vec_inf/cli/_vars.py,sha256=V6DrJs_BuUa4yNcbBSSnMwpcyXwEBsizy3D0ubIg2fA,777
 vec_inf/client/__init__.py,sha256=OLlUJ4kL1R-Kh-nXNbvKlAZ3mtHcnozHprVufkVCNWk,739
-vec_inf/client/_client_vars.py,sha256=eVQjpuASd8beBjAeAbQnMRZM8nCLZMHx-x62BcXVnYA,7163
+vec_inf/client/_client_vars.py,sha256=KG-xImVIzJH3aj5nMUzT9w9LpH-7YGrOew6N77Fj0Js,7638
 vec_inf/client/_exceptions.py,sha256=94Nx_5k1SriJNXzbdnwyXFZolyMutydU08Gsikawzzo,749
-vec_inf/client/_helper.py,sha256=76OTCroNR5e3e7T2qSV_tkexDaUQsJrs8bFiMJ5NaxU,22718
-vec_inf/client/_slurm_script_generator.py,sha256=jFgr2Pu7b_Uqli3DBvxUr9MI1-3TA6wwxg07O2rTwPs,6299
+vec_inf/client/_helper.py,sha256=DcEFogbrSb4A8Kc2zixNZNL4nt4iswPk2n5blZgwEWQ,22338
+vec_inf/client/_slurm_script_generator.py,sha256=XYCsadCLDEu9KrrjrNCNgoc0ITmjys9u7yWR9PkFAos,6376
 vec_inf/client/_utils.py,sha256=1dB2O1neEhZNk6MJbBybLQm42vsmEevA2TI0F_kGi0o,8796
 vec_inf/client/api.py,sha256=TYn4lP5Ene8MEuXWYo6ZbGYw9aPnaMlT32SH7jLCifM,9605
-vec_inf/client/config.py,sha256=kOhxoepsvArxRFNlwq1sLDHsxDewLwxRV1VwsL0MrGU,4683
-vec_inf/client/models.py,sha256=JZDUMBX3XKOClaq_yJUpDUSgiDy42nT5Dq5bxQWiO2I,5778
+vec_inf/client/config.py,sha256=lPVHwiaGZjKd5M9G7vcsk3DMausFP_telq3JQngBkH8,5080
+vec_inf/client/models.py,sha256=qjocUa5egJTVeVF3962kYOecs1dTaEb2e6TswkYFXM0,6141
 vec_inf/client/slurm_vars.py,sha256=lroK41L4gEVVZNxxE3bEpbKsdMwnH79-7iCKd4zWEa4,1069
 vec_inf/config/README.md,sha256=OlgnD_Ojei_xLkNyS7dGvYMFUzQFqjVRVw0V-QMk_3g,17863
-vec_inf/config/models.yaml,sha256=PR91vOzINVOkAco9S-_VIXQ5Un6ekeoWz2Pj8DMR8LQ,29630
-vec_inf-0.6.0.dist-info/METADATA,sha256=-xadTsrAR3tOfPyxTdGB9DLuhWMu_mnp_JF5Aa-1-08,9755
-vec_inf-0.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-vec_inf-0.6.0.dist-info/entry_points.txt,sha256=uNRXjCuJSR2nveEqD3IeMznI9oVI9YLZh5a24cZg6B0,49
-vec_inf-0.6.0.dist-info/licenses/LICENSE,sha256=mq8zeqpvVSF1EsxmydeXcokt8XnEIfSofYn66S2-cJI,1073
-vec_inf-0.6.0.dist-info/RECORD,,
+vec_inf/config/models.yaml,sha256=xImSOjG9yL6LqqYkSLL7_wBZhqKM10-eFaQJ82gP4ig,29420
+vec_inf-0.6.1.dist-info/METADATA,sha256=0YHT8rhEZINfmMF1hQBqU0HBpRbwX-1IeqY_Mla4g28,10682
+vec_inf-0.6.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+vec_inf-0.6.1.dist-info/entry_points.txt,sha256=uNRXjCuJSR2nveEqD3IeMznI9oVI9YLZh5a24cZg6B0,49
+vec_inf-0.6.1.dist-info/licenses/LICENSE,sha256=mq8zeqpvVSF1EsxmydeXcokt8XnEIfSofYn66S2-cJI,1073
+vec_inf-0.6.1.dist-info/RECORD,,