gpuq-1.3.0.tar.gz

gpuq-1.3.0/LICENSE ADDED
@@ -0,0 +1,19 @@
+ Copyright © 2025 Mako
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the “Software”), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
gpuq-1.3.0/PKG-INFO ADDED
@@ -0,0 +1,121 @@
+ Metadata-Version: 2.4
+ Name: gpuq
+ Version: 1.3.0
+ Summary: A multi-vendor GPU querying utility with minimal dependencies
+ Home-page: https://github.com/makodevai/gpuq
+ Download-URL: https://github.com/makodevai/gpuq
+ Author: Mako
+ Author-email: support@mako.dev
+ Requires-Python: >=3.10.0
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Provides-Extra: dev
+ Requires-Dist: GitPython; extra == "dev"
+ Requires-Dist: mypy; extra == "dev"
+ Requires-Dist: black; extra == "dev"
+ Requires-Dist: pytest; extra == "dev"
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: download-url
+ Dynamic: home-page
+ Dynamic: license-file
+ Dynamic: provides-extra
+ Dynamic: requires-python
+ Dynamic: summary
+
+ # *gpuq* - multi-vendor *GPU* *q*uerying utility with minimal dependencies
+
+ This small library is a direct answer to the lack of a lightweight, cross-compatible utility for querying available GPUs - regardless of which vendor, distro, or overall environment one might be using.
+
+ In particular, the implementation meets the following requirements:
+ - works with multiple downstream runtimes (currently supported: CUDA and HIP)
+   - it will also work if you have multiple runtimes present at the same time (do you really, though?)
+ - no build- or install-time dependencies (including any Python packages)
+ - any runtime dependencies are soft - unless the user explicitly asks for the status/presence of a particular downstream runtime, most methods fail silently
+   - consequently, the package should install and run on pretty much any machine
+   - your laptop does not have a GPU? -> the package will report 0 GPUs available (duh), with no exceptions, linker errors, etc.
+ - allows for easy mocking (for unit tests, etc.; see the mocking example below)
+ - fully typed (conforms to `mypy --strict` checking)
+
+ Compared to some existing alternatives, it differs as follows:
+ - `torch.cuda` - not lightweight, and requires a different wheel for NVIDIA and HIP
+ - `gputil` - NVIDIA specific, also has broken dependencies (as of 2025)
+ - `gpuinfo` - NVIDIA specific, broken `import gpuinfo`...
+ - `gpuinfonv` - NVIDIA specific, requires pynvml
+ - `pyamdgpuinfo` - AMD specific
+ - `igpu` - NVIDIA specific, broken installation (as of 2025)
+ - and so on...
+
+ The primary functionality offered is:
+ - check how many GPUs are available
+ - query properties of each available device - this reports some basic info about the provider (CUDA/HIP) and other info similar to `cudaGetDeviceProperties`
+   - the returned set of properties is not comprehensive, though
+ - respects `*_VISIBLE_DEVICES` and provides a mapping between local (visible) and global indices
+   - **NOTE: this temporarily modifies env variables and is therefore not thread-safe**
+ - if requested, lazily provides some runtime information about each GPU as well
+   - in particular, the PIDs of processes using the GPU are returned
+   - NOTE: this is currently done rather naively, by parsing the output of tools like `nvidia-smi` or `rocm-smi`
+ - allows checking for runtime errors that occurred while trying to load a runtime (see the provider-check example below)
+
+ ### How it works
+
+ The implementation attempts to dynamically lazy-load `libcudart.so` and `libamdhip64.so` at runtime.
+ For GPUs to be properly reported, the libraries have to be findable by the dynamic linker at the moment any relevant function call is made for the first time.
+ (If a library fails to load, loading is retried every time a function call is made.)
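+
+ As a rough illustration, the loading strategy is similar to the `ctypes` sketch below. This is a simplified approximation rather than the package's actual code (the real loader lives in the compiled `C` extension), and the helper names `_try_load` and `cuda_device_count` are invented for the example:
+
+ ```python
+ import ctypes
+
+ _libs: dict[str, ctypes.CDLL | None] = {}
+
+ def _try_load(libname: str) -> ctypes.CDLL | None:
+     # Re-attempt the load on every call until it succeeds, mirroring the
+     # retry behaviour described above.
+     if _libs.get(libname) is None:
+         try:
+             _libs[libname] = ctypes.CDLL(libname)  # delegates to the dynamic linker
+         except OSError:
+             _libs[libname] = None
+     return _libs[libname]
+
+ def cuda_device_count() -> int:
+     lib = _try_load("libcudart.so")
+     if lib is None:
+         return 0  # no CUDA runtime found -> report zero GPUs, fail silently
+     count = ctypes.c_int(0)
+     # cudaGetDeviceCount returns a cudaError_t status code (0 == success).
+     if lib.cudaGetDeviceCount(ctypes.byref(count)) != 0:
+         return 0
+     return count.value
+ ```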
+
+ ## Examples
+
+ Install with:
+ ```bash
+ pip install gpuq
+ ```
+
+ Return the number of available GPUs:
+ ```python
+ import gpuq as G
+
+ print(G.count())  # this includes GPUs from all providers, disregarding *_VISIBLE_DEVICES
+ print(G.count(visible_only=True))  # only visible GPUs from all providers
+ print(G.count(provider=G.Provider.HIP, visible_only=True))  # only visible HIP devices
+ # etc.
+ ```
+
+ Return a list of GPU properties:
+ ```python
+ import gpuq as G
+
+ for gpu in G.query(visible_only=True, provider=G.Provider.ANY):
+     print(gpu.name)
+
+ # return all visible GPUs, but raise an error if no CUDA devices are present
+ # (note: the required check is done against the global set, not the returned set - see the docs)
+ gpus = G.query(visible_only=True, provider=G.Provider.ANY, required=G.Provider.CUDA)
+ ```
+
+ Provide a mapping between local and global GPU indices:
+ ```python
+ import gpuq as G
+
+ # assume a system with 8 GPUs and CUDA_VISIBLE_DEVICES=1,7
+ for gpu in G.query():  # by default, only visible GPUs are returned
+     print(gpu.index, gpu.system_index)
+
+ # should print:
+ # 0 1
+ # 1 7
+
+ for gpu in G.query(visible_only=False):
+     print(gpu.index, gpu.system_index, gpu.is_visible)
+
+ # should print:
+ # None 0 False
+ # 0 1 True
+ # None 2 False
+ # None 3 False
+ # None 4 False
+ # None 5 False
+ # None 6 False
+ # 1 7 True
+ ```
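+
+ Check whether a particular runtime could be loaded at all (`hascuda`, `checkamd`, `hasprovider` and `checkprovider` are part of the public API; the comments describe the expected, not guaranteed, output):
+ ```python
+ import gpuq as G
+
+ print(G.hascuda())   # True if the CUDA runtime loaded cleanly
+ print(G.checkamd())  # "" on success, otherwise the loader/runtime error message
+ if not G.hasprovider(G.Provider.HIP):
+     print("HIP unavailable:", G.checkprovider(G.Provider.HIP))
+ ```
+
+ Mock GPUs for unit tests. This sketch is based on `mock()`'s signature and assumes the mocked CUDA and HIP counts simply add up; setting the `MAKO_MOCK_GPU` environment variable instead makes the mock implementation the process-wide default:
+ ```python
+ import gpuq as G
+
+ fake = G.mock(cuda_count=2, hip_count=1, name="{} Test Device")
+ print(G.count(impl=fake))  # expected: 3 (2 CUDA + 1 HIP)
+ for gpu in G.query(visible_only=False, impl=fake):
+     print(gpu.name)
+ ```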
gpuq-1.3.0/README.md ADDED
@@ -0,0 +1,94 @@
+ # *gpuq* - multi-vendor *GPU* *q*uerying utility with minimal dependencies
+
+ This small library is a direct answer to the lack of a lightweight, cross-compatible utility for querying available GPUs - regardless of which vendor, distro, or overall environment one might be using.
+
+ In particular, the implementation meets the following requirements:
+ - works with multiple downstream runtimes (currently supported: CUDA and HIP)
+   - it will also work if you have multiple runtimes present at the same time (do you really, though?)
+ - no build- or install-time dependencies (including any Python packages)
+ - any runtime dependencies are soft - unless the user explicitly asks for the status/presence of a particular downstream runtime, most methods fail silently
+   - consequently, the package should install and run on pretty much any machine
+   - your laptop does not have a GPU? -> the package will report 0 GPUs available (duh), with no exceptions, linker errors, etc.
+ - allows for easy mocking (for unit tests, etc.; see the mocking example below)
+ - fully typed (conforms to `mypy --strict` checking)
+
+ Compared to some existing alternatives, it differs as follows:
+ - `torch.cuda` - not lightweight, and requires a different wheel for NVIDIA and HIP
+ - `gputil` - NVIDIA specific, also has broken dependencies (as of 2025)
+ - `gpuinfo` - NVIDIA specific, broken `import gpuinfo`...
+ - `gpuinfonv` - NVIDIA specific, requires pynvml
+ - `pyamdgpuinfo` - AMD specific
+ - `igpu` - NVIDIA specific, broken installation (as of 2025)
+ - and so on...
+
+ The primary functionality offered is:
+ - check how many GPUs are available
+ - query properties of each available device - this reports some basic info about the provider (CUDA/HIP) and other info similar to `cudaGetDeviceProperties`
+   - the returned set of properties is not comprehensive, though
+ - respects `*_VISIBLE_DEVICES` and provides a mapping between local (visible) and global indices
+   - **NOTE: this temporarily modifies env variables and is therefore not thread-safe**
+ - if requested, lazily provides some runtime information about each GPU as well
+   - in particular, the PIDs of processes using the GPU are returned
+   - NOTE: this is currently done rather naively, by parsing the output of tools like `nvidia-smi` or `rocm-smi`
+ - allows checking for runtime errors that occurred while trying to load a runtime (see the provider-check example below)
+
+ ### How it works
+
+ The implementation attempts to dynamically lazy-load `libcudart.so` and `libamdhip64.so` at runtime.
+ For GPUs to be properly reported, the libraries have to be findable by the dynamic linker at the moment any relevant function call is made for the first time.
+ (If a library fails to load, loading is retried every time a function call is made.)
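+
+ As a rough illustration, the loading strategy is similar to the `ctypes` sketch below. This is a simplified approximation rather than the package's actual code (the real loader lives in the compiled `C` extension), and the helper names `_try_load` and `cuda_device_count` are invented for the example:
+
+ ```python
+ import ctypes
+
+ _libs: dict[str, ctypes.CDLL | None] = {}
+
+ def _try_load(libname: str) -> ctypes.CDLL | None:
+     # Re-attempt the load on every call until it succeeds, mirroring the
+     # retry behaviour described above.
+     if _libs.get(libname) is None:
+         try:
+             _libs[libname] = ctypes.CDLL(libname)  # delegates to the dynamic linker
+         except OSError:
+             _libs[libname] = None
+     return _libs[libname]
+
+ def cuda_device_count() -> int:
+     lib = _try_load("libcudart.so")
+     if lib is None:
+         return 0  # no CUDA runtime found -> report zero GPUs, fail silently
+     count = ctypes.c_int(0)
+     # cudaGetDeviceCount returns a cudaError_t status code (0 == success).
+     if lib.cudaGetDeviceCount(ctypes.byref(count)) != 0:
+         return 0
+     return count.value
+ ```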
+
+ ## Examples
+
+ Install with:
+ ```bash
+ pip install gpuq
+ ```
+
+ Return the number of available GPUs:
+ ```python
+ import gpuq as G
+
+ print(G.count())  # this includes GPUs from all providers, disregarding *_VISIBLE_DEVICES
+ print(G.count(visible_only=True))  # only visible GPUs from all providers
+ print(G.count(provider=G.Provider.HIP, visible_only=True))  # only visible HIP devices
+ # etc.
+ ```
+
+ Return a list of GPU properties:
+ ```python
+ import gpuq as G
+
+ for gpu in G.query(visible_only=True, provider=G.Provider.ANY):
+     print(gpu.name)
+
+ # return all visible GPUs, but raise an error if no CUDA devices are present
+ # (note: the required check is done against the global set, not the returned set - see the docs)
+ gpus = G.query(visible_only=True, provider=G.Provider.ANY, required=G.Provider.CUDA)
+ ```
+
+ Provide a mapping between local and global GPU indices:
+ ```python
+ import gpuq as G
+
+ # assume a system with 8 GPUs and CUDA_VISIBLE_DEVICES=1,7
+ for gpu in G.query():  # by default, only visible GPUs are returned
+     print(gpu.index, gpu.system_index)
+
+ # should print:
+ # 0 1
+ # 1 7
+
+ for gpu in G.query(visible_only=False):
+     print(gpu.index, gpu.system_index, gpu.is_visible)
+
+ # should print:
+ # None 0 False
+ # 0 1 True
+ # None 2 False
+ # None 3 False
+ # None 4 False
+ # None 5 False
+ # None 6 False
+ # 1 7 True
+ ```
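+
+ Check whether a particular runtime could be loaded at all (`hascuda`, `checkamd`, `hasprovider` and `checkprovider` are part of the public API; the comments describe the expected, not guaranteed, output):
+ ```python
+ import gpuq as G
+
+ print(G.hascuda())   # True if the CUDA runtime loaded cleanly
+ print(G.checkamd())  # "" on success, otherwise the loader/runtime error message
+ if not G.hasprovider(G.Provider.HIP):
+     print("HIP unavailable:", G.checkprovider(G.Provider.HIP))
+ ```
+
+ Mock GPUs for unit tests. This sketch is based on `mock()`'s signature and assumes the mocked CUDA and HIP counts simply add up; setting the `MAKO_MOCK_GPU` environment variable instead makes the mock implementation the process-wide default:
+ ```python
+ import gpuq as G
+
+ fake = G.mock(cuda_count=2, hip_count=1, name="{} Test Device")
+ print(G.count(impl=fake))  # expected: 3 (2 CUDA + 1 HIP)
+ for gpu in G.query(visible_only=False, impl=fake):
+     print(gpu.name)
+ ```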
gpuq-1.3.0/gpuq/C.pyi ADDED
@@ -0,0 +1,25 @@
+ class Properties:
+     ord: int
+     uuid: str
+     provider: str
+     index: int
+     name: str
+     major: int
+     minor: int
+     total_memory: int
+     sms_count: int
+     sm_threads: int
+     sm_shared_memory: int
+     sm_blocks: int
+     block_threads: int
+     block_shared_memory: int
+     warp_size: int
+     l2_cache_size: int
+     concurrent_threads: bool
+     async_engines_count: int
+     cooperative: bool
+
+ def checkcuda() -> str: ...
+ def checkamd() -> str: ...
+ def count() -> int: ...
+ def get(index: int) -> Properties: ...
@@ -0,0 +1,376 @@
+ import os
+ from threading import local
+ from contextlib import contextmanager
+ from typing import Generator, Literal
+
+ from .datatypes import Provider, Properties
+ from .impl import Implementation, GenuineImplementation, MockImplementation
+ from .utils import add_module_properties, staticproperty, default, int_or_none, int_list
+
+
+ _current_implementation = local()
+ _default_impl = None
+
+
+ default_impl: Implementation
+
+
+ def _get_default_impl() -> Implementation:
+     global _default_impl
+     if _default_impl is not None:
+         return _default_impl
+
+     _impl_name = os.environ.get("MAKO_MOCK_GPU", "").strip().lower()
+     if not _impl_name or _impl_name in ["0", "false", "no", "none"]:
+         _default_impl = genuine()
+     else:
+         _default_impl = mock()
+
+     return _default_impl
+
+
+ def _get_impl() -> Implementation:
+     return getattr(_current_implementation, "value", _get_default_impl())
+
+
+ def _set_impl(impl: Implementation | None) -> Implementation:
+     current = _get_impl()
+     _current_implementation.value = impl if impl is not None else _get_default_impl()
+     return current
+
+
+ @contextmanager
+ def _with_impl(impl: Implementation | None) -> Generator[Implementation, None, None]:
+     if impl is None:
+         impl = _get_default_impl()
+     curr = _set_impl(impl)
+     try:
+         yield impl
+     finally:
+         _set_impl(curr)
+
+
+ def _global_to_visible(system_index: int, visible: list[int] | None) -> int | None:
+     if visible is None:
+         return system_index
+
+     try:
+         return visible.index(system_index)
+     except ValueError:
+         return None
+
+
+ def query(
+     provider: Provider | None = Provider.any(),
+     required: Provider | None | Literal[True] = None,
+     visible_only: bool = True,
+     impl: Implementation | None = None,
+ ) -> list[Properties]:
+     """Return a list of all GPUs matching the provided criteria.
+
+     ``provider`` should be a bitwise-or'ed mask of providers whose GPUs should
+     be returned. The values ``ALL``, ``ANY`` and ``None`` all mean that
+     all providers should be included when returning GPUs.
+
+     ``required`` is another bitwise-or'ed mask of providers that can additionally
+     be used to make the function raise an error (RuntimeError) if GPUs of
+     a particular provider are not present:
+     - ``None`` means nothing is required
+     - ``True`` means that at least one GPU should be returned
+     - ``ANY`` means at least one GPU should be present (but not necessarily returned,
+       see the note below)
+     - anything else (including ``ALL``) means that at least one GPU of each provider
+       included in the mask has to be present.
+
+     > **Note:** ``required`` and ``provider`` are mostly independent. For example,
+     > a call like ``query(provider=CUDA, required=HIP)`` is valid and will raise an
+     > error if there are no HIP devices but will only return CUDA devices (potentially
+     > an empty list). This means ``required=ANY`` might be a bit counter-intuitive,
+     > since it will only fail if there are no GPUs whatsoever on the system.
+     > The only exception to this rule is the ``required=True`` case, which
+     > can be understood as "make sure at least one GPU is returned", while
+     > taking the provided ``provider`` value into account.
+
+     If ``visible_only`` is True, any processing of GPUs by this function (including
+     the provider and GPU checks described above) will only consider GPUs that are
+     visible according to the relevant *_VISIBLE_DEVICES environment variable.
+     Otherwise the variables are ignored and all GPUs are always considered.
+
+     > **Note:** the implementation will temporarily remove any *_VISIBLE_DEVICES variables
+     > when obtaining information about GPUs, regardless of the ``visible_only`` argument.
+     > This might cause race conditions if the variables are also used/modified by other
+     > parts of the system at the same time. Please keep this in mind when using it.
+     """
+     nonempty = False
+     if required is True:
+         required = Provider.any()
+         nonempty = True
+
+     if provider == Provider.any() or provider is None:
+         provider = Provider.all()
+
+     if impl is None:
+         impl = _get_impl()
+
+     if required:
+         for p in Provider:
+             if p & required:
+                 if err := impl.provider_check(p):
+                     raise RuntimeError(
+                         f"Provider {p.name} is required but the relevant runtime is missing from the system or failed to load, error: {err}!"
+                     )
+
+     with impl.save_visible() as visible:
+         num = impl.c_count()
+
+         if not num:
+             if required is not None or nonempty:
+                 raise RuntimeError("No GPUs detected")
+             return []
+
+         ret = []
+
+         for idx in range(num):
+             dev = impl.c_get(idx)
+             prov = Provider[dev.provider]
+
+             visible_set = visible.get(prov)
+             local_index = _global_to_visible(dev.index, visible_set)
+             if visible_only and local_index is None:  # not visible
+                 continue
+
+             if required is not None and prov & required:
+                 required &= ~prov  # mark the current provider as no longer required
+
+             if provider & prov:
+                 ret.append(Properties(dev, local_index, impl))
+
+     if required:
+         missing = [p for p in Provider if p & required]
+         raise RuntimeError(
+             f"GPUs of the following required providers could not be found: {missing}"
+         )
+
+     if not ret and nonempty:
+         raise RuntimeError("No suitable GPUs detected")
+
+     return ret
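+
+
+ # Example (illustrative, not from the original source): per the docstring above,
+ # ``provider`` filters what is returned while ``required`` is checked against
+ # what exists on the system, so the call below may return an empty list while
+ # still raising if no HIP devices are present:
+ #
+ #   gpus = query(provider=Provider.CUDA, required=Provider.HIP)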
+
+
+ def count(
+     provider: Provider | None = Provider.all(),
+     visible_only: bool = False,
+     impl: Implementation | None = None,
+ ) -> int:
+     """Return the total number of GPUs for the specified provider (by default all providers).
+
+     ``provider`` can be a bitwise-or'ed mask of valid providers.
+     If ``visible_only`` is True, return the number of matching GPUs that are visible
+     according to the *_VISIBLE_DEVICES environment variables. Otherwise the number
+     of all GPUs matching the criteria is returned.
+
+     > **Note:** the implementation will temporarily remove any *_VISIBLE_DEVICES variables
+     > when obtaining information about GPUs, if ``visible_only`` is False.
+     > This might cause race conditions if the variables are also used/modified by other
+     > parts of the system at the same time. Please keep this in mind when using it.
+     """
+     if provider == Provider.any() or provider is None:
+         provider = Provider.all()
+
+     if impl is None:
+         impl = _get_impl()
+
+     if provider == Provider.all():
+         if visible_only:
+             return impl.c_count()
+         else:
+             with impl.save_visible():
+                 return impl.c_count()
+     else:
+         return len(
+             query(
+                 provider=provider, required=None, visible_only=visible_only, impl=impl
+             )
+         )
+
+
+ def get(
+     idx: int,
+     provider: Provider | None = Provider.all(),
+     visible_only: bool = False,
+     impl: Implementation | None = None,
+ ) -> Properties:
+     """Return the ``idx``-th GPU from the list of GPUs for the specified provider(s).
+     If ``visible_only`` is True, only devices visible according to the *_VISIBLE_DEVICES
+     environment variables are considered for indexing (see ``count``).
+
+     > **Note:** the implementation will temporarily remove any *_VISIBLE_DEVICES variables
+     > when obtaining information about GPUs, regardless of the ``visible_only`` argument.
+     > This might cause race conditions if the variables are also used/modified by other
+     > parts of the system at the same time. Please keep this in mind when using it.
+     """
+     if provider == Provider.any() or provider is None:
+         provider = Provider.all()
+
+     if impl is None:
+         impl = _get_impl()
+
+     if provider == Provider.all() and not visible_only:
+         with impl.save_visible() as visible:
+             cobj = impl.c_get(idx)
+             prov = Provider[cobj.provider]
+             visible_set = visible.get(prov)
+             local_index = _global_to_visible(cobj.index, visible_set)
+             return Properties(cobj, local_index, impl)
+     else:
+         ret: list[Properties] = query(
+             provider=provider, required=None, visible_only=visible_only, impl=impl
+         )
+         if not ret:
+             raise RuntimeError("No GPUs available")
+         if idx < 0 or idx >= len(ret):
+             raise IndexError("Invalid GPU index")
+
+         return ret[idx]
+
+
+ def checkprovider(p: Provider, impl: Implementation | None = None) -> str:
+     """Return an error string if a runtime error occurred while checking for
+     the presence of the given provider. Otherwise return an empty string.
+
+     Runtime errors include any dynamic linker errors, as well as errors
+     originating from the relevant downstream runtime, which occurred
+     while querying the number of available GPUs.
+     """
+     if impl is None:
+         impl = _get_impl()
+     return impl.provider_check(p)
+
+
+ def checkcuda(impl: Implementation | None = None) -> str:
+     """Shorthand for `checkprovider(Provider.CUDA)`"""
+     if impl is None:
+         impl = _get_impl()
+     return impl.provider_check(Provider.CUDA)
+
+
+ def checkamd(impl: Implementation | None = None) -> str:
+     """Shorthand for `checkprovider(Provider.HIP)`"""
+     if impl is None:
+         impl = _get_impl()
+     return impl.provider_check(Provider.HIP)
+
+
+ def hasprovider(p: Provider, impl: Implementation | None = None) -> bool:
+     """Return True if the given provider is available on the system.
+     This does not yet mean that any devices from that provider are present.
+
+     Calling this function is equivalent to checking `checkprovider(p) == ""`.
+     """
+     return not checkprovider(p, impl)
+
+
+ def hascuda(impl: Implementation | None = None) -> bool:
+     return not checkcuda(impl)
+
+
+ def hasamd(impl: Implementation | None = None) -> bool:
+     return not checkamd(impl)
+
+
+ def mock(
+     cuda_count: int | None | default = default(1, "MAKO_MOCK_GPU_CUDA", int_or_none),
+     hip_count: int | None | default = default(None, "MAKO_MOCK_GPU_HIP", int_or_none),
+     cuda_visible: list[int] | None | default = default(
+         None, "CUDA_VISIBLE_DEVICES", int_list
+     ),
+     hip_visible: list[int] | None | default = default(
+         None, "HIP_VISIBLE_DEVICES", int_list
+     ),
+     name: str | default = default("{} Mock Device", "MAKO_MOCK_GPU_NAME", str),
+     major: int = 1,
+     minor: int = 2,
+     total_memory: int = 8 * 1024**3,
+     sms_count: int = 12,
+     sm_threads: int = 2048,
+     sm_shared_memory: int = 16 * 1024,
+     sm_registers: int = 512,
+     sm_blocks: int = 4,
+     block_threads: int = 1024,
+     block_shared_memory: int = 8 * 1024,
+     block_registers: int = 256,
+     warp_size: int = 32,
+     l2_cache_size: int = 8 * 1024**2,
+     concurrent_kernels: bool = True,
+     async_engines_count: int = 0,
+     cooperative: bool = True,
+     # cuda runtime args
+     cuda_utilisation: int = 0,
+     cuda_memory: int = 1,
+     cuda_pids: list[int] = [],
+     # hip runtime args
+     hip_gfx: str = "942",
+     hip_drm: int = 128,
+     hip_node_idx: int = 2,
+     hip_pids: list[int] = [],
+     _hip_drm_stride: int = 8,
+ ) -> Implementation:
+     args = {
+         key: (arg if not isinstance(arg, default) else arg.get())
+         for key, arg in locals().items()
+     }
+     return MockImplementation(**args)
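+
+
+ # Example (illustrative, not from the original source): judging by their
+ # signature, the ``default`` placeholders above resolve to either the
+ # hard-coded fallback or a value parsed from the named environment variable.
+ # The resulting mock implementation can be passed to any public function via
+ # its ``impl`` argument, e.g.:
+ #
+ #   fake = mock(cuda_count=2, hip_count=1)
+ #   assert count(impl=fake) == 3  # assuming the mocked counts simply add up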
+
+
+ def genuine() -> Implementation:
+     return GenuineImplementation()
+
+
+ def _get_version() -> str:
+     from . import version
+
+     return version.version
+
+
+ def _get_has_repo() -> bool:
+     from . import version
+
+     return version.has_repo
+
+
+ def _get_repo() -> str:
+     from . import version
+
+     return version.repo
+
+
+ def _get_commit() -> str:
+     from . import version
+
+     return version.commit
+
+
+ add_module_properties(
+     __name__,
+     {
+         "__version__": staticproperty(staticmethod(_get_version)),
+         "__has_repo__": staticproperty(staticmethod(_get_has_repo)),
+         "__repo__": staticproperty(staticmethod(_get_repo)),
+         "__commit__": staticproperty(staticmethod(_get_commit)),
+         "default_impl": staticproperty(staticmethod(_get_default_impl)),
+     },
+ )
+
+
+ __all__ = [
+     "Properties",
+     "Provider",
+     "query",
+     "count",
+     "get",
+     "hasprovider",
+     "hascuda",
+     "hasamd",
+     "mock",
+     "genuine",
+ ]
@@ -0,0 +1,4 @@
+ version = '1.3.0'
+ repo = 'git@github.com:a2labs-ai/gpuinfo.git'
+ commit = '51e6d202bf9ca60302cc903947addc4b582e93f8'
+ has_repo = True