tinygrad 0.8.0__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tinygrad/__init__.py +6 -6
- tinygrad/codegen/__init__.py +0 -0
- tinygrad/codegen/kernel.py +253 -225
- tinygrad/codegen/linearizer.py +398 -436
- tinygrad/codegen/uops.py +451 -0
- tinygrad/device.py +268 -274
- tinygrad/dtype.py +56 -40
- tinygrad/engine/__init__.py +0 -0
- tinygrad/engine/graph.py +100 -0
- tinygrad/engine/jit.py +198 -0
- tinygrad/engine/realize.py +192 -0
- tinygrad/engine/schedule.py +370 -0
- tinygrad/engine/search.py +199 -0
- tinygrad/{mlops.py → function.py} +40 -32
- tinygrad/helpers.py +144 -46
- tinygrad/lazy.py +143 -242
- tinygrad/multi.py +173 -0
- tinygrad/nn/__init__.py +180 -9
- tinygrad/nn/datasets.py +8 -0
- tinygrad/nn/optim.py +106 -28
- tinygrad/nn/state.py +87 -19
- tinygrad/ops.py +104 -45
- tinygrad/renderer/__init__.py +65 -0
- tinygrad/renderer/assembly.py +269 -0
- tinygrad/renderer/cstyle.py +308 -210
- tinygrad/renderer/llvmir.py +119 -124
- tinygrad/runtime/__init__.py +0 -0
- tinygrad/runtime/autogen/amd_gpu.py +13403 -0
- tinygrad/runtime/autogen/comgr.py +891 -0
- tinygrad/runtime/autogen/cuda.py +5923 -0
- tinygrad/runtime/autogen/hip.py +5909 -0
- tinygrad/runtime/autogen/hsa.py +5893 -0
- tinygrad/runtime/autogen/io_uring.py +1486 -0
- tinygrad/runtime/autogen/kfd.py +812 -0
- tinygrad/runtime/autogen/nv_gpu.py +33597 -0
- tinygrad/runtime/autogen/opencl.py +1795 -0
- tinygrad/runtime/driver/__init__.py +0 -0
- tinygrad/runtime/driver/hip_comgr.py +56 -0
- tinygrad/runtime/graph/__init__.py +0 -0
- tinygrad/runtime/graph/clang.py +39 -0
- tinygrad/runtime/graph/cuda.py +59 -54
- tinygrad/runtime/graph/hcq.py +187 -0
- tinygrad/runtime/graph/metal.py +37 -41
- tinygrad/runtime/ops_amd.py +550 -0
- tinygrad/runtime/ops_clang.py +16 -14
- tinygrad/runtime/ops_cuda.py +129 -37
- tinygrad/runtime/ops_disk.py +111 -43
- tinygrad/runtime/ops_gpu.py +52 -50
- tinygrad/runtime/ops_llvm.py +36 -56
- tinygrad/runtime/ops_metal.py +41 -24
- tinygrad/runtime/ops_npy.py +9 -0
- tinygrad/runtime/ops_nv.py +625 -0
- tinygrad/runtime/ops_python.py +208 -0
- tinygrad/shape/__init__.py +0 -0
- tinygrad/shape/shapetracker.py +46 -107
- tinygrad/shape/symbolic.py +99 -98
- tinygrad/shape/view.py +162 -45
- tinygrad/tensor.py +2492 -483
- {tinygrad-0.8.0.dist-info → tinygrad-0.9.1.dist-info}/LICENSE +1 -1
- {tinygrad-0.8.0.dist-info → tinygrad-0.9.1.dist-info}/METADATA +31 -13
- tinygrad-0.9.1.dist-info/RECORD +63 -0
- {tinygrad-0.8.0.dist-info → tinygrad-0.9.1.dist-info}/WHEEL +1 -1
- tinygrad/features/image.py +0 -93
- tinygrad/features/multi.py +0 -103
- tinygrad/features/search.py +0 -160
- tinygrad/graph.py +0 -106
- tinygrad/jit.py +0 -152
- tinygrad/realize.py +0 -50
- tinygrad/runtime/graph/hip.py +0 -24
- tinygrad/runtime/ops_cpu.py +0 -45
- tinygrad/runtime/ops_hip.py +0 -97
- tinygrad/runtime/ops_torch.py +0 -49
- tinygrad-0.8.0.dist-info/RECORD +0 -41
- {tinygrad-0.8.0.dist-info → tinygrad-0.9.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,812 @@
|
|
1
|
+
# mypy: ignore-errors
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
#
|
4
|
+
# TARGET arch is: []
|
5
|
+
# WORD_SIZE is: 8
|
6
|
+
# POINTER_SIZE is: 8
|
7
|
+
# LONGDOUBLE_SIZE is: 16
|
8
|
+
#
|
9
|
+
import ctypes, os
|
10
|
+
|
11
|
+
|
12
|
+
class AsDictMixin:
|
13
|
+
@classmethod
|
14
|
+
def as_dict(cls, self):
|
15
|
+
result = {}
|
16
|
+
if not isinstance(self, AsDictMixin):
|
17
|
+
# not a structure, assume it's already a python object
|
18
|
+
return self
|
19
|
+
if not hasattr(cls, "_fields_"):
|
20
|
+
return result
|
21
|
+
# sys.version_info >= (3, 5)
|
22
|
+
# for (field, *_) in cls._fields_: # noqa
|
23
|
+
for field_tuple in cls._fields_: # noqa
|
24
|
+
field = field_tuple[0]
|
25
|
+
if field.startswith('PADDING_'):
|
26
|
+
continue
|
27
|
+
value = getattr(self, field)
|
28
|
+
type_ = type(value)
|
29
|
+
if hasattr(value, "_length_") and hasattr(value, "_type_"):
|
30
|
+
# array
|
31
|
+
if not hasattr(type_, "as_dict"):
|
32
|
+
value = [v for v in value]
|
33
|
+
else:
|
34
|
+
type_ = type_._type_
|
35
|
+
value = [type_.as_dict(v) for v in value]
|
36
|
+
elif hasattr(value, "contents") and hasattr(value, "_type_"):
|
37
|
+
# pointer
|
38
|
+
try:
|
39
|
+
if not hasattr(type_, "as_dict"):
|
40
|
+
value = value.contents
|
41
|
+
else:
|
42
|
+
type_ = type_._type_
|
43
|
+
value = type_.as_dict(value.contents)
|
44
|
+
except ValueError:
|
45
|
+
# nullptr
|
46
|
+
value = None
|
47
|
+
elif isinstance(value, AsDictMixin):
|
48
|
+
# other structure
|
49
|
+
value = type_.as_dict(value)
|
50
|
+
result[field] = value
|
51
|
+
return result
|
52
|
+
|
53
|
+
|
54
|
+
class Structure(ctypes.Structure, AsDictMixin):
    """Base class for the generated structs.

    Adds keyword-friendly construction (positional arguments are mapped
    onto the non-PADDING field names) and `bind` for wiring Python
    callables into function-pointer fields, on top of ctypes.Structure.
    """

    def __init__(self, *args, **kwds):
        # We don't want positional arguments to fill PADDING_* fields,
        # so map them onto the real field names first.

        args = dict(zip(self.__class__._field_names_(), args))
        args.update(kwds)
        super(Structure, self).__init__(**args)

    @classmethod
    def _field_names_(cls):
        # Generator over declared field names, excluding compiler padding
        # slots; empty when the class has no _fields_ yet.
        if hasattr(cls, '_fields_'):
            return (f[0] for f in cls._fields_ if not f[0].startswith('PADDING'))
        else:
            return ()

    @classmethod
    def get_type(cls, field):
        # Return the ctypes type declared for `field`, or None if the
        # field is not part of this struct.
        for f in cls._fields_:
            if f[0] == field:
                return f[1]
        return None

    @classmethod
    def bind(cls, bound_fields):
        """Build an instance with callback fields bound to Python callables.

        Fields whose ctypes type has a `restype` (function pointers) are
        wrapped; other fields take the supplied value or a
        default-constructed one.  NOTE: consumes (mutates) the passed-in
        `bound_fields` dict, and raises ValueError if an entry remains
        that does not name a callback field of this struct.
        """
        fields = {}
        for name, type_ in cls._fields_:
            if hasattr(type_, "restype"):
                # function-pointer field
                if name in bound_fields:
                    if bound_fields[name] is None:
                        # explicit None -> NULL function pointer
                        fields[name] = type_()
                    else:
                        # use a closure to capture the callback from the loop scope
                        fields[name] = (
                            type_((lambda callback: lambda *args: callback(*args))(
                                bound_fields[name]))
                        )
                    del bound_fields[name]
                else:
                    # default callback implementation (does nothing)
                    try:
                        default_ = type_(0).restype().value
                    except TypeError:
                        default_ = None
                    fields[name] = type_((
                        lambda default_: lambda *args: default_)(default_))
            else:
                # not a callback function, use default initialization
                if name in bound_fields:
                    fields[name] = bound_fields[name]
                    del bound_fields[name]
                else:
                    fields[name] = type_()
        if len(bound_fields) != 0:
            raise ValueError(
                "Cannot bind the following unknown callback(s) {}.{}".format(
                    cls.__name__, bound_fields.keys()
                ))
        return cls(**fields)
|
113
|
+
|
114
|
+
|
115
|
+
class Union(ctypes.Union, AsDictMixin):
    """Base class for the generated unions; adds dict conversion via AsDictMixin."""
    pass
|
117
|
+
|
118
|
+
|
119
|
+
|
120
|
+
|
121
|
+
|
122
|
+
# Constants lifted from the kernel's kfd_ioctl.h UAPI header.  Values are
# ABI-defined; do not change them here.
KFD_IOCTL_H_INCLUDED = True # macro
KFD_IOCTL_MAJOR_VERSION = 1 # macro
KFD_IOCTL_MINOR_VERSION = 6 # macro
# queue types accepted by AMDKFD_IOC_CREATE_QUEUE
KFD_IOC_QUEUE_TYPE_COMPUTE = 0x0 # macro
KFD_IOC_QUEUE_TYPE_SDMA = 0x1 # macro
KFD_IOC_QUEUE_TYPE_COMPUTE_AQL = 0x2 # macro
KFD_IOC_QUEUE_TYPE_SDMA_XGMI = 0x3 # macro
KFD_MAX_QUEUE_PERCENTAGE = 100 # macro
KFD_MAX_QUEUE_PRIORITY = 15 # macro
KFD_IOC_CACHE_POLICY_COHERENT = 0 # macro
KFD_IOC_CACHE_POLICY_NONCOHERENT = 1 # macro
NUM_OF_SUPPORTED_GPUS = 7 # macro
MAX_ALLOWED_NUM_POINTS = 100 # macro
MAX_ALLOWED_AW_BUFF_SIZE = 4096 # macro
MAX_ALLOWED_WAC_BUFF_SIZE = 128 # macro
# event types for AMDKFD_IOC_CREATE_EVENT
KFD_IOC_EVENT_SIGNAL = 0 # macro
KFD_IOC_EVENT_NODECHANGE = 1 # macro
KFD_IOC_EVENT_DEVICESTATECHANGE = 2 # macro
KFD_IOC_EVENT_HW_EXCEPTION = 3 # macro
KFD_IOC_EVENT_SYSTEM_EVENT = 4 # macro
KFD_IOC_EVENT_DEBUG_EVENT = 5 # macro
KFD_IOC_EVENT_PROFILE_EVENT = 6 # macro
KFD_IOC_EVENT_QUEUE_EVENT = 7 # macro
KFD_IOC_EVENT_MEMORY = 8 # macro
# results reported in kfd_ioctl_wait_events_args.wait_result
KFD_IOC_WAIT_RESULT_COMPLETE = 0 # macro
KFD_IOC_WAIT_RESULT_TIMEOUT = 1 # macro
KFD_IOC_WAIT_RESULT_FAIL = 2 # macro
KFD_SIGNAL_EVENT_LIMIT = 4096 # macro
KFD_HW_EXCEPTION_WHOLE_GPU_RESET = 0 # macro
KFD_HW_EXCEPTION_PER_ENGINE_RESET = 1 # macro
KFD_HW_EXCEPTION_GPU_HANG = 0 # macro
KFD_HW_EXCEPTION_ECC = 1 # macro
KFD_MEM_ERR_NO_RAS = 0 # macro
KFD_MEM_ERR_SRAM_ECC = 1 # macro
KFD_MEM_ERR_POISON_CONSUMED = 2 # macro
KFD_MEM_ERR_GPU_HANG = 3 # macro
# flag bits for kfd_ioctl_alloc_memory_of_gpu_args.flags
KFD_IOC_ALLOC_MEM_FLAGS_VRAM = (1<<0) # macro
KFD_IOC_ALLOC_MEM_FLAGS_GTT = (1<<1) # macro
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR = (1<<2) # macro
KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL = (1<<3) # macro
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP = (1<<4) # macro
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE = (1<<31) # macro
KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE = (1<<30) # macro
KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC = (1<<29) # macro
KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE = (1<<28) # macro
KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM = (1<<27) # macro
KFD_IOC_ALLOC_MEM_FLAGS_COHERENT = (1<<26) # macro
KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED = (1<<25) # macro
# def KFD_SMI_EVENT_MASK_FROM_INDEX(i): # macro
#    return (1<<((i)-1))
# SVM flags for kfd_ioctl_svm_attribute values
KFD_IOCTL_SVM_FLAG_HOST_ACCESS = 0x00000001 # macro
KFD_IOCTL_SVM_FLAG_COHERENT = 0x00000002 # macro
KFD_IOCTL_SVM_FLAG_HIVE_LOCAL = 0x00000004 # macro
KFD_IOCTL_SVM_FLAG_GPU_RO = 0x00000008 # macro
KFD_IOCTL_SVM_FLAG_GPU_EXEC = 0x00000010 # macro
KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY = 0x00000020 # macro
AMDKFD_IOCTL_BASE = 'K' # macro
# The _IO* helper macros and the resulting ioctl request numbers could not
# be translated by the generator and are kept below as comments.
# def AMDKFD_IO(nr): # macro
#    return _IO('K',nr)
# def AMDKFD_IOR(nr, type): # macro
#    return _IOR('K',nr,type)
# def AMDKFD_IOW(nr, type): # macro
#    return _IOW('K',nr,type)
# def AMDKFD_IOWR(nr, type): # macro
#    return _IOWR('K',nr,type)
# AMDKFD_IOC_GET_VERSION = _IOR('K',nr,type) ( 0x01 , struct kfd_ioctl_get_version_args ) # macro
# AMDKFD_IOC_CREATE_QUEUE = _IOWR('K',nr,type) ( 0x02 , struct kfd_ioctl_create_queue_args ) # macro
# AMDKFD_IOC_DESTROY_QUEUE = _IOWR('K',nr,type) ( 0x03 , struct kfd_ioctl_destroy_queue_args ) # macro
# AMDKFD_IOC_SET_MEMORY_POLICY = _IOW('K',nr,type) ( 0x04 , struct kfd_ioctl_set_memory_policy_args ) # macro
# AMDKFD_IOC_GET_CLOCK_COUNTERS = _IOWR('K',nr,type) ( 0x05 , struct kfd_ioctl_get_clock_counters_args ) # macro
# AMDKFD_IOC_GET_PROCESS_APERTURES = _IOR('K',nr,type) ( 0x06 , struct kfd_ioctl_get_process_apertures_args ) # macro
# AMDKFD_IOC_UPDATE_QUEUE = _IOW('K',nr,type) ( 0x07 , struct kfd_ioctl_update_queue_args ) # macro
# AMDKFD_IOC_CREATE_EVENT = _IOWR('K',nr,type) ( 0x08 , struct kfd_ioctl_create_event_args ) # macro
# AMDKFD_IOC_DESTROY_EVENT = _IOW('K',nr,type) ( 0x09 , struct kfd_ioctl_destroy_event_args ) # macro
# AMDKFD_IOC_SET_EVENT = _IOW('K',nr,type) ( 0x0A , struct kfd_ioctl_set_event_args ) # macro
# AMDKFD_IOC_RESET_EVENT = _IOW('K',nr,type) ( 0x0B , struct kfd_ioctl_reset_event_args ) # macro
# AMDKFD_IOC_WAIT_EVENTS = _IOWR('K',nr,type) ( 0x0C , struct kfd_ioctl_wait_events_args ) # macro
# AMDKFD_IOC_DBG_REGISTER = _IOW('K',nr,type) ( 0x0D , struct kfd_ioctl_dbg_register_args ) # macro
# AMDKFD_IOC_DBG_UNREGISTER = _IOW('K',nr,type) ( 0x0E , struct kfd_ioctl_dbg_unregister_args ) # macro
# AMDKFD_IOC_DBG_ADDRESS_WATCH = _IOW('K',nr,type) ( 0x0F , struct kfd_ioctl_dbg_address_watch_args ) # macro
# AMDKFD_IOC_DBG_WAVE_CONTROL = _IOW('K',nr,type) ( 0x10 , struct kfd_ioctl_dbg_wave_control_args ) # macro
# AMDKFD_IOC_SET_SCRATCH_BACKING_VA = _IOWR('K',nr,type) ( 0x11 , struct kfd_ioctl_set_scratch_backing_va_args ) # macro
# AMDKFD_IOC_GET_TILE_CONFIG = _IOWR('K',nr,type) ( 0x12 , struct kfd_ioctl_get_tile_config_args ) # macro
# AMDKFD_IOC_SET_TRAP_HANDLER = _IOW('K',nr,type) ( 0x13 , struct kfd_ioctl_set_trap_handler_args ) # macro
# AMDKFD_IOC_GET_PROCESS_APERTURES_NEW = _IOWR('K',nr,type) ( 0x14 , struct kfd_ioctl_get_process_apertures_new_args ) # macro
# AMDKFD_IOC_ACQUIRE_VM = _IOW('K',nr,type) ( 0x15 , struct kfd_ioctl_acquire_vm_args ) # macro
# AMDKFD_IOC_ALLOC_MEMORY_OF_GPU = _IOWR('K',nr,type) ( 0x16 , struct kfd_ioctl_alloc_memory_of_gpu_args ) # macro
# AMDKFD_IOC_FREE_MEMORY_OF_GPU = _IOW('K',nr,type) ( 0x17 , struct kfd_ioctl_free_memory_of_gpu_args ) # macro
# AMDKFD_IOC_MAP_MEMORY_TO_GPU = _IOWR('K',nr,type) ( 0x18 , struct kfd_ioctl_map_memory_to_gpu_args ) # macro
# AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU = _IOWR('K',nr,type) ( 0x19 , struct kfd_ioctl_unmap_memory_from_gpu_args ) # macro
# AMDKFD_IOC_SET_CU_MASK = _IOW('K',nr,type) ( 0x1A , struct kfd_ioctl_set_cu_mask_args ) # macro
# AMDKFD_IOC_GET_QUEUE_WAVE_STATE = _IOWR('K',nr,type) ( 0x1B , struct kfd_ioctl_get_queue_wave_state_args ) # macro
# AMDKFD_IOC_GET_DMABUF_INFO = _IOWR('K',nr,type) ( 0x1C , struct kfd_ioctl_get_dmabuf_info_args ) # macro
# AMDKFD_IOC_IMPORT_DMABUF = _IOWR('K',nr,type) ( 0x1D , struct kfd_ioctl_import_dmabuf_args ) # macro
# AMDKFD_IOC_ALLOC_QUEUE_GWS = _IOWR('K',nr,type) ( 0x1E , struct kfd_ioctl_alloc_queue_gws_args ) # macro
# AMDKFD_IOC_SMI_EVENTS = _IOWR('K',nr,type) ( 0x1F , struct kfd_ioctl_smi_events_args ) # macro
# AMDKFD_IOC_SVM = _IOWR('K',nr,type) ( 0x20 , struct kfd_ioctl_svm_args ) # macro
# AMDKFD_IOC_SET_XNACK_MODE = _IOWR('K',nr,type) ( 0x21 , struct kfd_ioctl_set_xnack_mode_args ) # macro
AMDKFD_COMMAND_START = 0x01 # macro
AMDKFD_COMMAND_END = 0x22 # macro
|
222
|
+
# --- ctypes mirrors of the KFD ioctl argument structs (version, queue,
# memory policy, clock counters, process apertures).  _pack_ = 1 and the
# exact field order are part of the kernel ABI; do not reorder or retype.

class struct_kfd_ioctl_get_version_args(Structure):
    pass

struct_kfd_ioctl_get_version_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_version_args._fields_ = [
    ('major_version', ctypes.c_uint32),
    ('minor_version', ctypes.c_uint32),
]

class struct_kfd_ioctl_create_queue_args(Structure):
    pass

struct_kfd_ioctl_create_queue_args._pack_ = 1 # source:False
struct_kfd_ioctl_create_queue_args._fields_ = [
    ('ring_base_address', ctypes.c_uint64),
    ('write_pointer_address', ctypes.c_uint64),
    ('read_pointer_address', ctypes.c_uint64),
    ('doorbell_offset', ctypes.c_uint64),
    ('ring_size', ctypes.c_uint32),
    ('gpu_id', ctypes.c_uint32),
    ('queue_type', ctypes.c_uint32),
    ('queue_percentage', ctypes.c_uint32),
    ('queue_priority', ctypes.c_uint32),
    ('queue_id', ctypes.c_uint32),
    ('eop_buffer_address', ctypes.c_uint64),
    ('eop_buffer_size', ctypes.c_uint64),
    ('ctx_save_restore_address', ctypes.c_uint64),
    ('ctx_save_restore_size', ctypes.c_uint32),
    ('ctl_stack_size', ctypes.c_uint32),
]

class struct_kfd_ioctl_destroy_queue_args(Structure):
    pass

struct_kfd_ioctl_destroy_queue_args._pack_ = 1 # source:False
struct_kfd_ioctl_destroy_queue_args._fields_ = [
    ('queue_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_update_queue_args(Structure):
    pass

struct_kfd_ioctl_update_queue_args._pack_ = 1 # source:False
struct_kfd_ioctl_update_queue_args._fields_ = [
    ('ring_base_address', ctypes.c_uint64),
    ('queue_id', ctypes.c_uint32),
    ('ring_size', ctypes.c_uint32),
    ('queue_percentage', ctypes.c_uint32),
    ('queue_priority', ctypes.c_uint32),
]

class struct_kfd_ioctl_set_cu_mask_args(Structure):
    pass

struct_kfd_ioctl_set_cu_mask_args._pack_ = 1 # source:False
struct_kfd_ioctl_set_cu_mask_args._fields_ = [
    ('queue_id', ctypes.c_uint32),
    ('num_cu_mask', ctypes.c_uint32),
    ('cu_mask_ptr', ctypes.c_uint64),
]

class struct_kfd_ioctl_get_queue_wave_state_args(Structure):
    pass

struct_kfd_ioctl_get_queue_wave_state_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_queue_wave_state_args._fields_ = [
    ('ctl_stack_address', ctypes.c_uint64),
    ('ctl_stack_used_size', ctypes.c_uint32),
    ('save_area_used_size', ctypes.c_uint32),
    ('queue_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_set_memory_policy_args(Structure):
    pass

struct_kfd_ioctl_set_memory_policy_args._pack_ = 1 # source:False
struct_kfd_ioctl_set_memory_policy_args._fields_ = [
    ('alternate_aperture_base', ctypes.c_uint64),
    ('alternate_aperture_size', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('default_policy', ctypes.c_uint32),
    ('alternate_policy', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_get_clock_counters_args(Structure):
    pass

struct_kfd_ioctl_get_clock_counters_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_clock_counters_args._fields_ = [
    ('gpu_clock_counter', ctypes.c_uint64),
    ('cpu_clock_counter', ctypes.c_uint64),
    ('system_clock_counter', ctypes.c_uint64),
    ('system_clock_freq', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_process_device_apertures(Structure):
    pass

struct_kfd_process_device_apertures._pack_ = 1 # source:False
struct_kfd_process_device_apertures._fields_ = [
    ('lds_base', ctypes.c_uint64),
    ('lds_limit', ctypes.c_uint64),
    ('scratch_base', ctypes.c_uint64),
    ('scratch_limit', ctypes.c_uint64),
    ('gpuvm_base', ctypes.c_uint64),
    ('gpuvm_limit', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_get_process_apertures_args(Structure):
    pass

struct_kfd_ioctl_get_process_apertures_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_process_apertures_args._fields_ = [
    # fixed-size array: 7 == NUM_OF_SUPPORTED_GPUS
    ('process_apertures', struct_kfd_process_device_apertures * 7),
    ('num_of_nodes', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_get_process_apertures_new_args(Structure):
    pass

struct_kfd_ioctl_get_process_apertures_new_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_process_apertures_new_args._fields_ = [
    ('kfd_process_device_apertures_ptr', ctypes.c_uint64),
    ('num_of_nodes', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]
|
356
|
+
|
357
|
+
# --- debugger and event ioctl structs.  Same ABI caveat as above:
# _pack_ = 1 and field order must match the kernel header.

class struct_kfd_ioctl_dbg_register_args(Structure):
    pass

struct_kfd_ioctl_dbg_register_args._pack_ = 1 # source:False
struct_kfd_ioctl_dbg_register_args._fields_ = [
    ('gpu_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_dbg_unregister_args(Structure):
    pass

struct_kfd_ioctl_dbg_unregister_args._pack_ = 1 # source:False
struct_kfd_ioctl_dbg_unregister_args._fields_ = [
    ('gpu_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_dbg_address_watch_args(Structure):
    pass

struct_kfd_ioctl_dbg_address_watch_args._pack_ = 1 # source:False
struct_kfd_ioctl_dbg_address_watch_args._fields_ = [
    ('content_ptr', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('buf_size_in_bytes', ctypes.c_uint32),
]

class struct_kfd_ioctl_dbg_wave_control_args(Structure):
    pass

struct_kfd_ioctl_dbg_wave_control_args._pack_ = 1 # source:False
struct_kfd_ioctl_dbg_wave_control_args._fields_ = [
    ('content_ptr', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('buf_size_in_bytes', ctypes.c_uint32),
]

class struct_kfd_ioctl_create_event_args(Structure):
    pass

struct_kfd_ioctl_create_event_args._pack_ = 1 # source:False
struct_kfd_ioctl_create_event_args._fields_ = [
    ('event_page_offset', ctypes.c_uint64),
    ('event_trigger_data', ctypes.c_uint32),
    ('event_type', ctypes.c_uint32),
    ('auto_reset', ctypes.c_uint32),
    ('node_id', ctypes.c_uint32),
    ('event_id', ctypes.c_uint32),
    ('event_slot_index', ctypes.c_uint32),
]

class struct_kfd_ioctl_destroy_event_args(Structure):
    pass

struct_kfd_ioctl_destroy_event_args._pack_ = 1 # source:False
struct_kfd_ioctl_destroy_event_args._fields_ = [
    ('event_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_set_event_args(Structure):
    pass

struct_kfd_ioctl_set_event_args._pack_ = 1 # source:False
struct_kfd_ioctl_set_event_args._fields_ = [
    ('event_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_reset_event_args(Structure):
    pass

struct_kfd_ioctl_reset_event_args._pack_ = 1 # source:False
struct_kfd_ioctl_reset_event_args._fields_ = [
    ('event_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_memory_exception_failure(Structure):
    pass

struct_kfd_memory_exception_failure._pack_ = 1 # source:False
struct_kfd_memory_exception_failure._fields_ = [
    ('NotPresent', ctypes.c_uint32),
    ('ReadOnly', ctypes.c_uint32),
    ('NoExecute', ctypes.c_uint32),
    ('imprecise', ctypes.c_uint32),
]

class struct_kfd_hsa_memory_exception_data(Structure):
    pass

struct_kfd_hsa_memory_exception_data._pack_ = 1 # source:False
struct_kfd_hsa_memory_exception_data._fields_ = [
    ('failure', struct_kfd_memory_exception_failure),
    ('va', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('ErrorType', ctypes.c_uint32),
]

class struct_kfd_hsa_hw_exception_data(Structure):
    pass

struct_kfd_hsa_hw_exception_data._pack_ = 1 # source:False
struct_kfd_hsa_hw_exception_data._fields_ = [
    ('reset_type', ctypes.c_uint32),
    ('reset_cause', ctypes.c_uint32),
    ('memory_lost', ctypes.c_uint32),
    ('gpu_id', ctypes.c_uint32),
]

class struct_kfd_event_data(Structure):
    pass

class union_kfd_event_data_0(Union):
    pass

union_kfd_event_data_0._pack_ = 1 # source:False
union_kfd_event_data_0._fields_ = [
    ('memory_exception_data', struct_kfd_hsa_memory_exception_data),
    ('hw_exception_data', struct_kfd_hsa_hw_exception_data),
    ('PADDING_0', ctypes.c_ubyte * 16),
]

struct_kfd_event_data._pack_ = 1 # source:False
# '_0' is anonymous: memory_exception_data / hw_exception_data are
# addressable directly on a struct_kfd_event_data instance.
struct_kfd_event_data._anonymous_ = ('_0',)
struct_kfd_event_data._fields_ = [
    ('_0', union_kfd_event_data_0),
    ('kfd_event_data_ext', ctypes.c_uint64),
    ('event_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]
|
490
|
+
|
491
|
+
# --- event-wait, VM/memory management, GWS and dmabuf ioctl structs.
# ABI-defined layout: keep _pack_ = 1 and the field order unchanged.

class struct_kfd_ioctl_wait_events_args(Structure):
    pass

struct_kfd_ioctl_wait_events_args._pack_ = 1 # source:False
struct_kfd_ioctl_wait_events_args._fields_ = [
    ('events_ptr', ctypes.c_uint64),
    ('num_events', ctypes.c_uint32),
    ('wait_for_all', ctypes.c_uint32),
    ('timeout', ctypes.c_uint32),
    # kernel reports one of the KFD_IOC_WAIT_RESULT_* values here
    ('wait_result', ctypes.c_uint32),
]

class struct_kfd_ioctl_set_scratch_backing_va_args(Structure):
    pass

struct_kfd_ioctl_set_scratch_backing_va_args._pack_ = 1 # source:False
struct_kfd_ioctl_set_scratch_backing_va_args._fields_ = [
    ('va_addr', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_get_tile_config_args(Structure):
    pass

struct_kfd_ioctl_get_tile_config_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_tile_config_args._fields_ = [
    ('tile_config_ptr', ctypes.c_uint64),
    ('macro_tile_config_ptr', ctypes.c_uint64),
    ('num_tile_configs', ctypes.c_uint32),
    ('num_macro_tile_configs', ctypes.c_uint32),
    ('gpu_id', ctypes.c_uint32),
    ('gb_addr_config', ctypes.c_uint32),
    ('num_banks', ctypes.c_uint32),
    ('num_ranks', ctypes.c_uint32),
]

class struct_kfd_ioctl_set_trap_handler_args(Structure):
    pass

struct_kfd_ioctl_set_trap_handler_args._pack_ = 1 # source:False
struct_kfd_ioctl_set_trap_handler_args._fields_ = [
    ('tba_addr', ctypes.c_uint64),
    ('tma_addr', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_acquire_vm_args(Structure):
    pass

struct_kfd_ioctl_acquire_vm_args._pack_ = 1 # source:False
struct_kfd_ioctl_acquire_vm_args._fields_ = [
    ('drm_fd', ctypes.c_uint32),
    ('gpu_id', ctypes.c_uint32),
]

class struct_kfd_ioctl_alloc_memory_of_gpu_args(Structure):
    pass

struct_kfd_ioctl_alloc_memory_of_gpu_args._pack_ = 1 # source:False
struct_kfd_ioctl_alloc_memory_of_gpu_args._fields_ = [
    ('va_addr', ctypes.c_uint64),
    ('size', ctypes.c_uint64),
    ('handle', ctypes.c_uint64),
    ('mmap_offset', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    # bitmask of KFD_IOC_ALLOC_MEM_FLAGS_* values
    ('flags', ctypes.c_uint32),
]

class struct_kfd_ioctl_free_memory_of_gpu_args(Structure):
    pass

struct_kfd_ioctl_free_memory_of_gpu_args._pack_ = 1 # source:False
struct_kfd_ioctl_free_memory_of_gpu_args._fields_ = [
    ('handle', ctypes.c_uint64),
]

class struct_kfd_ioctl_map_memory_to_gpu_args(Structure):
    pass

struct_kfd_ioctl_map_memory_to_gpu_args._pack_ = 1 # source:False
struct_kfd_ioctl_map_memory_to_gpu_args._fields_ = [
    ('handle', ctypes.c_uint64),
    ('device_ids_array_ptr', ctypes.c_uint64),
    ('n_devices', ctypes.c_uint32),
    ('n_success', ctypes.c_uint32),
]

class struct_kfd_ioctl_unmap_memory_from_gpu_args(Structure):
    pass

struct_kfd_ioctl_unmap_memory_from_gpu_args._pack_ = 1 # source:False
struct_kfd_ioctl_unmap_memory_from_gpu_args._fields_ = [
    ('handle', ctypes.c_uint64),
    ('device_ids_array_ptr', ctypes.c_uint64),
    ('n_devices', ctypes.c_uint32),
    ('n_success', ctypes.c_uint32),
]

class struct_kfd_ioctl_alloc_queue_gws_args(Structure):
    pass

struct_kfd_ioctl_alloc_queue_gws_args._pack_ = 1 # source:False
struct_kfd_ioctl_alloc_queue_gws_args._fields_ = [
    ('queue_id', ctypes.c_uint32),
    ('num_gws', ctypes.c_uint32),
    ('first_gws', ctypes.c_uint32),
    ('pad', ctypes.c_uint32),
]

class struct_kfd_ioctl_get_dmabuf_info_args(Structure):
    pass

struct_kfd_ioctl_get_dmabuf_info_args._pack_ = 1 # source:False
struct_kfd_ioctl_get_dmabuf_info_args._fields_ = [
    ('size', ctypes.c_uint64),
    ('metadata_ptr', ctypes.c_uint64),
    ('metadata_size', ctypes.c_uint32),
    ('gpu_id', ctypes.c_uint32),
    ('flags', ctypes.c_uint32),
    ('dmabuf_fd', ctypes.c_uint32),
]

class struct_kfd_ioctl_import_dmabuf_args(Structure):
    pass

struct_kfd_ioctl_import_dmabuf_args._pack_ = 1 # source:False
struct_kfd_ioctl_import_dmabuf_args._fields_ = [
    ('va_addr', ctypes.c_uint64),
    ('handle', ctypes.c_uint64),
    ('gpu_id', ctypes.c_uint32),
    ('dmabuf_fd', ctypes.c_uint32),
]
|
625
|
+
|
626
|
+
|
627
|
+
# --- C enums (represented as c_uint32 plus module-level constants) and
# the SMI / SVM / XNACK ioctl structs.

# values for enumeration 'kfd_smi_event'
kfd_smi_event__enumvalues = {
    0: 'KFD_SMI_EVENT_NONE',
    1: 'KFD_SMI_EVENT_VMFAULT',
    2: 'KFD_SMI_EVENT_THERMAL_THROTTLE',
    3: 'KFD_SMI_EVENT_GPU_PRE_RESET',
    4: 'KFD_SMI_EVENT_GPU_POST_RESET',
}
KFD_SMI_EVENT_NONE = 0
KFD_SMI_EVENT_VMFAULT = 1
KFD_SMI_EVENT_THERMAL_THROTTLE = 2
KFD_SMI_EVENT_GPU_PRE_RESET = 3
KFD_SMI_EVENT_GPU_POST_RESET = 4
kfd_smi_event = ctypes.c_uint32 # enum

class struct_kfd_ioctl_smi_events_args(Structure):
    pass

struct_kfd_ioctl_smi_events_args._pack_ = 1 # source:False
struct_kfd_ioctl_smi_events_args._fields_ = [
    ('gpuid', ctypes.c_uint32),
    ('anon_fd', ctypes.c_uint32),
]

# values for enumeration 'kfd_mmio_remap'
kfd_mmio_remap__enumvalues = {
    0: 'KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL',
    4: 'KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL',
}
KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0
KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4
kfd_mmio_remap = ctypes.c_uint32 # enum

# values for enumeration 'kfd_ioctl_svm_op'
kfd_ioctl_svm_op__enumvalues = {
    0: 'KFD_IOCTL_SVM_OP_SET_ATTR',
    1: 'KFD_IOCTL_SVM_OP_GET_ATTR',
}
KFD_IOCTL_SVM_OP_SET_ATTR = 0
KFD_IOCTL_SVM_OP_GET_ATTR = 1
kfd_ioctl_svm_op = ctypes.c_uint32 # enum

# values for enumeration 'kfd_ioctl_svm_location'
kfd_ioctl_svm_location__enumvalues = {
    0: 'KFD_IOCTL_SVM_LOCATION_SYSMEM',
    4294967295: 'KFD_IOCTL_SVM_LOCATION_UNDEFINED',
}
KFD_IOCTL_SVM_LOCATION_SYSMEM = 0
# 4294967295 == 0xFFFFFFFF, i.e. the all-ones u32 sentinel
KFD_IOCTL_SVM_LOCATION_UNDEFINED = 4294967295
kfd_ioctl_svm_location = ctypes.c_uint32 # enum

# values for enumeration 'kfd_ioctl_svm_attr_type'
kfd_ioctl_svm_attr_type__enumvalues = {
    0: 'KFD_IOCTL_SVM_ATTR_PREFERRED_LOC',
    1: 'KFD_IOCTL_SVM_ATTR_PREFETCH_LOC',
    2: 'KFD_IOCTL_SVM_ATTR_ACCESS',
    3: 'KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE',
    4: 'KFD_IOCTL_SVM_ATTR_NO_ACCESS',
    5: 'KFD_IOCTL_SVM_ATTR_SET_FLAGS',
    6: 'KFD_IOCTL_SVM_ATTR_CLR_FLAGS',
    7: 'KFD_IOCTL_SVM_ATTR_GRANULARITY',
}
KFD_IOCTL_SVM_ATTR_PREFERRED_LOC = 0
KFD_IOCTL_SVM_ATTR_PREFETCH_LOC = 1
KFD_IOCTL_SVM_ATTR_ACCESS = 2
KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE = 3
KFD_IOCTL_SVM_ATTR_NO_ACCESS = 4
KFD_IOCTL_SVM_ATTR_SET_FLAGS = 5
KFD_IOCTL_SVM_ATTR_CLR_FLAGS = 6
KFD_IOCTL_SVM_ATTR_GRANULARITY = 7
kfd_ioctl_svm_attr_type = ctypes.c_uint32 # enum

class struct_kfd_ioctl_svm_attribute(Structure):
    pass

struct_kfd_ioctl_svm_attribute._pack_ = 1 # source:False
struct_kfd_ioctl_svm_attribute._fields_ = [
    ('type', ctypes.c_uint32),
    ('value', ctypes.c_uint32),
]

class struct_kfd_ioctl_svm_args(Structure):
    pass

struct_kfd_ioctl_svm_args._pack_ = 1 # source:False
struct_kfd_ioctl_svm_args._fields_ = [
    ('start_addr', ctypes.c_uint64),
    ('size', ctypes.c_uint64),
    ('op', ctypes.c_uint32),
    ('nattr', ctypes.c_uint32),
    # zero-length array: ctypes rendering of a C flexible array member;
    # nattr entries follow the struct in memory
    ('attrs', struct_kfd_ioctl_svm_attribute * 0),
]

class struct_kfd_ioctl_set_xnack_mode_args(Structure):
    pass

struct_kfd_ioctl_set_xnack_mode_args._pack_ = 1 # source:False
struct_kfd_ioctl_set_xnack_mode_args._fields_ = [
    ('xnack_enabled', ctypes.c_int32),
]
|
726
|
+
|
727
|
+
__all__ = \
|
728
|
+
['AMDKFD_COMMAND_END', 'AMDKFD_COMMAND_START',
|
729
|
+
'AMDKFD_IOCTL_BASE', 'KFD_HW_EXCEPTION_ECC',
|
730
|
+
'KFD_HW_EXCEPTION_GPU_HANG', 'KFD_HW_EXCEPTION_PER_ENGINE_RESET',
|
731
|
+
'KFD_HW_EXCEPTION_WHOLE_GPU_RESET', 'KFD_IOCTL_H_INCLUDED',
|
732
|
+
'KFD_IOCTL_MAJOR_VERSION', 'KFD_IOCTL_MINOR_VERSION',
|
733
|
+
'KFD_IOCTL_SVM_ATTR_ACCESS', 'KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE',
|
734
|
+
'KFD_IOCTL_SVM_ATTR_CLR_FLAGS', 'KFD_IOCTL_SVM_ATTR_GRANULARITY',
|
735
|
+
'KFD_IOCTL_SVM_ATTR_NO_ACCESS',
|
736
|
+
'KFD_IOCTL_SVM_ATTR_PREFERRED_LOC',
|
737
|
+
'KFD_IOCTL_SVM_ATTR_PREFETCH_LOC', 'KFD_IOCTL_SVM_ATTR_SET_FLAGS',
|
738
|
+
'KFD_IOCTL_SVM_FLAG_COHERENT', 'KFD_IOCTL_SVM_FLAG_GPU_EXEC',
|
739
|
+
'KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY', 'KFD_IOCTL_SVM_FLAG_GPU_RO',
|
740
|
+
'KFD_IOCTL_SVM_FLAG_HIVE_LOCAL', 'KFD_IOCTL_SVM_FLAG_HOST_ACCESS',
|
741
|
+
'KFD_IOCTL_SVM_LOCATION_SYSMEM',
|
742
|
+
'KFD_IOCTL_SVM_LOCATION_UNDEFINED', 'KFD_IOCTL_SVM_OP_GET_ATTR',
|
743
|
+
'KFD_IOCTL_SVM_OP_SET_ATTR',
|
744
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM',
|
745
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_COHERENT',
|
746
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL',
|
747
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE',
|
748
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_GTT',
|
749
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP',
|
750
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE',
|
751
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC',
|
752
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED',
|
753
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_USERPTR', 'KFD_IOC_ALLOC_MEM_FLAGS_VRAM',
|
754
|
+
'KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE',
|
755
|
+
'KFD_IOC_CACHE_POLICY_COHERENT',
|
756
|
+
'KFD_IOC_CACHE_POLICY_NONCOHERENT', 'KFD_IOC_EVENT_DEBUG_EVENT',
|
757
|
+
'KFD_IOC_EVENT_DEVICESTATECHANGE', 'KFD_IOC_EVENT_HW_EXCEPTION',
|
758
|
+
'KFD_IOC_EVENT_MEMORY', 'KFD_IOC_EVENT_NODECHANGE',
|
759
|
+
'KFD_IOC_EVENT_PROFILE_EVENT', 'KFD_IOC_EVENT_QUEUE_EVENT',
|
760
|
+
'KFD_IOC_EVENT_SIGNAL', 'KFD_IOC_EVENT_SYSTEM_EVENT',
|
761
|
+
'KFD_IOC_QUEUE_TYPE_COMPUTE', 'KFD_IOC_QUEUE_TYPE_COMPUTE_AQL',
|
762
|
+
'KFD_IOC_QUEUE_TYPE_SDMA', 'KFD_IOC_QUEUE_TYPE_SDMA_XGMI',
|
763
|
+
'KFD_IOC_WAIT_RESULT_COMPLETE', 'KFD_IOC_WAIT_RESULT_FAIL',
|
764
|
+
'KFD_IOC_WAIT_RESULT_TIMEOUT', 'KFD_MAX_QUEUE_PERCENTAGE',
|
765
|
+
'KFD_MAX_QUEUE_PRIORITY', 'KFD_MEM_ERR_GPU_HANG',
|
766
|
+
'KFD_MEM_ERR_NO_RAS', 'KFD_MEM_ERR_POISON_CONSUMED',
|
767
|
+
'KFD_MEM_ERR_SRAM_ECC', 'KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL',
|
768
|
+
'KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL', 'KFD_SIGNAL_EVENT_LIMIT',
|
769
|
+
'KFD_SMI_EVENT_GPU_POST_RESET', 'KFD_SMI_EVENT_GPU_PRE_RESET',
|
770
|
+
'KFD_SMI_EVENT_NONE', 'KFD_SMI_EVENT_THERMAL_THROTTLE',
|
771
|
+
'KFD_SMI_EVENT_VMFAULT', 'MAX_ALLOWED_AW_BUFF_SIZE',
|
772
|
+
'MAX_ALLOWED_NUM_POINTS', 'MAX_ALLOWED_WAC_BUFF_SIZE',
|
773
|
+
'NUM_OF_SUPPORTED_GPUS', 'kfd_ioctl_svm_attr_type',
|
774
|
+
'kfd_ioctl_svm_location', 'kfd_ioctl_svm_op', 'kfd_mmio_remap',
|
775
|
+
'kfd_smi_event', 'struct_kfd_event_data',
|
776
|
+
'struct_kfd_hsa_hw_exception_data',
|
777
|
+
'struct_kfd_hsa_memory_exception_data',
|
778
|
+
'struct_kfd_ioctl_acquire_vm_args',
|
779
|
+
'struct_kfd_ioctl_alloc_memory_of_gpu_args',
|
780
|
+
'struct_kfd_ioctl_alloc_queue_gws_args',
|
781
|
+
'struct_kfd_ioctl_create_event_args',
|
782
|
+
'struct_kfd_ioctl_create_queue_args',
|
783
|
+
'struct_kfd_ioctl_dbg_address_watch_args',
|
784
|
+
'struct_kfd_ioctl_dbg_register_args',
|
785
|
+
'struct_kfd_ioctl_dbg_unregister_args',
|
786
|
+
'struct_kfd_ioctl_dbg_wave_control_args',
|
787
|
+
'struct_kfd_ioctl_destroy_event_args',
|
788
|
+
'struct_kfd_ioctl_destroy_queue_args',
|
789
|
+
'struct_kfd_ioctl_free_memory_of_gpu_args',
|
790
|
+
'struct_kfd_ioctl_get_clock_counters_args',
|
791
|
+
'struct_kfd_ioctl_get_dmabuf_info_args',
|
792
|
+
'struct_kfd_ioctl_get_process_apertures_args',
|
793
|
+
'struct_kfd_ioctl_get_process_apertures_new_args',
|
794
|
+
'struct_kfd_ioctl_get_queue_wave_state_args',
|
795
|
+
'struct_kfd_ioctl_get_tile_config_args',
|
796
|
+
'struct_kfd_ioctl_get_version_args',
|
797
|
+
'struct_kfd_ioctl_import_dmabuf_args',
|
798
|
+
'struct_kfd_ioctl_map_memory_to_gpu_args',
|
799
|
+
'struct_kfd_ioctl_reset_event_args',
|
800
|
+
'struct_kfd_ioctl_set_cu_mask_args',
|
801
|
+
'struct_kfd_ioctl_set_event_args',
|
802
|
+
'struct_kfd_ioctl_set_memory_policy_args',
|
803
|
+
'struct_kfd_ioctl_set_scratch_backing_va_args',
|
804
|
+
'struct_kfd_ioctl_set_trap_handler_args',
|
805
|
+
'struct_kfd_ioctl_set_xnack_mode_args',
|
806
|
+
'struct_kfd_ioctl_smi_events_args', 'struct_kfd_ioctl_svm_args',
|
807
|
+
'struct_kfd_ioctl_svm_attribute',
|
808
|
+
'struct_kfd_ioctl_unmap_memory_from_gpu_args',
|
809
|
+
'struct_kfd_ioctl_update_queue_args',
|
810
|
+
'struct_kfd_ioctl_wait_events_args',
|
811
|
+
'struct_kfd_memory_exception_failure',
|
812
|
+
'struct_kfd_process_device_apertures', 'union_kfd_event_data_0']
|