numba-cuda 0.17.0__py3-none-any.whl → 0.18.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of numba-cuda might be problematic. Click here for more details.

Files changed (64)
  1. numba_cuda/VERSION +1 -1
  2. numba_cuda/numba/cuda/__init__.py +0 -8
  3. numba_cuda/numba/cuda/_internal/cuda_fp16.py +14225 -0
  4. numba_cuda/numba/cuda/api_util.py +6 -0
  5. numba_cuda/numba/cuda/cgutils.py +1291 -0
  6. numba_cuda/numba/cuda/codegen.py +32 -14
  7. numba_cuda/numba/cuda/compiler.py +113 -10
  8. numba_cuda/numba/cuda/core/caching.py +741 -0
  9. numba_cuda/numba/cuda/core/callconv.py +338 -0
  10. numba_cuda/numba/cuda/core/codegen.py +168 -0
  11. numba_cuda/numba/cuda/core/compiler.py +205 -0
  12. numba_cuda/numba/cuda/core/typed_passes.py +139 -0
  13. numba_cuda/numba/cuda/cudadecl.py +0 -268
  14. numba_cuda/numba/cuda/cudadrv/devicearray.py +3 -0
  15. numba_cuda/numba/cuda/cudadrv/driver.py +2 -1
  16. numba_cuda/numba/cuda/cudadrv/nvvm.py +1 -1
  17. numba_cuda/numba/cuda/cudaimpl.py +4 -178
  18. numba_cuda/numba/cuda/debuginfo.py +469 -3
  19. numba_cuda/numba/cuda/device_init.py +0 -1
  20. numba_cuda/numba/cuda/dispatcher.py +310 -11
  21. numba_cuda/numba/cuda/extending.py +2 -1
  22. numba_cuda/numba/cuda/fp16.py +348 -0
  23. numba_cuda/numba/cuda/intrinsics.py +1 -1
  24. numba_cuda/numba/cuda/libdeviceimpl.py +2 -1
  25. numba_cuda/numba/cuda/lowering.py +1833 -8
  26. numba_cuda/numba/cuda/mathimpl.py +2 -90
  27. numba_cuda/numba/cuda/nvvmutils.py +2 -1
  28. numba_cuda/numba/cuda/printimpl.py +2 -1
  29. numba_cuda/numba/cuda/serialize.py +264 -0
  30. numba_cuda/numba/cuda/simulator/__init__.py +2 -0
  31. numba_cuda/numba/cuda/simulator/dispatcher.py +7 -0
  32. numba_cuda/numba/cuda/stubs.py +0 -308
  33. numba_cuda/numba/cuda/target.py +13 -5
  34. numba_cuda/numba/cuda/testing.py +156 -5
  35. numba_cuda/numba/cuda/tests/complex_usecases.py +113 -0
  36. numba_cuda/numba/cuda/tests/core/serialize_usecases.py +110 -0
  37. numba_cuda/numba/cuda/tests/core/test_serialize.py +359 -0
  38. numba_cuda/numba/cuda/tests/cudadrv/test_context_stack.py +10 -4
  39. numba_cuda/numba/cuda/tests/cudadrv/test_cuda_ndarray.py +33 -0
  40. numba_cuda/numba/cuda/tests/cudadrv/test_runtime.py +2 -2
  41. numba_cuda/numba/cuda/tests/cudadrv/test_streams.py +1 -0
  42. numba_cuda/numba/cuda/tests/cudapy/extensions_usecases.py +1 -1
  43. numba_cuda/numba/cuda/tests/cudapy/test_caching.py +5 -10
  44. numba_cuda/numba/cuda/tests/cudapy/test_compiler.py +15 -0
  45. numba_cuda/numba/cuda/tests/cudapy/test_complex.py +1 -1
  46. numba_cuda/numba/cuda/tests/cudapy/test_debuginfo.py +381 -0
  47. numba_cuda/numba/cuda/tests/cudapy/test_enums.py +1 -1
  48. numba_cuda/numba/cuda/tests/cudapy/test_extending.py +1 -1
  49. numba_cuda/numba/cuda/tests/cudapy/test_inspect.py +108 -24
  50. numba_cuda/numba/cuda/tests/cudapy/test_intrinsics.py +37 -23
  51. numba_cuda/numba/cuda/tests/cudapy/test_operator.py +43 -27
  52. numba_cuda/numba/cuda/tests/cudapy/test_ufuncs.py +26 -9
  53. numba_cuda/numba/cuda/tests/cudapy/test_warning.py +27 -2
  54. numba_cuda/numba/cuda/tests/enum_usecases.py +56 -0
  55. numba_cuda/numba/cuda/tests/nocuda/test_library_lookup.py +1 -2
  56. numba_cuda/numba/cuda/tests/nocuda/test_nvvm.py +1 -1
  57. numba_cuda/numba/cuda/utils.py +785 -0
  58. numba_cuda/numba/cuda/vector_types.py +1 -1
  59. {numba_cuda-0.17.0.dist-info → numba_cuda-0.18.1.dist-info}/METADATA +18 -4
  60. {numba_cuda-0.17.0.dist-info → numba_cuda-0.18.1.dist-info}/RECORD +63 -50
  61. numba_cuda/numba/cuda/cpp_function_wrappers.cu +0 -46
  62. {numba_cuda-0.17.0.dist-info → numba_cuda-0.18.1.dist-info}/WHEEL +0 -0
  63. {numba_cuda-0.17.0.dist-info → numba_cuda-0.18.1.dist-info}/licenses/LICENSE +0 -0
  64. {numba_cuda-0.17.0.dist-info → numba_cuda-0.18.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1291 @@
1
+ """
2
+ Generic helpers for LLVM code generation.
3
+ """
4
+
5
+ import collections
6
+ from contextlib import contextmanager, ExitStack
7
+ import functools
8
+
9
+ from llvmlite import ir
10
+
11
+ from numba.core import types, debuginfo
12
+ from numba.cuda import config, utils
13
+ import numba.core.datamodel
14
+
15
+
16
# Frequently used LLVM types and constant values.
bool_t = ir.IntType(1)
int8_t = ir.IntType(8)
int32_t = ir.IntType(32)
# Pointer-sized integer for the host machine (width from utils.MACHINE_BITS).
intp_t = ir.IntType(utils.MACHINE_BITS)
# Generic untyped pointer (i8*).
voidptr_t = int8_t.as_pointer()

# Canonical boolean constants, as i1 bits and as i8 bytes.
true_bit = bool_t(1)
false_bit = bool_t(0)
true_byte = int8_t(1)
false_byte = int8_t(0)
26
+
27
+
28
def as_bool_bit(builder, value):
    """Reduce *value* to an i1 predicate: true iff *value* is non-zero."""
    zero = value.type(0)
    return builder.icmp_unsigned("!=", value, zero)
30
+
31
+
32
def make_anonymous_struct(builder, values, struct_type=None):
    """
    Create an anonymous struct containing the given LLVM *values*.

    If *struct_type* is not given, a literal struct type is inferred from
    the value types.
    """
    if struct_type is None:
        struct_type = ir.LiteralStructType([v.type for v in values])
    result = struct_type(ir.Undefined)
    for pos, val in enumerate(values):
        result = builder.insert_value(result, val, pos)
    return result
42
+
43
+
44
def make_bytearray(buf):
    """
    Make a byte array constant from *buf*.
    """
    data = bytearray(buf)
    return ir.Constant(ir.ArrayType(ir.IntType(8), len(data)), data)
51
+
52
+
53
# Cache of generated proxy classes, keyed by (frontend type, kind).
_struct_proxy_cache = {}


def create_struct_proxy(fe_type, kind="value"):
    """
    Returns a specialized StructProxy subclass for the given fe_type.

    *kind* selects the representation: "value" for regular values,
    "data" for values persisted in memory.  Generated classes are cached.
    """
    cache_key = (fe_type, kind)
    try:
        return _struct_proxy_cache[cache_key]
    except KeyError:
        pass
    base = {
        "value": ValueStructProxy,
        "data": DataStructProxy,
    }[kind]
    proxy_cls = type(
        base.__name__ + "_" + str(fe_type),
        (base,),
        dict(_fe_type=fe_type),
    )
    _struct_proxy_cache[cache_key] = proxy_cls
    return proxy_cls
74
+
75
+
76
def copy_struct(dst, src, repl=None):
    """
    Copy structure from *src* to *dst* with replacement from *repl*.

    Every field listed in ``src._datamodel._fields`` is copied from *src*
    unless *repl* supplies an override; any leftover keys in *repl* are
    then set on *dst* as extra attributes.  Returns *dst*.
    """
    # Avoid a mutable default argument; an absent *repl* means "no
    # overrides".  The caller's mapping is never mutated.
    repl = {} if repl is None else repl.copy()
    # Copy data from src, or use the override supplied in repl.
    for k in src._datamodel._fields:
        setattr(dst, k, repl.pop(k, getattr(src, k)))
    # Use remaining key-values in repl.
    for k, v in repl.items():
        setattr(dst, k, v)
    return dst
89
+
90
+
91
class _StructProxy(object):
    """
    Creates a `Structure` like interface that is constructed with information
    from DataModel instance.  FE type must have a data model that is a
    subclass of StructModel.
    """

    # Subclasses must override this with the frontend type being proxied.
    _fe_type = None

    def __init__(self, context, builder, value=None, ref=None):
        self._context = context
        self._datamodel = context.data_model_manager[self._fe_type]
        if not isinstance(self._datamodel, numba.core.datamodel.StructModel):
            raise TypeError(
                "Not a structure model: {0}".format(self._datamodel)
            )
        self._builder = builder

        self._be_type = self._get_be_type(self._datamodel)
        assert not is_pointer(self._be_type)

        outer_ref, inner_ref = self._make_refs(ref)
        if inner_ref.type.pointee != self._be_type:
            raise AssertionError(
                "bad ref type: expected %s, got %s"
                % (self._be_type.as_pointer(), inner_ref.type)
            )

        if value is not None:
            if value.type != outer_ref.type.pointee:
                raise AssertionError(
                    "bad value type: expected %s, got %s"
                    % (outer_ref.type.pointee, value.type)
                )
            self._builder.store(value, outer_ref)

        self._value = inner_ref
        self._outer_ref = outer_ref

    def _make_refs(self, ref):
        """
        Return an (outer ref, value ref) pair.  By default, these are
        the same pointers, but a derived class may override this.
        """
        if ref is None:
            ref = alloca_once(self._builder, self._be_type, zfill=True)
        return ref, ref

    def _get_be_type(self, datamodel):
        raise NotImplementedError

    def _cast_member_to_value(self, index, val):
        raise NotImplementedError

    def _cast_member_from_value(self, index, val):
        raise NotImplementedError

    def _get_ptr_by_index(self, index):
        # Pointer into the struct for the field at *index*.
        return gep_inbounds(self._builder, self._value, 0, index)

    def _get_ptr_by_name(self, attrname):
        return self._get_ptr_by_index(
            self._datamodel.get_field_position(attrname)
        )

    def __getattr__(self, field):
        """
        Load the LLVM value of the named *field*.
        """
        if field.startswith("_"):
            raise AttributeError(field)
        return self[self._datamodel.get_field_position(field)]

    def __setattr__(self, field, value):
        """
        Store the LLVM *value* into the named *field*.
        """
        if field.startswith("_"):
            # Private attributes use regular Python attribute storage.
            return super(_StructProxy, self).__setattr__(field, value)
        self[self._datamodel.get_field_position(field)] = value

    def __getitem__(self, index):
        """
        Load the LLVM value of the field at *index*.
        """
        raw = self._builder.load(self._get_ptr_by_index(index))
        return self._cast_member_to_value(index, raw)

    def __setitem__(self, index, value):
        """
        Store the LLVM *value* into the field at *index*.
        """
        ptr = self._get_ptr_by_index(index)
        value = self._cast_member_from_value(index, value)
        if value.type == ptr.type.pointee:
            self._builder.store(value, ptr)
            return
        same_pointee = (
            is_pointer(value.type)
            and is_pointer(ptr.type.pointee)
            and value.type.pointee == ptr.type.pointee.pointee
        )
        if not same_pointee:
            raise TypeError(
                "Invalid store of {value.type} to "
                "{ptr.type.pointee} in "
                "{self._datamodel} "
                "(trying to write member #{index})".format(
                    value=value, ptr=ptr, self=self, index=index
                )
            )
        # The types differ by address-space only; auto coerce it.
        value = self._context.addrspacecast(
            self._builder, value, ptr.type.pointee.addrspace
        )
        self._builder.store(value, ptr)

    def __len__(self):
        """
        Return the number of fields.
        """
        return self._datamodel.field_count

    def _getpointer(self):
        """
        Return the LLVM pointer to the underlying structure.
        """
        return self._outer_ref

    def _getvalue(self):
        """
        Load and return the value of the underlying LLVM structure.
        """
        return self._builder.load(self._outer_ref)

    def _setvalue(self, value):
        """
        Store the value in this structure.
        """
        assert not is_pointer(value.type)
        assert value.type == self._be_type, (value.type, self._be_type)
        self._builder.store(value, self._value)
233
+
234
+
235
class ValueStructProxy(_StructProxy):
    """
    Create a StructProxy suitable for accessing regular values
    (e.g. LLVM values or alloca slots).
    """

    def _get_be_type(self, datamodel):
        # Use the model's "value" representation.
        return datamodel.get_value_type()

    def _cast_member_to_value(self, index, val):
        # Members are stored as-is; no conversion needed.
        return val

    def _cast_member_from_value(self, index, val):
        # Members are stored as-is; no conversion needed.
        return val
249
+
250
+
251
class DataStructProxy(_StructProxy):
    """
    Create a StructProxy suitable for accessing data persisted in memory.
    """

    def _get_be_type(self, datamodel):
        # Use the model's "data" (persisted) representation.
        return datamodel.get_data_type()

    def _cast_member_to_value(self, index, val):
        # Convert from stored data representation to a value.
        member_model = self._datamodel.get_model(index)
        return member_model.from_data(self._builder, val)

    def _cast_member_from_value(self, index, val):
        # Convert from a value to the stored data representation.
        member_model = self._datamodel.get_model(index)
        return member_model.as_data(self._builder, val)
266
+
267
+
268
class Structure(object):
    """
    A high-level object wrapping a alloca'ed LLVM structure, including
    named fields and attribute access.
    """

    # XXX Should this warrant several separate constructors?
    def __init__(self, context, builder, value=None, ref=None, cast_ref=False):
        self._type = context.get_struct_type(self)
        self._context = context
        self._builder = builder
        if ref is None:
            # Allocate a fresh zero-filled slot, optionally storing *value*.
            self._value = alloca_once(builder, self._type, zfill=True)
            if value is not None:
                assert not is_pointer(value.type)
                assert value.type == self._type, (value.type, self._type)
                builder.store(value, self._value)
        else:
            # Wrap an existing pointer; *value* must not also be given.
            assert value is None
            assert is_pointer(ref.type)
            if self._type != ref.type.pointee:
                if not cast_ref:
                    raise TypeError(
                        "mismatching pointer type: got %s, expected %s"
                        % (ref.type.pointee, self._type)
                    )
                ref = builder.bitcast(ref, self._type.as_pointer())
            self._value = ref

        # Build lookup tables: field name -> position, position -> GEP
        # indices, position -> declared type.
        self._namemap = {}
        self._fdmap = []
        self._typemap = []
        base = int32_t(0)
        for pos, (fieldname, fieldtype) in enumerate(self._fields):
            self._namemap[fieldname] = pos
            self._fdmap.append((base, int32_t(pos)))
            self._typemap.append(fieldtype)

    def _get_ptr_by_index(self, index):
        return self._builder.gep(self._value, self._fdmap[index], inbounds=True)

    def _get_ptr_by_name(self, attrname):
        return self._get_ptr_by_index(self._namemap[attrname])

    def __getattr__(self, field):
        """
        Load the LLVM value of the named *field*.
        """
        if field.startswith("_"):
            raise AttributeError(field)
        return self[self._namemap[field]]

    def __setattr__(self, field, value):
        """
        Store the LLVM *value* into the named *field*.
        """
        if field.startswith("_"):
            # Private attributes use regular Python attribute storage.
            return super(Structure, self).__setattr__(field, value)
        self[self._namemap[field]] = value

    def __getitem__(self, index):
        """
        Load the LLVM value of the field at *index*.
        """
        return self._builder.load(self._get_ptr_by_index(index))

    def __setitem__(self, index, value):
        """
        Store the LLVM *value* into the field at *index*.
        """
        ptr = self._get_ptr_by_index(index)
        if ptr.type.pointee != value.type:
            fmt = "Type mismatch: __setitem__(%d, ...) expected %r but got %r"
            raise AssertionError(
                fmt % (index, str(ptr.type.pointee), str(value.type))
            )
        self._builder.store(value, ptr)

    def __len__(self):
        """
        Return the number of fields.
        """
        return len(self._namemap)

    def _getpointer(self):
        """
        Return the LLVM pointer to the underlying structure.
        """
        return self._value

    def _getvalue(self):
        """
        Load and return the value of the underlying LLVM structure.
        """
        return self._builder.load(self._value)

    def _setvalue(self, value):
        """Store the value in this structure"""
        assert not is_pointer(value.type)
        assert value.type == self._type, (value.type, self._type)
        self._builder.store(value, self._value)

    # __iter__ is derived by Python from __len__ and __getitem__
375
+
376
+
377
def alloca_once(builder, ty, size=None, name="", zfill=False):
    """Allocate stack memory at the entry block of the current function
    pointed by ``builder`` with llvm type ``ty``.  The optional ``size`` arg
    set the number of element to allocate.  The default is 1.  The optional
    ``name`` arg set the symbol name inside the llvm IR for debugging.
    If ``zfill`` is set, fill the memory with zeros at the current
    use-site location.  Note that the memory is always zero-filled after the
    ``alloca`` at init-site (the entry block).
    """
    if isinstance(size, int):
        size = ir.Constant(intp_t, size)
    # Suspend debug metadata emission, else it links up python source lines
    # with alloca in the entry block as well as their actual location and it
    # makes the debug info "jump about".
    with debuginfo.suspend_emission(builder):
        with builder.goto_entry_block():
            slot = builder.alloca(ty, size=size, name=name)
            # Always zero-fill at init-site.  This is safe.
            builder.store(ty(None), slot)
        # Also zero-fill at the use-site
        if zfill:
            builder.store(slot.type.pointee(None), slot)
        return slot
400
+
401
+
402
def sizeof(builder, ptr_type):
    """Compute sizeof using GEP"""
    # GEP from a NULL pointer to element #1 yields the element size, which
    # ptrtoint then materializes as an integer.
    null_ptr = ptr_type(None)
    end = null_ptr.gep([int32_t(1)])
    return builder.ptrtoint(end, intp_t)
407
+
408
+
409
def alloca_once_value(builder, value, name="", zfill=False):
    """
    Like alloca_once(), but passing a *value* instead of a type.  The
    type is inferred and the allocated slot is also initialized with the
    given value.
    """
    # Forward *name* to alloca_once(); previously it was accepted but
    # silently ignored, so the requested IR symbol name never appeared.
    storage = alloca_once(builder, value.type, name=name, zfill=zfill)
    builder.store(value, storage)
    return storage
418
+
419
+
420
def insert_pure_function(module, fnty, name):
    """
    Insert a pure function (in the functional programming sense) in the
    given module.
    """
    fn = get_or_insert_function(module, fnty, name)
    # Mark the declaration so LLVM may optimize around calls to it.
    for attr in ("readonly", "nounwind"):
        fn.attributes.add(attr)
    return fn
429
+
430
+
431
def get_or_insert_function(module, fnty, name):
    """
    Get the function named *name* with type *fnty* from *module*, or insert it
    if it doesn't exist.
    """
    existing = module.globals.get(name, None)
    if existing is not None:
        return existing
    return ir.Function(module, fnty, name)
440
+
441
+
442
def get_or_insert_named_metadata(module, name):
    """Return the named metadata node *name*, creating it if missing."""
    try:
        return module.get_named_metadata(name)
    except KeyError:
        # Not present yet: create it.
        return module.add_named_metadata(name)
447
+
448
+
449
def add_global_variable(module, ty, name, addrspace=0):
    """Add a global of type *ty* to *module*, uniquifying *name* first."""
    unique = module.get_unique_name(name)
    return ir.GlobalVariable(module, ty, unique, addrspace)
452
+
453
+
454
def terminate(builder, bbend):
    """Branch to *bbend* unless the current block is already terminated."""
    if builder.basic_block.terminator is None:
        builder.branch(bbend)
458
+
459
+
460
def get_null_value(ltype):
    """Return the NULL/zero constant of LLVM type *ltype*."""
    return ltype(None)


def is_null(builder, val):
    """Return an i1 predicate: *val* equals NULL."""
    return builder.icmp_unsigned("==", get_null_value(val.type), val)


def is_not_null(builder, val):
    """Return an i1 predicate: *val* differs from NULL."""
    return builder.icmp_unsigned("!=", get_null_value(val.type), val)
472
+
473
+
474
def if_unlikely(builder, pred):
    """Open an if-block on *pred*, hinted as rarely taken."""
    return builder.if_then(pred, likely=False)


def if_likely(builder, pred):
    """Open an if-block on *pred*, hinted as usually taken."""
    return builder.if_then(pred, likely=True)


def ifnot(builder, pred):
    """Open an if-block entered when *pred* is false."""
    return builder.if_then(builder.not_(pred))
484
+
485
+
486
def increment_index(builder, val):
    """
    Increment an index *val*.
    """
    # The "nsw" flag hints LLVM that the index never changes sign.
    # Unfortunately this doesn't always work (e.g. ndindex()).
    return builder.add(val, val.type(1), flags=["nsw"])
495
+
496
+
497
Loop = collections.namedtuple("Loop", ("index", "do_break"))


@contextmanager
def for_range(builder, count, start=None, intp=None):
    """
    Generate LLVM IR for a for-loop in [start, count).
    *start* is equal to 0 by default.

    Yields a Loop namedtuple with the following members:
    - `index` is the loop index's value
    - `do_break` is a no-argument callable to break out of the loop
    """
    intp = count.type if intp is None else intp
    start = intp(0) if start is None else start
    stop = count

    bbcond = builder.append_basic_block("for.cond")
    bbbody = builder.append_basic_block("for.body")
    bbend = builder.append_basic_block("for.end")

    def do_break():
        # Jump straight past the loop.
        builder.branch(bbend)

    bbstart = builder.basic_block
    builder.branch(bbcond)

    with builder.goto_block(bbcond):
        index = builder.phi(intp, name="loop.index")
        pred = builder.icmp_signed("<", index, stop)
        builder.cbranch(pred, bbbody, bbend)

    with builder.goto_block(bbbody):
        yield Loop(index, do_break)
        # The body may have opened new basic blocks; re-read the active one.
        bbbody = builder.basic_block
        incr = increment_index(builder, index)
        terminate(builder, bbcond)

    # Wire up the phi's incoming edges now that both sources exist.
    index.add_incoming(start, bbstart)
    index.add_incoming(incr, bbbody)

    builder.position_at_end(bbend)
542
+
543
+
544
@contextmanager
def for_range_slice(builder, start, stop, step, intp=None, inc=True):
    """
    Generate LLVM IR for a for-loop based on a slice.  Yields an
    (index, count) tuple where `index` is the slice index's value
    inside the loop, and `count` the iteration count.

    Parameters
    ----------
    builder : object
        IRBuilder object
    start : int
        The beginning value of the slice
    stop : int
        The end value of the slice
    step : int
        The step value of the slice
    intp :
        The data type
    inc : boolean, optional
        Signals whether the step is positive (True) or negative (False).
    """
    if intp is None:
        intp = start.type

    bbcond = builder.append_basic_block("for.cond")
    bbbody = builder.append_basic_block("for.body")
    bbend = builder.append_basic_block("for.end")
    bbstart = builder.basic_block
    builder.branch(bbcond)

    with builder.goto_block(bbcond):
        index = builder.phi(intp, name="loop.index")
        count = builder.phi(intp, name="loop.count")
        # Loop while the index has not crossed *stop* in the walk direction.
        cmp_op = "<" if inc else ">"
        pred = builder.icmp_signed(cmp_op, index, stop)
        builder.cbranch(pred, bbbody, bbend)

    with builder.goto_block(bbbody):
        yield index, count
        # The body may have switched basic blocks; re-read the active one.
        bbbody = builder.basic_block
        incr = builder.add(index, step)
        next_count = increment_index(builder, count)
        terminate(builder, bbcond)

    # Wire up the phis' incoming edges now that both sources exist.
    index.add_incoming(start, bbstart)
    index.add_incoming(incr, bbbody)
    count.add_incoming(ir.Constant(intp, 0), bbstart)
    count.add_incoming(next_count, bbbody)
    builder.position_at_end(bbend)
600
+
601
+
602
@contextmanager
def for_range_slice_generic(builder, start, stop, step):
    """
    A helper wrapper for for_range_slice().  This is a context manager which
    yields two for_range_slice()-alike context managers, the first for
    the positive step case, the second for the negative step case.

    Use:
        with for_range_slice_generic(...) as (pos_range, neg_range):
            with pos_range as (idx, count):
                ...
            with neg_range as (idx, count):
                ...
    """
    intp = start.type
    is_pos_step = builder.icmp_signed(">=", step, ir.Constant(intp, 0))

    pos_for_range = for_range_slice(builder, start, stop, step, intp, inc=True)
    neg_for_range = for_range_slice(builder, start, stop, step, intp, inc=False)

    @contextmanager
    def cm_cond(branch_cm, loop_cm):
        # Enter the branch block, then the loop built inside it.
        with branch_cm, loop_cm as value:
            yield value

    with builder.if_else(is_pos_step, likely=True) as (then, otherwise):
        yield cm_cond(then, pos_for_range), cm_cond(otherwise, neg_for_range)
630
+
631
+
632
@contextmanager
def loop_nest(builder, shape, intp, order="C"):
    """
    Generate a loop nest walking a N-dimensional array.
    Yields a tuple of N indices for use in the inner loop body,
    iterating over the *shape* space.

    If *order* is 'C' (the default), indices are incremented inside-out
    (i.e. (0,0), (0,1), (0,2), (1,0) etc.).
    If *order* is 'F', they are incremented outside-in
    (i.e. (0,0), (1,0), (2,0), (0,1) etc.).
    This has performance implications when walking an array as it impacts
    the spatial locality of memory accesses.
    """
    assert order in "CF"
    if not shape:
        # 0-d array: a single iteration with no indices.
        yield ()
        return

    if order == "F":
        def _swap(seq):
            return seq[::-1]
    else:
        def _swap(seq):
            return seq

    with _loop_nest(builder, _swap(shape), intp) as indices:
        assert len(indices) == len(shape)
        yield _swap(indices)
658
+
659
+
660
@contextmanager
def _loop_nest(builder, shape, intp):
    """Recursive helper for loop_nest(): one for_range() per dimension."""
    with for_range(builder, shape[0], intp=intp) as loop:
        if len(shape) == 1:
            yield (loop.index,)
        else:
            with _loop_nest(builder, shape[1:], intp) as inner:
                yield (loop.index,) + inner
668
+
669
+
670
def pack_array(builder, values, ty=None):
    """
    Pack a sequence of values in a LLVM array.  *ty* should be given
    if the array may be empty, in which case the type can't be inferred
    from the values.
    """
    elem_ty = values[0].type if ty is None else ty
    packed = ir.ArrayType(elem_ty, len(values))(ir.Undefined)
    for pos, val in enumerate(values):
        packed = builder.insert_value(packed, val, pos)
    return packed
683
+
684
+
685
def pack_struct(builder, values):
    """
    Pack a sequence of values into a LLVM struct.
    """
    struct_ty = ir.LiteralStructType([v.type for v in values])
    packed = struct_ty(ir.Undefined)
    for pos, val in enumerate(values):
        packed = builder.insert_value(packed, val, pos)
    return packed
694
+
695
+
696
def unpack_tuple(builder, tup, count=None):
    """
    Unpack an array or structure of values, return a Python tuple.
    """
    if count is None:
        # *tup* is assumed to be an aggregate with a known element count.
        count = len(tup.type.elements)
    return [builder.extract_value(tup, i) for i in range(count)]
705
+
706
+
707
def get_item_pointer(
    context, builder, aryty, ary, inds, wraparound=False, boundscheck=False
):
    """
    Compute a pointer to the element of array structure *ary* (of numba
    type *aryty*) at the indices *inds*.

    Set boundscheck=True for any pointer access that should be
    boundschecked.  do_boundscheck() will handle enabling or disabling the
    actual boundschecking based on the user config.
    """
    ndim = aryty.ndim
    return get_item_pointer2(
        context,
        builder,
        data=ary.data,
        shape=unpack_tuple(builder, ary.shape, count=ndim),
        strides=unpack_tuple(builder, ary.strides, count=ndim),
        layout=aryty.layout,
        inds=inds,
        wraparound=wraparound,
        boundscheck=boundscheck,
    )
726
+
727
+
728
def do_boundscheck(context, builder, ind, dimlen, axis=None):
    """
    Emit a bounds check of index *ind* against dimension length *dimlen*,
    returning an IndexError to the caller when it fails.  *axis* (an int or
    an LLVM value, if known) is only used in the debug printout.
    """

    def _dbg():
        # Remove this when we figure out how to include this information
        # in the error message.
        if axis is None:
            printf(
                builder,
                "debug: IndexError: index %d is out of bounds for size %d\n",
                ind,
                dimlen,
            )
        elif isinstance(axis, int):
            printf(
                builder,
                "debug: IndexError: index %d is out of bounds "
                "for axis {} with size %d\n".format(axis),
                ind,
                dimlen,
            )
        else:
            printf(
                builder,
                "debug: IndexError: index %d is out of bounds "
                "for axis %d with size %d\n",
                ind,
                axis,
                dimlen,
            )

    msg = "index is out of bounds"
    # Upper bound: ind >= dimlen
    out_of_bounds_upper = builder.icmp_signed(">=", ind, dimlen)
    with if_unlikely(builder, out_of_bounds_upper):
        if config.FULL_TRACEBACKS:
            _dbg()
        context.call_conv.return_user_exc(builder, IndexError, (msg,))
    # Lower bound: ind < 0
    out_of_bounds_lower = builder.icmp_signed("<", ind, ind.type(0))
    with if_unlikely(builder, out_of_bounds_lower):
        if config.FULL_TRACEBACKS:
            _dbg()
        context.call_conv.return_user_exc(builder, IndexError, (msg,))
769
+
770
+
771
def get_item_pointer2(
    context,
    builder,
    data,
    shape,
    strides,
    layout,
    inds,
    wraparound=False,
    boundscheck=False,
):
    """
    Compute a pointer to the element at indices *inds* of an array given by
    its *data* pointer, *shape*, *strides* and *layout*.

    Set boundscheck=True for any pointer access that should be
    boundschecked.  do_boundscheck() will handle enabling or disabling the
    actual boundschecking based on the user config.
    """
    if wraparound:
        # Normalize negative indices by adding the dimension length.
        indices = []
        for ind, dimlen in zip(inds, shape):
            is_neg = builder.icmp_signed("<", ind, ind.type(0))
            wrapped = builder.add(dimlen, ind)
            indices.append(builder.select(is_neg, wrapped, ind))
    else:
        indices = inds
    if boundscheck:
        for axis, (ind, dimlen) in enumerate(zip(indices, shape)):
            do_boundscheck(context, builder, ind, dimlen, axis)

    if not indices:
        # Indexing with empty tuple
        return builder.gep(data, [int32_t(0)])
    intp = indices[0].type

    if layout in "CF":
        # Contiguous layouts: compute the per-dimension step in elements.
        steps = []
        if layout == "C":
            # C contiguous: inner dimensions vary fastest.
            for i in range(len(shape)):
                step = intp(1)
                for extent in shape[i + 1 :]:
                    step = builder.mul(step, extent)
                steps.append(step)
        elif layout == "F":
            # F contiguous: outer dimensions vary fastest.
            for i in range(len(shape)):
                step = intp(1)
                for extent in shape[:i]:
                    step = builder.mul(step, extent)
                steps.append(step)
        else:
            raise Exception("unreachable")

        # Accumulate the flat element offset.
        loc = intp(0)
        for idx, step in zip(indices, steps):
            loc = builder.add(loc, builder.mul(idx, step))
        return builder.gep(data, [loc])
    else:
        # Arbitrary layout: fall back to the byte strides.
        dimoffs = [builder.mul(s, i) for s, i in zip(strides, indices)]
        offset = functools.reduce(builder.add, dimoffs)
        return pointer_add(builder, data, offset)
836
+
837
+
838
def _scalar_pred_against_zero(builder, value, fpred, icond):
    """
    Compare *value* against zero: floats use the comparison callable
    *fpred*, integers use a signed icmp with condition *icond*.
    """
    zero = value.type(0)
    if isinstance(value.type, (ir.FloatType, ir.DoubleType)):
        return fpred(value, zero)
    if isinstance(value.type, ir.IntType):
        return builder.icmp_signed(icond, value, zero)
    raise TypeError("unexpected value type %s" % (value.type,))
847
+
848
+
849
def is_scalar_zero(builder, value):
    """
    Return a predicate representing whether *value* is equal to zero.
    """
    return _scalar_pred_against_zero(
        builder, value, functools.partial(builder.fcmp_ordered, "=="), "=="
    )


def is_not_scalar_zero(builder, value):
    """
    Return a predicate representing whether a *value* is not equal to zero.
    (not exactly "not is_scalar_zero" because of nans)
    """
    return _scalar_pred_against_zero(
        builder, value, functools.partial(builder.fcmp_unordered, "!="), "!="
    )


def is_scalar_zero_or_nan(builder, value):
    """
    Return a predicate representing whether *value* is equal to either zero
    or NaN.
    """
    return _scalar_pred_against_zero(
        builder, value, functools.partial(builder.fcmp_unordered, "=="), "=="
    )


# Truthiness aliases: a value is "true" iff it is non-zero (ordered).
is_true = is_not_scalar_zero
is_false = is_scalar_zero


def is_scalar_neg(builder, value):
    """
    Is *value* negative?  Assumes *value* is signed.
    """
    return _scalar_pred_against_zero(
        builder, value, functools.partial(builder.fcmp_ordered, "<"), "<"
    )
889
+
890
+
891
@contextmanager
def early_exit_if(builder, stack: ExitStack, cond):
    """
    The Python code::

        with contextlib.ExitStack() as stack:
            with early_exit_if(builder, stack, cond):
                cleanup()
            body()

    emits the code::

        if (cond) {
            <cleanup>
        }
        else {
            <body>
        }

    This can be useful for generating code with lots of early exits, without
    having to increase the indentation each time.
    """
    then_blk, else_blk = stack.enter_context(
        builder.if_else(cond, likely=False)
    )
    with then_blk:
        yield
    # The else-branch stays open on the caller's stack so subsequent code is
    # emitted there until the stack unwinds.
    stack.enter_context(else_blk)
917
+
918
+
919
def early_exit_if_null(builder, stack, obj):
    """
    Shorthand for :func:`early_exit_if` with an ``obj == NULL``
    condition — the usual CPython-API error convention.
    """
    null_cond = is_null(builder, obj)
    return early_exit_if(builder, stack, null_cond)
925
+
926
+
927
def guard_null(context, builder, value, exc_tuple):
    """
    Emit a check raising an exception when *value* is null or zero.

    *exc_tuple* is an (exception type, arguments...) tuple.
    """
    with builder.if_then(is_scalar_zero(builder, value), likely=False):
        exc_type, *exc_rest = exc_tuple
        context.call_conv.return_user_exc(
            builder, exc_type, tuple(exc_rest) or None
        )
936
+
937
+
938
def guard_memory_error(context, builder, pointer, msg=None):
    """
    Emit a check raising a MemoryError when *pointer* is NULL.

    *msg*, if given, becomes the exception's argument.
    """
    assert isinstance(pointer.type, ir.PointerType), pointer.type
    with builder.if_then(is_null(builder, pointer), likely=False):
        context.call_conv.return_user_exc(
            builder, MemoryError, (msg,) if msg else ()
        )
946
+
947
+
948
@contextmanager
def if_zero(builder, value, likely=False):
    """
    Context manager: the enclosed block only executes when the scalar
    *value* is zero.
    """
    zero_pred = is_scalar_zero(builder, value)
    with builder.if_then(zero_pred, likely=likely):
        yield
955
+
956
+
957
+ guard_zero = guard_null
958
+
959
+
960
def is_pointer(ltyp):
    """
    Whether the LLVM type *ltyp* is a pointer type.
    """
    # The original docstring incorrectly described this as a "struct type"
    # check and referred to a nonexistent *typ* parameter.
    return isinstance(ltyp, ir.PointerType)
965
+
966
+
967
def get_record_member(builder, record, offset, typ):
    """
    Return a pointer to the member of *record* at *offset*, cast to a
    pointer-to-*typ*.
    """
    raw_ptr = gep_inbounds(builder, record, 0, offset)
    # The raw member slot must not itself be a pointer before the cast.
    assert not is_pointer(raw_ptr.type.pointee)
    return builder.bitcast(raw_ptr, typ.as_pointer())
971
+
972
+
973
def is_neg_int(builder, val):
    """Return a predicate: is the signed integer *val* below zero?"""
    zero = val.type(0)
    return builder.icmp_signed("<", val, zero)
975
+
976
+
977
def gep_inbounds(builder, ptr, *inds, **kws):
    """
    Same as *gep*, but add the `inbounds` keyword.

    `inbounds` tells LLVM the computed address is within the bounds of
    the allocated object, enabling stronger optimization.
    """
    return gep(builder, ptr, *inds, inbounds=True, **kws)
982
+
983
+
984
def gep(builder, ptr, *inds, **kws):
    """
    Emit a getelementptr instruction for *ptr* with the given indices.

    Indices may be LLVM values or Python ints; ints are turned into
    i32 constants because LLVM only accepts int32 indices inside
    structs (not int64).
    """
    name = kws.pop("name", "")
    inbounds = kws.pop("inbounds", False)
    assert not kws
    indices = [int32_t(i) if isinstance(i, int) else i for i in inds]
    return builder.gep(ptr, indices, name=name, inbounds=inbounds)
1001
+
1002
+
1003
def pointer_add(builder, ptr, offset, return_type=None):
    """
    Add the integral *offset* (bytes) to pointer *ptr* and return a
    pointer of *return_type* (default: the same type as *ptr*).

    The arithmetic is done on the raw address and ignores the width of
    the pointed-to item type.
    """
    addr = builder.ptrtoint(ptr, intp_t)
    delta = intp_t(offset) if isinstance(offset, int) else offset
    new_addr = builder.add(addr, delta)
    return builder.inttoptr(new_addr, return_type or ptr.type)
1016
+
1017
+
1018
def memset(builder, ptr, size, value):
    """
    Fill *size* bytes starting from *ptr* with *value* via the
    ``llvm.memset`` intrinsic.
    """
    intrin = builder.module.declare_intrinsic(
        "llvm.memset", (voidptr_t, size.type)
    )
    dest = builder.bitcast(ptr, voidptr_t)
    byte_value = int8_t(value) if isinstance(value, int) else value
    # Final argument: not volatile.
    builder.call(intrin, [dest, byte_value, size, bool_t(0)])
1027
+
1028
+
1029
def memset_padding(builder, ptr):
    """
    Zero the padding bytes of the pointee while preserving its value.
    """
    # Save the current contents, zero the whole allocation, then write
    # the value back: only the padding bytes end up changed.
    saved = builder.load(ptr)
    memset(builder, ptr, sizeof(builder, ptr.type), 0)
    builder.store(saved, ptr)
1039
+
1040
+
1041
def global_constant(builder_or_module, name, value, linkage="internal"):
    """
    Get or create a (LLVM module-)global constant with *name* and *value*.

    *builder_or_module* may be either an ir.Module or a builder, in which
    case the builder's module is used.
    """
    if isinstance(builder_or_module, ir.Module):
        module = builder_or_module
    else:
        module = builder_or_module.module
    data = add_global_variable(module, value.type, name)
    data.linkage = linkage
    data.global_constant = True
    data.initializer = value
    return data
1054
+
1055
+
1056
def divmod_by_constant(builder, val, divisor):
    """
    Compute the (quotient, remainder) of *val* divided by the constant
    positive *divisor*.  The semantics reflects those of Python integer
    floor division, rather than C's / LLVM's signed division and modulo.
    The difference lies with a negative *val*.
    """
    assert divisor > 0
    divisor = val.type(divisor)
    one = val.type(1)

    quot = alloca_once(builder, val.type)

    with builder.if_else(is_neg_int(builder, val)) as (if_neg, if_pos):
        with if_pos:
            # quot = val / divisor
            quot_val = builder.sdiv(val, divisor)
            builder.store(quot_val, quot)
        with if_neg:
            # LLVM sdiv truncates toward zero; shifting a negative value
            # up by one before dividing, then subtracting one, yields
            # Python's floor-division result instead.
            # quot = -1 + (val + 1) / divisor
            val_plus_one = builder.add(val, one)
            quot_val = builder.sdiv(val_plus_one, divisor)
            builder.store(builder.sub(quot_val, one), quot)

    # rem = val - quot * divisor
    # (should be slightly faster than a separate modulo operation)
    quot_val = builder.load(quot)
    rem_val = builder.sub(val, builder.mul(quot_val, divisor))
    return quot_val, rem_val
1085
+
1086
+
1087
def cbranch_or_continue(builder, cond, bbtrue):
    """
    Branch to *bbtrue* when *cond* holds; otherwise fall through into a
    freshly created continuation block.

    The builder is repositioned at the end of the new block, which is
    also returned.
    """
    continuation = builder.append_basic_block(".continue")
    builder.cbranch(cond, bbtrue, continuation)
    builder.position_at_end(continuation)
    return continuation
1098
+
1099
+
1100
def memcpy(builder, dst, src, count):
    """
    Emit an element-wise copy loop from *src* to *dst*.

    Unlike the C equivalent, each element can be any LLVM type.

    Assumes
    -------
    * dst.type == src.type
    * count is positive
    """
    assert dst.type == src.type
    # LLVM does usually fold this loop into a raw memcpy() whenever
    # possible.
    with for_range(builder, count, intp=count.type) as loop:
        dst_ptr = builder.gep(dst, [loop.index])
        src_ptr = builder.gep(src, [loop.index])
        builder.store(builder.load(src_ptr), dst_ptr)
1119
+
1120
+
1121
def _raw_memcpy(builder, func_name, dst, src, count, itemsize, align):
    """
    Emit a call to the *func_name* intrinsic (``llvm.memcpy`` or
    ``llvm.memmove``) copying ``count * itemsize`` bytes from *src* to
    *dst*.
    """
    # NOTE(review): *align* is accepted but never forwarded to the
    # intrinsic call — confirm whether alignment should be applied via
    # parameter attributes.
    size_t = count.type
    if isinstance(itemsize, int):
        itemsize = ir.Constant(size_t, itemsize)

    memcpy = builder.module.declare_intrinsic(
        func_name, [voidptr_t, voidptr_t, size_t]
    )
    is_volatile = false_bit
    builder.call(
        memcpy,
        [
            builder.bitcast(dst, voidptr_t),
            builder.bitcast(src, voidptr_t),
            builder.mul(count, itemsize),
            is_volatile,
        ],
    )
1139
+
1140
+
1141
def raw_memcpy(builder, dst, src, count, itemsize, align=1):
    """
    Emit an ``llvm.memcpy`` of *count* items, each *itemsize* bytes,
    from *src* to *dst*.
    """
    return _raw_memcpy(
        builder, "llvm.memcpy", dst, src, count, itemsize, align
    )
1147
+
1148
+
1149
def raw_memmove(builder, dst, src, count, itemsize, align=1):
    """
    Emit an ``llvm.memmove`` (overlap-safe copy) of *count* items, each
    *itemsize* bytes, from *src* to *dst*.
    """
    return _raw_memcpy(
        builder, "llvm.memmove", dst, src, count, itemsize, align
    )
1157
+
1158
+
1159
def muladd_with_overflow(builder, a, b, c):
    """
    Compute ``a * b + c`` on signed integers with overflow detection.

    Returns a (result, overflow bit) pair; the bit is set if either the
    multiplication or the addition overflowed.
    """
    mul = builder.smul_with_overflow(a, b)
    product = builder.extract_value(mul, 0)
    mul_ovf = builder.extract_value(mul, 1)
    add = builder.sadd_with_overflow(product, c)
    total = builder.extract_value(add, 0)
    add_ovf = builder.extract_value(add, 1)
    return total, builder.or_(mul_ovf, add_ovf)
1171
+
1172
+
1173
def printf(builder, format, *args):
    """
    Emit a call to printf().

    *format* must be a Python string; the values to print follow in
    *args*.  No checking is done that the number or types of *args*
    match the format string.
    """
    assert isinstance(format, str)
    mod = builder.module
    # Embed the NUL-terminated format string as a module-level constant.
    cstring = voidptr_t
    fmt_bytes = make_bytearray((format + "\00").encode("ascii"))
    global_fmt = global_constant(mod, "printf_format", fmt_bytes)
    fnty = ir.FunctionType(int32_t, [cstring], var_arg=True)
    # Declare printf at most once per module.
    try:
        fn = mod.get_global("printf")
    except KeyError:
        fn = ir.Function(mod, fnty, name="printf")
    fmt_ptr = builder.bitcast(global_fmt, cstring)
    return builder.call(fn, [fmt_ptr, *args])
1197
+
1198
+
1199
def snprintf(builder, buffer, bufsz, format, *args):
    """Emit a call to libc snprintf(buffer, bufsz, format, ...args)."""
    assert isinstance(format, str)
    mod = builder.module
    # Embed the NUL-terminated format string as a module-level constant.
    cstring = voidptr_t
    fmt_bytes = make_bytearray((format + "\00").encode("ascii"))
    global_fmt = global_constant(mod, "snprintf_format", fmt_bytes)
    fnty = ir.FunctionType(
        int32_t,
        [cstring, intp_t, cstring],
        var_arg=True,
    )
    # win32 exposes snprintf under a leading-underscore symbol name.
    symbol = "_snprintf" if config.IS_WIN32 else "snprintf"
    # Declare snprintf at most once per module.
    try:
        fn = mod.get_global(symbol)
    except KeyError:
        fn = ir.Function(mod, fnty, name=symbol)
    fmt_ptr = builder.bitcast(global_fmt, cstring)
    return builder.call(fn, [buffer, bufsz, fmt_ptr, *args])
1224
+
1225
+
1226
def snprintf_stackbuffer(builder, bufsz, format, *args):
    """Like :func:`snprintf`, but writing into a zero-filled,
    stack-allocated buffer of *bufsz* bytes.

    Returns the buffer pointer as i8*.
    """
    assert isinstance(bufsz, int)
    stack_space = alloca_once(
        builder, ir.ArrayType(ir.IntType(8), bufsz), zfill=True
    )
    buf = builder.bitcast(stack_space, voidptr_t)
    snprintf(builder, buf, intp_t(bufsz), format, *args)
    return buf
1238
+
1239
+
1240
def normalize_ir_text(text):
    """
    Re-encode *text* so that it only contains latin1-representable code
    points, making it safe to embed in LLVM IR.
    """
    # Encoding to UTF-8 and decoding the bytes as latin1 maps any input
    # string onto a latin1-compatible equivalent.
    return text.encode("utf8").decode("latin1")
1247
+
1248
+
1249
def hexdump(builder, ptr, nbytes):
    """Emit debug printf() calls dumping the memory region
    *ptr* .. *ptr + nbytes* as hex, 16 bytes per line.
    """
    bytes_per_line = 16
    nbytes = builder.zext(nbytes, intp_t)
    printf(builder, "hexdump p=%p n=%zu", ptr, nbytes)
    byte_ptr_t = ir.IntType(8).as_pointer()
    ptr = builder.bitcast(ptr, byte_ptr_t)
    # Print each byte, breaking the line every bytes_per_line bytes.
    with for_range(builder, nbytes) as idx:
        column = builder.urem(idx.index, intp_t(bytes_per_line))
        at_line_start = builder.icmp_unsigned("==", column, intp_t(0))
        with builder.if_then(at_line_start):
            printf(builder, "\n")

        byte_addr = builder.gep(ptr, [idx.index])
        byte_val = builder.load(byte_addr)
        printf(builder, " %02x", byte_val)
    printf(builder, "\n")
1269
+
1270
+
1271
def is_nonelike(ty):
    """
    Return whether *ty* is none-like: the Python ``None`` singleton, a
    ``types.NoneType`` instance, or a ``types.Omitted`` instance.
    """
    # Single isinstance with a tuple replaces the original pair of
    # separate isinstance calls.
    return ty is None or isinstance(ty, (types.NoneType, types.Omitted))
1278
+
1279
+
1280
def is_empty_tuple(ty):
    """Return whether *ty* is a tuple type with no members."""
    if not isinstance(ty, types.Tuple):
        return False
    return not ty.types
1283
+
1284
+
1285
def create_constant_array(ty, val):
    """
    Build an LLVM constant fixed-length array from the Python sequence
    *val*, where *ty* is the element type.
    """
    array_type = ir.ArrayType(ty, len(val))
    return ir.Constant(array_type, val)