klongpy 0.6.8__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. klongpy/__init__.py +19 -1
  2. klongpy/adverbs.py +5 -5
  3. klongpy/autograd.py +308 -0
  4. klongpy/backend.py +167 -99
  5. klongpy/backends/__init__.py +94 -0
  6. klongpy/backends/base.py +320 -0
  7. klongpy/backends/numpy_backend.py +122 -0
  8. klongpy/backends/torch_backend.py +995 -0
  9. klongpy-0.6.8.data/scripts/kgpy → klongpy/cli.py +65 -88
  10. klongpy/core.py +228 -106
  11. klongpy/db/sys_fn_db.py +4 -3
  12. klongpy/dyads.py +173 -32
  13. klongpy/interpreter.py +31 -3
  14. klongpy/lib/help.kg +2 -2
  15. klongpy/monads.py +49 -12
  16. klongpy/repl.py +91 -0
  17. klongpy/sys_fn.py +129 -18
  18. klongpy/sys_fn_autograd.py +290 -0
  19. klongpy/sys_fn_ipc.py +18 -7
  20. klongpy/sys_fn_timer.py +13 -3
  21. klongpy/web/sys_fn_web.py +28 -6
  22. klongpy-0.7.0.dist-info/METADATA +493 -0
  23. klongpy-0.7.0.dist-info/RECORD +48 -0
  24. {klongpy-0.6.8.dist-info → klongpy-0.7.0.dist-info}/WHEEL +1 -1
  25. klongpy-0.7.0.dist-info/entry_points.txt +2 -0
  26. {klongpy-0.6.8.dist-info → klongpy-0.7.0.dist-info}/top_level.txt +0 -1
  27. klongpy-0.6.8.dist-info/METADATA +0 -412
  28. klongpy-0.6.8.dist-info/RECORD +0 -72
  29. tests/__init__.py +0 -6
  30. tests/gen_join_over.py +0 -119
  31. tests/gen_py_suite.py +0 -77
  32. tests/gen_test_fn.py +0 -259
  33. tests/perf_async.py +0 -25
  34. tests/perf_avg.py +0 -18
  35. tests/perf_duckdb.py +0 -32
  36. tests/perf_gen.py +0 -38
  37. tests/perf_ipc_overhead.py +0 -34
  38. tests/perf_join.py +0 -53
  39. tests/perf_load.py +0 -17
  40. tests/perf_prog.py +0 -18
  41. tests/perf_serdes.py +0 -52
  42. tests/perf_sys_fn_db.py +0 -263
  43. tests/perf_vector.py +0 -40
  44. tests/test_accel.py +0 -227
  45. tests/test_df_cache.py +0 -85
  46. tests/test_examples.py +0 -64
  47. tests/test_extra_suite.py +0 -382
  48. tests/test_file_cache.py +0 -185
  49. tests/test_interop.py +0 -181
  50. tests/test_kgtests.py +0 -65
  51. tests/test_known_bugs.py +0 -206
  52. tests/test_prog.py +0 -107
  53. tests/test_suite.py +0 -1479
  54. tests/test_suite_file.py +0 -153
  55. tests/test_sys_fn.py +0 -420
  56. tests/test_sys_fn_db.py +0 -88
  57. tests/test_sys_fn_ipc.py +0 -587
  58. tests/test_sys_fn_timer.py +0 -133
  59. tests/test_util.py +0 -233
  60. tests/utils.py +0 -126
  61. {klongpy-0.6.8.dist-info → klongpy-0.7.0.dist-info/licenses}/LICENSE +0 -0
klongpy/__init__.py CHANGED
@@ -1,2 +1,20 @@
  from .interpreter import KlongInterpreter, KlongException
- __all__ = ["KlongInterpreter", "KlongException"]
+ from .backend import TorchUnsupportedDtypeError
+ from .backends import (
+     get_backend,
+     register_backend,
+     list_backends,
+     BackendProvider,
+     UnsupportedDtypeError,
+ )
+
+ __all__ = [
+     "KlongInterpreter",
+     "KlongException",
+     "TorchUnsupportedDtypeError",
+     "UnsupportedDtypeError",
+     "UnsupportedDtypeError" if False else "get_backend",
+     "register_backend",
+     "list_backends",
+     "BackendProvider",
+ ]
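Note: 0.7.0 re-exports the backend registry from the package root. A minimal usage sketch of the new public surface, assuming a default installation (the example output of list_backends and the exact call semantics are assumptions; the backend= keyword is taken from the backend.py module docstring further below):

# Hypothetical sketch; names come from the new __all__ above.
from klongpy import KlongInterpreter, get_backend, list_backends

print(list_backends())      # e.g. ['numpy', 'torch'] when torch is installed (assumed output)
backend = get_backend()     # default backend provider (numpy unless torch is selected)
print(backend.name)

# Per-interpreter backend selection, per the backend.py docstring:
klong = KlongInterpreter(backend='torch')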
klongpy/adverbs.py CHANGED
@@ -1,5 +1,6 @@
  from .core import *
  from .dyads import eval_dyad_add, eval_dyad_subtract, eval_dyad_multiply, eval_dyad_divide
+ from .backend import kg_asarray, is_number, str_to_chr_arr, kg_equal
  import functools
  import itertools
 
@@ -154,7 +155,7 @@ def eval_adverb_each_left(f, a, b):
      1,:/[2 3 4] --> [[2 1] [3 1] [4 1]]
      """
      b = str_to_chr_arr(b) if isinstance(b,str) else b
-     return np.asarray([f(a,x) for x in b])
+     return kg_asarray([f(a,x) for x in b])
 
 
  def eval_adverb_each_right(f, a, b):
@@ -162,7 +163,7 @@ def eval_adverb_each_right(f, a, b):
      see: eval_dyad_adverb_each_left
      """
      b = str_to_chr_arr(b) if isinstance(b,str) else b
-     return np.asarray([f(x,a) for x in b])
+     return kg_asarray([f(x,a) for x in b])
 
 
 
@@ -227,8 +228,7 @@ def eval_adverb_over(f, a, op):
          return a
      if len(a) == 1:
          return a[0]
-     # https://docs.cupy.dev/en/stable/reference/ufunc.html
-     # TODO: can we use NumPy reduce when CuPy backend primary?
+     # Use NumPy/PyTorch ufunc reduce when available for better performance
      if isinstance(op, KGOp):
          if safe_eq(op.a,'+'):
              return np.add.reduce(a)
@@ -320,7 +320,7 @@ def eval_adverb_scan_over(f, a, op):
      """
      if is_atom(a):
          return a
-     # https://docs.cupy.dev/en/stable/reference/ufunc.html
+     # Use NumPy/PyTorch ufunc accumulate when available for better performance
      if safe_eq(f, eval_dyad_add) and hasattr(np.add, 'accumulate'):
          return np.add.accumulate(a)
      elif safe_eq(f, eval_dyad_subtract) and hasattr(np.subtract, 'accumulate'):
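The each-left/each-right change swaps np.asarray for the backend-aware kg_asarray, which keeps results on the active backend and tolerates jagged per-element results. A rough numpy-only illustration of why the plain np.asarray call is fragile; kg_asarray's exact fallback behavior is an assumption, but modern NumPy refuses jagged input outright:

import numpy as np

# Each-left produces one result per element of b; results may be jagged.
rows = [np.array([2, 1]), np.array([3, 1])]    # uniform shapes: fine
print(np.asarray(rows))                        # 2x2 array

jagged = [np.array([1]), np.array([2, 3])]     # jagged: np.asarray raises
try:
    np.asarray(jagged)
except ValueError:
    # A backend-aware kg_asarray would presumably fall back to object dtype here.
    print(np.asarray(jagged, dtype=object))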
klongpy/autograd.py ADDED
@@ -0,0 +1,308 @@
+ import numpy as np
+ from .core import KGLambda, KGCall, KGSym, KGFn
+ from .backend import get_default_backend
+
+
+ class AutogradError(Exception):
+     """Base class for autograd-related errors."""
+     pass
+
+
+ class AutogradChainBrokenError(AutogradError):
+     """Raised when the gradient computation chain is broken."""
+
+     def __init__(self, context, expected, actual, suggestion=None):
+         self.context = context
+         self.expected = expected
+         self.actual = actual
+         self.suggestion = suggestion
+         msg = f"Autograd chain broken at {context}: expected {expected}, got {actual}."
+         if suggestion:
+             msg += f" {suggestion}"
+         super().__init__(msg)
+
+
+ class NonScalarLossError(AutogradError):
+     """Raised when the loss function returns a non-scalar value."""
+
+     def __init__(self, shape):
+         self.shape = shape
+         super().__init__(
+             f"Loss function must return a scalar, got shape {shape}. "
+             "Use sum (+/) or mean (%#) to reduce to a scalar."
+         )
+
+
+ def _get_float_dtype(backend):
+     """Get the appropriate float dtype for the current backend."""
+     # MPS doesn't support float64
+     if hasattr(backend, 'supports_float64') and not backend.supports_float64():
+         return np.float32
+     return np.float64
+
+
+ def _scalar_value(x, backend):
+     """Extract scalar value from various array/tensor types.
+
+     Raises:
+         NonScalarLossError: If x is not a scalar value.
+     """
+     x = backend.to_numpy(x) if backend.is_backend_array(x) else x
+     if isinstance(x, np.ndarray):
+         if x.ndim == 0:
+             return float(x.item())
+         elif x.size == 1:
+             return float(x.flat[0])
+         else:
+             raise NonScalarLossError(tuple(x.shape))
+     return float(x)
+
+
+ def _to_func_input(x, backend, require_grad=False):
+     """Convert numpy array to appropriate input type for function call.
+
+     Args:
+         x: Input array (numpy)
+         backend: Backend provider
+         require_grad: If True and backend supports autograd, create grad tensor.
+             For numeric gradient, this should be False.
+     """
+     if require_grad and backend.supports_autograd():
+         return backend.create_grad_tensor(x)
+     return x
+
+
+ def _invoke_fn(klong, fn, args):
+     """Invoke a Klong function with the given arguments.
+
+     Handles all function types uniformly:
+     - KGSym, KGLambda: wrap in KGCall with args
+     - KGFn, KGCall: extract inner function, wrap in KGCall with args
+     - callable: call directly with args
+     """
+     if callable(fn) and not isinstance(fn, (KGSym, KGLambda, KGFn)):
+         return fn(*args)
+     inner = fn.a if isinstance(fn, KGFn) else fn
+     return klong.call(KGCall(inner, list(args), len(args)))
+
+
+ def numeric_grad(func, x, backend, eps=None):
+     """Compute numeric gradient of scalar-valued function."""
+     # Get appropriate float dtype
+     float_dtype = _get_float_dtype(backend)
+
+     # Use larger epsilon for float32 to maintain precision
+     if eps is None:
+         eps = 1e-4 if float_dtype == np.float32 else 1e-6
+
+     # Convert backend tensors to numpy for gradient computation
+     if backend.is_backend_array(x):
+         x = backend.to_numpy(x)
+     x = np.asarray(x, dtype=float_dtype)
+
+     grad = np.zeros_like(x, dtype=float_dtype)
+     it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
+     while not it.finished:
+         idx = it.multi_index
+         orig = float(x[idx])
+         x[idx] = orig + eps
+         f_pos = _scalar_value(func(_to_func_input(x.copy(), backend)), backend)
+         x[idx] = orig - eps
+         f_neg = _scalar_value(func(_to_func_input(x.copy(), backend)), backend)
+         grad[idx] = (f_pos - f_neg) / (2 * eps)
+         x[idx] = orig
+         it.iternext()
+     return grad
+
+
+ def grad_of_fn(klong, fn, x):
+     """
+     Return gradient of Klong or Python function ``fn`` at ``x``.
+
+     Uses PyTorch autograd when available (USE_TORCH=1), otherwise
+     falls back to numeric differentiation.
+     """
+     backend = klong._backend
+     call_fn = lambda v: _invoke_fn(klong, fn, [v])
+
+     if backend.supports_autograd():
+         return backend.compute_autograd(call_fn, x)
+     else:
+         return numeric_grad(call_fn, x, backend)
+
+
+ def torch_autograd(func, x):
+     """Compute gradient using PyTorch autograd (requires torch backend)."""
+     backend = get_default_backend()
+     if not backend.supports_autograd():
+         raise RuntimeError("PyTorch autograd requires torch backend (USE_TORCH=1)")
+     return backend.compute_autograd(func, x)
+
+
+ def numeric_jacobian(func, x, backend, eps=None):
+     """
+     Compute Jacobian matrix of func at point x using finite differences.
+
+     For f: R^n -> R^m, returns m x n matrix where J[i,j] = df_i/dx_j.
+
+     Args:
+         func: Callable that takes an array and returns an array
+         x: Input point (array)
+         backend: Backend provider
+         eps: Step size for finite differences (default: 1e-6 or 1e-4 for float32)
+
+     Returns:
+         Jacobian matrix as numpy array
+     """
+     float_dtype = _get_float_dtype(backend)
+     if eps is None:
+         eps = 1e-4 if float_dtype == np.float32 else 1e-6
+
+     # Convert to numpy
+     if backend.is_backend_array(x):
+         x = backend.to_numpy(x)
+     x = np.asarray(x, dtype=float_dtype).flatten()
+
+     # Evaluate function at x to get output shape
+     f0 = func(_to_func_input(x.copy(), backend))
+     if backend.is_backend_array(f0):
+         f0 = backend.to_numpy(f0)
+     f0 = np.asarray(f0, dtype=float_dtype).flatten()
+
+     n = len(x)   # Input dimension
+     m = len(f0)  # Output dimension
+     jacobian = np.zeros((m, n), dtype=float_dtype)
+
+     for j in range(n):
+         x_plus = x.copy()
+         x_plus[j] += eps
+         x_minus = x.copy()
+         x_minus[j] -= eps
+
+         f_plus = func(_to_func_input(x_plus, backend))
+         f_minus = func(_to_func_input(x_minus, backend))
+
+         if backend.is_backend_array(f_plus):
+             f_plus = backend.to_numpy(f_plus)
+         if backend.is_backend_array(f_minus):
+             f_minus = backend.to_numpy(f_minus)
+
+         f_plus = np.asarray(f_plus, dtype=float_dtype).flatten()
+         f_minus = np.asarray(f_minus, dtype=float_dtype).flatten()
+
+         jacobian[:, j] = (f_plus - f_minus) / (2 * eps)
+
+     return jacobian
+
+
+ def jacobian_of_fn(klong, fn, x):
+     """
+     Compute Jacobian matrix of Klong function fn at point x.
+
+     For f: R^n -> R^m, returns m x n matrix where J[i,j] = df_i/dx_j.
+
+     Args:
+         klong: KlongInterpreter instance
+         fn: Function (KGSym, KGLambda, KGFn, KGCall, or callable)
+         x: Input point
+
+     Returns:
+         Jacobian matrix
+     """
+     backend = klong._backend
+     call_fn = lambda v: _invoke_fn(klong, fn, [v])
+
+     if backend.supports_autograd():
+         try:
+             return backend.compute_jacobian(call_fn, x)
+         except Exception:
+             # Fall back to numeric if torch jacobian fails
+             return numeric_jacobian(call_fn, x, backend=backend)
+     else:
+         return numeric_jacobian(call_fn, x, backend=backend)
+
+
+ def multi_jacobian_of_fn(klong, fn, param_syms):
+     """
+     Compute Jacobians for multiple parameters in one call.
+
+     Args:
+         klong: KlongInterpreter instance
+         fn: Function (KGSym, KGLambda, KGFn, KGCall, or callable)
+             Should be a niladic function that references the parameters
+         param_syms: List of KGSym parameter symbols to differentiate with respect to
+
+     Returns:
+         List of Jacobian matrices, one per parameter
+     """
+     backend = klong._backend
+     param_values = [klong[sym] for sym in param_syms]
+     call_fn = lambda: _invoke_fn(klong, fn, [])
+
+     jacobians = []
+     for sym, val in zip(param_syms, param_values):
+         original = klong[sym]
+
+         def single_param_fn(v, s=sym, orig=original):
+             """Wrapper that sets param to v, calls fn, restores param."""
+             klong[s] = v
+             try:
+                 return call_fn()
+             finally:
+                 klong[s] = orig
+
+         if backend.supports_autograd():
+             try:
+                 jac = backend.compute_jacobian(single_param_fn, val)
+             except Exception:
+                 jac = numeric_jacobian(single_param_fn, val, backend=backend)
+         else:
+             jac = numeric_jacobian(single_param_fn, val, backend=backend)
+
+         # Restore original value after jacobian computation
+         klong[sym] = original
+         jacobians.append(jac)
+
+     return jacobians
+
+
+ def multi_grad_of_fn(klong, fn, param_syms):
+     """
+     Compute gradients for multiple parameters in one call.
+
+     Args:
+         klong: KlongInterpreter instance
+         fn: Loss function (KGSym, KGLambda, KGFn, KGCall, or callable)
+             Should be a niladic function that references the parameters
+         param_syms: List of KGSym parameter symbols to differentiate with respect to
+
+     Returns:
+         List of gradients, one per parameter
+     """
+     backend = klong._backend
+     # Access context directly to avoid KGFnWrapper wrapping
+     param_values = [klong._context[sym] for sym in param_syms]
+
+     def call_fn_with_tensors(tensors):
+         """Call the loss function with tensor values temporarily bound to symbols."""
+         originals = {sym: klong._context[sym] for sym in param_syms}
+         try:
+             for sym, tensor in zip(param_syms, tensors):
+                 klong[sym] = tensor
+             return _invoke_fn(klong, fn, [])
+         finally:
+             for sym, orig in originals.items():
+                 klong[sym] = orig
+
+     if backend.supports_autograd():
+         return backend.compute_multi_autograd(call_fn_with_tensors, param_values)
+     else:
+         # Fallback: compute numeric gradients one at a time
+         grads = []
+         for i, sym in enumerate(param_syms):
+             def single_param_fn(v, idx=i):
+                 vals = list(param_values)
+                 vals[idx] = v
+                 return call_fn_with_tensors(vals)
+             grads.append(numeric_grad(single_param_fn, param_values[i], backend))
+         return grads
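On the numpy backend, grad_of_fn falls back to numeric_grad, a standard central-difference scheme: each coordinate is perturbed by +/- eps and the slope (f(x+eps) - f(x-eps)) / (2*eps) is recorded. A self-contained numpy sketch of that scheme (not the klongpy API itself; the backend plumbing is stripped out):

import numpy as np

def central_diff_grad(f, x, eps=1e-6):
    """Central-difference gradient of a scalar-valued f, as numeric_grad computes it."""
    x = np.asarray(x, dtype=np.float64)
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_pos = float(f(x))
        x[idx] = orig - eps
        f_neg = float(f(x))
        grad[idx] = (f_pos - f_neg) / (2 * eps)
        x[idx] = orig          # restore the coordinate before moving on
        it.iternext()
    return grad

# d/dx sum(x^2) = 2x
print(central_diff_grad(lambda v: np.sum(v * v), np.array([1.0, 2.0, 3.0])))
# -> approximately [2. 4. 6.]

The O(eps^2) truncation error of central differences is also why numeric_grad widens eps to 1e-4 under float32: with a smaller step, the perturbation drowns in rounding error.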
klongpy/backend.py CHANGED
@@ -1,103 +1,171 @@
- import os
- import warnings
-
- # Attempt to import CuPy. If not available, set use_gpu to False.
- use_gpu = bool(os.environ.get('USE_GPU') == '1')
- if use_gpu:
-     try:
-         import cupy as np
-         use_gpu = True
-     except ImportError:
-         import numpy as np
-         use_gpu = False
- else:
-     import numpy as np
-
-
- def is_supported_type(x):
-     """
-     CuPy does not support strings or jagged arrays.
-     Note: add any other unsupported types here.
-     """
-     if isinstance(x, str) or is_jagged_array(x):
-         return False
-     return True
+ """
+ Backend compatibility module for KlongPy.
+
+ This module provides backward compatibility with the old global backend system.
+ New code should use the backends package directly:
+
+     from klongpy.backends import get_backend, BackendProvider
+
+ For per-interpreter backends, use:
+
+     klong = KlongInterpreter(backend='torch')
+ """
+ import numpy as real_np
+
+ from .backends import (
+     get_backend,
+     register_backend,
+     list_backends,
+     BackendProvider,
+     UnsupportedDtypeError,
+     TorchUnsupportedDtypeError,
+     NumpyBackendProvider,
+     TorchBackendProvider,
+     KGChar,
+     is_jagged_array,
+     is_supported_type,
+ )
+
+
+ # Global backend state for backward compatibility
+ # This is used by modules that import `np` and `use_torch` directly
+ _default_backend = get_backend()
+
+ # Backward compatibility: expose np and use_torch at module level
+ np = _default_backend.np
+ use_torch = _default_backend.name == 'torch'
+
+
+ def get_default_backend():
+     """Get the default backend provider."""
+     return _default_backend
 
 
- def is_jagged_array(x):
+ def to_numpy(x):
+     """Convert tensor/array to numpy, handling device transfers and 0-dim arrays."""
+     result = _default_backend.to_numpy(x)
+     if isinstance(result, real_np.ndarray) and result.ndim == 0:
+         return result.item()
+     return result
+
+
+ def to_display(x):
+     """Convert value to display-friendly format."""
+     return _default_backend.to_display(x)
+
+
+ def array_size(a):
      """
-     Check if an array is jagged.
+     Get the total number of elements in an array/tensor.
+
+     Works with both numpy arrays and torch tensors.
      """
-     if isinstance(x, list):
-         # If the lengths of sublists vary, it's a jagged array.
-         return len(set(map(len, x))) > 1
-     return False
-
- if use_gpu:
-     import cupy
-     import numpy
-
-     class CuPyReductionKernelWrapper:
-         def __init__(self, fn, reduce_fn_1, reduce_fn_2):
-             self.fn = fn
-             self.reduce_fn_1 = reduce_fn_1
-             self.reduce_fn_2 = reduce_fn_2
-
-         def __call__(self, *args, **kwargs):
-             return self.fn(*args, **kwargs)
-
-         def reduce(self, x):
-             return self.reduce_fn_1(x) if x.ndim == 1 else self.reduce_fn_2(x[0], x[1])
-
-     add_reduce_2 = cupy.ElementwiseKernel(
-         'T x, T y',
-         'T z',
-         'z = (x + y)',
-         'add_reduce_2')
-     np.add = CuPyReductionKernelWrapper(cupy.add, cupy.sum, add_reduce_2)
-
-     def subtract_reduce_1(x):
-         return 2*x[0] - cupy.sum(x)
-
-     subtract_reduce_2 = cupy.ElementwiseKernel(
-         'T x, T y',
-         'T z',
-         'z = (x - y)',
-         'subtract_reduce_2')
-     np.subtract = CuPyReductionKernelWrapper(cupy.subtract, subtract_reduce_1, subtract_reduce_2)
-
-     multiply_reduce_1 = cupy.ReductionKernel(
-         'T x',
-         'T y',
-         'x',
-         'a * b',
-         'y = a',
-         '1',
-         'multiply_reduce_1'
-     )
-     multiply_reduce_2 = cupy.ElementwiseKernel(
-         'T x, T y',
-         'T z',
-         'z = (x * y)',
-         'multiply_reduce_2')
-     np.multiply = CuPyReductionKernelWrapper(cupy.multiply, multiply_reduce_1, multiply_reduce_2)
-
-     def divide_reduce_1(x):
-         raise NotImplementedError()
-
-     divide_reduce_2 = cupy.ElementwiseKernel(
-         'T x, T y',
-         'T z',
-         'z = (x / y)',
-         'divide_reduce_2')
-     np.divide = CuPyReductionKernelWrapper(cupy.divide, divide_reduce_1, divide_reduce_2)
-
-     np.isarray = lambda x: isinstance(x, (numpy.ndarray, cupy.ndarray))
-
-     # np.hstack = lambda x: cupy.hstack(x) if use_gpu and is_supported_type(x) else numpy.hstack(x)
- else:
-     np.seterr(divide='ignore')
-     warnings.filterwarnings("error", category=np.VisibleDeprecationWarning)
-     np.isarray = lambda x: isinstance(x, np.ndarray)
-
- np
+     return _default_backend.array_size(a)
+
+
+ def safe_equal(x, y):
+     """Compare two values for equality, handling backend-specific array types."""
+     return _default_backend.safe_equal(x, y)
+
+
+ def detach_if_needed(x):
+     """Detach array from computation graph if needed."""
+     return _default_backend.detach_if_needed(x)
+
+
+ def to_int_array(a):
+     """Convert array to integer type."""
+     return _default_backend.to_int_array(a)
+
+
+ def power(a, b):
+     """Compute a^b, handling gradient tracking if applicable."""
+     return _default_backend.power(a, b)
+
+
+ def has_gradient(x):
+     """Check if x is tracking gradients (for autograd)."""
+     return _default_backend.has_gradient(x)
+
+
+ def kg_asarray(a):
+     """Convert input to array using the default backend's kg_asarray method."""
+     return _default_backend.kg_asarray(a)
+
+
+ def is_integer(x):
+     """Check if x is an integer type using the default backend."""
+     return _default_backend.is_integer(x)
+
+
+ def is_float(x):
+     """Check if x is a float type using the default backend."""
+     return _default_backend.is_float(x)
+
+
+ def is_number(a):
+     """Check if a is a number (integer or float) using the default backend."""
+     return _default_backend.is_number(a)
+
+
+ def get_dtype_kind(arr):
+     """Get the dtype 'kind' character for an array using the default backend."""
+     return _default_backend.get_dtype_kind(arr)
+
+
+ def str_to_chr_arr(s):
+     """Convert string to character array using the default backend."""
+     return _default_backend.str_to_chr_arr(s)
+
+
+ def kg_argsort(a, descending=False):
+     """Argsort array using the default backend."""
+     from .core import kg_argsort as core_kg_argsort
+     return core_kg_argsort(a, _default_backend, descending=descending)
+
+
+ def vec_fn(a, f):
+     """Apply a function f to an array a, with support for nested arrays."""
+     from .core import vec_fn as core_vec_fn
+     return core_vec_fn(a, f, _default_backend)
+
+
+ def kg_equal(a, b):
+     """Compare two values or arrays for equality using the default backend."""
+     from .core import kg_equal as core_kg_equal
+     return core_kg_equal(a, b, _default_backend)
+
+
+ __all__ = [
+     'np',
+     'use_torch',
+     'get_backend',
+     'get_default_backend',
+     'register_backend',
+     'list_backends',
+     'BackendProvider',
+     'UnsupportedDtypeError',
+     'TorchUnsupportedDtypeError',
+     'NumpyBackendProvider',
+     'TorchBackendProvider',
+     'KGChar',
+     'is_supported_type',
+     'is_jagged_array',
+     'to_numpy',
+     'to_display',
+     'array_size',
+     'safe_equal',
+     'detach_if_needed',
+     'to_int_array',
+     'power',
+     'has_gradient',
+     'kg_asarray',
+     'is_integer',
+     'is_float',
+     'is_number',
+     'get_dtype_kind',
+     'str_to_chr_arr',
+     'kg_argsort',
+     'vec_fn',
+     'kg_equal',
+ ]
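The rewritten backend.py replaces the old CuPy kernel-patching with a thin compatibility shim: the module-level surface (np, use_torch, and the helper functions) is kept, but every call now delegates to a single default BackendProvider. A hedged sketch of what that contract means for downstream code (import names come from the __all__ above; the printed values assume the default numpy backend):

# Old-style imports keep working; they now resolve through the default backend.
from klongpy.backend import np, use_torch, kg_asarray, to_numpy

print(use_torch)             # False unless the torch backend is the default
arr = kg_asarray([1, 2, 3])  # backend-native array (a numpy ndarray here)
print(np.add.reduce(arr))    # 6; adverbs.py relies on this ufunc reduce interface
print(to_numpy(arr))         # always plain numpy; 0-dim results become Python scalars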