klongpy 0.6.9__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. klongpy/__init__.py +19 -1
  2. klongpy/adverbs.py +5 -5
  3. klongpy/autograd.py +308 -0
  4. klongpy/backend.py +167 -99
  5. klongpy/backends/__init__.py +94 -0
  6. klongpy/backends/base.py +320 -0
  7. klongpy/backends/numpy_backend.py +122 -0
  8. klongpy/backends/torch_backend.py +995 -0
  9. klongpy-0.6.9.data/scripts/kgpy → klongpy/cli.py +65 -88
  10. klongpy/core.py +228 -108
  11. klongpy/db/sys_fn_db.py +4 -3
  12. klongpy/dyads.py +159 -28
  13. klongpy/interpreter.py +31 -3
  14. klongpy/monads.py +39 -3
  15. klongpy/repl.py +21 -3
  16. klongpy/sys_fn.py +128 -17
  17. klongpy/sys_fn_autograd.py +290 -0
  18. klongpy/sys_fn_ipc.py +18 -6
  19. klongpy/sys_fn_timer.py +13 -3
  20. klongpy/web/sys_fn_web.py +14 -4
  21. klongpy-0.7.0.dist-info/METADATA +493 -0
  22. klongpy-0.7.0.dist-info/RECORD +48 -0
  23. {klongpy-0.6.9.dist-info → klongpy-0.7.0.dist-info}/WHEEL +1 -1
  24. klongpy-0.7.0.dist-info/entry_points.txt +2 -0
  25. {klongpy-0.6.9.dist-info → klongpy-0.7.0.dist-info}/top_level.txt +0 -1
  26. klongpy-0.6.9.dist-info/METADATA +0 -448
  27. klongpy-0.6.9.dist-info/RECORD +0 -77
  28. tests/__init__.py +0 -6
  29. tests/gen_join_over.py +0 -119
  30. tests/gen_py_suite.py +0 -77
  31. tests/gen_test_fn.py +0 -259
  32. tests/perf_async.py +0 -25
  33. tests/perf_avg.py +0 -18
  34. tests/perf_duckdb.py +0 -32
  35. tests/perf_gen.py +0 -38
  36. tests/perf_ipc_overhead.py +0 -34
  37. tests/perf_join.py +0 -53
  38. tests/perf_load.py +0 -17
  39. tests/perf_prog.py +0 -18
  40. tests/perf_serdes.py +0 -52
  41. tests/perf_sys_fn_db.py +0 -263
  42. tests/perf_vector.py +0 -40
  43. tests/test_accel.py +0 -227
  44. tests/test_df_cache.py +0 -85
  45. tests/test_eval_monad_list.py +0 -34
  46. tests/test_examples.py +0 -64
  47. tests/test_extra_suite.py +0 -382
  48. tests/test_file_cache.py +0 -185
  49. tests/test_interop.py +0 -180
  50. tests/test_kg_asarray.py +0 -94
  51. tests/test_kgtests.py +0 -65
  52. tests/test_known_bugs.py +0 -206
  53. tests/test_prog.py +0 -107
  54. tests/test_reshape_strings.py +0 -33
  55. tests/test_suite.py +0 -1480
  56. tests/test_suite_file.py +0 -153
  57. tests/test_sys_fn.py +0 -420
  58. tests/test_sys_fn_db.py +0 -88
  59. tests/test_sys_fn_ipc.py +0 -587
  60. tests/test_sys_fn_timer.py +0 -133
  61. tests/test_sys_fn_web.py +0 -50
  62. tests/test_util.py +0 -233
  63. tests/utils.py +0 -126
  64. {klongpy-0.6.9.dist-info → klongpy-0.7.0.dist-info}/licenses/LICENSE +0 -0
klongpy/dyads.py CHANGED
@@ -1,5 +1,11 @@
1
1
  from .core import *
2
+ from .autograd import grad_of_fn, numeric_grad, jacobian_of_fn, multi_jacobian_of_fn, multi_grad_of_fn
3
+ from .backend import (
4
+ to_numpy, safe_equal, detach_if_needed, to_int_array, power as backend_power, has_gradient,
5
+ kg_asarray, str_to_chr_arr, kg_equal, is_integer, is_float, get_dtype_kind, array_size
6
+ )
2
7
  import sys
8
+ import numpy
3
9
 
4
10
 
5
11
  def eval_dyad_add(a, b):
@@ -254,6 +260,11 @@ def eval_dyad_drop(a, b):
254
260
  return b[a:] if a >= 0 else b[:a]
255
261
 
256
262
 
263
+ def _safe_equal(x, y):
264
+ """Compare two values, handling torch tensors correctly."""
265
+ return kg_truth(safe_equal(x, y))
266
+
267
+
257
268
  def eval_dyad_equal(a, b):
258
269
  """
259
270
 
@@ -279,7 +290,7 @@ def eval_dyad_equal(a, b):
279
290
  [1 2 3]=[1 4 3] --> [1 0 1]
280
291
 
281
292
  """
282
- return vec_fn2(a, b, lambda x, y: kg_truth(np.asarray(x,dtype=object) == np.asarray(y,dtype=object)))
293
+ return vec_fn2(a, b, _safe_equal)
283
294
 
284
295
 
285
296
  def finditer(s, sub):
@@ -459,10 +470,10 @@ def eval_dyad_index_in_depth(a, b):
459
470
  return np.asarray(a)[tuple(b) if is_list(b) else b] if not is_empty(b) else b
460
471
 
461
472
 
462
- def _e_dyad_integer_divide(x,y):
473
+ def _e_dyad_integer_divide(x, y):
463
474
  a = np.divide(x, y)
464
- a = kg_asarray(rec_fn(a,np.trunc)) if np.isarray(a) else a
465
- return np.asarray(a,dtype='int') if np.isarray(a) else int(a)
475
+ a = kg_asarray(rec_fn(a, np.trunc)) if np.isarray(a) else a
476
+ return to_int_array(a)
466
477
 
467
478
  def eval_dyad_integer_divide(a, b):
468
479
  """
@@ -540,18 +551,28 @@ def eval_dyad_join(a, b):
540
551
  return b
541
552
 
542
553
  if np.isarray(a) and np.isarray(b):
543
- if len(a) == 0:
544
- return b
545
- if len(a.shape) == len(b.shape) and a.shape[-1] == b.shape[-1]:
546
- return np.concatenate((a,b))
554
+ # Only use fast path for 1D+ arrays (not 0D scalars)
555
+ a_is_1d_plus = hasattr(a, 'ndim') and a.ndim >= 1
556
+ b_is_1d_plus = hasattr(b, 'ndim') and b.ndim >= 1
557
+ if a_is_1d_plus and b_is_1d_plus:
558
+ if len(a) == 0:
559
+ return b
560
+ if len(a.shape) == len(b.shape) and a.shape[-1] == b.shape[-1]:
561
+ return np.concatenate((a,b))
547
562
 
548
563
  aa = _arr_to_list(a)
549
564
  bb = _arr_to_list(b)
550
565
 
551
566
  r = [*aa,*bb]
552
567
  nr = kg_asarray(r)
553
- t = nr.dtype.type
554
- return nr if issubclass(t, np.integer) or issubclass(t, np.floating) else np.asarray(r,dtype=object)
568
+ # Check dtype kind for compatibility with both numpy and torch
569
+ dtype_kind = get_dtype_kind(nr)
570
+ if dtype_kind in ('i', 'f', 'u'):
571
+ return nr
572
+ # Use numpy directly for object arrays (torch backend doesn't support object dtype)
573
+ # Convert any torch tensors to numpy first (needed for MPS tensors)
574
+ r_numpy = [to_numpy(x) if np.isarray(x) else x for x in r]
575
+ return numpy.asarray(r_numpy, dtype=object)
555
576
 
556
577
 
557
578
  def eval_dyad_less(a, b):
@@ -708,10 +729,26 @@ def eval_dyad_multiply(a, b):
708
729
  return np.multiply(a, b)
709
730
 
710
731
 
711
- def _e_dyad_power(a,b):
712
- r = np.power(float(a) if is_integer(a) else a, b)
713
- br = all([np.trunc(x) == x for x in r]) if is_list(r) else np.trunc(r) == r
714
- return np.dtype('int').type(r) if br else r
732
+ def _e_dyad_power(a, b):
733
+ # Check if input requires grad - if so, preserve float for autograd
734
+ input_has_grad = has_gradient(a)
735
+ # Use backend power function which handles torch.pow for gradients
736
+ r = backend_power(a, b)
737
+ # If input had gradients, keep result as float to preserve autograd
738
+ if input_has_grad:
739
+ return r
740
+ # Check if result is integer using vectorized operations
741
+ r_val = detach_if_needed(r)
742
+ if is_list(r_val):
743
+ # Vectorized check: trunc(r) == r for all elements
744
+ trunc_r = numpy.trunc(r_val) if isinstance(r_val, numpy.ndarray) else r_val.trunc()
745
+ br = bool((trunc_r == r_val).all())
746
+ else:
747
+ val = float(r_val) if hasattr(r_val, 'item') else r_val
748
+ br = numpy.trunc(val) == val
749
+ if br:
750
+ return to_int_array(r)
751
+ return r
715
752
 
716
753
  def eval_dyad_power(a, b):
717
754
  """
@@ -810,20 +847,22 @@ def eval_dyad_reshape(a, b):
810
847
  y = np.where(a < 0)[0]
811
848
  if len(y) > 0:
812
849
  a = np.copy(a)
813
- a[y] = b.size // 2
814
- b_s = b.size
815
- a_s = np.prod(a)
850
+ a[y] = array_size(b) // 2
851
+ b_s = array_size(b)
852
+ a_s = int(np.prod(a)) # Ensure it's a Python int for comparison
853
+ # Convert shape to tuple of ints for torch compatibility
854
+ a_shape = tuple(int(x) for x in (a.tolist() if hasattr(a, 'tolist') else a))
816
855
  if a_s > b_s:
817
856
  b = np.tile(b.flatten(), (a_s // b_s))
818
- b = np.concatenate((b, b[:a_s - b.size]))
819
- b_s = b.size
820
- r = b.reshape(a)
857
+ b = np.concatenate((b, b[:a_s - array_size(b)]))
858
+ b_s = array_size(b)
859
+ r = b.reshape(a_shape)
821
860
  r = np.asarray(["".join(x) for x in r]) if j else r
822
861
  j = False
823
862
  elif a_s == b_s:
824
- r = b.reshape(a)
863
+ r = b.reshape(a_shape)
825
864
  else:
826
- r = np.resize(b, a)
865
+ r = np.resize(b, a_shape)
827
866
  else:
828
867
  r = np.full(a, b)
829
868
  else:
@@ -860,7 +899,7 @@ def eval_dyad_rotate(a, b):
860
899
  rotated will be a!#b.
861
900
 
862
901
  Note that n:+M rotates the rows of a matrix M (i.e. it rotates
863
- it vertically); to rotate its columns (horizontally), use n:+:\M
902
+ it vertically); to rotate its columns (horizontally), use n:+:\\M
864
903
  (Rotate-Each-Left).
865
904
 
866
905
  Examples: 1:+[1 2 3 4 5] --> [5 1 2 3 4]
@@ -966,14 +1005,106 @@ def eval_dyad_take(a, b):
966
1005
  """
967
1006
  j = isinstance(b,str)
968
1007
  b = str_to_chr_arr(b) if j else np.asarray(b)
969
- aa = np.abs(a)
970
- if aa > b.size:
971
- b = np.tile(b,aa // len(b))
972
- b = np.concatenate((b, b[:aa-b.size]) if a > 0 else (b[-(aa-b.size):],b))
973
- r = b[a:] if a < 0 else b[:a]
1008
+ aa = int(np.abs(a)) if hasattr(np.abs(a), 'item') else np.abs(a) # Convert tensor to int
1009
+ b_size = array_size(b)
1010
+ if b_size == 0:
1011
+ # Handle empty array/string case
1012
+ r = b
1013
+ elif aa > b_size:
1014
+ b = np.tile(b, aa // len(b))
1015
+ b = np.concatenate((b, b[:aa-array_size(b)]) if a > 0 else (b[-(aa-array_size(b)):], b))
1016
+ r = b[a:] if a < 0 else b[:a]
1017
+ else:
1018
+ r = b[a:] if a < 0 else b[:a]
974
1019
  return "".join(r) if j else r
975
1020
 
976
1021
 
1022
+ def eval_dyad_grad(klong, a, b):
1023
+ """
1024
+
1025
+ a∇b [Grad]
1026
+
1027
+ Compute the numeric gradient of the monadic function ``b`` at ``a``
1028
+ using finite differences. Always uses numeric differentiation.
1029
+
1030
+ For automatic differentiation, use the :> operator instead.
1031
+
1032
+ """
1033
+ def call_fn(v):
1034
+ if isinstance(b, (KGSym, KGLambda, KGFn, KGCall)):
1035
+ return klong.call(KGCall(b, [v], 1))
1036
+ return b(v)
1037
+
1038
+ if isinstance(a, KGSym):
1039
+ orig = klong[a]
1040
+
1041
+ def func(v):
1042
+ klong[a] = v
1043
+ try:
1044
+ return call_fn(v)
1045
+ finally:
1046
+ klong[a] = orig
1047
+
1048
+ return numeric_grad(func, orig, klong._backend)
1049
+ else:
1050
+ return numeric_grad(call_fn, a, klong._backend)
1051
+
1052
+
1053
+ def eval_dyad_jacobian(klong, a, b):
1054
+ """
1055
+
1056
+ a∂b [Jacobian]
1057
+
1058
+ Compute Jacobian matrix of function ``b`` at point ``a``.
1059
+ For f: R^n -> R^m, returns m x n matrix where J[i,j] = df_i/dx_j.
1060
+
1061
+ Two modes based on what ``a`` contains:
1062
+ 1. Single point: [1 2]∂f -> Jacobian at that point
1063
+ 2. List of symbols: [w b]∂f -> [J_w J_b] (multi-param mode)
1064
+
1065
+ In multi-param mode, ``b`` should be a niladic function
1066
+ that references the parameter symbols.
1067
+
1068
+ Examples:
1069
+ [1 2]∂{[x@0^2 x@1^2]} --> [[2 0] [0 4]]
1070
+ [w b]∂f --> [J_w J_b] (multi-param mode)
1071
+
1072
+ """
1073
+ # Check if a is a list of symbols (multi-param mode)
1074
+ if is_list(a) and len(a) > 0 and all(isinstance(p, KGSym) for p in a):
1075
+ return multi_jacobian_of_fn(klong, b, list(a))
1076
+ else:
1077
+ return jacobian_of_fn(klong, b, a) # Note: a is point, b is function
1078
+
1079
+
1080
+ def eval_dyad_autograd(klong, a, b):
1081
+ """
1082
+
1083
+ a:>b [Autograd]
1084
+
1085
+ Compute gradient of function ``a`` with respect to ``b``.
1086
+
1087
+ Two modes based on what ``b`` contains:
1088
+ 1. Single param/point: a:>x or a:>[1 2 3] -> gradient at that point
1089
+ 2. List of symbols: a:>[w b] -> [grad_w grad_b] (multi-param mode)
1090
+
1091
+ In multi-param mode, ``a`` should be a niladic function (loss)
1092
+ that references the parameter symbols.
1093
+
1094
+ Examples:
1095
+ {x^2}:>3.0 --> 6.0 (derivative of x^2 at x=3)
1096
+ {x^3}:>2.0 --> 12.0 (derivative of x^3 at x=2)
1097
+ {+/x^2}:>[1 2 3] --> [2 4 6] (gradient of sum of squares)
1098
+ loss:>[w b] --> [grad_w grad_b] (multi-param mode)
1099
+
1100
+ """
1101
+ # Check if b is a list of symbols (multi-param mode)
1102
+ if is_list(b) and len(b) > 0 and all(isinstance(p, KGSym) for p in b):
1103
+ return multi_grad_of_fn(klong, a, list(b))
1104
+ else:
1105
+ return grad_of_fn(klong, a, b)
1106
+
1107
+
977
1108
  def create_dyad_functions(klong):
978
1109
  def _get_name(s):
979
1110
  s = s.strip()
klongpy/interpreter.py CHANGED
@@ -2,10 +2,13 @@ import time
2
2
  from collections import deque
3
3
 
4
4
  from .adverbs import get_adverb_fn
5
+ from .backends import get_backend
5
6
  from .core import *
7
+ from .backend import is_number
6
8
  from .dyads import create_dyad_functions
7
9
  from .monads import create_monad_functions
8
10
  from .sys_fn import create_system_functions
11
+ from .sys_fn_autograd import create_system_functions_autograd
9
12
  from .sys_fn_ipc import create_system_functions_ipc, create_system_var_ipc
10
13
  from .sys_fn_timer import create_system_functions_timer
11
14
  from .sys_var import *
@@ -141,6 +144,7 @@ def create_system_contexts():
141
144
 
142
145
  sys_d = {}
143
146
  add_context_key_values(sys_d, create_system_functions())
147
+ add_context_key_values(sys_d, create_system_functions_autograd())
144
148
  add_context_key_values(sys_d, create_system_functions_ipc())
145
149
  add_context_key_values(sys_d, create_system_functions_timer())
146
150
  set_context_var(sys_d, KGSym('.e'), eval_sys_var_epsilon()) # TODO: support lambda
@@ -210,13 +214,36 @@ def chain_adverbs(klong, arr):
210
214
 
211
215
  class KlongInterpreter():
212
216
 
213
- def __init__(self):
217
+ def __init__(self, backend=None, device=None):
218
+ """
219
+ Initialize a Klong interpreter.
220
+
221
+ Parameters
222
+ ----------
223
+ backend : str, optional
224
+ Backend name ('numpy' or 'torch'). If None, uses the default
225
+ backend (numpy, unless KLONGPY_BACKEND or USE_TORCH env vars are set).
226
+ device : str, optional
227
+ Device for torch backend ('cpu', 'cuda', 'mps'). Only applies
228
+ when backend='torch'. If None, auto-selects best available device.
229
+ """
230
+ self._backend = get_backend(backend, device=device)
214
231
  self._context = KlongContext(create_system_contexts())
215
232
  self._vd = create_dyad_functions(self)
216
233
  self._vm = create_monad_functions(self)
217
234
  self._start_time = time.time()
218
235
  self._module = None
219
236
 
237
+ @property
238
+ def backend(self):
239
+ """Return the backend provider for this interpreter."""
240
+ return self._backend
241
+
242
+ @property
243
+ def np(self):
244
+ """Return the numpy-compatible array module for this interpreter."""
245
+ return self._backend.np
246
+
220
247
  def __setitem__(self, k, v):
221
248
  k = k if isinstance(k, KGSym) else KGSym(k)
222
249
  self._context[k] = v
@@ -224,7 +251,8 @@ class KlongInterpreter():
224
251
  def __getitem__(self, k):
225
252
  k = k if isinstance(k, KGSym) else KGSym(k)
226
253
  r = self._context[k]
227
- return KGFnWrapper(self, r) if issubclass(type(r), KGFn) else r
254
+ # Pass the symbol name to avoid O(n) context search
255
+ return KGFnWrapper(self, r, sym=k) if issubclass(type(r), KGFn) else r
228
256
 
229
257
  def __delitem__(self, k):
230
258
  k = k if isinstance(k, KGSym) else KGSym(k)
@@ -618,7 +646,7 @@ class KlongInterpreter():
618
646
  f = self._get_op_fn(x.a.a, x.a.arity)
619
647
  fa = (x.args if isinstance(x.args, list) else [x.args]) if x.args is not None else x.args
620
648
  _y = self.eval(fa[1]) if x.a.arity == 2 else None
621
- _x = fa[0] if x.a.a == '::' else self.eval(fa[0])
649
+ _x = fa[0] if x.a.a in ['::','∇'] else self.eval(fa[0])
622
650
  return f(_x) if x.a.arity == 1 else f(_x, _y)
623
651
  elif x.is_adverb_chain():
624
652
  return chain_adverbs(self, x.a)()
klongpy/monads.py CHANGED
@@ -1,4 +1,8 @@
1
1
  from .core import *
2
+ from .autograd import grad_of_fn
3
+ from .backend import (
4
+ kg_asarray, is_integer, is_number, str_to_chr_arr, kg_argsort, array_size, vec_fn
5
+ )
2
6
  import sys
3
7
 
4
8
  def eval_monad_atom(a):
@@ -115,7 +119,15 @@ def eval_monad_floor(a):
115
119
  _1e100 --> 1.0e+100 :"if precision < 100 digits"
116
120
 
117
121
  """
118
- return vec_fn(a, lambda x: np.floor(np.asarray(x, dtype=float)).astype(int))
122
+ def _floor_to_int(x):
123
+ result = np.floor(np.asarray(x, dtype=float))
124
+ # Handle both numpy arrays and torch tensors
125
+ if hasattr(result, 'astype'):
126
+ return result.astype(int)
127
+ elif hasattr(result, 'to'): # torch tensor - .to(int) works
128
+ return result.to(int)
129
+ return int(result)
130
+ return vec_fn(a, _floor_to_int)
119
131
 
120
132
 
121
133
  def eval_monad_format(a):
@@ -196,7 +208,7 @@ def eval_monad_groupby(a):
196
208
 
197
209
  """
198
210
  arr = kg_asarray(a)
199
- if arr.size == 0:
211
+ if array_size(arr) == 0:
200
212
  return arr
201
213
  vals, inverse = np.unique(arr, return_inverse=True)
202
214
  groups = [np.where(inverse == i)[0] for i in range(len(vals))]
@@ -215,7 +227,7 @@ def eval_monad_list(a):
215
227
  ,"xyz" --> ["xyz"]
216
228
  ,[1] --> [[1]]
217
229
  """
218
- if isinstance(a, KGChar):
230
+ if is_char(a):
219
231
  return str(a)
220
232
  if isinstance(a, KGSym):
221
233
  return np.asarray([a],dtype=object) # np interprets ':foo" as ':fo"
@@ -453,6 +465,28 @@ def eval_monad_undefined(a):
453
465
  return kg_truth(a is None or (np.isinf(a) if is_number(a) else False))
454
466
 
455
467
 
468
+ def eval_monad_track(a):
469
+ """
470
+
471
+ ˙a [Track]
472
+
473
+ Identity operator used when marking values for gradient tracking.
474
+
475
+ """
476
+ return a
477
+
478
+
479
+ def eval_monad_grad(klong, a):
480
+ """
481
+
482
+ ∇a [Grad]
483
+
484
+ Return a function that computes the numeric gradient of ``a``.
485
+
486
+ """
487
+ return KGLambda(lambda x, fn=a, k=klong: grad_of_fn(k, fn, x))
488
+
489
+
456
490
  def create_monad_functions(klong):
457
491
  def _get_name(s):
458
492
  s = s.strip()
@@ -464,6 +498,8 @@ def create_monad_functions(klong):
464
498
  for x in filter(lambda n: n.startswith("eval_monad_"), dir(m)):
465
499
  fn = getattr(m,x)
466
500
  name = _get_name(fn.__doc__)
501
+ if fn.__code__.co_argcount == 2 and 'klong' in fn.__code__.co_varnames:
502
+ fn = lambda a,f=fn,klong=klong: f(klong, a)
467
503
  registry[name] = fn
468
504
 
469
505
  return registry
klongpy/repl.py CHANGED
@@ -3,12 +3,30 @@ import threading
3
3
  import time
4
4
  import os
5
5
  import importlib.resources
6
+ from typing import Optional
6
7
 
7
8
  from . import KlongInterpreter
8
9
  from .utils import CallbackEvent
9
10
 
10
11
 
11
- def start_loop(loop: asyncio.AbstractEventLoop, stop_event: asyncio.Event) -> None:
12
+ class LoopStopper:
13
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
14
+ self._loop = loop
15
+ self._future = loop.create_future()
16
+
17
+ def set(self) -> None:
18
+ if self._future.done():
19
+ return
20
+ if self._loop.is_running():
21
+ self._loop.call_soon_threadsafe(self._future.set_result, None)
22
+ else:
23
+ self._future.set_result(None)
24
+
25
+ async def wait(self) -> None:
26
+ await self._future
27
+
28
+
29
+ def start_loop(loop: asyncio.AbstractEventLoop, stop_event: LoopStopper) -> None:
12
30
  asyncio.set_event_loop(loop)
13
31
  loop.run_until_complete(stop_event.wait())
14
32
 
@@ -18,13 +36,13 @@ def setup_async_loop(debug: bool = False, slow_callback_duration: float = 86400.
18
36
  loop.slow_callback_duration = slow_callback_duration
19
37
  if debug:
20
38
  loop.set_debug(True)
21
- stop_event = asyncio.Event()
39
+ stop_event = LoopStopper(loop)
22
40
  thread = threading.Thread(target=start_loop, args=(loop, stop_event), daemon=True)
23
41
  thread.start()
24
42
  return loop, thread, stop_event
25
43
 
26
44
 
27
- def cleanup_async_loop(loop: asyncio.AbstractEventLoop, loop_thread: threading.Thread, stop_event: asyncio.Event, debug: bool = False, name: str | None = None) -> None:
45
+ def cleanup_async_loop(loop: asyncio.AbstractEventLoop, loop_thread: threading.Thread, stop_event: LoopStopper, debug: bool = False, name: Optional[str] = None) -> None:
28
46
  if loop.is_closed():
29
47
  return
30
48
 
klongpy/sys_fn.py CHANGED
@@ -12,8 +12,24 @@ from inspect import Parameter
12
12
  import numpy
13
13
 
14
14
  from .core import (KGChannel, KGChannelDir, KGLambda, KGSym, KlongException,
15
- is_dict, is_empty, is_list, kg_asarray, kg_read, kg_write, np,
15
+ is_dict, is_empty, is_list, kg_read, kg_write, np,
16
16
  reserved_fn_args, reserved_fn_symbol_map, safe_eq, safe_inspect)
17
+ from .backend import to_numpy, get_default_backend, kg_asarray
18
+
19
+
20
+ def _to_display_value(x):
21
+ """Convert backend tensors to numpy for cleaner display."""
22
+ backend = get_default_backend()
23
+ # Convert backend arrays (tensors) to numpy
24
+ if backend.is_backend_array(x):
25
+ return to_numpy(x)
26
+ # Handle numpy arrays with tensors inside (object arrays)
27
+ if isinstance(x, numpy.ndarray) and x.dtype == object:
28
+ return numpy.array([_to_display_value(item) for item in x], dtype=object)
29
+ # Handle lists with tensors
30
+ if isinstance(x, list):
31
+ return [_to_display_value(item) for item in x]
32
+ return x
17
33
 
18
34
 
19
35
  def eval_sys_append_channel(x):
@@ -47,10 +63,25 @@ def eval_sys_display(klong, x):
47
63
 
48
64
  .d(x) [Display]
49
65
 
50
- See [Write].
66
+ Display the object "x". Tensors are converted to numpy for cleaner output.
67
+ Use .bkd() for raw backend-specific display.
68
+
69
+ """
70
+ x = _to_display_value(x)
71
+ r = kg_write(x, klong._backend, display=True)
72
+ klong['.sys.cout'].raw.write(r)
73
+ return r
74
+
75
+
76
+ def eval_sys_backend_display(klong, x):
77
+ """
78
+
79
+ .bkd(x) [Backend-Display]
80
+
81
+ Display the object "x" in raw backend format (tensors shown as-is).
51
82
 
52
83
  """
53
- r = kg_write(x, display=True)
84
+ r = kg_write(x, klong._backend, display=True)
54
85
  klong['.sys.cout'].raw.write(r)
55
86
  return r
56
87
 
@@ -272,10 +303,26 @@ def eval_sys_print(klong, x):
272
303
  .p(x) [Print]
273
304
 
274
305
  Pretty-print the object "x" (like Display) and then print a
275
- newline sequence. .p("") will just print a newline.
306
+ newline sequence. Tensors are converted to numpy for cleaner output.
307
+ Use .bkp() for raw backend-specific print.
308
+
309
+ """
310
+ x = _to_display_value(x)
311
+ o = kg_write(x, klong._backend, display=True)
312
+ klong['.sys.cout'].raw.write(o+"\n")
313
+ return o
314
+
315
+
316
+ def eval_sys_backend_print(klong, x):
317
+ """
318
+
319
+ .bkp(x) [Backend-Print]
320
+
321
+ Pretty-print the object "x" in raw backend format (tensors shown as-is)
322
+ and then print a newline sequence.
276
323
 
277
324
  """
278
- o = kg_write(x, display=True)
325
+ o = kg_write(x, klong._backend, display=True)
279
326
  klong['.sys.cout'].raw.write(o+"\n")
280
327
  return o
281
328
 
@@ -341,20 +388,30 @@ def _handle_import(item):
341
388
  if n_args <= len(reserved_fn_args):
342
389
  item = KGLambda(item, args=reserved_fn_args[:n_args])
343
390
  else:
344
- args = safe_inspect(item, follow_wrapped=True)
345
- if 'args' in args:
391
+ sig_args = safe_inspect(item, follow_wrapped=True)
392
+ if 'args' in sig_args:
346
393
  item = KGLambda(item, args=None, wildcard=True)
347
394
  n_args = 3
348
395
  else:
349
- args = [k for k,v in args.items() if (v.kind == Parameter.POSITIONAL_OR_KEYWORD and v.default == Parameter.empty) or (v.kind == Parameter.POSITIONAL_ONLY)]
350
- n_args = len(args)
351
- # if there are kwargs, then .pyc() must be used to call this function to override them
352
- if 'klong' in args:
353
- n_args -= 1
354
- assert n_args <= len(reserved_fn_args)
355
- item = KGLambda(item, args=reserved_fn_args[:n_args], provide_klong=True)
356
- elif n_args <= len(reserved_fn_args):
357
- item = KGLambda(item, args=reserved_fn_args[:n_args])
396
+ # Get required args (no default)
397
+ required_args = [k for k,v in sig_args.items() if (v.kind == Parameter.POSITIONAL_OR_KEYWORD and v.default == Parameter.empty) or (v.kind == Parameter.POSITIONAL_ONLY)]
398
+ # Get optional args (have default)
399
+ optional_args = [k for k,v in sig_args.items() if v.kind == Parameter.POSITIONAL_OR_KEYWORD and v.default != Parameter.empty]
400
+ # Use required args count, but if there are optional args and no required args,
401
+ # use wildcard mode so the function can accept 0-3 args
402
+ if not required_args and optional_args:
403
+ item = KGLambda(item, args=None, wildcard=True)
404
+ n_args = 3
405
+ else:
406
+ args = required_args
407
+ n_args = len(args)
408
+ # if there are kwargs, then .pyc() must be used to call this function to override them
409
+ if 'klong' in args:
410
+ n_args -= 1
411
+ assert n_args <= len(reserved_fn_args)
412
+ item = KGLambda(item, args=reserved_fn_args[:n_args], provide_klong=True)
413
+ elif n_args <= len(reserved_fn_args):
414
+ item = KGLambda(item, args=reserved_fn_args[:n_args])
358
415
  except Exception:
359
416
  if hasattr(item, "__class__") and hasattr(item.__class__, '__module__') and item.__class__.__module__ == "builtins":
360
417
  # LOOK AWAY. You didn't see this.
@@ -415,6 +472,17 @@ def _import_module(klong, x, from_set=None):
415
472
  except Exception as e:
416
473
  # TODO: this should be logged
417
474
  print(f"failed to import function: {name}", e)
475
+
476
+ # For from_set imports, also check for lazy-loaded attributes not in __dict__
477
+ # (e.g., numpy.random in numpy 2.x)
478
+ if from_set is not None:
479
+ for name in from_set:
480
+ if name not in export_items and hasattr(module, name):
481
+ try:
482
+ item = getattr(module, name)
483
+ klong[name] = _handle_import(item)
484
+ except Exception as e:
485
+ print(f"failed to import function: {name}", e)
418
486
  finally:
419
487
  klong._context.push(ctx)
420
488
 
@@ -578,6 +646,49 @@ def eval_sys_python_from(klong, x, y):
578
646
  return _import_module(klong, x, from_set=set(y))
579
647
 
580
648
 
649
+ def eval_sys_backend_fn(klong, x):
650
+ """
651
+
652
+ .bkf(x) [Backend-Function]
653
+
654
+ Import functions from the current backend's array module.
655
+ This is similar to .pyf() but uses backend-aware functions that
656
+ work with both numpy and torch backends.
657
+
658
+ When using the torch backend, these functions preserve gradient
659
+ tracking for autograd.
660
+
661
+ Example:
662
+
663
+ .bkf("exp")
664
+ exp(1.0) --> 2.718...
665
+
666
+ .bkf(["exp";"sin";"cos"])
667
+ sin(1.0) --> 0.841...
668
+
669
+ Common functions available: exp, sin, cos, tan, tanh, sqrt, abs,
670
+ log, log10, floor, ceil, round
671
+
672
+ """
673
+ if isinstance(x, str):
674
+ x = [x]
675
+ if not (is_list(x) and all(map(lambda p: isinstance(p, str), x))):
676
+ raise RuntimeError("function name(s) must be a string or list of strings")
677
+
678
+ backend = klong._backend
679
+ ctx = klong._context.pop()
680
+ try:
681
+ for fn_name in x:
682
+ if hasattr(backend.np, fn_name):
683
+ fn = getattr(backend.np, fn_name)
684
+ klong[fn_name] = _handle_import(fn)
685
+ else:
686
+ raise RuntimeError(f"Backend does not have function: {fn_name}")
687
+ finally:
688
+ klong._context.push(ctx)
689
+ return None
690
+
691
+
581
692
  def eval_sys_random_number():
582
693
  """
583
694
 
@@ -722,7 +833,7 @@ def eval_sys_write(klong, x):
722
833
  sequence. Use .p (Print) to do so.
723
834
 
724
835
  """
725
- r = kg_write(x)
836
+ r = kg_write(x, klong._backend)
726
837
  klong['.sys.cout'].raw.write(r)
727
838
  return x
728
839