klongpy 0.6.8__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. klongpy/__init__.py +19 -1
  2. klongpy/adverbs.py +5 -5
  3. klongpy/autograd.py +308 -0
  4. klongpy/backend.py +167 -99
  5. klongpy/backends/__init__.py +94 -0
  6. klongpy/backends/base.py +320 -0
  7. klongpy/backends/numpy_backend.py +122 -0
  8. klongpy/backends/torch_backend.py +995 -0
  9. klongpy-0.6.8.data/scripts/kgpy → klongpy/cli.py +65 -88
  10. klongpy/core.py +228 -106
  11. klongpy/db/sys_fn_db.py +4 -3
  12. klongpy/dyads.py +173 -32
  13. klongpy/interpreter.py +31 -3
  14. klongpy/lib/help.kg +2 -2
  15. klongpy/monads.py +49 -12
  16. klongpy/repl.py +91 -0
  17. klongpy/sys_fn.py +129 -18
  18. klongpy/sys_fn_autograd.py +290 -0
  19. klongpy/sys_fn_ipc.py +18 -7
  20. klongpy/sys_fn_timer.py +13 -3
  21. klongpy/web/sys_fn_web.py +28 -6
  22. klongpy-0.7.0.dist-info/METADATA +493 -0
  23. klongpy-0.7.0.dist-info/RECORD +48 -0
  24. {klongpy-0.6.8.dist-info → klongpy-0.7.0.dist-info}/WHEEL +1 -1
  25. klongpy-0.7.0.dist-info/entry_points.txt +2 -0
  26. {klongpy-0.6.8.dist-info → klongpy-0.7.0.dist-info}/top_level.txt +0 -1
  27. klongpy-0.6.8.dist-info/METADATA +0 -412
  28. klongpy-0.6.8.dist-info/RECORD +0 -72
  29. tests/__init__.py +0 -6
  30. tests/gen_join_over.py +0 -119
  31. tests/gen_py_suite.py +0 -77
  32. tests/gen_test_fn.py +0 -259
  33. tests/perf_async.py +0 -25
  34. tests/perf_avg.py +0 -18
  35. tests/perf_duckdb.py +0 -32
  36. tests/perf_gen.py +0 -38
  37. tests/perf_ipc_overhead.py +0 -34
  38. tests/perf_join.py +0 -53
  39. tests/perf_load.py +0 -17
  40. tests/perf_prog.py +0 -18
  41. tests/perf_serdes.py +0 -52
  42. tests/perf_sys_fn_db.py +0 -263
  43. tests/perf_vector.py +0 -40
  44. tests/test_accel.py +0 -227
  45. tests/test_df_cache.py +0 -85
  46. tests/test_examples.py +0 -64
  47. tests/test_extra_suite.py +0 -382
  48. tests/test_file_cache.py +0 -185
  49. tests/test_interop.py +0 -181
  50. tests/test_kgtests.py +0 -65
  51. tests/test_known_bugs.py +0 -206
  52. tests/test_prog.py +0 -107
  53. tests/test_suite.py +0 -1479
  54. tests/test_suite_file.py +0 -153
  55. tests/test_sys_fn.py +0 -420
  56. tests/test_sys_fn_db.py +0 -88
  57. tests/test_sys_fn_ipc.py +0 -587
  58. tests/test_sys_fn_timer.py +0 -133
  59. tests/test_util.py +0 -233
  60. tests/utils.py +0 -126
  61. {klongpy-0.6.8.dist-info → klongpy-0.7.0.dist-info/licenses}/LICENSE +0 -0
klongpy/dyads.py CHANGED
@@ -1,5 +1,11 @@
  from .core import *
+ from .autograd import grad_of_fn, numeric_grad, jacobian_of_fn, multi_jacobian_of_fn, multi_grad_of_fn
+ from .backend import (
+ to_numpy, safe_equal, detach_if_needed, to_int_array, power as backend_power, has_gradient,
+ kg_asarray, str_to_chr_arr, kg_equal, is_integer, is_float, get_dtype_kind, array_size
+ )
  import sys
+ import numpy


  def eval_dyad_add(a, b):
@@ -181,7 +187,11 @@ def eval_dyad_at_index(klong, a, b):
  j = False
  else:
  r = a
- return "".join(r) if j else r
+ if j:
+ if np.isarray(r) and r.ndim > 1:
+ return np.asarray(["".join(x) for x in r], dtype=object)
+ return "".join(r)
+ return r


  def eval_dyad_define(klong, n, v):
@@ -250,6 +260,11 @@ def eval_dyad_drop(a, b):
  return b[a:] if a >= 0 else b[:a]


+ def _safe_equal(x, y):
+ """Compare two values, handling torch tensors correctly."""
+ return kg_truth(safe_equal(x, y))
+
+
  def eval_dyad_equal(a, b):
  """

@@ -275,7 +290,7 @@ def eval_dyad_equal(a, b):
  [1 2 3]=[1 4 3] --> [1 0 1]

  """
- return vec_fn2(a, b, lambda x, y: kg_truth(np.asarray(x,dtype=object) == np.asarray(y,dtype=object)))
+ return vec_fn2(a, b, _safe_equal)


  def finditer(s, sub):
@@ -401,9 +416,11 @@ def _e_dyad_format2(a, b):
  """
  Unravel the broadcasting of a and b and apply __e_dyad_format2
  """
+ if is_list(a) and is_list(b):
+ return kg_asarray([vec_fn2(x, y, _e_dyad_format2) for x, y in zip(to_list(a), to_list(b))])
  if np.isarray(a) and np.isarray(b):
- return np.asarray([vec_fn2(x,y,_e_dyad_format2) for x,y in zip(a,b)])
- return __e_dyad_format2(a,b)
+ return np.asarray([vec_fn2(x, y, _e_dyad_format2) for x, y in zip(a, b)])
+ return __e_dyad_format2(a, b)

  def eval_dyad_format2(a, b):
  """
@@ -453,10 +470,10 @@ def eval_dyad_index_in_depth(a, b):
  return np.asarray(a)[tuple(b) if is_list(b) else b] if not is_empty(b) else b


- def _e_dyad_integer_divide(x,y):
+ def _e_dyad_integer_divide(x, y):
  a = np.divide(x, y)
- a = kg_asarray(rec_fn(a,np.trunc)) if np.isarray(a) else a
- return np.asarray(a,dtype='int') if np.isarray(a) else int(a)
+ a = kg_asarray(rec_fn(a, np.trunc)) if np.isarray(a) else a
+ return to_int_array(a)

  def eval_dyad_integer_divide(a, b):
  """
@@ -534,18 +551,28 @@ def eval_dyad_join(a, b):
  return b

  if np.isarray(a) and np.isarray(b):
- if len(a) == 0:
- return b
- if len(a.shape) == len(b.shape) and a.shape[-1] == b.shape[-1]:
- return np.concatenate((a,b))
+ # Only use fast path for 1D+ arrays (not 0D scalars)
+ a_is_1d_plus = hasattr(a, 'ndim') and a.ndim >= 1
+ b_is_1d_plus = hasattr(b, 'ndim') and b.ndim >= 1
+ if a_is_1d_plus and b_is_1d_plus:
+ if len(a) == 0:
+ return b
+ if len(a.shape) == len(b.shape) and a.shape[-1] == b.shape[-1]:
+ return np.concatenate((a,b))

  aa = _arr_to_list(a)
  bb = _arr_to_list(b)

  r = [*aa,*bb]
  nr = kg_asarray(r)
- t = nr.dtype.type
- return nr if issubclass(t, np.integer) or issubclass(t, np.floating) else np.asarray(r,dtype=object)
+ # Check dtype kind for compatibility with both numpy and torch
+ dtype_kind = get_dtype_kind(nr)
+ if dtype_kind in ('i', 'f', 'u'):
+ return nr
+ # Use numpy directly for object arrays (torch backend doesn't support object dtype)
+ # Convert any torch tensors to numpy first (needed for MPS tensors)
+ r_numpy = [to_numpy(x) if np.isarray(x) else x for x in r]
+ return numpy.asarray(r_numpy, dtype=object)


  def eval_dyad_less(a, b):
@@ -702,10 +729,26 @@ def eval_dyad_multiply(a, b):
  return np.multiply(a, b)


- def _e_dyad_power(a,b):
- r = np.power(float(a) if is_integer(a) else a, b)
- br = all([np.trunc(x) == x for x in r]) if is_list(r) else np.trunc(r) == r
- return np.dtype('int').type(r) if br else r
+ def _e_dyad_power(a, b):
+ # Check if input requires grad - if so, preserve float for autograd
+ input_has_grad = has_gradient(a)
+ # Use backend power function which handles torch.pow for gradients
+ r = backend_power(a, b)
+ # If input had gradients, keep result as float to preserve autograd
+ if input_has_grad:
+ return r
+ # Check if result is integer using vectorized operations
+ r_val = detach_if_needed(r)
+ if is_list(r_val):
+ # Vectorized check: trunc(r) == r for all elements
+ trunc_r = numpy.trunc(r_val) if isinstance(r_val, numpy.ndarray) else r_val.trunc()
+ br = bool((trunc_r == r_val).all())
+ else:
+ val = float(r_val) if hasattr(r_val, 'item') else r_val
+ br = numpy.trunc(val) == val
+ if br:
+ return to_int_array(r)
+ return r

  def eval_dyad_power(a, b):
  """
@@ -804,20 +847,22 @@ def eval_dyad_reshape(a, b):
  y = np.where(a < 0)[0]
  if len(y) > 0:
  a = np.copy(a)
- a[y] = b.size // 2
- b_s = b.size
- a_s = np.prod(a)
+ a[y] = array_size(b) // 2
+ b_s = array_size(b)
+ a_s = int(np.prod(a)) # Ensure it's a Python int for comparison
+ # Convert shape to tuple of ints for torch compatibility
+ a_shape = tuple(int(x) for x in (a.tolist() if hasattr(a, 'tolist') else a))
  if a_s > b_s:
  b = np.tile(b.flatten(), (a_s // b_s))
- b = np.concatenate((b, b[:a_s - b.size]))
- b_s = b.size
- r = b.reshape(a)
+ b = np.concatenate((b, b[:a_s - array_size(b)]))
+ b_s = array_size(b)
+ r = b.reshape(a_shape)
  r = np.asarray(["".join(x) for x in r]) if j else r
  j = False
  elif a_s == b_s:
- r = b.reshape(a)
+ r = b.reshape(a_shape)
  else:
- r = np.resize(b, a)
+ r = np.resize(b, a_shape)
  else:
  r = np.full(a, b)
  else:
@@ -832,7 +877,11 @@ def eval_dyad_reshape(a, b):
  r = np.concatenate((np.tile(b,ns), b[:a - b.shape[0]*ns[0]]))
  else:
  r = np.full((a,), b)
- return "".join(r) if j else r
+ if j:
+ if np.isarray(r) and r.ndim > 1:
+ return np.asarray(["".join(x) for x in r], dtype=object)
+ return "".join(r)
+ return r


  def eval_dyad_rotate(a, b):
@@ -850,7 +899,7 @@ def eval_dyad_rotate(a, b):
  rotated will be a!#b.

  Note that n:+M rotates the rows of a matrix M (i.e. it rotates
- it vertically); to rotate its columns (horizontally), use n:+:\M
+ it vertically); to rotate its columns (horizontally), use n:+:\\M
  (Rotate-Each-Left).

  Examples: 1:+[1 2 3 4 5] --> [5 1 2 3 4]
@@ -956,14 +1005,106 @@ def eval_dyad_take(a, b):
  """
  j = isinstance(b,str)
  b = str_to_chr_arr(b) if j else np.asarray(b)
- aa = np.abs(a)
- if aa > b.size:
- b = np.tile(b,aa // len(b))
- b = np.concatenate((b, b[:aa-b.size]) if a > 0 else (b[-(aa-b.size):],b))
- r = b[a:] if a < 0 else b[:a]
+ aa = int(np.abs(a)) if hasattr(np.abs(a), 'item') else np.abs(a) # Convert tensor to int
+ b_size = array_size(b)
+ if b_size == 0:
+ # Handle empty array/string case
+ r = b
+ elif aa > b_size:
+ b = np.tile(b, aa // len(b))
+ b = np.concatenate((b, b[:aa-array_size(b)]) if a > 0 else (b[-(aa-array_size(b)):], b))
+ r = b[a:] if a < 0 else b[:a]
+ else:
+ r = b[a:] if a < 0 else b[:a]
  return "".join(r) if j else r


+ def eval_dyad_grad(klong, a, b):
+ """
+
+ a∇b [Grad]
+
+ Compute the numeric gradient of the monadic function ``b`` at ``a``
+ using finite differences. Always uses numeric differentiation.
+
+ For automatic differentiation, use the :> operator instead.
+
+ """
+ def call_fn(v):
+ if isinstance(b, (KGSym, KGLambda, KGFn, KGCall)):
+ return klong.call(KGCall(b, [v], 1))
+ return b(v)
+
+ if isinstance(a, KGSym):
+ orig = klong[a]
+
+ def func(v):
+ klong[a] = v
+ try:
+ return call_fn(v)
+ finally:
+ klong[a] = orig
+
+ return numeric_grad(func, orig, klong._backend)
+ else:
+ return numeric_grad(call_fn, a, klong._backend)
+
+
+ def eval_dyad_jacobian(klong, a, b):
+ """
+
+ a∂b [Jacobian]
+
+ Compute Jacobian matrix of function ``b`` at point ``a``.
+ For f: R^n -> R^m, returns m x n matrix where J[i,j] = df_i/dx_j.
+
+ Two modes based on what ``a`` contains:
+ 1. Single point: [1 2]∂f -> Jacobian at that point
+ 2. List of symbols: [w b]∂f -> [J_w J_b] (multi-param mode)
+
+ In multi-param mode, ``b`` should be a niladic function
+ that references the parameter symbols.
+
+ Examples:
+ [1 2]∂{[x@0^2 x@1^2]} --> [[2 0] [0 4]]
+ [w b]∂f --> [J_w J_b] (multi-param mode)
+
+ """
+ # Check if a is a list of symbols (multi-param mode)
+ if is_list(a) and len(a) > 0 and all(isinstance(p, KGSym) for p in a):
+ return multi_jacobian_of_fn(klong, b, list(a))
+ else:
+ return jacobian_of_fn(klong, b, a) # Note: a is point, b is function
+
+
+ def eval_dyad_autograd(klong, a, b):
+ """
+
+ a:>b [Autograd]
+
+ Compute gradient of function ``a`` with respect to ``b``.
+
+ Two modes based on what ``b`` contains:
+ 1. Single param/point: a:>x or a:>[1 2 3] -> gradient at that point
+ 2. List of symbols: a:>[w b] -> [grad_w grad_b] (multi-param mode)
+
+ In multi-param mode, ``a`` should be a niladic function (loss)
+ that references the parameter symbols.
+
+ Examples:
+ {x^2}:>3.0 --> 6.0 (derivative of x^2 at x=3)
+ {x^3}:>2.0 --> 12.0 (derivative of x^3 at x=2)
+ {+/x^2}:>[1 2 3] --> [2 4 6] (gradient of sum of squares)
+ loss:>[w b] --> [grad_w grad_b] (multi-param mode)
+
+ """
+ # Check if b is a list of symbols (multi-param mode)
+ if is_list(b) and len(b) > 0 and all(isinstance(p, KGSym) for p in b):
+ return multi_grad_of_fn(klong, a, list(b))
+ else:
+ return grad_of_fn(klong, a, b)
+
+
  def create_dyad_functions(klong):
  def _get_name(s):
  s = s.strip()
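
The docstrings above give Klong-level examples for the new ∇, ∂, and :> dyads. As a rough illustration of how they can be driven from Python, here is a sketch based only on those docstring examples (not part of this diff; exact output formatting may differ):

    # Sketch only: exercising the new :> (autograd) and ∂ (Jacobian) dyads
    # via the interpreter, using the expressions quoted in the docstrings above.
    from klongpy import KlongInterpreter

    klong = KlongInterpreter()
    print(klong('{x^2}:>3.0'))             # expected 6.0 (derivative of x^2 at x=3)
    print(klong('{+/x^2}:>[1 2 3]'))       # expected [2 4 6]
    print(klong('[1 2]∂{[x@0^2 x@1^2]}'))  # expected [[2 0] [0 4]]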
klongpy/interpreter.py CHANGED
@@ -2,10 +2,13 @@ import time
  from collections import deque

  from .adverbs import get_adverb_fn
+ from .backends import get_backend
  from .core import *
+ from .backend import is_number
  from .dyads import create_dyad_functions
  from .monads import create_monad_functions
  from .sys_fn import create_system_functions
+ from .sys_fn_autograd import create_system_functions_autograd
  from .sys_fn_ipc import create_system_functions_ipc, create_system_var_ipc
  from .sys_fn_timer import create_system_functions_timer
  from .sys_var import *
@@ -141,6 +144,7 @@ def create_system_contexts():

  sys_d = {}
  add_context_key_values(sys_d, create_system_functions())
+ add_context_key_values(sys_d, create_system_functions_autograd())
  add_context_key_values(sys_d, create_system_functions_ipc())
  add_context_key_values(sys_d, create_system_functions_timer())
  set_context_var(sys_d, KGSym('.e'), eval_sys_var_epsilon()) # TODO: support lambda
@@ -210,13 +214,36 @@ def chain_adverbs(klong, arr):

  class KlongInterpreter():

- def __init__(self):
+ def __init__(self, backend=None, device=None):
+ """
+ Initialize a Klong interpreter.
+
+ Parameters
+ ----------
+ backend : str, optional
+ Backend name ('numpy' or 'torch'). If None, uses the default
+ backend (numpy, unless KLONGPY_BACKEND or USE_TORCH env vars are set).
+ device : str, optional
+ Device for torch backend ('cpu', 'cuda', 'mps'). Only applies
+ when backend='torch'. If None, auto-selects best available device.
+ """
+ self._backend = get_backend(backend, device=device)
  self._context = KlongContext(create_system_contexts())
  self._vd = create_dyad_functions(self)
  self._vm = create_monad_functions(self)
  self._start_time = time.time()
  self._module = None

+ @property
+ def backend(self):
+ """Return the backend provider for this interpreter."""
+ return self._backend
+
+ @property
+ def np(self):
+ """Return the numpy-compatible array module for this interpreter."""
+ return self._backend.np
+
  def __setitem__(self, k, v):
  k = k if isinstance(k, KGSym) else KGSym(k)
  self._context[k] = v
@@ -224,7 +251,8 @@ class KlongInterpreter():
  def __getitem__(self, k):
  k = k if isinstance(k, KGSym) else KGSym(k)
  r = self._context[k]
- return KGFnWrapper(self, r) if issubclass(type(r), KGFn) else r
+ # Pass the symbol name to avoid O(n) context search
+ return KGFnWrapper(self, r, sym=k) if issubclass(type(r), KGFn) else r

  def __delitem__(self, k):
  k = k if isinstance(k, KGSym) else KGSym(k)
@@ -618,7 +646,7 @@ class KlongInterpreter():
  f = self._get_op_fn(x.a.a, x.a.arity)
  fa = (x.args if isinstance(x.args, list) else [x.args]) if x.args is not None else x.args
  _y = self.eval(fa[1]) if x.a.arity == 2 else None
- _x = fa[0] if x.a.a == '::' else self.eval(fa[0])
+ _x = fa[0] if x.a.a in ['::','∇'] else self.eval(fa[0])
  return f(_x) if x.a.arity == 1 else f(_x, _y)
  elif x.is_adverb_chain():
  return chain_adverbs(self, x.a)()
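
The new constructor parameters documented in the `__init__` docstring above can be exercised directly. A minimal sketch, assuming the optional torch dependency is installed when backend='torch' is requested:

    # Sketch only: constructing interpreters against the new backend selection API.
    from klongpy import KlongInterpreter

    k = KlongInterpreter()                      # default numpy backend
    k_torch = KlongInterpreter(backend='torch', # requires torch to be installed
                               device='cpu')
    print(k.backend)  # backend provider (the new `backend` property)
    print(k.np)       # numpy-compatible array module for that backend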
klongpy/lib/help.kg CHANGED
@@ -30,8 +30,8 @@ op.db::[
  [" a:$b" "Form"
  "b=string; convert 'b' to an object of the same form (type) as 'a'"]
  [" $a" "Format" "convert 'a' to a string representing the value of 'a'"]
- [" a$b" "Format2"
- "a=real; Format 'b', pad with 'a' blanks or to align to x.y digits"]
+ [" a$b" "Format2"
+ "a=real|list; when both operands are lists, apply pairwise. Format 'b', pad with 'a' blanks or to align to x.y digits"]
  [" >a" "Grade-Down"
  "a=vector; vector of indices of elements of 'a' in ascending order"]
  [" <a" "Grade-Up"
klongpy/monads.py CHANGED
@@ -1,4 +1,8 @@
  from .core import *
+ from .autograd import grad_of_fn
+ from .backend import (
+ kg_asarray, is_integer, is_number, str_to_chr_arr, kg_argsort, array_size, vec_fn
+ )
  import sys

  def eval_monad_atom(a):
@@ -74,7 +78,8 @@ def eval_monad_expand_where(a):
  &[0 1 0 1 0] --> [1 3]

  """
- return np.concatenate([np.zeros(x, dtype=int) + i for i,x in enumerate(a if is_list(a) else [a])])
+ arr = a if is_list(a) else [a]
+ return np.repeat(np.arange(len(arr)), arr)


  def eval_monad_first(a):
@@ -114,7 +119,15 @@ def eval_monad_floor(a):
  _1e100 --> 1.0e+100 :"if precision < 100 digits"

  """
- return vec_fn(a, lambda x: np.floor(np.asarray(x, dtype=float)).astype(int))
+ def _floor_to_int(x):
+ result = np.floor(np.asarray(x, dtype=float))
+ # Handle both numpy arrays and torch tensors
+ if hasattr(result, 'astype'):
+ return result.astype(int)
+ elif hasattr(result, 'to'): # torch tensor - .to(int) works
+ return result.to(int)
+ return int(result)
+ return vec_fn(a, _floor_to_int)


  def eval_monad_format(a):
@@ -163,7 +176,7 @@ def eval_monad_grade_up(a):
  >[[1] [2] [3]] --> [2 1 0]

  """
- return kg_argsort(str_to_chr_arr(a) if isinstance(a,str) else a)
+ return kg_argsort(kg_asarray(a))


  def eval_monad_grade_down(a):
@@ -174,7 +187,7 @@ def eval_monad_grade_down(a):
  See [Grade-Up].

  """
- return kg_argsort(str_to_chr_arr(a) if isinstance(a,str) else a, descending=True)
+ return kg_argsort(kg_asarray(a), descending=True)


  def eval_monad_groupby(a):
@@ -194,12 +207,12 @@ def eval_monad_groupby(a):
  ="hello foo" --> [[0] [1] [2 3] [4 7 8] [5] [6]]

  """
- q = np.asarray(str_to_chr_arr(a) if isinstance(a, str) else a)
- if len(q) == 0:
- return q
- a = q.argsort()
- r = np.split(a, np.where(q[a][1:] != q[a][:-1])[0] + 1)
- return np.asarray(r, dtype=object)
+ arr = kg_asarray(a)
+ if array_size(arr) == 0:
+ return arr
+ vals, inverse = np.unique(arr, return_inverse=True)
+ groups = [np.where(inverse == i)[0] for i in range(len(vals))]
+ return kg_asarray(groups)


  def eval_monad_list(a):
@@ -214,10 +227,10 @@ def eval_monad_list(a):
  ,"xyz" --> ["xyz"]
  ,[1] --> [[1]]
  """
- if isinstance(a, KGChar):
+ if is_char(a):
  return str(a)
  if isinstance(a, KGSym):
- np.asarray([a],dtype=object) # np interpets ':foo" as ':fo"
+ return np.asarray([a],dtype=object) # np interprets ':foo" as ':fo"
  return np.asarray([a])


@@ -452,6 +465,28 @@ def eval_monad_undefined(a):
  return kg_truth(a is None or (np.isinf(a) if is_number(a) else False))


+ def eval_monad_track(a):
+ """
+
+ ˙a [Track]
+
+ Identity operator used when marking values for gradient tracking.
+
+ """
+ return a
+
+
+ def eval_monad_grad(klong, a):
+ """
+
+ ∇a [Grad]
+
+ Return a function that computes the numeric gradient of ``a``.
+
+ """
+ return KGLambda(lambda x, fn=a, k=klong: grad_of_fn(k, fn, x))
+
+
  def create_monad_functions(klong):
  def _get_name(s):
  s = s.strip()
@@ -463,6 +498,8 @@ def create_monad_functions(klong):
  for x in filter(lambda n: n.startswith("eval_monad_"), dir(m)):
  fn = getattr(m,x)
  name = _get_name(fn.__doc__)
+ if fn.__code__.co_argcount == 2 and 'klong' in fn.__code__.co_varnames:
+ fn = lambda a,f=fn,klong=klong: f(klong, a)
  registry[name] = fn

  return registry
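
A minimal sketch of the new monadic ∇ described above, assumed usage inferred from its docstring (∇ returns a function; the gradient is computed numerically by finite differences, so results are approximate):

    # Sketch only: monadic ∇ wraps a function in a numeric-gradient function.
    from klongpy import KlongInterpreter

    klong = KlongInterpreter()
    klong('d::∇{x^2}')      # d is a numeric-gradient function of x^2
    print(klong('d(3.0)'))  # expected approximately 6.0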
klongpy/repl.py ADDED
@@ -0,0 +1,91 @@
+ import asyncio
+ import threading
+ import time
+ import os
+ import importlib.resources
+ from typing import Optional
+
+ from . import KlongInterpreter
+ from .utils import CallbackEvent
+
+
+ class LoopStopper:
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._future = loop.create_future()
+
+ def set(self) -> None:
+ if self._future.done():
+ return
+ if self._loop.is_running():
+ self._loop.call_soon_threadsafe(self._future.set_result, None)
+ else:
+ self._future.set_result(None)
+
+ async def wait(self) -> None:
+ await self._future
+
+
+ def start_loop(loop: asyncio.AbstractEventLoop, stop_event: LoopStopper) -> None:
+ asyncio.set_event_loop(loop)
+ loop.run_until_complete(stop_event.wait())
+
+
+ def setup_async_loop(debug: bool = False, slow_callback_duration: float = 86400.0):
+ loop = asyncio.new_event_loop()
+ loop.slow_callback_duration = slow_callback_duration
+ if debug:
+ loop.set_debug(True)
+ stop_event = LoopStopper(loop)
+ thread = threading.Thread(target=start_loop, args=(loop, stop_event), daemon=True)
+ thread.start()
+ return loop, thread, stop_event
+
+
+ def cleanup_async_loop(loop: asyncio.AbstractEventLoop, loop_thread: threading.Thread, stop_event: LoopStopper, debug: bool = False, name: Optional[str] = None) -> None:
+ if loop.is_closed():
+ return
+
+ loop.call_soon_threadsafe(stop_event.set)
+ loop_thread.join()
+
+ pending_tasks = asyncio.all_tasks(loop=loop)
+ if len(pending_tasks) > 0:
+ if name:
+ print(f"WARNING: pending tasks in {name} loop")
+ for task in pending_tasks:
+ loop.call_soon_threadsafe(task.cancel)
+ while len(asyncio.all_tasks(loop=loop)) > 0:
+ time.sleep(0)
+
+ loop.stop()
+
+ if not loop.is_closed():
+ loop.close()
+
+
+ def append_pkg_resource_path_KLONGPATH() -> None:
+ with importlib.resources.as_file(importlib.resources.files('klongpy')) as pkg_path:
+ pkg_lib_path = os.path.join(pkg_path, 'lib')
+ klongpath = os.environ.get('KLONGPATH', '.:lib')
+ klongpath = f"{klongpath}:{pkg_lib_path}" if klongpath else str(pkg_lib_path)
+ os.environ['KLONGPATH'] = klongpath
+
+
+ def create_repl(debug: bool = False):
+ io_loop, io_thread, io_stop = setup_async_loop(debug=debug)
+ klong_loop, klong_thread, klong_stop = setup_async_loop(debug=debug)
+
+ append_pkg_resource_path_KLONGPATH()
+
+ klong = KlongInterpreter()
+ shutdown_event = CallbackEvent()
+ klong['.system'] = {'ioloop': io_loop, 'klongloop': klong_loop, 'closeEvent': shutdown_event}
+
+ return klong, (io_loop, io_thread, io_stop, klong_loop, klong_thread, klong_stop)
+
+
+ def cleanup_repl(loops, debug: bool = False) -> None:
+ io_loop, io_thread, io_stop, klong_loop, klong_thread, klong_stop = loops
+ cleanup_async_loop(io_loop, io_thread, io_stop, debug=debug, name='io_loop')
+ cleanup_async_loop(klong_loop, klong_thread, klong_stop, debug=debug, name='klong_loop')
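
A minimal usage sketch for the helpers in this new module (names taken from the code above; the expression run is arbitrary):

    # Sketch only: create the interpreter plus its IO and klong event loops,
    # evaluate one expression, then tear the loops down again.
    from klongpy.repl import create_repl, cleanup_repl

    klong, loops = create_repl(debug=False)
    try:
        print(klong('1+1'))
    finally:
        cleanup_repl(loops)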