klongpy 0.6.9__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. klongpy/__init__.py +17 -1
  2. klongpy/adverbs.py +84 -82
  3. klongpy/autograd.py +299 -0
  4. klongpy/backend.py +38 -103
  5. klongpy/backends/__init__.py +26 -0
  6. klongpy/backends/base.py +469 -0
  7. klongpy/backends/numpy_backend.py +123 -0
  8. klongpy/backends/registry.py +76 -0
  9. klongpy/backends/torch_backend.py +1047 -0
  10. klongpy-0.6.9.data/scripts/kgpy → klongpy/cli.py +110 -90
  11. klongpy/core.py +113 -974
  12. klongpy/db/sys_fn_db.py +7 -6
  13. klongpy/db/sys_fn_kvs.py +2 -4
  14. klongpy/dyads.py +332 -160
  15. klongpy/interpreter.py +60 -15
  16. klongpy/monads.py +121 -75
  17. klongpy/parser.py +328 -0
  18. klongpy/repl.py +23 -5
  19. klongpy/sys_fn.py +170 -21
  20. klongpy/sys_fn_autograd.py +290 -0
  21. klongpy/sys_fn_ipc.py +22 -15
  22. klongpy/sys_fn_timer.py +13 -3
  23. klongpy/types.py +503 -0
  24. klongpy/web/sys_fn_web.py +14 -4
  25. klongpy/writer.py +122 -0
  26. klongpy/ws/sys_fn_ws.py +5 -8
  27. klongpy-0.7.1.dist-info/METADATA +544 -0
  28. klongpy-0.7.1.dist-info/RECORD +52 -0
  29. {klongpy-0.6.9.dist-info → klongpy-0.7.1.dist-info}/WHEEL +1 -1
  30. klongpy-0.7.1.dist-info/entry_points.txt +2 -0
  31. {klongpy-0.6.9.dist-info → klongpy-0.7.1.dist-info}/top_level.txt +0 -1
  32. klongpy-0.6.9.dist-info/METADATA +0 -448
  33. klongpy-0.6.9.dist-info/RECORD +0 -77
  34. tests/__init__.py +0 -6
  35. tests/gen_join_over.py +0 -119
  36. tests/gen_py_suite.py +0 -77
  37. tests/gen_test_fn.py +0 -259
  38. tests/perf_async.py +0 -25
  39. tests/perf_avg.py +0 -18
  40. tests/perf_duckdb.py +0 -32
  41. tests/perf_gen.py +0 -38
  42. tests/perf_ipc_overhead.py +0 -34
  43. tests/perf_join.py +0 -53
  44. tests/perf_load.py +0 -17
  45. tests/perf_prog.py +0 -18
  46. tests/perf_serdes.py +0 -52
  47. tests/perf_sys_fn_db.py +0 -263
  48. tests/perf_vector.py +0 -40
  49. tests/test_accel.py +0 -227
  50. tests/test_df_cache.py +0 -85
  51. tests/test_eval_monad_list.py +0 -34
  52. tests/test_examples.py +0 -64
  53. tests/test_extra_suite.py +0 -382
  54. tests/test_file_cache.py +0 -185
  55. tests/test_interop.py +0 -180
  56. tests/test_kg_asarray.py +0 -94
  57. tests/test_kgtests.py +0 -65
  58. tests/test_known_bugs.py +0 -206
  59. tests/test_prog.py +0 -107
  60. tests/test_reshape_strings.py +0 -33
  61. tests/test_suite.py +0 -1480
  62. tests/test_suite_file.py +0 -153
  63. tests/test_sys_fn.py +0 -420
  64. tests/test_sys_fn_db.py +0 -88
  65. tests/test_sys_fn_ipc.py +0 -587
  66. tests/test_sys_fn_timer.py +0 -133
  67. tests/test_sys_fn_web.py +0 -50
  68. tests/test_util.py +0 -233
  69. tests/utils.py +0 -126
  70. {klongpy-0.6.9.dist-info → klongpy-0.7.1.dist-info}/licenses/LICENSE +0 -0
klongpy/sys_fn_autograd.py ADDED
@@ -0,0 +1,290 @@
+ """
+ Autograd system functions for KlongPy.
+ 
+ Provides .jacobian() for Jacobian matrix computation.
+ Provides .compile() for function compilation and graph export (torch only).
+ 
+ For optimizers (SGD, Adam, etc.), see examples/autograd/optimizers.py
+ which can be copied to your project and customized.
+ """
+ import sys
+ 
+ from .autograd import jacobian_of_fn, _invoke_fn
+ 
+ 
+ def eval_sys_jacobian(klong, x, y):
+     """
+ 
+         .jacobian(x;y)                                    [Jacobian]
+ 
+         Compute Jacobian matrix of function x at point y.
+ 
+         For f: R^n -> R^m, returns m x n matrix where J[i,j] = df_i/dx_j.
+ 
+         Examples:
+             f::{[x@0^2 x@1^2]}
+             .jacobian(f;[1 2]) --> [[2 0] [0 4]]
+ 
+     """
+     return jacobian_of_fn(klong, x, y)
+ 
+ 
+ def eval_sys_compile(klong, x, y):
+     """
+ 
+         .compile(x;y)                                     [Compile]
+ 
+         Compile a function for optimized execution using torch.compile.
+         Requires PyTorch backend (USE_TORCH=1).
+ 
+         Arguments:
+             x - Function to compile
+             y - Example input for tracing the computation graph
+ 
+         Returns:
+             Compiled function (faster execution)
+ 
+         Examples:
+             f::{x^2}
+             cf::.compile(f;3.0)   :" Returns compiled function
+             cf(5.0)               :" 25.0 (optimized)
+ 
+         Notes:
+             - Only supported with PyTorch backend
+             - Raises error on NumPy backend
+             - See .export() for saving graphs to files
+ 
+     """
+     fn, example_input = x, y
+ 
+     backend = klong._backend
+     if not backend.supports_autograd():
+         raise RuntimeError(
+             ".compile() requires PyTorch backend. "
+             "Run with USE_TORCH=1 environment variable."
+         )
+ 
+     # Wrap the Klong function for torch
+     def wrapped_fn(v):
+         return _invoke_fn(klong, fn, [v])
+ 
+     return backend.compile_function(wrapped_fn, example_input, None)
+ 
+ 
+ def eval_sys_export(klong, x, y, z):
+     """
+ 
+         .export(x;y;z)                                    [Export]
+ 
+         Export a function's computation graph to a file for inspection.
+         Requires PyTorch backend (USE_TORCH=1).
+ 
+         Arguments:
+             x - Function to export
+             y - Example input for tracing the computation graph
+             z - Path to save the graph (.pt2 file)
+ 
+         Returns:
+             Dictionary with:
+                 "compiled_fn" - The compiled function
+                 "export_path" - Path where graph was saved
+                 "graph"       - String representation of computation graph
+ 
+         Examples:
+             f::{x^2}
+             info::.export(f;3.0;"model.pt2")
+             .p(info@"graph")   :" Print computation graph
+ 
+         Notes:
+             - Only supported with PyTorch backend
+             - The exported graph can be loaded with torch.export.load()
+             - Use .compile() for just compiling without export
+ 
+     """
+     fn, example_input, output_path = x, y, z
+ 
+     backend = klong._backend
+     if not backend.supports_autograd():
+         raise RuntimeError(
+             ".export() requires PyTorch backend. "
+             "Run with USE_TORCH=1 environment variable."
+         )
+ 
+     # Wrap the Klong function for torch
+     def wrapped_fn(v):
+         return _invoke_fn(klong, fn, [v])
+ 
+     return backend.compile_function(wrapped_fn, example_input, output_path)
+ 
+ 
+ def eval_sys_compilex(klong, x, y, z):
+     """
+ 
+         .compilex(x;y;z)                                  [Compile-Extended]
+ 
+         Compile a function with extended options for mode and backend.
+         Requires PyTorch backend (USE_TORCH=1).
+ 
+         Arguments:
+             x - Function to compile
+             y - Example input for tracing the computation graph
+             z - Options dictionary with compile settings
+ 
+         Options (z):
+             "mode" - Compilation mode:
+                 "default"         - Balanced (default)
+                 "reduce-overhead" - Faster compile, less optimization
+                 "max-autotune"    - Slower compile, best runtime
+             "backend" - Compilation backend:
+                 "inductor"   - Default with C++/Triton codegen
+                 "eager"      - No compilation (debugging)
+                 "cudagraphs" - CUDA graphs (GPU only)
+             "fullgraph" - 1 to require full graph compilation
+             "dynamic"   - 1 for dynamic shapes, 0 for static
+ 
+         Mode Comparison:
+             | Mode            | Compile | Runtime | Use Case    |
+             |-----------------|---------|---------|-------------|
+             | default         | Medium  | Good    | General use |
+             | reduce-overhead | Fast    | OK      | Development |
+             | max-autotune    | Slow    | Best    | Production  |
+ 
+         Returns:
+             Compiled function
+ 
+         Examples:
+             f::{x^2}
+ 
+             :" Fast compilation for development
+             cf::.compilex(f;3.0;:{["mode" "reduce-overhead"]})
+ 
+             :" Maximum optimization for production
+             cf::.compilex(f;3.0;:{["mode" "max-autotune"]})
+ 
+             :" Debug mode (no compilation)
+             cf::.compilex(f;3.0;:{["backend" "eager"]})
+ 
+         Notes:
+             - Only supported with PyTorch backend
+             - Requires C++ compiler for inductor backend
+             - Use .cmodes() to see all available options
+ 
+     """
+     fn, example_input, options = x, y, z
+ 
+     backend = klong._backend
+     if not backend.supports_autograd():
+         raise RuntimeError(
+             ".compilex() requires PyTorch backend. "
+             "Run with USE_TORCH=1 environment variable."
+         )
+ 
+     # Extract options from dictionary
+     mode = options.get("mode", "default") if isinstance(options, dict) else "default"
+     compile_backend = options.get("backend", "inductor") if isinstance(options, dict) else "inductor"
+     fullgraph = bool(options.get("fullgraph", 0)) if isinstance(options, dict) else False
+     dynamic = None
+     if isinstance(options, dict) and "dynamic" in options:
+         dynamic = bool(options["dynamic"])
+ 
+     # Wrap the Klong function for torch
+     def wrapped_fn(v):
+         return _invoke_fn(klong, fn, [v])
+ 
+     return backend.compile_function(
+         wrapped_fn, example_input, None,
+         mode=mode, backend=compile_backend, fullgraph=fullgraph, dynamic=dynamic
+     )
+ 
+ 
+ def eval_sys_cmodes(klong):
+     """
+ 
+         .cmodes()                                         [Compile-Modes]
+ 
+         Get information about available torch.compile modes and backends.
+         Requires PyTorch backend (USE_TORCH=1).
+ 
+         Returns:
+             Dictionary with:
+                 "modes"           - Available compilation modes
+                 "backends"        - Available compilation backends
+                 "recommendations" - Suggested settings for common use cases
+ 
+         Examples:
+             info::.cmodes()
+             .p(info@"modes")             :" Print available modes
+             .p(info@"recommendations")   :" Print recommended settings
+ 
+         Mode Comparison:
+             | Mode            | Compile Time | Runtime Speed | Best For     |
+             |-----------------|--------------|---------------|--------------|
+             | default         | Medium       | Good          | General use  |
+             | reduce-overhead | Fast         | Moderate      | Development  |
+             | max-autotune    | Slow         | Best          | Production   |
+ 
+         Backend Comparison:
+             | Backend    | Description                                |
+             |------------|--------------------------------------------|
+             | inductor   | Default - C++/Triton code generation       |
+             | eager      | No compilation - for debugging             |
+             | cudagraphs | CUDA graphs - reduces GPU launch overhead  |
+ 
+     """
+     backend = klong._backend
+     if not backend.supports_autograd():
+         raise RuntimeError(
+             ".cmodes() requires PyTorch backend. "
+             "Run with USE_TORCH=1 environment variable."
+         )
+ 
+     return backend.get_compile_modes()
+ 
+ 
+ def eval_sys_gradcheck(klong, x, y):
+     """
+ 
+         .gradcheck(x;y)                                   [Gradcheck]
+ 
+         Verify that autograd gradients match numeric gradients.
+         Uses torch.autograd.gradcheck for rigorous verification.
+         Requires PyTorch backend (USE_TORCH=1).
+ 
+         Arguments:
+             x - Function to check (should return a scalar)
+             y - Input value or list of inputs to check
+ 
+         Returns:
+             1 if gradients are correct
+             Raises error if gradients don't match
+ 
+         Examples:
+             f::{x^2}
+             .gradcheck(f;3.0)        :" Returns 1
+ 
+             g::{(x@0^2)+(x@1^2)}
+             .gradcheck(g;[1.0 2.0])  :" Returns 1
+ 
+         Notes:
+             - Only supported with PyTorch backend
+             - Uses double precision (float64) when available (CPU/CUDA)
+             - Falls back to float32 with relaxed tolerances on MPS
+             - Useful for verifying custom gradient implementations
+ 
+     """
+     return klong._backend.klong_gradcheck(klong, x, y)
+ 
+ 
+ def create_system_functions_autograd():
+     """Create registry of autograd system functions."""
+     def _get_name(s):
+         i = s.index('.')
+         return s[i:i+s[i:].index('(')]
+ 
+     registry = {}
+     m = sys.modules[__name__]
+     for x in filter(lambda n: n.startswith("eval_sys_"), dir(m)):
+         fn = getattr(m, x)
+         registry[_get_name(fn.__doc__)] = fn
+ 
+     return registry
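
The hunk above defines the new autograd entry points. A minimal usage sketch follows, assuming the `KlongInterpreter` embedding API documented in the klongpy README and an available PyTorch install; the expected results are taken from the docstrings above, not from a recorded session.

import os
os.environ["USE_TORCH"] = "1"          # select the torch backend before importing klongpy

from klongpy import KlongInterpreter

klong = KlongInterpreter()
klong('f::{x^2}')
print(klong('.gradcheck(f;3.0)'))      # 1 if autograd and numeric gradients agree
klong('cf::.compile(f;3.0)')           # compile f, using 3.0 as the example input
print(klong('cf(5.0)'))                # 25.0, per the .compile() docstring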
klongpy/sys_fn_ipc.py CHANGED
@@ -8,10 +8,8 @@ import uuid
  from asyncio import StreamReader, StreamWriter
  from asyncio.exceptions import IncompleteReadError
  
- import numpy as np
- 
  from klongpy.core import (KGCall, KGFn, KGFnWrapper, KGLambda, KGSym,
-                           KlongException, get_fn_arity_str, is_list,
+                           KlongException, KLONG_UNDEFINED, get_fn_arity_str, is_list,
                            reserved_fn_args, reserved_fn_symbols, reserved_fn_symbol_map)
  
  
@@ -940,20 +938,31 @@ def eval_sys_fn_create_ipc_server(klong, x):
  
  
  class KGAsyncCall(KGLambda):
-     def __init__(self, klongloop, fn, cb):
+     def __init__(self, klongloop, fn, cb, klong):
          self.klongloop = klongloop
-         self.cb = cb
-         self.fn = fn
-         arity = fn.get_arity() if issubclass(type(self.fn), KGLambda) else fn.arity
+         self.klong = klong
+ 
+         # Wrap callbacks - KGFnWrapper now handles dynamic resolution automatically
+         self.fn = KGFnWrapper(klong, fn) if isinstance(fn, KGFn) else fn
+         self.cb = KGFnWrapper(klong, cb) if isinstance(cb, KGFn) else cb
+ 
+         arity = fn.get_arity() if issubclass(type(fn), KGLambda) else fn.arity
          self.args = [reserved_fn_symbol_map[x] for x in reserved_fn_args[:arity]]
  
      async def acall(self, klong, params):
+         # Execute the function based on its type
          if issubclass(type(self.fn), KGLambda):
              ctx = {reserved_fn_symbols[i]:params[i] for i in range(min(len(reserved_fn_args),len(params)))}
              r = self.fn(klong, ctx)
+         elif callable(self.fn):
+             r = self.fn(*params)
          else:
+             # Shouldn't reach here, but handle it
              r = klong.call(KGCall(self.fn.a, [*params], self.fn.arity))
-         self.cb(r)
+ 
+         # Invoke callback - KGFnWrapper handles dynamic resolution automatically
+         if self.cb is not None:
+             self.cb(r)
  
      def __call__(self, klong, ctx):
          params = [ctx[x] for x in self.args]
@@ -979,7 +988,8 @@ def eval_sys_fn_create_async_wrapper(klong, x, y):
          raise KlongException("y must be a function")
      system = klong['.system']
      klongloop = system['klongloop']
-     return KGAsyncCall(klongloop, x, KGFnWrapper(klong, y))
+     # KGAsyncCall will wrap the callbacks automatically
+     return KGAsyncCall(klongloop, x, y, klong)
  
  
  def create_system_functions_ipc():
@@ -999,12 +1009,9 @@ def create_system_functions_ipc():
  
  def create_system_var_ipc():
      # populate the .srv.* handlers with undefined values
-     # TODO: use real undefined value instead of np.inf
      registry = {
-         ".srv.o": np.inf,
-         ".srv.c": np.inf,
-         ".srv.e": np.inf,
+         ".srv.o": KLONG_UNDEFINED,
+         ".srv.c": KLONG_UNDEFINED,
+         ".srv.e": KLONG_UNDEFINED,
      }
      return registry
- 
- 
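
The KGAsyncCall changes hinge on "dynamic resolution": a callback given as a Klong function is wrapped so its symbol is resolved when the callback fires, not when the wrapper is built. A standalone sketch of that idea (an illustration only, not the actual KGFnWrapper implementation):

class LateBoundCallback:
    def __init__(self, namespace, name):
        self.namespace = namespace   # e.g. the interpreter's symbol table
        self.name = name

    def __call__(self, *args):
        # look the callback up on every call, so later redefinitions take effect
        return self.namespace[self.name](*args)

ns = {"cb": lambda r: print("old handler:", r)}
handler = LateBoundCallback(ns, "cb")
handler(1)                                   # old handler: 1
ns["cb"] = lambda r: print("new handler:", r)
handler(2)                                   # new handler: 2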
klongpy/sys_fn_timer.py CHANGED
@@ -54,6 +54,8 @@ def eval_sys_fn_timer(klong, x, y, z):
  
      The callback function returns 1 to continue, 0 to stop the timer.
  
+     If "z" is a named function, the timer re-resolves it on each tick so redefinitions take effect.
+ 
      Example:
  
          cb::{.p("hello")}
@@ -76,12 +78,20 @@ def eval_sys_fn_timer(klong, x, y, z):
      y = int(y)
      if y < 0:
          return "x must be a non-negative integer"
-     z = z if isinstance(z, KGCall) else KGFnWrapper(klong, z) if isinstance(z, KGFn) else z
-     if not callable(z):
+ 
+     # Wrap the callback - KGFnWrapper now handles dynamic resolution automatically
+     if isinstance(z, KGCall):
+         return "z must be a function (not a function call)"
+     if isinstance(z, KGFn):
+         callback = KGFnWrapper(klong, z)
+     elif callable(z):
+         callback = z
+     else:
          return "z must be a function"
+ 
      system = klong['.system']
      klongloop = system['klongloop']
-     return _call_periodic(klongloop, x, y, z)
+     return _call_periodic(klongloop, x, y, callback)
  
  
  def eval_sys_fn_cancel_timer(x):
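
The timer contract referenced in the docstring (callback returns 1 to keep firing, 0 to stop) can be sketched with plain asyncio; this illustrates the behavior only and is not klongpy's `_call_periodic`:

import asyncio

async def call_periodic(interval, callback):
    # fire the callback every `interval` seconds until it returns 0 (falsy)
    while True:
        await asyncio.sleep(interval)
        if not callback():
            break

ticks = 0

def cb():
    global ticks
    ticks += 1
    print("tick", ticks)
    return 1 if ticks < 3 else 0   # stop after the third tick

asyncio.run(call_periodic(0.1, cb))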