klongpy 0.6.9__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. klongpy/__init__.py +19 -1
  2. klongpy/adverbs.py +5 -5
  3. klongpy/autograd.py +308 -0
  4. klongpy/backend.py +167 -99
  5. klongpy/backends/__init__.py +94 -0
  6. klongpy/backends/base.py +320 -0
  7. klongpy/backends/numpy_backend.py +122 -0
  8. klongpy/backends/torch_backend.py +995 -0
  9. klongpy-0.6.9.data/scripts/kgpy → klongpy/cli.py +65 -88
  10. klongpy/core.py +228 -108
  11. klongpy/db/sys_fn_db.py +4 -3
  12. klongpy/dyads.py +159 -28
  13. klongpy/interpreter.py +31 -3
  14. klongpy/monads.py +39 -3
  15. klongpy/repl.py +21 -3
  16. klongpy/sys_fn.py +128 -17
  17. klongpy/sys_fn_autograd.py +290 -0
  18. klongpy/sys_fn_ipc.py +18 -6
  19. klongpy/sys_fn_timer.py +13 -3
  20. klongpy/web/sys_fn_web.py +14 -4
  21. klongpy-0.7.0.dist-info/METADATA +493 -0
  22. klongpy-0.7.0.dist-info/RECORD +48 -0
  23. {klongpy-0.6.9.dist-info → klongpy-0.7.0.dist-info}/WHEEL +1 -1
  24. klongpy-0.7.0.dist-info/entry_points.txt +2 -0
  25. {klongpy-0.6.9.dist-info → klongpy-0.7.0.dist-info}/top_level.txt +0 -1
  26. klongpy-0.6.9.dist-info/METADATA +0 -448
  27. klongpy-0.6.9.dist-info/RECORD +0 -77
  28. tests/__init__.py +0 -6
  29. tests/gen_join_over.py +0 -119
  30. tests/gen_py_suite.py +0 -77
  31. tests/gen_test_fn.py +0 -259
  32. tests/perf_async.py +0 -25
  33. tests/perf_avg.py +0 -18
  34. tests/perf_duckdb.py +0 -32
  35. tests/perf_gen.py +0 -38
  36. tests/perf_ipc_overhead.py +0 -34
  37. tests/perf_join.py +0 -53
  38. tests/perf_load.py +0 -17
  39. tests/perf_prog.py +0 -18
  40. tests/perf_serdes.py +0 -52
  41. tests/perf_sys_fn_db.py +0 -263
  42. tests/perf_vector.py +0 -40
  43. tests/test_accel.py +0 -227
  44. tests/test_df_cache.py +0 -85
  45. tests/test_eval_monad_list.py +0 -34
  46. tests/test_examples.py +0 -64
  47. tests/test_extra_suite.py +0 -382
  48. tests/test_file_cache.py +0 -185
  49. tests/test_interop.py +0 -180
  50. tests/test_kg_asarray.py +0 -94
  51. tests/test_kgtests.py +0 -65
  52. tests/test_known_bugs.py +0 -206
  53. tests/test_prog.py +0 -107
  54. tests/test_reshape_strings.py +0 -33
  55. tests/test_suite.py +0 -1480
  56. tests/test_suite_file.py +0 -153
  57. tests/test_sys_fn.py +0 -420
  58. tests/test_sys_fn_db.py +0 -88
  59. tests/test_sys_fn_ipc.py +0 -587
  60. tests/test_sys_fn_timer.py +0 -133
  61. tests/test_sys_fn_web.py +0 -50
  62. tests/test_util.py +0 -233
  63. tests/utils.py +0 -126
  64. {klongpy-0.6.9.dist-info → klongpy-0.7.0.dist-info}/licenses/LICENSE +0 -0
klongpy/sys_fn_autograd.py ADDED
@@ -0,0 +1,290 @@
+"""
+Autograd system functions for KlongPy.
+
+Provides .jacobian() for Jacobian matrix computation.
+Provides .compile() for function compilation and graph export (torch only).
+
+For optimizers (SGD, Adam, etc.), see examples/autograd/optimizers.py
+which can be copied to your project and customized.
+"""
+import sys
+
+from .autograd import jacobian_of_fn, _invoke_fn
+
+
+def eval_sys_jacobian(klong, x, y):
+    """
+
+        .jacobian(x;y) [Jacobian]
+
+        Compute Jacobian matrix of function x at point y.
+
+        For f: R^n -> R^m, returns m x n matrix where J[i,j] = df_i/dx_j.
+
+        Examples:
+            f::{[x@0^2 x@1^2]}
+            .jacobian(f;[1 2]) --> [[2 0] [0 4]]
+
+    """
+    return jacobian_of_fn(klong, x, y)
+
+
+def eval_sys_compile(klong, x, y):
+    """
+
+        .compile(x;y) [Compile]
+
+        Compile a function for optimized execution using torch.compile.
+        Requires PyTorch backend (USE_TORCH=1).
+
+        Arguments:
+            x - Function to compile
+            y - Example input for tracing the computation graph
+
+        Returns:
+            Compiled function (faster execution)
+
+        Examples:
+            f::{x^2}
+            cf::.compile(f;3.0) :" Returns compiled function
+            cf(5.0) :" 25.0 (optimized)
+
+        Notes:
+            - Only supported with PyTorch backend
+            - Raises error on NumPy backend
+            - See .export() for saving graphs to files
+
+    """
+    fn, example_input = x, y
+
+    backend = klong._backend
+    if not backend.supports_autograd():
+        raise RuntimeError(
+            ".compile() requires PyTorch backend. "
+            "Run with USE_TORCH=1 environment variable."
+        )
+
+    # Wrap the Klong function for torch
+    def wrapped_fn(v):
+        return _invoke_fn(klong, fn, [v])
+
+    return backend.compile_function(wrapped_fn, example_input, None)
+
+
+def eval_sys_export(klong, x, y, z):
+    """
+
+        .export(x;y;z) [Export]
+
+        Export a function's computation graph to a file for inspection.
+        Requires PyTorch backend (USE_TORCH=1).
+
+        Arguments:
+            x - Function to export
+            y - Example input for tracing the computation graph
+            z - Path to save the graph (.pt2 file)
+
+        Returns:
+            Dictionary with:
+                "compiled_fn" - The compiled function
+                "export_path" - Path where graph was saved
+                "graph" - String representation of computation graph
+
+        Examples:
+            f::{x^2}
+            info::.export(f;3.0;"model.pt2")
+            .p(info@"graph") :" Print computation graph
+
+        Notes:
+            - Only supported with PyTorch backend
+            - The exported graph can be loaded with torch.export.load()
+            - Use .compile() for just compiling without export
+
+    """
+    fn, example_input, output_path = x, y, z
+
+    backend = klong._backend
+    if not backend.supports_autograd():
+        raise RuntimeError(
+            ".export() requires PyTorch backend. "
+            "Run with USE_TORCH=1 environment variable."
+        )
+
+    # Wrap the Klong function for torch
+    def wrapped_fn(v):
+        return _invoke_fn(klong, fn, [v])
+
+    return backend.compile_function(wrapped_fn, example_input, output_path)
+
+
+def eval_sys_compilex(klong, x, y, z):
+    """
+
+        .compilex(x;y;z) [Compile-Extended]
+
+        Compile a function with extended options for mode and backend.
+        Requires PyTorch backend (USE_TORCH=1).
+
+        Arguments:
+            x - Function to compile
+            y - Example input for tracing the computation graph
+            z - Options dictionary with compile settings
+
+        Options (z):
+            "mode" - Compilation mode:
+                "default" - Balanced (default)
+                "reduce-overhead" - Faster compile, less optimization
+                "max-autotune" - Slower compile, best runtime
+            "backend" - Compilation backend:
+                "inductor" - Default with C++/Triton codegen
+                "eager" - No compilation (debugging)
+                "cudagraphs" - CUDA graphs (GPU only)
+            "fullgraph" - 1 to require full graph compilation
+            "dynamic" - 1 for dynamic shapes, 0 for static
+
+        Mode Comparison:
+            | Mode            | Compile | Runtime | Use Case          |
+            |-----------------|---------|---------|-------------------|
+            | default         | Medium  | Good    | General use       |
+            | reduce-overhead | Fast    | OK      | Development       |
+            | max-autotune    | Slow    | Best    | Production        |
+
+        Returns:
+            Compiled function
+
+        Examples:
+            f::{x^2}
+
+            :" Fast compilation for development
+            cf::.compilex(f;3.0;:{["mode" "reduce-overhead"]})
+
+            :" Maximum optimization for production
+            cf::.compilex(f;3.0;:{["mode" "max-autotune"]})
+
+            :" Debug mode (no compilation)
+            cf::.compilex(f;3.0;:{["backend" "eager"]})
+
+        Notes:
+            - Only supported with PyTorch backend
+            - Requires C++ compiler for inductor backend
+            - Use .cmodes() to see all available options
+
+    """
+    fn, example_input, options = x, y, z
+
+    backend = klong._backend
+    if not backend.supports_autograd():
+        raise RuntimeError(
+            ".compilex() requires PyTorch backend. "
+            "Run with USE_TORCH=1 environment variable."
+        )
+
+    # Extract options from dictionary
+    mode = options.get("mode", "default") if isinstance(options, dict) else "default"
+    compile_backend = options.get("backend", "inductor") if isinstance(options, dict) else "inductor"
+    fullgraph = bool(options.get("fullgraph", 0)) if isinstance(options, dict) else False
+    dynamic = None
+    if isinstance(options, dict) and "dynamic" in options:
+        dynamic = bool(options["dynamic"])
+
+    # Wrap the Klong function for torch
+    def wrapped_fn(v):
+        return _invoke_fn(klong, fn, [v])
+
+    return backend.compile_function(
+        wrapped_fn, example_input, None,
+        mode=mode, backend=compile_backend, fullgraph=fullgraph, dynamic=dynamic
+    )
+
+
+def eval_sys_cmodes(klong):
+    """
+
+        .cmodes() [Compile-Modes]
+
+        Get information about available torch.compile modes and backends.
+        Requires PyTorch backend (USE_TORCH=1).
+
+        Returns:
+            Dictionary with:
+                "modes" - Available compilation modes
+                "backends" - Available compilation backends
+                "recommendations" - Suggested settings for common use cases
+
+        Examples:
+            info::.cmodes()
+            .p(info@"modes") :" Print available modes
+            .p(info@"recommendations") :" Print recommended settings
+
+        Mode Comparison:
+            | Mode            | Compile Time | Runtime Speed | Best For     |
+            |-----------------|--------------|---------------|--------------|
+            | default         | Medium       | Good          | General use  |
+            | reduce-overhead | Fast         | Moderate      | Development  |
+            | max-autotune    | Slow         | Best          | Production   |
+
+        Backend Comparison:
+            | Backend    | Description                              |
+            |------------|------------------------------------------|
+            | inductor   | Default - C++/Triton code generation     |
+            | eager      | No compilation - for debugging           |
+            | cudagraphs | CUDA graphs - reduces GPU launch overhead|
+
+    """
+    backend = klong._backend
+    if not backend.supports_autograd():
+        raise RuntimeError(
+            ".cmodes() requires PyTorch backend. "
+            "Run with USE_TORCH=1 environment variable."
+        )
+
+    return backend.get_compile_modes()
+
+
+def eval_sys_gradcheck(klong, x, y):
+    """
+
+        .gradcheck(x;y) [Gradcheck]
+
+        Verify that autograd gradients match numeric gradients.
+        Uses torch.autograd.gradcheck for rigorous verification.
+        Requires PyTorch backend (USE_TORCH=1).
+
+        Arguments:
+            x - Function to check (should return a scalar)
+            y - Input value or list of inputs to check
+
+        Returns:
+            1 if gradients are correct
+            Raises error if gradients don't match
+
+        Examples:
+            f::{x^2}
+            .gradcheck(f;3.0) :" Returns 1
+
+            g::{(x@0^2)+(x@1^2)}
+            .gradcheck(g;[1.0 2.0]) :" Returns 1
+
+        Notes:
+            - Only supported with PyTorch backend
+            - Uses double precision (float64) when available (CPU/CUDA)
+            - Falls back to float32 with relaxed tolerances on MPS
+            - Useful for verifying custom gradient implementations
+
+    """
+    return klong._backend.klong_gradcheck(klong, x, y)
+
+
+def create_system_functions_autograd():
+    """Create registry of autograd system functions."""
+    def _get_name(s):
+        i = s.index('.')
+        return s[i:i+s[i:].index('(')]
+
+    registry = {}
+    m = sys.modules[__name__]
+    for x in filter(lambda n: n.startswith("eval_sys_"), dir(m)):
+        fn = getattr(m, x)
+        registry[_get_name(fn.__doc__)] = fn
+
+    return registry
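
The module above registers each system function under the name extracted from its docstring, so the new builtins are reachable as .jacobian, .compile, .export, .compilex, .cmodes, and .gradcheck. A minimal embedding sketch follows; it assumes USE_TORCH=1 is honored at import time to select the PyTorch backend, and the commented results repeat the docstrings above rather than verified runs.

    # Hedged usage sketch for the new autograd system functions.
    import os
    os.environ["USE_TORCH"] = "1"  # assumed: backend selection is read at import time

    from klongpy import KlongInterpreter

    klong = KlongInterpreter()

    # Jacobian of f: R^2 -> R^2, mirroring the .jacobian docstring
    klong('f::{[x@0^2 x@1^2]}')
    print(klong('.jacobian(f;[1 2])'))       # docstring says [[2 0] [0 4]]

    # Numeric gradient verification, mirroring the .gradcheck docstring
    klong('g::{(x@0^2)+(x@1^2)}')
    print(klong('.gradcheck(g;[1.0 2.0])'))  # docstring says 1

    # torch.compile a scalar function (torch backend only), per the .compile docstring
    klong('h::{x^2}')
    klong('ch::.compile(h;3.0)')
    print(klong('ch(5.0)'))                  # docstring says 25.0
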
klongpy/sys_fn_ipc.py CHANGED
@@ -940,20 +940,31 @@ def eval_sys_fn_create_ipc_server(klong, x):
 
 
 class KGAsyncCall(KGLambda):
-    def __init__(self, klongloop, fn, cb):
+    def __init__(self, klongloop, fn, cb, klong):
         self.klongloop = klongloop
-        self.cb = cb
-        self.fn = fn
-        arity = fn.get_arity() if issubclass(type(self.fn), KGLambda) else fn.arity
+        self.klong = klong
+
+        # Wrap callbacks - KGFnWrapper now handles dynamic resolution automatically
+        self.fn = KGFnWrapper(klong, fn) if isinstance(fn, KGFn) else fn
+        self.cb = KGFnWrapper(klong, cb) if isinstance(cb, KGFn) else cb
+
+        arity = fn.get_arity() if issubclass(type(fn), KGLambda) else fn.arity
         self.args = [reserved_fn_symbol_map[x] for x in reserved_fn_args[:arity]]
 
     async def acall(self, klong, params):
+        # Execute the function based on its type
         if issubclass(type(self.fn), KGLambda):
            ctx = {reserved_fn_symbols[i]:params[i] for i in range(min(len(reserved_fn_args),len(params)))}
            r = self.fn(klong, ctx)
+        elif callable(self.fn):
+            r = self.fn(*params)
         else:
+            # Shouldn't reach here, but handle it
            r = klong.call(KGCall(self.fn.a, [*params], self.fn.arity))
-        self.cb(r)
+
+        # Invoke callback - KGFnWrapper handles dynamic resolution automatically
+        if self.cb is not None:
+            self.cb(r)
 
     def __call__(self, klong, ctx):
         params = [ctx[x] for x in self.args]
@@ -979,7 +990,8 @@ def eval_sys_fn_create_async_wrapper(klong, x, y):
         raise KlongException("y must be a function")
     system = klong['.system']
     klongloop = system['klongloop']
-    return KGAsyncCall(klongloop, x, KGFnWrapper(klong, y))
+    # KGAsyncCall will wrap the callbacks automatically
+    return KGAsyncCall(klongloop, x, y, klong)
 
 
 def create_system_functions_ipc():
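
The change above routes KGFn callbacks through KGFnWrapper so that functions referenced by name are re-resolved at call time. A plain-Python toy (not klongpy internals; the names are illustrative) of why that late binding matters for callbacks that may be redefined after registration:

    # Toy illustration of early vs. late binding for a registered callback.
    registry = {"cb": lambda r: f"v1 saw {r}"}

    captured = registry["cb"]            # early binding: object frozen at registration

    def late_bound(r):
        return registry["cb"](r)         # late binding: looked up on every call

    registry["cb"] = lambda r: f"v2 saw {r}"   # redefine the callback afterwards

    print(captured(42))     # "v1 saw 42"  -- stale, pre-redefinition behavior
    print(late_bound(42))   # "v2 saw 42"  -- sees the redefinition, like KGFnWrapper
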
klongpy/sys_fn_timer.py CHANGED
@@ -54,6 +54,8 @@ def eval_sys_fn_timer(klong, x, y, z):
 
         The callback function returns 1 to continue, 0 to stop time timer.
 
+        If "z" is a named function, the timer re-resolves it on each tick so redefinitions take effect.
+
         Example:
 
             cb::{.p("hello")}
@@ -76,12 +78,20 @@ def eval_sys_fn_timer(klong, x, y, z):
     y= int(y)
     if y < 0:
         return "x must be a non-negative integer"
-    z = z if isinstance(z, KGCall) else KGFnWrapper(klong, z) if isinstance(z, KGFn) else z
-    if not callable(z):
+
+    # Wrap the callback - KGFnWrapper now handles dynamic resolution automatically
+    if isinstance(z, KGCall):
+        return "z must be a function (not a function call)"
+    if isinstance(z, KGFn):
+        callback = KGFnWrapper(klong, z)
+    elif callable(z):
+        callback = z
+    else:
        return "z must be a function"
+
    system = klong['.system']
    klongloop = system['klongloop']
-    return _call_periodic(klongloop, x, y, z)
+    return _call_periodic(klongloop, x, y, callback)
 
 
 def eval_sys_fn_cancel_timer(x):
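
For context on the contract the docstring describes (the callback returns 1 to continue and 0 to stop), here is a hedged asyncio sketch of a periodic caller; it is illustrative only and not klongpy's _call_periodic.

    import asyncio

    # Illustrative periodic caller: invoke `callback` every `interval` seconds
    # until it returns a falsy value (0 stops, 1 continues).
    async def call_periodic(interval, callback):
        while True:
            await asyncio.sleep(interval)
            if not callback():
                break

    counter = {"n": 0}

    def cb():
        counter["n"] += 1
        print("tick", counter["n"])
        return 1 if counter["n"] < 3 else 0   # continue twice, then stop

    asyncio.run(call_periodic(0.1, cb))        # prints tick 1, tick 2, tick 3
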
klongpy/web/sys_fn_web.py CHANGED
@@ -69,9 +69,14 @@ def eval_sys_fn_create_web_server(klong, x, y, z):
             if arity != 1:
                 logging.info(f"GET route {route} handler function requires arity 1, got {arity}")
                 continue
-            fn = fn if isinstance(fn, KGCall) else KGFnWrapper(klong, fn) if isinstance(fn, KGFn) else fn
 
-            async def _get(request: web.Request, fn=fn, route=route):
+            # Wrap function - KGFnWrapper now handles dynamic resolution automatically
+            if isinstance(fn, KGCall):
+                logging.info(f"GET route {route} handler cannot be a function call")
+                continue
+            fn_wrapped = KGFnWrapper(klong, fn) if isinstance(fn, KGFn) else fn
+
+            async def _get(request: web.Request, fn=fn_wrapped, route=route):
                 try:
                     assert request.method == "GET"
                     return web.Response(text=str(fn(dict(request.rel_url.query))))
@@ -88,9 +93,14 @@ def eval_sys_fn_create_web_server(klong, x, y, z):
             if arity != 1:
                 logging.info(f"POST route {route} handler function requires arity 1, got {arity}")
                 continue
-            fn = fn if isinstance(fn, KGCall) else KGFnWrapper(klong, fn) if isinstance(fn, KGFn) else fn
 
-            async def _post(request: web.Request, fn=fn):
+            # Wrap function - KGFnWrapper now handles dynamic resolution automatically
+            if isinstance(fn, KGCall):
+                logging.info(f"POST route {route} handler cannot be a function call")
+                continue
+            fn_wrapped = KGFnWrapper(klong, fn) if isinstance(fn, KGFn) else fn
+
+            async def _post(request: web.Request, fn=fn_wrapped, route=route):
                 try:
                     assert request.method == "POST"
                     parameters = dict(await request.post())
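
Both handler factories bind fn=fn_wrapped (and, after this change, route=route in _post as well) as default arguments when defining the closures inside the route loop. A small standalone Python illustration of why that pattern is needed; it is not the aiohttp handler code itself:

    # Closures defined in a loop capture variables by reference, so without
    # default-argument binding every handler would see the last iteration's values.
    late, bound = [], []
    for route in ("/a", "/b"):
        late.append(lambda: route)               # late binding: all return "/b"
        bound.append(lambda route=route: route)  # default arg: bound per iteration

    print([h() for h in late])    # ['/b', '/b']
    print([h() for h in bound])   # ['/a', '/b']
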