nebu 0.1.14__tar.gz → 0.1.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {nebu-0.1.14/src/nebu.egg-info → nebu-0.1.16}/PKG-INFO +1 -1
  2. {nebu-0.1.14 → nebu-0.1.16}/pyproject.toml +1 -1
  3. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/processors/consumer.py +20 -16
  4. nebu-0.1.16/src/nebu/processors/decorate.py +502 -0
  5. {nebu-0.1.14 → nebu-0.1.16/src/nebu.egg-info}/PKG-INFO +1 -1
  6. nebu-0.1.14/src/nebu/processors/decorate.py +0 -401
  7. {nebu-0.1.14 → nebu-0.1.16}/LICENSE +0 -0
  8. {nebu-0.1.14 → nebu-0.1.16}/README.md +0 -0
  9. {nebu-0.1.14 → nebu-0.1.16}/setup.cfg +0 -0
  10. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/__init__.py +0 -0
  11. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/auth.py +0 -0
  12. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/config.py +0 -0
  13. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/containers/container.py +0 -0
  14. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/containers/decorator.py +0 -0
  15. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/containers/models.py +0 -0
  16. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/containers/server.py +0 -0
  17. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/meta.py +0 -0
  18. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/processors/default.py +0 -0
  19. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/processors/models.py +0 -0
  20. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/processors/processor.py +0 -0
  21. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/redis/models.py +0 -0
  22. {nebu-0.1.14 → nebu-0.1.16}/src/nebu/services/service.py +0 -0
  23. {nebu-0.1.14 → nebu-0.1.16}/src/nebu.egg-info/SOURCES.txt +0 -0
  24. {nebu-0.1.14 → nebu-0.1.16}/src/nebu.egg-info/dependency_links.txt +0 -0
  25. {nebu-0.1.14 → nebu-0.1.16}/src/nebu.egg-info/requires.txt +0 -0
  26. {nebu-0.1.14 → nebu-0.1.16}/src/nebu.egg-info/top_level.txt +0 -0
  27. {nebu-0.1.14 → nebu-0.1.16}/tests/test_containers.py +0 -0
{nebu-0.1.14/src/nebu.egg-info → nebu-0.1.16}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nebu
-Version: 0.1.14
+Version: 0.1.16
 Summary: A globally distributed container runtime
 Requires-Python: >=3.10.14
 Description-Content-Type: text/markdown
{nebu-0.1.14 → nebu-0.1.16}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "nebu"
-version = "0.1.14"
+version = "0.1.16"
 description = "A globally distributed container runtime"
 readme = "README.md"
 requires-python = ">=3.10.14"
{nebu-0.1.14 → nebu-0.1.16}/src/nebu/processors/consumer.py
@@ -97,6 +97,26 @@ try:
     exec("from nebu.processors.models import *", local_namespace)
     exec("from nebu.processors.processor import *", local_namespace)

+    # Execute included object sources FIRST, as they might define types needed by others
+    print("[Consumer] Executing included object sources...")
+    for i, (obj_source, args_sources) in enumerate(included_object_sources):
+        try:
+            exec(obj_source, local_namespace)
+            print(f"[Consumer] Successfully executed included object {i} base source")
+            for j, arg_source in enumerate(args_sources):
+                try:
+                    exec(arg_source, local_namespace)
+                    print(
+                        f"[Consumer] Successfully executed included object {i} arg {j} source"
+                    )
+                except Exception as e:
+                    print(f"Error executing included object {i} arg {j} source: {e}")
+                    traceback.print_exc()
+        except Exception as e:
+            print(f"Error executing included object {i} base source: {e}")
+            traceback.print_exc()
+    print("[Consumer] Finished executing included object sources.")
+
     # First try to import the module to get any needed dependencies
     # This is a fallback in case the module is available
     module_name = os.environ.get("MODULE_NAME")
@@ -176,22 +196,6 @@ try:
         print(f"Error defining output model: {e}")
         traceback.print_exc()

-    # Execute included object sources
-    for i, (obj_source, args_sources) in enumerate(included_object_sources):
-        try:
-            exec(obj_source, local_namespace)
-            print(f"Successfully executed included object {i} base source")
-            for j, arg_source in enumerate(args_sources):
-                try:
-                    exec(arg_source, local_namespace)
-                    print(f"Successfully executed included object {i} arg {j} source")
-                except Exception as e:
-                    print(f"Error executing included object {i} arg {j} source: {e}")
-                    traceback.print_exc()
-        except Exception as e:
-            print(f"Error executing included object {i} base source: {e}")
-            traceback.print_exc()
-
     # Finally, execute the function code
     try:
         exec(function_source, local_namespace)
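
Note on the consumer.py change: it is purely an ordering fix. The INCLUDED_OBJECT_* sources are now exec'd into local_namespace before the input/output model sources, so any custom types they define already exist when the model definitions run. Below is a minimal sketch of the same pattern, assuming only the env-var naming that appears in this diff; load_included_sources is a hypothetical helper for illustration, not part of nebu:

    import os

    def load_included_sources(namespace: dict) -> None:
        # INCLUDED_OBJECT_{i}_SOURCE holds a class definition; the optional
        # INCLUDED_OBJECT_{i}_ARG_{j}_SOURCE vars hold sources for its
        # generic type arguments.
        i = 0
        while (obj_source := os.environ.get(f"INCLUDED_OBJECT_{i}_SOURCE")) is not None:
            exec(obj_source, namespace)  # define the base class
            j = 0
            while (arg_source := os.environ.get(f"INCLUDED_OBJECT_{i}_ARG_{j}_SOURCE")) is not None:
                exec(arg_source, namespace)  # define each generic argument type
                j += 1
            i += 1

    shared_ns: dict = {}
    load_included_sources(shared_ns)
    # Model sources exec'd after this point (INPUT_MODEL_SOURCE,
    # OUTPUT_MODEL_SOURCE, ...) can now resolve the included names.
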
nebu-0.1.16/src/nebu/processors/decorate.py (new file)
@@ -0,0 +1,502 @@
+import inspect
+import re  # Import re for fallback check
+import textwrap
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    TypeVar,
+    get_args,
+    get_origin,
+    get_type_hints,
+)
+
+from pydantic import BaseModel
+
+from nebu.containers.models import (
+    V1AuthzConfig,
+    V1ContainerRequest,
+    V1ContainerResources,
+    V1EnvVar,
+    V1Meter,
+    V1VolumePath,
+)
+from nebu.meta import V1ResourceMetaRequest
+from nebu.processors.models import (
+    V1Scale,
+    V1StreamMessage,
+)
+from nebu.processors.processor import Processor
+
+from .default import DEFAULT_MAX_REPLICAS, DEFAULT_MIN_REPLICAS, DEFAULT_SCALE
+
+T = TypeVar("T", bound=BaseModel)
+R = TypeVar("R", bound=BaseModel)
+
+
+def get_model_source(model_class: Any) -> Optional[str]:
+    """Get the source code of a model class."""
+    try:
+        source = inspect.getsource(model_class)
+        return textwrap.dedent(source)
+    except (IOError, TypeError):
+        print(f"[DEBUG get_model_source] Failed for: {model_class}")  # Added debug
+        return None
+
+
+def get_type_source(type_obj: Any) -> Optional[Any]:
+    """Get the source code for a type, including generic parameters."""
+    # If it's a class, get its source
+    if isinstance(type_obj, type):
+        return get_model_source(type_obj)
+
+    # If it's a GenericAlias (like V1StreamMessage[SomeType])
+    # Use get_origin and get_args for robustness
+    origin = get_origin(type_obj)
+    args = get_args(type_obj)
+
+    if origin is not None:
+        origin_source = get_model_source(origin)
+        args_sources = []
+
+        # Get sources for all type arguments
+        for arg in args:
+            arg_source = get_type_source(arg)
+            if arg_source:
+                args_sources.append(arg_source)
+
+        # Return tuple only if origin source and some arg sources were found
+        if origin_source or args_sources:
+            return (
+                origin_source,
+                args_sources,
+            )  # Return even if origin_source is None if args_sources exist
+
+    return None  # Fallback if not a class or recognizable generic alias
+
+
+def processor(
+    image: str,
+    setup_script: Optional[str] = None,
+    scale: V1Scale = DEFAULT_SCALE,
+    min_replicas: int = DEFAULT_MIN_REPLICAS,
+    max_replicas: int = DEFAULT_MAX_REPLICAS,
+    platform: Optional[str] = None,
+    accelerators: Optional[List[str]] = None,
+    namespace: Optional[str] = None,
+    labels: Optional[Dict[str, str]] = None,
+    env: Optional[List[V1EnvVar]] = None,
+    volumes: Optional[List[V1VolumePath]] = None,
+    resources: Optional[V1ContainerResources] = None,
+    meters: Optional[List[V1Meter]] = None,
+    authz: Optional[V1AuthzConfig] = None,
+    python_cmd: str = "python",
+    no_delete: bool = False,
+    include: Optional[List[Any]] = None,
+):
+    """
+    Decorator that converts a function into a Processor.
+
+    Args:
+        image: The container image to use for the processor
+        setup_script: Optional setup script to run before starting the processor
+        scale: Optional scaling configuration
+        min_replicas: Minimum number of replicas to maintain
+        max_replicas: Maximum number of replicas to scale to
+        platform: Optional compute platform to run on
+        accelerators: Optional list of accelerator types
+        namespace: Optional namespace for the processor
+        labels: Optional labels to apply to the processor
+        env: Optional environment variables
+        volumes: Optional volume mounts
+        resources: Optional resource requirements
+        meters: Optional metering configuration
+        authz: Optional authorization configuration
+        python_cmd: Optional python command to use
+        no_delete: Whether to prevent deleting the processor on updates
+        include: Optional list of Python objects whose source code should be included
+    """
+
+    def decorator(
+        func: Callable[[Any], Any],
+    ) -> Processor:  # Changed T/R to Any for broader compatibility
+        # Prepare environment variables early
+        all_env = env or []
+
+        # --- Process Included Objects First ---
+        included_sources: Dict[Any, Any] = {}  # Store source keyed by the object itself
+        if include:
+            print(f"[DEBUG Decorator] Processing included objects: {include}")
+            for i, obj in enumerate(include):
+                # Directly use get_model_source as include expects types/classes usually
+                obj_source = get_model_source(obj)
+                if obj_source:
+                    print(f"[DEBUG Decorator] Found source for included object: {obj}")
+                    included_sources[obj] = obj_source  # Store source by object
+                    # Add to env vars immediately (simplifies later logic)
+                    env_key = f"INCLUDED_OBJECT_{i}_SOURCE"
+                    all_env.append(V1EnvVar(key=env_key, value=obj_source))
+                    print(f"[DEBUG Decorator] Added {env_key} for {obj}")
+
+                else:
+                    # Optionally raise an error or log a warning if source can't be found
+                    print(
+                        f"Warning: Could not retrieve source via get_model_source for included object: {obj}. Decorator might fail if this type is needed but cannot be auto-detected."
+                    )
+            print(
+                f"[DEBUG Decorator] Finished processing included objects. Sources found: {len(included_sources)}"
+            )
+        # --- End Included Objects Processing ---
+
+        # Validate function signature
+        sig = inspect.signature(func)
+        params = list(sig.parameters.values())
+
+        if len(params) != 1:
+            raise TypeError(f"Function {func.__name__} must take exactly one parameter")
+
+        # Check parameter type hint
+        try:
+            # Use eval_str=True for forward references if needed, requires Python 3.10+ globals/locals
+            type_hints = get_type_hints(
+                func, globalns=func.__globals__, localns=None
+            )  # Pass globals
+        except Exception as e:
+            print(
+                f"[DEBUG Decorator] Error getting type hints for {func.__name__}: {e}"
+            )
+            raise TypeError(
+                f"Could not evaluate type hints for {func.__name__}. Ensure all types are defined or imported."
+            ) from e
+
+        param_name = params[0].name
+        if param_name not in type_hints:
+            raise TypeError(
+                f"Parameter {param_name} in function {func.__name__} must have a type annotation"
+            )
+        param_type = type_hints[param_name]
+
+        # --- Determine Input Type, Content Type, and is_stream_message ---
+        print(f"[DEBUG Decorator] Full type_hints: {type_hints}")
+        print(f"[DEBUG Decorator] Detected param_type: {param_type}")
+        origin = get_origin(param_type)
+        args = get_args(param_type)
+        print(f"[DEBUG Decorator] Param type origin (using get_origin): {origin}")
+        print(f"[DEBUG Decorator] Param type args (using get_args): {args}")
+        if origin:
+            print(
+                f"[DEBUG Decorator] Origin name: {getattr(origin, '__name__', 'N/A')}, module: {getattr(origin, '__module__', 'N/A')}"
+            )
+        print(
+            f"[DEBUG Decorator] V1StreamMessage name: {V1StreamMessage.__name__}, module: {V1StreamMessage.__module__}"
+        )
+
+        is_stream_message = False
+        content_type = None
+
+        # Check 1: Standard check using get_origin
+        if (
+            origin is not None
+            and origin.__name__ == V1StreamMessage.__name__
+            and origin.__module__ == V1StreamMessage.__module__
+        ):
+            is_stream_message = True
+            print("[DEBUG Decorator] V1StreamMessage detected via origin check.")
+            if args:
+                content_type = args[0]
+
+        # Check 2: Fallback check using string representation
+        elif origin is None:
+            type_str = str(param_type)
+            match = re.match(
+                r"<class 'nebu\.processors\.models\.V1StreamMessage\[(.*)\]\'>",
+                type_str,
+            )
+            if match:
+                print(
+                    "[DEBUG Decorator] V1StreamMessage detected via string regex check (origin/args failed)."
+                )
+                content_type_name = match.group(1)
+                print(
+                    f"[DEBUG Decorator] Manually parsed content_type name: {content_type_name}"
+                )
+                # Attempt to find the type
+                resolved_type = None
+                func_globals = func.__globals__
+                if content_type_name in func_globals:
+                    resolved_type = func_globals[content_type_name]
+                    print(
+                        f"[DEBUG Decorator] Found content type '{content_type_name}' in function globals."
+                    )
+                else:
+                    func_module = inspect.getmodule(func)
+                    if func_module and hasattr(func_module, content_type_name):
+                        resolved_type = getattr(func_module, content_type_name)
+                        print(
+                            f"[DEBUG Decorator] Found content type '{content_type_name}' in function module."
+                        )
+
+                if resolved_type:
+                    content_type = resolved_type
+                    is_stream_message = True  # Set flag *only if* resolved
+                else:
+                    print(
+                        f"[DEBUG Decorator] Fallback failed: Could not find type '{content_type_name}' in globals or module. Use 'include'."
+                    )
+            # else: Fallback regex did not match
+
+        # Check 3: Handle direct V1StreamMessage
+        elif param_type is V1StreamMessage:
+            print("[DEBUG Decorator] V1StreamMessage detected via direct type check.")
+            is_stream_message = True
+            # content_type remains None
+
+        print(f"[DEBUG Decorator] Final is_stream_message: {is_stream_message}")
+        print(f"[DEBUG Decorator] Final content_type: {content_type}")
+        # --- End Input Type Determination ---
+
+        # --- Validate Parameter Type is BaseModel ---
+        type_to_check_for_basemodel = None
+        if is_stream_message:
+            if content_type:
+                type_to_check_for_basemodel = content_type
+            # else: Base V1StreamMessage itself is a BaseModel, no need to check further
+        else:
+            type_to_check_for_basemodel = param_type
+
+        if type_to_check_for_basemodel:
+            actual_type_to_check = (
+                get_origin(type_to_check_for_basemodel) or type_to_check_for_basemodel
+            )
+            if not issubclass(actual_type_to_check, BaseModel):
+                raise TypeError(
+                    f"Parameter '{param_name}' effective type ({actual_type_to_check.__name__}) in function '{func.__name__}' must be a BaseModel subclass"
+                )
+        # --- End Parameter Validation ---
+
+        # --- Validate Return Type ---
+        if "return" not in type_hints:
+            raise TypeError(
+                f"Function {func.__name__} must have a return type annotation"
+            )
+        return_type = type_hints["return"]
+        actual_return_type = get_origin(return_type) or return_type
+        if not issubclass(actual_return_type, BaseModel):
+            raise TypeError(
+                f"Return value of function {func.__name__} must be a BaseModel subclass"
+            )
+        # --- End Return Type Validation ---
+
+        # --- Get Function Source ---
+        processor_name = func.__name__
+        try:
+            raw_function_source = inspect.getsource(func)
+            # ... (rest of source processing remains the same) ...
+            lines = raw_function_source.splitlines()
+            func_def_index = -1
+            decorator_lines = 0
+            in_decorator = False
+            for i, line in enumerate(lines):
+                stripped_line = line.strip()
+                if stripped_line.startswith("@"):
+                    in_decorator = True
+                    decorator_lines += 1
+                    continue  # Skip decorator line
+                if in_decorator and stripped_line.endswith(
+                    ")"
+                ):  # Simple check for end of decorator args
+                    in_decorator = False
+                    decorator_lines += 1
+                    continue
+                if in_decorator:
+                    decorator_lines += 1
+                    continue  # Skip multi-line decorator args
+
+                if stripped_line.startswith("def "):
+                    func_def_index = i
+                    break
+
+            if func_def_index != -1:
+                # Keep lines from the 'def' line onwards
+                function_source = "\n".join(lines[func_def_index:])
+            else:
+                raise ValueError(
+                    f"Could not find function definition 'def' in source for {func.__name__}"
+                )
+
+            print(
+                f"[DEBUG Decorator] Processed function source for {func.__name__}:\n{function_source[:200]}..."
+            )
+
+        except (IOError, TypeError) as e:
+            print(f"[DEBUG Decorator] Error getting source for {func.__name__}: {e}")
+            raise ValueError(
+                f"Could not retrieve source code for function {func.__name__}: {e}"
+            ) from e
+        # --- End Function Source ---
+
+        # --- Get Model Sources (Prioritizing Included) ---
+        input_model_source = None
+        output_model_source = None
+        content_type_source = None
+        stream_message_source = get_model_source(V1StreamMessage)  # Still get this
+
+        # Get content_type source (if applicable)
+        if is_stream_message and content_type:
+            if content_type in included_sources:
+                content_type_source = included_sources[content_type]
+                print(
+                    f"[DEBUG Decorator] Using included source for content_type: {content_type}"
+                )
+            else:
+                print(
+                    f"[DEBUG Decorator] Attempting get_type_source for content_type: {content_type}"
+                )
+                content_type_source = get_type_source(content_type)
+                if content_type_source is None:
+                    print(
+                        f"[DEBUG Decorator] Warning: get_type_source failed for content_type: {content_type}. Consumer might fail if not included."
+                    )
+
+        print(
+            f"[DEBUG Decorator] Final content_type_source: {str(content_type_source)[:100]}..."
+        )
+
+        # Get input_model source (which is V1StreamMessage if is_stream_message)
+        if is_stream_message:
+            input_model_source = (
+                stream_message_source  # Always use base stream message source
+            )
+        elif (
+            param_type in included_sources
+        ):  # Check if non-stream-message input type was included
+            input_model_source = included_sources[param_type]
+            print(
+                f"[DEBUG Decorator] Using included source for param_type: {param_type}"
+            )
+        else:  # Fallback for non-stream-message, non-included input type
+            print(
+                f"[DEBUG Decorator] Attempting get_type_source for param_type: {param_type}"
+            )
+            input_model_source = get_type_source(param_type)
+            if input_model_source is None:
+                print(
+                    f"[DEBUG Decorator] Warning: get_type_source failed for param_type: {param_type}. Consumer might fail if not included."
+                )
+        print(
+            f"[DEBUG Decorator] Final input_model_source: {str(input_model_source)[:100]}..."
+        )
+
+        # Get output_model source
+        if return_type in included_sources:
+            output_model_source = included_sources[return_type]
+            print(
+                f"[DEBUG Decorator] Using included source for return_type: {return_type}"
+            )
+        else:
+            print(
+                f"[DEBUG Decorator] Attempting get_type_source for return_type: {return_type}"
+            )
+            output_model_source = get_type_source(return_type)
+            if output_model_source is None:
+                print(
+                    f"[DEBUG Decorator] Warning: get_type_source failed for return_type: {return_type}. Consumer might fail if not included."
+                )
+        print(
+            f"[DEBUG Decorator] Final output_model_source: {str(output_model_source)[:100]}..."
+        )
+        # --- End Model Sources ---
+
+        # --- Populate Environment Variables ---
+        print("[DEBUG Decorator] Populating environment variables...")
+        all_env.append(V1EnvVar(key="FUNCTION_SOURCE", value=function_source))
+        all_env.append(V1EnvVar(key="FUNCTION_NAME", value=func.__name__))
+
+        # Add model source codes (handle tuples from get_type_source if necessary, although unlikely with prioritization)
+        def add_source_to_env(key_base: str, source: Any):
+            if source:
+                if isinstance(source, tuple):
+                    # This path is less likely now with include prioritization
+                    if source[0]:  # Origin source
+                        all_env.append(
+                            V1EnvVar(key=f"{key_base}_SOURCE", value=source[0])
+                        )
+                    for i, arg_source in enumerate(source[1]):  # Arg sources
+                        all_env.append(
+                            V1EnvVar(key=f"{key_base}_ARG_{i}_SOURCE", value=arg_source)
+                        )
+                else:  # Simple string source
+                    all_env.append(V1EnvVar(key=f"{key_base}_SOURCE", value=source))
+
+        add_source_to_env("INPUT_MODEL", input_model_source)
+        add_source_to_env("OUTPUT_MODEL", output_model_source)
+        add_source_to_env("CONTENT_TYPE", content_type_source)
+        add_source_to_env(
+            "STREAM_MESSAGE", stream_message_source
+        )  # Add base stream message source
+
+        # Type names for consumer validation/parsing
+        all_env.append(
+            V1EnvVar(
+                key="PARAM_TYPE_STR", value=str(param_type)
+            )  # Send string representation
+        )
+        all_env.append(
+            V1EnvVar(
+                key="RETURN_TYPE_STR", value=str(return_type)
+            )  # Send string representation
+        )
+        all_env.append(V1EnvVar(key="IS_STREAM_MESSAGE", value=str(is_stream_message)))
+        if content_type:
+            all_env.append(
+                V1EnvVar(key="CONTENT_TYPE_NAME", value=content_type.__name__)
+            )
+
+        all_env.append(V1EnvVar(key="MODULE_NAME", value=func.__module__))
+        # --- End Environment Variables ---
+
+        # --- Final Setup ---
+        metadata = V1ResourceMetaRequest(
+            name=processor_name, namespace=namespace, labels=labels
+        )
+        consumer_command = f"{python_cmd} -m nebu.processors.consumer"
+        final_command = f"{python_cmd} -m pip install redis nebu\n\n{setup_script or ''}\n\n{consumer_command}"
+
+        container_request = V1ContainerRequest(
+            image=image,
+            command=final_command,
+            env=all_env,
+            volumes=volumes,
+            accelerators=accelerators,
+            resources=resources,
+            meters=meters,
+            restart="Always",
+            authz=authz,
+            platform=platform,
+            metadata=metadata,
+        )
+        print("[DEBUG Decorator] Final Container Request Env Vars:")
+        for env_var in all_env:
+            print(
+                f"[DEBUG Decorator] {env_var.key}: {str(env_var.value)[:70]}..."
+            )  # Print key and start of value
+
+        processor_instance = Processor(
+            name=processor_name,
+            stream=processor_name,  # Default stream name to processor name
+            namespace=namespace,
+            labels=labels,
+            container=container_request,
+            schema_=None,
+            common_schema=None,
+            min_replicas=min_replicas,
+            max_replicas=max_replicas,
+            scale_config=scale,
+            no_delete=no_delete,
+        )
+
+        return processor_instance
+
+    return decorator
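
For context, a hedged usage sketch of the rewritten decorator. The model names and fields are illustrative only, and the sketch assumes V1StreamMessage is subscriptable with a content model (as the origin/regex checks above expect) and exposes the payload as a content attribute; note the decorator returns a Processor handle rather than the wrapped function:

    from pydantic import BaseModel

    from nebu.processors.decorate import processor
    from nebu.processors.models import V1StreamMessage

    class Prompt(BaseModel):      # hypothetical input payload
        text: str

    class Completion(BaseModel):  # hypothetical output model
        text: str

    # include=[Prompt] ships Prompt's source as INCLUDED_OBJECT_0_SOURCE, so the
    # consumer can exec it even when the string-regex fallback cannot resolve it.
    @processor(image="python:3.11-slim", include=[Prompt])
    def complete(msg: V1StreamMessage[Prompt]) -> Completion:
        return Completion(text=msg.content.text.upper())
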
{nebu-0.1.14 → nebu-0.1.16/src/nebu.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nebu
-Version: 0.1.14
+Version: 0.1.16
 Summary: A globally distributed container runtime
 Requires-Python: >=3.10.14
 Description-Content-Type: text/markdown
nebu-0.1.14/src/nebu/processors/decorate.py (deleted)
@@ -1,401 +0,0 @@
-import inspect
-import textwrap
-from typing import Any, Callable, Dict, List, Optional, TypeVar, get_type_hints
-
-from pydantic import BaseModel
-
-from nebu.containers.models import (
-    V1AuthzConfig,
-    V1ContainerRequest,
-    V1ContainerResources,
-    V1EnvVar,
-    V1Meter,
-    V1VolumePath,
-)
-from nebu.meta import V1ResourceMetaRequest
-from nebu.processors.models import (
-    V1Scale,
-    V1StreamMessage,
-)
-from nebu.processors.processor import Processor
-
-from .default import DEFAULT_MAX_REPLICAS, DEFAULT_MIN_REPLICAS, DEFAULT_SCALE
-
-T = TypeVar("T", bound=BaseModel)
-R = TypeVar("R", bound=BaseModel)
-
-
-def get_model_source(model_class: Any) -> Optional[str]:
-    """Get the source code of a model class."""
-    try:
-        source = inspect.getsource(model_class)
-        return textwrap.dedent(source)
-    except (IOError, TypeError):
-        return None
-
-
-def get_type_source(type_obj: Any) -> Optional[Any]:
-    """Get the source code for a type, including generic parameters."""
-    # If it's a class, get its source
-    if isinstance(type_obj, type):
-        return get_model_source(type_obj)
-
-    # If it's a GenericAlias (like V1StreamMessage[SomeType])
-    if hasattr(type_obj, "__origin__") and hasattr(type_obj, "__args__"):
-        origin_source = get_model_source(type_obj.__origin__)
-        args_sources = []
-
-        # Get sources for all type arguments
-        for arg in type_obj.__args__:
-            arg_source = get_type_source(arg)
-            if arg_source:
-                args_sources.append(arg_source)
-
-        return origin_source, args_sources
-
-    return None
-
-
-def processor(
-    image: str,
-    setup_script: Optional[str] = None,
-    scale: V1Scale = DEFAULT_SCALE,
-    min_replicas: int = DEFAULT_MIN_REPLICAS,
-    max_replicas: int = DEFAULT_MAX_REPLICAS,
-    platform: Optional[str] = None,
-    accelerators: Optional[List[str]] = None,
-    namespace: Optional[str] = None,
-    labels: Optional[Dict[str, str]] = None,
-    env: Optional[List[V1EnvVar]] = None,
-    volumes: Optional[List[V1VolumePath]] = None,
-    resources: Optional[V1ContainerResources] = None,
-    meters: Optional[List[V1Meter]] = None,
-    authz: Optional[V1AuthzConfig] = None,
-    python_cmd: str = "python",
-    no_delete: bool = False,
-    include: Optional[List[Any]] = None,
-):
-    """
-    Decorator that converts a function into a Processor.
-
-    Args:
-        image: The container image to use for the processor
-        setup_script: Optional setup script to run before starting the processor
-        scale: Optional scaling configuration
-        min_replicas: Minimum number of replicas to maintain
-        max_replicas: Maximum number of replicas to scale to
-        platform: Optional compute platform to run on
-        accelerators: Optional list of accelerator types
-        namespace: Optional namespace for the processor
-        labels: Optional labels to apply to the processor
-        env: Optional environment variables
-        volumes: Optional volume mounts
-        resources: Optional resource requirements
-        meters: Optional metering configuration
-        authz: Optional authorization configuration
-        python_cmd: Optional python command to use
-        no_delete: Whether to prevent deleting the processor on updates
-        include: Optional list of Python objects whose source code should be included
-    """
-
-    def decorator(func: Callable[[T], R]) -> Processor:
-        # Validate that the function takes a single parameter that is a BaseModel
-        sig = inspect.signature(func)
-        params = list(sig.parameters.values())
-
-        if len(params) != 1:
-            raise TypeError(f"Function {func.__name__} must take exactly one parameter")
-
-        # Check parameter type
-        type_hints = get_type_hints(func)
-        param_name = params[0].name
-        if param_name not in type_hints:
-            raise TypeError(
-                f"Parameter {param_name} in function {func.__name__} must have a type annotation"
-            )
-
-        param_type = type_hints[param_name]
-
-        # Check if input type is V1StreamMessage or a subclass
-        is_stream_message = False
-        content_type = None
-
-        # Handle generic V1StreamMessage
-        if (
-            hasattr(param_type, "__origin__")
-            and param_type.__origin__ == V1StreamMessage
-        ):
-            is_stream_message = True
-            # Extract the content type from V1StreamMessage[ContentType]
-            if hasattr(param_type, "__args__") and param_type.__args__:
-                content_type = param_type.__args__[0]
-        # Handle direct V1StreamMessage
-        elif param_type is V1StreamMessage:
-            is_stream_message = True
-
-        # Ensure the parameter is a BaseModel
-        actual_type = (
-            param_type.__origin__ if hasattr(param_type, "__origin__") else param_type  # type: ignore
-        )
-        if not issubclass(actual_type, BaseModel):
-            raise TypeError(
-                f"Parameter {param_name} in function {func.__name__} must be a BaseModel"
-            )
-
-        # Check return type
-        if "return" not in type_hints:
-            raise TypeError(
-                f"Function {func.__name__} must have a return type annotation"
-            )
-
-        return_type = type_hints["return"]
-        actual_return_type = (
-            return_type.__origin__
-            if hasattr(return_type, "__origin__")
-            else return_type
-        )
-        if not issubclass(actual_return_type, BaseModel):
-            raise TypeError(
-                f"Return value of function {func.__name__} must be a BaseModel"
-            )
-
-        # Get function name to use as processor name
-        processor_name = func.__name__
-
-        # Prepare environment variables
-        all_env = env or []
-
-        # Get the source code of the function
-        try:
-            raw_function_source = inspect.getsource(func)
-            print(
-                f"[DEBUG Decorator] Raw source for {func.__name__}:\n{raw_function_source}"
-            )
-
-            # Clean up the indentation
-            dedented_function_source = textwrap.dedent(raw_function_source)
-            print(
-                f"[DEBUG Decorator] Dedented source for {func.__name__}:\n{dedented_function_source}"
-            )
-
-            # Find the start of the function definition ('def')
-            # Skip lines starting with '@' or empty lines until 'def' is found
-            lines = dedented_function_source.splitlines()
-            func_def_index = -1
-            for i, line in enumerate(lines):
-                stripped_line = line.strip()
-                if stripped_line.startswith("def "):
-                    func_def_index = i
-                    break
-                # Simply continue if it's not the def line.
-                # This skips decorators and their arguments, regardless of multi-line formatting.
-                continue
-
-            if func_def_index != -1:
-                # Keep lines from the 'def' line onwards
-                function_source = "\n".join(
-                    lines[func_def_index:]
-                )  # Use \n for env var
-            else:
-                # If 'def' wasn't found (shouldn't happen with valid function source)
-                raise ValueError(
-                    f"Could not find function definition 'def' in source for {func.__name__}"
-                )
-
-            print(
-                f"[DEBUG Decorator] Processed function source for {func.__name__}:\n{function_source}"
-            )
-
-        except (IOError, TypeError) as e:
-            print(f"[DEBUG Decorator] Error getting source for {func.__name__}: {e}")
-            raise ValueError(
-                f"Could not retrieve source code for function {func.__name__}: {e}"
-            ) from e
-
-        # Get source code for the models
-        input_model_source = None
-        output_model_source = None
-        content_type_source = None
-
-        # Get the V1StreamMessage class source
-        stream_message_source = get_model_source(V1StreamMessage)
-
-        # Get input model source
-        if is_stream_message:
-            input_model_source = stream_message_source
-            if content_type:
-                content_type_source = get_type_source(content_type)
-        else:
-            input_model_source = get_type_source(param_type)
-
-        # Get output model source
-        output_model_source = get_type_source(return_type)
-
-        # Add function source code to environment variables
-        print(
-            f"[DEBUG Decorator] Setting FUNCTION_SOURCE: {function_source[:100]}..."
-        )  # Print first 100 chars
-        all_env.append(V1EnvVar(key="FUNCTION_SOURCE", value=function_source))
-        print(f"[DEBUG Decorator] Setting FUNCTION_NAME: {func.__name__}")
-        all_env.append(V1EnvVar(key="FUNCTION_NAME", value=func.__name__))
-
-        # Add model source codes
-        if input_model_source:
-            if isinstance(input_model_source, tuple):
-                all_env.append(
-                    V1EnvVar(key="INPUT_MODEL_SOURCE", value=input_model_source[0])
-                )
-                # Add generic args sources
-                for i, arg_source in enumerate(input_model_source[1]):
-                    all_env.append(
-                        V1EnvVar(key=f"INPUT_MODEL_ARG_{i}_SOURCE", value=arg_source)
-                    )
-            else:
-                all_env.append(
-                    V1EnvVar(key="INPUT_MODEL_SOURCE", value=input_model_source)
-                )
-
-        if output_model_source:
-            if isinstance(output_model_source, tuple):
-                all_env.append(
-                    V1EnvVar(key="OUTPUT_MODEL_SOURCE", value=output_model_source[0])
-                )
-                # Add generic args sources
-                for i, arg_source in enumerate(output_model_source[1]):
-                    all_env.append(
-                        V1EnvVar(key=f"OUTPUT_MODEL_ARG_{i}_SOURCE", value=arg_source)
-                    )
-            else:
-                all_env.append(
-                    V1EnvVar(key="OUTPUT_MODEL_SOURCE", value=output_model_source)
-                )
-
-        if stream_message_source:
-            all_env.append(
-                V1EnvVar(key="STREAM_MESSAGE_SOURCE", value=stream_message_source)
-            )
-
-        if content_type_source:
-            if isinstance(content_type_source, tuple):
-                all_env.append(
-                    V1EnvVar(key="CONTENT_TYPE_SOURCE", value=content_type_source[0])
-                )
-                # Add generic args sources for content type
-                for i, arg_source in enumerate(content_type_source[1]):
-                    all_env.append(
-                        V1EnvVar(key=f"CONTENT_TYPE_ARG_{i}_SOURCE", value=arg_source)
-                    )
-            else:
-                all_env.append(
-                    V1EnvVar(key="CONTENT_TYPE_SOURCE", value=content_type_source)
-                )
-
-        # Add included object sources
-        if include:
-            for i, obj in enumerate(include):
-                obj_source = get_type_source(
-                    obj
-                )  # Reuse existing function for source retrieval
-                if obj_source:
-                    if isinstance(obj_source, tuple):
-                        # Handle complex types (like generics) if needed, similar to models
-                        all_env.append(
-                            V1EnvVar(
-                                key=f"INCLUDED_OBJECT_{i}_SOURCE", value=obj_source[0]
-                            )
-                        )
-                        for j, arg_source in enumerate(obj_source[1]):
-                            all_env.append(
-                                V1EnvVar(
-                                    key=f"INCLUDED_OBJECT_{i}_ARG_{j}_SOURCE",
-                                    value=arg_source,
-                                )
-                            )
-                    else:
-                        all_env.append(
-                            V1EnvVar(
-                                key=f"INCLUDED_OBJECT_{i}_SOURCE", value=obj_source
-                            )
-                        )
-                else:
-                    # Optionally raise an error or log a warning if source can't be found
-                    print(
-                        f"Warning: Could not retrieve source for included object: {obj}"
-                    )
-
-        # Add parameter and return type info for runtime validation
-        all_env.append(
-            V1EnvVar(
-                key="PARAM_TYPE_NAME",
-                value=param_type.__name__
-                if hasattr(param_type, "__name__")
-                else str(param_type),
-            )
-        )
-        all_env.append(
-            V1EnvVar(
-                key="RETURN_TYPE_NAME",
-                value=return_type.__name__
-                if hasattr(return_type, "__name__")
-                else str(return_type),
-            )
-        )
-        all_env.append(V1EnvVar(key="IS_STREAM_MESSAGE", value=str(is_stream_message)))
-
-        if content_type:
-            all_env.append(
-                V1EnvVar(
-                    key="CONTENT_TYPE_NAME",
-                    value=content_type.__name__
-                    if hasattr(content_type, "__name__")
-                    else str(content_type),
-                )
-            )
-
-        # We still add the module for reference, but we won't rely on importing it
-        all_env.append(V1EnvVar(key="MODULE_NAME", value=func.__module__))
-
-        # Prepare metadata
-        metadata = V1ResourceMetaRequest(
-            name=processor_name, namespace=namespace, labels=labels
-        )
-
-        # Create the command to run the consumer directly
-        consumer_command = f"{python_cmd} -m nebu.processors.consumer"
-
-        final_command = f"{python_cmd} -m pip install redis nebu\n\n{setup_script}\n\n{consumer_command}"
-
-        # Create the V1ContainerRequest
-        container_request = V1ContainerRequest(
-            image=image,
-            command=final_command,
-            env=all_env,
-            volumes=volumes,
-            accelerators=accelerators,
-            resources=resources,
-            meters=meters,
-            restart="Always",
-            authz=authz,
-            platform=platform,
-            metadata=metadata,
-        )
-        print("container_request", container_request)
-
-        # Create the processor instance
-        processor_instance = Processor(
-            name=processor_name,
-            stream=processor_name,
-            namespace=namespace,
-            labels=labels,
-            container=container_request,
-            schema_=None,  # TODO
-            common_schema=None,
-            min_replicas=min_replicas,
-            max_replicas=max_replicas,
-            scale_config=scale,
-            no_delete=no_delete,
-        )
-
-        return processor_instance
-
-    return decorator
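
The substantive difference from the deleted module above is how generics are inspected: raw __origin__/__args__ attribute checks were replaced by typing.get_origin/get_args, with a string-regex fallback for cases where parametrization produces a real class and get_origin returns None (the situation the new module's "Check 2" handles). A standard-library sketch of why the helpers are the safer primitive; Box is an illustrative stand-in for a generic model such as V1StreamMessage:

    from typing import Generic, TypeVar, get_args, get_origin

    T = TypeVar("T")

    class Box(Generic[T]):
        pass

    alias = Box[int]

    # get_origin/get_args work uniformly on typing aliases and return
    # None/() on plain classes instead of raising.
    print(get_origin(alias), get_args(alias))  # <class '__main__.Box'> (<class 'int'>,)
    print(get_origin(Box), get_args(Box))      # None ()

    # By contrast, alias.__origin__ exists but Box.__origin__ raises
    # AttributeError, so hasattr-based probing ties detection to internals
    # that vary between typing aliases and concrete classes.
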