nebu 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
nebu/auth.py ADDED
@@ -0,0 +1,35 @@
+ from typing import Dict, Optional
+
+ import requests
+ from pydantic import BaseModel
+
+ from nebu.config import GlobalConfig
+
+
+ class V1UserProfile(BaseModel):
+     email: str
+     display_name: Optional[str] = None
+     handle: Optional[str] = None
+     picture: Optional[str] = None
+     organization: Optional[str] = None
+     role: Optional[str] = None
+     external_id: Optional[str] = None
+     actor: Optional[str] = None
+     # structure is {"org_id": {"org_name": <name>, "org_role": <role>}}
+     organizations: Optional[Dict[str, Dict[str, str]]] = None
+     created: Optional[int] = None
+     updated: Optional[int] = None
+     token: Optional[str] = None
+
+
+ def get_user_profile(api_key: str) -> V1UserProfile:
+     config = GlobalConfig.read()
+     current_server_config = config.get_current_server_config()
+     if current_server_config is None:
+         raise ValueError("No current server config found")
+     url = f"{current_server_config.server}/v1/users/me"
+
+     response = requests.get(url, headers={"Authorization": f"Bearer {api_key}"})
+     response.raise_for_status()
+
+     return V1UserProfile.model_validate(response.json())
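
For context, a minimal sketch of how the new auth helper might be called; the key value and the presence of a configured server are assumptions, not part of the package:

    from nebu.auth import get_user_profile

    # Assumes GlobalConfig already has a current server configured.
    profile = get_user_profile(api_key="nk-...")  # placeholder API key
    print(profile.email, profile.handle)
    # organizations maps org_id -> {"org_name": ..., "org_role": ...}
    for org_id, org in (profile.organizations or {}).items():
        print(org_id, org.get("org_name"), org.get("org_role"))
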
nebu/containers/models.py CHANGED
@@ -75,6 +75,7 @@ class V1ContainerStatus(BaseModel):
      accelerator: Optional[str] = None
      tailnet_url: Optional[str] = None
      cost_per_hr: Optional[float] = None
+     ready: Optional[bool] = None


  class V1AuthzSecretRef(BaseModel):
nebu/processors/consumer.py ADDED
@@ -0,0 +1,414 @@
+ #!/usr/bin/env python3
+ import json
+ import os
+ import sys
+ import time
+ import traceback
+ from datetime import datetime
+ from typing import Dict, TypeVar
+
+ import redis
+
+ # Define TypeVar for generic models
+ T = TypeVar("T")
+
+ # Get function and model source code and create them dynamically
+ try:
+     function_source = os.environ.get("FUNCTION_SOURCE")
+     function_name = os.environ.get("FUNCTION_NAME")
+     stream_message_source = os.environ.get("STREAM_MESSAGE_SOURCE")
+     input_model_source = os.environ.get("INPUT_MODEL_SOURCE")
+     output_model_source = os.environ.get("OUTPUT_MODEL_SOURCE")
+     content_type_source = os.environ.get("CONTENT_TYPE_SOURCE")
+     is_stream_message = os.environ.get("IS_STREAM_MESSAGE") == "True"
+     param_type_name = os.environ.get("PARAM_TYPE_NAME")
+     return_type_name = os.environ.get("RETURN_TYPE_NAME")
+     content_type_name = os.environ.get("CONTENT_TYPE_NAME")
+
+     # Check for generic type arguments
+     input_model_args = []
+     output_model_args = []
+     content_type_args = []
+
+     # Get input model arg sources
+     i = 0
+     while True:
+         arg_source = os.environ.get(f"INPUT_MODEL_ARG_{i}_SOURCE")
+         if arg_source:
+             input_model_args.append(arg_source)
+             i += 1
+         else:
+             break
+
+     # Get output model arg sources
+     i = 0
+     while True:
+         arg_source = os.environ.get(f"OUTPUT_MODEL_ARG_{i}_SOURCE")
+         if arg_source:
+             output_model_args.append(arg_source)
+             i += 1
+         else:
+             break
+
+     # Get content type arg sources
+     i = 0
+     while True:
+         arg_source = os.environ.get(f"CONTENT_TYPE_ARG_{i}_SOURCE")
+         if arg_source:
+             content_type_args.append(arg_source)
+             i += 1
+         else:
+             break
+
+     # Get included object sources
+     included_object_sources = []
+     i = 0
+     while True:
+         obj_source = os.environ.get(f"INCLUDED_OBJECT_{i}_SOURCE")
+         if obj_source:
+             args = []
+             j = 0
+             while True:
+                 arg_source = os.environ.get(f"INCLUDED_OBJECT_{i}_ARG_{j}_SOURCE")
+                 if arg_source:
+                     args.append(arg_source)
+                     j += 1
+                 else:
+                     break
+             included_object_sources.append((obj_source, args))
+             i += 1
+         else:
+             break
+
+     if not function_source or not function_name:
+         print("FUNCTION_SOURCE or FUNCTION_NAME environment variables not set")
+         sys.exit(1)
+
+     # Create a local namespace for executing the function
+     local_namespace = {}
+
+     # Include pydantic BaseModel and typing tools for type annotations
+     exec("from pydantic import BaseModel, Field", local_namespace)
+     exec(
+         "from typing import Optional, List, Dict, Any, Generic, TypeVar",
+         local_namespace,
+     )
+     exec("T = TypeVar('T')", local_namespace)
+
+     # First try to import the module to get any needed dependencies
+     # This is a fallback in case the module is available
+     module_name = os.environ.get("MODULE_NAME")
+     try:
+         if module_name:
+             exec(f"import {module_name}", local_namespace)
+             print(f"Successfully imported module {module_name}")
+     except Exception as e:
+         print(f"Warning: Could not import module {module_name}: {e}")
+         print(
+             "This is expected if running in a Jupyter notebook. Will use dynamic execution."
+         )
+
+     # Define the models
+     # First define stream message class if needed
+     if stream_message_source:
+         try:
+             exec(stream_message_source, local_namespace)
+             print("Successfully defined V1StreamMessage class")
+         except Exception as e:
+             print(f"Error defining V1StreamMessage: {e}")
+             traceback.print_exc()
+
+     # Define content type if available
+     if content_type_source:
+         try:
+             exec(content_type_source, local_namespace)
+             print(f"Successfully defined content type {content_type_name}")
+
+             # Define any content type args
+             for arg_source in content_type_args:
+                 try:
+                     exec(arg_source, local_namespace)
+                     print("Successfully defined content type argument")
+                 except Exception as e:
+                     print(f"Error defining content type argument: {e}")
+                     traceback.print_exc()
+         except Exception as e:
+             print(f"Error defining content type: {e}")
+             traceback.print_exc()
+
+     # Define input model if different from stream message
+     if input_model_source and (
+         not is_stream_message or input_model_source != stream_message_source
+     ):
+         try:
+             exec(input_model_source, local_namespace)
+             print(f"Successfully defined input model {param_type_name}")
+
+             # Define any input model args
+             for arg_source in input_model_args:
+                 try:
+                     exec(arg_source, local_namespace)
+                     print("Successfully defined input model argument")
+                 except Exception as e:
+                     print(f"Error defining input model argument: {e}")
+                     traceback.print_exc()
+         except Exception as e:
+             print(f"Error defining input model: {e}")
+             traceback.print_exc()
+
+     # Define output model
+     if output_model_source:
+         try:
+             exec(output_model_source, local_namespace)
+             print(f"Successfully defined output model {return_type_name}")
+
+             # Define any output model args
+             for arg_source in output_model_args:
+                 try:
+                     exec(arg_source, local_namespace)
+                     print("Successfully defined output model argument")
+                 except Exception as e:
+                     print(f"Error defining output model argument: {e}")
+                     traceback.print_exc()
+         except Exception as e:
+             print(f"Error defining output model: {e}")
+             traceback.print_exc()
+
+     # Execute included object sources
+     for i, (obj_source, args_sources) in enumerate(included_object_sources):
+         try:
+             exec(obj_source, local_namespace)
+             print(f"Successfully executed included object {i} base source")
+             for j, arg_source in enumerate(args_sources):
+                 try:
+                     exec(arg_source, local_namespace)
+                     print(f"Successfully executed included object {i} arg {j} source")
+                 except Exception as e:
+                     print(f"Error executing included object {i} arg {j} source: {e}")
+                     traceback.print_exc()
+         except Exception as e:
+             print(f"Error executing included object {i} base source: {e}")
+             traceback.print_exc()
+
+     # Finally, execute the function code
+     try:
+         exec(function_source, local_namespace)
+         target_function = local_namespace[function_name]
+         print(f"Successfully loaded function {function_name}")
+     except Exception as e:
+         print(f"Error creating function from source: {e}")
+         traceback.print_exc()
+         sys.exit(1)
+
+ except Exception as e:
+     print(f"Error setting up function: {e}")
+     traceback.print_exc()
+     sys.exit(1)
+
+ # Get Redis connection parameters from environment
+ REDIS_URL = os.environ.get("REDIS_URL", "")
+ REDIS_CONSUMER_GROUP = os.environ.get("REDIS_CONSUMER_GROUP")
+ REDIS_STREAM = os.environ.get("REDIS_STREAM")
+
+ if not all([REDIS_URL, REDIS_CONSUMER_GROUP, REDIS_STREAM]):
+     print("Missing required Redis environment variables")
+     sys.exit(1)
+
+ # Connect to Redis
+ try:
+     r = redis.from_url(REDIS_URL)
+     redis_info = REDIS_URL.split("@")[-1] if "@" in REDIS_URL else REDIS_URL
+     print(f"Connected to Redis at {redis_info}")
+ except Exception as e:
+     print(f"Failed to connect to Redis: {e}")
+     traceback.print_exc()
+     sys.exit(1)
+
+ # Create consumer group if it doesn't exist
+ try:
+     r.xgroup_create(REDIS_STREAM, REDIS_CONSUMER_GROUP, id="0", mkstream=True)
+     print(f"Created consumer group {REDIS_CONSUMER_GROUP} for stream {REDIS_STREAM}")
+ except redis.exceptions.ResponseError as e:
+     if "BUSYGROUP" in str(e):
+         print(f"Consumer group {REDIS_CONSUMER_GROUP} already exists")
+     else:
+         print(f"Error creating consumer group: {e}")
+         traceback.print_exc()
+
+
+ # Function to process messages
+ def process_message(message_id: bytes, message_data: Dict[bytes, bytes]) -> None:
+     # Initialize variables that need to be accessible in the except block
+     return_stream = None
+     user_id = None
+
+     try:
+         # Get the message content from field 'data'
+         if b"data" not in message_data:
+             print(f"Message {message_id} has no 'data' field")
+             return
+
+         # Parse the message data
+         raw_payload = json.loads(message_data[b"data"].decode("utf-8"))
+
+         # Extract fields from the Rust structure
+         # These fields are extracted for completeness and potential future use
+         _ = raw_payload.get("kind", "")  # kind
+         msg_id = raw_payload.get("id", "")  # msg_id
+         content_raw = raw_payload.get("content", {})
+         created_at = raw_payload.get("created_at", 0)  # created_at
+         return_stream = raw_payload.get("return_stream")
+         user_id = raw_payload.get("user_id")
+         orgs = raw_payload.get("organizations")  # organizations
+         handle = raw_payload.get("handle")  # handle
+         adapter = raw_payload.get("adapter")  # adapter
+
+         # Parse the content field if it's a string
+         if isinstance(content_raw, str):
+             try:
+                 content = json.loads(content_raw)
+             except json.JSONDecodeError:
+                 content = content_raw
+         else:
+             content = content_raw
+
+         # For StreamMessage, construct the proper input object
+         if is_stream_message and "V1StreamMessage" in local_namespace:
+             # If we have a content type, try to construct it
+             if content_type_name and content_type_name in local_namespace:
+                 # Try to create the content type model first
+                 try:
+                     content_model = local_namespace[content_type_name](**content)
+                     input_obj = local_namespace["V1StreamMessage"](
+                         kind=_,
+                         id=msg_id,
+                         content=content_model,
+                         created_at=created_at,
+                         return_stream=return_stream,
+                         user_id=user_id,
+                         orgs=orgs,
+                         handle=handle,
+                         adapter=adapter,
+                     )
+                 except Exception as e:
+                     print(f"Error creating content type model: {e}")
+                     # Fallback to using raw content
+                     input_obj = local_namespace["V1StreamMessage"](
+                         kind=_,
+                         id=msg_id,
+                         content=content,
+                         created_at=created_at,
+                         return_stream=return_stream,
+                         user_id=user_id,
+                         orgs=orgs,
+                         handle=handle,
+                         adapter=adapter,
+                     )
+             else:
+                 # Just use the raw content
+                 input_obj = local_namespace["V1StreamMessage"](
+                     kind=_,
+                     id=msg_id,
+                     content=content,
+                     created_at=created_at,
+                     return_stream=return_stream,
+                     user_id=user_id,
+                     orgs=orgs,
+                     handle=handle,
+                     adapter=adapter,
+                 )
+         else:
+             # Otherwise use the param type directly
+             try:
+                 if param_type_name in local_namespace:
+                     input_obj = local_namespace[param_type_name](**content)
+                 else:
+                     # If we can't find the exact type, just pass the content directly
+                     input_obj = content
+             except Exception as e:
+                 print(f"Error creating input model: {e}, using raw content")
+                 input_obj = content
+
+         # Execute the function
+         result = target_function(input_obj)
+
+         # If the result is a Pydantic model, convert to dict
+         if hasattr(result, "model_dump"):
+             result = result.model_dump()
+
+         # Prepare the response
+         response = {
+             "kind": "StreamResponseMessage",
+             "id": message_id.decode("utf-8"),
+             "content": result,
+             "status": "success",
+             "created_at": datetime.now().isoformat(),
+             "user_id": user_id,
+         }
+
+         # Send the result to the return stream
+         if return_stream:
+             r.xadd(return_stream, {"data": json.dumps(response)})
+             print(
+                 f"Processed message {message_id.decode('utf-8')}, result sent to {return_stream}"
+             )
+
+         # Acknowledge the message
+         r.xack(REDIS_STREAM, REDIS_CONSUMER_GROUP, message_id)
+
+     except Exception as e:
+         print(f"Error processing message {message_id.decode('utf-8')}: {e}")
+         traceback.print_exc()
+
+         # Prepare the error response
+         error_response = {
+             "kind": "StreamResponseMessage",
+             "id": message_id.decode("utf-8"),
+             "content": {
+                 "error": str(e),
+                 "traceback": traceback.format_exc(),
+             },
+             "status": "error",
+             "created_at": datetime.now().isoformat(),
+             "user_id": user_id,
+         }
+
+         # Send the error to the return stream
+         if return_stream:
+             r.xadd(return_stream, {"data": json.dumps(error_response)})
+         else:
+             r.xadd(f"{REDIS_STREAM}.errors", {"data": json.dumps(error_response)})
+
+         # Still acknowledge the message so we don't reprocess it
+         r.xack(REDIS_STREAM, REDIS_CONSUMER_GROUP, message_id)
+
+
+ # Main loop
+ print(f"Starting consumer for stream {REDIS_STREAM} in group {REDIS_CONSUMER_GROUP}")
+ consumer_name = f"consumer-{os.getpid()}"
+
+ while True:
+     try:
+         # Read from stream with blocking
+         streams = {REDIS_STREAM: ">"}  # '>' means read only new messages
+         messages = r.xreadgroup(
+             REDIS_CONSUMER_GROUP, consumer_name, streams, count=1, block=5000
+         )
+
+         if not messages:
+             # No messages received, continue waiting
+             continue
+
+         stream_name, stream_messages = messages[0]
+
+         for message_id, message_data in stream_messages:
+             process_message(message_id, message_data)
+
+     except redis.exceptions.ConnectionError as e:
+         print(f"Redis connection error: {e}")
+         time.sleep(5)  # Wait before retrying
+
+     except Exception as e:
+         print(f"Unexpected error: {e}")
+         traceback.print_exc()
+         time.sleep(1)  # Brief pause before continuing
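
To make the wire format concrete, here is a hypothetical producer that publishes one message in the shape this consumer parses; the stream name and field values are illustrative, not taken from the package:

    import json
    import redis

    r = redis.from_url("redis://localhost:6379")  # assumed local Redis
    payload = {
        "kind": "StreamMessage",
        "id": "msg-1",
        "content": {"text": "hello"},       # built into the content-type model if one was shipped
        "created_at": 1700000000,
        "return_stream": "results-stream",  # consumer xadds the success/error response here
        "user_id": "user-123",
    }
    # The consumer only reads the 'data' field and json-decodes it.
    r.xadd("my-processor-stream", {"data": json.dumps(payload)})
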
nebu/processors/decorate.py ADDED
@@ -0,0 +1,360 @@
+ import inspect
+ import textwrap
+ from typing import Any, Callable, Dict, List, Optional, TypeVar, get_type_hints
+
+ from pydantic import BaseModel
+
+ from nebu.containers.models import (
+     V1AuthzConfig,
+     V1ContainerRequest,
+     V1ContainerResources,
+     V1EnvVar,
+     V1Meter,
+     V1VolumePath,
+ )
+ from nebu.meta import V1ResourceMetaRequest
+ from nebu.processors.models import (
+     V1Scale,
+     V1StreamMessage,
+ )
+ from nebu.processors.processor import Processor
+
+ from .default import DEFAULT_MAX_REPLICAS, DEFAULT_MIN_REPLICAS, DEFAULT_SCALE
+
+ T = TypeVar("T", bound=BaseModel)
+ R = TypeVar("R", bound=BaseModel)
+
+
+ def get_model_source(model_class: Any) -> Optional[str]:
+     """Get the source code of a model class."""
+     try:
+         source = inspect.getsource(model_class)
+         return textwrap.dedent(source)
+     except (IOError, TypeError):
+         return None
+
+
+ def get_type_source(type_obj: Any) -> Optional[Any]:
+     """Get the source code for a type, including generic parameters."""
+     # If it's a class, get its source
+     if isinstance(type_obj, type):
+         return get_model_source(type_obj)
+
+     # If it's a GenericAlias (like V1StreamMessage[SomeType])
+     if hasattr(type_obj, "__origin__") and hasattr(type_obj, "__args__"):
+         origin_source = get_model_source(type_obj.__origin__)
+         args_sources = []
+
+         # Get sources for all type arguments
+         for arg in type_obj.__args__:
+             arg_source = get_type_source(arg)
+             if arg_source:
+                 args_sources.append(arg_source)
+
+         return origin_source, args_sources
+
+     return None
+
+
+ def processor(
+     image: str,
+     setup_script: Optional[str] = None,
+     scale: V1Scale = DEFAULT_SCALE,
+     min_replicas: int = DEFAULT_MIN_REPLICAS,
+     max_replicas: int = DEFAULT_MAX_REPLICAS,
+     platform: Optional[str] = None,
+     accelerators: Optional[List[str]] = None,
+     namespace: Optional[str] = None,
+     labels: Optional[Dict[str, str]] = None,
+     env: Optional[List[V1EnvVar]] = None,
+     volumes: Optional[List[V1VolumePath]] = None,
+     resources: Optional[V1ContainerResources] = None,
+     meters: Optional[List[V1Meter]] = None,
+     authz: Optional[V1AuthzConfig] = None,
+     python_cmd: str = "python",
+     no_delete: bool = False,
+     include: Optional[List[Any]] = None,
+ ):
+     """
+     Decorator that converts a function into a Processor.
+
+     Args:
+         image: The container image to use for the processor
+         setup_script: Optional setup script to run before starting the processor
+         scale: Optional scaling configuration
+         min_replicas: Minimum number of replicas to maintain
+         max_replicas: Maximum number of replicas to scale to
+         platform: Optional compute platform to run on
+         accelerators: Optional list of accelerator types
+         namespace: Optional namespace for the processor
+         labels: Optional labels to apply to the processor
+         env: Optional environment variables
+         volumes: Optional volume mounts
+         resources: Optional resource requirements
+         meters: Optional metering configuration
+         authz: Optional authorization configuration
+         python_cmd: Optional python command to use
+         no_delete: Whether to prevent deleting the processor on updates
+         include: Optional list of Python objects whose source code should be included
+     """
+
+     def decorator(func: Callable[[T], R]) -> Processor:
+         # Validate that the function takes a single parameter that is a BaseModel
+         sig = inspect.signature(func)
+         params = list(sig.parameters.values())
+
+         if len(params) != 1:
+             raise TypeError(f"Function {func.__name__} must take exactly one parameter")
+
+         # Check parameter type
+         type_hints = get_type_hints(func)
+         param_name = params[0].name
+         if param_name not in type_hints:
+             raise TypeError(
+                 f"Parameter {param_name} in function {func.__name__} must have a type annotation"
+             )
+
+         param_type = type_hints[param_name]
+
+         # Check if input type is V1StreamMessage or a subclass
+         is_stream_message = False
+         content_type = None
+
+         # Handle generic V1StreamMessage
+         if (
+             hasattr(param_type, "__origin__")
+             and param_type.__origin__ == V1StreamMessage
+         ):
+             is_stream_message = True
+             # Extract the content type from V1StreamMessage[ContentType]
+             if hasattr(param_type, "__args__") and param_type.__args__:
+                 content_type = param_type.__args__[0]
+         # Handle direct V1StreamMessage
+         elif param_type is V1StreamMessage:
+             is_stream_message = True
+
+         # Ensure the parameter is a BaseModel
+         actual_type = (
+             param_type.__origin__ if hasattr(param_type, "__origin__") else param_type  # type: ignore
+         )
+         if not issubclass(actual_type, BaseModel):
+             raise TypeError(
+                 f"Parameter {param_name} in function {func.__name__} must be a BaseModel"
+             )
+
+         # Check return type
+         if "return" not in type_hints:
+             raise TypeError(
+                 f"Function {func.__name__} must have a return type annotation"
+             )
+
+         return_type = type_hints["return"]
+         actual_return_type = (
+             return_type.__origin__
+             if hasattr(return_type, "__origin__")
+             else return_type
+         )
+         if not issubclass(actual_return_type, BaseModel):
+             raise TypeError(
+                 f"Return value of function {func.__name__} must be a BaseModel"
+             )
+
+         # Get function name to use as processor name
+         processor_name = func.__name__
+
+         # Prepare environment variables
+         all_env = env or []
+
+         # Get the source code of the function
+         try:
+             function_source = inspect.getsource(func)
+             # Clean up the indentation
+             function_source = textwrap.dedent(function_source)
+         except (IOError, TypeError):
+             raise ValueError(
+                 f"Could not retrieve source code for function {func.__name__}"
+             )
+
+         # Get source code for the models
+         input_model_source = None
+         output_model_source = None
+         content_type_source = None
+
+         # Get the V1StreamMessage class source
+         stream_message_source = get_model_source(V1StreamMessage)
+
+         # Get input model source
+         if is_stream_message:
+             input_model_source = stream_message_source
+             if content_type:
+                 content_type_source = get_type_source(content_type)
+         else:
+             input_model_source = get_type_source(param_type)
+
+         # Get output model source
+         output_model_source = get_type_source(return_type)
+
+         # Add function source code to environment variables
+         all_env.append(V1EnvVar(key="FUNCTION_SOURCE", value=function_source))
+         all_env.append(V1EnvVar(key="FUNCTION_NAME", value=func.__name__))
+
+         # Add model source codes
+         if input_model_source:
+             if isinstance(input_model_source, tuple):
+                 all_env.append(
+                     V1EnvVar(key="INPUT_MODEL_SOURCE", value=input_model_source[0])
+                 )
+                 # Add generic args sources
+                 for i, arg_source in enumerate(input_model_source[1]):
+                     all_env.append(
+                         V1EnvVar(key=f"INPUT_MODEL_ARG_{i}_SOURCE", value=arg_source)
+                     )
+             else:
+                 all_env.append(
+                     V1EnvVar(key="INPUT_MODEL_SOURCE", value=input_model_source)
+                 )
+
+         if output_model_source:
+             if isinstance(output_model_source, tuple):
+                 all_env.append(
+                     V1EnvVar(key="OUTPUT_MODEL_SOURCE", value=output_model_source[0])
+                 )
+                 # Add generic args sources
+                 for i, arg_source in enumerate(output_model_source[1]):
+                     all_env.append(
+                         V1EnvVar(key=f"OUTPUT_MODEL_ARG_{i}_SOURCE", value=arg_source)
+                     )
+             else:
+                 all_env.append(
+                     V1EnvVar(key="OUTPUT_MODEL_SOURCE", value=output_model_source)
+                 )
+
+         if stream_message_source:
+             all_env.append(
+                 V1EnvVar(key="STREAM_MESSAGE_SOURCE", value=stream_message_source)
+             )
+
+         if content_type_source:
+             if isinstance(content_type_source, tuple):
+                 all_env.append(
+                     V1EnvVar(key="CONTENT_TYPE_SOURCE", value=content_type_source[0])
+                 )
+                 # Add generic args sources for content type
+                 for i, arg_source in enumerate(content_type_source[1]):
+                     all_env.append(
+                         V1EnvVar(key=f"CONTENT_TYPE_ARG_{i}_SOURCE", value=arg_source)
+                     )
+             else:
+                 all_env.append(
+                     V1EnvVar(key="CONTENT_TYPE_SOURCE", value=content_type_source)
+                 )
+
+         # Add included object sources
+         if include:
+             for i, obj in enumerate(include):
+                 obj_source = get_type_source(
+                     obj
+                 )  # Reuse existing function for source retrieval
+                 if obj_source:
+                     if isinstance(obj_source, tuple):
+                         # Handle complex types (like generics) if needed, similar to models
+                         all_env.append(
+                             V1EnvVar(
+                                 key=f"INCLUDED_OBJECT_{i}_SOURCE", value=obj_source[0]
+                             )
+                         )
+                         for j, arg_source in enumerate(obj_source[1]):
+                             all_env.append(
+                                 V1EnvVar(
+                                     key=f"INCLUDED_OBJECT_{i}_ARG_{j}_SOURCE",
+                                     value=arg_source,
+                                 )
+                             )
+                     else:
+                         all_env.append(
+                             V1EnvVar(
+                                 key=f"INCLUDED_OBJECT_{i}_SOURCE", value=obj_source
+                             )
+                         )
+                 else:
+                     # Optionally raise an error or log a warning if source can't be found
+                     print(
+                         f"Warning: Could not retrieve source for included object: {obj}"
+                     )
+
+         # Add parameter and return type info for runtime validation
+         all_env.append(
+             V1EnvVar(
+                 key="PARAM_TYPE_NAME",
+                 value=param_type.__name__
+                 if hasattr(param_type, "__name__")
+                 else str(param_type),
+             )
+         )
+         all_env.append(
+             V1EnvVar(
+                 key="RETURN_TYPE_NAME",
+                 value=return_type.__name__
+                 if hasattr(return_type, "__name__")
+                 else str(return_type),
+             )
+         )
+         all_env.append(V1EnvVar(key="IS_STREAM_MESSAGE", value=str(is_stream_message)))
+
+         if content_type:
+             all_env.append(
+                 V1EnvVar(
+                     key="CONTENT_TYPE_NAME",
+                     value=content_type.__name__
+                     if hasattr(content_type, "__name__")
+                     else str(content_type),
+                 )
+             )
+
+         # We still add the module for reference, but we won't rely on importing it
+         all_env.append(V1EnvVar(key="MODULE_NAME", value=func.__module__))
+
+         # Prepare metadata
+         metadata = V1ResourceMetaRequest(
+             name=processor_name, namespace=namespace, labels=labels
+         )
+
+         # Create the command to run the consumer directly
+         consumer_command = f"{python_cmd} -m nebu.processors.consumer"
+
+         final_command = f"{python_cmd} -m pip install redis nebu\n\n{setup_script}\n\n{consumer_command}"
+
+         # Create the V1ContainerRequest
+         container_request = V1ContainerRequest(
+             image=image,
+             command=final_command,
+             env=all_env,
+             volumes=volumes,
+             accelerators=accelerators,
+             resources=resources,
+             meters=meters,
+             restart="Always",
+             authz=authz,
+             platform=platform,
+             metadata=metadata,
+         )
+         print("container_request", container_request)
+
+         # Create the processor instance
+         processor_instance = Processor(
+             name=processor_name,
+             stream=processor_name,
+             namespace=namespace,
+             labels=labels,
+             container=container_request,
+             schema_=None,  # TODO
+             common_schema=None,
+             min_replicas=min_replicas,
+             max_replicas=max_replicas,
+             scale_config=scale,
+             no_delete=no_delete,
+         )
+
+         return processor_instance
+
+     return decorator
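
A hypothetical end-to-end sketch of the decorator; the image, models, and function are placeholders (note that applying the decorator contacts the server, since it constructs a Processor):

    from pydantic import BaseModel
    from nebu.processors.decorate import processor

    class Prompt(BaseModel):
        text: str

    class Reply(BaseModel):
        text: str

    @processor(image="python:3.11-slim")
    def echo(msg: Prompt) -> Reply:
        return Reply(text=msg.text.upper())

    # `echo` is now a Processor. Its container runs
    # `python -m nebu.processors.consumer`, with the function and model
    # sources delivered via FUNCTION_SOURCE, INPUT_MODEL_SOURCE, etc.
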
nebu/processors/default.py ADDED
@@ -0,0 +1,18 @@
+ from nebu.processors.models import V1Scale, V1ScaleDown, V1ScaleUp, V1ScaleZero
+
+ DEFAULT_SCALE = V1Scale(
+     up=V1ScaleUp(
+         above_pressure=30,
+         duration="1m",
+     ),
+     down=V1ScaleDown(
+         below_pressure=2,
+         duration="1m",
+     ),
+     zero=V1ScaleZero(
+         duration="5m",
+     ),
+ )
+
+ DEFAULT_MIN_REPLICAS = 1
+ DEFAULT_MAX_REPLICAS = 10
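
A processor can presumably override these defaults by passing its own V1Scale to the decorator; a sketch with illustrative thresholds:

    from nebu.processors.models import V1Scale, V1ScaleDown, V1ScaleUp

    # React to pressure quickly and never scale to zero.
    AGGRESSIVE_SCALE = V1Scale(
        up=V1ScaleUp(above_pressure=10, duration="30s"),
        down=V1ScaleDown(below_pressure=1, duration="5m"),
        zero=None,
    )
    # e.g. @processor(image="python:3.11-slim", scale=AGGRESSIVE_SCALE)
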
nebu/processors/models.py CHANGED
@@ -1,16 +1,13 @@
- from typing import Any, Optional
+ from typing import Any, Generic, List, Optional, TypeVar

- from pydantic import BaseModel, Field
+ from pydantic import BaseModel

- from nebu.containers.models import V1Container
- from nebu.meta import V1ResourceMeta, V1ResourceMetaRequest
+ # Assuming these are imported from other modules
+ from nebu.containers.models import V1ContainerRequest
+ from nebu.meta import V1ResourceMeta, V1ResourceMetaRequest, V1ResourceReference

- # If these are in another module, import them as:
- # from .containers import V1Container, V1ResourceMeta, V1ResourceMetaRequest
- # For demonstration, simply assume they're available in scope:
- # class V1Container(BaseModel): ...
- # class V1ResourceMeta(BaseModel): ...
- # class V1ResourceMetaRequest(BaseModel): ...
+ # Type variable for content that must be a BaseModel
+ T = TypeVar("T", bound=BaseModel)


  class V1ProcessorStatus(BaseModel):
@@ -39,29 +36,100 @@ class V1Scale(BaseModel):
      zero: Optional[V1ScaleZero] = None


- DEFAULT_PROCESSOR_KIND = "Processor"
-
-
  class V1Processor(BaseModel):
-     kind: str = Field(default=DEFAULT_PROCESSOR_KIND)
+     kind: str = "Processor"
      metadata: V1ResourceMeta
-     container: Optional["V1Container"] = None
-     stream: Optional[str] = None
-     schema_: Optional[Any] = None  # Or Dict[str, Any], if you know the schema format
+     container: Optional[V1ContainerRequest] = None
+     stream: str
+     schema_: Optional[Any] = None
      common_schema: Optional[str] = None
      min_replicas: Optional[int] = None
      max_replicas: Optional[int] = None
      scale: Optional[V1Scale] = None
      status: Optional[V1ProcessorStatus] = None

+     def to_resource_reference(self) -> V1ResourceReference:
+         return V1ResourceReference(
+             kind=self.kind,
+             name=self.metadata.name,
+             namespace=self.metadata.namespace,
+         )
+

  class V1ProcessorRequest(BaseModel):
-     kind: str = Field(default=DEFAULT_PROCESSOR_KIND)
+     kind: str = "Processor"
      metadata: V1ResourceMetaRequest
-     container: Optional["V1Container"] = None
-     stream: Optional[str] = None
+     container: Optional[V1ContainerRequest] = None
      schema_: Optional[Any] = None
      common_schema: Optional[str] = None
      min_replicas: Optional[int] = None
      max_replicas: Optional[int] = None
      scale: Optional[V1Scale] = None
+
+
+ class V1Processors(BaseModel):
+     processors: List[V1Processor] = []
+
+
+ class V1ProcessorScaleRequest(BaseModel):
+     replicas: Optional[int] = None
+     min_replicas: Optional[int] = None
+
+
+ class V1UpdateProcessor(BaseModel):
+     kind: Optional[str] = None
+     metadata: Optional[V1ResourceMetaRequest] = None
+     container: Optional[V1ContainerRequest] = None
+     stream: Optional[str] = None
+     min_replicas: Optional[int] = None
+     max_replicas: Optional[int] = None
+     scale: Optional[V1Scale] = None
+     schema_: Optional[Any] = None
+     common_schema: Optional[str] = None
+     no_delete: Optional[bool] = None
+
+
+ class V1StreamData(BaseModel):
+     content: Any = None
+     wait: Optional[bool] = None
+
+
+ class V1StreamMessage(Generic[T], BaseModel):
+     kind: str = "StreamMessage"
+     id: str
+     content: Optional[T] = None
+     created_at: int
+     return_stream: Optional[str] = None
+     user_id: Optional[str] = None
+     orgs: Optional[Any] = None
+     handle: Optional[str] = None
+     adapter: Optional[str] = None
+
+
+ class V1StreamResponseMessage(BaseModel):
+     kind: str = "StreamResponseMessage"
+     id: str
+     content: Any = None
+     status: Optional[str] = None
+     created_at: int
+     user_id: Optional[str] = None
+
+
+ class V1OpenAIStreamMessage(BaseModel):
+     kind: str = "OpenAIStreamMessage"
+     id: str
+     content: Any  # Using Any for ChatCompletionRequest
+     created_at: int
+     return_stream: Optional[str] = None
+     user_id: Optional[str] = None
+     orgs: Optional[Any] = None
+     handle: Optional[str] = None
+     adapter: Optional[str] = None
+
+
+ class V1OpenAIStreamResponse(BaseModel):
+     kind: str = "OpenAIStreamResponse"
+     id: str
+     content: Any  # Using Any for ChatCompletionResponse
+     created_at: int
+     user_id: Optional[str] = None
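
A short sketch of parameterizing the new generic message model; the Job content model is hypothetical:

    from pydantic import BaseModel
    from nebu.processors.models import V1StreamMessage

    class Job(BaseModel):
        url: str

    msg = V1StreamMessage[Job](
        id="msg-1",
        content=Job(url="https://example.com"),
        created_at=1700000000,
    )
    assert msg.kind == "StreamMessage"
    assert msg.content and msg.content.url == "https://example.com"
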
nebu/processors/processor.py CHANGED
@@ -0,0 +1,277 @@
+ from typing import Any, Dict, List, Optional
+
+ import requests
+
+ from nebu.auth import get_user_profile
+ from nebu.config import GlobalConfig
+ from nebu.meta import V1ResourceMetaRequest
+ from nebu.processors.models import (
+     V1ContainerRequest,
+     V1Processor,
+     V1ProcessorRequest,
+     V1Processors,
+     V1ProcessorScaleRequest,
+     V1Scale,
+     V1UpdateProcessor,
+ )
+
+
+ class Processor:
+     """
+     A class for managing Processor instances.
+     """
+
+     def __init__(
+         self,
+         name: str,
+         stream: str,
+         namespace: Optional[str] = None,
+         labels: Optional[Dict[str, str]] = None,
+         container: Optional[V1ContainerRequest] = None,
+         schema_: Optional[Any] = None,
+         common_schema: Optional[str] = None,
+         min_replicas: Optional[int] = None,
+         max_replicas: Optional[int] = None,
+         scale_config: Optional[V1Scale] = None,
+         config: Optional[GlobalConfig] = None,
+         no_delete: bool = False,
+     ):
+         self.config = config or GlobalConfig.read()
+         if not self.config:
+             raise ValueError("No config found")
+         current_server = self.config.get_current_server_config()
+         if not current_server:
+             raise ValueError("No server config found")
+         self.current_server = current_server
+         self.api_key = current_server.api_key
+         self.orign_host = current_server.server
+         self.name = name
+         self.namespace = namespace
+         self.labels = labels
+         self.stream = stream
+         self.container = container
+         self.schema_ = schema_
+         self.common_schema = common_schema
+         self.min_replicas = min_replicas
+         self.max_replicas = max_replicas
+         self.scale_config = scale_config
+         self.processors_url = f"{self.orign_host}/v1/processors"
+
+         # Fetch existing Processors
+         response = requests.get(
+             self.processors_url, headers={"Authorization": f"Bearer {self.api_key}"}
+         )
+         response.raise_for_status()
+
+         if not namespace:
+             if not self.api_key:
+                 raise ValueError("No API key provided")
+
+             user_profile = get_user_profile(self.api_key)
+             namespace = user_profile.handle
+
+             if not namespace:
+                 namespace = user_profile.email.replace("@", "-").replace(".", "-")
+
+         print(f"Using namespace: {namespace}")
+
+         existing_processors = V1Processors.model_validate(response.json())
+         print(f"Existing processors: {existing_processors}")
+         self.processor: Optional[V1Processor] = next(
+             (
+                 processor_val
+                 for processor_val in existing_processors.processors
+                 if processor_val.metadata.name == name
+                 and processor_val.metadata.namespace == namespace
+             ),
+             None,
+         )
+         print(f"Processor: {self.processor}")
+
+         # If not found, create
+         if not self.processor:
+             print("Creating processor")
+             # Create metadata and processor request
+             metadata = V1ResourceMetaRequest(
+                 name=name, namespace=namespace, labels=labels
+             )
+
+             processor_request = V1ProcessorRequest(
+                 metadata=metadata,
+                 container=container,
+                 schema_=schema_,
+                 common_schema=common_schema,
+                 min_replicas=min_replicas,
+                 max_replicas=max_replicas,
+                 scale=scale_config,
+             )
+
+             print("Request:")
+             print(processor_request.model_dump(exclude_none=True))
+             create_response = requests.post(
+                 self.processors_url,
+                 json=processor_request.model_dump(exclude_none=True),
+                 headers={"Authorization": f"Bearer {self.api_key}"},
+             )
+             create_response.raise_for_status()
+             self.processor = V1Processor.model_validate(create_response.json())
+             print(f"Created Processor {self.processor.metadata.name}")
+         else:
+             # Else, update
+             print(
+                 f"Found Processor {self.processor.metadata.name}, updating if necessary"
+             )
+
+             update_processor = V1UpdateProcessor(
+                 stream=stream,
+                 container=container,
+                 schema_=schema_,
+                 common_schema=common_schema,
+                 min_replicas=min_replicas,
+                 max_replicas=max_replicas,
+                 scale=scale_config,
+                 no_delete=no_delete,
+             )
+
+             print("Update request:")
+             print(update_processor.model_dump(exclude_none=True))
+             patch_response = requests.patch(
+                 f"{self.processors_url}/{self.processor.metadata.namespace}/{self.processor.metadata.name}",
+                 json=update_processor.model_dump(exclude_none=True),
+                 headers={"Authorization": f"Bearer {self.api_key}"},
+             )
+             patch_response.raise_for_status()
+             print(f"Updated Processor {self.processor.metadata.name}")
+
+     def send(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Send data to the processor.
+         """
+         if not self.processor or not self.processor.metadata.name:
+             raise ValueError("Processor not found")
+
+         url = f"{self.processors_url}/{self.processor.metadata.namespace}/{self.processor.metadata.name}/send"
+
+         response = requests.get(
+             url,
+             params=data,
+             headers={"Authorization": f"Bearer {self.api_key}"},
+         )
+         response.raise_for_status()
+         return response.json()
+
+     def scale(self, replicas: int) -> Dict[str, Any]:
+         """
+         Scale the processor.
+         """
+         if not self.processor or not self.processor.metadata.name:
+             raise ValueError("Processor not found")
+
+         url = f"{self.processors_url}/{self.processor.metadata.namespace}/{self.processor.metadata.name}/scale"
+         scale_request = V1ProcessorScaleRequest(replicas=replicas)
+
+         response = requests.post(
+             url,
+             json=scale_request.model_dump(exclude_none=True),
+             headers={"Authorization": f"Bearer {self.api_key}"},
+         )
+         response.raise_for_status()
+         return response.json()
+
+     @classmethod
+     def load(
+         cls,
+         name: str,
+         namespace: Optional[str] = None,
+         config: Optional[GlobalConfig] = None,
+     ):
+         """
+         Get a Processor from the remote server.
+         """
+         processors = cls.get(namespace=namespace, name=name, config=config)
+         if not processors:
+             raise ValueError("Processor not found")
+         processor_v1 = processors[0]
+
+         out = cls.__new__(cls)
+         out.processor = processor_v1
+         out.config = config or GlobalConfig.read()
+         if not out.config:
+             raise ValueError("No config found")
+         out.current_server = out.config.get_current_server_config()
+         if not out.current_server:
+             raise ValueError("No server config found")
+         out.api_key = out.current_server.api_key
+         out.orign_host = out.current_server.server
+         out.processors_url = f"{out.orign_host}/v1/processors"
+         out.name = name
+         out.namespace = namespace
+
+         # Set specific fields from the processor
+         out.stream = processor_v1.stream
+         out.container = processor_v1.container
+         out.schema_ = processor_v1.schema_
+         out.common_schema = processor_v1.common_schema
+         out.min_replicas = processor_v1.min_replicas
+         out.max_replicas = processor_v1.max_replicas
+         out.scale_config = processor_v1.scale
+
+         return out
+
+     @classmethod
+     def get(
+         cls,
+         name: Optional[str] = None,
+         namespace: Optional[str] = None,
+         config: Optional[GlobalConfig] = None,
+     ) -> List[V1Processor]:
+         """
+         Get a list of Processors that match the optional name and/or namespace filters.
+         """
+         config = config or GlobalConfig.read()
+         if not config:
+             raise ValueError("No config found")
+         current_server = config.get_current_server_config()
+         if not current_server:
+             raise ValueError("No server config found")
+         processors_url = f"{current_server.server}/v1/processors"
+
+         response = requests.get(
+             processors_url,
+             headers={"Authorization": f"Bearer {current_server.api_key}"},
+         )
+         response.raise_for_status()
+
+         processors_response = V1Processors.model_validate(response.json())
+         filtered_processors = processors_response.processors
+
+         if name:
+             filtered_processors = [
+                 p for p in filtered_processors if p.metadata.name == name
+             ]
+         if namespace:
+             filtered_processors = [
+                 p for p in filtered_processors if p.metadata.namespace == namespace
+             ]
+
+         return filtered_processors
+
+     def delete(self):
+         """
+         Delete the Processor.
+         """
+         if not self.processor or not self.processor.metadata.name:
+             raise ValueError("Processor not found")
+
+         url = f"{self.processors_url}/{self.processor.metadata.namespace}/{self.processor.metadata.name}"
+         response = requests.delete(
+             url, headers={"Authorization": f"Bearer {self.api_key}"}
+         )
+         response.raise_for_status()
+         return
+
+     def ref(self) -> str:
+         """
+         Get the resource ref for the processor.
+         """
+         return f"{self.name}.{self.namespace}.Processor"
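
For orientation, a hedged sketch of the client-side lifecycle these methods support; names and payloads are placeholders:

    from nebu.processors.processor import Processor

    # Attach to an existing processor instead of creating one.
    proc = Processor.load(name="echo", namespace="my-handle")

    result = proc.send({"text": "hello"})  # forwarded to .../send
    print(result)

    proc.scale(replicas=3)  # POSTs a V1ProcessorScaleRequest
    # proc.delete()         # removes the processor server-side
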
nebu-0.1.8.dist-info/METADATA → nebu-0.1.10.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nebu
- Version: 0.1.8
+ Version: 0.1.10
  Summary: A globally distributed container runtime
  Requires-Python: >=3.10.14
  Description-Content-Type: text/markdown
nebu-0.1.10.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ nebu/__init__.py,sha256=EbdC8ZKnRTt6jkX0WN0p1pnaDEzb2InqZ1r8QZWzph0,195
+ nebu/auth.py,sha256=rApCd-7_c3GpIb7gjCB79rR7SOcmkG7MmaTE6zMbvr0,1125
+ nebu/config.py,sha256=XBY7uKgcJX9d1HGxqqpx87o_9DuF3maUlUnKkcpUrKU,4565
+ nebu/meta.py,sha256=CzFHMND9seuewzq9zNNx9WTr6JvrCBExe7BLqDSr7lM,745
+ nebu/containers/container.py,sha256=yb7KaPTVXnEEAlrpdlUi4HNqF6P7z9bmwAILGlq6iqU,13502
+ nebu/containers/decorator.py,sha256=qiM7hbHne9MhSp1gDgX5z5bimsXr_YPjTIZoe09dwr4,2741
+ nebu/containers/models.py,sha256=0j6NGy4yto-enRDh_4JH_ZTbHrLdSpuMOqNQPnIrwC4,6815
+ nebu/containers/server.py,sha256=yFa2Y9PzBn59E1HftKiv0iapPonli2rbGAiU6r-wwe0,2513
+ nebu/processors/consumer.py,sha256=rFqd6gg2OYgXi3gf11GFpuaOOzuK1TYaPO-t_leSR8Y,15097
+ nebu/processors/decorate.py,sha256=8jemT7QvY-2aAJAOiYzjClja-Zso0QgWXZH377uIW4I,13126
+ nebu/processors/default.py,sha256=W4slJenG59rvyTlJ7gRp58eFfXcNOTT2Hfi6zzJAobI,365
+ nebu/processors/models.py,sha256=JpX5GouqdIdCQWUUNzQh68OsD57_Po2aQreDQ9VMRec,3654
+ nebu/processors/processor.py,sha256=oy2YdI-cy6qQWxrZhpZahJV46oWZlu_Im-jm811R_oo,9667
+ nebu/redis/models.py,sha256=coPovAcVXnOU1Xh_fpJL4PO3QctgK9nBe5QYoqEcnxg,1230
+ nebu/services/service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ nebu-0.1.10.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ nebu-0.1.10.dist-info/METADATA,sha256=apL1G3eCfv0BncDZL5Sqq4mc2e4yeA2sTmbMgACQOlU,1588
+ nebu-0.1.10.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ nebu-0.1.10.dist-info/top_level.txt,sha256=uLIbEKJeGSHWOAJN5S0i5XBGwybALlF9bYoB1UhdEgQ,5
+ nebu-0.1.10.dist-info/RECORD,,
nebu-0.1.8.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- nebu/__init__.py,sha256=EbdC8ZKnRTt6jkX0WN0p1pnaDEzb2InqZ1r8QZWzph0,195
- nebu/config.py,sha256=XBY7uKgcJX9d1HGxqqpx87o_9DuF3maUlUnKkcpUrKU,4565
- nebu/meta.py,sha256=CzFHMND9seuewzq9zNNx9WTr6JvrCBExe7BLqDSr7lM,745
- nebu/containers/container.py,sha256=yb7KaPTVXnEEAlrpdlUi4HNqF6P7z9bmwAILGlq6iqU,13502
- nebu/containers/decorator.py,sha256=qiM7hbHne9MhSp1gDgX5z5bimsXr_YPjTIZoe09dwr4,2741
- nebu/containers/models.py,sha256=_d6BS6puoVWvyHhWX-74WFHJSOE8WJaFt2zGMTm9EEA,6782
- nebu/containers/server.py,sha256=yFa2Y9PzBn59E1HftKiv0iapPonli2rbGAiU6r-wwe0,2513
- nebu/processors/models.py,sha256=6XSw4iM77XYJf6utm8QReN9fyMS0dK40a5sVwsC7RRA,1970
- nebu/processors/processor.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nebu/redis/models.py,sha256=coPovAcVXnOU1Xh_fpJL4PO3QctgK9nBe5QYoqEcnxg,1230
- nebu/services/service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- nebu-0.1.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- nebu-0.1.8.dist-info/METADATA,sha256=MoqFzOItyQ4Knu3jfmjPEadooBFOPGZJTep7T0j1jeI,1587
- nebu-0.1.8.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- nebu-0.1.8.dist-info/top_level.txt,sha256=uLIbEKJeGSHWOAJN5S0i5XBGwybALlF9bYoB1UhdEgQ,5
- nebu-0.1.8.dist-info/RECORD,,
nebu-0.1.8.dist-info/licenses/LICENSE → nebu-0.1.10.dist-info/licenses/LICENSE RENAMED
File without changes
nebu-0.1.8.dist-info/WHEEL → nebu-0.1.10.dist-info/WHEEL RENAMED
File without changes
nebu-0.1.8.dist-info/top_level.txt → nebu-0.1.10.dist-info/top_level.txt RENAMED
File without changes