nebu 0.1.114__py3-none-any.whl → 0.1.116__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nebu/processors/decorate.py +64 -5
- nebu/processors/processor.py +97 -60
- {nebu-0.1.114.dist-info → nebu-0.1.116.dist-info}/METADATA +1 -1
- {nebu-0.1.114.dist-info → nebu-0.1.116.dist-info}/RECORD +7 -7
- {nebu-0.1.114.dist-info → nebu-0.1.116.dist-info}/WHEEL +0 -0
- {nebu-0.1.114.dist-info → nebu-0.1.116.dist-info}/licenses/LICENSE +0 -0
- {nebu-0.1.114.dist-info → nebu-0.1.116.dist-info}/top_level.txt +0 -0
nebu/processors/decorate.py
CHANGED
@@ -802,7 +802,13 @@ def processor(
             )
             origin = get_origin(param_type) if param_type else None
             args = get_args(param_type) if param_type else tuple()
-            logger.debug(
+            logger.debug(
+                f"Decorator: For param_type '{param_type_str_repr}': origin = {origin!s}, args = {args!s}"
+            )  # More detailed log
+            print(
+                f"Decorator: For param_type '{param_type_str_repr}': origin = {origin!s}, args = {args!s}"
+            )  # More detailed log
+
             is_stream_message = False
             content_type = None
             content_type_name_from_regex = None  # Store regex result here
@@ -1194,6 +1200,52 @@ def processor(
 
     # --- Final Setup ---
     logger.debug("Decorator: Preparing final Processor object...")
+
+    # Determine ResolvedInputType for Processor Generic
+    ResolvedInputType: type[BaseModel] = (
+        BaseModel  # Default to BaseModel to satisfy generic bound
+    )
+    if is_stream_message:
+        if (
+            content_type
+            and isinstance(content_type, type)
+            and issubclass(content_type, BaseModel)
+        ):
+            ResolvedInputType = content_type
+        else:
+            logger.warning(
+                f"Decorator: Message type hint found, but ContentType '{content_type!s}' is not a valid Pydantic Model. Defaulting InputType to BaseModel."
+            )
+            # ResolvedInputType remains BaseModel (default)
+    elif (
+        param_type
+        and isinstance(param_type, type)
+        and issubclass(param_type, BaseModel)
+    ):
+        ResolvedInputType = param_type  # Function takes the data model directly
+    else:
+        logger.warning(
+            f"Decorator: Parameter type '{param_type!s}' is not a valid Pydantic Model. Defaulting InputType to BaseModel."
+        )
+        # ResolvedInputType remains BaseModel (default)
+
+    ResolvedOutputType: type[BaseModel] = BaseModel  # Default to BaseModel
+    if (
+        return_type
+        and isinstance(return_type, type)
+        and issubclass(return_type, BaseModel)
+    ):
+        ResolvedOutputType = return_type
+    elif return_type is not None:  # It was something, but not a BaseModel subclass
+        logger.warning(
+            f"Decorator: Return type '{return_type!s}' is not a valid Pydantic Model. Defaulting OutputType to BaseModel."
+        )
+    # Else (return_type is None), ResolvedOutputType remains BaseModel
+
+    logger.debug(
+        f"Decorator: Resolved Generic Types for Processor: InputType={ResolvedInputType.__name__}, OutputType={ResolvedOutputType.__name__}"
+    )
+
     metadata = V1ResourceMetaRequest(
         name=processor_name, namespace=effective_namespace, labels=labels
     )
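For illustration, here is a minimal standalone sketch of the fallback chain added above (Message-wrapped hint resolves to its content type, a bare Pydantic model resolves to itself, anything else falls back to BaseModel). The Message class and MyInput model below are hypothetical stand-ins, not nebu's actual classes:

from typing import Any, Generic, TypeVar, get_args, get_origin

from pydantic import BaseModel

T = TypeVar("T", bound=BaseModel)


class Message(Generic[T]):  # hypothetical stand-in for the stream Message wrapper
    pass


class MyInput(BaseModel):  # hypothetical user model
    prompt: str


def resolve_input_type(param_type: Any) -> type[BaseModel]:
    # Mirrors the decorator's fallback chain sketched above.
    origin, args = get_origin(param_type), get_args(param_type)
    if origin is Message and args and isinstance(args[0], type) and issubclass(args[0], BaseModel):
        return args[0]  # Message[MyInput] -> MyInput
    if isinstance(param_type, type) and issubclass(param_type, BaseModel):
        return param_type  # function takes the data model directly
    return BaseModel  # default keeps the generic bound satisfied


print(resolve_input_type(Message[MyInput]).__name__)  # MyInput
print(resolve_input_type(MyInput).__name__)           # MyInput
print(resolve_input_type(dict).__name__)              # BaseModel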
@@ -1225,7 +1277,7 @@ def processor(
     final_command = "\n".join(all_commands)
 
     logger.debug(
-        f"Decorator: Final container command
+        f"Decorator: Final container command:\\n-------\\n{final_command}\\n-------"
     )
 
     container_request = V1ContainerRequest(
@@ -1259,21 +1311,28 @@ def processor(
     else:
         logger.debug(f"  {env_var.key}: {value_str}")
 
-
+    # Create the generically typed Processor instance
+    _processor_instance = Processor[ResolvedInputType, ResolvedOutputType](
         name=processor_name,
         namespace=effective_namespace,
         labels=labels,
         container=container_request,
-
+        input_model_cls=ResolvedInputType,
+        output_model_cls=ResolvedOutputType,
         common_schema=None,
         min_replicas=min_replicas,
         max_replicas=max_replicas,
         scale_config=scale,
+        config=effective_config,
+        api_key=api_key,
         no_delete=no_delete,
         wait_for_healthy=wait_for_healthy,
     )
+    # Type hint for the variable. The instance itself IS correctly typed with specific models.
+    processor_instance: Processor[BaseModel, BaseModel] = _processor_instance
+
     logger.debug(
-        f"Decorator: Processor instance '{processor_name}' created successfully."
+        f"Decorator: Processor instance '{processor_name}' created successfully with generic types."
     )
     # Store original func for potential local invocation/testing? Keep for now.
     # TODO: Add original_func to Processor model definition if this is desired
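A tiny sketch (illustrative names, not nebu code) of why the decorator can subscript the generic with classes held in variables: parameterizing a Generic at runtime is legal, and the resulting instance records the arguments on __orig_class__:

from typing import Generic, TypeVar, get_args

from pydantic import BaseModel

A = TypeVar("A", bound=BaseModel)
B = TypeVar("B", bound=BaseModel)


class Box(Generic[A, B]):  # hypothetical stand-in for Processor
    pass


class In(BaseModel):
    x: int


class Out(BaseModel):
    y: int


resolved_in, resolved_out = In, Out      # types resolved at runtime, as in the decorator
box = Box[resolved_in, resolved_out]()   # subscripting with variables works at runtime
print(get_args(box.__orig_class__))      # (<class '__main__.In'>, <class '__main__.Out'>)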
nebu/processors/processor.py
CHANGED
@@ -2,7 +2,17 @@ import json
 import threading
 import time
 import uuid
-from typing import
+from typing import (
+    Any,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    TypeVar,
+    cast,
+    get_args,
+    get_origin,
+)
 
 import requests
 from pydantic import BaseModel
@@ -101,7 +111,8 @@ class Processor(Generic[InputType, OutputType]):
         namespace: Optional[str] = None,
         labels: Optional[Dict[str, str]] = None,
         container: Optional[V1ContainerRequest] = None,
-
+        input_model_cls: Optional[type[BaseModel]] = None,
+        output_model_cls: Optional[type[BaseModel]] = None,
         common_schema: Optional[str] = None,
         min_replicas: Optional[int] = None,
         max_replicas: Optional[int] = None,
@@ -124,7 +135,8 @@ class Processor(Generic[InputType, OutputType]):
         self.namespace = namespace
         self.labels = labels
         self.container = container
-        self.
+        self.input_model_cls = input_model_cls
+        self.output_model_cls = output_model_cls
         self.common_schema = common_schema
         self.min_replicas = min_replicas
         self.max_replicas = max_replicas
@@ -132,37 +144,26 @@ class Processor(Generic[InputType, OutputType]):
         self.processors_url = f"{self.orign_host}/v1/processors"
         self._log_thread: Optional[threading.Thread] = None
 
-        #
-
-        print("self.__dict__: ", self.__dict__)
-        if self.schema_ is None and hasattr(self, "__orig_class__"):
+        # Infer OutputType Pydantic class if output_model_cls is not provided
+        if self.output_model_cls is None and hasattr(self, "__orig_class__"):
             type_args = get_args(self.__orig_class__)  # type: ignore
-            print(">>> type_args: ", type_args)
             if len(type_args) == 2:
                 output_type_candidate = type_args[1]
-                print(">>> output_type_candidate: ", output_type_candidate)
-                # Check if it looks like a Pydantic model class
                 if isinstance(output_type_candidate, type) and issubclass(
                     output_type_candidate, BaseModel
                 ):
-                    print(">>> output_type_candidate is a Pydantic model class")
                     logger.debug(
-                        f"Inferred
+                        f"Inferred output_model_cls {output_type_candidate.__name__} from generic arguments."
                     )
-                    self.
+                    self.output_model_cls = output_type_candidate
                 else:
-                    print(">>> output_type_candidate is not a Pydantic model class")
                     logger.debug(
                         f"Second generic argument {output_type_candidate} is not a Pydantic BaseModel. "
-                        "Cannot infer
+                        "Cannot infer output_model_cls."
                     )
             else:
-                print(
-                    "Could not infer OutputType from generic arguments: wrong number of type args found "
-                    f"(expected 2, got {len(type_args) if type_args else 0})."
-                )
                 logger.debug(
-                    "Could not infer
+                    "Could not infer output_model_cls from generic arguments: wrong number of type args found "
                     f"(expected 2, got {len(type_args) if type_args else 0})."
                 )
 
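As a standalone sketch of the __orig_class__ inference relied on here (class names are illustrative, not nebu's), note one caveat: typing sets __orig_class__ on the instance only after __init__ returns, so inference based on it generally has to run after construction, which is presumably why the decorator now also passes output_model_cls explicitly:

from typing import Generic, Optional, TypeVar, get_args

from pydantic import BaseModel

InputT = TypeVar("InputT", bound=BaseModel)
OutputT = TypeVar("OutputT", bound=BaseModel)


class Proc(Generic[InputT, OutputT]):  # hypothetical stand-in for Processor
    def __init__(self) -> None:
        self.output_model_cls: Optional[type[BaseModel]] = None

    def infer_output_model(self) -> None:
        # __orig_class__ only exists once the parameterized instance is fully constructed.
        if self.output_model_cls is None and hasattr(self, "__orig_class__"):
            type_args = get_args(self.__orig_class__)  # type: ignore[attr-defined]
            if len(type_args) == 2 and isinstance(type_args[1], type) and issubclass(type_args[1], BaseModel):
                self.output_model_cls = type_args[1]


class Echo(BaseModel):
    text: str


p = Proc[Echo, Echo]()
p.infer_output_model()
print(p.output_model_cls)  # <class '__main__.Echo'>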
@@ -201,7 +202,6 @@ class Processor(Generic[InputType, OutputType]):
             processor_request = V1ProcessorRequest(
                 metadata=metadata,
                 container=container,
-                schema_=schema_,
                 common_schema=common_schema,
                 min_replicas=min_replicas,
                 max_replicas=max_replicas,
@@ -226,7 +226,6 @@ class Processor(Generic[InputType, OutputType]):
 
             update_processor = V1UpdateProcessor(
                 container=container,
-                schema_=schema_,
                 common_schema=common_schema,
                 min_replicas=min_replicas,
                 max_replicas=max_replicas,
@@ -312,6 +311,10 @@ class Processor(Generic[InputType, OutputType]):
         )
         response.raise_for_status()
         raw_response_json = response.json()
+
+        if "error" in raw_response_json:
+            raise Exception(raw_response_json["error"])
+
         raw_content = raw_response_json.get("content")
         logger.debug(f">>> Raw content: {raw_content}")
 
@@ -341,38 +344,37 @@ class Processor(Generic[InputType, OutputType]):
 
         # Attempt to parse into OutputType if conditions are met
         print(f">>> wait: {wait}")
-        print(f">>> self.
-        print(">>> type(self.
-        print(
+        print(f">>> self.output_model_cls: {self.output_model_cls}")
+        print(">>> type(self.output_model_cls): ", type(self.output_model_cls))
+        print(
+            f">>> isinstance(self.output_model_cls, type): {isinstance(self.output_model_cls, type)}"
+        )
         print(f">>> isinstance(raw_content, dict): {isinstance(raw_content, dict)}")
         if (
             wait
-            and self.
-            and isinstance(self.
-            and issubclass(self.
+            and self.output_model_cls
+            and isinstance(self.output_model_cls, type)
+            and issubclass(self.output_model_cls, BaseModel)  # type: ignore
             and isinstance(raw_content, dict)
-        ):
+        ):
             print(f">>> raw_content: {raw_content}")
             try:
-
-                # Parse raw_content instead of the full response
-                parsed_model = self.schema_.model_validate(raw_content)
+                parsed_model = self.output_model_cls.model_validate(raw_content)
                 print(f">>> parsed_model: {parsed_model}")
-                # Cast to OutputType to satisfy the linter with generics
                 parsed_output: OutputType = cast(OutputType, parsed_model)
                 print(f">>> parsed_output: {parsed_output}")
                 return parsed_output
-            except
-                Exception
-            ) as e:  # Consider pydantic.ValidationError for more specific handling
+            except Exception as e:
                 print(f">>> error: {e}")
-
+                model_name = getattr(
+                    self.output_model_cls, "__name__", str(self.output_model_cls)
+                )
                 logger.error(
-                    f"Processor {processor_name}: Failed to parse 'content' field into output type {
+                    f"Processor {processor_name}: Failed to parse 'content' field into output type {model_name}. "
                     f"Error: {e}. Returning raw JSON response."
                 )
-                # Fallback to returning the raw JSON response
                 return raw_content
+        # Fallback logic using self.schema_ has been removed.
 
         return raw_content
 
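A rough sketch of the response-handling path above, reduced to a standalone function (the Reply model and parse_content helper are illustrative, not part of nebu): raise on an "error" field, validate "content" into the configured output model, and fall back to the raw payload if validation fails:

from typing import Optional, Union

from pydantic import BaseModel, ValidationError


class Reply(BaseModel):  # illustrative output model
    reply: str


def parse_content(
    raw_response_json: dict, output_model_cls: Optional[type[BaseModel]]
) -> Union[BaseModel, object]:
    if "error" in raw_response_json:
        raise Exception(raw_response_json["error"])
    raw_content = raw_response_json.get("content")
    if output_model_cls is not None and isinstance(raw_content, dict):
        try:
            return output_model_cls.model_validate(raw_content)
        except ValidationError:
            pass  # mirrors the logged fallback to the raw JSON content
    return raw_content


print(parse_content({"content": {"reply": "hi"}}, Reply))  # reply='hi'
print(parse_content({"content": {"bogus": 1}}, Reply))     # {'bogus': 1}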
@@ -401,6 +403,8 @@ class Processor(Generic[InputType, OutputType]):
         namespace: Optional[str] = None,
         config: Optional[GlobalConfig] = None,
         api_key: Optional[str] = None,
+        input_model_cls: Optional[type[BaseModel]] = None,
+        output_model_cls: Optional[type[BaseModel]] = None,
     ):
         """
         Get a Processor from the remote server.
@@ -412,27 +416,60 @@ class Processor(Generic[InputType, OutputType]):
             raise ValueError("Processor not found")
         processor_v1 = processors[0]
 
-
+        # Try to infer Input/Output model classes if Processor.load is called as generic
+        # e.g., MyProcessor = Processor[MyInput, MyOutput]; MyProcessor.load(...)
+        loaded_input_model_cls: Optional[type[BaseModel]] = None
+        loaded_output_model_cls: Optional[type[BaseModel]] = None
+
+        # __orig_bases__ usually contains the generic version of the class if it was parameterized.
+        # We look for Processor[...] in the bases.
+        if hasattr(cls, "__orig_bases__"):
+            for base in cls.__orig_bases__:  # type: ignore
+                if get_origin(base) is Processor:
+                    type_args = get_args(base)
+                    if len(type_args) == 2:
+                        input_arg, output_arg = type_args
+                        if isinstance(input_arg, type) and issubclass(
+                            input_arg, BaseModel
+                        ):
+                            loaded_input_model_cls = input_arg
+                        if isinstance(output_arg, type) and issubclass(
+                            output_arg, BaseModel
+                        ):
+                            loaded_output_model_cls = output_arg
+                    break  # Found Processor generic base
+
+        # Determine final model classes, prioritizing overrides
+        final_input_model_cls = (
+            input_model_cls if input_model_cls is not None else loaded_input_model_cls
+        )
+        final_output_model_cls = (
+            output_model_cls
+            if output_model_cls is not None
+            else loaded_output_model_cls
+        )
+
+        out = cls.__new__(cls)  # type: ignore
+        # If generic types were successfully inferred or overridden, pass them to init
+        # Otherwise, they will be None, and __init__ might try __orig_class__ if called on instance
+        out.__init__(  # type: ignore
+            name=processor_v1.metadata.name,  # Use name from fetched metadata
+            namespace=processor_v1.metadata.namespace,  # Use namespace from fetched metadata
+            labels=processor_v1.metadata.labels,  # Use labels from fetched metadata
+            container=processor_v1.container,
+            input_model_cls=final_input_model_cls,  # Use determined input model
+            output_model_cls=final_output_model_cls,  # Use determined output model
+            common_schema=processor_v1.common_schema,
+            min_replicas=processor_v1.min_replicas,
+            max_replicas=processor_v1.max_replicas,
+            scale_config=processor_v1.scale,
+            config=config,  # Pass original config
+            api_key=api_key,  # Pass original api_key
+        )
+        # The __init__ call above handles most setup. We store the fetched processor data.
         out.processor = processor_v1
-
-
-            raise ValueError("No config found")
-        out.current_server = out.config.get_current_server_config()
-        if not out.current_server:
-            raise ValueError("No server config found")
-        out.api_key = api_key or out.current_server.api_key
-        out.orign_host = out.current_server.server
-        out.processors_url = f"{out.orign_host}/v1/processors"
-        out.name = name
-        out.namespace = namespace
-
-        # Set specific fields from the processor
-        out.container = processor_v1.container
-        out.schema_ = processor_v1.schema_
-        out.common_schema = processor_v1.common_schema
-        out.min_replicas = processor_v1.min_replicas
-        out.max_replicas = processor_v1.max_replicas
-        out.scale_config = processor_v1.scale
+        # self.schema_ was removed, so no assignment for it here from processor_v1.schema_
+        # out.common_schema = processor_v1.common_schema  # This is now set in __init__
 
         return out
 
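A standalone sketch of the __orig_bases__ lookup used by load() (class names are illustrative, not nebu's): when load() is invoked on a subclass that pins the generic parameters, the parameterized base carries the concrete model classes:

from typing import Generic, TypeVar, get_args, get_origin

from pydantic import BaseModel

InT = TypeVar("InT", bound=BaseModel)
OutT = TypeVar("OutT", bound=BaseModel)


class Proc(Generic[InT, OutT]):  # hypothetical stand-in for Processor
    pass


class ChatRequest(BaseModel):
    prompt: str


class ChatResponse(BaseModel):
    reply: str


class ChatProcessor(Proc[ChatRequest, ChatResponse]):
    pass


for base in ChatProcessor.__orig_bases__:  # type: ignore[attr-defined]
    if get_origin(base) is Proc:
        input_cls, output_cls = get_args(base)
        print(input_cls.__name__, output_cls.__name__)  # ChatRequest ChatResponse
        break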
@@ -549,7 +586,7 @@ class Processor(Generic[InputType, OutputType]):
 
         # Send health check and wait for response
         response = self.send(
-            data=health_check_data,  # type: ignore
+            data=health_check_data,  # type: ignore[arg-type]
             wait=True,
             timeout=30.0,  # Short timeout for individual health check
         )
{nebu-0.1.114.dist-info → nebu-0.1.116.dist-info}/RECORD
CHANGED
@@ -15,14 +15,14 @@ nebu/namespaces/models.py,sha256=EqUOpzhVBhvJw2P92ONDUbIgC31M9jMmcaG5vyOrsWg,497
 nebu/namespaces/namespace.py,sha256=oeZyGqsIGIrppyjif1ZONsdTmqRgd9oSLFE1BChXTTE,5247
 nebu/processors/consumer.py,sha256=9WapzBTPuXRunH-vjPerTlGZy__hn_d4m13l1ajebY8,62732
 nebu/processors/consumer_process_worker.py,sha256=h--eNFKaLbUayxn88mB8oGGdrU2liE1dnwm_TPlewX8,36960
-nebu/processors/decorate.py,sha256=
+nebu/processors/decorate.py,sha256=5p9pQrk_H8-Fj0UjsgSVCYx7Jk7KFuhMZtNhkKvpmkQ,61306
 nebu/processors/default.py,sha256=cy4ETMdbdRGkrvbYec1o60h7mGDlGN5JsuUph0ENtDU,364
 nebu/processors/models.py,sha256=g4B1t6Rgoy-NUEHBLeQc0EENzHXLDlWSio8Muv7cTDU,4093
-nebu/processors/processor.py,sha256=
+nebu/processors/processor.py,sha256=PQTWxo0-XvdvoDTcchBwYA2OZQi3-uTwhKZPhDQ2zaM,24673
 nebu/redis/models.py,sha256=coPovAcVXnOU1Xh_fpJL4PO3QctgK9nBe5QYoqEcnxg,1230
 nebu/services/service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nebu-0.1.
-nebu-0.1.
-nebu-0.1.
-nebu-0.1.
-nebu-0.1.
+nebu-0.1.116.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+nebu-0.1.116.dist-info/METADATA,sha256=S-hzlObs1u38wGTPRvAyb6v21nzziM8joMlEIXZ2dio,1798
+nebu-0.1.116.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+nebu-0.1.116.dist-info/top_level.txt,sha256=uLIbEKJeGSHWOAJN5S0i5XBGwybALlF9bYoB1UhdEgQ,5
+nebu-0.1.116.dist-info/RECORD,,