nebu 0.1.24__py3-none-any.whl → 0.1.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nebu/__init__.py +6 -0
- nebu/adapter.py +11 -0
- nebu/auth.py +15 -0
- nebu/cache.py +90 -0
- nebu/chatx/convert.py +206 -0
- nebu/chatx/openai.py +976 -0
- nebu/config.py +38 -2
- nebu/data.py +855 -0
- nebu/processors/consumer.py +1 -4
- nebu/processors/decorate.py +1 -1
- nebu/processors/processor.py +3 -7
- nebu/processors/remote.py +47 -0
- {nebu-0.1.24.dist-info → nebu-0.1.29.dist-info}/METADATA +4 -1
- nebu-0.1.29.dist-info/RECORD +26 -0
- nebu-0.1.24.dist-info/RECORD +0 -20
- {nebu-0.1.24.dist-info → nebu-0.1.29.dist-info}/WHEEL +0 -0
- {nebu-0.1.24.dist-info → nebu-0.1.29.dist-info}/licenses/LICENSE +0 -0
- {nebu-0.1.24.dist-info → nebu-0.1.29.dist-info}/top_level.txt +0 -0
nebu/__init__.py
CHANGED
@@ -3,9 +3,15 @@
|
|
3
3
|
# ruff: noqa: F401
|
4
4
|
# ruff: noqa: F403
|
5
5
|
|
6
|
+
from .adapter import *
|
7
|
+
from .auth import is_allowed
|
8
|
+
from .cache import Cache, OwnedValue
|
9
|
+
from .chatx.convert import *
|
10
|
+
from .chatx.openai import *
|
6
11
|
from .config import *
|
7
12
|
from .containers.container import Container
|
8
13
|
from .containers.models import *
|
14
|
+
from .data import *
|
9
15
|
from .meta import *
|
10
16
|
from .processors.decorate import *
|
11
17
|
from .processors.models import *
|
nebu/adapter.py
ADDED
nebu/auth.py
CHANGED
@@ -33,3 +33,18 @@ def get_user_profile(api_key: str) -> V1UserProfile:
|
|
33
33
|
response.raise_for_status()
|
34
34
|
|
35
35
|
return V1UserProfile.model_validate(response.json())
|
36
|
+
|
37
|
+
|
38
|
+
def is_allowed(
    resource_owner: str,
    user_id: Optional[str] = None,
    orgs: Optional[Dict[str, Dict[str, str]]] = None,
) -> bool:
    """
    Check whether a resource owner is either the given user or one of
    the user's organizations.

    Args:
        resource_owner: The owner identifier recorded on the resource.
        user_id: The requesting user's id, if known.
        orgs: Mapping of org id -> org metadata the user belongs to.

    Returns:
        True when ``resource_owner`` matches ``user_id`` or any org id.
    """
    # Candidate owners are every org id the user belongs to, plus the
    # user's own id when present.
    candidates = set(orgs or {})
    if user_id:
        candidates.add(user_id)
    return resource_owner in candidates
|
nebu/cache.py
ADDED
@@ -0,0 +1,90 @@
|
|
1
|
+
import os
|
2
|
+
import time
|
3
|
+
from typing import Any, Optional, cast
|
4
|
+
|
5
|
+
import redis
|
6
|
+
from pydantic import BaseModel, Field
|
7
|
+
|
8
|
+
|
9
|
+
class OwnedValue(BaseModel):
    """A cached value together with its ownership metadata."""

    # Unix epoch seconds captured when the instance is created.
    created_at: int = Field(default_factory=lambda: int(time.time()))
    # The cached payload itself, stored as a string.
    value: str
    # Ownership metadata — presumably used with access checks such as
    # is_allowed(); TODO(review): confirm against cache consumers.
    user_id: Optional[str] = None
    orgs: Optional[Any] = None
    handle: Optional[str] = None
    owner: Optional[str] = None
|
16
|
+
|
17
|
+
|
18
|
+
class Cache:
|
19
|
+
"""
|
20
|
+
A simple cache class that connects to Redis.
|
21
|
+
"""
|
22
|
+
|
23
|
+
def __init__(self, host: str = "localhost", port: int = 6379, db: int = 0):
|
24
|
+
"""
|
25
|
+
Initializes the Redis connection.
|
26
|
+
Pulls connection details from environment variables REDIS_HOST,
|
27
|
+
REDIS_PORT, and REDIS_DB if available, otherwise uses defaults.
|
28
|
+
"""
|
29
|
+
redis_host = os.environ.get("REDIS_HOST", host)
|
30
|
+
redis_port = int(os.environ.get("REDIS_PORT", port))
|
31
|
+
redis_db = int(os.environ.get("REDIS_DB", db))
|
32
|
+
namespace = os.environ.get("NEBU_NAMESPACE")
|
33
|
+
if not namespace:
|
34
|
+
raise ValueError("NEBU_NAMESPACE environment variable is not set")
|
35
|
+
|
36
|
+
try:
|
37
|
+
# decode_responses=True ensures keys and values are returned as strings
|
38
|
+
self.redis_client = redis.StrictRedis(
|
39
|
+
host=redis_host, port=redis_port, db=redis_db, decode_responses=True
|
40
|
+
)
|
41
|
+
# Ping the server to ensure connection is established
|
42
|
+
self.redis_client.ping()
|
43
|
+
print(
|
44
|
+
f"Successfully connected to Redis at {redis_host}:{redis_port}/{redis_db}"
|
45
|
+
)
|
46
|
+
|
47
|
+
self.prefix = f"cache:{namespace}"
|
48
|
+
except Exception as e:
|
49
|
+
print(f"Error connecting to Redis: {e}")
|
50
|
+
self.redis_client = None # Set client to None if connection fails
|
51
|
+
|
52
|
+
def get(self, key: str) -> str | None:
|
53
|
+
"""
|
54
|
+
Gets the value associated with a key from Redis.
|
55
|
+
Returns None if the key does not exist or connection failed.
|
56
|
+
"""
|
57
|
+
if not self.redis_client:
|
58
|
+
print("Redis client not connected.")
|
59
|
+
return None
|
60
|
+
try:
|
61
|
+
key = f"{self.prefix}:{key}"
|
62
|
+
# Cast the result to str | None as expected
|
63
|
+
result = self.redis_client.get(key)
|
64
|
+
return cast(str | None, result)
|
65
|
+
except Exception as e:
|
66
|
+
print(f"Error getting key '{key}' from Redis: {e}")
|
67
|
+
return None
|
68
|
+
|
69
|
+
def set(self, key: str, value: str, expiry_seconds: int | None = None) -> bool:
|
70
|
+
"""
|
71
|
+
Sets a key-value pair in Redis.
|
72
|
+
Optionally sets an expiry time for the key in seconds.
|
73
|
+
Returns True if successful, False otherwise (e.g., connection failed).
|
74
|
+
"""
|
75
|
+
if not self.redis_client:
|
76
|
+
print("Redis client not connected.")
|
77
|
+
return False
|
78
|
+
try:
|
79
|
+
key = f"{self.prefix}:{key}"
|
80
|
+
if expiry_seconds:
|
81
|
+
# Cast the result to bool
|
82
|
+
result = self.redis_client.setex(key, expiry_seconds, value)
|
83
|
+
return cast(bool, result)
|
84
|
+
else:
|
85
|
+
# Cast the result to bool
|
86
|
+
result = self.redis_client.set(key, value)
|
87
|
+
return cast(bool, result)
|
88
|
+
except Exception as e:
|
89
|
+
print(f"Error setting key '{key}' in Redis: {e}")
|
90
|
+
return False
|
nebu/chatx/convert.py
ADDED
@@ -0,0 +1,206 @@
|
|
1
|
+
import base64
|
2
|
+
import binascii
|
3
|
+
import io
|
4
|
+
from io import BytesIO
|
5
|
+
from typing import Any, Dict, List, Tuple
|
6
|
+
|
7
|
+
import requests
|
8
|
+
from PIL import Image, UnidentifiedImageError
|
9
|
+
|
10
|
+
|
11
|
+
def convert_to_unsloth_inference(
    old_schema: List[Dict[str, Any]],
    request_timeout: float = 30.0,
) -> Tuple[List[Dict[str, Any]], List[Image.Image]]:
    """
    Convert from an old OpenAI message format that may look like:
        [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "some text"},
                    {"type": "image_url", "image_url": {"url": "https://..."}},
                    ...
                ],
            }
        ]

    to a new format:
        [
            {
                "role": "user",
                "content": [
                    {"type": "image"},
                    {"type": "text", "text": "merged user text"}
                ],
            }
        ]

    Along with the new format, return a list of downloaded PIL Image objects.

    Args:
        old_schema: Messages in the old OpenAI chat format.
        request_timeout: Seconds to wait for each image download; previously
            the request had no timeout and could block indefinitely.

    Returns:
        Tuple of (converted messages, downloaded PIL images).

    Raises:
        requests.HTTPError: If an image URL returns a bad status code.
    """

    new_schema = []
    all_images = []  # Will store PIL images as we convert them

    for message in old_schema:
        role = message.get("role", "user")

        # Collect all text pieces and all image URLs
        text_chunks = []
        image_urls = []

        for content_item in message.get("content", []):
            content_type = content_item.get("type")
            if content_type == "text":
                text_chunks.append(content_item.get("text", ""))
            elif content_type == "image_url":
                image_url = content_item.get("image_url", {}).get("url")
                if image_url:
                    image_urls.append(image_url)

        # Merge text chunks into one
        merged_text = " ".join(text_chunks).strip()

        # Convert each URL into a PIL image
        for url in image_urls:
            # Download the image (bounded by request_timeout)
            response = requests.get(url, timeout=request_timeout)
            response.raise_for_status()
            image_data = BytesIO(response.content)
            pil_img = Image.open(image_data).convert("RGB")
            all_images.append(pil_img)

        # Construct new message format
        # For simplicity, this example only places one {"type": "image"} placeholder
        # regardless of how many images were found, and merges all text into one block.
        new_content = []
        if image_urls:
            new_content.append({"type": "image"})
        if merged_text:
            new_content.append({"type": "text", "text": merged_text})

        new_schema.append({"role": role, "content": new_content})

    return new_schema, all_images
|
84
|
+
|
85
|
+
|
86
|
+
def oai_to_unsloth(
    messages_input: List[Dict[str, Any]],
) -> Dict[str, List[Dict[str, Any]]]:
    """
    Converts a list of messages from an OpenAI-like chat format to the Nebulous conversation format.
    Images specified by URLs or base64 strings are loaded into PIL.Image objects.

    Input format example:
    [
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "Describe the image."},
                {"type": "input_image", "image_url": "http://... or base64 string"},
            ]
        },
        {
            "role": "assistant",
            "content": [{"type": "text", "text": "This is an image of..."}] # Or potentially just a string
        }
    ]

    Output format example:
    {
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe the image."},
                    {"type": "image", "image": <PIL.Image.Image object>},
                ]
            },
            {
                "role": "assistant",
                "content": [{"type": "text", "text": "This is an image of..."}]
            }
        ]
    }

    Messages with a missing role or no processable content are dropped.
    Failures to fetch/decode an image are logged and the image is skipped.
    """
    nebu_conversation = []
    for message in messages_input:
        role = message.get("role")
        input_content = message.get("content")  # Can be list or string

        processed_content = []

        if isinstance(input_content, list):
            # Process list content (multi-modal)
            for item in input_content:
                item_type = item.get("type")
                if item_type in ("input_text", "text"):
                    processed_content.append(
                        {"type": "text", "text": item.get("text", "")}
                    )
                elif item_type in (
                    "input_image",
                    "image_url",
                    "image",
                ):  # Accept 'image' as source key too
                    # Use "image_url" first, then fallback to "image" if needed
                    image_source = item.get("image_url", item.get("image"))
                    if image_source:
                        pil_image = None
                        try:
                            if isinstance(
                                image_source, str
                            ) and image_source.startswith(("http://", "https://")):
                                # Handle URL. Fetch the full body with a timeout;
                                # Image.open(response.raw) is unsafe because .raw
                                # bypasses requests' content decoding (gzip etc.)
                                # and an un-timed request can hang forever.
                                response = requests.get(image_source, timeout=30)
                                response.raise_for_status()  # Raise an exception for bad status codes
                                pil_image = Image.open(io.BytesIO(response.content))
                            elif isinstance(image_source, str):
                                # Handle base64 string
                                # Remove potential data URI prefix (e.g., "data:image/png;base64,")
                                if "," in image_source:
                                    image_source = image_source.split(",", 1)[1]
                                image_bytes = base64.b64decode(image_source)
                                pil_image = Image.open(io.BytesIO(image_bytes))

                            elif isinstance(image_source, Image.Image):
                                # Handle direct PIL.Image input
                                pil_image = image_source

                            if pil_image:
                                processed_content.append(
                                    {"type": "image", "image": pil_image}
                                )
                            else:
                                print(
                                    f"Warning: Could not load image from source: {type(image_source)}"
                                )

                        except requests.exceptions.RequestException as e:
                            print(
                                f"Warning: Failed to fetch image from URL {image_source}: {e}"
                            )
                        except (binascii.Error, ValueError) as e:
                            print(f"Warning: Failed to decode base64 image string: {e}")
                        except (IOError, UnidentifiedImageError) as e:
                            print(f"Warning: Failed to open image: {e}")
                        except Exception as e:
                            print(
                                f"Warning: An unexpected error occurred while processing image: {e}"
                            )

                    else:
                        print(
                            "Warning: Image item provided but 'image_url' or 'image' key is missing or empty."
                        )

                # Add handling for other potential input types if necessary
        elif isinstance(input_content, str):
            # Handle simple string content (common for assistant messages)
            processed_content.append({"type": "text", "text": input_content})
        # else: Handle unexpected content format (e.g., log warning, skip message)

        if role and processed_content:
            nebu_conversation.append({"role": role, "content": processed_content})
        # else: Handle missing role or empty content if needed

    return {"messages": nebu_conversation}
|