nebu 0.1.91__py3-none-any.whl → 0.1.93__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nebu/cache.py CHANGED
@@ -7,6 +7,8 @@ import redis
 import socks  # Add socks import
 from pydantic import BaseModel, Field
 
+from nebu.logging import logger  # Import the logger
+
 
 class OwnedValue(BaseModel):
     created_at: int = Field(default_factory=lambda: int(time.time()))
@@ -30,11 +32,11 @@ class Cache:
         Also checks for REDIS_URL and prefers that if set.
         """
         redis_url = os.environ.get("REDIS_URL")
-        print("REDIS_URL: ", redis_url)
+        logger.debug(f"REDIS_URL: {redis_url}")
         namespace = os.environ.get("NEBU_NAMESPACE")
         if not namespace:
             raise ValueError("NEBU_NAMESPACE environment variable is not set")
-        print("NAMESPACE: ", namespace)
+        logger.debug(f"NAMESPACE: {namespace}")
         self.redis_client = None
         connection_info = ""
 
@@ -43,9 +45,11 @@ class Cache:
             # Use the proxy settings provided by tailscaled
             socks.set_default_proxy(socks.SOCKS5, "localhost", 1055)
             socket.socket = socks.socksocket
-            print("Configured SOCKS5 proxy for socket connections via localhost:1055")
+            logger.info(
+                "Configured SOCKS5 proxy for socket connections via localhost:1055"
+            )
         except Exception as proxy_err:
-            print(f"Failed to configure SOCKS proxy: {proxy_err}")
+            logger.warning(f"Failed to configure SOCKS proxy: {proxy_err}")
             # Depending on requirements, you might want to raise an error here
             # or proceed without the proxy if it's optional.
             # For now, we'll print the error and continue, but the Redis connection
@@ -70,12 +74,12 @@ class Cache:
 
             # Ping the server to ensure connection is established
             self.redis_client.ping()
-            print(f"Successfully connected to Redis using {connection_info}")
+            logger.info(f"Successfully connected to Redis using {connection_info}")
 
             self.prefix = f"cache:{namespace}"
-            print("using prefix", self.prefix)
+            logger.info(f"Using cache prefix: {self.prefix}")
         except Exception as e:
-            print(f"Error connecting to Redis: {e}")
+            logger.error(f"Error connecting to Redis: {e}")
             # Ensure client is None if connection fails at any point
             self.redis_client = None
 
@@ -85,7 +89,7 @@ class Cache:
         Returns None if the key does not exist or connection failed.
         """
         if not self.redis_client:
-            print("Redis client not connected.")
+            logger.warning("Redis client not connected.")
             return None
         try:
             key = f"{self.prefix}:{key}"
@@ -93,7 +97,7 @@ class Cache:
             result = self.redis_client.get(key)
             return cast(str | None, result)
         except Exception as e:
-            print(f"Error getting key '{key}' from Redis: {e}")
+            logger.error(f"Error getting key '{key}' from Redis: {e}")
             return None
 
     def set(self, key: str, value: str, expiry_seconds: int | None = None) -> bool:
@@ -103,7 +107,7 @@ class Cache:
         Returns True if successful, False otherwise (e.g., connection failed).
        """
         if not self.redis_client:
-            print("Redis client not connected.")
+            logger.warning("Redis client not connected.")
            return False
         try:
             key = f"{self.prefix}:{key}"
@@ -116,5 +120,5 @@ class Cache:
             result = self.redis_client.set(key, value)
             return cast(bool, result)
         except Exception as e:
-            print(f"Error setting key '{key}' in Redis: {e}")
+            logger.error(f"Error setting key '{key}' in Redis: {e}")
             return False
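
Taken together, the nebu/cache.py hunks route every Cache diagnostic through nebu.logging.logger (debug for environment values, info for successful connections, warning/error for failures) instead of print. Below is a minimal usage sketch of the class as it appears in this diff; the environment values are hypothetical, and the no-argument Cache() constructor call is an assumption not confirmed by the hunks shown here.

import os

# Hypothetical values for illustration only.
os.environ["REDIS_URL"] = "redis://localhost:6379/0"
os.environ["NEBU_NAMESPACE"] = "demo"

from nebu.cache import Cache

cache = Cache()  # connection status is now reported via logger, not print
if cache.set("greeting", "hello", expiry_seconds=60):
    value = cache.get("greeting")  # keys are stored under the "cache:demo:" prefix
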
@@ -18,6 +18,7 @@ from nebu.containers.models import (
     V1SSHKey,
     V1VolumePath,
 )
+from nebu.logging import logger  # Import the logger
 from nebu.meta import V1ResourceReference
 
 
@@ -53,8 +54,8 @@ class Container:
         self.nebu_host = current_server.server
         self.config = config
 
-        # print(f"nebu_host: {self.nebu_host}")
-        # print(f"api_key: {self.api_key}")
+        logger.debug(f"nebu_host: {self.nebu_host}")
+        logger.debug(f"api_key: {self.api_key}")
 
         # Construct the containers base URL
         self.containers_url = f"{self.nebu_host}/v1/containers"
@@ -72,7 +73,7 @@ class Container:
         )
 
         containers = V1Containers.model_validate(response.json())
-        print(f"containers: {containers}")
+        logger.debug(f"containers: {containers}")
         existing = next(
             (
                 c
@@ -82,7 +83,7 @@ class Container:
             None,
         )
 
-        print(f"existing: {existing}")
+        logger.debug(f"existing: {existing}")
 
         if not existing:
             # If there's no existing container, create one:
@@ -115,7 +116,7 @@ class Container:
             )
             create_response.raise_for_status()
             self.container = V1Container.model_validate(create_response.json())
-            print(f"Created container {self.container.metadata.name}")
+            logger.info(f"Created container {self.container.metadata.name}")
         else:
             # If container is found, check if anything has changed
             # Gather the updated fields from the function arguments
@@ -155,11 +156,13 @@ class Container:
 
             if not fields_changed:
                 # Nothing changed—do nothing
-                print(f"No changes detected for container {existing.metadata.name}.")
+                logger.info(
+                    f"No changes detected for container {existing.metadata.name}."
+                )
                 self.container = existing
                 return
 
-            print(
+            logger.info(
                 f"Detected changes for container {existing.metadata.name}, deleting and recreating."
             )
 
@@ -176,7 +179,7 @@ class Container:
                 headers={"Authorization": f"Bearer {self.api_key}"},
             )
             delete_response.raise_for_status()
-            print(f"Deleted container {existing.metadata.name}")
+            logger.info(f"Deleted container {existing.metadata.name}")
 
             # Now recreate the container using the updated parameters
             create_request = V1ContainerRequest(
@@ -205,7 +208,7 @@ class Container:
             )
             create_response.raise_for_status()
             self.container = V1Container.model_validate(create_response.json())
-            print(f"Recreated container {self.container.metadata.name}")
+            logger.info(f"Recreated container {self.container.metadata.name}")
 
         # Save constructor params to `self` for reference, like you do in ReplayBuffer.
         self.kind = "Container"
@@ -243,7 +246,7 @@ class Container:
             headers={"Authorization": f"Bearer {self.api_key}"},
         )
         response.raise_for_status()
-        print(f"Deleted container {self.name} in namespace {self.namespace}")
+        logger.info(f"Deleted container {self.name} in namespace {self.namespace}")
 
     @classmethod
     def get(
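
The remaining hunks apply the same migration to the Container class: the create-or-reuse flow now reports each decision through logger.info, and the verbose container/lookup dumps move to logger.debug. The following is a compressed, self-contained sketch of the decision logic those hunks trace; the DesiredSpec type and plan_action helper are illustrative placeholders, not the package's API, and the real code performs the create/delete steps through HTTP requests to the Nebu API.

from dataclasses import dataclass

from nebu.logging import logger


@dataclass
class DesiredSpec:
    # Placeholder stand-in for the fields the real code compares (image, env, etc.).
    image: str
    command: str | None = None


def plan_action(existing: DesiredSpec | None, desired: DesiredSpec, name: str) -> str:
    # Mirrors the branching the hunks show: create, keep unchanged, or delete and recreate.
    logger.debug(f"existing: {existing}")
    if existing is None:
        return "create"
    if existing == desired:
        logger.info(f"No changes detected for container {name}.")
        return "keep"
    logger.info(f"Detected changes for container {name}, deleting and recreating.")
    return "recreate"

For example, plan_action(None, DesiredSpec(image="nginx"), "demo") returns "create", while passing two equal specs returns "keep" and logs the same "No changes detected" message seen in the diff.
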