autonomous-app 0.3.18__py3-none-any.whl → 0.3.19__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
@@ -0,0 +1,244 @@
+import io
+import json
+import os
+import random
+import wave
+
+from google import genai
+from google.genai import types
+from pydub import AudioSegment
+
+from autonomous import log
+from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
+from autonomous.model.automodel import AutoModel
+
+
+class GeminiAIModel(AutoModel):
+    _client = None
+    _text_model = "gemini-2.5-pro"
+    _summary_model = "gemini-2.5-flash"
+    _image_model = "gemini-2.5-flash-image-preview"
+    _json_model = "gemini-2.5-pro"
+    _stt_model = "gemini-2.5-flash"
+    _tts_model = "gemini-2.5-flash-preview-tts"
+    messages = ListAttr(StringAttr(default=[]))
+    name = StringAttr(default="agent")
+    instructions = StringAttr(
+        default="You are highly skilled AI trained to assist with various tasks."
+    )
+    description = StringAttr(
+        default="A helpful AI assistant trained to assist with various tasks."
+    )
+
+    @property
+    def client(self):
+        if not self._client:
+            self._client = genai.Client(api_key=os.environ.get("GOOGLEAI_KEY"))
+        return self._client
+
+    def _add_function(self, user_function):
+        # This function is now a bit more advanced to conform to the Tool Use schema
+        tool_schema = {
+            "name": user_function.get("name"),
+            "description": user_function.get("description"),
+            "parameters": user_function.get("parameters"),
+        }
+
+        # Validate that the schema has a name, description, and parameters
+        if not all(
+            [tool_schema["name"], tool_schema["description"], tool_schema["parameters"]]
+        ):
+            raise ValueError(
+                "Tool schema must have a 'name', 'description', and 'parameters' field."
+            )
+
+        return tool_schema
+
+    def _create_wav_header(
+        self, raw_audio_bytes, channels=1, rate=24000, sample_width=2
+    ):
+        """Creates an in-memory WAV file from raw PCM audio bytes."""
+        buffer = io.BytesIO()
+        with wave.open(buffer, "wb") as wav_file:
+            # Set audio parameters
+            wav_file.setnchannels(channels)
+            wav_file.setsampwidth(sample_width)
+            wav_file.setframerate(rate)  # 24,000 Hz sample rate by default
+
+            # Write the raw audio data
+            wav_file.writeframes(raw_audio_bytes)
+
+        buffer.seek(0)
+        return buffer
+
+    def generate_json(self, message, function, additional_instructions=""):
+        # The API call must use the 'tools' parameter instead of 'response_json_schema'
+        function_definition = self._add_function(function)
+
+        response = self.client.models.generate_content(
+            model=self._json_model,
+            contents=message,
+            config=types.GenerateContentConfig(
+                system_instruction=f"{self.instructions}.{additional_instructions}",
+                tools=[types.Tool(function_declarations=[function_definition])],
+                tool_config={
+                    "function_calling_config": {
+                        "mode": "ANY",  # Force a function call
+                    }
+                },
+            ),
+        )
+
+        # The response is now a ToolCall, not a JSON string
+        try:
+            # log(response.candidates[0].content.parts[0].function_call, _print=True)
+            tool_call = response.candidates[0].content.parts[0].function_call
+            if tool_call and tool_call.name == function["name"]:
+                return tool_call.args
+            else:
+                log(
+                    "==== Model did not return a tool call or returned the wrong one. ===="
+                )
+                log(f"Response: {response.text}", _print=True)
+                return {}
+        except Exception as e:
+            log(f"==== Failed to parse ToolCall response: {e} ====")
+            return {}
+
+    def generate_text(self, message, additional_instructions=""):
+        response = self.client.models.generate_content(
+            model=self._text_model,
+            config=types.GenerateContentConfig(
+                system_instruction=f"{self.instructions}.{additional_instructions}",
+            ),
+            contents=message,
+        )
+
+        # log(results, _print=True)
+        # log("=================== END REPORT ===================", _print=True)
+        return response.text
+
+    def generate_audio_text(self, audio_file):
+        response = self.client.models.generate_content(
+            model=self._stt_model,
+            contents=[
+                "Transcribe this audio clip",
+                types.Part.from_bytes(
+                    data=audio_file,
+                    mime_type="audio/mp3",
+                ),
+            ],
+        )
+        return response.text
+
+    def generate_audio(self, prompt, voice=None):
+        voice = voice or random.choice(
+            [
+                "Zephyr",
+                "Puck",
+                "Charon",
+                "Kore",
+                "Fenrir",
+                "Leda",
+                "Orus",
+                "Aoede",
+                "Callirhoe",
+                "Autonoe",
+                "Enceladus",
+                "Iapetus",
+                "Umbriel",
+                "Algieba",
+                "Despina",
+                "Erinome",
+                "Algenib",
+                "Rasalgethi",
+                "Laomedeia",
+                "Achernar",
+                "Alnilam",
+                "Schedar",
+                "Gacrux",
+                "Pulcherrima",
+                "Achird",
+                "Zubenelgenubi",
+                "Vindemiatrix",
+                "Sadachbia",
+                "Sadaltager",
+                "Sulafar",
+            ]
+        )
+
+        try:
+            response = self.client.models.generate_content(
+                model=self._tts_model,
+                contents=prompt,
+                config=types.GenerateContentConfig(
+                    response_modalities=["AUDIO"],
+                    speech_config=types.SpeechConfig(
+                        voice_config=types.VoiceConfig(
+                            prebuilt_voice_config=types.PrebuiltVoiceConfig(
+                                voice_name=voice,
+                            )
+                        )
+                    ),
+                ),
+            )
+            blob = response.candidates[0].content.parts[0].inline_data
+
+            # Create a WAV file in memory from the raw audio bytes
+            wav_buffer = self._create_wav_header(blob.data)
+
+            # 2. Load the WAV audio using pydub, which will now correctly read the header
+            audio_segment = AudioSegment.from_file(wav_buffer, format="wav")
+
+            # 3. Create a new in-memory buffer for the MP3 output
+            mp3_buffer = io.BytesIO()
+
+            # 4. Export the audio segment directly to the in-memory buffer
+            audio_segment.export(mp3_buffer, format="mp3")
+
+            # 5. Return the bytes from the buffer, not the filename
+            return mp3_buffer.getvalue()
+
+        except Exception as e:
+            log(
+                f"==== Error: Unable to generate audio ====\n{type(e)}:{e}", _print=True
+            )
+            # You can return a default empty byte string or re-raise the exception
+            raise e
+
+    def generate_image(self, prompt, **kwargs):
+        image = None
+        try:
+            response = self.client.models.generate_content(
+                model=self._image_model,
+                contents=[prompt],
+            )
+            image_parts = [
+                part.inline_data.data
+                for part in response.candidates[0].content.parts
+                if part.inline_data
+            ]
+            # log(image_parts, _print=True)
+            image = image_parts[0]
+        except Exception as e:
+            log(f"==== Error: Unable to create image ====\n\n{e}", _print=True)
+            raise e
+        return image
+
+    def summarize_text(self, text, primer=""):
+        primer = primer or self.instructions
+        response = self.client.models.generate_content(
+            model=self._summary_model,
+            config=types.GenerateContentConfig(
+                system_instruction=f"{primer}",
+            ),
+            contents=text,
+        )
+        log(response)
+        try:
+            result = response.candidates[0].content.parts[0].text
+        except Exception as e:
+            log(f"{type(e)}:{e}\n\n Unable to generate content ====")
+            return None
+
+        return result
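
Note: the new Gemini backend above gets structured output by forcing a function call (`tool_config` mode `"ANY"`) rather than asking for raw JSON. A minimal usage sketch of `generate_json`; the import path is an assumption (the diff viewer dropped the file name) and the schema is purely illustrative:

# Hypothetical caller for GeminiAIModel.generate_json; import path assumed.
from myapp.ai_models import GeminiAIModel  # assumed module path

extract_person = {
    "name": "extract_person",
    "description": "Pull a person's name and age out of free text.",
    "parameters": {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
        },
        "required": ["name", "age"],
    },
}

model = GeminiAIModel()
args = model.generate_json("Alice turned 32 last week.", extract_person)
# mode="ANY" forces a tool call, so args is the parsed argument dict
# (e.g. {"name": "Alice", "age": 32}) or {} when the call fails.
print(args)
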
@@ -0,0 +1,99 @@
+import io
+import json
+import os
+import random
+import time
+from base64 import b64decode
+
+import openai
+from ollama import ChatResponse, chat
+
+from autonomous import log
+from autonomous.model.autoattr import DictAttr, ListAttr, StringAttr
+from autonomous.model.automodel import AutoModel
+
+
+class LocalAIModel(AutoModel):
+    _client = None
+    instructions = StringAttr(
+        default="You are highly skilled AI trained to assist with various tasks."
+    )
+    description = StringAttr(
+        default="A helpful AI assistant trained to assist with various tasks."
+    )
+
+    @property
+    def client(self):
+        if not self._client:
+            self._client = "deepseek-r1"  # OpenAI(api_key=os.environ.get("OPENAI_KEY"))
+        return self._client
+
+    def clear_agent(self):
+        pass
+
+    def clear_agents(self):
+        pass
+
+    # def _get_agent_id(self):
+    #     pass
+
+    # def _add_function(self, user_function):
+    pass
+
+    def _format_messages(self, messages):
+        pass
+
+    def clear_files(self, file_id=None):
+        pass
+
+    def attach_file(self, file_contents, filename="dbdata.json"):
+        pass
+
+    def generate_json(self, messages, function, additional_instructions=""):
+        message = messages + additional_instructions
+        message += f"""
+        IMPORTANT: Respond in JSON FORMAT using the SCHEMA below. DO NOT add any text to the response outside of the supplied JSON schema:
+        {function}
+        """
+        response: ChatResponse = chat(
+            model=self.client,
+            messages=[
+                {
+                    "role": "user",
+                    "content": message,
+                },
+            ],
+        )
+        return response.message.content
+
+    def generate_text(self, messages, additional_instructions=""):
+        message = messages + additional_instructions
+        response: ChatResponse = chat(
+            model=self.client,
+            messages=[
+                {
+                    "role": "user",
+                    "content": message,
+                },
+            ],
+        )
+        return response.message.content
+
+    def generate_audio(self, prompt, **kwargs):
+        raise NotImplementedError
+
+    def generate_image(self, prompt, **kwargs):
+        raise NotImplementedError
+
+    def summarize_text(self, text, primer=""):
+        response: ChatResponse = chat(
+            model=self.client,
+            messages=[
+                {
+                    "role": "system",
+                    "content": f"You are a highly skilled AI trained in language comprehension and summarization.{primer}",
+                },
+                {"role": "user", "content": text},
+            ],
+        )
+        return response.message.content
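
Note: unlike the Gemini backend, this local backend has no tool-calling path. `generate_json` simply appends the schema to the prompt and trusts the model to comply, so callers should parse defensively. A rough illustration of that pattern using the same `ollama` chat API the file imports; it assumes a local Ollama server with the deepseek-r1 model pulled, and the schema shown is illustrative:

# Sketch of the prompt-embedded-schema pattern in LocalAIModel.generate_json.
import json

from ollama import chat

schema = {"name": {"type": "string"}, "age": {"type": "integer"}}
message = (
    "Alice turned 32 last week.\n"
    "IMPORTANT: Respond in JSON FORMAT using the SCHEMA below. "
    f"DO NOT add any text outside of the supplied JSON schema:\n{schema}"
)
response = chat(model="deepseek-r1", messages=[{"role": "user", "content": message}])

# Compliance is not guaranteed, so validate before use.
try:
    data = json.loads(response.message.content)
except json.JSONDecodeError:
    data = {}
print(data)
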
@@ -16,7 +16,7 @@ from autonomous.model.automodel import AutoModel
 
 class OpenAIModel(AutoModel):
     _client = None
-    _text_model = "gpt-4o-mini"
+    _text_model = "o3-mini"
     _image_model = "dall-e-3"
     _json_model = "gpt-4o"
     agent_id = StringAttr()
@@ -40,7 +40,10 @@ class OpenAIModel(AutoModel):
     def delete(self):
         self.clear_files()
         if self.agent_id:
-            self.client.beta.assistants.delete(self.agent_id)
+            try:
+                self.client.beta.assistants.delete(self.agent_id)
+            except openai_NotFoundError:
+                log(f"==== Agent with ID: {self.agent_id} not found ====")
         return super().delete()
 
     def clear_agent(self):
@@ -51,15 +54,16 @@ class OpenAIModel(AutoModel):
 
     def clear_agents(self):
         assistants = self.client.beta.assistants.list().data
-        log(assistants)
-        for assistant in assistants:
-            log(f"==== Deleting Agent with ID: {assistant.id} ====")
-            try:
-                self.client.beta.assistants.delete(assistant.id)
-            except openai_NotFoundError:
-                log(f"==== Agent with ID: {assistant.id} not found ====")
-        self.agent_id = ""
-        self.save()
+        if assistants:
+            log(assistants)
+            for assistant in assistants:
+                log(f"==== Deleting Agent with ID: {assistant.id} ====")
+                try:
+                    self.client.beta.assistants.delete(assistant.id)
+                except openai_NotFoundError:
+                    log(f"==== Agent with ID: {assistant.id} not found ====")
+            self.agent_id = ""
+            self.save()
 
     def _get_agent_id(self):
         try:
@@ -79,9 +83,9 @@ class OpenAIModel(AutoModel):
 
     def clear_files(self, file_id=None):
         if not file_id:
-            for vs in self.client.beta.vector_stores.list().data:
+            for vs in self.client.vector_stores.list().data:
                 try:
-                    self.client.beta.vector_stores.delete(vs.id)
+                    self.client.vector_stores.delete(vs.id)
                 except openai_NotFoundError:
                     log(f"==== Vector Store {vs.id} not found ====")
             for sf in self.client.files.list().data:
@@ -97,8 +101,8 @@ class OpenAIModel(AutoModel):
         self.tools["file_search"] = {"type": "file_search"}
         # Create a vector store
        try:
-            if vs := self.client.beta.vector_stores.list().data:
-                self.vector_store = self.client.beta.vector_stores.retrieve(
+            if vs := self.client.vector_stores.list().data:
+                self.vector_store = self.client.vector_stores.retrieve(
                    vector_store_id=vs[0].id
                ).id
            else:
@@ -106,17 +110,17 @@ class OpenAIModel(AutoModel):
                     self.client.files.delete(file_id=sf.id)
                 raise FileNotFoundError("No vector store found")
         except FileNotFoundError:
-            self.vector_store = self.client.beta.vector_stores.create(
+            self.vector_store = self.client.vector_stores.create(
                 name="World Reference",
                 expires_after={"anchor": "last_active_at", "days": 14},
             ).id
-        log(f"==== Vector Store ID: {self.vector_store}====")
+        log(f"==== Vector Store ID: {self.vector_store}====", _print=True)
         # Attach File
         file_obj = self.client.files.create(
             file=(filename, file_contents), purpose="assistants"
         )
-        log(f"==== FileStore ID: {file_obj.id}====")
-        self.client.beta.vector_stores.files.create(
+        log(f"==== FileStore ID: {file_obj.id}====", _print=True)
+        self.client.vector_stores.files.create(
             vector_store_id=self.vector_store,
             file_id=file_obj.id,
         )
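
Note: most of the hunks above are the mechanical rename from `client.beta.vector_stores` to `client.vector_stores`, tracking the openai-python SDK's promotion of vector stores out of the beta namespace. A minimal sketch of the updated call shape, assuming a recent openai-python release and an `OPENAI_API_KEY` in the environment; the `expires_after` payload is taken from the diff itself:

# Minimal sketch of the stable (non-beta) vector-stores namespace.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
vector_store = client.vector_stores.create(
    name="World Reference",
    expires_after={"anchor": "last_active_at", "days": 14},
)
print(vector_store.id)
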
@@ -198,14 +202,14 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
             ]:
                 running_job = False
 
-        except openai.BadRequestError as e:
+        except openai.BadRequestError as err:
             # Handle specific bad request errors
-            error_message = e.json_body.get("error", {}).get("message", "")
-            if "already has an active run" in error_message:
+            log(f"==== Error: {err} ====", _print=True)
+            if "already has an active run" in str(err):
                 log("Previous run is still active. Waiting...", _print=True)
                 time.sleep(2)  # wait before retrying or checking run status
             else:
-                raise e
+                raise err
 
         # while run.status in ["queued", "in_progress"]:
         #     run = self.client.beta.threads.runs.retrieve(
@@ -217,7 +221,7 @@
             log(f"==== !!! ERROR !!!: {run.last_error} ====", _print=True)
             return None
         log("=================== RUN COMPLETED ===================", _print=True)
-        log(run.status, _print=True)
+        # log(run.status, _print=True)
         if run.status == "completed":
             response = self.client.beta.threads.messages.list(thread_id=thread.id)
             results = response.data[0].content[0].text.value
@@ -236,8 +240,8 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
                 log(f"==== Invalid JSON:\n{results}", _print=True)
                 return {}
         else:
-            log(f"==== Results: {results}", _print=True)
-            log("=================== END REPORT ===================", _print=True)
+            # log(f"==== Results: {results}", _print=True)
+            # log("=================== END REPORT ===================", _print=True)
             return results
 
     def generate_text(self, messages, additional_instructions=""):
@@ -278,16 +282,34 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
 
     def generate_audio(self, prompt, **kwargs):
         voice = kwargs.get("voice") or random.choice(
-            ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
+            [
+                "alloy",
+                "ash",
+                "ballad",
+                "coral",
+                "echo",
+                "fable",
+                "onyx",
+                "nova",
+                "sage",
+                "shimmer",
+            ]
         )
         response = self.client.audio.speech.create(
             model="tts-1",
             voice=voice,
             input=prompt,
         )
-        log(response, _print=True)
+        # log(response, _print=True)
         return response.read()
 
+    def generate_audio_text(self, audio_file, **kwargs):
+        response = self.client.audio.transcriptions.create(
+            model="gpt-4o-transcribe", file=audio_file, language="en", **kwargs
+        )
+        log(response, _print=True)
+        return response.text
+
     def generate_image(self, prompt, **kwargs):
         image = None
         try:
@@ -299,7 +321,8 @@ IMPORTANT: Always use the function 'response' tool to respond to the user with o
             )
             image_dict = response.data[0]
         except Exception as e:
-            print(f"==== Error: Unable to create image ====\n\n{e}")
+            log(f"==== Error: Unable to create image ====\n\n{e}", _print=True)
+            raise e
         else:
             image = b64decode(image_dict.b64_json)
         return image
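
Note: the OpenAI audio surface grows here: four new TTS voices and a new `generate_audio_text` transcription method built on `client.audio.transcriptions.create` with the `gpt-4o-transcribe` model. A hypothetical round trip through both methods; the class's import path is an assumption:

# Hypothetical TTS -> STT round trip; OpenAIModel's import path is assumed.
from myapp.ai_models import OpenAIModel  # assumed module path

model = OpenAIModel()
mp3_bytes = model.generate_audio("Testing one two three.", voice="sage")

with open("test.mp3", "wb") as f:
    f.write(mp3_bytes)

with open("test.mp3", "rb") as f:
    transcript = model.generate_audio_text(f)
print(transcript)  # expected to be close to "Testing one two three."
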
@@ -14,12 +14,6 @@ class TextAgent(BaseAgent):
         default="A helpful AI assistant trained to assist with generating text according to the given requirements."
     )
 
-    def clear_files(self, file_id=None):
-        return self.get_client().clear_files(file_id)
-
-    def attach_file(self, file_contents, filename="dbdata.json"):
-        return self.get_client().attach_file(file_contents, filename)
-
     def summarize_text(self, text, primer=""):
         return self.get_client().summarize_text(text, primer)
 
autonomous/db/fields.py CHANGED
@@ -721,8 +721,7 @@ class EmbeddedDocumentField(BaseField):
             or issubclass(document_type, EmbeddedDocument)
         ):
             self.error(
-                "Invalid embedded document class provided to an "
-                "EmbeddedDocumentField"
+                "Invalid embedded document class provided to an EmbeddedDocumentField"
             )
 
         self.document_type_obj = document_type
@@ -117,10 +117,7 @@ class BaseQuerySet:
         if q_obj:
             # Make sure proper query object is passed.
             if not isinstance(q_obj, QNode):
-                msg = (
-                    "Not a query object: %s. "
-                    "Did you intend to use key=value?" % q_obj
-                )
+                msg = "Not a query object: %s. Did you intend to use key=value?" % q_obj
                 raise InvalidQueryError(msg)
             query &= q_obj
 
@@ -1,3 +1,4 @@
+from autonomous import log
 from autonomous.db.errors import OperationError
 from autonomous.db.queryset.base import (
     CASCADE,
@@ -4,6 +4,7 @@ import pymongo
 from bson import SON, ObjectId
 from bson.dbref import DBRef
 
+from autonomous import log
 from autonomous.db.base import UPDATE_OPERATORS
 from autonomous.db.common import _import_class
 from autonomous.db.errors import InvalidQueryError
@@ -76,14 +77,18 @@ def query(_doc_cls=None, **kwargs):
     """Transform a query from Django-style format to Mongo format."""
     mongo_query = {}
     merge_query = defaultdict(list)
+
+    # Iterate over sorted keyword arguments
     for key, value in sorted(kwargs.items()):
         if key == "__raw__":
             handle_raw_query(value, mongo_query)
             continue
 
+        # Split the key into parts based on '__'
         parts = key.rsplit("__")
         indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()]
         parts = [part for part in parts if not part.isdigit()]
+
         # Check for an operator and transform to mongo-style if there is
         op = None
         if len(parts) > 1 and parts[-1] in MATCH_OPERATORS:
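
Note: the comments added above annotate the key-parsing loop: a Django-style keyword such as `profile__age__gte` is split on `__`, numeric parts are peeled off as list indices, and a trailing match operator becomes the Mongo `$`-operator. A toy stand-in for what the loop computes, not the library's actual helper:

# Toy illustration of the Django-style key transform performed above.
key = "profile__age__gte"
parts = key.split("__")        # ["profile", "age", "gte"]
op = parts.pop()               # trailing part is a MATCH_OPERATOR: "gte"
mongo_query = {".".join(parts): {"$" + op: 18}}
print(mongo_query)             # {'profile.age': {'$gte': 18}}
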
@@ -93,6 +98,7 @@
         if len(parts) > 1 and parts[-1] == "":
             parts.pop()
 
+        # Check for negation
         negate = False
         if len(parts) > 1 and parts[-1] == "not":
             parts.pop()
@@ -115,7 +121,7 @@
             if isinstance(field, str):
                 parts.append(field)
                 append_field = False
-            # is last and CachedReferenceField
+            # Handle CachedReferenceField
             elif isinstance(field, CachedReferenceField) and fields[-1] == field:
                 parts.append("%s._id" % field.db_field)
             else:
@@ -139,17 +145,14 @@
         # Raise an error if the in/nin/all/near param is not iterable.
         value = _prepare_query_for_iterable(field, op, value)
 
-        # If we're querying a GenericReferenceField, we need to alter the
-        # key depending on the value:
-        # * If the value is a DBRef, the key should be "field_name._ref".
-        # * If the value is an ObjectId, the key should be "field_name._ref.$id".
+        # Handle GenericReferenceField
         if isinstance(field, GenericReferenceField):
             if isinstance(value, DBRef):
                 parts[-1] += "._ref"
             elif isinstance(value, ObjectId):
                 parts[-1] += "._ref.$id"
 
-        # if op and op not in COMPARISON_OPERATORS:
+        # Handle different operators
         if op:
             if op in GEO_OPERATORS:
                 value = _geo_operator(field, op, value)
@@ -166,9 +169,7 @@
                 value = field.prepare_query_value(op, value)
                 value = {"$elemMatch": value}
             elif op in CUSTOM_OPERATORS:
-                NotImplementedError(
-                    'Custom method "%s" has not ' "been implemented" % op
-                )
+                NotImplementedError('Custom method "%s" has not been implemented' % op)
             elif op not in STRING_OPERATORS:
                 value = {"$" + op: value}
 
@@ -439,7 +440,7 @@ def _geo_operator(field, op, value):
             value = {"$within": {"$box": value}}
         else:
             raise NotImplementedError(
                'Geo method "%s" has not been implemented for a GeoPointField' % op
            )
    else:
        if op == "geo_within":
@@ -64,6 +64,13 @@ class ReferenceAttr(GenericReferenceField):
 class ListAttr(ListField):
     def __get__(self, instance, owner):
         results = super().__get__(instance, owner)
+
+        # sanity check
+        if not isinstance(results, list):
+            super().__set__(instance, [])
+            results = super().__get__(instance, owner)
+
+        # log(f"ListAttr: {results}")
         if isinstance(self.field, ReferenceAttr):
             i = 0
             while i < len(results):
@@ -78,9 +85,6 @@
                 # log(f"Object Not Found: {results[i]}")
         return results
 
-    # def append(self, obj):
-    #     results = super().__get__(instance, owner) or []
-
 
 class DictAttr(DictField):
     def __get__(self, instance, owner):
@@ -96,6 +100,16 @@
                 results[key] = lazy_obj
         return results
 
+    # def __set__(self, instance, value):
+    #     import traceback
+
+    #     traceback.print_stack()
+
+    #     log(value, instance.player_messages, _print=True)
+    #     result = super().__set__(instance, value) or {}
+    #     log(value, instance.player_messages, _print=True)
+    #     return result
+
 
 class EnumAttr(EnumField):
     pass
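
Note: the `ListAttr.__get__` change above makes the descriptor self-healing: if the stored value is somehow not a list, it is reset to `[]` before any reference resolution runs. A toy illustration of that pattern, independent of the library's base classes:

# Toy illustration of the self-healing descriptor pattern added to
# ListAttr.__get__: coerce a bad stored value back to [] before use.
class SelfHealingList:
    def __set_name__(self, owner, name):
        self.name = "_" + name

    def __get__(self, instance, owner):
        value = getattr(instance, self.name, None)
        if not isinstance(value, list):  # sanity check, as in the diff
            value = []
            setattr(instance, self.name, value)
        return value

    def __set__(self, instance, value):
        setattr(instance, self.name, value)


class Doc:
    items = SelfHealingList()


d = Doc()
print(d.items)  # [] even though nothing was ever assigned
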