autonomous-app 0.3.32__py3-none-any.whl → 0.3.34__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries; it is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- autonomous/__init__.py +1 -1
- autonomous/ai/audioagent.py +19 -10
- autonomous/ai/baseagent.py +96 -12
- autonomous/ai/imageagent.py +23 -2
- autonomous/ai/jsonagent.py +17 -11
- autonomous/ai/models/gemini.py +78 -59
- autonomous/ai/models/local_model.py +49 -111
- autonomous/ai/textagent.py +14 -10
- autonomous/db/db_sync.py +7 -8
- autonomous/taskrunner/__init__.py +1 -0
- {autonomous_app-0.3.32.dist-info → autonomous_app-0.3.34.dist-info}/METADATA +1 -1
- {autonomous_app-0.3.32.dist-info → autonomous_app-0.3.34.dist-info}/RECORD +16 -19
- autonomous/ai/models/aws.py +0 -317
- autonomous/ai/models/deepseek.py +0 -99
- autonomous/ai/models/openai.py +0 -347
- autonomous/tasks/__init__.py +0 -1
- /autonomous/{tasks/autotask.py → taskrunner/autotasks.py} +0 -0
- /autonomous/{tasks → taskrunner}/task_router.py +0 -0
- {autonomous_app-0.3.32.dist-info → autonomous_app-0.3.34.dist-info}/WHEEL +0 -0
- {autonomous_app-0.3.32.dist-info → autonomous_app-0.3.34.dist-info}/top_level.txt +0 -0
autonomous/__init__.py
CHANGED
autonomous/ai/audioagent.py
CHANGED
@@ -1,4 +1,5 @@
-import
+import os
+
 from autonomous import log
 from autonomous.ai.baseagent import BaseAgent
 from autonomous.model.autoattr import StringAttr
@@ -6,6 +7,9 @@ from autonomous.model.autoattr import StringAttr

 class AudioAgent(BaseAgent):
     name = StringAttr(default="audioagent")
+
+    provider = StringAttr(default="gemini")
+
     instructions = StringAttr(
         default="You are highly skilled AI trained to assist with generating audio files."
     )
@@ -13,16 +17,21 @@ class AudioAgent(BaseAgent):
         default="A helpful AI assistant trained to assist with generating audio files."
     )

-    def generate(self, prompt,
-        return self.get_client(
-
-
-    def generate_text(self, audio, **kwargs):
-        log("AudioAgent.generate_text is deprecated; use transcribe instead.")
-        return self.get_client().generate_audio_text(audio, **kwargs)
+    def generate(self, prompt, voice=None):
+        return self.get_client(
+            os.environ.get("TTS_AI_AGENT", self.provider)
+        ).generate_audio(prompt, voice=voice)

-    def transcribe(
-
+    def transcribe(
+        self, audio, prompt="Transcribe this audio clip", display_name="audio.mp3"
+    ):
+        return self.get_client(
+            os.environ.get("TRANSCRIBE_AI_AGENT", self.provider)
+        ).generate_transcription(
+            audio,
+            prompt=prompt,
+            display_name=display_name,
+        )

     def available_voices(self, filters=[]):
         return self.get_client().list_voices(filters=filters)
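
The rewritten generate() and transcribe() methods above pick their backend per call: an environment variable (TTS_AI_AGENT or TRANSCRIBE_AI_AGENT) wins when set, otherwise the agent's own provider field is used. Below is a minimal standalone sketch of that fallback; the DummyAgent class is illustrative and not part of the package.

    import os

    class DummyAgent:
        provider = "gemini"  # same default the diff gives AudioAgent

        def resolve_provider(self, env_var):
            # os.environ.get falls back to self.provider when the variable is unset
            return os.environ.get(env_var, self.provider)

    agent = DummyAgent()
    print(agent.resolve_provider("TTS_AI_AGENT"))         # -> "gemini" (no override set)
    os.environ["TRANSCRIBE_AI_AGENT"] = "local"
    print(agent.resolve_provider("TRANSCRIBE_AI_AGENT"))  # -> "local" (env var wins)
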
autonomous/ai/baseagent.py
CHANGED
@@ -1,18 +1,25 @@
 from autonomous import log
-from autonomous.model.autoattr import ReferenceAttr
+from autonomous.model.autoattr import ReferenceAttr, StringAttr
 from autonomous.model.automodel import AutoModel

 from .models.gemini import GeminiAIModel
 from .models.local_model import LocalAIModel
-from .models.openai import OpenAIModel


 class BaseAgent(AutoModel):
     meta = {"abstract": True, "allow_inheritance": True, "strict": False}

-
+    # 2. Map string names to classes
+    MODEL_REGISTRY = {
+        "local": LocalAIModel,
+        "gemini": GeminiAIModel,
+    }

-
+    # 3. Allow client to be ANY of the supported models
+    client = ReferenceAttr(choices=[LocalAIModel, GeminiAIModel])
+
+    # 4. Add a provider field (default to local, can be overridden per agent)
+    provider = StringAttr(default="gemini")

     def delete(self):
         if self.client:
@@ -22,12 +29,89 @@ class BaseAgent(AutoModel):
     def get_agent_id(self):
         return self.get_client().id

-    def get_client(self):
-
-
-
-
-
-
-
+    def get_client(self, provider=None):
+        # 5. Determine which class to use based on the provider string
+        model_class = self.MODEL_REGISTRY.get(provider or self.provider, LocalAIModel)
+        # If we already have a client, but it's the WRONG type (e.g. we switched providers), we might want to re-instantiate. For simplicity, we check if it exists first.
+        if not isinstance(self.client, model_class):
+            if self.client:
+                log(
+                    f"Re-instantiating client for agent {self.name} from {type(self.client).__name__} to {model_class.__name__}"
+                )
+                self.client.delete()
+            self.client = model_class(
+                name=self.name,
+                instructions=self.instructions,
+                description=self.description,
+            )
+            self.client.save()
+            self.save()
+
         return self.client
+
+    # def get_embedding(self, text):
+    #     """Helper to get embeddings for vector search"""
+    #     try:
+    #         res = requests.post(f"{self._media_url}/embeddings", json={"text": text})
+    #         res.raise_for_status()
+    #         return res.json()["embedding"]
+    #     except Exception:
+    #         return []
+
+    # def gather_context(self, prompt, focus_object_id=None):
+    #     """
+    #     Retrieves context string from Mongo/Redis.
+    #     Previously 'build_hybrid_context' in LocalAIModel.
+    #     """
+    #     context_str = ""
+
+    #     # 1. Fetch from MongoDB (Focus Object)
+    #     if focus_object_id:
+    #         try:
+    #             oid = (
+    #                 ObjectId(focus_object_id)
+    #                 if isinstance(focus_object_id, str)
+    #                 else focus_object_id
+    #             )
+    #             if main_obj := self._mongo_db.objects.find_one({"_id": oid}):
+    #                 context_str += f"### FOCUS OBJECT ###\n{main_obj}\n"
+    #                 ref_ids = main_obj.get("associations", []) or []
+    #                 if world_id := main_obj.get("world"):
+    #                     ref_ids.append(world_id)
+    #                 ref_ids.extend(main_obj.get("stories", []) or [])
+    #                 ref_ids.extend(main_obj.get("events", []) or [])

+    #                 if ref_ids:
+    #                     valid_oids = [
+    #                         ObjectId(rid) if isinstance(rid, str) else rid
+    #                         for rid in ref_ids
+    #                     ]
+    #                     if valid_oids:
+    #                         associated_objs = self._mongo_db.objects.find(
+    #                             {"_id": {"$in": valid_oids}}
+    #                         )
+    #                         context_str += "\n### ASSOCIATED REFERENCES ###\n"
+    #                         for obj in associated_objs:
+    #                             context_str += f"- {obj}\n"
+    #                         context_str += "\n"
+    #         except Exception as e:
+    #             print(f"Context Error: {e}")
+
+    #     # # 2. Fetch from Redis (Vector Search)
+    #     if len(prompt) > 10:
+    #         vector = self.get_embedding(prompt)
+    #         if vector:
+    #             try:
+    #                 q = "*=>[KNN 2 @vector $blob AS score]"
+    #                 params = {"blob": np.array(vector, dtype=np.float32).tobytes()}
+    #                 results = self._redis.ft("search_index").search(
+    #                     q, query_params=params
+    #                 )
+    #                 if results.docs:
+    #                     context_str += "\n### RELEVANT LORE ###\n"
+    #                     for doc in results.docs:
+    #                         context_str += f"- {doc.content}\n"
+    #             except Exception:
+    #                 pass
+
+    #     return context_str
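
The new get_client() above is the core of the 0.3.34 change: a string-keyed registry replaces the hard-wired OpenAI client, and the stored client is rebuilt only when its class no longer matches the requested provider. Here is a standalone sketch of that dispatch logic, with stub classes standing in for the real AutoModel-backed clients; it is not the package's code.

    class LocalAIModel:   # stub for the real local client
        pass

    class GeminiAIModel:  # stub for the real Gemini client
        pass

    MODEL_REGISTRY = {"local": LocalAIModel, "gemini": GeminiAIModel}

    def resolve_client(current_client, provider, default_class=LocalAIModel):
        # Unknown provider names fall back to the default class, as in the diff
        model_class = MODEL_REGISTRY.get(provider, default_class)
        if isinstance(current_client, model_class):
            return current_client   # keep the existing client
        return model_class()        # provider switched: build a fresh client

    client = resolve_client(None, "gemini")
    assert isinstance(client, GeminiAIModel)
    client = resolve_client(client, "unknown")  # falls back to LocalAIModel
    assert isinstance(client, LocalAIModel)
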
autonomous/ai/imageagent.py
CHANGED
@@ -1,9 +1,15 @@
+import os
+
 from autonomous.ai.baseagent import BaseAgent
 from autonomous.model.autoattr import StringAttr


 class ImageAgent(BaseAgent):
     name = StringAttr(default="imageagent")
+
+    # Force this agent to use Gemini
+    provider = StringAttr(default="gemini")
+
     instructions = StringAttr(
         default="You are highly skilled AI trained to assist with generating images."
     )
@@ -11,5 +17,20 @@ class ImageAgent(BaseAgent):
         default="A helpful AI assistant trained to assist with generating images."
     )

-    def generate(
-
+    def generate(
+        self,
+        prompt,
+        negative_prompt="",
+        aspect_ratio="3:4",
+        image_size="2K",
+        files=None,
+    ):
+        return self.get_client(
+            os.environ.get("IMAGE_AI_AGENT", self.provider)
+        ).generate_image(
+            prompt,
+            aspect_ratio=aspect_ratio,
+            negative_prompt=negative_prompt,
+            image_size=image_size,
+            files=files,
+        )
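
A hedged usage sketch for the expanded ImageAgent.generate() signature above. Constructing the agent with no arguments is an assumption: the real class is an AutoModel document and needs the package's database and Gemini credentials configured before it will run.

    from autonomous.ai.imageagent import ImageAgent

    agent = ImageAgent()  # assumed default construction; requires a configured environment
    result = agent.generate(
        "A watercolor map of a coastal city",
        negative_prompt="text, watermark",  # new keyword in 0.3.34
        aspect_ratio="3:4",                 # defaults shown in the diff
        image_size="2K",
        files=None,                         # optional reference images, uploaded as image/webp
    )
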
autonomous/ai/jsonagent.py
CHANGED
@@ -1,14 +1,16 @@
 import json
+import os

 from autonomous.ai.baseagent import BaseAgent
-from autonomous.model.autoattr import
-from autonomous.model.automodel import AutoModel
-
-from .models.openai import OpenAIModel
+from autonomous.model.autoattr import StringAttr


 class JSONAgent(BaseAgent):
     name = StringAttr(default="jsonagent")
+
+    # Force this agent to use Gemini
+    provider = StringAttr(default="gemini")
+
     instructions = StringAttr(
         default="You are highly skilled AI trained to assist with generating JSON formatted data."
     )
@@ -16,15 +18,19 @@ class JSONAgent(BaseAgent):
         default="A helpful AI assistant trained to assist with generating JSON formatted data."
     )

-    def generate(
-
-
+    def generate(
+        self, message, function, additional_instructions="", uri="", context=""
+    ):
+        result = self.get_client(
+            os.environ.get("JSON_AI_AGENT", self.provider)
+        ).generate_json(
+            message, function, additional_instructions, uri=uri, context=context
         )
         if isinstance(result, str):
-
+            try:
+                result = json.loads(result)
+            except json.JSONDecodeError:
+                raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
         elif not isinstance(result, dict):
             raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
         return result
-
-    def upload(self, file):
-        return self.get_client().upload(file=file)
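
JSONAgent.generate() now normalizes whatever the model client returns: dicts pass through, strings are parsed, and anything else (or unparseable JSON) raises a ValueError carrying the offending payload. A standalone sketch of that normalization step, not the package's code:

    import json

    def normalize_json_result(result):
        # Mirrors the branching added to JSONAgent.generate() in 0.3.34
        if isinstance(result, str):
            try:
                result = json.loads(result)
            except json.JSONDecodeError:
                raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
        elif not isinstance(result, dict):
            raise ValueError(f"Invalid JSON response from AI model.\n\n{result}")
        return result

    print(normalize_json_result('{"name": "test"}'))   # parsed to a dict
    print(normalize_json_result({"already": "dict"}))  # passed through unchanged
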
autonomous/ai/models/gemini.py
CHANGED
@@ -1,4 +1,5 @@
 import io
+import json
 import os
 import random
 import re
@@ -32,6 +33,7 @@ class GeminiAIModel(AutoModel):
     description = StringAttr(
         default="A helpful AI assistant trained to assist with various tasks."
     )
+    file_refs = ListAttr(StringAttr(default=[]))

     MAX_FILES = 14
     MAX_SUMMARY_TOKEN_LENGTH = 10000
@@ -111,56 +113,69 @@ class GeminiAIModel(AutoModel):
         buffer.seek(0)
         return buffer

-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def _add_context(self, context):
+        # Create in-memory file
+        context_data = (
+            json.dumps(context, indent=2) if isinstance(context, dict) else str(context)
+        )
+
+        f = io.BytesIO(context_data.encode("utf-8"))
+        f.name = f"context-{self.pk}"
+        return self._add_files([{"name": f.name, "file": f}])
+
+    def _add_files(self, file_list, mime_type="application/json"):
+        uploaded_files = []
+        for f in file_list[: self.MAX_FILES]:
+            fn = f["name"]
+            try:
+                result = self.client.files.delete(name=fn)
+            except Exception as e:
+                pass
+                # log(f"No existing file to delete for {fn}: {e}", _print=True)
+            else:
+                pass
+                # log(f"Deleting old version of {fn}: {result}", _print=True)
+
+            # If the content is raw bytes, wrap it in BytesIO
+            file_content = f["file"]
+            if isinstance(file_content, bytes):
+                fileobj = io.BytesIO(file_content)
+            else:
+                fileobj = file_content
             uploaded_file = self.client.files.upload(
-                file=fileobj
-                config={"mime_type":
+                file=fileobj,
+                config={"mime_type": mime_type, "display_name": fn},
             )
-
+            uploaded_files.append(uploaded_file)

             # This ensures the file is 'ACTIVE' before you use it in a prompt.
             while uploaded_file.state.name == "PROCESSING":
-                time.sleep(
+                time.sleep(0.5)
                 uploaded_file = self.client.get_file(uploaded_file.name)
-
-
-
-    def upload(self, file):
-        return self._add_files([file])
+        self.file_refs = [f.name for f in self.client.files.list()]  # Update file_refs
+        self.save()
+        return uploaded_files

-    def generate_json(
-
+    def generate_json(
+        self, message, function, additional_instructions="", uri="", context={}
+    ):
         function_definition = self._add_function(function)

         contents = [message]
-        if
+        if context:
+            contents.extend(self._add_context(context))
+            additional_instructions += (
+                f"\nUse the uploaded context file for reference: context-{self.pk}\n"
+            )
+
+        if uri:
             contents.append(
                 Part.from_uri(
                     file_uri=uri,
                     mime_type="application/json",
                 ),
             )
-
-        if files := kwargs.get("files"):
-            contents += self._add_files(files)
+            additional_instructions += "\nUse the provided uri file for reference\n"

         response = self.client.models.generate_content(
             model=self._json_model,
@@ -192,9 +207,15 @@ class GeminiAIModel(AutoModel):
             log(f"==== Failed to parse ToolCall response: {e} ====")
             return {}

-    def generate_text(self, message, additional_instructions="",
+    def generate_text(self, message, additional_instructions="", uri="", context={}):
         contents = [message]
-        if
+        if context:
+            contents.extend(self._add_context(context))
+            additional_instructions += (
+                f"\nUse the uploaded context file for reference: context-{self.pk}\n"
+            )
+
+        if uri:
             contents.append(
                 Part.from_uri(
                     file_uri=uri,
@@ -202,9 +223,6 @@ class GeminiAIModel(AutoModel):
                 ),
             )

-        if files := kwargs.get("files"):
-            contents += self._add_files(files)
-
         response = self.client.models.generate_content(
             model=self._text_model,
             config=types.GenerateContentConfig(
@@ -217,7 +235,7 @@ class GeminiAIModel(AutoModel):
         # log("=================== END REPORT ===================", _print=True)
         return response.text

-    def summarize_text(self, text, primer=""
+    def summarize_text(self, text, primer=""):
         primer = primer or self.instructions

         updated_prompt_list = []
@@ -248,14 +266,17 @@ class GeminiAIModel(AutoModel):
             full_summary += summary + "\n"
         return summary

-    def
-        self,
+    def generate_transcription(
+        self,
+        audio_file,
+        prompt="Transcribe this audio clip",
+        display_name="audio.mp3",
     ):
         myfile = self.client.files.upload(
             file=io.BytesIO(audio_file),
             config={
                 "mime_type": "audio/mp3",
-                "display_name":
+                "display_name": display_name,
             },
         )

@@ -277,7 +298,7 @@ class GeminiAIModel(AutoModel):
                 voices.append(voice)
         return voices

-    def generate_audio(self, prompt, voice=None
+    def generate_audio(self, prompt, voice=None):
         voice = voice or random.choice(self.list_voices())
         try:
             response = self.client.models.generate_content(
@@ -318,22 +339,20 @@ class GeminiAIModel(AutoModel):
             # You can return a default empty byte string or re-raise the exception
             raise e

-    def generate_image(
+    def generate_image(
+        self,
+        prompt,
+        negative_prompt="",
+        files=None,
+        aspect_ratio="3:4",
+        image_size="2K",
+    ):
         image = None
         contents = [prompt]

-        if
-
-
-            media = io.BytesIO(f)
-            myfile = self.client.files.upload(
-                file=media,
-                config={"mime_type": "image/webp", "display_name": fn},
-            )
-            contents += [myfile]
-            counter += 1
-            if counter >= self.MAX_FILES:
-                break
+        if files:
+            filerefs = self._add_files(files, mime_type="image/webp")
+            contents.extend(filerefs)

         try:
             # log(self._image_model, contents, _print=True)
@@ -364,8 +383,8 @@ class GeminiAIModel(AutoModel):
                     ),
                 ],
                 image_config=types.ImageConfig(
-                    aspect_ratio=
-                    image_size=
+                    aspect_ratio=aspect_ratio,
+                    image_size=image_size,
                 ),
             ),
         )
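
The new _add_context() helper above turns an arbitrary context value into an in-memory JSON "file" named context-<pk> before handing it to _add_files() for upload. A standalone sketch of that packaging step follows; the upload itself is omitted because it needs a configured Gemini client, and the pk value is a placeholder.

    import io
    import json

    def build_context_file(context, pk="example-pk"):
        # Serialize dicts as pretty-printed JSON, everything else as plain text
        context_data = (
            json.dumps(context, indent=2) if isinstance(context, dict) else str(context)
        )
        f = io.BytesIO(context_data.encode("utf-8"))
        f.name = f"context-{pk}"  # display name later referenced in the prompt instructions
        return f

    f = build_context_file({"world": "Aether", "tone": "hopeful"})
    print(f.name, len(f.getvalue()), "bytes")
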