aient 1.0.43__py3-none-any.whl → 1.0.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/models.py CHANGED
@@ -86,6 +86,9 @@ class Thinking(BaseModel):
     budget_tokens: Optional[int] = None
     type: Optional[Literal["enabled", "disabled"]] = None
 
+class StreamOptions(BaseModel):
+    include_usage: Optional[bool] = None
+
 class RequestModel(BaseRequest):
     model: str
     messages: List[Message]
@@ -105,6 +108,7 @@ class RequestModel(BaseRequest):
     tools: Optional[List[Tool]] = None
     response_format: Optional[ResponseFormat] = None
     thinking: Optional[Thinking] = None
+    stream_options: Optional[StreamOptions] = None
 
     def get_last_text_message(self) -> Optional[str]:
         for message in reversed(self.messages):
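The new StreamOptions model and the optional stream_options field follow the OpenAI-style stream_options={"include_usage": true} shape. Below is a minimal sketch of how the new field behaves, assuming nothing beyond what the diff shows (pydantic v2 models importable from aient.core.models):

# Sketch only: exercises the new StreamOptions model added in this release.
from aient.core.models import StreamOptions

opts = StreamOptions(include_usage=True)
print(opts.model_dump(exclude_unset=True))  # {'include_usage': True}
# The field defaults to None, so existing requests that never set
# stream_options are unaffected.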
aient/core/request.py CHANGED
@@ -144,7 +144,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         'include_usage',
         'logprobs',
         'top_logprobs',
-        'response_format'
+        'response_format',
+        'stream_options',
     ]
     generation_config = {}
 
@@ -190,9 +191,12 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         else:
             payload[field] = value
 
-    if generation_config:
-        payload["generationConfig"] = generation_config
-        if "maxOutputTokens" not in generation_config:
+    max_token_65k_models = ["gemini-2.5-pro", "gemini-2.0-pro", "gemini-2.0-flash-thinking"]
+    payload["generationConfig"] = generation_config
+    if "maxOutputTokens" not in generation_config:
+        if any(pro_model in original_model for pro_model in max_token_65k_models):
+            payload["generationConfig"]["maxOutputTokens"] = 65536
+        else:
             payload["generationConfig"]["maxOutputTokens"] = 8192
 
     if request.model.endswith("-search"):
@@ -277,7 +281,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
     original_model = model_dict[request.model]
     search_tool = None
 
-    if "gemini-2.0" in original_model or "gemini-exp" in original_model:
+    pro_models = ["gemini-2.5-pro", "gemini-2.0-pro", "gemini-exp"]
+    if any(pro_model in original_model for pro_model in pro_models):
         location = gemini2
         search_tool = {"googleSearch": {}}
     else:
@@ -384,7 +389,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
         'user',
         'include_usage',
         'logprobs',
-        'top_logprobs'
+        'top_logprobs',
+        'stream_options',
     ]
     generation_config = {}
 
@@ -549,6 +555,7 @@ async def get_vertex_claude_payload(request, engine, provider, api_key=None):
         'n',
         'user',
         'include_usage',
+        'stream_options',
     ]
 
     for field, value in request.model_dump(exclude_unset=True).items():
@@ -845,6 +852,7 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
         'n',
         'user',
         'include_usage',
+        'stream_options',
     ]
 
     for field, value in request.model_dump(exclude_unset=True).items():
@@ -912,7 +920,8 @@ async def get_cohere_payload(request, engine, provider, api_key=None):
         'user',
         'include_usage',
         'logprobs',
-        'top_logprobs'
+        'top_logprobs',
+        'stream_options',
     ]
 
     for field, value in request.model_dump(exclude_unset=True).items():
@@ -959,7 +968,8 async def get_cloudflare_payload(request, engine, provider, api_key=None):
         'user',
         'include_usage',
         'logprobs',
-        'top_logprobs'
+        'top_logprobs',
+        'stream_options',
     ]
 
     for field, value in request.model_dump(exclude_unset=True).items():
@@ -1141,6 +1151,7 @@ async def get_claude_payload(request, engine, provider, api_key=None):
         'n',
         'user',
         'include_usage',
+        'stream_options',
     ]
 
     for field, value in request.model_dump(exclude_unset=True).items():
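Two themes run through this file: 'stream_options' joins each payload builder's list of OpenAI-style fields that are filtered out of the outgoing payload (the same lists that already contain 'include_usage'), and get_gemini_payload now always attaches generationConfig, with maxOutputTokens defaulting to 65536 for the 65k-capable models and 8192 otherwise. A standalone restatement of that default, for illustration only (the helper name is not part of the package):

# Restates the new maxOutputTokens default from get_gemini_payload.
# default_max_output_tokens is an illustrative name, not an aient function.
MAX_TOKEN_65K_MODELS = ["gemini-2.5-pro", "gemini-2.0-pro", "gemini-2.0-flash-thinking"]

def default_max_output_tokens(original_model: str) -> int:
    # Substring match, so preview/date-suffixed model ids also qualify.
    if any(pro_model in original_model for pro_model in MAX_TOKEN_65K_MODELS):
        return 65536
    return 8192

assert default_max_output_tokens("gemini-2.5-pro-preview") == 65536
assert default_max_output_tokens("gemini-1.5-flash") == 8192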
aient/core/response.py CHANGED
@@ -50,7 +50,6 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
             try:
                 json_data = json.loads( "{" + line + "}")
                 content = json_data.get('text', '')
-                content = "\n".join(content.split("\\n"))
                 # content = content.replace("\n", "\n\n")
                 # if last_text_line == 0 and is_thinking:
                 # content = "> " + content.lstrip()
@@ -108,7 +107,6 @@ async def fetch_vertex_claude_response_stream(client, url, headers, payload, model):
             try:
                 json_data = json.loads( "{" + line + "}")
                 content = json_data.get('text', '')
-                content = "\n".join(content.split("\\n"))
                 sse_string = await generate_sse_response(timestamp, model, content=content)
                 yield sse_string
             except json.JSONDecodeError:
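The line deleted from both stream handlers converted literal "\n" escape sequences in the decoded chunk into real newlines; with it gone, the streamed text is forwarded exactly as received. What the removed expression did, shown in isolation on a standalone string:

# Behaviour of the removed line, demonstrated outside the stream handlers.
content = "line one\\nline two"              # contains a literal backslash + n
unescaped = "\n".join(content.split("\\n"))  # the deleted transformation
print(repr(unescaped))                       # 'line one\nline two' (a real newline)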
aient/models/chatgpt.py CHANGED
@@ -176,6 +176,10 @@ class chatgpt(BaseLLM):
         if type(self.conversation[convo_id][message_index]["content"]) == dict \
         and type(self.conversation[convo_id][message_index + 1]["content"]) == list:
             self.conversation[convo_id][message_index]["content"] = [self.conversation[convo_id][message_index]["content"]]
+        if type(self.conversation[convo_id][message_index]["content"]) == dict \
+        and type(self.conversation[convo_id][message_index + 1]["content"]) == dict:
+            self.conversation[convo_id][message_index]["content"] = [self.conversation[convo_id][message_index]["content"]]
+            self.conversation[convo_id][message_index + 1]["content"] = [self.conversation[convo_id][message_index + 1]["content"]]
         self.conversation[convo_id][message_index]["content"] += self.conversation[convo_id][message_index + 1]["content"]
         self.conversation[convo_id].pop(message_index + 1)
         conversation_len = conversation_len - 1
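The message-merging loop previously promoted only a dict followed by a list; the new branch also handles two consecutive dict contents by wrapping both sides in lists before the += concatenation, which would otherwise raise a TypeError on dicts. A simplified sketch of that branch outside the class (the dict payloads are placeholders, not values taken from chatgpt.py):

# Simplified view of the new dict + dict merge branch.
current = {"type": "text", "text": "first chunk"}     # placeholder content
following = {"type": "text", "text": "second chunk"}  # placeholder content

if isinstance(current, dict) and isinstance(following, dict):
    # Wrap both sides so += concatenates lists instead of failing on dicts.
    current, following = [current], [following]

current += following
print(current)  # one list holding both original dicts, in order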
aient-1.0.43.dist-info/METADATA → aient-1.0.45.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.0.43
+Version: 1.0.45
 Summary: Aient: The Awakening of Agent.
 Description-Content-Type: text/markdown
 License-File: LICENSE
aient-1.0.43.dist-info/RECORD → aient-1.0.45.dist-info/RECORD CHANGED
@@ -2,9 +2,9 @@ aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 aient/core/.git,sha256=lrAcW1SxzRBUcUiuKL5tS9ykDmmTXxyLP3YYU-Y-Q-I,45
 aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
-aient/core/models.py,sha256=8MsuiYHBHVR5UMQ_cNLkvntoxalS7NpVwaNwHA0iZmk,7379
-aient/core/request.py,sha256=-nyFwGM86LB8Zn6ScRJvAbkJ9LPCHjgg51tO_edAIZ4,48422
-aient/core/response.py,sha256=7s1Jil0E5nnbL9xQldcjHIqSp0MFeWQo9mNX_iAuvSk,25954
+aient/core/models.py,sha256=H3_XuWA7aS25MWZPK1c-5RBiiuxWJbTfE3RAk0Pkc9A,7504
+aient/core/request.py,sha256=6c9drOddcvfeuLoUmDUWxP0gekW-ov839wiYETsNiZ0,48895
+aient/core/response.py,sha256=7RVSFfGHisejv2SlsHvp0t-N_8OpTS4edQU_NOi5BGU,25822
 aient/core/utils.py,sha256=i9ZwyywBLIhRM0fNmFSD3jF3dBL5QqVMOtSlG_ddv-I,24101
 aient/core/test/test_base_api.py,sha256=CjfFzMG26r8C4xCPoVkKb3Ac6pp9gy5NUCbZJHoSSsM,393
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -12,7 +12,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
 aient/models/__init__.py,sha256=ouNDNvoBBpIFrLsk09Q_sq23HR0GbLAKfGLIFmfEuXE,219
 aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
 aient/models/base.py,sha256=Loyt2F2WrDMBbK-sdmTtgkLVtdUXxK5tg4qoI6nc0Xo,7527
-aient/models/chatgpt.py,sha256=c-yx2NA7Pl1yBsuRr0elAthYT6BxuUDpGJTm-0lRDoM,41215
+aient/models/chatgpt.py,sha256=rF95RmO4C3h4PKRqE3Qk6fKoR0yIf-3zp8t7KBF_kjA,41685
 aient/models/claude.py,sha256=thK9P8qkaaoUN3OOJ9Shw4KDs-pAGKPoX4FOPGFXva8,28597
 aient/models/duckduckgo.py,sha256=1l7vYCs9SG5SWPCbcl7q6pCcB5AUF_r-a4l9frz3Ogo,8115
 aient/models/gemini.py,sha256=chGLc-8G_DAOxr10HPoOhvVFW1RvMgHd6mt--VyAW98,14730
@@ -29,8 +29,8 @@ aient/plugins/websearch.py,sha256=yiBzqXK5X220ibR-zko3VDsn4QOnLu1k6E2YOygCeTQ,15
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=obrf5oxzFQPCu1A5MYDDiZv_LM6l9C1QSkgWIqcu28k,25690
-aient-1.0.43.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
-aient-1.0.43.dist-info/METADATA,sha256=N35V_tqmSB42te120KO-wvIsdJuFwCCYCfXzykvZh8c,4986
-aient-1.0.43.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-aient-1.0.43.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
-aient-1.0.43.dist-info/RECORD,,
+aient-1.0.45.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.0.45.dist-info/METADATA,sha256=Wt-dsD5uQjMdMfWGA052WOC7ITd8UQy84MiGPkABO8A,4986
+aient-1.0.45.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+aient-1.0.45.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.0.45.dist-info/RECORD,,