aient 1.0.58__py3-none-any.whl → 1.0.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aient/core/models.py CHANGED
@@ -162,6 +162,7 @@ class AudioTranscriptionRequest(BaseRequest):
     response_format: Optional[str] = None
     temperature: Optional[float] = None
     stream: bool = False
+    timestamp_granularities: Optional[List[str]] = Field(default=["segment"])
 
     class Config:
         arbitrary_types_allowed = True
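
For context, a minimal sketch of how the new field behaves on a Pydantic model. The `BaseRequest` parent and the other upload fields are omitted, so this stand-in is an assumption, not the real class:

```python
from typing import List, Optional
from pydantic import BaseModel, Field

class AudioTranscriptionRequest(BaseModel):
    # Trimmed stand-in for the real model; only the fields shown in the diff.
    response_format: Optional[str] = None
    temperature: Optional[float] = None
    stream: bool = False
    timestamp_granularities: Optional[List[str]] = Field(default=["segment"])

# Pydantic copies the default list per instance, so the mutable default is safe.
print(AudioTranscriptionRequest().timestamp_granularities)   # ['segment']
print(AudioTranscriptionRequest(timestamp_granularities=["word"]).timestamp_granularities)  # ['word']
```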
aient/core/request.py CHANGED
@@ -1273,6 +1273,10 @@ async def get_whisper_payload(request, engine, provider, api_key=None):
     if request.language:
         payload["language"] = request.language
 
+    # https://platform.openai.com/docs/api-reference/audio/createTranscription
+    if request.timestamp_granularities:
+        payload["timestamp_granularities[]"] = request.timestamp_granularities
+
     return url, headers, payload
 
 
 async def get_moderation_payload(request, engine, provider, api_key=None):
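
Roughly, the new `timestamp_granularities[]` key ends up as repeated multipart form fields on an OpenAI-style `/v1/audio/transcriptions` request (per the documentation linked in the diff, word-level timestamps are only returned with `response_format=verbose_json`). The snippet below is illustrative only; httpx, the model name, and the placeholder API key are assumptions, not the library's actual call path:

```python
# Illustrative sketch of the outgoing transcription request.
import httpx

payload = {
    "model": "whisper-1",
    "response_format": "verbose_json",                 # needed for word-level timestamps
    "timestamp_granularities[]": ["word", "segment"],  # mirrors request.timestamp_granularities
}
files = {"file": ("audio.mp3", open("audio.mp3", "rb"), "audio/mpeg")}

resp = httpx.post(
    "https://api.openai.com/v1/audio/transcriptions",
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    data=payload,
    files=files,
    timeout=120,
)
print(resp.json())
```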
aient/core/response.py CHANGED
@@ -46,7 +46,10 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
                 line, buffer = buffer.split("\n", 1)
                 # line_index += 1
 
+                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
                 if line and '\"finishReason\": \"' in line:
+                    if "stop" not in line.lower():
+                        logger.error(f"finishReason: {line}")
                     is_finish = True
                 if is_finish and '\"promptTokenCount\": ' in line:
                     json_data = parse_json_safely( "{" + line + "}")
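
The new branch logs any non-STOP finish reason (for example SAFETY, MAX_TOKENS, or RECITATION, per the linked Google documentation) before marking the stream as finished. A toy reproduction of the check, with the standard logging module standing in for the module's own logger:

```python
# Toy reproduction of the finishReason check; `logger` is a stand-in.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("aient.core.response")

for line in ['"finishReason": "STOP",', '"finishReason": "MAX_TOKENS",']:
    is_finish = False
    if line and '"finishReason": "' in line:
        if "stop" not in line.lower():
            # Only non-STOP reasons are reported as errors.
            logger.error(f"finishReason: {line}")
        is_finish = True
```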
@@ -188,7 +191,7 @@ async def fetch_gpt_response_stream(client, url, headers, payload):
             while "\n" in buffer:
                 line, buffer = buffer.split("\n", 1)
                 # logger.info("line: %s", repr(line))
-                if line and line != "data: " and line != "data:" and not line.startswith(": ") and (result:=line.lstrip("data: ").strip()):
+                if line and not line.startswith(": ") and (result:=line.lstrip("data: ").strip()):
                     if result.strip() == "[DONE]":
                         break
                     line = json.loads(result)
@@ -295,8 +298,7 @@ async def fetch_azure_response_stream(client, url, headers, payload):
             while "\n" in buffer:
                 line, buffer = buffer.split("\n", 1)
                 # logger.info("line: %s", repr(line))
-                if line and line != "data: " and line != "data:" and not line.startswith(": "):
-                    result = line.lstrip("data: ")
+                if line and not line.startswith(": ") and (result:=line.lstrip("data: ").strip()):
                     if result.strip() == "[DONE]":
                         break
                     line = json.loads(result)
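
Both `fetch_gpt_response_stream` and `fetch_azure_response_stream` now rely on a single walrus expression to drop empty `data:` lines and SSE comment lines, since those reduce to an empty (falsy) string. A small self-contained check of that filter, with the test lines made up for illustration:

```python
# Quick check of the condensed SSE line filter shared by both parsers.
# Note: str.lstrip("data: ") removes any leading run of the characters
# {'d', 'a', 't', ':', ' '}, not the literal prefix "data: "; that is harmless
# here because the surviving JSON payload always starts with '{' or '['.
lines = ["data: ", "data:", ": keep-alive", 'data: {"id": "chatcmpl-1"}', ""]

for line in lines:
    if line and not line.startswith(": ") and (result := line.lstrip("data: ").strip()):
        print("kept:", result)   # only the JSON-bearing line survives
```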
aient/models/base.py CHANGED
@@ -82,6 +82,7 @@ class BaseLLM:
             ],
         }
         self.tokens_usage = defaultdict(int)
+        self.current_tokens = defaultdict(int)
         self.function_calls_counter = {}
         self.function_call_max_loop = 10
         self.use_plugins = use_plugins
aient/models/chatgpt.py CHANGED
@@ -207,6 +207,7 @@ class chatgpt(BaseLLM):
             history_len = history_len - 1
 
         if total_tokens:
+            self.current_tokens[convo_id] = total_tokens
             self.tokens_usage[convo_id] += total_tokens
 
     def truncate_conversation(self, convo_id: str = "default") -> None:
@@ -215,25 +216,17 @@ class chatgpt(BaseLLM):
         """
         while True:
             if (
-                self.tokens_usage[convo_id] > self.truncate_limit
+                self.current_tokens[convo_id] > self.truncate_limit
                 and len(self.conversation[convo_id]) > 1
             ):
                 # Don't remove the first message
                 mess = self.conversation[convo_id].pop(1)
+                string_mess = json.dumps(mess, ensure_ascii=False)
+                self.current_tokens[convo_id] -= len(string_mess) / 4
                 print("Truncate message:", mess)
             else:
                 break
 
-    def extract_values(self, obj):
-        if isinstance(obj, dict):
-            for value in obj.values():
-                yield from self.extract_values(value)
-        elif isinstance(obj, list):
-            for item in obj:
-                yield from self.extract_values(item)
-        else:
-            yield obj
-
     async def get_post_body(
         self,
         prompt: str,
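
With this change, `truncate_conversation` compares the token total reported by the most recent response (`current_tokens`) against the limit, and each dropped message reduces that count by roughly one token per four characters of its JSON serialization. A standalone sketch of the bookkeeping; the limit, the messages, and the characters-per-token ratio are illustrative, not tokenizer-accurate:

```python
# Standalone sketch of the new truncation accounting.
import json

truncate_limit = 100
current_tokens = {"default": 150}
conversation = {"default": [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "first long user message " * 4},
    {"role": "assistant", "content": "first long reply " * 4},
    {"role": "user", "content": "latest question"},
]}

while (current_tokens["default"] > truncate_limit
       and len(conversation["default"]) > 1):
    mess = conversation["default"].pop(1)        # keep the system prompt at index 0
    string_mess = json.dumps(mess, ensure_ascii=False)
    current_tokens["default"] -= len(string_mess) / 4   # ~4 chars per token heuristic
    print("Truncate message:", mess)
```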
@@ -789,6 +782,7 @@ class chatgpt(BaseLLM):
             {"role": "system", "content": self.system_prompt},
         ]
         self.tokens_usage[convo_id] = 0
+        self.current_tokens[convo_id] = 0
 
     def save(self, file: str, *keys: str) -> None:
         """
{aient-1.0.58.dist-info → aient-1.0.60.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aient
-Version: 1.0.58
+Version: 1.0.60
 Summary: Aient: The Awakening of Agent.
 Description-Content-Type: text/markdown
 License-File: LICENSE
{aient-1.0.58.dist-info → aient-1.0.60.dist-info}/RECORD RENAMED
@@ -2,17 +2,17 @@ aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 aient/core/.git,sha256=lrAcW1SxzRBUcUiuKL5tS9ykDmmTXxyLP3YYU-Y-Q-I,45
 aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
-aient/core/models.py,sha256=H3_XuWA7aS25MWZPK1c-5RBiiuxWJbTfE3RAk0Pkc9A,7504
-aient/core/request.py,sha256=c963OuUEBe7j1jxiiwipUyzGrbsCwXQIw_XGF_KdL-4,49491
-aient/core/response.py,sha256=iu_sWRWdy-Xs2CpEwhDXC_OLLnOc4sRwg28xZ2Qa69E,27772
+aient/core/models.py,sha256=_1wYZg_n9kb2A3C8xCboyqleH2iHc9scwOvtx9DPeok,7582
+aient/core/request.py,sha256=oavFyF5P0BL-85cse_lhyfP3UErFzeSdj6kTaSWzQrg,49690
+aient/core/response.py,sha256=6Fq-EIL7ua1zeT6GMc1fgQeRs_MCP6UST_pL0tM3f3I,27879
 aient/core/utils.py,sha256=DFpFU8Y-8lzgQlhaDUnao8HmviGoh3-oN8jZR3Dha7E,26150
 aient/core/test/test_base_api.py,sha256=CjfFzMG26r8C4xCPoVkKb3Ac6pp9gy5NUCbZJHoSSsM,393
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
 aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhFkw,2755
 aient/models/__init__.py,sha256=ouNDNvoBBpIFrLsk09Q_sq23HR0GbLAKfGLIFmfEuXE,219
 aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
-aient/models/base.py,sha256=PBfQTQI73OE2OuHvw-XN6vD-_y6k_vLjmT_8J4Gelvo,6813
-aient/models/chatgpt.py,sha256=TRvUpgBsQW0oJpxOpT0__fYooy2hbkMSYVdM7CqusnY,42296
+aient/models/base.py,sha256=vRCDEwabEm0u0RU4J62T2t271xg7YGCPjE3s9HzhtV8,6860
+aient/models/chatgpt.py,sha256=gOLBHWimbg2mbt9EjDXr5Za60Jt0ICqe-Hah95taupc,42217
 aient/models/claude.py,sha256=thK9P8qkaaoUN3OOJ9Shw4KDs-pAGKPoX4FOPGFXva8,28597
 aient/models/duckduckgo.py,sha256=1l7vYCs9SG5SWPCbcl7q6pCcB5AUF_r-a4l9frz3Ogo,8115
 aient/models/gemini.py,sha256=chGLc-8G_DAOxr10HPoOhvVFW1RvMgHd6mt--VyAW98,14730
@@ -35,8 +35,8 @@ aient/prompt/agent.py,sha256=h39WOoafv0_lh2coBCiG9k-VWa3Yi9HRZV0hnvsc4gs,23598
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=n0jR5eXCBIK12W4bIx-xU1FVl1hZ4zDC7hq_BWQHYJU,27537
-aient-1.0.58.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
-aient-1.0.58.dist-info/METADATA,sha256=7hHVXLWC6vfFXqNHHx3tOR_jL7imJTXKUNBpIEwtjyA,4973
-aient-1.0.58.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-aient-1.0.58.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
-aient-1.0.58.dist-info/RECORD,,
+aient-1.0.60.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.0.60.dist-info/METADATA,sha256=A78P4-U_ExGyY5oFvV_32VVdgv_ZtjWwMiBUezBHNQc,4973
+aient-1.0.60.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+aient-1.0.60.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.0.60.dist-info/RECORD,,