lionagi 0.0.110__py3-none-any.whl → 0.0.111__py3-none-any.whl

lionagi/api/oai_service.py CHANGED
@@ -31,7 +31,9 @@ class OpenAIRateLimiter(RateLimiter):
         Calculates the required tokens for a request.
     """
 
-    def __init__(self, max_requests_per_minute: int, max_tokens_per_minute: int) -> None:
+    def __init__(
+        self, max_requests_per_minute: int, max_tokens_per_minute: int
+    ) -> None:
         """
         Initializes the rate limiter with specific limits for OpenAI API.
 
@@ -41,9 +43,18 @@ class OpenAIRateLimiter(RateLimiter):
             max_tokens_per_minute (int): The maximum number of tokens that can accumulate per minute.
         """
         super().__init__(max_requests_per_minute, max_tokens_per_minute)
-        if not os.getenv('env_readthedocs'):
-            self.rate_limit_replenisher_task = asyncio.create_task(self.rate_limit_replenisher())
-
+
+    @classmethod
+    async def create(
+        cls, max_requests_per_minute: int, max_tokens_per_minute: int
+    ) -> None:
+        self = cls(max_requests_per_minute, max_tokens_per_minute)
+        if not os.getenv("env_readthedocs"):
+            self.rate_limit_replenisher_task = await asyncio.create_task(
+                self.rate_limit_replenisher()
+            )
+        return self
+
     async def rate_limit_replenisher(self) -> NoReturn:
         """
         Asynchronously replenishes the rate limit capacities at regular intervals.
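For orientation, this hunk replaces synchronous task startup in `__init__` with an async factory: `create` builds the instance and spawns the replenisher from inside a running event loop. A minimal usage sketch (the driver script is hypothetical; it sets `env_readthedocs` because `await asyncio.create_task(...)` waits for the spawned task to finish, and `rate_limit_replenisher` is typed `NoReturn`):

```python
import asyncio
import os

from lionagi.api.oai_service import OpenAIRateLimiter

# Hypothetical driver. With env_readthedocs set, create() skips spawning
# the replenisher; without it, the factory would await a task that never
# completes, so create() would never return.
os.environ["env_readthedocs"] = "1"


async def main() -> None:
    limiter = await OpenAIRateLimiter.create(
        max_requests_per_minute=500, max_tokens_per_minute=150_000
    )
    print(limiter.available_request_capacity)


asyncio.run(main())
```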
@@ -61,9 +72,13 @@ class OpenAIRateLimiter(RateLimiter):
             await asyncio.sleep(60) # Replenishes every 60 seconds
             self.available_request_capacity = self.max_requests_per_minute
             self.available_token_capacity = self.max_tokens_per_minute
-
-    def calculate_num_token(self, payload: Dict[str, Any] =None,
-                            api_endpoint: str =None, token_encoding_name: str =None) -> int:
+
+    def calculate_num_token(
+        self,
+        payload: Dict[str, Any] = None,
+        api_endpoint: str = None,
+        token_encoding_name: str = None,
+    ) -> int:
         """
         Calculates the number of tokens required for a request based on the payload and API endpoint.
 
@@ -76,6 +91,8 @@ class OpenAIRateLimiter(RateLimiter):
 
             api_endpoint (str): The specific API endpoint for the request.
 
+            token_encoding_name (str): The name of the token encoding method.
+
         Returns:
             int: The estimated number of tokens required for the request.
 
@@ -100,7 +117,9 @@ class OpenAIRateLimiter(RateLimiter):
                 for key, value in message.items():
                     num_tokens += len(encoding.encode(value))
                     if key == "name": # if there's a name, the role is omitted
-                        num_tokens -= 1 # role is always required and always 1 token
+                        num_tokens -= (
+                            1  # role is always required and always 1 token
+                        )
             num_tokens += 2 # every reply is primed with <im_start>assistant
             return num_tokens + completion_tokens
         # normal completions
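The arithmetic above follows the usual ChatML accounting: each value in a message is tokenized, one token is subtracted when a `name` field stands in for the role, and two tokens prime the assistant's reply. A self-contained sketch with `tiktoken` (the per-message overhead of 4 tokens is an assumption; the hunk above shows only the inner loop):

```python
import tiktoken  # pip install tiktoken


def count_chat_tokens(messages: list[dict], encoding_name: str = "cl100k_base") -> int:
    encoding = tiktoken.get_encoding(encoding_name)
    num_tokens = 0
    for message in messages:
        num_tokens += 4  # assumed overhead per message (<im_start>role\n ... <im_end>\n)
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":  # if there's a name, the role is omitted
                num_tokens -= 1  # role is always required and always 1 token
    num_tokens += 2  # every reply is primed with <im_start>assistant
    return num_tokens


print(count_chat_tokens([{"role": "user", "content": "Hello"}]))
```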
@@ -134,7 +153,7 @@ class OpenAIRateLimiter(RateLimiter):
             raise NotImplementedError(
                 f'API endpoint "{api_endpoint}" not implemented in this script'
             )
-
+
 
 class OpenAIService(BaseAPIService):
     """
@@ -159,9 +178,9 @@ class OpenAIService(BaseAPIService):
         max_attempts: int = 3,
         max_requests_per_minute: int = 500,
         max_tokens_per_minute: int = 150_000,
-        ratelimiter = OpenAIRateLimiter,
+        ratelimiter=OpenAIRateLimiter,
         status_tracker: Optional[StatusTracker] = None,
-        queue: Optional[AsyncQueue] = None
+        queue: Optional[AsyncQueue] = None,
     ):
         """
         Initializes the OpenAI service with configuration for API interaction.
@@ -191,11 +210,20 @@ class OpenAIService(BaseAPIService):
         # Service is configured for interacting with OpenAI API.
         """
         api_key = api_key or os.getenv("OPENAI_API_KEY")
-        super().__init__(api_key, token_encoding_name, max_attempts,
-                         max_requests_per_minute, max_tokens_per_minute,
-                         ratelimiter, status_tracker, queue)
-
-    async def call_api(self, http_session, endpoint, payload: Dict[str, any] = None) -> Optional[Dict[str, any]]:
+        super().__init__(
+            api_key,
+            token_encoding_name,
+            max_attempts,
+            max_requests_per_minute,
+            max_tokens_per_minute,
+            ratelimiter,
+            status_tracker,
+            queue,
+        )
+
+    async def call_api(
+        self, http_session, endpoint, payload: Dict[str, any] = None
+    ) -> Optional[Dict[str, any]]:
         """
         Call an OpenAI API endpoint with a specific payload and handle the response.
 
@@ -221,15 +249,20 @@ class OpenAIService(BaseAPIService):
             ... )
             # Calls the specified API endpoint with the given payload.
         """
-        endpoint = self.api_endpoint_from_url(self.base_url+endpoint)
-
+        endpoint = self.api_endpoint_from_url(self.base_url + endpoint)
+
         while True:
-            if self.rate_limiter.available_request_capacity < 1 or self.rate_limiter.available_token_capacity < 10: # Minimum token count
+            if (
+                self.rate_limiter.available_request_capacity < 1
+                or self.rate_limiter.available_token_capacity < 10
+            ):  # Minimum token count
                 await asyncio.sleep(1) # Wait for capacity
                 continue
-
-            required_tokens = self.rate_limiter.calculate_num_token(payload, endpoint, self.token_encoding_name)
-
+
+            required_tokens = self.rate_limiter.calculate_num_token(
+                payload, endpoint, self.token_encoding_name
+            )
+
             if self.rate_limiter.available_token_capacity >= required_tokens:
                 self.rate_limiter.available_request_capacity -= 1
                 self.rate_limiter.available_token_capacity -= required_tokens
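The loop above is a token-bucket acquire: wait while either bucket is short, then deduct one request plus the estimated token cost before dispatching. Stripped to its skeleton (the `BucketState` stand-in is hypothetical, and the diff's two capacity checks are condensed into one condition):

```python
import asyncio
from dataclasses import dataclass


@dataclass
class BucketState:
    # Hypothetical stand-in for the rate limiter's two counters,
    # refilled once per minute by the replenisher task.
    available_request_capacity: int = 500
    available_token_capacity: int = 150_000


async def acquire(bucket: BucketState, required_tokens: int) -> None:
    # Spin until both buckets can cover the call, then deduct. The deduction
    # is safe without a lock because no await sits between check and deduct.
    while (
        bucket.available_request_capacity < 1
        or bucket.available_token_capacity < required_tokens
    ):
        await asyncio.sleep(1)  # wait for capacity
    bucket.available_request_capacity -= 1
    bucket.available_token_capacity -= required_tokens


asyncio.run(acquire(BucketState(), required_tokens=100))
```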
@@ -240,7 +273,9 @@ class OpenAIService(BaseAPIService):
                 while attempts_left > 0:
                     try:
                         async with http_session.post(
-                            url=(self.base_url+endpoint), headers=request_headers, json=payload
+                            url=(self.base_url + endpoint),
+                            headers=request_headers,
+                            json=payload,
                         ) as response:
                             response_json = await response.json()
 
@@ -250,7 +285,9 @@ class OpenAIService(BaseAPIService):
                             )
                             attempts_left -= 1
 
-                            if "Rate limit" in response_json["error"].get("message", ""):
+                            if "Rate limit" in response_json["error"].get(
+                                "message", ""
+                            ):
                                 await asyncio.sleep(15)
                             else:
                                 return response_json
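The branch above retries only when the error message mentions a rate limit, sleeping 15 seconds between attempts; any other error is returned to the caller immediately. Condensed into a standalone sketch (the `post_once` callable is a hypothetical stand-in for the `http_session.post` block):

```python
import asyncio
from typing import Awaitable, Callable, Optional


async def post_with_retries(
    post_once: Callable[[], Awaitable[dict]], max_attempts: int = 3
) -> Optional[dict]:
    attempts_left = max_attempts
    while attempts_left > 0:
        response_json = await post_once()
        if "error" not in response_json:
            return response_json
        attempts_left -= 1
        if "Rate limit" in response_json["error"].get("message", ""):
            await asyncio.sleep(15)  # back off, then retry
        else:
            return response_json  # non-rate-limit errors surface immediately
    return None  # rate-limited on every attempt


async def demo() -> Optional[dict]:
    async def fake_post() -> dict:  # illustrative successful response
        return {"choices": [{"message": {"content": "ok"}}]}

    return await post_with_retries(fake_post)


print(asyncio.run(demo()))
```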
lionagi/session/session.py CHANGED
@@ -1,9 +1,10 @@
+import os
 import aiohttp
 import asyncio
 import json
 from typing import Any
 
-import lionagi
+
 from .conversation import Conversation
 from ..utils.sys_util import to_list, l_call, al_call
 from ..utils.log_util import DataLogger
@@ -13,7 +14,7 @@ from ..api.oai_service import OpenAIService
 from ..api.oai_config import oai_llmconfig
 
 status_tracker = StatusTracker()
-OAIService = OpenAIService()
+OAIService = OpenAIService(api_key=os.getenv('OPENAI_API_KEY'))
 
 
 class Session():
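With this change the module-level `OAIService` reads the key from the environment at import time, so `OPENAI_API_KEY` should be exported before the session module is imported. A minimal sketch (the placeholder value is illustrative):

```python
import os

# Export the key before importing the session module; the module-level
# OAIService instance is constructed as a side effect of the import.
os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")

from lionagi.session.session import Session  # noqa: E402
```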
@@ -52,6 +53,9 @@ class Session():
         _output(output, invoke=True, out=True) -> Any:
             Process the output, invoke tools if needed, and optionally return the output.
 
+        _is_invoked():
+            Checks if the current message indicates the invocation of a function call.
+
         register_tools(tools, funcs, update=False, new=False, prefix=None, postfix=None):
             Register tools and their corresponding functions.
 
 
@@ -64,10 +68,10 @@ class Session():
64
68
  auto_followup(self, instruct, num=3, tool_parser=None, **kwags):
65
69
  Automates the follow-up process for a specified number of times or until the session concludes.
66
70
 
67
- create_payload_chatcompletion(**kwargs) -> dict:
71
+ _create_payload_chatcompletion(**kwargs) -> dict:
68
72
  Create a payload for chat completion based on the conversation state and configuration.
69
73
 
70
- call_chatcompletion(sleep=0.1, **kwargs) -> None:
74
+ _call_chatcompletion(sleep=0.1, **kwargs) -> None:
71
75
  Make a call to the chat completion API and process the response.
72
76
 
73
77
  messages_to_csv(dir=None, filename="_messages.csv", **kwargs) -> None:
lionagi/utils/api_util.py CHANGED
@@ -288,18 +288,18 @@ class BaseAPIService(ABC):
             The maximum number of retry attempts for API calls.
         status_tracker (StatusTracker):
             Tracker for API call statuses.
-        rate_limiter (RateLimiter):
-            Limiter to control the rate of API calls.
         queue (AsyncQueue):
             Queue for managing API call tasks.
+        rate_limiter (RateLimiter):
+            Limiter to control the rate of API calls.
+        append_to_jsonl (callable):
+            Callable for appending data to a file in JSONL format.
 
     Methods:
         call_api:
             Abstract method to define API call mechanism in subclasses.
         handle_error:
             Handle errors by logging and saving details to a JSONL file.
-        append_to_jsonl:
-            Append data to a file in JSONL format.
         api_endpoint_from_url:
             Extract the API endpoint from a URL.
         task_id_generator_function:
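The docstring now lists `append_to_jsonl` as an attribute (a callable) instead of a method. A minimal sketch of what such a JSONL appender typically does (the filename and record are illustrative):

```python
import json


def append_to_jsonl(data: object, filename: str) -> None:
    # One JSON document per line, appended: the JSON Lines convention.
    with open(filename, "a") as f:
        f.write(json.dumps(data) + "\n")


append_to_jsonl({"error": "Rate limit reached", "attempts": 3}, "api_errors.jsonl")
```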
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.0.110"
+__version__ = "0.0.111"
{lionagi-0.0.110.dist-info → lionagi-0.0.111.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lionagi
-Version: 0.0.110
+Version: 0.0.111
 Summary: Towards automated general intelligence.
 Author: HaiyangLi
 Author-email: Haiyang Li <ocean@lionagi.ai>
{lionagi-0.0.110.dist-info → lionagi-0.0.111.dist-info}/RECORD RENAMED
@@ -1,20 +1,20 @@
 lionagi/__init__.py,sha256=2Rko3tw94ZFVN_GSvcxAY1O77FxswcaMxNHKH5Bj7jc,788
-lionagi/version.py,sha256=00B0bSjQW8FClEUTZrcxxlOoilJS1Cz7oACh4nBKyM4,24
+lionagi/version.py,sha256=BtvlzlO7UNG5nD9ftqoP0J2wHDhVCXpzuMsjYBw0Wfg,24
 lionagi/api/__init__.py,sha256=PF-fWsB0axACGDbm8FluZgDQMyQf3PUUJ1tIXW535TQ,178
 lionagi/api/oai_config.py,sha256=yhyZ4aEaF6r3XBbhxI47r8CL2-amc-4IKJhbXv2W9CM,356
-lionagi/api/oai_service.py,sha256=mxsMlg__Ck4kNtzoH6bTkgOBu67RTEmQAX5UTvubA6Y,11283
+lionagi/api/oai_service.py,sha256=9FBmw_UTBsgnPBP7f8KOX4PwS5TXeknnUw6-kAYJxqk,11919
 lionagi/session/__init__.py,sha256=qAf0IAA2D1ZhwvRopSgi8X13DH4Y0E5HaA2DVr6FxG0,152
 lionagi/session/conversation.py,sha256=5EePoSVIcyaZJEtkNxv1yCFby_3_SLx5AmaSxS9pwI8,4058
 lionagi/session/message.py,sha256=L4QhTvay-xqL_PWWzr_lTig70cr7zEc5YjONILmAoWU,6504
-lionagi/session/session.py,sha256=Kt4Ks5CCfwdI5cobPWzgi1LiGpjKAFAt5lsFkoM1-ac,14604
+lionagi/session/session.py,sha256=wmAwNxk6pm1MTV3kqjG8i5a89xssDP0Ln8m2_e_zdek,14748
 lionagi/utils/__init__.py,sha256=e5aEzyHofUYZ8olcyHxq7wqTGRsRZwag3vyZ0T4_ByQ,842
-lionagi/utils/api_util.py,sha256=Mnk5DdY1DlixI_365Q3xl1oDmSz0zY-F3IhXsCbW4EQ,15230
+lionagi/utils/api_util.py,sha256=xPWwQkWMnkGOUAKpwOkGkMv2SWCSCHjBsggnrSfHJhs,15257
 lionagi/utils/doc_util.py,sha256=ZEuLKzc3EH692FW1LXRXBHgextMfb1OaLE3z_NhBBT0,12882
 lionagi/utils/log_util.py,sha256=mfLmvjv4hvTYMel46tpKJyqLbj1PZimCgKigz48osZY,3158
 lionagi/utils/sys_util.py,sha256=q4I_d61Zwe-WvukNoa53Gd8ycDcTpfOhw3yER8ZoiCg,28449
 lionagi/utils/tool_util.py,sha256=0mWGW_rfUPTay_L05dckGzEXdg4ZdhFyGA1lve9tnj8,7410
-lionagi-0.0.110.dist-info/LICENSE,sha256=TBnSyG8fs_tMRtK805GzA1cIyExleKyzoN_kuVxT9IY,11358
-lionagi-0.0.110.dist-info/METADATA,sha256=F2tKrl8Z0kjVHJ2bZ2Vu_DDqPnxfL-7wUGjQQI5h61Y,17414
-lionagi-0.0.110.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-lionagi-0.0.110.dist-info/top_level.txt,sha256=szvch_d2jE1Lu9ZIKsl26Ll6BGfYfbOgt5lm-UpFSo4,8
-lionagi-0.0.110.dist-info/RECORD,,
+lionagi-0.0.111.dist-info/LICENSE,sha256=TBnSyG8fs_tMRtK805GzA1cIyExleKyzoN_kuVxT9IY,11358
+lionagi-0.0.111.dist-info/METADATA,sha256=drb1EbWKei2M7WjH6Md8Qq_vOMRc8Sgz1oyxawPZQEY,17414
+lionagi-0.0.111.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+lionagi-0.0.111.dist-info/top_level.txt,sha256=szvch_d2jE1Lu9ZIKsl26Ll6BGfYfbOgt5lm-UpFSo4,8
+lionagi-0.0.111.dist-info/RECORD,,