lionagi 0.0.208__tar.gz → 0.0.210__tar.gz

Files changed (130)
  1. {lionagi-0.0.208 → lionagi-0.0.210}/PKG-INFO +28 -29
  2. {lionagi-0.0.208 → lionagi-0.0.210}/README.md +27 -28
  3. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/__init__.py +4 -6
  4. lionagi-0.0.210/lionagi/api_service/base_endpoint.py +65 -0
  5. lionagi-0.0.210/lionagi/api_service/base_rate_limiter.py +121 -0
  6. lionagi-0.0.210/lionagi/api_service/base_service.py +146 -0
  7. lionagi-0.0.210/lionagi/api_service/chat_completion.py +6 -0
  8. lionagi-0.0.210/lionagi/api_service/embeddings.py +6 -0
  9. lionagi-0.0.210/lionagi/api_service/payload_package.py +47 -0
  10. lionagi-0.0.210/lionagi/api_service/status_tracker.py +29 -0
  11. lionagi-0.0.210/lionagi/core/__init__.py +7 -0
  12. lionagi-0.0.210/lionagi/core/branch.py +1191 -0
  13. lionagi-0.0.210/lionagi/core/flow.py +423 -0
  14. {lionagi-0.0.208/lionagi/core/instruction_set → lionagi-0.0.210/lionagi/core}/instruction_set.py +3 -3
  15. lionagi-0.0.210/lionagi/core/session.py +872 -0
  16. lionagi-0.0.210/lionagi/schema/__init__.py +8 -0
  17. lionagi-0.0.210/lionagi/schema/base_schema.py +821 -0
  18. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/base_service.py +4 -4
  19. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/oai.py +4 -4
  20. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/structures/graph.py +1 -1
  21. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/structures/relationship.py +1 -1
  22. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/structures/structure.py +1 -1
  23. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tools/tool_util.py +2 -1
  24. lionagi-0.0.210/lionagi/utils/__init__.py +24 -0
  25. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/utils/api_util.py +63 -2
  26. lionagi-0.0.210/lionagi/utils/core_utils.py +338 -0
  27. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/utils/sys_util.py +3 -3
  28. lionagi-0.0.210/lionagi/version.py +1 -0
  29. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi.egg-info/PKG-INFO +28 -29
  30. lionagi-0.0.210/lionagi.egg-info/SOURCES.txt +61 -0
  31. lionagi-0.0.208/lionagi/_services/anthropic.py +0 -79
  32. lionagi-0.0.208/lionagi/_services/azure.py +0 -1
  33. lionagi-0.0.208/lionagi/_services/bedrock.py +0 -0
  34. lionagi-0.0.208/lionagi/_services/everlyai.py +0 -0
  35. lionagi-0.0.208/lionagi/_services/gemini.py +0 -0
  36. lionagi-0.0.208/lionagi/_services/gpt4all.py +0 -0
  37. lionagi-0.0.208/lionagi/_services/huggingface.py +0 -0
  38. lionagi-0.0.208/lionagi/_services/litellm.py +0 -33
  39. lionagi-0.0.208/lionagi/_services/localai.py +0 -0
  40. lionagi-0.0.208/lionagi/_services/openllm.py +0 -0
  41. lionagi-0.0.208/lionagi/_services/openrouter.py +0 -44
  42. lionagi-0.0.208/lionagi/_services/perplexity.py +0 -0
  43. lionagi-0.0.208/lionagi/_services/predibase.py +0 -0
  44. lionagi-0.0.208/lionagi/_services/rungpt.py +0 -0
  45. lionagi-0.0.208/lionagi/_services/vllm.py +0 -0
  46. lionagi-0.0.208/lionagi/_services/xinference.py +0 -0
  47. lionagi-0.0.208/lionagi/agents/planner.py +0 -1
  48. lionagi-0.0.208/lionagi/agents/prompter.py +0 -1
  49. lionagi-0.0.208/lionagi/agents/scorer.py +0 -1
  50. lionagi-0.0.208/lionagi/agents/summarizer.py +0 -1
  51. lionagi-0.0.208/lionagi/agents/validator.py +0 -1
  52. lionagi-0.0.208/lionagi/bridge/__init__.py +0 -22
  53. lionagi-0.0.208/lionagi/bridge/langchain.py +0 -195
  54. lionagi-0.0.208/lionagi/bridge/llama_index.py +0 -266
  55. lionagi-0.0.208/lionagi/core/__init__.py +0 -11
  56. lionagi-0.0.208/lionagi/core/branch/branch.py +0 -841
  57. lionagi-0.0.208/lionagi/core/branch/cluster.py +0 -1
  58. lionagi-0.0.208/lionagi/core/branch/conversation.py +0 -787
  59. lionagi-0.0.208/lionagi/core/core_util.py +0 -0
  60. lionagi-0.0.208/lionagi/core/flow/flow.py +0 -19
  61. lionagi-0.0.208/lionagi/core/flow/flow_util.py +0 -62
  62. lionagi-0.0.208/lionagi/core/instruction_set/__init__.py +0 -0
  63. lionagi-0.0.208/lionagi/core/messages/__init__.py +0 -0
  64. lionagi-0.0.208/lionagi/core/sessions/__init__.py +0 -0
  65. lionagi-0.0.208/lionagi/core/sessions/session.py +0 -504
  66. lionagi-0.0.208/lionagi/datastores/__init__.py +0 -1
  67. lionagi-0.0.208/lionagi/datastores/chroma.py +0 -1
  68. lionagi-0.0.208/lionagi/datastores/deeplake.py +0 -1
  69. lionagi-0.0.208/lionagi/datastores/elasticsearch.py +0 -1
  70. lionagi-0.0.208/lionagi/datastores/lantern.py +0 -1
  71. lionagi-0.0.208/lionagi/datastores/pinecone.py +0 -1
  72. lionagi-0.0.208/lionagi/datastores/postgres.py +0 -1
  73. lionagi-0.0.208/lionagi/datastores/qdrant.py +0 -1
  74. lionagi-0.0.208/lionagi/loaders/__init__.py +0 -18
  75. lionagi-0.0.208/lionagi/loaders/chunker.py +0 -166
  76. lionagi-0.0.208/lionagi/loaders/load_util.py +0 -240
  77. lionagi-0.0.208/lionagi/loaders/reader.py +0 -122
  78. lionagi-0.0.208/lionagi/models/__init__.py +0 -0
  79. lionagi-0.0.208/lionagi/models/base_model.py +0 -0
  80. lionagi-0.0.208/lionagi/models/imodel.py +0 -53
  81. lionagi-0.0.208/lionagi/schema/__init__.py +0 -11
  82. lionagi-0.0.208/lionagi/schema/async_queue.py +0 -158
  83. lionagi-0.0.208/lionagi/schema/base_condition.py +0 -1
  84. lionagi-0.0.208/lionagi/schema/base_node.py +0 -422
  85. lionagi-0.0.208/lionagi/schema/base_tool.py +0 -44
  86. lionagi-0.0.208/lionagi/schema/data_logger.py +0 -126
  87. lionagi-0.0.208/lionagi/schema/data_node.py +0 -88
  88. lionagi-0.0.208/lionagi/schema/status_tracker.py +0 -37
  89. lionagi-0.0.208/lionagi/tests/__init__.py +0 -0
  90. lionagi-0.0.208/lionagi/tests/test_utils/__init__.py +0 -0
  91. lionagi-0.0.208/lionagi/tests/test_utils/test_encrypt_util.py +0 -323
  92. lionagi-0.0.208/lionagi/tools/tool_manager.py +0 -163
  93. lionagi-0.0.208/lionagi/utils/__init__.py +0 -31
  94. lionagi-0.0.208/lionagi/utils/encrypt_util.py +0 -283
  95. lionagi-0.0.208/lionagi/utils/url_util.py +0 -55
  96. lionagi-0.0.208/lionagi/version.py +0 -1
  97. lionagi-0.0.208/lionagi.egg-info/SOURCES.txt +0 -111
  98. {lionagi-0.0.208 → lionagi-0.0.210}/LICENSE +0 -0
  99. {lionagi-0.0.208 → lionagi-0.0.210}/README.rst +0 -0
  100. {lionagi-0.0.208/lionagi/agents → lionagi-0.0.210/lionagi/api_service}/__init__.py +0 -0
  101. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/configs/__init__.py +0 -0
  102. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/configs/oai_configs.py +0 -0
  103. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/configs/openrouter_configs.py +0 -0
  104. {lionagi-0.0.208/lionagi/core/branch → lionagi-0.0.210/lionagi/core}/branch_manager.py +0 -0
  105. {lionagi-0.0.208/lionagi/core/messages → lionagi-0.0.210/lionagi/core}/messages.py +3 -3
  106. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/__init__.py +0 -0
  107. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/mistralai.py +0 -0
  108. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/mlx_service.py +0 -0
  109. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/ollama.py +0 -0
  110. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/services.py +0 -0
  111. {lionagi-0.0.208/lionagi/_services → lionagi-0.0.210/lionagi/services}/transformers.py +0 -0
  112. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/structures/__init__.py +0 -0
  113. {lionagi-0.0.208/lionagi/core/branch → lionagi-0.0.210/lionagi/tests}/__init__.py +0 -0
  114. {lionagi-0.0.208/lionagi/core/flow → lionagi-0.0.210/lionagi/tests/test_utils}/__init__.py +0 -0
  115. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tests/test_utils/test_api_util.py +0 -0
  116. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tests/test_utils/test_call_util.py +0 -0
  117. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tests/test_utils/test_io_util.py +0 -0
  118. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tests/test_utils/test_nested_util.py +0 -0
  119. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tests/test_utils/test_sys_util.py +0 -0
  120. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/tools/__init__.py +0 -0
  121. /lionagi-0.0.208/lionagi/_services/anyscale.py → /lionagi-0.0.210/lionagi/tools/tool_manager.py +0 -0
  122. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/utils/call_util.py +0 -0
  123. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/utils/io_util.py +0 -0
  124. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi/utils/nested_util.py +0 -0
  125. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi.egg-info/dependency_links.txt +0 -0
  126. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi.egg-info/requires.txt +0 -0
  127. {lionagi-0.0.208 → lionagi-0.0.210}/lionagi.egg-info/top_level.txt +0 -0
  128. {lionagi-0.0.208 → lionagi-0.0.210}/pyproject.toml +0 -0
  129. {lionagi-0.0.208 → lionagi-0.0.210}/setup.cfg +0 -0
  130. {lionagi-0.0.208 → lionagi-0.0.210}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lionagi
- Version: 0.0.208
+ Version: 0.0.210
  Summary: Towards automated general intelligence.
  Author: HaiyangLi
  Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -234,42 +234,30 @@ Requires-Dist: pandas>=2.1.0
  **Towards Automated General Intelligence**


- LionAGI is a cutting-edge **intelligent agent framework**. It integrates data manipulation with advanced machine learning tools, such as Large Language Models (i.e. OpenAI's GPT).
- - Designed for data-centric, production-level projects,
- - dramatically lowers the barrier in creating intelligent, automated systems
- - that can understand and interact meaningfully with large volumes of data.
+ LionAGI is an **intelligent agent framework** tailored for **big data analysis** with advanced **machine learning** tools. Designed for data-centric, production-level projects, LionAGI allows flexible and rapid design of agentic workflows, customized for your own data. LionAGI `agents` can manage and direct other agents, and can also use multiple different tools in parallel.
+
+ <img width="1002" alt="image" src="https://github.com/lion-agi/lionagi/assets/122793010/3fd75c2a-a9e9-4ab4-8ae9-f9cd71c69aec">

- Install LionAGI with pip:

- ```bash
- pip install lionagi
- ```
- Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename as `.env` and put in your project's root directory.
- by default we use `OPENAI_API_KEY`.
+ #### Integrate any Advanced Model into your existing workflow.

+ <img width="1100" alt="Screenshot 2024-02-14 at 8 54 01 AM" src="https://github.com/lion-agi/lionagi/assets/122793010/cfbc403c-cece-49e7-bc3a-015e035d3607">


- ### Features
- - Robust and scalable. Create a production ready LLM application **in hours**, with more than 100 models
- - Efficient and verstile data operations for reading, chunking, binning, writing, storing data with support for `langchain` and `llamaindex`
- - Built-in support for **chain/graph-of-thoughts, ReAct, Concurrent parallel function calling**
- - Unified interface with any LLM provider, API or local
- - Fast and **concurrent** API call with **configurable rate limit**
- - (Work In Progress) support for models both API and local
- ---
- LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)


- **Notice**:
- * calling API with maximum throughput over large set of data with advanced models i.e. gpt-4 can get **EXPENSIVE IN JUST SECONDS**,
- * please know what you are doing, and check the usage on OpenAI regularly
- * default rate limits are set to be **tier 1** of OpenAI model `gpt-4-1104-preview`, please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free) you can modify token rate parameters to fit different use cases.
- * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases), **main is under development and will be changed without notice**
+ ### Install LionAGI with pip:
+
+ ```bash
+ pip install lionagi
+ ```
+ Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename it to `.env`, and put it in your project's root directory.
+ By default we use `OPENAI_API_KEY`.


  ### Quick Start

- The following example shows how to use LionAGI's `Session` object to interact with `gpt-4` model:
+ The following example shows how to use LionAGI's `Session` object to interact with the `gpt-4-turbo` model:

  ```python

@@ -285,7 +273,7 @@ import lionagi as li

  calculator = li.Session(system=system)
  result = await calculator.chat(
-     instruction=instruction, context=context, model="gpt-4-1106-preview"
+     instruction=instruction, context=context, model="gpt-4-turbo-preview"
  )

  print(f"Calculation Result: {result}")
@@ -303,7 +291,7 @@ import lionagi as li
  async def main():
      calculator = li.Session(system=system)
      result = await calculator.chat(
-         instruction=instruction, context=context, model="gpt-4-1106-preview"
+         instruction=instruction, context=context, model="gpt-4-turbo-preview"
      )
      print(f"Calculation Result: {result}")

@@ -311,7 +299,18 @@ if __name__ == "__main__":
  asyncio.run(main())
  ```

- Visit our notebooks for our examples.
+ Visit our notebooks for examples.
+
+ LionAGI is designed to be `asynchronous` only; please check the official Python documentation on how `async` works: [here](https://docs.python.org/3/library/asyncio.html)
+
+ ---
+
+ **Notice**:
+ * calling the API at maximum throughput over a large set of data with advanced models, i.e. gpt-4, can get **EXPENSIVE IN JUST SECONDS**
+ * please know what you are doing, and check your usage on OpenAI regularly
+ * default rate limits are set to 1,000 requests and 100,000 tokens per minute; please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free); you can modify token rate parameters to fit different use cases
+ * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases); **main is under development and will be changed without notice**
+

  ### Community

@@ -9,42 +9,30 @@
  **Towards Automated General Intelligence**


- LionAGI is a cutting-edge **intelligent agent framework**. It integrates data manipulation with advanced machine learning tools, such as Large Language Models (i.e. OpenAI's GPT).
- - Designed for data-centric, production-level projects,
- - dramatically lowers the barrier in creating intelligent, automated systems
- - that can understand and interact meaningfully with large volumes of data.
+ LionAGI is an **intelligent agent framework** tailored for **big data analysis** with advanced **machine learning** tools. Designed for data-centric, production-level projects, LionAGI allows flexible and rapid design of agentic workflows, customized for your own data. LionAGI `agents` can manage and direct other agents, and can also use multiple different tools in parallel.
+
+ <img width="1002" alt="image" src="https://github.com/lion-agi/lionagi/assets/122793010/3fd75c2a-a9e9-4ab4-8ae9-f9cd71c69aec">

- Install LionAGI with pip:

- ```bash
- pip install lionagi
- ```
- Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename as `.env` and put in your project's root directory.
- by default we use `OPENAI_API_KEY`.
+ #### Integrate any Advanced Model into your existing workflow.

+ <img width="1100" alt="Screenshot 2024-02-14 at 8 54 01 AM" src="https://github.com/lion-agi/lionagi/assets/122793010/cfbc403c-cece-49e7-bc3a-015e035d3607">


- ### Features
- - Robust and scalable. Create a production ready LLM application **in hours**, with more than 100 models
- - Efficient and verstile data operations for reading, chunking, binning, writing, storing data with support for `langchain` and `llamaindex`
- - Built-in support for **chain/graph-of-thoughts, ReAct, Concurrent parallel function calling**
- - Unified interface with any LLM provider, API or local
- - Fast and **concurrent** API call with **configurable rate limit**
- - (Work In Progress) support for models both API and local
- ---
- LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)


- **Notice**:
- * calling API with maximum throughput over large set of data with advanced models i.e. gpt-4 can get **EXPENSIVE IN JUST SECONDS**,
- * please know what you are doing, and check the usage on OpenAI regularly
- * default rate limits are set to be **tier 1** of OpenAI model `gpt-4-1104-preview`, please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free) you can modify token rate parameters to fit different use cases.
- * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases), **main is under development and will be changed without notice**
+ ### Install LionAGI with pip:
+
+ ```bash
+ pip install lionagi
+ ```
+ Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename it to `.env`, and put it in your project's root directory.
+ By default we use `OPENAI_API_KEY`.


  ### Quick Start

- The following example shows how to use LionAGI's `Session` object to interact with `gpt-4` model:
+ The following example shows how to use LionAGI's `Session` object to interact with the `gpt-4-turbo` model:

  ```python

@@ -60,7 +48,7 @@ import lionagi as li

  calculator = li.Session(system=system)
  result = await calculator.chat(
-     instruction=instruction, context=context, model="gpt-4-1106-preview"
+     instruction=instruction, context=context, model="gpt-4-turbo-preview"
  )

  print(f"Calculation Result: {result}")
@@ -78,7 +66,7 @@ import lionagi as li
  async def main():
      calculator = li.Session(system=system)
      result = await calculator.chat(
-         instruction=instruction, context=context, model="gpt-4-1106-preview"
+         instruction=instruction, context=context, model="gpt-4-turbo-preview"
      )
      print(f"Calculation Result: {result}")

@@ -86,7 +74,18 @@ if __name__ == "__main__":
  asyncio.run(main())
  ```

- Visit our notebooks for our examples.
+ Visit our notebooks for examples.
+
+ LionAGI is designed to be `asynchronous` only; please check the official Python documentation on how `async` works: [here](https://docs.python.org/3/library/asyncio.html)
+
+ ---
+
+ **Notice**:
+ * calling the API at maximum throughput over a large set of data with advanced models, i.e. gpt-4, can get **EXPENSIVE IN JUST SECONDS**
+ * please know what you are doing, and check your usage on OpenAI regularly
+ * default rate limits are set to 1,000 requests and 100,000 tokens per minute; please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free); you can modify token rate parameters to fit different use cases
+ * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases); **main is under development and will be changed without notice**
+

  ### Community

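The Quick Start hunks above reference `system`, `instruction`, and `context` without showing their definitions. A minimal sketch of the assembled example, with assumed values for those three variables (run inside an async context such as a Jupyter cell):

```python
# Hedged completion of the Quick Start; the concrete values of
# `system`, `instruction`, and `context` below are assumptions,
# not taken from the actual README.
import lionagi as li

system = "You are a helpful assistant that performs calculations."
instruction = "Multiply the two numbers in the context."
context = {"number1": 7, "number2": 6}

calculator = li.Session(system=system)
result = await calculator.chat(
    instruction=instruction, context=context, model="gpt-4-turbo-preview"
)
print(f"Calculation Result: {result}")
```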
@@ -16,17 +16,15 @@ Copyright 2023 HaiyangLi <ocean@lionagi.ai>

  import logging
  from .version import __version__
-
+ from dotenv import load_dotenv

  from .utils import *
- from .schema import *
+ from .schema.base_schema import *
  from .structures import *
  from .loaders import *
- from ._services import *
+ from .api_service import *
  from .tools import *
- from .core import *
-
-

  logger = logging.getLogger(__name__)
  logger.setLevel(logging.INFO)
+ load_dotenv()
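Because `load_dotenv()` now runs at package import time, an API key placed in a project-root `.env` file should be visible in the environment as soon as `lionagi` is imported. A small sketch of the assumed setup:

```python
# Assumed setup: the project root contains a .env file with a line
# like OPENAI_API_KEY=sk-... (copied from .env_template).
import os

import lionagi as li  # importing the package triggers load_dotenv()

# The key should now be available through the process environment.
print("key loaded:", os.getenv("OPENAI_API_KEY") is not None)
```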
@@ -0,0 +1,65 @@
+ from typing import Any, Dict, NoReturn, Optional, Type, List, Union
+ from .base_rate_limiter import BaseRateLimiter, SimpleRateLimiter
+
+
+ class BaseEndpoint:
+     """
+     Represents an API endpoint with rate limiting capabilities.
+
+     This class encapsulates the details of an API endpoint, including its rate limiter.
+
+     Attributes:
+         endpoint (str): The API endpoint path.
+         rate_limiter_class (Type[BaseRateLimiter]): The class used for rate limiting requests to the endpoint.
+         max_requests (int): The maximum number of requests allowed per interval.
+         max_tokens (int): The maximum number of tokens allowed per interval.
+         interval (int): The time interval in seconds for replenishing rate limit capacities.
+         config (Dict): Configuration parameters for the endpoint.
+         rate_limiter (Optional[BaseRateLimiter]): The rate limiter instance for this endpoint.
+
+     Examples:
+         # Example usage of BaseEndpoint with SimpleRateLimiter
+         endpoint = BaseEndpoint(
+             max_requests=100,
+             max_tokens=1000,
+             interval=60,
+             endpoint_='chat/completions',
+             rate_limiter_class=SimpleRateLimiter,
+             config={'param1': 'value1'}
+         )
+         asyncio.run(endpoint.init_rate_limiter())
+     """
+
+     def __init__(
+         self,
+         max_requests: int = 1_000,
+         max_tokens: int = 100_000,
+         interval: int = 60,
+         endpoint_: Optional[str] = None,
+         rate_limiter_class: Type[BaseRateLimiter] = SimpleRateLimiter,
+         encode_kwargs=None,
+         token_encoding_name=None,
+         config: Dict = None,
+     ) -> None:
+         self.endpoint = endpoint_ or 'chat/completions'
+         self.rate_limiter_class = rate_limiter_class
+         self.max_requests = max_requests
+         self.max_tokens = max_tokens
+         self.interval = interval
+         self.token_encoding_name = token_encoding_name
+         self.config = config or {}
+         self.rate_limiter: Optional[BaseRateLimiter] = None
+         self._has_initialized = False
+         self.encode_kwargs = encode_kwargs or {}
+
+     async def init_rate_limiter(self) -> None:
+         """Initializes the rate limiter for the endpoint."""
+         self.rate_limiter = await self.rate_limiter_class.create(
+             self.max_requests, self.max_tokens, self.interval, self.token_encoding_name
+         )
+         self._has_initialized = True
+
+
+ class Embedding(BaseEndpoint):
+     ...
+
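Mirroring the docstring above, a minimal sketch of constructing and initializing an endpoint on its own; in normal use `BaseService.init_endpoint` does this for you:

```python
import asyncio

from lionagi.api_service.base_endpoint import BaseEndpoint
from lionagi.api_service.base_rate_limiter import SimpleRateLimiter


async def main():
    endpoint = BaseEndpoint(
        max_requests=100,
        max_tokens=1_000,
        interval=60,
        endpoint_="chat/completions",
        rate_limiter_class=SimpleRateLimiter,
    )
    # init_rate_limiter() awaits SimpleRateLimiter.create(), which also
    # starts the background replenisher task on the running event loop.
    await endpoint.init_rate_limiter()
    print(endpoint.rate_limiter.available_request_capacity)  # 100


asyncio.run(main())
```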
@@ -0,0 +1,121 @@
+ import asyncio
+ import logging
+ from abc import ABC
+ from typing import Any, Dict, NoReturn, Optional
+
+ from ..utils import APIUtil
+
+
+ class BaseRateLimiter(ABC):
+     def __init__(self, max_requests: int, max_tokens: int, interval: int = 60, token_encoding_name=None) -> None:
+         self.interval: int = interval
+         self.max_requests: int = max_requests
+         self.max_tokens: int = max_tokens
+         self.available_request_capacity: int = max_requests
+         self.available_token_capacity: int = max_tokens
+         self.rate_limit_replenisher_task: Optional[asyncio.Task[NoReturn]] = None
+         self._stop_replenishing: asyncio.Event = asyncio.Event()
+         self._lock: asyncio.Lock = asyncio.Lock()
+         self.token_encoding_name = token_encoding_name
+
+     async def start_replenishing(self) -> NoReturn:
+         """Starts the replenishment of rate limit capacities at regular intervals."""
+         try:
+             while not self._stop_replenishing.is_set():
+                 await asyncio.sleep(self.interval)
+                 async with self._lock:
+                     self.available_request_capacity = self.max_requests
+                     self.available_token_capacity = self.max_tokens
+         except asyncio.CancelledError:
+             logging.info("Rate limit replenisher task cancelled.")
+
+         except Exception as e:
+             logging.error(f"An error occurred in the rate limit replenisher: {e}")
+
+     async def stop_replenishing(self) -> None:
+         """Stops the replenishment task."""
+         if self.rate_limit_replenisher_task:
+             self.rate_limit_replenisher_task.cancel()
+             await self.rate_limit_replenisher_task
+         self._stop_replenishing.set()
+
+     async def request_permission(self, required_tokens) -> bool:
+         """Requests permission to make an API call.
+
+         Returns True if the request can be made immediately, otherwise False.
+         """
+         async with self._lock:
+             if self.available_request_capacity > 0 and self.available_token_capacity > 0:
+                 self.available_request_capacity -= 1
+                 self.available_token_capacity -= required_tokens  # Deduct the estimated token cost of this request
+                 return True
+             return False
+
+     async def _call_api(
+         self,
+         http_session,
+         endpoint: str,
+         base_url: str,
+         api_key: str,
+         max_attempts: int = 3,
+         method: str = "post",
+         payload: Dict[str, Any] = None,
+         **kwargs,
+     ) -> Optional[Dict[str, Any]]:
+         endpoint = APIUtil.api_endpoint_from_url(base_url + endpoint)
+         while True:
+             if self.available_request_capacity < 1 or self.available_token_capacity < 10:  # Minimum token count
+                 await asyncio.sleep(1)  # Wait for capacity
+                 continue
+             required_tokens = APIUtil.calculate_num_token(payload, endpoint, self.token_encoding_name, **kwargs)
+
+             if await self.request_permission(required_tokens):
+                 request_headers = {"Authorization": f"Bearer {api_key}"}
+                 attempts_left = max_attempts
+
+                 while attempts_left > 0:
+                     try:
+                         method = APIUtil.api_method(http_session, method)
+                         async with method(
+                             url=(base_url + endpoint), headers=request_headers, json=payload
+                         ) as response:
+                             response_json = await response.json()
+
+                             if "error" in response_json:
+                                 logging.warning(
+                                     f"API call failed with error: {response_json['error']}"
+                                 )
+                                 attempts_left -= 1
+
+                                 if "Rate limit" in response_json["error"].get("message", ""):
+                                     await asyncio.sleep(15)
+                             else:
+                                 return response_json
+                     except Exception as e:
+                         logging.warning(f"API call failed with exception: {e}")
+                         attempts_left -= 1
+
+                 logging.error("API call failed after all attempts.")
+                 break
+             else:
+                 await asyncio.sleep(1)
+
+     @classmethod
+     async def create(cls, max_requests: int, max_tokens: int, interval: int = 60, token_encoding_name=None) -> 'BaseRateLimiter':
+         instance = cls(max_requests, max_tokens, interval, token_encoding_name)
+         instance.rate_limit_replenisher_task = asyncio.create_task(
+             instance.start_replenishing()
+         )
+         return instance
+
+
+ class SimpleRateLimiter(BaseRateLimiter):
+     """
+     A simple implementation of a rate limiter.
+
+     Inherits from BaseRateLimiter and provides a basic rate limiting mechanism.
+     """
+
+     def __init__(self, max_requests: int, max_tokens: int, interval: int = 60, token_encoding_name=None) -> None:
+         """Initializes the SimpleRateLimiter with the specified parameters."""
+         super().__init__(max_requests, max_tokens, interval, token_encoding_name)
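A minimal sketch of driving the limiter directly, assuming a standalone script; in normal use the limiter is created by `BaseEndpoint.init_rate_limiter` and consumed by `_call_api`:

```python
import asyncio

from lionagi.api_service.base_rate_limiter import SimpleRateLimiter


async def main():
    # create() constructs the limiter and schedules its replenisher
    # task, which resets both capacities every `interval` seconds.
    limiter = await SimpleRateLimiter.create(max_requests=2, max_tokens=1_000, interval=60)

    # Each grant deducts one request plus the estimated token cost.
    print(await limiter.request_permission(required_tokens=400))  # True
    print(await limiter.request_permission(required_tokens=400))  # True
    print(await limiter.request_permission(required_tokens=400))  # False, request budget spent

    await limiter.stop_replenishing()


asyncio.run(main())
```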
@@ -0,0 +1,146 @@
+ import asyncio
+ import logging
+ import aiohttp
+ from abc import ABC
+ from dataclasses import dataclass
+ from typing import Any, Dict, NoReturn, Optional, Type, List, Union
+
+ from ..utils import nget, APIUtil, to_list, lcall
+ from .base_rate_limiter import BaseRateLimiter, SimpleRateLimiter
+ from .status_tracker import StatusTracker
+
+ from .base_endpoint import BaseEndpoint
+
+
+ class BaseService:
+     """
+     Base class for services that interact with API endpoints.
+
+     This class provides a foundation for services that need to make API calls with rate limiting.
+
+     Attributes:
+         api_key (Optional[str]): The API key used for authentication.
+         schema (Dict[str, Any]): The schema defining the service's endpoints.
+         status_tracker (StatusTracker): The object tracking the status of API calls.
+         endpoints (Dict[str, BaseEndpoint]): A dictionary of endpoint objects.
+     """
+
+     base_url: str = ''
+     available_endpoints: list = []
+
+     def __init__(
+         self,
+         api_key: Optional[str] = None,
+         schema: Dict[str, Any] = None,
+         token_encoding_name: str = None,
+         max_tokens: int = 100_000,
+         max_requests: int = 1_000,
+         interval: int = 60
+     ) -> None:
+         self.api_key = api_key
+         self.schema = schema or {}
+         self.status_tracker = StatusTracker()
+         self.endpoints: Dict[str, BaseEndpoint] = {}
+         self.token_encoding_name = token_encoding_name
+         self.chat_config = {
+             'max_requests': max_requests,
+             'max_tokens': max_tokens,
+             'interval': interval,
+             "token_encoding_name": token_encoding_name
+         }
+
+
+     async def init_endpoint(self, endpoint_: Optional[Union[List[str], List[BaseEndpoint], str, BaseEndpoint]] = None) -> None:
+         """
+         Initializes the specified endpoint, or all endpoints if none is specified.
+
+         Args:
+             endpoint_: The endpoint(s) to initialize. Can be a string, a BaseEndpoint, a list of strings, or a list of BaseEndpoints.
+         """
+
+         if endpoint_:
+             endpoint_ = to_list(endpoint_, flatten=True, dropna=True)
+
+             for ep in endpoint_:
+                 self._check_endpoints(ep)
+
+                 if ep not in self.endpoints:
+                     endpoint_config = self._get_endpoint(ep)
+
+                     if endpoint_config is not None:
+                         if ep == "chat/completions":
+                             self.endpoints[ep] = BaseEndpoint(
+                                 max_requests=self.chat_config.get('max_requests', 1000),
+                                 max_tokens=self.chat_config.get('max_tokens', 100000),
+                                 interval=self.chat_config.get('interval', 60),
+                                 endpoint_=ep,
+                                 token_encoding_name=self.token_encoding_name,
+                                 config=endpoint_config,
+                             )
+                         else:
+                             self.endpoints[ep] = BaseEndpoint(
+                                 max_requests=endpoint_config.get('max_requests', 1000) if endpoint_config.get('max_requests', 1000) is not None else 1000,
+                                 max_tokens=endpoint_config.get('max_tokens', 100000) if endpoint_config.get('max_tokens', 100000) is not None else 100000,
+                                 interval=endpoint_config.get('interval', 60) if endpoint_config.get('interval', 60) is not None else 60,
+                                 endpoint_=ep,
+                                 token_encoding_name=self.token_encoding_name,
+                                 config=endpoint_config,
+                             )
+
+                 if not self.endpoints[ep]._has_initialized:
+                     await self.endpoints[ep].init_rate_limiter()
+
+         else:
+             for ep in self.available_endpoints:
+                 endpoint_config = nget(self.schema, [ep, 'config'])
+                 self.schema.get(ep, {})
+                 if ep not in self.endpoints:
+                     self.endpoints[ep] = BaseEndpoint(
+                         max_requests=endpoint_config.get('max_requests', 1000),
+                         max_tokens=endpoint_config.get('max_tokens', 100000),
+                         interval=endpoint_config.get('interval', 60),
+                         endpoint_=ep,
+                         token_encoding_name=self.token_encoding_name,
+                         config=endpoint_config,
+                     )
+                 if not self.endpoints[ep]._has_initialized:
+                     await self.endpoints[ep].init_rate_limiter()
+
+     async def call_api(self, payload, endpoint, method, **kwargs):
+         """
+         Calls the specified API endpoint with the given payload and method.
+
+         Args:
+             payload: The payload to send with the API call.
+             endpoint: The endpoint to call.
+             method: The HTTP method to use for the call.
+             **kwargs: Additional arguments for tiktoken encoding.
+
+         Returns:
+             The response from the API call.
+
+         Raises:
+             ValueError: If the endpoint has not been initialized.
+         """
+         if endpoint not in self.endpoints:
+             raise ValueError(f'The endpoint {endpoint} has not been initialized.')
+         async with aiohttp.ClientSession() as http_session:
+             completion = await self.endpoints[endpoint].rate_limiter._call_api(
+                 http_session=http_session, endpoint=endpoint, base_url=self.base_url, api_key=self.api_key,
+                 method=method, payload=payload, **kwargs)
+             return completion
+
+     def _check_endpoints(self, endpoint_):
+         if endpoint_ not in self.available_endpoints:
+             raise ValueError(
+                 f"Endpoint {endpoint_} not available for service {self.__class__.__name__}")
+
+     def _get_endpoint(self, endpoint_):
+         if endpoint_ not in self.endpoints:
+             endpoint_config = nget(self.schema, [endpoint_, 'config'])
+             self.schema.get(endpoint_, {})
+
+             if isinstance(endpoint_, BaseEndpoint):
+                 self.endpoints[endpoint_.endpoint] = endpoint_
+                 return None
+             return endpoint_config
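A sketch of how a concrete service might build on this base class. The `MyService` name, base URL, schema shape, and encoding name below are assumptions for illustration; the package's real OpenAI service lives in `lionagi/services/oai.py` and its schema will differ:

```python
# Hypothetical subclass for illustration only; names and schema
# values are assumptions, not the package's actual OpenAI service.
import asyncio

from lionagi.api_service.base_service import BaseService


class MyService(BaseService):
    base_url = "https://api.openai.com/v1/"
    available_endpoints = ["chat/completions"]


async def main():
    service = MyService(
        api_key="sk-...",                             # assumed placeholder
        schema={"chat/completions": {"config": {}}},  # assumed schema shape
        token_encoding_name="cl100k_base",            # assumed tiktoken encoding
    )
    await service.init_endpoint("chat/completions")  # builds endpoint + rate limiter
    payload = {
        "model": "gpt-4-turbo-preview",
        "messages": [{"role": "user", "content": "hello"}],
    }
    response = await service.call_api(payload, "chat/completions", "post")
    print(response)


asyncio.run(main())
```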
@@ -0,0 +1,6 @@
+ from .base_endpoint import BaseEndpoint
+ from .payload_package import PayloadCreation
+
+ class ChatCompletion(BaseEndpoint):
+     ...
+
@@ -0,0 +1,6 @@
+ from .base_endpoint import BaseEndpoint
+ from .payload_package import PayloadCreation
+
+ class Embeddings(BaseEndpoint):
+     ...
+
@@ -0,0 +1,47 @@
+ from lionagi.utils.api_util import APIUtil
+
+ class PayloadCreation:
+
+     @classmethod
+     def chat_completion(cls, messages, llmconfig, schema, **kwargs):
+         """
+         Creates a payload for the chat completion operation.
+
+         Args:
+             messages: The messages to include in the chat completion.
+             llmconfig: Configuration for the language model.
+             schema: The schema describing required and optional fields.
+             **kwargs: Additional keyword arguments.
+
+         Returns:
+             The constructed payload.
+         """
+         return APIUtil._create_payload(
+             input_=messages,
+             config=llmconfig,
+             required_=schema['required'],
+             optional_=schema['optional'],
+             input_key="messages",
+             **kwargs)
+
+     @classmethod
+     def fine_tuning(cls, training_file, llmconfig, schema, **kwargs):
+         """
+         Creates a payload for the fine-tuning operation.
+
+         Args:
+             training_file: The file containing training data.
+             llmconfig: Configuration for the language model.
+             schema: The schema describing required and optional fields.
+             **kwargs: Additional keyword arguments.
+
+         Returns:
+             The constructed payload.
+         """
+         return APIUtil._create_payload(
+             input_=training_file,
+             config=llmconfig,
+             required_=schema['required'],
+             optional_=schema['optional'],
+             input_key="training_file",
+             **kwargs)
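A sketch of a direct call, assuming a plausible shape for `llmconfig` and `schema`; in practice both come from the service's endpoint configuration, and `APIUtil._create_payload` is private, so the exact shapes may differ:

```python
from lionagi.api_service.payload_package import PayloadCreation

# Assumed shapes for illustration; real values come from the service config.
messages = [{"role": "user", "content": "hello"}]
llmconfig = {"model": "gpt-4-turbo-preview", "temperature": 0.7}
schema = {"required": ["model"], "optional": ["temperature"]}

payload = PayloadCreation.chat_completion(messages, llmconfig, schema)
# Presumably yields something like:
# {"messages": [...], "model": "gpt-4-turbo-preview", "temperature": 0.7}
```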
@@ -0,0 +1,29 @@
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class StatusTracker:
+     """
+     Keeps track of various task statuses within a system.
+
+     Attributes:
+         num_tasks_started (int): The number of tasks that have been initiated.
+         num_tasks_in_progress (int): The number of tasks currently being processed.
+         num_tasks_succeeded (int): The number of tasks that have completed successfully.
+         num_tasks_failed (int): The number of tasks that have failed.
+         num_rate_limit_errors (int): The number of tasks that failed due to rate limiting.
+         num_api_errors (int): The number of tasks that failed due to API errors.
+         num_other_errors (int): The number of tasks that failed due to other errors.
+
+     Examples:
+         >>> tracker = StatusTracker()
+         >>> tracker.num_tasks_started += 1
+         >>> tracker.num_tasks_succeeded += 1
+     """
+     num_tasks_started: int = 0
+     num_tasks_in_progress: int = 0
+     num_tasks_succeeded: int = 0
+     num_tasks_failed: int = 0
+     num_rate_limit_errors: int = 0
+     num_api_errors: int = 0
+     num_other_errors: int = 0
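`BaseService` instantiates one of these as `self.status_tracker`. A brief sketch of the counter-update pattern the fields suggest; the surrounding try/except/finally is an assumption, not code from the package:

```python
from lionagi.api_service.status_tracker import StatusTracker

tracker = StatusTracker()
tracker.num_tasks_started += 1
tracker.num_tasks_in_progress += 1
try:
    ...  # issue the API call here
    tracker.num_tasks_succeeded += 1
except Exception:
    tracker.num_tasks_failed += 1
finally:
    tracker.num_tasks_in_progress -= 1
```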
@@ -0,0 +1,7 @@
+ from .branch import Branch
+ from .session import Session
+
+ __all__ = [
+     "Branch",
+     "Session"
+ ]
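Given these exports, the direct import path for the reorganized core objects in 0.0.210 would presumably be:

```python
# Presumed import path after the 0.0.210 core reorganization.
from lionagi.core import Branch, Session
```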