lionagi 0.0.115__py3-none-any.whl → 0.0.204__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- lionagi/__init__.py +1 -2
- lionagi/_services/__init__.py +5 -0
- lionagi/_services/anthropic.py +79 -0
- lionagi/_services/base_service.py +414 -0
- lionagi/_services/oai.py +98 -0
- lionagi/_services/openrouter.py +44 -0
- lionagi/_services/services.py +91 -0
- lionagi/_services/transformers.py +46 -0
- lionagi/bridge/langchain.py +26 -16
- lionagi/bridge/llama_index.py +50 -20
- lionagi/configs/oai_configs.py +2 -14
- lionagi/configs/openrouter_configs.py +2 -2
- lionagi/core/__init__.py +7 -8
- lionagi/core/branch/branch.py +589 -0
- lionagi/core/branch/branch_manager.py +139 -0
- lionagi/core/branch/conversation.py +484 -0
- lionagi/core/core_util.py +59 -0
- lionagi/core/flow/flow.py +19 -0
- lionagi/core/flow/flow_util.py +62 -0
- lionagi/core/instruction_set/__init__.py +0 -5
- lionagi/core/instruction_set/instruction_set.py +343 -0
- lionagi/core/messages/messages.py +176 -0
- lionagi/core/sessions/__init__.py +0 -5
- lionagi/core/sessions/session.py +428 -0
- lionagi/loaders/chunker.py +51 -47
- lionagi/loaders/load_util.py +2 -2
- lionagi/loaders/reader.py +45 -39
- lionagi/models/imodel.py +53 -0
- lionagi/schema/async_queue.py +158 -0
- lionagi/schema/base_node.py +318 -147
- lionagi/schema/base_tool.py +31 -1
- lionagi/schema/data_logger.py +74 -38
- lionagi/schema/data_node.py +57 -6
- lionagi/structures/graph.py +132 -10
- lionagi/structures/relationship.py +58 -20
- lionagi/structures/structure.py +36 -25
- lionagi/tests/test_utils/test_api_util.py +219 -0
- lionagi/tests/test_utils/test_call_util.py +785 -0
- lionagi/tests/test_utils/test_encrypt_util.py +323 -0
- lionagi/tests/test_utils/test_io_util.py +238 -0
- lionagi/tests/test_utils/test_nested_util.py +338 -0
- lionagi/tests/test_utils/test_sys_util.py +358 -0
- lionagi/tools/tool_manager.py +186 -0
- lionagi/tools/tool_util.py +266 -3
- lionagi/utils/__init__.py +21 -61
- lionagi/utils/api_util.py +359 -71
- lionagi/utils/call_util.py +839 -264
- lionagi/utils/encrypt_util.py +283 -16
- lionagi/utils/io_util.py +178 -93
- lionagi/utils/nested_util.py +672 -0
- lionagi/utils/pd_util.py +57 -0
- lionagi/utils/sys_util.py +284 -156
- lionagi/utils/url_util.py +55 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/METADATA +21 -17
- lionagi-0.0.204.dist-info/RECORD +106 -0
- lionagi/core/conversations/__init__.py +0 -5
- lionagi/core/conversations/conversation.py +0 -107
- lionagi/core/flows/__init__.py +0 -8
- lionagi/core/flows/flow.py +0 -8
- lionagi/core/flows/flow_util.py +0 -62
- lionagi/core/instruction_set/instruction_sets.py +0 -7
- lionagi/core/sessions/sessions.py +0 -185
- lionagi/endpoints/__init__.py +0 -5
- lionagi/endpoints/audio.py +0 -17
- lionagi/endpoints/chatcompletion.py +0 -54
- lionagi/messages/__init__.py +0 -11
- lionagi/messages/instruction.py +0 -15
- lionagi/messages/message.py +0 -110
- lionagi/messages/response.py +0 -33
- lionagi/messages/system.py +0 -12
- lionagi/objs/__init__.py +0 -11
- lionagi/objs/abc_objs.py +0 -39
- lionagi/objs/async_queue.py +0 -135
- lionagi/objs/messenger.py +0 -85
- lionagi/objs/tool_manager.py +0 -253
- lionagi/services/__init__.py +0 -11
- lionagi/services/base_api_service.py +0 -230
- lionagi/services/oai.py +0 -34
- lionagi/services/openrouter.py +0 -31
- lionagi/tests/test_api_util.py +0 -46
- lionagi/tests/test_call_util.py +0 -115
- lionagi/tests/test_convert_util.py +0 -202
- lionagi/tests/test_encrypt_util.py +0 -33
- lionagi/tests/test_flat_util.py +0 -426
- lionagi/tests/test_sys_util.py +0 -0
- lionagi/utils/convert_util.py +0 -229
- lionagi/utils/flat_util.py +0 -599
- lionagi-0.0.115.dist-info/RECORD +0 -110
- /lionagi/{services → _services}/anyscale.py +0 -0
- /lionagi/{services → _services}/azure.py +0 -0
- /lionagi/{services → _services}/bedrock.py +0 -0
- /lionagi/{services → _services}/everlyai.py +0 -0
- /lionagi/{services → _services}/gemini.py +0 -0
- /lionagi/{services → _services}/gpt4all.py +0 -0
- /lionagi/{services → _services}/huggingface.py +0 -0
- /lionagi/{services → _services}/litellm.py +0 -0
- /lionagi/{services → _services}/localai.py +0 -0
- /lionagi/{services → _services}/mistralai.py +0 -0
- /lionagi/{services → _services}/ollama.py +0 -0
- /lionagi/{services → _services}/openllm.py +0 -0
- /lionagi/{services → _services}/perplexity.py +0 -0
- /lionagi/{services → _services}/predibase.py +0 -0
- /lionagi/{services → _services}/rungpt.py +0 -0
- /lionagi/{services → _services}/vllm.py +0 -0
- /lionagi/{services → _services}/xinference.py +0 -0
- /lionagi/{endpoints/assistants.py → agents/__init__.py} +0 -0
- /lionagi/{tools → agents}/planner.py +0 -0
- /lionagi/{tools → agents}/prompter.py +0 -0
- /lionagi/{tools → agents}/scorer.py +0 -0
- /lionagi/{tools → agents}/summarizer.py +0 -0
- /lionagi/{tools → agents}/validator.py +0 -0
- /lionagi/{endpoints/embeddings.py → core/branch/__init__.py} +0 -0
- /lionagi/{services/anthropic.py → core/branch/cluster.py} +0 -0
- /lionagi/{endpoints/finetune.py → core/flow/__init__.py} +0 -0
- /lionagi/{endpoints/image.py → core/messages/__init__.py} +0 -0
- /lionagi/{endpoints/moderation.py → models/__init__.py} +0 -0
- /lionagi/{endpoints/vision.py → models/base_model.py} +0 -0
- /lionagi/{objs → schema}/status_tracker.py +0 -0
- /lionagi/tests/{test_io_util.py → test_utils/__init__.py} +0 -0
- {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/LICENSE +0 -0
- {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/WHEEL +0 -0
- {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: lionagi
|
3
|
-
Version: 0.0.
|
3
|
+
Version: 0.0.204
|
4
4
|
Summary: Towards automated general intelligence.
|
5
5
|
Author: HaiyangLi
|
6
6
|
Author-email: Haiyang Li <ocean@lionagi.ai>
|
@@ -217,8 +217,11 @@ Description-Content-Type: text/markdown
|
|
217
217
|
License-File: LICENSE
|
218
218
|
Requires-Dist: aiohttp >=3.9.0
|
219
219
|
Requires-Dist: python-dotenv ==1.0.0
|
220
|
-
Requires-Dist: tiktoken
|
221
|
-
Requires-Dist:
|
220
|
+
Requires-Dist: tiktoken >=0.5.1
|
221
|
+
Requires-Dist: pydantic >=2.6.0
|
222
|
+
Requires-Dist: cryptography >=42.0.0
|
223
|
+
Requires-Dist: aiocache ==0.12.2
|
224
|
+
Requires-Dist: pandas >=2.1.0
|
222
225
|
|
223
226
|
![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![Read the Docs](https://img.shields.io/readthedocs/lionagi) ![PyPI - License](https://img.shields.io/pypi/l/lionagi?color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
|
224
227
|
|
@@ -247,12 +250,12 @@ by default we use `OPENAI_API_KEY`.
|
|
247
250
|
|
248
251
|
|
249
252
|
### Features
|
250
|
-
- Create a production ready LLM application **in hours**, with more than 100 models
|
251
|
-
-
|
252
|
-
-
|
253
|
+
- Robust and scalable. Create a production ready LLM application **in hours**, with more than 100 models
|
254
|
+
- Efficient and verstile data operations for reading, chunking, binning, writing, storing data with support for `langchain` and `llamaindex`
|
255
|
+
- Built-in support for **chain/graph-of-thoughts, ReAct, Concurrent parallel function calling**
|
253
256
|
- Unified interface with any LLM provider, API or local
|
254
257
|
- Fast and **concurrent** API call with **configurable rate limit**
|
255
|
-
- (Work In Progress) support for
|
258
|
+
- (Work In Progress) support for models both API and local
|
256
259
|
---
|
257
260
|
LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)
|
258
261
|
|
@@ -269,7 +272,6 @@ LionAGI is designed to be `asynchronous` only, please check python official docu
|
|
269
272
|
The following example shows how to use LionAGI's `Session` object to interact with `gpt-4` model:
|
270
273
|
|
271
274
|
```python
|
272
|
-
import lionagi as li
|
273
275
|
|
274
276
|
# define system messages, context and user instruction
|
275
277
|
system = "You are a helpful assistant designed to perform calculations."
|
@@ -279,10 +281,12 @@ context = {"x": 10, "y": 5}
|
|
279
281
|
|
280
282
|
```python
|
281
283
|
# in interactive environment (.ipynb for example)
|
284
|
+
import lionagi as li
|
285
|
+
|
282
286
|
calculator = li.Session(system=system)
|
283
|
-
result = await calculator.
|
284
|
-
|
285
|
-
|
287
|
+
result = await calculator.chat(
|
288
|
+
instruction=instruction, context=context, model="gpt-4-1106-preview"
|
289
|
+
)
|
286
290
|
|
287
291
|
print(f"Calculation Result: {result}")
|
288
292
|
```
|
@@ -290,15 +294,17 @@ print(f"Calculation Result: {result}")
|
|
290
294
|
```python
|
291
295
|
# or otherwise, you can use
|
292
296
|
import asyncio
|
293
|
-
from dotenv import
|
297
|
+
from dotenv import load_dotenv
|
294
298
|
|
295
299
|
load_dotenv()
|
296
300
|
|
301
|
+
import lionagi as li
|
302
|
+
|
297
303
|
async def main():
|
298
304
|
calculator = li.Session(system=system)
|
299
|
-
result = await calculator.
|
300
|
-
|
301
|
-
|
305
|
+
result = await calculator.chat(
|
306
|
+
instruction=instruction, context=context, model="gpt-4-1106-preview"
|
307
|
+
)
|
302
308
|
print(f"Calculation Result: {result}")
|
303
309
|
|
304
310
|
if __name__ == "__main__":
|
@@ -325,8 +331,6 @@ When referencing LionAGI in your projects or research, please cite:
|
|
325
331
|
}
|
326
332
|
```
|
327
333
|
|
328
|
-
## Star History
|
329
|
-
![Star History Chart](https://api.star-history.com/svg?repos=lion-agi/lionagi&type=Date)
|
330
334
|
|
331
335
|
### Requirements
|
332
336
|
Python 3.9 or higher.
|
@@ -0,0 +1,106 @@
|
|
1
|
+
lionagi/__init__.py,sha256=pRm9tnfYQnj_ZCan3xr3HQO7tbUeqwRhSBtwOvXJ-o8,886
|
2
|
+
lionagi/version.py,sha256=DFD_QxPb5C0pITp5fF5NUF2YLCrCu9FEfCgCY7YVy4I,24
|
3
|
+
lionagi/_services/__init__.py,sha256=zU5sxmSI9-Jtp_WsI-Zsb6hmT8y5zF9YtJ7XikAjnbs,60
|
4
|
+
lionagi/_services/anthropic.py,sha256=pLebbnr2H1A41bzXrJrU7yQbZY35swKrSi4mktXoIyk,3195
|
5
|
+
lionagi/_services/anyscale.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
6
|
+
lionagi/_services/azure.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
7
|
+
lionagi/_services/base_service.py,sha256=bbPQ9xTaY5jxrHe6vW_PeyRkoxVaxWmpWlCqQxSfRI8,17314
|
8
|
+
lionagi/_services/bedrock.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
9
|
+
lionagi/_services/everlyai.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
10
|
+
lionagi/_services/gemini.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
11
|
+
lionagi/_services/gpt4all.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
12
|
+
lionagi/_services/huggingface.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
13
|
+
lionagi/_services/litellm.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
14
|
+
lionagi/_services/localai.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
15
|
+
lionagi/_services/mistralai.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
16
|
+
lionagi/_services/oai.py,sha256=Jiuw_TibpYhN6x1PmHYUMi_0Nugrxmi5G1eimAuw21I,3958
|
17
|
+
lionagi/_services/ollama.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
18
|
+
lionagi/_services/openllm.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
19
|
+
lionagi/_services/openrouter.py,sha256=MuuwoT2ro9FmY7O1jzCenRrL2YfiYUMM8-0kKoGZHAY,1799
|
20
|
+
lionagi/_services/perplexity.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
21
|
+
lionagi/_services/predibase.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
22
|
+
lionagi/_services/rungpt.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
23
|
+
lionagi/_services/services.py,sha256=kY2TpT98pDS_qCrCO4H1YBlDYNDqekx5S9rWQdWm5Ck,4007
|
24
|
+
lionagi/_services/transformers.py,sha256=tHlo9QVV5ycB2xUEKNRf-b665o_21fPC6c4ycH3Hjk4,1444
|
25
|
+
lionagi/_services/vllm.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
26
|
+
lionagi/_services/xinference.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
27
|
+
lionagi/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
28
|
+
lionagi/agents/planner.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
29
|
+
lionagi/agents/prompter.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
30
|
+
lionagi/agents/scorer.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
31
|
+
lionagi/agents/summarizer.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
32
|
+
lionagi/agents/validator.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
33
|
+
lionagi/bridge/__init__.py,sha256=YDKi-pniFEknEXTSEvX6yEe4Y69f0kLvbjvEQ0TdrTU,575
|
34
|
+
lionagi/bridge/langchain.py,sha256=NuuUOJ5oV0l6ae4EeXxaeB82nJ6FDJHVC4AFBoIqiDs,5726
|
35
|
+
lionagi/bridge/llama_index.py,sha256=oggpASmfVA6IhfgBOJS8CWJrQb3zrGaDApUQ7MWT8OM,6652
|
36
|
+
lionagi/configs/__init__.py,sha256=QOd4Rs7vjIpNWvIocxWQeU-q-MPRC-AOxh-gM-eBJ2o,142
|
37
|
+
lionagi/configs/oai_configs.py,sha256=Q2ESc5QiMprnRc_w7SeMlaTYUWl_Y4SEzZSE4iOkz4Q,2646
|
38
|
+
lionagi/configs/openrouter_configs.py,sha256=IBQHqb8mo4Jb3kYAm_7NOHSKRPwSdGbPpDJoiwHxLYw,1269
|
39
|
+
lionagi/core/__init__.py,sha256=6uDjq1WCWBahNiCpzGaUCJe7GDg6lwUD_cGNT86GhwM,200
|
40
|
+
lionagi/core/core_util.py,sha256=80fmVywc0UC9FklXpQr5_dHdtdHdosv5Zxwy9tC-Ufg,2339
|
41
|
+
lionagi/core/branch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
42
|
+
lionagi/core/branch/branch.py,sha256=w3GZAws8_qgY17XOn6svmO4ECQqhNQ_rWMzXiwBQVO8,23403
|
43
|
+
lionagi/core/branch/branch_manager.py,sha256=kLVYUYVUmFQ2CPV34vfVRrkR6fhlizoAr3Dw2ilMX6M,4904
|
44
|
+
lionagi/core/branch/cluster.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
45
|
+
lionagi/core/branch/conversation.py,sha256=q05EwDouTAH5eMDCp2wwRHK0ipbm0MYFngk8R6vOTB0,18664
|
46
|
+
lionagi/core/flow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
47
|
+
lionagi/core/flow/flow.py,sha256=Fd6xVZKgjjpr-rcVAjvuMGP04OxNK4LVkbB9VnEP21k,813
|
48
|
+
lionagi/core/flow/flow_util.py,sha256=OoQ2-ktkpQs9f1m1VI1pucUeq75Mx1aKqz8KdINMt8M,2083
|
49
|
+
lionagi/core/instruction_set/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
50
|
+
lionagi/core/instruction_set/instruction_set.py,sha256=bxrxPxLJdaenvZ2CMaiucNB4fZ_5AWVz49MYs3mG2G8,13682
|
51
|
+
lionagi/core/messages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
52
|
+
lionagi/core/messages/messages.py,sha256=bokGS8hu4wQxa-4Gsf2Eg7Q99ANccjjixZpE-DC0P7g,6533
|
53
|
+
lionagi/core/sessions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
54
|
+
lionagi/core/sessions/session.py,sha256=-dSlIIzHmD2dGd3zu4m_U-UE0BKraKYyt1H_DAzpngk,17694
|
55
|
+
lionagi/datastores/__init__.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
56
|
+
lionagi/datastores/chroma.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
57
|
+
lionagi/datastores/deeplake.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
58
|
+
lionagi/datastores/elasticsearch.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
59
|
+
lionagi/datastores/lantern.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
60
|
+
lionagi/datastores/pinecone.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
61
|
+
lionagi/datastores/postgres.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
62
|
+
lionagi/datastores/qdrant.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
63
|
+
lionagi/loaders/__init__.py,sha256=vOOwHkdI0yIA_jV_pNyOrdkS5Ghs0k4od75S1U60jJE,451
|
64
|
+
lionagi/loaders/chunker.py,sha256=UY6GrC8qC0MLRaHiSfgG5HMnrtWSTuIvaRPhYfdm9ak,6438
|
65
|
+
lionagi/loaders/load_util.py,sha256=4fzhMk3H6OvcQcViUwlLPSTMpcY4alfdel16lJgXz8Y,8358
|
66
|
+
lionagi/loaders/reader.py,sha256=xI1uxw9qGJ_rWev_s3vtW8Ep9YaK-15q7ts-Jy61PGg,4625
|
67
|
+
lionagi/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
68
|
+
lionagi/models/base_model.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
69
|
+
lionagi/models/imodel.py,sha256=VultpAAZ5PBT6_Cps-LjgZqHEyuF-km6eVvT-sEPEgo,1491
|
70
|
+
lionagi/schema/__init__.py,sha256=XRWXK9sztycoIMCTzwLEVMxxc1wgWKNUDRbWTpn5Ie0,208
|
71
|
+
lionagi/schema/async_queue.py,sha256=e_wFyDvCeKTxW6MAIU6Q3jxw24uEQuahaZwNDzZMB4k,5674
|
72
|
+
lionagi/schema/base_condition.py,sha256=chG3GNX2BBDTWIuSVfZUJ_YF_ZVBSoel2d_AN0OChS0,6
|
73
|
+
lionagi/schema/base_node.py,sha256=Ol4Y6gn81Dm8FJb3fdKVkJlq4R-OAGIf891exnVv45I,13211
|
74
|
+
lionagi/schema/base_tool.py,sha256=8LR-MYOGkv9zCHd8tWry296OZNYlbTpYeO1xd-oQcOM,1254
|
75
|
+
lionagi/schema/data_logger.py,sha256=2rNPBYF29_2xgv2m226BzKdTfFIVU1YlzQ0PK8zLAvY,4313
|
76
|
+
lionagi/schema/data_node.py,sha256=k80mv5DCqAHrGNlmaIHAkCA7JF4dkaRaRDh_oku4kUY,2272
|
77
|
+
lionagi/schema/status_tracker.py,sha256=6otnTSMrH5jM0PUDiDeK3zG0VOSKfNBDzNN6Bts2tmA,1236
|
78
|
+
lionagi/structures/__init__.py,sha256=wMPekT2vbWwUkJ5aW5o-lzJC9Fzhta6RHDiFPTNUm_0,120
|
79
|
+
lionagi/structures/graph.py,sha256=dqhRq5j3SOQvelUOanC3_DC4TQ1wRv4qK0QmleoLUZs,7788
|
80
|
+
lionagi/structures/relationship.py,sha256=IgiYHD87bc57hzSAYSo27WqLgtk8pzgGbIxfPYJCI-g,6059
|
81
|
+
lionagi/structures/structure.py,sha256=YyL3LxgeQWgkWDhACRxo8GoDn0IjE7idh7B0r5U3mZ4,3485
|
82
|
+
lionagi/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
83
|
+
lionagi/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
84
|
+
lionagi/tests/test_utils/test_api_util.py,sha256=7Zyc0J1glZrIWI1HrTRSRhzw8jaUW1L2vVLFAlUhI4g,9721
|
85
|
+
lionagi/tests/test_utils/test_call_util.py,sha256=7xmfFaWvniMQfaOyfwasA2enJQVuSlcAwc8gUyAR_7k,26277
|
86
|
+
lionagi/tests/test_utils/test_encrypt_util.py,sha256=hlkbFjQs2jodr8fgtPli6f1MO3doQbTcsZfzGKVrG5k,12652
|
87
|
+
lionagi/tests/test_utils/test_io_util.py,sha256=cFZCT6EikVeuXB13w-UbtO3YceCHBO5RlNXxGICqg_U,11002
|
88
|
+
lionagi/tests/test_utils/test_nested_util.py,sha256=Z1boHufhjZryw51qW2lABOnnyJ1snAFp26KKzzzD8Bs,12612
|
89
|
+
lionagi/tests/test_utils/test_sys_util.py,sha256=TDCkzll-JLa6NuBbN_-ay5Rw9KTa_HcSHHAq62RVwGI,13545
|
90
|
+
lionagi/tools/__init__.py,sha256=ZEck-ReP5Co05nAA2gUXTpKoDN2QZqrL7DvU9Z09gqg,69
|
91
|
+
lionagi/tools/tool_manager.py,sha256=4yCjMVOrKo4PoEMv0xF6wMirpj20qhLx4J7op0v5h2w,6543
|
92
|
+
lionagi/tools/tool_util.py,sha256=5ln7lnqC_rjhKDDwef10xrBrjP1yLzsQvphllD5crec,9252
|
93
|
+
lionagi/utils/__init__.py,sha256=FJAqYyb19Mp6kiietyjtp0WLlW2857-HAHl7OxqCc5Y,954
|
94
|
+
lionagi/utils/api_util.py,sha256=YV-DKPgR4lCKO_riroSKyq6mO-8vz-SilRo_gWtg4Zg,15282
|
95
|
+
lionagi/utils/call_util.py,sha256=G3K8dkEZ9AehrBdbKHK6Xtr1pZZWxrtqhQEu5xkItuo,32919
|
96
|
+
lionagi/utils/encrypt_util.py,sha256=iZjZdXVvl0lw4Yw_YNzIWriM3F2qKtzai7PgSQ1TExc,9316
|
97
|
+
lionagi/utils/io_util.py,sha256=xoVsq8sP5JGsosuC80Kad3GkGjm8Qm0OLYyTw-U5ru8,6455
|
98
|
+
lionagi/utils/nested_util.py,sha256=67j-ySQtuMGxtjnC-Ty2mwQgqp2g1gZhXRy1MulUu1U,26656
|
99
|
+
lionagi/utils/pd_util.py,sha256=ShLdNRJI-U2nN9TmZEGPdRXHzFMfrmw-sTpUbxNWr1w,2274
|
100
|
+
lionagi/utils/sys_util.py,sha256=iZQu3HvYLl-12mmZ0kk4lX-3FnkRh-EAMAefPt_6P7k,10893
|
101
|
+
lionagi/utils/url_util.py,sha256=fu1uRFwSR9D3dO1nfSYVNRD1b1BZVClcbpgF7tA_U4s,1864
|
102
|
+
lionagi-0.0.204.dist-info/LICENSE,sha256=TBnSyG8fs_tMRtK805GzA1cIyExleKyzoN_kuVxT9IY,11358
|
103
|
+
lionagi-0.0.204.dist-info/METADATA,sha256=kxTcrwAWtkK8cRY0CAxJaOZ8lBIVQgFcy-8HT92mTuc,17894
|
104
|
+
lionagi-0.0.204.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
|
105
|
+
lionagi-0.0.204.dist-info/top_level.txt,sha256=szvch_d2jE1Lu9ZIKsl26Ll6BGfYfbOgt5lm-UpFSo4,8
|
106
|
+
lionagi-0.0.204.dist-info/RECORD,,
|
@@ -1,107 +0,0 @@
|
|
1
|
-
from typing import List, Any
|
2
|
-
|
3
|
-
from lionagi.schema.base_node import BaseNode
|
4
|
-
from lionagi.messages import Message, Response
|
5
|
-
from lionagi.objs.messenger import Messenger
|
6
|
-
|
7
|
-
|
8
|
-
class Conversation(BaseNode):
|
9
|
-
"""
|
10
|
-
A conversation that handles messages and responses.
|
11
|
-
|
12
|
-
Attributes:
|
13
|
-
response_counts (int): A counter for the number of responses in the conversation.
|
14
|
-
messages (List[Message]): A list of message objects in the conversation.
|
15
|
-
msgr (Messenger): An instance of Messenger to create message objects.
|
16
|
-
responses (List[Response]): A list of response objects in the conversation.
|
17
|
-
"""
|
18
|
-
|
19
|
-
response_counts : int = 0
|
20
|
-
messages: List[Message] = []
|
21
|
-
msgr : Any = Messenger()
|
22
|
-
responses: List[Response] = []
|
23
|
-
|
24
|
-
def initiate_conversation(
|
25
|
-
self, system=None, instruction=None,
|
26
|
-
context=None, name=None
|
27
|
-
):
|
28
|
-
"""
|
29
|
-
Initiates a new conversation, erase previous messages and responses.
|
30
|
-
|
31
|
-
Parameters:
|
32
|
-
system (Any, optional): System information to include in the initial message. Defaults to None.
|
33
|
-
instruction (Any, optional): Instruction details to include in the conversation. Defaults to None.
|
34
|
-
context (Any, optional): Contextual information relevant to the conversation. Defaults to None.
|
35
|
-
name (str, optional): The name associated with the conversation. Defaults to None.
|
36
|
-
|
37
|
-
Returns:
|
38
|
-
None
|
39
|
-
"""
|
40
|
-
self.messages, self.responses = [], []
|
41
|
-
self.add_messages(system=system)
|
42
|
-
self.add_messages(instruction=instruction, context=context, name=name)
|
43
|
-
|
44
|
-
# modify the message adding to accomodate tools
|
45
|
-
def add_messages(
|
46
|
-
self, system=None, instruction=None,
|
47
|
-
context=None, response=None, name=None
|
48
|
-
):
|
49
|
-
"""
|
50
|
-
Adds a new message object to the conversation messages list based on the provided parameters.
|
51
|
-
|
52
|
-
Parameters:
|
53
|
-
system (Any, optional): System information to include in the message. Defaults to None.
|
54
|
-
instruction (Any, optional): Instruction details to include in the message. Defaults to None.
|
55
|
-
context (Any, optional): Contextual information relevant to the message. Defaults to None.
|
56
|
-
response (Any, optional): Response details to include in the message. Defaults to None.
|
57
|
-
name (str, optional): The name associated with the message. Defaults to None.
|
58
|
-
|
59
|
-
Returns:
|
60
|
-
None
|
61
|
-
"""
|
62
|
-
msg = self.msgr.create_message(
|
63
|
-
system=system, instruction=instruction,
|
64
|
-
context=context, response=response, name=name
|
65
|
-
)
|
66
|
-
self.messages.append(msg)
|
67
|
-
|
68
|
-
def change_system(self, system):
|
69
|
-
"""
|
70
|
-
Changes the system information of the first message in the conversation.
|
71
|
-
|
72
|
-
Parameters:
|
73
|
-
system (Any): The new system information to be set.
|
74
|
-
|
75
|
-
Returns:
|
76
|
-
None
|
77
|
-
"""
|
78
|
-
self.messages[0] = self.msgr.create_message(system=system)
|
79
|
-
|
80
|
-
|
81
|
-
def keep_last_n_exchanges(self, n: int):
|
82
|
-
"""
|
83
|
-
Keeps only the last n exchanges in the conversation, where an exchange starts with a user message. This function trims the conversation to retain only the specified number of the most recent exchanges.
|
84
|
-
An exchange is defined as a sequence of messages starting with a user message.
|
85
|
-
The first message in the conversation, typically a system message, is always retained.
|
86
|
-
|
87
|
-
Parameters:
|
88
|
-
n (int): The number of exchanges to keep in the conversation.
|
89
|
-
|
90
|
-
Returns:
|
91
|
-
None: The method modifies the conversation in place and does not return a value.
|
92
|
-
|
93
|
-
Raises:
|
94
|
-
ValueError: If n is not a positive integer.
|
95
|
-
|
96
|
-
Note:
|
97
|
-
This function assumes the first message in the conversation is a system message and each user message
|
98
|
-
marks the beginning of a new exchange.
|
99
|
-
"""
|
100
|
-
response_indices = [
|
101
|
-
index for index, message in enumerate(self.messages[1:])
|
102
|
-
if message.role == "user"
|
103
|
-
]
|
104
|
-
if len(response_indices) >= n:
|
105
|
-
first_index_to_keep = response_indices[-n] + 1
|
106
|
-
self.messages = [self.system] + self.messages[first_index_to_keep:]
|
107
|
-
|
lionagi/core/flows/__init__.py
DELETED
lionagi/core/flows/flow.py
DELETED
lionagi/core/flows/flow_util.py
DELETED
@@ -1,62 +0,0 @@
|
|
1
|
-
from ..sessions import Session
|
2
|
-
|
3
|
-
def get_config(temperature, max_tokens, key_scheme, n):
|
4
|
-
f = lambda i:{
|
5
|
-
"temperature": temperature[i],
|
6
|
-
"max_tokens": max_tokens[i],
|
7
|
-
}
|
8
|
-
return {
|
9
|
-
"key": f"{key_scheme}{n+1}",
|
10
|
-
"config": f(n)
|
11
|
-
}
|
12
|
-
|
13
|
-
async def run_workflow(
|
14
|
-
session, prompts, temperature, max_tokens,
|
15
|
-
key_scheme, num_prompts, context
|
16
|
-
):
|
17
|
-
for i in range(num_prompts):
|
18
|
-
key_, config_ = get_config(temperature, max_tokens, key_scheme, i)
|
19
|
-
if i == 0:
|
20
|
-
await session.initiate(instruction=prompts[key_], context=context, **config_)
|
21
|
-
else:
|
22
|
-
await session.followup(instruction=prompts[key_], **config_)
|
23
|
-
|
24
|
-
return session
|
25
|
-
|
26
|
-
async def run_auto_workflow(
|
27
|
-
session, prompts, temperature, max_tokens,
|
28
|
-
key_scheme, num_prompts, context
|
29
|
-
):
|
30
|
-
for i in range(num_prompts):
|
31
|
-
key_, config_ = get_config(temperature, max_tokens, key_scheme, i)
|
32
|
-
if i == 0:
|
33
|
-
await session.initiate(instruction=prompts[key_], context=context, **config_)
|
34
|
-
else:
|
35
|
-
await session.auto_followup(instruction=prompts[key_], **config_)
|
36
|
-
|
37
|
-
return session
|
38
|
-
|
39
|
-
async def run_session(
|
40
|
-
prompts, dir, llmconfig, key_scheme, num_prompts,
|
41
|
-
temperature, max_tokens, type_=None, tools=None
|
42
|
-
):
|
43
|
-
prompts_ = prompts.copy()
|
44
|
-
session = Session(
|
45
|
-
system=prompts_.pop('system', 'You are a helpful assistant'),
|
46
|
-
dir = dir,
|
47
|
-
llmconfig = llmconfig
|
48
|
-
)
|
49
|
-
if tools:
|
50
|
-
session.register_tools(tools)
|
51
|
-
if type_ is None:
|
52
|
-
session = await run_workflow(
|
53
|
-
session, prompts_, temperature, max_tokens,
|
54
|
-
key_scheme=key_scheme, num_prompts=num_prompts
|
55
|
-
)
|
56
|
-
elif type_ == 'auto':
|
57
|
-
session = await run_auto_workflow(
|
58
|
-
session, prompts_, temperature, max_tokens,
|
59
|
-
key_scheme=key_scheme, num_prompts=num_prompts
|
60
|
-
)
|
61
|
-
|
62
|
-
return session
|
@@ -1,185 +0,0 @@
|
|
1
|
-
import json
|
2
|
-
from typing import Any
|
3
|
-
from dotenv import load_dotenv
|
4
|
-
|
5
|
-
from lionagi.schema import DataLogger, Tool
|
6
|
-
from lionagi.utils import lcall, alcall
|
7
|
-
from lionagi.services import OpenAIService
|
8
|
-
from lionagi.endpoints import ChatCompletion
|
9
|
-
from lionagi.objs.tool_manager import ToolManager
|
10
|
-
from lionagi.configs.oai_configs import oai_schema
|
11
|
-
from lionagi.core.conversations.conversation import Conversation
|
12
|
-
|
13
|
-
load_dotenv()
|
14
|
-
OAIService = OpenAIService()
|
15
|
-
|
16
|
-
|
17
|
-
class Session:
|
18
|
-
|
19
|
-
def __init__(
|
20
|
-
self, system, dir=None, llmconfig=oai_schema['chat']['config'],
|
21
|
-
service=OAIService
|
22
|
-
):
|
23
|
-
self.conversation = Conversation()
|
24
|
-
self.system = system
|
25
|
-
self.llmconfig = llmconfig
|
26
|
-
self.logger_ = DataLogger(dir=dir)
|
27
|
-
self.service = service
|
28
|
-
self.tool_manager = ToolManager()
|
29
|
-
|
30
|
-
def set_dir(self, dir):
|
31
|
-
self.logger_.dir = dir
|
32
|
-
|
33
|
-
def set_system(self, system):
|
34
|
-
self.conversation.change_system(system)
|
35
|
-
|
36
|
-
def set_llmconfig(self, llmconfig):
|
37
|
-
self.llmconfig = llmconfig
|
38
|
-
|
39
|
-
def set_service(self, service):
|
40
|
-
self.service = service
|
41
|
-
|
42
|
-
async def _output(self, invoke=True, out=True):
|
43
|
-
if invoke:
|
44
|
-
try:
|
45
|
-
# func, args = self.tool_manager._get_function_call(self.conversation.responses[-1]['content'])
|
46
|
-
# outs = await self.tool_manager.invoke(func, args)
|
47
|
-
# self.conversation.add_messages(response=outs)
|
48
|
-
|
49
|
-
tool_uses = json.loads(self.conversation.responses[-1].message_content)
|
50
|
-
if 'function_list' in tool_uses.keys():
|
51
|
-
func_calls = lcall(tool_uses['function_list'], self.tool_manager.get_function_call)
|
52
|
-
else:
|
53
|
-
func_calls = lcall(tool_uses['tool_uses'], self.tool_manager.get_function_call)
|
54
|
-
|
55
|
-
outs = await alcall(func_calls, self.tool_manager.invoke)
|
56
|
-
for out, f in zip(outs, func_calls):
|
57
|
-
response = {"function": f[0], "arguments": f[1], "output": out}
|
58
|
-
self.conversation.add_messages(response=response)
|
59
|
-
except:
|
60
|
-
pass
|
61
|
-
if out:
|
62
|
-
return self.conversation.responses[-1].message_content
|
63
|
-
|
64
|
-
def _is_invoked(self):
|
65
|
-
content = self.conversation.messages[-1].message_content
|
66
|
-
try:
|
67
|
-
if json.loads(content).keys() >= {'function', 'arguments', 'output'}:
|
68
|
-
return True
|
69
|
-
except:
|
70
|
-
return False
|
71
|
-
|
72
|
-
def register_tools(self, tools): #, update=False, new=False, prefix=None, postfix=None):
|
73
|
-
if not isinstance(tools, list):
|
74
|
-
tools=[tools]
|
75
|
-
self.tool_manager.register_tools(tools=tools) #, update=update, new=new, prefix=prefix, postfix=postfix)
|
76
|
-
# tools_schema = lcall(tools, lambda tool: tool.to_dict()['schema_'])
|
77
|
-
# if self.llmconfig['tools'] is None:
|
78
|
-
# self.llmconfig['tools'] = tools_schema
|
79
|
-
# else:
|
80
|
-
# self.llmconfig['tools'] += tools_schema
|
81
|
-
|
82
|
-
def _tool_parser(self, **kwargs):
|
83
|
-
# 1. single schema: dict
|
84
|
-
# 2. tool: Tool
|
85
|
-
# 3. name: str
|
86
|
-
# 4. list: 3 types of lists
|
87
|
-
def tool_check(tool):
|
88
|
-
if isinstance(tool, dict):
|
89
|
-
return tool
|
90
|
-
elif isinstance(tool, Tool):
|
91
|
-
return tool.schema_
|
92
|
-
elif isinstance(tool, str):
|
93
|
-
if self.tool_manager.name_existed(tool):
|
94
|
-
tool = self.tool_manager.registry[tool]
|
95
|
-
return tool.schema_
|
96
|
-
else:
|
97
|
-
raise ValueError(f'Function {tool} is not registered.')
|
98
|
-
|
99
|
-
if 'tools' in kwargs:
|
100
|
-
if not isinstance(kwargs['tools'], list):
|
101
|
-
kwargs['tools']=[kwargs['tools']]
|
102
|
-
kwargs['tools'] = lcall(kwargs['tools'], tool_check)
|
103
|
-
|
104
|
-
else:
|
105
|
-
tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
|
106
|
-
kwargs = {**tool_kwarg, **kwargs}
|
107
|
-
|
108
|
-
return kwargs
|
109
|
-
|
110
|
-
async def initiate(self, instruction, system=None, context=None,
|
111
|
-
name=None, invoke=True, out=True, **kwargs) -> Any:
|
112
|
-
# if self.tool_manager.registry != {}:
|
113
|
-
# if 'tools' not in kwargs:
|
114
|
-
# tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
|
115
|
-
# kwargs = {**tool_kwarg, **kwargs}
|
116
|
-
if self.tool_manager.registry != {}:
|
117
|
-
kwargs = self._tool_parser(**kwargs)
|
118
|
-
config = {**self.llmconfig, **kwargs}
|
119
|
-
system = system or self.system
|
120
|
-
self.conversation.initiate_conversation(system=system, instruction=instruction, context=context, name=name)
|
121
|
-
await self.call_chatcompletion(**config)
|
122
|
-
|
123
|
-
return await self._output(invoke, out)
|
124
|
-
|
125
|
-
async def followup(self, instruction, system=None, context=None,
|
126
|
-
out=True, name=None, invoke=True, **kwargs) -> Any:
|
127
|
-
if system:
|
128
|
-
self.conversation.change_system(system)
|
129
|
-
self.conversation.add_messages(instruction=instruction, context=context, name=name)
|
130
|
-
|
131
|
-
if 'tool_parsed' in kwargs:
|
132
|
-
kwargs.pop('tool_parsed')
|
133
|
-
else:
|
134
|
-
if self.tool_manager.registry != {}:
|
135
|
-
kwargs = self._tool_parser(**kwargs)
|
136
|
-
# if self.tool_manager.registry != {}:
|
137
|
-
# if 'tools' not in kwargs:
|
138
|
-
# tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
|
139
|
-
# kwargs = {**tool_kwarg, **kwargs}
|
140
|
-
config = {**self.llmconfig, **kwargs}
|
141
|
-
await self.call_chatcompletion(**config)
|
142
|
-
|
143
|
-
return await self._output(invoke, out)
|
144
|
-
|
145
|
-
async def auto_followup(self, instruct, num=3, **kwargs):
    """
    Repeatedly follow up while the model keeps invoking tools.

    Runs up to ``num`` follow-up rounds with ``tool_choice="auto"``, stopping
    early once a response no longer triggers a tool invocation. If the budget
    is exhausted, issues one final follow-up without forcing tool choice.

    Parameters:
        instruct: The instruction to repeat on every round.
        num: Maximum number of tool-driven follow-up rounds (default 3).
        **kwargs: Extra completion parameters passed through to ``followup``.
    """
    # Parse tool schemas once up front; the rounds below pass tool_parsed=True
    # so followup() does not re-parse them.
    if self.tool_manager.registry != {}:
        kwargs = self._tool_parser(**kwargs)

    cont_ = True
    while num > 0 and cont_:
        await self.followup(instruct, tool_choice="auto", tool_parsed=True, **kwargs)
        num -= 1
        cont_ = self._is_invoked()
    if num == 0:
        # NOTE(review): this final call fires whenever the budget is exhausted,
        # even if the last response invoked no tool — i.e. one extra completion
        # request in that case. Confirm this is intended.
        await self.followup(instruct, **kwargs, tool_parsed=True)
|
160
|
-
|
161
|
-
# def messages_to_csv(self, dir=None, filename="messages.csv", **kwargs):
|
162
|
-
# dir = dir or self.logger_.dir
|
163
|
-
# if dir is None:
|
164
|
-
# raise ValueError("No directory specified.")
|
165
|
-
# self.conversation.msg.to_csv(dir=dir, filename=filename, **kwargs)
|
166
|
-
|
167
|
-
# def log_to_csv(self, dir=None, filename="llmlog.csv", **kwargs):
|
168
|
-
# dir = dir or self.logger_.dir
|
169
|
-
# if dir is None:
|
170
|
-
# raise ValueError("No directory specified.")
|
171
|
-
# self.logger_.to_csv(dir=dir, filename=filename, **kwargs)
|
172
|
-
|
173
|
-
async def call_chatcompletion(self, schema=oai_schema['chat'], **kwargs):
    """
    Build a chat-completion payload from the conversation so far, send it
    through the configured service, and record the outcome.

    On success ("choices" present in the completion): log the request/response
    pair, append the response message to the conversation, track it in
    ``responses``, and bump the success counters. On failure: bump the
    service's failed-task counter.

    Parameters:
        schema: Payload schema; defaults to ``oai_schema['chat']``.
        **kwargs: Extra parameters forwarded to payload creation.
    """
    history = [msg.message for msg in self.conversation.messages]
    request = ChatCompletion.create_payload(
        messages=history, schema=schema, llmconfig=self.llmconfig, **kwargs
    )
    completion = await self.service.serve(payload=request)
    if "choices" not in completion:
        self.service.status_tracker.num_tasks_failed += 1
        return
    self.logger_({"input": request, "output": completion})
    self.conversation.add_messages(response=completion['choices'][0])
    self.conversation.responses.append(self.conversation.messages[-1])
    self.conversation.response_counts += 1
    self.service.status_tracker.num_tasks_succeeded += 1
|
185
|
-
|
lionagi/endpoints/__init__.py
DELETED
lionagi/endpoints/audio.py
DELETED
@@ -1,17 +0,0 @@
|
|
1
|
-
# from .base_endpoint import BaseEndpoint
|
2
|
-
|
3
|
-
|
4
|
-
# class Audio(BaseEndpoint):
|
5
|
-
# endpoint: str = "chat/completions"
|
6
|
-
|
7
|
-
# @classmethod
|
8
|
-
# def create_payload(scls, messages, llmconfig, schema, **kwargs):
|
9
|
-
# config = {**llmconfig, **kwargs}
|
10
|
-
# payload = {"messages": messages}
|
11
|
-
# for key in schema['required']:
|
12
|
-
# payload.update({key: config[key]})
|
13
|
-
|
14
|
-
# for key in schema['optional']:
|
15
|
-
# if bool(config[key]) is True and str(config[key]).lower() != "none":
|
16
|
-
# payload.update({key: config[key]})
|
17
|
-
# return payload
|
@@ -1,54 +0,0 @@
|
|
1
|
-
from lionagi.objs.abc_objs import BaseEndpoint
|
2
|
-
|
3
|
-
|
4
|
-
class ChatCompletion(BaseEndpoint):
    """
    Represents an endpoint for chat completions in an API.

    This class handles the creation of payloads for chat completion requests.

    Attributes:
        endpoint (str): The API endpoint for chat completions.
    """
    endpoint: str = "chat/completions"

    @classmethod
    def create_payload(scls, messages, llmconfig, schema, **kwargs):
        """
        Create a payload for a chat completion request.

        Required parameters (per ``schema['required']``) are taken from the
        merged config and must be present — a missing required key raises
        ``KeyError``. Optional parameters (per ``schema['optional']``) are
        included only when present, truthy, and not the string "none"
        (case-insensitive).

        Parameters:
            messages (list): Message objects to include in the payload.
            llmconfig (dict): Configuration settings for the language model.
            schema (dict): Maps 'required' and 'optional' to lists of
                parameter names.
            **kwargs: Overrides or supplements for 'llmconfig'.

        Returns:
            dict: The payload for the chat completion request.

        Raises:
            KeyError: If a key listed in ``schema['required']`` is absent
                from the merged configuration.

        Example:
            payload = ChatCompletion.create_payload(
                messages=[{"text": "Hello, world!"}],
                llmconfig={"max_tokens": 100},
                schema={"required": ["max_tokens"], "optional": ["temperature"]}
            )
        """
        # kwargs take precedence over the base llmconfig.
        config = {**llmconfig, **kwargs}
        payload = {"messages": messages}
        for key in schema['required']:
            payload.update({key: config[key]})

        for key in schema['optional']:
            # .get() avoids a KeyError when an optional schema key is not in
            # the merged config (the original indexed config[key] directly).
            value = config.get(key)
            if bool(value) is True and str(value).lower() != "none":
                payload.update({key: value})
        return payload

    # def process_response(self, session, payload, completion):
    #     ...
|
54
|
-
|