lionagi 0.0.208__py3-none-any.whl → 0.0.210__py3-none-any.whl

Files changed (104)
  1. lionagi/__init__.py +4 -6
  2. lionagi/api_service/base_endpoint.py +65 -0
  3. lionagi/api_service/base_rate_limiter.py +121 -0
  4. lionagi/api_service/base_service.py +146 -0
  5. lionagi/api_service/chat_completion.py +6 -0
  6. lionagi/api_service/embeddings.py +6 -0
  7. lionagi/api_service/payload_package.py +47 -0
  8. lionagi/api_service/status_tracker.py +29 -0
  9. lionagi/core/__init__.py +5 -9
  10. lionagi/core/branch.py +1191 -0
  11. lionagi/core/flow.py +423 -0
  12. lionagi/core/{instruction_set/instruction_set.py → instruction_set.py} +3 -3
  13. lionagi/core/session.py +872 -0
  14. lionagi/schema/__init__.py +5 -8
  15. lionagi/schema/base_schema.py +821 -0
  16. lionagi/{_services → services}/base_service.py +4 -4
  17. lionagi/{_services → services}/oai.py +4 -4
  18. lionagi/structures/graph.py +1 -1
  19. lionagi/structures/relationship.py +1 -1
  20. lionagi/structures/structure.py +1 -1
  21. lionagi/tools/tool_manager.py +0 -163
  22. lionagi/tools/tool_util.py +2 -1
  23. lionagi/utils/__init__.py +7 -14
  24. lionagi/utils/api_util.py +63 -2
  25. lionagi/utils/core_utils.py +338 -0
  26. lionagi/utils/sys_util.py +3 -3
  27. lionagi/version.py +1 -1
  28. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/METADATA +28 -29
  29. lionagi-0.0.210.dist-info/RECORD +56 -0
  30. lionagi/_services/anthropic.py +0 -79
  31. lionagi/_services/anyscale.py +0 -0
  32. lionagi/_services/azure.py +0 -1
  33. lionagi/_services/bedrock.py +0 -0
  34. lionagi/_services/everlyai.py +0 -0
  35. lionagi/_services/gemini.py +0 -0
  36. lionagi/_services/gpt4all.py +0 -0
  37. lionagi/_services/huggingface.py +0 -0
  38. lionagi/_services/litellm.py +0 -33
  39. lionagi/_services/localai.py +0 -0
  40. lionagi/_services/openllm.py +0 -0
  41. lionagi/_services/openrouter.py +0 -44
  42. lionagi/_services/perplexity.py +0 -0
  43. lionagi/_services/predibase.py +0 -0
  44. lionagi/_services/rungpt.py +0 -0
  45. lionagi/_services/vllm.py +0 -0
  46. lionagi/_services/xinference.py +0 -0
  47. lionagi/agents/planner.py +0 -1
  48. lionagi/agents/prompter.py +0 -1
  49. lionagi/agents/scorer.py +0 -1
  50. lionagi/agents/summarizer.py +0 -1
  51. lionagi/agents/validator.py +0 -1
  52. lionagi/bridge/__init__.py +0 -22
  53. lionagi/bridge/langchain.py +0 -195
  54. lionagi/bridge/llama_index.py +0 -266
  55. lionagi/core/branch/__init__.py +0 -0
  56. lionagi/core/branch/branch.py +0 -841
  57. lionagi/core/branch/cluster.py +0 -1
  58. lionagi/core/branch/conversation.py +0 -787
  59. lionagi/core/core_util.py +0 -0
  60. lionagi/core/flow/__init__.py +0 -0
  61. lionagi/core/flow/flow.py +0 -19
  62. lionagi/core/flow/flow_util.py +0 -62
  63. lionagi/core/instruction_set/__init__.py +0 -0
  64. lionagi/core/messages/__init__.py +0 -0
  65. lionagi/core/sessions/__init__.py +0 -0
  66. lionagi/core/sessions/session.py +0 -504
  67. lionagi/datastores/__init__.py +0 -1
  68. lionagi/datastores/chroma.py +0 -1
  69. lionagi/datastores/deeplake.py +0 -1
  70. lionagi/datastores/elasticsearch.py +0 -1
  71. lionagi/datastores/lantern.py +0 -1
  72. lionagi/datastores/pinecone.py +0 -1
  73. lionagi/datastores/postgres.py +0 -1
  74. lionagi/datastores/qdrant.py +0 -1
  75. lionagi/loaders/__init__.py +0 -18
  76. lionagi/loaders/chunker.py +0 -166
  77. lionagi/loaders/load_util.py +0 -240
  78. lionagi/loaders/reader.py +0 -122
  79. lionagi/models/__init__.py +0 -0
  80. lionagi/models/base_model.py +0 -0
  81. lionagi/models/imodel.py +0 -53
  82. lionagi/schema/async_queue.py +0 -158
  83. lionagi/schema/base_condition.py +0 -1
  84. lionagi/schema/base_node.py +0 -422
  85. lionagi/schema/base_tool.py +0 -44
  86. lionagi/schema/data_logger.py +0 -126
  87. lionagi/schema/data_node.py +0 -88
  88. lionagi/schema/status_tracker.py +0 -37
  89. lionagi/tests/test_utils/test_encrypt_util.py +0 -323
  90. lionagi/utils/encrypt_util.py +0 -283
  91. lionagi/utils/url_util.py +0 -55
  92. lionagi-0.0.208.dist-info/RECORD +0 -106
  93. lionagi/{agents → api_service}/__init__.py +0 -0
  94. lionagi/core/{branch/branch_manager.py → branch_manager.py} +0 -0
  95. lionagi/core/{messages/messages.py → messages.py} +3 -3
  96. /lionagi/{_services → services}/__init__.py +0 -0
  97. /lionagi/{_services → services}/mistralai.py +0 -0
  98. /lionagi/{_services → services}/mlx_service.py +0 -0
  99. /lionagi/{_services → services}/ollama.py +0 -0
  100. /lionagi/{_services → services}/services.py +0 -0
  101. /lionagi/{_services → services}/transformers.py +0 -0
  102. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/LICENSE +0 -0
  103. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/WHEEL +0 -0
  104. {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/top_level.txt +0 -0
lionagi/utils/core_utils.py ADDED
@@ -0,0 +1,338 @@
+ import pandas as pd
+ import json
+ from datetime import datetime
+ from typing import Union, Optional
+
+ from .sys_util import strip_lower, to_df, as_dict
+ from .nested_util import nget
+
+
+ class CoreUtil:
+
+     @staticmethod
+     def validate_messages(messages):
+         """
+         Validates the structure and content of a DataFrame containing conversation messages.
+
+         Args:
+             messages (pd.DataFrame): The DataFrame containing conversation messages to validate.
+
+         Returns:
+             bool: True if the DataFrame is valid, raises a ValueError otherwise.
+
+         Raises:
+             ValueError: If the DataFrame has unmatched columns, contains null values, has an unsupported role, or
+                 if the content cannot be parsed as a JSON string.
+         """
+         if list(messages.columns) != ['node_id', 'role', 'sender', 'timestamp', 'content']:
+             raise ValueError('Invalid messages dataframe. Unmatched columns.')
+         if messages.isnull().values.any():
+             raise ValueError('Invalid messages dataframe. Cannot have null.')
+         if not all(role in ['system', 'user', 'assistant'] for role in messages['role'].unique()):
+             raise ValueError('Invalid messages dataframe. Cannot have role other than ["system", "user", "assistant"].')
+         for cont in messages['content']:
+             if cont.startswith('Sender'):
+                 cont = cont.split(':', 1)[1]
+             try:
+                 json.loads(cont)
+             except:
+                 raise ValueError('Invalid messages dataframe. Content expect json string.')
+         return True
+
+     @staticmethod
+     def sign_message(messages, sender: str):
+         """
+         Prefixes each message in the DataFrame with 'Sender <sender>:' to indicate the message's origin.
+
+         Args:
+             messages (pd.DataFrame): The DataFrame containing conversation messages to sign.
+             sender (str): The name or identifier of the sender to prefix the messages with.
+
+         Returns:
+             pd.DataFrame: The DataFrame with updated messages signed by the specified sender.
+
+         Raises:
+             ValueError: If the sender is None or equivalent to the string 'none'.
+         """
+         if sender is None or strip_lower(sender) == 'none':
+             raise ValueError("sender cannot be None")
+         df = messages.copy()
+
+         for i in df.index:
+             if not df.loc[i, 'content'].startswith('Sender'):
+                 df.loc[i, 'content'] = f"Sender {sender}: {df.loc[i, 'content']}"
+             else:
+                 content = df.loc[i, 'content'].split(':', 1)[1]
+                 df.loc[i, 'content'] = f"Sender {sender}: {content}"
+
+         return to_df(df)
+
+     @staticmethod
+     def search_keywords(
+         messages,
+         keywords: Union[str, list],
+         case_sensitive: bool = False, reset_index=False, dropna=False
+     ):
+         """
+         Searches for keywords in the 'content' column of a DataFrame and returns matching rows.
+
+         Args:
+             messages (pd.DataFrame): The DataFrame to search within.
+             keywords (Union[str, List[str]]): Keyword(s) to search for. If a list, combines keywords with an OR condition.
+             case_sensitive (bool, optional): Whether the search should be case-sensitive. Defaults to False.
+             reset_index (bool, optional): Whether to reset the index of the resulting DataFrame. Defaults to False.
+             dropna (bool, optional): Whether to drop rows with NA values in the 'content' column. Defaults to False.
+
+         Returns:
+             pd.DataFrame: A DataFrame containing rows where the 'content' column matches the search criteria.
+         """
+         out = ''
+         if isinstance(keywords, list):
+             keywords = '|'.join(keywords)
+         if not case_sensitive:
+             out = messages[
+                 messages["content"].str.contains(keywords, case=False)
+             ]
+         out = messages[messages["content"].str.contains(keywords)]
+         if reset_index or dropna:
+             out = to_df(out, reset_index=reset_index)
+         return out
+
+     @staticmethod
+     def filter_messages_by(
+         messages,
+         role: Optional[str] = None,
+         sender: Optional[str] = None,
+         start_time: Optional[datetime] = None,
+         end_time: Optional[datetime] = None,
+         content_keywords: Optional[Union[str, list]] = None,
+         case_sensitive: bool = False
+     ) -> pd.DataFrame:
+         """
+         Filters messages in a DataFrame based on specified criteria such as role, sender, time range, and keywords.
+
+         Args:
+             messages (pd.DataFrame): The DataFrame of messages to filter.
+             role (Optional[str]): The role to filter messages by (e.g., 'user', 'assistant').
+             sender (Optional[str]): The sender to filter messages by.
+             start_time (Optional[datetime]): The start time for filtering messages.
+             end_time (Optional[datetime]): The end time for filtering messages.
+             content_keywords (Optional[Union[str, list]]): Keywords to filter messages by content.
+             case_sensitive (bool): Determines if the keyword search should be case-sensitive.
+
+         Returns:
+             pd.DataFrame: A DataFrame containing messages that match the filter criteria.
+
+         Raises:
+             ValueError: If an error occurs during the filtering process.
+         """
+
+         try:
+             outs = messages.copy()
+
+             if content_keywords:
+                 outs = CoreUtil.search_keywords(content_keywords, case_sensitive)
+
+             outs = outs[outs['role'] == role] if role else outs
+             outs = outs[outs['sender'] == sender] if sender else outs
+             outs = outs[outs['timestamp'] > start_time] if start_time else outs
+             outs = outs[outs['timestamp'] < end_time] if end_time else outs
+
+             return to_df(outs)
+
+         except Exception as e:
+             raise ValueError(f"Error in filtering messages: {e}")
+
+     @staticmethod
+     def replace_keyword(
+         df,
+         keyword: str,
+         replacement: str,
+         col='content',
+         case_sensitive: bool = False
+     ) -> None:
+         """
+         Replaces occurrences of a keyword within a specified column of a DataFrame with a given replacement.
+
+         Args:
+             df (pd.DataFrame): The DataFrame to operate on.
+             keyword (str): The keyword to search for and replace.
+             replacement (str): The string to replace the keyword with.
+             col (str): The column to search for the keyword in.
+             case_sensitive (bool): If True, the search and replacement are case-sensitive.
+
+         Returns:
+             None: This function modifies the DataFrame in place.
+         """
+         if not case_sensitive:
+             df[col] = df[col].str.replace(
+                 keyword, replacement, case=False
+             )
+         else:
+             df[col] = df[col].str.replace(
+                 keyword, replacement
+             )
+
+     @staticmethod
+     def remove_message(df, node_id: str) -> bool:
+         """
+         Removes a message from the DataFrame based on its node_id.
+
+         Args:
+             df (pd.DataFrame): The DataFrame from which the message should be removed.
+             node_id (str): The node_id of the message to be removed.
+
+         Returns:
+             bool: True if the message was successfully removed, False otherwise.
+         """
+         initial_length = len(df)
+         df = df[df["node_id"] != node_id]
+
+         return len(df) < initial_length
+
+     @staticmethod
+     def update_row(
+         df, node_id = None, col = "node_id", value = None
+     ) -> bool:
+         """
+         Updates the value of a specified column for a row identified by node_id in a DataFrame.
+
+         Args:
+             df (pd.DataFrame): The DataFrame to update.
+             node_id (Optional[str]): The node_id of the row to be updated.
+             col (str): The column to update.
+             value (Any): The new value to be assigned to the column.
+
+         Returns:
+             bool: True if the update was successful, False otherwise.
+         """
+         index = df.index[df[col] == node_id].tolist()
+         if index:
+             df.at[index[0], col] = value
+             return True
+         return False
+
+     @staticmethod
+     def remove_last_n_rows(df, steps: int) -> None:
+         """
+         Removes the last 'n' rows from a DataFrame.
+
+         Args:
+             df (pd.DataFrame): The DataFrame from which rows will be removed.
+             steps (int): The number of rows to remove.
+
+         Returns:
+             pd.DataFrame: The DataFrame after the last 'n' rows have been removed.
+
+         Raises:
+             ValueError: If 'steps' is less than 0 or greater than the number of rows in the DataFrame.
+         """
+         if steps < 0 or steps > len(df):
+             raise ValueError("Steps must be a non-negative integer less than or equal to the number of messages.")
+         df = to_df(df[:-steps])
+
+     @staticmethod
+     def get_rows(
+         df,
+         sender: Optional[str] = None,
+         role: Optional[str] = None,
+         n: int = 1,
+         sign_ = False,
+         from_="front",
+     ) -> pd.DataFrame:
+         """
+         Retrieves rows from a DataFrame based on specified sender, role, and quantity, optionally signing them.
+
+         Args:
+             df (pd.DataFrame): The DataFrame to retrieve rows from.
+             sender (Optional[str]): The sender based on which to filter rows.
+             role (Optional[str]): The role based on which to filter rows.
+             n (int): The number of rows to retrieve.
+             sign_ (bool): Whether to sign the retrieved rows.
+             from_ (str): Direction to retrieve rows ('front' for the first rows, 'last' for the last rows).
+
+         Returns:
+             pd.DataFrame: A DataFrame containing the retrieved rows.
+         """
+
+         if from_ == "last":
+             if sender is None and role is None:
+                 outs = df.iloc[-n:]
+             elif sender and role:
+                 outs = df[(df['sender'] == sender) & (df['role'] == role)].iloc[-n:]
+             elif sender:
+                 outs = df[df['sender'] == sender].iloc[-n:]
+             else:
+                 outs = df[df['role'] == role].iloc[-n:]
+
+         elif from_ == "front":
+             if sender is None and role is None:
+                 outs = df.iloc[:n]
+             elif sender and role:
+                 outs = df[(df['sender'] == sender) & (df['role'] == role)].iloc[:n]
+             elif sender:
+                 outs = df[df['sender'] == sender].iloc[:n]
+             else:
+                 outs = df[df['role'] == role].iloc[:n]
+
+         return CoreUtil.sign_message(outs, sender) if sign_ else outs
+
+     @staticmethod
+     def extend(df1: pd.DataFrame, df2: pd.DataFrame, **kwargs) -> pd.DataFrame:
+         """
+         Extends a DataFrame with another DataFrame, optionally removing duplicates based on specified criteria.
+
+         Args:
+             df1 (pd.DataFrame): The original DataFrame to be extended.
+             df2 (pd.DataFrame): The DataFrame containing new rows to add to df1.
+             **kwargs: Additional keyword arguments for pandas.DataFrame.drop_duplicates().
+
+         Returns:
+             pd.DataFrame: The extended DataFrame after adding rows from df2 and removing duplicates.
+
+         Raises:
+             ValueError: If an error occurs during the extension process.
+         """
+         CoreUtil.validate_messages(df2)
+         try:
+             if len(df2.dropna(how='all')) > 0 and len(df1.dropna(how='all')) > 0:
+                 df = to_df([df1, df2])
+                 df.drop_duplicates(
+                     inplace=True, subset=['node_id'], keep='first', **kwargs
+                 )
+                 return to_df(df)
+         except Exception as e:
+             raise ValueError(f"Error in extending messages: {e}")
+
+     @staticmethod
+     def to_markdown_string(df):
+         answers = []
+         for _, i in df.iterrows():
+             content = as_dict(i.content)
+
+             if i.role == "assistant":
+                 try:
+                     a = nget(content, ['action_response', 'func'])
+                     b = nget(content, ['action_response', 'arguments'])
+                     c = nget(content, ['action_response', 'output'])
+                     if a is not None:
+                         answers.append(f"Function: {a}")
+                         answers.append(f"Arguments: {b}")
+                         answers.append(f"Output: {c}")
+                     else:
+                         answers.append(nget(content, ['response']))
+                 except:
+                     pass
+             elif i.role == "user":
+                 try:
+                     answers.append(nget(content, ['instruction']))
+                 except:
+                     pass
+             else:
+                 try:
+                     answers.append(nget(content, ['system_info']))
+                 except:
+                     pass
+
+         out_ = "\n".join(answers)
+         return out_
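For orientation, here is a minimal usage sketch of the new `CoreUtil` helpers. It is not part of the diff: it assumes lionagi 0.0.210 is installed and that `CoreUtil` is importable from `lionagi.utils.core_utils` (the path listed in the RECORD below); the example frame, values, and sender name are made up.

```python
# Illustrative only: a hand-built messages frame in the exact column order
# that validate_messages checks for; all values here are invented.
import json
import pandas as pd
from lionagi.utils.core_utils import CoreUtil  # module path per the 0.0.210 RECORD

messages = pd.DataFrame({
    "node_id": ["n1", "n2"],
    "role": ["user", "assistant"],
    "sender": ["user", "assistant"],
    "timestamp": pd.to_datetime(["2024-02-14 09:00", "2024-02-14 09:01"]),
    "content": [
        json.dumps({"instruction": "add 1 + 2"}),  # content must parse as JSON
        json.dumps({"response": "3"}),
    ],
})

CoreUtil.validate_messages(messages)                # True, or raises ValueError
signed = CoreUtil.sign_message(messages, "tester")  # prefixes content with "Sender tester: ..."
hits = CoreUtil.search_keywords(messages, ["add", "response"])  # OR-joined keyword match
```

Note that, as written above, `search_keywords` re-filters with a case-sensitive `str.contains` on the line after the case-insensitive branch, so `case_sensitive=False` effectively has no effect in this release.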
lionagi/utils/sys_util.py CHANGED
@@ -11,7 +11,7 @@ import json
  import logging

  import pandas as pd
- from typing import Any, List, Dict, Union
+ from typing import Any, List, Dict


  def as_dict(input_: Any) -> Dict[Any, Any]:
@@ -136,8 +136,8 @@ def create_path(
          name, ext = filename, ''
      os.makedirs(dir, exist_ok=dir_exist_ok)
      timestamp_str = get_timestamp() if timestamp else ''
-     filename = f"{timestamp_str}_{name}" if time_prefix else f"{name}_{timestamp_str}"
-     return f"{dir}{filename}.{ext}" if ext else f"{dir}{filename}"
+     filename = f"{timestamp_str}{name}" if time_prefix else f"{name}{timestamp_str}"
+     return f"{dir}{filename}.{ext}" if ext != '' else f"{dir}{filename}"

  def get_bins(input: List[str], upper: int) -> List[List[int]]:
      """
lionagi/version.py CHANGED
@@ -1 +1 @@
- __version__ = "0.0.208"
+ __version__ = "0.0.210"
{lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lionagi
- Version: 0.0.208
+ Version: 0.0.210
  Summary: Towards automated general intelligence.
  Author: HaiyangLi
  Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -234,42 +234,30 @@ Requires-Dist: pandas >=2.1.0
  **Towards Automated General Intelligence**


- LionAGI is a cutting-edge **intelligent agent framework**. It integrates data manipulation with advanced machine learning tools, such as Large Language Models (i.e. OpenAI's GPT).
- - Designed for data-centric, production-level projects,
- - dramatically lowers the barrier in creating intelligent, automated systems
- - that can understand and interact meaningfully with large volumes of data.
+ LionAGI is an **intelligent agent framework** tailored for **big data analysis** with advanced **machine learning** tools. Designed for data-centric, production-level projects, LionAGI allows flexible and rapid design of agentic workflows, customized for your own data. LionAGI `agents` can manage and direct other agents, and can also use multiple different tools in parallel.
+
+ <img width="1002" alt="image" src="https://github.com/lion-agi/lionagi/assets/122793010/3fd75c2a-a9e9-4ab4-8ae9-f9cd71c69aec">

- Install LionAGI with pip:

- ```bash
- pip install lionagi
- ```
- Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename as `.env` and put in your project's root directory.
- by default we use `OPENAI_API_KEY`.
+ #### Integrate any Advanced Model into your existing workflow.

+ <img width="1100" alt="Screenshot 2024-02-14 at 8 54 01 AM" src="https://github.com/lion-agi/lionagi/assets/122793010/cfbc403c-cece-49e7-bc3a-015e035d3607">


- ### Features
- - Robust and scalable. Create a production ready LLM application **in hours**, with more than 100 models
- - Efficient and verstile data operations for reading, chunking, binning, writing, storing data with support for `langchain` and `llamaindex`
- - Built-in support for **chain/graph-of-thoughts, ReAct, Concurrent parallel function calling**
- - Unified interface with any LLM provider, API or local
- - Fast and **concurrent** API call with **configurable rate limit**
- - (Work In Progress) support for models both API and local
- ---
- LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)


- **Notice**:
- * calling API with maximum throughput over large set of data with advanced models i.e. gpt-4 can get **EXPENSIVE IN JUST SECONDS**,
- * please know what you are doing, and check the usage on OpenAI regularly
- * default rate limits are set to be **tier 1** of OpenAI model `gpt-4-1104-preview`, please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free) you can modify token rate parameters to fit different use cases.
- * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases), **main is under development and will be changed without notice**
+ ### Install LionAGI with pip:
+
+ ```bash
+ pip install lionagi
+ ```
+ Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename it as `.env`, and put it in your project's root directory.
+ By default we use `OPENAI_API_KEY`.


  ### Quick Start

- The following example shows how to use LionAGI's `Session` object to interact with `gpt-4` model:
+ The following example shows how to use LionAGI's `Session` object to interact with the `gpt-4-turbo` model:

  ```python

@@ -285,7 +273,7 @@ import lionagi as li

  calculator = li.Session(system=system)
  result = await calculator.chat(
-     instruction=instruction, context=context, model="gpt-4-1106-preview"
+     instruction=instruction, context=context, model="gpt-4-turbo-preview"
  )

  print(f"Calculation Result: {result}")
@@ -303,7 +291,7 @@ import lionagi as li
  async def main():
      calculator = li.Session(system=system)
      result = await calculator.chat(
-         instruction=instruction, context=context, model="gpt-4-1106-preview"
+         instruction=instruction, context=context, model="gpt-4-turbo-preview"
      )
      print(f"Calculation Result: {result}")

@@ -311,7 +299,18 @@ if __name__ == "__main__":
      asyncio.run(main())
  ```

- Visit our notebooks for our examples.
+ Visit our notebooks for examples.
+
+ LionAGI is designed to be `asynchronous` only; please check the Python official documentation on how `async` works: [here](https://docs.python.org/3/library/asyncio.html)
+
+ ---
+
+ **Notice**:
+ * calling API with maximum throughput over a large set of data with advanced models, e.g. gpt-4, can get **EXPENSIVE IN JUST SECONDS**,
+ * please know what you are doing, and check the usage on OpenAI regularly
+ * default rate limits are set to 1,000 requests and 100,000 tokens per minute; please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free); you can modify token rate parameters to fit different use cases.
+ * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases), **main is under development and will be changed without notice**
+


  ### Community

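As a companion to the `.env` step in the Quick Start above, here is a minimal sketch of verifying that the key is picked up. It assumes `python-dotenv` (or an equivalent loader) reads the file; the key value is a placeholder.

```python
# .env in the project root (placeholder value):
#   OPENAI_API_KEY=sk-...
import os
from dotenv import load_dotenv  # assumption: python-dotenv is installed

load_dotenv()  # read .env from the current working directory
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is not set"
```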
lionagi-0.0.210.dist-info/RECORD ADDED
@@ -0,0 +1,56 @@
+ lionagi/__init__.py,sha256=txn3JAAymtQVhlS15wiE85kW2o5gjCr8ikDNze9QYlY,921
+ lionagi/version.py,sha256=9Haj1irMyv2cKsU0AXTI7BLba6Cbi6TUK35nkUCvjeg,23
+ lionagi/api_service/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/api_service/base_endpoint.py,sha256=kbGp4vRFlBkkP21YE0_ZRh7EQDt69IojDfST_fogYOY,2461
+ lionagi/api_service/base_rate_limiter.py,sha256=_9i1n6IvIR8C90zTT3u9XrVbaltz1aAbKAlqp9i6zjM,5216
+ lionagi/api_service/base_service.py,sha256=Vd_FoEa6BKxid5Z8KX3tRRuJtCJvlMBNO2VlBddWVtQ,6254
+ lionagi/api_service/chat_completion.py,sha256=IZsdky14jL221FPMNbKF0JWEC_TUVGB-FEB-2z8xYfA,134
+ lionagi/api_service/embeddings.py,sha256=VO7I3SvCEmfBCeyWw09nt_VK9qkaauQncaf9O2-PrRc,130
+ lionagi/api_service/payload_package.py,sha256=t_LYHyz6gvncQhkpdkkdLACYnO06dLQ2PMESGuf91Yw,1550
+ lionagi/api_service/status_tracker.py,sha256=Sg5kUXgrq4mArWuMVw4l7OIZtySkGzVVZmdPiPS792U,1101
+ lionagi/configs/__init__.py,sha256=QOd4Rs7vjIpNWvIocxWQeU-q-MPRC-AOxh-gM-eBJ2o,142
+ lionagi/configs/oai_configs.py,sha256=Q2ESc5QiMprnRc_w7SeMlaTYUWl_Y4SEzZSE4iOkz4Q,2646
+ lionagi/configs/openrouter_configs.py,sha256=IBQHqb8mo4Jb3kYAm_7NOHSKRPwSdGbPpDJoiwHxLYw,1269
+ lionagi/core/__init__.py,sha256=qoWWDKV5d-3LMP-GBzMtE-cW0X3q5zaGwGCwijp3y7o,98
+ lionagi/core/branch.py,sha256=t5C8fJD4g0evlNnitH1bq2wsIFcgOlvmvqrUVETSsIY,50425
+ lionagi/core/branch_manager.py,sha256=zA_dECz9XYKLk7kK6zDWfHLs1ttRGYDfyCj4B7XKDqo,4903
+ lionagi/core/flow.py,sha256=8IRcRTSrwCIuLTni33PGYb4PeuR5bjuqr1_YnDh8PFc,16764
+ lionagi/core/instruction_set.py,sha256=-hMg6UDXmxG1oYqTXWhona5gDZbNcQxaK06iMuHJr6I,13694
+ lionagi/core/messages.py,sha256=I91WqZPW-8kgG4xQ2w6DxJ55V5H2ivgGWO777_y0O9M,6533
+ lionagi/core/session.py,sha256=Oj9BapZ4FODadRvvJNzbzsT77WnaiB9x4_K7bJoQC1k,36166
+ lionagi/schema/__init__.py,sha256=3D30R38-bxegsdfIYZMOIGHCwhZ4HBYx9E3PPd5weUY,137
+ lionagi/schema/base_schema.py,sha256=xB8vfclShpRdCf7aJxFcZoDJQZL4QB6n57a_OcDV48A,26579
+ lionagi/services/__init__.py,sha256=zU5sxmSI9-Jtp_WsI-Zsb6hmT8y5zF9YtJ7XikAjnbs,60
+ lionagi/services/base_service.py,sha256=FnmljjPaNX6tqEy1P2HS_i4k3ZMRi2B074DA0SiFAP0,17354
+ lionagi/services/mistralai.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/services/mlx_service.py,sha256=1AF_RcV4KQmpxC67n2E0zRbssSlsTFmdnojydUXdJn4,1285
+ lionagi/services/oai.py,sha256=-xWE8zc_o9eYcyV-NW8GfURMafObcufbJrHIb13V6Yg,4055
+ lionagi/services/ollama.py,sha256=I2GvWtWA9WMyq_bhLe2JQlrkP_mAeqm2Bhf8ItOpEzQ,1190
+ lionagi/services/services.py,sha256=TwjAizQCTFCv_I8XZbTvpcryJPGt4vQAGYkGdZqJST0,5077
+ lionagi/services/transformers.py,sha256=nGfQbcECKzA1biHe4i9l01aKCMgULKFQ4LZyqjtvSXk,2934
+ lionagi/structures/__init__.py,sha256=wMPekT2vbWwUkJ5aW5o-lzJC9Fzhta6RHDiFPTNUm_0,120
+ lionagi/structures/graph.py,sha256=N_jIsDn9zQt7vr_KurfjRNg1VO81CMFze64lJa6ZjWU,7790
+ lionagi/structures/relationship.py,sha256=XBYNCNmkpTLSXfE8f41I7yEUfaqlPOhajmosYVzgiw8,6061
+ lionagi/structures/structure.py,sha256=NJOnesHBJ24snSzsSt_Nrx20eElKymJ4WdlRRaG9zDc,3497
+ lionagi/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/tests/test_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/tests/test_utils/test_api_util.py,sha256=7Zyc0J1glZrIWI1HrTRSRhzw8jaUW1L2vVLFAlUhI4g,9721
+ lionagi/tests/test_utils/test_call_util.py,sha256=7xmfFaWvniMQfaOyfwasA2enJQVuSlcAwc8gUyAR_7k,26277
+ lionagi/tests/test_utils/test_io_util.py,sha256=cFZCT6EikVeuXB13w-UbtO3YceCHBO5RlNXxGICqg_U,11002
+ lionagi/tests/test_utils/test_nested_util.py,sha256=Z1boHufhjZryw51qW2lABOnnyJ1snAFp26KKzzzD8Bs,12612
+ lionagi/tests/test_utils/test_sys_util.py,sha256=TDCkzll-JLa6NuBbN_-ay5Rw9KTa_HcSHHAq62RVwGI,13545
+ lionagi/tools/__init__.py,sha256=ZEck-ReP5Co05nAA2gUXTpKoDN2QZqrL7DvU9Z09gqg,69
+ lionagi/tools/tool_manager.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/tools/tool_util.py,sha256=FeWWD8hIqPxgT7xC3yZmbPkbgatm7Ozlt1rlfN7LG_U,9255
+ lionagi/utils/__init__.py,sha256=HymzWXPhVa2xqijEOVUT2rK14JoQBSnHzmjocqQgfD0,820
+ lionagi/utils/api_util.py,sha256=lEsDmnDts7fPUZPNcMI7n1M4Bsx0nOHY6e6EY_xloOI,17610
+ lionagi/utils/call_util.py,sha256=nqrqUjRtTAaOSnRoEyLy5j6B7Pauv-MgzApOv9xrVkc,33187
+ lionagi/utils/core_utils.py,sha256=56vGEUU1idDZIt6b1YOj7-hH4kNRYJFfvTQqEj3IW50,13120
+ lionagi/utils/io_util.py,sha256=xoVsq8sP5JGsosuC80Kad3GkGjm8Qm0OLYyTw-U5ru8,6455
+ lionagi/utils/nested_util.py,sha256=67j-ySQtuMGxtjnC-Ty2mwQgqp2g1gZhXRy1MulUu1U,26656
+ lionagi/utils/sys_util.py,sha256=DtcIRoGkOPYaXM9iuMSR8FNdEKX3Sjoym348SA3kZ8A,15884
+ lionagi-0.0.210.dist-info/LICENSE,sha256=TBnSyG8fs_tMRtK805GzA1cIyExleKyzoN_kuVxT9IY,11358
+ lionagi-0.0.210.dist-info/METADATA,sha256=k0Ar_7HbWS0ufwFMfsK4ujVKucfW4xFI3IXpEc7TZIo,17691
+ lionagi-0.0.210.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ lionagi-0.0.210.dist-info/top_level.txt,sha256=szvch_d2jE1Lu9ZIKsl26Ll6BGfYfbOgt5lm-UpFSo4,8
+ lionagi-0.0.210.dist-info/RECORD,,
lionagi/_services/anthropic.py DELETED
@@ -1,79 +0,0 @@
- from os import getenv
- from .base_service import BaseService, PayloadCreation
-
- class AnthropicService(BaseService):
-     """
-     A service to interact with Anthropic's API endpoints.
-
-     Attributes:
-         base_url (str): The base URL for the Anthropic API.
-         available_endpoints (list): A list of available API endpoints.
-         schema (dict): The schema configuration for the API.
-         key_scheme (str): The environment variable name for Anthropic API key.
-         token_encoding_name (str): The default token encoding scheme.
-
-     Examples:
-         >>> service = AnthropicService(api_key="your_api_key")
-         >>> asyncio.run(service.serve("Hello, world!", "chat/completions"))
-         (payload, completion)
-     """
-
-     base_url = "https://api.anthropic.com/v1/"
-     available_endpoints = ['chat/completions']
-     schema = {} # TODO
-     key_scheme = "ANTHROPIC_API_KEY"
-     token_encoding_name = "cl100k_base"
-
-     def __init__(self, api_key = None, key_scheme = None,schema = None, token_encoding_name: str = "cl100k_base", **kwargs):
-         key_scheme = key_scheme or self.key_scheme
-         super().__init__(
-             api_key = api_key or getenv(key_scheme),
-             schema = schema or self.schema,
-             token_encoding_name=token_encoding_name,
-             **kwargs
-         )
-         self.active_endpoint = []
-
-     async def serve(self, input_, endpoint="chat/completions", method="post", **kwargs):
-         """
-         Serves the input using the specified endpoint and method.
-
-         Args:
-             input_: The input text to be processed.
-             endpoint: The API endpoint to use for processing.
-             method: The HTTP method to use for the request.
-             **kwargs: Additional keyword arguments to pass to the payload creation.
-
-         Returns:
-             A tuple containing the payload and the completion response from the API.
-         """
-         if endpoint not in self.active_endpoint:
-             await self. init_endpoint(endpoint)
-         if endpoint == "chat/completions":
-             return await self.serve_chat(input_, **kwargs)
-         else:
-             return ValueError(f'{endpoint} is currently not supported')
-
-     async def serve_chat(self, messages, **kwargs):
-         """
-         Serves the chat completion request with the given messages.
-
-         Args:
-             messages: The messages to be included in the chat completion.
-             **kwargs: Additional keyword arguments for payload creation.
-
-         Returns:
-             A tuple containing the payload and the completion response from the API.
-         """
-         if "chat/completions" not in self.active_endpoint:
-             await self. init_endpoint("chat/completions")
-             self.active_endpoint.append("chat/completions")
-         payload = PayloadCreation.chat_completion(
-             messages, self.endpoints["chat/completions"].config, self.schema["chat/completions"], **kwargs)
-
-         try:
-             completion = await self.call_api(payload, "chat/completions", "post")
-             return payload, completion
-         except Exception as e:
-             self.status_tracker.num_tasks_failed += 1
-             raise e
lionagi/_services/anyscale.py DELETED
File without changes
lionagi/_services/azure.py DELETED
@@ -1 +0,0 @@
- # TODO
lionagi/_services/bedrock.py DELETED
File without changes
lionagi/_services/everlyai.py DELETED
File without changes
lionagi/_services/gemini.py DELETED
File without changes
lionagi/_services/gpt4all.py DELETED
File without changes
lionagi/_services/huggingface.py DELETED
File without changes
lionagi/_services/litellm.py DELETED
@@ -1,33 +0,0 @@
- from ..utils.sys_util import install_import, is_package_installed
- from .base_service import BaseService
-
-
- class LiteLLMService(BaseService):
-     def __init__(self, model: str = None, **kwargs):
-         super().__init__()
-
-         try:
-             if not is_package_installed('litellm'):
-                 install_import(
-                     package_name='litellm',
-                     import_name='acompletion'
-                 )
-             from litellm import acompletion
-             self.acompletion = acompletion
-         except:
-             raise ImportError(f'Unable to import required module from ollama. Please make sure that ollama is installed.')
-
-         self.model = model
-         self.kwargs = kwargs
-
-     async def serve_chat(self, messages, **kwargs):
-         payload = {'messages': messages}
-         kwargs = {**self.kwargs, **kwargs}
-
-         try:
-             completion = await self.acompletion(model=self.model, messages=messages, **kwargs)
-             return payload, completion
-         except Exception as e:
-             self.status_tracker.num_tasks_failed += 1
-             raise e
-
lionagi/_services/localai.py DELETED
File without changes
lionagi/_services/openllm.py DELETED
File without changes