lionagi 0.1.2__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (268)
  1. lionagi/__init__.py +60 -5
  2. lionagi/core/__init__.py +0 -25
  3. lionagi/core/_setting/_setting.py +59 -0
  4. lionagi/core/action/__init__.py +14 -0
  5. lionagi/core/action/function_calling.py +136 -0
  6. lionagi/core/action/manual.py +1 -0
  7. lionagi/core/action/node.py +109 -0
  8. lionagi/core/action/tool.py +114 -0
  9. lionagi/core/action/tool_manager.py +356 -0
  10. lionagi/core/agent/base_agent.py +27 -13
  11. lionagi/core/agent/eval/evaluator.py +1 -0
  12. lionagi/core/agent/eval/vote.py +40 -0
  13. lionagi/core/agent/learn/learner.py +59 -0
  14. lionagi/core/agent/plan/unit_template.py +1 -0
  15. lionagi/core/collections/__init__.py +17 -0
  16. lionagi/core/{generic/data_logger.py → collections/_logger.py} +69 -55
  17. lionagi/core/collections/abc/__init__.py +53 -0
  18. lionagi/core/collections/abc/component.py +615 -0
  19. lionagi/core/collections/abc/concepts.py +297 -0
  20. lionagi/core/collections/abc/exceptions.py +150 -0
  21. lionagi/core/collections/abc/util.py +45 -0
  22. lionagi/core/collections/exchange.py +161 -0
  23. lionagi/core/collections/flow.py +426 -0
  24. lionagi/core/collections/model.py +419 -0
  25. lionagi/core/collections/pile.py +913 -0
  26. lionagi/core/collections/progression.py +236 -0
  27. lionagi/core/collections/util.py +64 -0
  28. lionagi/core/director/direct.py +314 -0
  29. lionagi/core/director/director.py +2 -0
  30. lionagi/core/{execute/branch_executor.py → engine/branch_engine.py} +134 -97
  31. lionagi/core/{execute/instruction_map_executor.py → engine/instruction_map_engine.py} +80 -55
  32. lionagi/{experimental/directive/evaluator → core/engine}/script_engine.py +17 -1
  33. lionagi/core/executor/base_executor.py +90 -0
  34. lionagi/core/{execute/structure_executor.py → executor/graph_executor.py} +62 -66
  35. lionagi/core/{execute → executor}/neo4j_executor.py +70 -67
  36. lionagi/core/generic/__init__.py +3 -33
  37. lionagi/core/generic/edge.py +29 -79
  38. lionagi/core/generic/edge_condition.py +16 -0
  39. lionagi/core/generic/graph.py +236 -0
  40. lionagi/core/generic/hyperedge.py +1 -0
  41. lionagi/core/generic/node.py +156 -221
  42. lionagi/core/generic/tree.py +48 -0
  43. lionagi/core/generic/tree_node.py +79 -0
  44. lionagi/core/mail/__init__.py +12 -0
  45. lionagi/core/mail/mail.py +25 -0
  46. lionagi/core/mail/mail_manager.py +139 -58
  47. lionagi/core/mail/package.py +45 -0
  48. lionagi/core/mail/start_mail.py +36 -0
  49. lionagi/core/message/__init__.py +19 -0
  50. lionagi/core/message/action_request.py +133 -0
  51. lionagi/core/message/action_response.py +135 -0
  52. lionagi/core/message/assistant_response.py +95 -0
  53. lionagi/core/message/instruction.py +234 -0
  54. lionagi/core/message/message.py +101 -0
  55. lionagi/core/message/system.py +86 -0
  56. lionagi/core/message/util.py +283 -0
  57. lionagi/core/report/__init__.py +4 -0
  58. lionagi/core/report/base.py +217 -0
  59. lionagi/core/report/form.py +231 -0
  60. lionagi/core/report/report.py +166 -0
  61. lionagi/core/report/util.py +28 -0
  62. lionagi/core/rule/_default.py +16 -0
  63. lionagi/core/rule/action.py +99 -0
  64. lionagi/core/rule/base.py +238 -0
  65. lionagi/core/rule/boolean.py +56 -0
  66. lionagi/core/rule/choice.py +47 -0
  67. lionagi/core/rule/mapping.py +96 -0
  68. lionagi/core/rule/number.py +71 -0
  69. lionagi/core/rule/rulebook.py +109 -0
  70. lionagi/core/rule/string.py +52 -0
  71. lionagi/core/rule/util.py +35 -0
  72. lionagi/core/session/branch.py +431 -0
  73. lionagi/core/session/directive_mixin.py +287 -0
  74. lionagi/core/session/session.py +229 -903
  75. lionagi/core/structure/__init__.py +1 -0
  76. lionagi/core/structure/chain.py +1 -0
  77. lionagi/core/structure/forest.py +1 -0
  78. lionagi/core/structure/graph.py +1 -0
  79. lionagi/core/structure/tree.py +1 -0
  80. lionagi/core/unit/__init__.py +5 -0
  81. lionagi/core/unit/parallel_unit.py +245 -0
  82. lionagi/core/unit/template/action.py +81 -0
  83. lionagi/core/unit/template/base.py +51 -0
  84. lionagi/core/unit/template/plan.py +84 -0
  85. lionagi/core/unit/template/predict.py +109 -0
  86. lionagi/core/unit/template/score.py +124 -0
  87. lionagi/core/unit/template/select.py +104 -0
  88. lionagi/core/unit/unit.py +362 -0
  89. lionagi/core/unit/unit_form.py +305 -0
  90. lionagi/core/unit/unit_mixin.py +1168 -0
  91. lionagi/core/unit/util.py +71 -0
  92. lionagi/core/validator/validator.py +364 -0
  93. lionagi/core/work/work.py +76 -0
  94. lionagi/core/work/work_function.py +101 -0
  95. lionagi/core/work/work_queue.py +103 -0
  96. lionagi/core/work/worker.py +258 -0
  97. lionagi/core/work/worklog.py +120 -0
  98. lionagi/experimental/compressor/base.py +46 -0
  99. lionagi/experimental/compressor/llm_compressor.py +247 -0
  100. lionagi/experimental/compressor/llm_summarizer.py +61 -0
  101. lionagi/experimental/compressor/util.py +70 -0
  102. lionagi/experimental/directive/__init__.py +19 -0
  103. lionagi/experimental/directive/parser/base_parser.py +69 -2
  104. lionagi/experimental/directive/{template_ → template}/base_template.py +17 -1
  105. lionagi/{libs/ln_tokenizer.py → experimental/directive/tokenizer.py} +16 -0
  106. lionagi/experimental/{directive/evaluator → evaluator}/ast_evaluator.py +16 -0
  107. lionagi/experimental/{directive/evaluator → evaluator}/base_evaluator.py +16 -0
  108. lionagi/experimental/knowledge/base.py +10 -0
  109. lionagi/experimental/memory/__init__.py +0 -0
  110. lionagi/experimental/strategies/__init__.py +0 -0
  111. lionagi/experimental/strategies/base.py +1 -0
  112. lionagi/integrations/bridge/langchain_/documents.py +4 -0
  113. lionagi/integrations/bridge/llamaindex_/index.py +30 -0
  114. lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +6 -0
  115. lionagi/integrations/chunker/chunk.py +161 -24
  116. lionagi/integrations/config/oai_configs.py +34 -3
  117. lionagi/integrations/config/openrouter_configs.py +14 -2
  118. lionagi/integrations/loader/load.py +122 -21
  119. lionagi/integrations/loader/load_util.py +6 -77
  120. lionagi/integrations/provider/_mapping.py +46 -0
  121. lionagi/integrations/provider/litellm.py +2 -1
  122. lionagi/integrations/provider/mlx_service.py +16 -9
  123. lionagi/integrations/provider/oai.py +91 -4
  124. lionagi/integrations/provider/ollama.py +6 -5
  125. lionagi/integrations/provider/openrouter.py +115 -8
  126. lionagi/integrations/provider/services.py +2 -2
  127. lionagi/integrations/provider/transformers.py +18 -22
  128. lionagi/integrations/storage/__init__.py +3 -3
  129. lionagi/integrations/storage/neo4j.py +52 -60
  130. lionagi/integrations/storage/storage_util.py +44 -46
  131. lionagi/integrations/storage/structure_excel.py +43 -26
  132. lionagi/integrations/storage/to_excel.py +11 -4
  133. lionagi/libs/__init__.py +22 -1
  134. lionagi/libs/ln_api.py +75 -20
  135. lionagi/libs/ln_context.py +37 -0
  136. lionagi/libs/ln_convert.py +21 -9
  137. lionagi/libs/ln_func_call.py +69 -28
  138. lionagi/libs/ln_image.py +107 -0
  139. lionagi/libs/ln_nested.py +26 -11
  140. lionagi/libs/ln_parse.py +82 -23
  141. lionagi/libs/ln_queue.py +16 -0
  142. lionagi/libs/ln_tokenize.py +164 -0
  143. lionagi/libs/ln_validate.py +16 -0
  144. lionagi/libs/special_tokens.py +172 -0
  145. lionagi/libs/sys_util.py +95 -24
  146. lionagi/lions/coder/code_form.py +13 -0
  147. lionagi/lions/coder/coder.py +50 -3
  148. lionagi/lions/coder/util.py +30 -25
  149. lionagi/tests/libs/test_func_call.py +23 -21
  150. lionagi/tests/libs/test_nested.py +36 -21
  151. lionagi/tests/libs/test_parse.py +1 -1
  152. lionagi/tests/test_core/collections/__init__.py +0 -0
  153. lionagi/tests/test_core/collections/test_component.py +206 -0
  154. lionagi/tests/test_core/collections/test_exchange.py +138 -0
  155. lionagi/tests/test_core/collections/test_flow.py +145 -0
  156. lionagi/tests/test_core/collections/test_pile.py +171 -0
  157. lionagi/tests/test_core/collections/test_progression.py +129 -0
  158. lionagi/tests/test_core/generic/test_edge.py +67 -0
  159. lionagi/tests/test_core/generic/test_graph.py +96 -0
  160. lionagi/tests/test_core/generic/test_node.py +106 -0
  161. lionagi/tests/test_core/generic/test_tree_node.py +73 -0
  162. lionagi/tests/test_core/test_branch.py +115 -294
  163. lionagi/tests/test_core/test_form.py +46 -0
  164. lionagi/tests/test_core/test_report.py +105 -0
  165. lionagi/tests/test_core/test_validator.py +111 -0
  166. lionagi/version.py +1 -1
  167. lionagi-0.2.1.dist-info/LICENSE +202 -0
  168. lionagi-0.2.1.dist-info/METADATA +272 -0
  169. lionagi-0.2.1.dist-info/RECORD +240 -0
  170. lionagi/core/branch/base.py +0 -653
  171. lionagi/core/branch/branch.py +0 -474
  172. lionagi/core/branch/flow_mixin.py +0 -96
  173. lionagi/core/branch/util.py +0 -323
  174. lionagi/core/direct/__init__.py +0 -19
  175. lionagi/core/direct/cot.py +0 -123
  176. lionagi/core/direct/plan.py +0 -164
  177. lionagi/core/direct/predict.py +0 -166
  178. lionagi/core/direct/react.py +0 -171
  179. lionagi/core/direct/score.py +0 -279
  180. lionagi/core/direct/select.py +0 -170
  181. lionagi/core/direct/sentiment.py +0 -1
  182. lionagi/core/direct/utils.py +0 -110
  183. lionagi/core/direct/vote.py +0 -64
  184. lionagi/core/execute/base_executor.py +0 -47
  185. lionagi/core/flow/baseflow.py +0 -23
  186. lionagi/core/flow/monoflow/ReAct.py +0 -240
  187. lionagi/core/flow/monoflow/__init__.py +0 -9
  188. lionagi/core/flow/monoflow/chat.py +0 -95
  189. lionagi/core/flow/monoflow/chat_mixin.py +0 -253
  190. lionagi/core/flow/monoflow/followup.py +0 -215
  191. lionagi/core/flow/polyflow/__init__.py +0 -1
  192. lionagi/core/flow/polyflow/chat.py +0 -251
  193. lionagi/core/form/action_form.py +0 -26
  194. lionagi/core/form/field_validator.py +0 -287
  195. lionagi/core/form/form.py +0 -302
  196. lionagi/core/form/mixin.py +0 -214
  197. lionagi/core/form/scored_form.py +0 -13
  198. lionagi/core/generic/action.py +0 -26
  199. lionagi/core/generic/component.py +0 -532
  200. lionagi/core/generic/condition.py +0 -46
  201. lionagi/core/generic/mail.py +0 -90
  202. lionagi/core/generic/mailbox.py +0 -36
  203. lionagi/core/generic/relation.py +0 -70
  204. lionagi/core/generic/signal.py +0 -22
  205. lionagi/core/generic/structure.py +0 -362
  206. lionagi/core/generic/transfer.py +0 -20
  207. lionagi/core/generic/work.py +0 -40
  208. lionagi/core/graph/graph.py +0 -126
  209. lionagi/core/graph/tree.py +0 -190
  210. lionagi/core/mail/schema.py +0 -63
  211. lionagi/core/messages/schema.py +0 -325
  212. lionagi/core/tool/__init__.py +0 -5
  213. lionagi/core/tool/tool.py +0 -28
  214. lionagi/core/tool/tool_manager.py +0 -283
  215. lionagi/experimental/report/form.py +0 -64
  216. lionagi/experimental/report/report.py +0 -138
  217. lionagi/experimental/report/util.py +0 -47
  218. lionagi/experimental/tool/function_calling.py +0 -43
  219. lionagi/experimental/tool/manual.py +0 -66
  220. lionagi/experimental/tool/schema.py +0 -59
  221. lionagi/experimental/tool/tool_manager.py +0 -138
  222. lionagi/experimental/tool/util.py +0 -16
  223. lionagi/experimental/validator/rule.py +0 -139
  224. lionagi/experimental/validator/validator.py +0 -56
  225. lionagi/experimental/work/__init__.py +0 -10
  226. lionagi/experimental/work/async_queue.py +0 -54
  227. lionagi/experimental/work/schema.py +0 -73
  228. lionagi/experimental/work/work_function.py +0 -67
  229. lionagi/experimental/work/worker.py +0 -56
  230. lionagi/experimental/work2/form.py +0 -371
  231. lionagi/experimental/work2/report.py +0 -289
  232. lionagi/experimental/work2/schema.py +0 -30
  233. lionagi/experimental/work2/tests.py +0 -72
  234. lionagi/experimental/work2/work_function.py +0 -89
  235. lionagi/experimental/work2/worker.py +0 -12
  236. lionagi/integrations/bridge/llamaindex_/get_index.py +0 -294
  237. lionagi/tests/test_core/generic/test_component.py +0 -89
  238. lionagi/tests/test_core/test_base_branch.py +0 -426
  239. lionagi/tests/test_core/test_chat_flow.py +0 -63
  240. lionagi/tests/test_core/test_mail_manager.py +0 -75
  241. lionagi/tests/test_core/test_prompts.py +0 -51
  242. lionagi/tests/test_core/test_session.py +0 -254
  243. lionagi/tests/test_core/test_session_base_util.py +0 -313
  244. lionagi/tests/test_core/test_tool_manager.py +0 -95
  245. lionagi-0.1.2.dist-info/LICENSE +0 -9
  246. lionagi-0.1.2.dist-info/METADATA +0 -174
  247. lionagi-0.1.2.dist-info/RECORD +0 -206
  248. /lionagi/core/{branch → _setting}/__init__.py +0 -0
  249. /lionagi/core/{execute → agent/eval}/__init__.py +0 -0
  250. /lionagi/core/{flow → agent/learn}/__init__.py +0 -0
  251. /lionagi/core/{form → agent/plan}/__init__.py +0 -0
  252. /lionagi/core/{branch/executable_branch.py → agent/plan/plan.py} +0 -0
  253. /lionagi/core/{graph → director}/__init__.py +0 -0
  254. /lionagi/core/{messages → engine}/__init__.py +0 -0
  255. /lionagi/{experimental/directive/evaluator → core/engine}/sandbox_.py +0 -0
  256. /lionagi/{experimental/directive/evaluator → core/executor}/__init__.py +0 -0
  257. /lionagi/{experimental/directive/template_ → core/rule}/__init__.py +0 -0
  258. /lionagi/{experimental/report → core/unit/template}/__init__.py +0 -0
  259. /lionagi/{experimental/tool → core/validator}/__init__.py +0 -0
  260. /lionagi/{experimental/validator → core/work}/__init__.py +0 -0
  261. /lionagi/experimental/{work2 → compressor}/__init__.py +0 -0
  262. /lionagi/{core/flow/mono_chat_mixin.py → experimental/directive/template/__init__.py} +0 -0
  263. /lionagi/experimental/directive/{schema.py → template/schema.py} +0 -0
  264. /lionagi/experimental/{work2/util.py → evaluator/__init__.py} +0 -0
  265. /lionagi/experimental/{work2/work.py → knowledge/__init__.py} +0 -0
  266. /lionagi/{tests/libs/test_async.py → experimental/knowledge/graph.py} +0 -0
  267. {lionagi-0.1.2.dist-info → lionagi-0.2.1.dist-info}/WHEEL +0 -0
  268. {lionagi-0.1.2.dist-info → lionagi-0.2.1.dist-info}/top_level.txt +0 -0
lionagi/integrations/loader/load_util.py

@@ -1,10 +1,10 @@
 # use utils and schema
-import math
 from enum import Enum
 from pathlib import Path
 from typing import List, Union, Dict, Any, Tuple

 from lionagi.libs import convert, func_call
+from lionagi.libs.ln_tokenize import TokenizeUtil
 from lionagi.core.generic import Node


@@ -57,7 +57,7 @@ def dir_to_path(


 def dir_to_nodes(
-    dir: str,
+    dir_: str,
     ext: Union[List[str], str],
     recursive: bool = False,
     flatten: bool = True,
@@ -84,82 +84,11 @@ def dir_to_nodes(
     # converting them into Node objects.
     """

-    path_list = dir_to_path(dir, ext, recursive, flatten)
+    path_list = dir_to_path(dir=dir_, ext=ext, recursive=recursive, flatten=flatten)
     files_info = func_call.lcall(path_list, read_text, clean=clean_text)
     return func_call.lcall(files_info, lambda x: Node(content=x[0], metadata=x[1]))


-def chunk_text(
-    input: str, chunk_size: int, overlap: float, threshold: int
-) -> List[Union[str, None]]:
-    """
-    Chunks the input text into smaller parts, with optional overlap and threshold for final chunk.
-
-    Parameters:
-        input (str): The input text to chunk.
-
-        chunk_size (int): The size of each chunk.
-
-        overlap (float): The amount of overlap between chunks.
-
-        threshold (int): The minimum size of the final chunk.
-
-    Returns:
-        List[Union[str, None]]: A list of text chunks.
-
-    Raises:
-        ValueError: If an error occurs during chunking.
-    """
-
-    def _chunk_n1():
-        return [input]
-
-    def _chunk_n2():
-        chunks = []
-        chunks.append(input[: chunk_size + overlap_size])
-
-        if len(input) - chunk_size > threshold:
-            chunks.append(input[chunk_size - overlap_size :])
-        else:
-            return _chunk_n1()
-
-        return chunks
-
-    def _chunk_n3():
-        chunks = []
-        chunks.append(input[: chunk_size + overlap_size])
-        for i in range(1, n_chunks - 1):
-            start_idx = chunk_size * i - overlap_size
-            end_idx = chunk_size * (i + 1) + overlap_size
-            chunks.append(input[start_idx:end_idx])
-
-        if len(input) - chunk_size * (n_chunks - 1) > threshold:
-            chunks.append(input[chunk_size * (n_chunks - 1) - overlap_size :])
-        else:
-            chunks[-1] += input[chunk_size * (n_chunks - 1) + overlap_size :]
-
-        return chunks
-
-    try:
-        if not isinstance(input, str):
-            input = convert.to_str(input)
-
-        n_chunks = math.ceil(len(input) / chunk_size)
-        overlap_size = int(overlap / 2)
-
-        if n_chunks == 1:
-            return _chunk_n1()
-
-        elif n_chunks == 2:
-            return _chunk_n2()
-
-        elif n_chunks > 2:
-            return _chunk_n3()
-
-    except Exception as e:
-        raise ValueError(f"An error occurred while chunking the text. {e}")
-
-
 def read_text(filepath: str, clean: bool = True) -> Tuple[str, dict]:
     """
     Reads text from a file and optionally cleans it, returning the content and metadata.
@@ -202,9 +131,9 @@ def read_text(filepath: str, clean: bool = True) -> Tuple[str, dict]:
             content = f.read()
             if clean:
                 # Define characters to replace and their replacements
-                replacements = {"\\": " ", "\n": " ", "\t": " ", "  ": " ", "'": " "}
+                replacements = {"\\": "", "\n\n": "\n"}
                 for old, new in replacements.items():
-                    content = content.replace(old, new)
+                    content = content.replace(old, new).strip()
             metadata = _get_metadata()
             return content, metadata
     except Exception as e:
@@ -223,7 +152,7 @@ def _file_to_chunks(
         "chunk_overlap": overlap,
         "chunk_threshold": threshold,
    }
-    chunks = chunk_text(
+    chunks = TokenizeUtil.chunk_by_chars(
        input[field], chunk_size=chunk_size, overlap=overlap, threshold=threshold
    )
    logs = []
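
Note: the chunk_text helper removed here is replaced by TokenizeUtil.chunk_by_chars from the new lionagi/libs/ln_tokenize.py (+164 lines in the file list). A minimal usage sketch of the replacement call, assuming chunk_by_chars keeps the keyword signature used in _file_to_chunks; the sample values are illustrative, not taken from the package:

    # Hedged sketch mirroring the chunk_by_chars call shown in the hunk above.
    from lionagi.libs.ln_tokenize import TokenizeUtil

    text = "lorem ipsum " * 500  # any long string
    chunks = TokenizeUtil.chunk_by_chars(
        text, chunk_size=1500, overlap=0.1, threshold=200
    )
    print(len(chunks), len(chunks[0]))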
lionagi/integrations/provider/_mapping.py (new file)

@@ -0,0 +1,46 @@
+from .oai import OpenAIService
+from .openrouter import OpenRouterService
+from .ollama import OllamaService
+from .transformers import TransformersService
+from .litellm import LiteLLMService
+from .mlx_service import MLXService
+from lionagi.integrations.config.oai_configs import oai_schema
+from lionagi.integrations.config.openrouter_configs import openrouter_schema
+
+SERVICE_PROVIDERS_MAPPING = {
+    "openai": {
+        "service": OpenAIService,
+        "schema": oai_schema,
+        "default_model": "gpt-3.5-turbo",
+    },
+    "openrouter": {
+        "service": OpenRouterService,
+        "schema": openrouter_schema,
+        "default_model": "gpt-3.5-turbo",
+    },
+    "litellm": {
+        "service": LiteLLMService,
+        "schema": oai_schema,
+        "default_model": "gpt-3.5-turbo",
+    },
+    "ollama": {
+        "service": OllamaService,
+        "schema": {"model": "llama3"},
+        "default_model": "llama3",
+    },
+    "transformers": {
+        "service": TransformersService,
+        "schema": {"model": "gpt2"},
+        "default_model": "gpt2",
+    },
+    "mlx": {
+        "service": MLXService,
+        "schema": {"model": "mlx-community/OLMo-7B-hf-4bit-mlx"},
+        "default_model": "mlx-community/OLMo-7B-hf-4bit-mlx",
+    },
+}
+
+# TODO
+# "Ollama": OllamaService,
+# "Transformers": TransformersService,
+# "MLX": MLXService,
lionagi/integrations/provider/litellm.py

@@ -31,12 +31,13 @@ class LiteLLMService(BaseService):
         self.acompletion = acompletion
         self.model = model
         self.kwargs = kwargs
+        self.allowed_kwargs = allowed_kwargs

     async def serve_chat(self, messages, **kwargs):
         payload = {"messages": messages}
         config = {}
         for k, v in kwargs.items():
-            if k in allowed_kwargs:
+            if k in self.allowed_kwargs:
                 config[k] = v

         kwargs = {**self.kwargs, **config}
lionagi/integrations/provider/mlx_service.py

@@ -1,13 +1,18 @@
+import re
 from lionagi.libs.sys_util import SysUtil
 import lionagi.libs.ln_convert as convert
 from lionagi.libs.ln_api import BaseService
 from lionagi.integrations.config.mlx_configs import model


-class MlXService(BaseService):
+class MLXService(BaseService):
     def __init__(self, model=model, **kwargs):

         SysUtil.check_import("mlx_lm")
+        SysUtil.check_import("ipywidgets")
+
+        if model is not None and "olmo" in str(model).lower():
+            SysUtil.check_import("olmo", pip_name="ai2-olmo")

         from mlx_lm import load, generate

@@ -19,16 +24,13 @@ class MlXService(BaseService):
         self.model = model_
         self.tokenizer = tokenizer
         self.generate = generate
+        self.allowed_kwargs = []

     async def serve_chat(self, messages, **kwargs):
         if "verbose" not in kwargs.keys():
-            verbose = True
+            verbose = False

-        prompts = [
-            convert.to_dict(msg["content"])["instruction"]
-            for msg in messages
-            if msg["role"] == "user"
-        ]
+        prompts = [msg["content"] for msg in messages if msg["role"] == "user"]

         payload = {"messages": messages}

@@ -39,8 +41,13 @@ class MlXService(BaseService):
                 prompt=f"{prompts[-1]} \nOutput: ",
                 verbose=verbose,
             )
-            completion = {"model": self.model_name, "choices": [{"message": response}]}
-
+            if "```" in response:
+                regex = re.compile(r"```[\s\S]*?```")
+                matches = regex.findall(response)
+                msg = matches[0].strip("```")
+                completion = {"choices": [{"message": {"content": msg}}]}
+            else:
+                completion = {"choices": [{"message": {"content": response}}]}
             return payload, completion
         except Exception as e:
             self.status_tracker.num_tasks_failed += 1
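
Note: the new branch pulls the first fenced code block out of the MLX output before wrapping it in an OpenAI-style completion dict. A small stand-alone illustration of that regex; the sample response text is hypothetical:

    # Hedged sketch of the fence-extraction branch added above.
    import re

    response = "Sure:\n```\nprint('hello')\n```\nAnything else?"
    regex = re.compile(r"```[\s\S]*?```")
    matches = regex.findall(response)  # ["```\nprint('hello')\n```"]
    msg = matches[0].strip("```")      # strip() removes the backtick characters
    completion = {"choices": [{"message": {"content": msg}}]}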
lionagi/integrations/provider/oai.py

@@ -1,8 +1,46 @@
+"""
+Copyright 2024 HaiyangLi
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
 from os import getenv
 from lionagi.integrations.config.oai_configs import oai_schema
 from lionagi.libs.ln_api import BaseService, PayloadPackage


+allowed_kwargs = [
+    "model",
+    "frequency_penalty",
+    "n",
+    "presence_penalty",
+    "response_format",
+    "temperature",
+    "top_p",
+    "seed",
+    "stop",
+    "stream",
+    "stream_options",
+    "tools",
+    "tool_choice",
+    "user",
+    "max_tokens",
+    "logprobs",
+    "top_logprobs",
+    "logit_bias",
+]
+
+
 class OpenAIService(BaseService):
     """
     A service to interact with OpenAI's API endpoints.
@@ -30,6 +68,7 @@ class OpenAIService(BaseService):
         "audio_speech",
         "audio_transcriptions",
         "audio_translations",
+        "embeddings",
     ]
     schema = oai_schema
     key_scheme = "OPENAI_API_KEY"
@@ -51,6 +90,7 @@ class OpenAIService(BaseService):
             **kwargs,
         )
         self.active_endpoint = []
+        self.allowed_kwargs = allowed_kwargs

     async def serve(self, input_, endpoint="chat/completions", method="post", **kwargs):
         """
@@ -84,7 +124,7 @@ class OpenAIService(BaseService):
         else:
             return ValueError(f"{endpoint} is currently not supported")

-    async def serve_chat(self, messages, **kwargs):
+    async def serve_chat(self, messages, required_tokens=None, **kwargs):
         """
         Serves the chat completion request with the given messages.

@@ -101,16 +141,63 @@ class OpenAIService(BaseService):
         if "chat/completions" not in self.active_endpoint:
             await self.init_endpoint("chat/completions")
             self.active_endpoint.append("chat/completions")
+
+        msgs = []
+
+        for msg in messages:
+            if isinstance(msg, dict):
+                content = msg.get("content")
+                if isinstance(content, (dict, str)):
+                    msgs.append({"role": msg["role"], "content": content})
+                elif isinstance(content, list):
+                    _content = []
+                    for i in content:
+                        if "text" in i:
+                            _content.append({"type": "text", "text": str(i["text"])})
+                        elif "image_url" in i:
+                            _content.append(
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": f"{i['image_url'].get('url')}",
+                                        "detail": i["image_url"].get("detail", "low"),
+                                    },
+                                }
+                            )
+                    msgs.append({"role": msg["role"], "content": _content})
+
         payload = PayloadPackage.chat_completion(
-            messages,
+            msgs,
             self.endpoints["chat/completions"].config,
             self.schema["chat/completions"],
             **kwargs,
         )
-
         try:
-            completion = await self.call_api(payload, "chat/completions", "post")
+            completion = await self.call_api(
+                payload, "chat/completions", "post", required_tokens=required_tokens
+            )
             return payload, completion
         except Exception as e:
             self.status_tracker.num_tasks_failed += 1
             raise e
+
+    async def serve_embedding(self, embed_str, required_tokens=None, **kwargs):
+        if "embeddings" not in self.active_endpoint:
+            await self.init_endpoint("embeddings")
+            self.active_endpoint.append("embeddings")
+
+        payload = PayloadPackage.embeddings(
+            embed_str,
+            self.endpoints["embeddings"].config,
+            self.schema["embeddings"],
+            **kwargs,
+        )
+
+        try:
+            embed = await self.call_api(
+                payload, "embeddings", "post", required_tokens=required_tokens
+            )
+            return payload, embed
+        except Exception as e:
+            self.status_tracker.num_tasks_failed += 1
+            raise e
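
Note: the normalization loop added to serve_chat accepts plain-string content as before and, for list content, rebuilds OpenAI-style "text" / "image_url" parts (detail defaults to "low"). An illustrative message list it can handle; the URL and token count below are hypothetical:

    # Hedged sketch of message shapes accepted by the updated serve_chat.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": [
                {"text": "Describe this image."},
                {"image_url": {"url": "https://example.com/cat.png", "detail": "high"}},
            ],
        },
    ]
    # payload, completion = await service.serve_chat(messages, required_tokens=512)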
lionagi/integrations/provider/ollama.py

@@ -4,7 +4,7 @@ from lionagi.integrations.config.ollama_configs import model
 allowed_kwargs = [
     "model",
     "frequency_penalty",
-    "max_tokens",
+    # "max_tokens",
     "n",
     "presence_penalty",
     "response_format",
@@ -32,7 +32,8 @@ class OllamaService(BaseService):

         self.ollama = ollama
         self.model = model
-        self.client = self.ollama.AsyncClient(**kwargs)
+        self.client = self.ollama.AsyncClient()
+        self.allowed_kwargs = allowed_kwargs

     async def serve_chat(self, messages, **kwargs):
         config = {}
@@ -42,11 +43,11 @@ class OllamaService(BaseService):

         self.ollama.pull(self.model)
         payload = {"messages": messages}
+        if "model" not in config:
+            config["model"] = self.model

         try:
-            completion = await self.client.chat(
-                model=self.model, messages=messages, **config
-            )
+            completion = await self.client.chat(messages=messages, **config)
             completion["choices"] = [{"message": completion.pop("message")}]
             return payload, completion
         except Exception as e:
lionagi/integrations/provider/openrouter.py

@@ -2,6 +2,27 @@ from os import getenv
 from lionagi.integrations.config.openrouter_configs import openrouter_schema
 from lionagi.libs.ln_api import BaseService, PayloadPackage

+allowed_kwargs = [
+    "model",
+    "frequency_penalty",
+    "n",
+    "presence_penalty",
+    "response_format",
+    "temperature",
+    "top_p",
+    "seed",
+    "stop",
+    "stream",
+    "stream_options",
+    "tools",
+    "tool_choice",
+    "user",
+    "max_tokens",
+    "logprobs",
+    "top_logprobs",
+    "logit_bias",
+]
+

 class OpenRouterService(BaseService):
     base_url = "https://openrouter.ai/api/v1/"
@@ -26,8 +47,33 @@ class OpenRouterService(BaseService):
             **kwargs,
         )
         self.active_endpoint = []
+        self.allowed_kwargs = allowed_kwargs

     async def serve(self, input_, endpoint="chat/completions", method="post", **kwargs):
+        """
+        Serves the input using the specified endpoint and method.
+
+        Args:
+            input_: The input text to be processed.
+            endpoint: The API endpoint to use for processing.
+            method: The HTTP method to use for the request.
+            **kwargs: Additional keyword arguments to pass to the payload creation.
+
+        Returns:
+            A tuple containing the payload and the completion assistant_response from the API.
+
+        Raises:
+            ValueError: If the specified endpoint is not supported.
+
+        Examples:
+            >>> service = OpenAIService(api_key="your_api_key")
+            >>> asyncio.run(service.serve("Hello, world!","chat/completions"))
+            (payload, completion)
+
+            >>> service = OpenAIService()
+            >>> asyncio.run(service.serve("Convert this text to speech.","audio_speech"))
+            ValueError: 'audio_speech' is currently not supported
+        """
         if endpoint not in self.active_endpoint:
             await self.init_endpoint(endpoint)
         if endpoint == "chat/completions":
@@ -35,19 +81,80 @@ class OpenRouterService(BaseService):
         else:
             return ValueError(f"{endpoint} is currently not supported")

-    async def serve_chat(self, messages, **kwargs):
-        endpoint = "chat/completions"
+    async def serve_chat(self, messages, required_tokens=None, **kwargs):
+        """
+        Serves the chat completion request with the given messages.
+
+        Args:
+            messages: The messages to be included in the chat completion.
+            **kwargs: Additional keyword arguments for payload creation.
+
+        Returns:
+            A tuple containing the payload and the completion assistant_response from the API.
+
+        Raises:
+            Exception: If the API call fails.
+        """
+        if "chat/completions" not in self.active_endpoint:
+            await self.init_endpoint("chat/completions")
+            self.active_endpoint.append("chat/completions")
+
+        msgs = []
+
+        for msg in messages:
+            if isinstance(msg, dict):
+                content = msg.get("content")
+                if isinstance(content, (dict, str)):
+                    msgs.append({"role": msg["role"], "content": content})
+                elif isinstance(content, list):
+                    _content = []
+                    for i in content:
+                        if "text" in i:
+                            _content.append({"type": "text", "text": str(i["text"])})
+                        elif "image_url" in i:
+                            _content.append(
+                                {
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": f"{i['image_url'].get('url')}",
+                                        "detail": i["image_url"].get("detail", "low"),
+                                    },
+                                }
+                            )
+                    msgs.append({"role": msg["role"], "content": _content})

-        if endpoint not in self.active_endpoint:
-            await self.init_endpoint(endpoint)
-            self.active_endpoint.append(endpoint)
         payload = PayloadPackage.chat_completion(
-            messages, self.endpoints[endpoint].config, self.schema[endpoint], **kwargs
+            msgs,
+            self.endpoints["chat/completions"].config,
+            self.schema["chat/completions"],
+            **kwargs,
         )
-
         try:
-            completion = await self.call_api(payload, endpoint, "post")
+            completion = await self.call_api(
+                payload, "chat/completions", "post", required_tokens=required_tokens
+            )
             return payload, completion
         except Exception as e:
             self.status_tracker.num_tasks_failed += 1
             raise e
+
+    # async def serve_embedding(self, embed_str, required_tokens=None, **kwargs):
+    #     if "embeddings" not in self.active_endpoint:
+    #         await self.init_endpoint("embeddings")
+    #         self.active_endpoint.append("embeddings")
+
+    #     payload = PayloadPackage.embeddings(
+    #         embed_str,
+    #         self.endpoints["embeddings"].config,
+    #         self.schema["embeddings"],
+    #         **kwargs,
+    #     )
+
+    #     try:
+    #         embed = await self.call_api(
+    #             payload, "embeddings", "post", required_tokens=required_tokens
+    #         )
+    #         return payload, embed
+    #     except Exception as e:
+    #         self.status_tracker.num_tasks_failed += 1
+    #         raise e
lionagi/integrations/provider/services.py

@@ -131,6 +131,6 @@ class Services:
             kwargs (Optional[Any]): additional kwargs for calling the model
         """

-        from lionagi.integrations.provider.mlx_service import MlXService
+        from lionagi.integrations.provider.mlx_service import MLXService

-        return MlXService(**kwargs)
+        return MLXService(**kwargs)
lionagi/integrations/provider/transformers.py

@@ -5,7 +5,7 @@ from lionagi.libs.sys_util import SysUtil
 from lionagi.libs.ln_api import BaseService

 allowed_kwargs = [
-    "model",
+    # "model",
     "tokenizer",
     "modelcard",
     "framework",
@@ -17,6 +17,9 @@ allowed_kwargs = [
     "torch_dtype",
     "min_length_for_response",
     "minimum_tokens",
+    "mask_token",
+    "max_length",
+    "max_new_tokens",
 ]


@@ -52,6 +55,7 @@ class TransformersService(BaseService):
         self.task = task
         self.model = model
         self.config = config
+        self.allowed_kwargs = allowed_kwargs
         try:
             from transformers import pipeline

@@ -59,19 +63,11 @@ class TransformersService(BaseService):
         except ImportError:
             try:
                 if not SysUtil.is_package_installed("torch"):
-                    in_ = input(
-                        "PyTorch is required for transformers. Would you like to install it now? (y/n): "
-                    )
-                    if in_ == "y":
-                        install_pytorch()
+                    install_pytorch()
                 if not SysUtil.is_package_installed("transformers"):
-                    in_ = input(
-                        "transformers is required. Would you like to install it now? (y/n): "
+                    SysUtil.install_import(
+                        package_name="transformers", import_name="pipeline"
                     )
-                    if in_ == "y":
-                        SysUtil.install_import(
-                            package_name="transformers", import_name="pipeline"
-                        )
                 from transformers import pipeline

                 self.pipeline = pipeline
@@ -92,19 +88,19 @@ class TransformersService(BaseService):
         payload = {"messages": messages}
         config = {}
         for k, v in kwargs.items():
+            if k == "max_tokens":
+                config["max_new_tokens"] = v
             if k in allowed_kwargs:
                 config[k] = v

-        conversation = self.pipe(str(messages), **config)
-
-        texts = conversation[-1]["generated_text"]
-        msgs = (
-            str(texts.split("]")[1:])
-            .replace("\\n", "")
-            .replace("['", "")
-            .replace("\\", "")
-        )
+        msg = "".join([i["content"] for i in messages if i["role"] == "user"])
+        conversation = ""
+        response = self.pipe(msg, **config)
+        try:
+            conversation = response[0]["generated_text"]
+        except:
+            conversation = response

-        completion = {"model": self.pipe.model, "choices": [{"message": msgs}]}
+        completion = {"choices": [{"message": {"content": conversation}}]}

         return payload, completion
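
Note: the kwarg loop above remaps the OpenAI-style max_tokens onto the transformers pipeline's max_new_tokens, while anything outside allowed_kwargs is dropped. A short trace of that behavior with sample values:

    # Hedged sketch of the filtering/remapping loop in serve_chat above.
    kwargs = {"max_tokens": 64, "temperature": 0.7}
    allowed_kwargs = ["tokenizer", "max_length", "max_new_tokens", "mask_token"]  # abridged
    config = {}
    for k, v in kwargs.items():
        if k == "max_tokens":
            config["max_new_tokens"] = v
        if k in allowed_kwargs:
            config[k] = v
    # config == {"max_new_tokens": 64}; "temperature" is dropped (not in allowed_kwargs)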
lionagi/integrations/storage/__init__.py

@@ -1,3 +1,3 @@
-from lionagi.integrations.storage.neo4j import Neo4j
-
-__all__ = ["Neo4j"]
+# from lionagi.integrations.storage.neo4j import Neo4j
+#
+# __all__ = ["Neo4j"]