OpenHosta 2.2.0.tar.gz → 2.2.2.tar.gz

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (60)
  1. {openhosta-2.2.0/src/OpenHosta.egg-info → openhosta-2.2.2}/PKG-INFO +2 -2
  2. {openhosta-2.2.0 → openhosta-2.2.2}/README.md +1 -1
  3. {openhosta-2.2.0 → openhosta-2.2.2}/pyproject.toml +1 -1
  4. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/OpenHosta.py +4 -4
  5. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/__init__.py +1 -1
  6. openhosta-2.2.2/src/OpenHosta/core/config.py +93 -0
  7. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/logger.py +69 -1
  8. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/ask.py +21 -11
  9. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/emulate.py +51 -67
  10. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/generate_data.py +12 -6
  11. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/dataset/oracle.py +2 -2
  12. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/predict.py +11 -14
  13. openhosta-2.2.2/src/OpenHosta/exec/thinkof.py +165 -0
  14. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/models/OpenAICompatible.py +9 -4
  15. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/meta_prompt.py +0 -36
  16. {openhosta-2.2.0 → openhosta-2.2.2/src/OpenHosta.egg-info}/PKG-INFO +2 -2
  17. openhosta-2.2.0/src/OpenHosta/core/config.py +0 -43
  18. openhosta-2.2.0/src/OpenHosta/exec/thinkof.py +0 -133
  19. {openhosta-2.2.0 → openhosta-2.2.2}/LICENSE +0 -0
  20. {openhosta-2.2.0 → openhosta-2.2.2}/setup.cfg +0 -0
  21. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/asynchrone/__init__.py +0 -0
  22. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/__init__.py +0 -0
  23. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/analizer.py +0 -0
  24. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/hosta_inspector.py +0 -0
  25. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/memory.py +0 -0
  26. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/pydantic_stub.py +0 -0
  27. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/type_converter.py +0 -0
  28. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/__init__.py +0 -0
  29. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/example.py +0 -0
  30. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/__init__.py +0 -0
  31. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/dataset/__init__.py +0 -0
  32. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/dataset/dataset.py +0 -0
  33. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/dataset/sample_type.py +0 -0
  34. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/encoder/__init__.py +0 -0
  35. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/encoder/base_encoder.py +0 -0
  36. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/encoder/simple_encoder.py +0 -0
  37. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/__init__.py +0 -0
  38. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/builtins/__init__.py +0 -0
  39. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/builtins/algo_architecture.py +0 -0
  40. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/builtins/classification.py +0 -0
  41. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/builtins/linear_regression.py +0 -0
  42. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/hosta_model.py +0 -0
  43. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/model_provider.py +0 -0
  44. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/neural_network.py +0 -0
  45. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/model/neural_network_types.py +0 -0
  46. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/predict_config.py +0 -0
  47. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/predict_memory.py +0 -0
  48. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/stubs.py +0 -0
  49. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/thought.py +0 -0
  50. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/use.py +0 -0
  51. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/__init__.py +0 -0
  52. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/errors.py +0 -0
  53. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/hosta_type.py +0 -0
  54. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/import_handler.py +0 -0
  55. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/progress_bar.py +0 -0
  56. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/torch_nn_utils.py +0 -0
  57. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta.egg-info/SOURCES.txt +0 -0
  58. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta.egg-info/dependency_links.txt +0 -0
  59. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta.egg-info/requires.txt +0 -0
  60. {openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta.egg-info/top_level.txt +0 -0
{openhosta-2.2.0/src/OpenHosta.egg-info → openhosta-2.2.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: OpenHosta
-Version: 2.2.0
+Version: 2.2.2
 Summary: A lightweight library integrating LLM natively into Python
 Author: Léandre Ramos, Merlin Devillard, William Jolivet, Emmanuel Batt
 License: MIT License
@@ -80,7 +80,7 @@ Requires-Dist: torch>=2.5.1; extra == "predict"
 Requires-Dist: numpy>=2.1.3; extra == "predict"
 
 # OpenHosta
-v2.2.0 - Open-Source Project
+v2.2.2 - Open-Source Project
 
 <a href="https://colab.research.google.com/github/hand-e-fr/OpenHosta/blob/main/docs/openhosta_mistral_small.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> Basic Usage - local LLM (Mistral-Small-2501)</a>
 <br/>
{openhosta-2.2.0 → openhosta-2.2.2}/README.md
@@ -1,5 +1,5 @@
 # OpenHosta
-v2.2.0 - Open-Source Project
+v2.2.2 - Open-Source Project
 
 <a href="https://colab.research.google.com/github/hand-e-fr/OpenHosta/blob/main/docs/openhosta_mistral_small.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> Basic Usage - local LLM (Mistral-Small-2501)</a>
 <br/>
{openhosta-2.2.0 → openhosta-2.2.2}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "OpenHosta"
-version = "2.2.0"
+version = "2.2.2"
 description = "A lightweight library integrating LLM natively into Python"
 keywords = ["AI", "GPT", "Natural language", "Autommatic", "Easy"]
 authors = [
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/OpenHosta.py
@@ -4,12 +4,12 @@ from .core import config
 from .core.config import Model, DefaultModelPolicy
 from .core.type_converter import TypeConverter, FunctionMetadata
 
-from .utils.meta_prompt import print_last_prompt, print_last_response
+from .core.logger import print_last_prompt, print_last_response
 from .utils.import_handler import is_predict_enabled
 from .utils.meta_prompt import Prompt
 
 from .exec.ask import ask, ask_async
-from .exec.thinkof import thinkof, thinkof_async
+from .exec.thinkof import thinkof, thinkof_async, return_type
 from .exec.emulate import emulate, emulate_async
 
 from .exec.thought import thought
@@ -29,8 +29,7 @@ else:
     import os
 
     DefaultModelPolicy.set_default_model(
-        Model(model="gpt-4o", base_url="https://api.openai.com/v1/chat/completions",
-              api_key=os.getenv("OPENAI_API_KEY") or None)
+        Model(model="gpt-4o", base_url="https://api.openai.com/v1/chat/completions")
     )
 
 all = (
@@ -39,6 +38,7 @@ all = (
     "emulate_async",
     "thinkof",
     "thinkof_async",
+    "return_type",
     "ask",
     "ask_async",
    "example",
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/__init__.py
@@ -1,3 +1,3 @@
 from .OpenHosta import *
 
-__version__ = "2.2.0"
+__version__ = "2.2.2"
openhosta-2.2.2/src/OpenHosta/core/config.py (new file)
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import sys
+
+from ..models.OpenAICompatible import Model
+from ..utils.meta_prompt import EMULATE_PROMPT, Prompt
+
+from typing import Tuple
+from abc import ABC, abstractmethod
+
+class ModelPolicy(ABC):
+    """
+    Abstract base class defining the interface for model selection policies.
+
+    This class provides a template for implementing different strategies
+    for selecting and managing AI models.
+    """
+
+    @abstractmethod
+    def apply_policy(self, user_desired_model=None, prompt_data=None) -> Tuple[Model, Prompt]:
+        """
+        Apply the policy to select an appropriate model.
+
+        Args:
+            user_desired_model (Model, optional): The model specifically requested by the user.
+
+        Returns:
+            Model: The selected model according to the policy implementation.
+        """
+        pass
+
+    @abstractmethod
+    def get_model(self) -> Model:
+        """
+        Retrieve the current model associated with this policy.
+
+        Returns:
+            Model: The current model instance.
+        """
+        pass
+
+class AlwaysDefaultPolicy(ModelPolicy):
+    """
+    This policy always use the default prompt with the default model
+    """
+
+    def __init__(self, default_model:Model=None, default_prompt:Prompt=EMULATE_PROMPT):
+        self.model = default_model
+        self.prompt = default_prompt
+
+    def set_default_model(self, new):
+        if isinstance(new, Model):
+            self.model = new
+        else:
+            sys.stderr.write("[CONFIG_ERROR] Invalid model instance.\n")
+
+    def set_default_apiKey(self, api_key=None):
+        if api_key is not None or isinstance(api_key, str):
+            self.model.api_key = api_key
+        else:
+            sys.stderr.write("[CONFIG_ERROR] Invalid API key.")
+
+    def get_model(self):
+        return self.model
+
+    def apply_policy(self, desired_model=None, desired_prompt=None, prompt_data=None) -> Tuple[Model, Prompt]:
+
+        selected_model = desired_model
+        selected_prompt = desired_prompt
+
+        if type(selected_prompt) is str:
+            selected_prompt = Prompt(selected_prompt)
+
+        if selected_model == None:
+            selected_model = self.get_model()
+
+        if selected_prompt == None:
+            selected_prompt = self.get_prompt()
+
+        return selected_model, selected_prompt
+
+    def get_prompt(self):
+        return self.prompt
+
+DefaultModelPolicy = AlwaysDefaultPolicy()
+
+def set_default_model(new):
+    DefaultModelPolicy.set_default_model(new)
+
+
+def set_default_apiKey(api_key:str=None):
+    DefaultModelPolicy.set_default_apiKey(api_key)
+
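The new `ModelPolicy`/`AlwaysDefaultPolicy` split centralizes the model and prompt fallback logic that the 2.2.0 call sites each duplicated. A minimal usage sketch follows; it assumes the top-level re-exports shown in `OpenHosta.py`, and the endpoint, model name, and key are placeholders rather than values from this diff:

```python
# Illustrative sketch of the 2.2.2 policy API (placeholder endpoint and key).
from OpenHosta import Model, config

config.set_default_model(
    Model(model="mistral-small", base_url="http://localhost:11434/v1/chat/completions")
)
config.set_default_apiKey("sk-placeholder")  # stored on the default model

# apply_policy() fills in whichever of (model, prompt) the caller left as None,
# and wraps a plain string prompt into a Prompt template:
model, prompt = config.DefaultModelPolicy.apply_policy(None, "Answer tersely.")
```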
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/core/logger.py
@@ -1,7 +1,9 @@
-from typing import Union, Literal, Optional
+from typing import Union, Literal, Optional, Callable
 from enum import Enum
 import platform
 
+from ..core.hosta_inspector import HostaInspector
+
 IS_UNIX = platform.system() != "Windows"
 
 class ANSIColor(Enum):
@@ -39,6 +41,8 @@ class ANSIColor(Enum):
     REVERSED = '\033[7m'
 
 class Logger:
+
+
     def __init__(self, log_file_path: Optional[str] = None, verbose: Union[Literal[0, 1, 2], bool] = 1):
         self.log_file_path: Optional[str] = log_file_path
         if log_file_path:
@@ -106,3 +110,67 @@ class Logger:
         one_line : bool = False
     ):
         self._log(prefix, message, level, color, text_color, one_line)
+
+class dialog_logger:
+
+    def __init__(self, inspection:HostaInspector=None, inner_func=None):
+
+        self.logging_object = {
+            "_last_request": {},
+            "_last_response": {}
+        }
+
+        if inspection is not None:
+            inspection.set_logging_object(self.logging_object)
+
+        if inner_func is not None:
+            setattr(inner_func, "_last_request", self.logging_object["_last_request"])
+            setattr(inner_func, "_last_response", self.logging_object["_last_response"])
+
+
+    def set_sys_prompt(self, prompt_rendered):
+        self.logging_object["_last_request"]['sys_prompt']=prompt_rendered
+
+    def set_user_prompt(self, user_prompt):
+        self.logging_object["_last_request"]['user_prompt']=user_prompt
+
+    def set_response_dict(self, response_dict):
+        self.logging_object["_last_response"]["response_dict"] = response_dict
+
+    def set_response_data(self, response_data):
+        self.logging_object["_last_response"]["data"] = response_data
+
+def print_last_prompt(function_pointer:Callable):
+    """
+    Print the last prompt sent to the LLM when using function `function_pointer`.
+    """
+    if hasattr(function_pointer, "_last_request"):
+        if "sys_prompt" in function_pointer._last_request:
+            print("[SYSTEM PROMPT]")
+            print(function_pointer._last_request["sys_prompt"])
+        if "user_prompt" in function_pointer._last_request:
+            print("[USER PROMPT]")
+            print(function_pointer._last_request["user_prompt"])
+    else:
+        print("No prompt found for this function.")
+
+
+def print_last_response(function_pointer:Callable):
+    """
+    Print the last answer recived from the LLM when using function `function_pointer`.
+    """
+    if hasattr(function_pointer, "_last_response"):
+        if "rational" in function_pointer._last_response:
+            print("[THINKING]")
+            print(function_pointer._last_response["rational"])
+        if "answer" in function_pointer._last_response:
+            print("[ANSWER]")
+            print(function_pointer._last_response["answer"])
+        if "data" in function_pointer._last_response:
+            print("[Data]")
+            print(function_pointer._last_response["data"])
+        else:
+            print("[UNFINISHED]")
+            print("answer processing was interupted")
+    else:
+        print("No prompt found for this function.")
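`print_last_prompt` and `print_last_response` move here from `utils/meta_prompt.py` and now read the `_last_request`/`_last_response` dictionaries that `dialog_logger` records. A usage sketch, assuming (as in 2.2.0) that `inspection.set_logging_object` attaches these dictionaries to the emulated function; the example function itself is hypothetical:

```python
from OpenHosta import emulate, print_last_prompt, print_last_response

def capitalize_cities(text: str) -> str:
    """Capitalize every city name in `text`."""  # hypothetical example
    return emulate()

capitalize_cities("i love paris and tokyo")

# Inspect the exchange recorded on the last call:
print_last_prompt(capitalize_cities)    # prints [SYSTEM PROMPT] / [USER PROMPT]
print_last_response(capitalize_cities)  # prints [ANSWER] / [Data] sections
```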
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/ask.py
@@ -2,10 +2,7 @@ from __future__ import annotations
 
 from typing import Any, Optional
 
-import asyncio
-
 from ..core.config import Model, DefaultModelPolicy
-from ..core.hosta_inspector import FunctionMetadata
 from ..utils.errors import RequestError
 
 def ask(
@@ -15,24 +12,37 @@ def ask(
     json_output=False,
     **api_args
 ) -> Any:
-    return asyncio.run(ask_async(user, system, model, json_output, **api_args))
+    model, system = DefaultModelPolicy.apply_policy(model, system)
+
+    response_dict = model.api_call([
+        {"role": "system", "content": system.render()},
+        {"role": "user", "content": user}
+    ],
+        json_output,
+        **api_args
+    )
+
+    try:
+        response = response_dict["choices"][0]["message"]["content"]
+        rational, answer = model.split_cot_answer(response)
+    except Exception as e:
+        raise RequestError(f"[ask] Request failed:\n{e}")
+
+    return answer
+
 
 async def ask_async(
     user: str,
-    system: Optional[str] = None,
+    system: Optional[str] = "You are an helpful assistant.",
     model: Optional[Model] = None,
     json_output=False,
     **api_args
 ) -> Any:
 
-    if model is None:
-        model = DefaultModelPolicy.get_model()
-
-    if system is None:
-        system = "You are an helpful assistant."
+    model, system = DefaultModelPolicy.apply_policy(model, system)
 
     response_dict = await model.api_call_async([
-        {"role": "system", "content": system},
+        {"role": "system", "content": system.render()},
         {"role": "user", "content": user}
     ],
        json_output,
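`ask` is no longer an `asyncio.run` wrapper around `ask_async`; both paths now resolve their model and system prompt through `DefaultModelPolicy.apply_policy`. A bare call then looks like this (sketch, assuming a default model has been configured):

```python
from OpenHosta import ask

# Synchronous one-shot query; the system prompt falls back to the
# policy's default Prompt when none is given.
answer = ask("Give three short names for a pet hedgehog.")
print(answer)
```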
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/emulate.py
@@ -2,9 +2,8 @@ from __future__ import annotations
 
 from typing import Any, Optional, Callable
 
-import asyncio
-
 from ..core.config import Model, DefaultModelPolicy
+from ..core.logger import dialog_logger
 from ..core.hosta_inspector import HostaInspector
 from ..utils.meta_prompt import Prompt
 
@@ -34,15 +33,35 @@ def emulate(
     - Any: The emulated function's return value, processed by the model and optionally modified by post_callback.
     """
     inspection = HostaInspector()
-    return asyncio.run(_emulate(
-        inspection=inspection,
-        model=model,
-        prompt=prompt,
-        use_locals_as_ctx=use_locals_as_ctx,
-        use_self_as_ctx=use_self_as_ctx,
-        post_callback=post_callback,
-        llm_args=llm_args,
-    ))
+
+    prompt_data = gather_data_for_prompt_template(inspection, use_locals_as_ctx, use_self_as_ctx)
+
+    model, prompt = DefaultModelPolicy.apply_policy(model, prompt, prompt_data)
+
+    prompt_rendered = prompt.render(prompt_data)
+
+    logger = dialog_logger(inspection)
+    logger.set_sys_prompt(prompt_rendered)
+    logger.set_user_prompt(prompt_data["PRE_FUNCTION_CALL"])
+
+    response_dict = model.api_call([
+        {"role": "system", "content": prompt_rendered},
+        {"role": "user", "content": prompt_data["PRE_FUNCTION_CALL"]}
+    ],
+        llm_args=llm_args
+    )
+
+    logger.set_response_dict(response_dict)
+
+    untyped_response = model.response_parser(response_dict, inspection._infos)
+    response_data = model.type_returned_data(untyped_response, inspection._infos)
+
+    logger.set_response_data(response_data)
+
+    if post_callback is not None:
+        response_data = post_callback(response_data)
+
+    return response_data
 
 
 async def emulate_async(
@@ -71,70 +90,35 @@ async def emulate_async(
     - Any: The emulated function's return value, processed by the model and optionally modified by post_callback.
     """
     inspection = HostaInspector()
-    return await _emulate(
-        inspection=inspection,
-        model=model,
-        prompt=prompt,
-        use_locals_as_ctx=use_locals_as_ctx,
-        use_self_as_ctx=use_self_as_ctx,
-        post_callback=post_callback,
-        llm_args=llm_args,
-    )
-
-async def _emulate(
-    *,
-    inspection:HostaInspector,
-    model: Optional[Model] = None,
-    prompt: Optional[Prompt] = None,
-    use_locals_as_ctx: bool = False,
-    use_self_as_ctx: bool = False,
-    post_callback: Optional[Callable] = None,
-    llm_args = {}
-) -> Any:
-
+
     prompt_data = gather_data_for_prompt_template(inspection, use_locals_as_ctx, use_self_as_ctx)
 
-    if model is None:
-        model = DefaultModelPolicy.get_model()
-
-    if prompt is None:
-        prompt = DefaultModelPolicy.get_prompt()
+    model, prompt = DefaultModelPolicy.apply_policy(model, prompt, prompt_data)
 
     prompt_rendered = prompt.render(prompt_data)
 
-    logging_object = {
-        "_last_request": {},
-        "_last_response": {}
-    }
-
-    inspection.set_logging_object(logging_object)
+    logger = dialog_logger(inspection)
+    logger.set_sys_prompt(prompt_rendered)
+    logger.set_user_prompt(prompt_data["PRE_FUNCTION_CALL"])
 
-    logging_object["_last_request"]['sys_prompt']=prompt_rendered
-    logging_object["_last_request"]['user_prompt']=prompt_data["PRE_FUNCTION_CALL"]
+    response_dict = await model.api_call_async([
+        {"role": "system", "content": prompt_rendered},
+        {"role": "user", "content": prompt_data["PRE_FUNCTION_CALL"]}
+    ],
+        llm_args=llm_args
+    )
 
-    try:
-        response_dict = await model.api_call_async([
-            {"role": "system", "content": prompt_rendered},
-            {"role": "user", "content": prompt_data["PRE_FUNCTION_CALL"]}
-        ],
-            llm_args=llm_args
-        )
-
-        logging_object["_last_response"]["response_dict"] = response_dict
-
-        l_data = model.response_parser(response_dict, inspection._infos)
-        l_data = model.type_returned_data(l_data, inspection._infos)
-
-        logging_object["_last_response"]["data"] = l_data
-
-        if post_callback is not None:
-            l_data = post_callback(l_data)
-
-    except NameError as e:
-        raise NotImplementedError(
-            f"[emulate]: {e}\nModel object does not have the required methods.")
+    logger.set_response_dict(response_dict)
+
+    untyped_response = model.response_parser(response_dict, inspection._infos)
+    response_data = model.type_returned_data(untyped_response, inspection._infos)
+
+    logger.set_response_data(response_data)
+
+    if post_callback is not None:
+        response_data = post_callback(response_data)
 
-    return l_data
+    return response_data
 
 
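With `_emulate` inlined, the synchronous `emulate` performs the whole request itself instead of spinning up an event loop, so it no longer hits `asyncio.run` failures inside an already-running loop (notebooks, ASGI servers). The calling pattern is unchanged for users; a sketch with a hypothetical function:

```python
from OpenHosta import emulate

def translate(text: str, language: str) -> str:
    """Translate `text` into `language`."""  # the docstring drives the LLM
    return emulate()

print(translate("Hello world", "French"))  # runs without an event loop
```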
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/generate_data.py
@@ -1,5 +1,4 @@
 import inspect
-import asyncio
 
 from typing import Callable, Optional, Union, Literal
 
@@ -43,11 +42,18 @@ def generate_data(
     oracle: Optional[Model] = None,
     verbose: Union[Literal[0, 1, 2], bool] = 2
 ):
-    return asyncio.run(generate_data_async(
-        function_pointer,
-        ammount,
-        oracle,
-        verbose))
+    logger: Logger = Logger(verbose=verbose)
+    request_amounts = int(ammount / 100) if ammount > 100 else 1
+
+    logger.log_custom("Data Generation", f"Generating {ammount} examples for function {function_pointer.__name__}")
+    data = LLMSyntheticDataGenerator.generate_synthetic_data(
+        function_metadata=_analyze_function(function_pointer),
+        logger=logger,
+        request_amounts=request_amounts,
+        examples_in_req=int(ammount / request_amounts),
+        model=oracle if oracle is not None else DefaultModelPolicy.get_model()
+    )
+    return HostaDataset.from_list(data, logger)
 
 async def generate_data_async(
     function_pointer: Callable,
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/dataset/oracle.py
@@ -122,7 +122,7 @@ class LLMSyntheticDataGenerator:
 
 
     @staticmethod
-    async def generate_synthetic_data(
+    def generate_synthetic_data(
         function_metadata: FunctionMetadata, # The function to generate data for
         logger: Logger, # Logger to use for logging
         request_amounts: int = 3, # Amount of requests to the model
@@ -188,7 +188,7 @@ class LLMSyntheticDataGenerator:
                 "content": content
             })
 
-            response = await model.api_call_async(
+            response = model.api_call(
                 messages=conversation_history,
                 llm_args={"temperature":1.0},
                json_output=False,
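`generate_data` and `LLMSyntheticDataGenerator.generate_synthetic_data` are synchronous again, so dataset generation can be scripted directly. A sketch; the import path is inferred from the file tree above and `is_prime` is a hypothetical target function:

```python
from OpenHosta.exec.generate_data import generate_data  # path assumed per the tree

def is_prime(n: int) -> bool:
    """Return True when n is prime."""
    ...

# 100 examples fit in a single request batch (request_amounts == 1).
dataset = generate_data(is_prime, 100)
```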
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/exec/predict/predict.py
@@ -1,6 +1,5 @@
 import os
 import shutil
-import asyncio
 
 from pathlib import Path
 from typing import Union, Optional, Literal, Callable
@@ -17,8 +16,6 @@ from ...core.config import Model, DefaultModelPolicy
 from ...core.hosta_inspector import HostaInspector, FunctionMetadata
 from ...core.logger import Logger, ANSIColor
 
-
-
 def predict(
     config: PredictConfig = PredictConfig(),
     oracle: Optional[Union[Model, HostaDataset]] = None,
@@ -39,12 +36,12 @@ def predict(
     assert config is not None, "Please provide a valid configuration not None"
 
     inspection = HostaInspector()
-    return asyncio.run(_predict(
+    return _predict(
         inspection,
         config,
         oracle,
         verbose
-    ))
+    )
 
 async def predict_async(
     config: PredictConfig = PredictConfig(),
@@ -66,7 +63,7 @@ async def predict_async(
     assert config is not None, "Please provide a valid configuration not None"
 
     inspection = HostaInspector()
-    return await _predict(
+    return _predict(
         inspection,
         config,
         oracle,
@@ -74,7 +71,7 @@ async def predict_async(
     )
 
 
-async def _predict(
+def _predict(
     inspection: HostaInspector,
     config: PredictConfig = PredictConfig(),
     oracle: Optional[Union[Model, HostaDataset]] = None,
@@ -95,7 +92,7 @@ async def _predict(
 
     #TODO: is this thread safe?
     if not load_weights(inspection, memory, hosta_model, logger):
-        await train_model(config, memory, hosta_model, oracle, function_metadata, logger)
+        train_model(config, memory, hosta_model, oracle, function_metadata, logger)
 
     if dataset is None:
         dataset = HostaDataset.from_input(function_metadata.f_args, logger, config.max_tokens, function_metadata, memory.dictionary.path)
@@ -215,7 +212,7 @@ def load_weights(inspection: HostaInspector, memory: PredictMemory, hosta_model:
     return False
 
 
-async def train_model(config: PredictConfig,
+def train_model(config: PredictConfig,
                 memory: PredictMemory,
                 model: HostaModel,
                 oracle: Optional[Union[Model, HostaDataset]],
@@ -234,7 +231,7 @@ async def train_model(config: PredictConfig,
 
     else:
         logger.log_custom("Data", "not found", color=ANSIColor.BRIGHT_YELLOW, level=2)
-        train_set, val_set = await prepare_dataset(config, memory, function_metadata, oracle, model, logger)
+        train_set, val_set = prepare_dataset(config, memory, function_metadata, oracle, model, logger)
 
     logger.log_custom("Training", f"epochs: {config.epochs}, batch_size: {config.batch_size}, train_set size: {len(train_set)}, val_set size: {len(val_set)}", color=ANSIColor.BRIGHT_YELLOW, level=2)
 
@@ -252,7 +249,7 @@ async def train_model(config: PredictConfig,
     model.save_weights(memory.weights.path)
 
 
-async def prepare_dataset(config: PredictConfig,
+def prepare_dataset(config: PredictConfig,
                     memory: PredictMemory,
                     function_metadata: FunctionMetadata,
                     oracle: Optional[Union[Model, HostaDataset]],
@@ -273,7 +270,7 @@ async def prepare_dataset(config: PredictConfig,
         dataset = HostaDataset.from_files(path=config.dataset_path, source_type=None, log=logger)
     else :
         logger.log_custom("Dataset", "not found, generate data", color=ANSIColor.BRIGHT_YELLOW, level=2)
-        dataset = await _generate_data(function_metadata, oracle, config, logger)
+        dataset = _generate_data(function_metadata, oracle, config, logger)
         save_path = os.path.join(memory.predict_dir, "generated_data.csv")
         dataset.save(save_path, SourceType.CSV)
         logger.log_custom("Dataset", f"generated and saved at {save_path}", color=ANSIColor.BRIGHT_GREEN, level=2)
@@ -294,7 +291,7 @@ async def prepare_dataset(config: PredictConfig,
     return train_set, val_set
 
 
-async def _generate_data(function_metadata: FunctionMetadata,
+def _generate_data(function_metadata: FunctionMetadata,
                 oracle: Optional[Union[Model, HostaDataset]],
                 config: PredictConfig,
                 logger: Logger) -> HostaDataset:
@@ -303,7 +300,7 @@ async def _generate_data(function_metadata: FunctionMetadata,
     """
     request_amounts = int(config.generated_data / 100) if config.generated_data > 100 else 1
 
-    data = await LLMSyntheticDataGenerator.generate_synthetic_data(
+    data = LLMSyntheticDataGenerator.generate_synthetic_data(
         function_metadata=function_metadata,
         logger=logger,
        request_amounts=request_amounts,
openhosta-2.2.2/src/OpenHosta/exec/thinkof.py (new file)
@@ -0,0 +1,165 @@
+from __future__ import annotations
+
+import json
+
+from pydoc import locate
+
+from ..core.config import DefaultModelPolicy
+from ..core.hosta_inspector import FunctionMetadata
+from ..core.logger import dialog_logger
+from ..utils.errors import RequestError
+from ..utils.meta_prompt import THOUGHT_PROMPT
+
+def return_type(func):
+
+    if not hasattr(func, "_infos"):
+        raise AttributeError("Return type does not exist yet. This is likely because the function has never been called before.")
+
+    return func._infos.f_type[1]
+
+def gather_data_for_prompt_template(
+    _infos: FunctionMetadata
+):
+    user_prompt_data = {
+        "PRE_DEF":_infos.f_def,
+        "PRE_TYPE": _infos.f_type[1],
+        "PRE_SCHEMA": _infos.f_schema,
+        "PRE_FUNCTION_CALL": _infos.f_call
+    }
+
+    return user_prompt_data
+
+def guess_type(key: str, *args) -> object:
+    l_default = DefaultModelPolicy.get_model()
+
+    l_user_prompt = (
+        "Function behavior: "
+        + f"\"{key}\" applyed on "
+        + f"{', '.join([str(arg) for arg in args])}\n"
+    )
+
+    response = l_default.api_call([
+        {"role": "system", "content": THOUGHT_PROMPT.render()},
+        {"role": "user", "content": l_user_prompt}
+    ],
+        llm_args={"temperature":0.2},
+    )
+
+    type_json = response["choices"][0]["message"]["content"]
+    type_dict = json.loads(type_json)
+    type_str = str(type_dict["type"])
+    type_object = locate(type_str)
+    return type_object
+
+
+def thinkof_async(key, model=None, prompt=None, llm_args={}):
+
+    async def inner_func(*args, **kwargs):
+        _model = model
+        _prompt = prompt
+        l_ret = await build_function_async(_model, _prompt, inner_func, key, args, kwargs, llm_args)
+        return l_ret
+
+    return inner_func
+
+def thinkof(query_string, model=None, prompt=None, llm_args={}):
+
+    def inner_func(*args, **kwargs):
+        _model = model
+        _prompt = prompt
+        l_ret = build_function(_model, _prompt, inner_func, query_string, args, kwargs, llm_args)
+        return l_ret
+
+    return inner_func
+
+
+def build_info(inner_func, query_string, *args, **kwargs):
+
+    if not hasattr(inner_func, "_infos"):
+        return_type = guess_type(query_string, *args, **kwargs)
+
+        _infos = FunctionMetadata()
+        _infos.f_schema = {"type": f"{return_type.__name__}"}
+        _infos.f_def = f'''
+```python
+def lambda_function(*argument)->{return_type.__name__}:
+    """
+    {query_string}
+    """
+    ...
+```
+'''
+        _infos.f_call = [str(arg) for arg in args]
+        _infos.f_type = ([], return_type)
+        setattr(inner_func, "_infos", _infos)
+    else:
+        _infos = getattr(inner_func, "_infos")
+
+
+    return _infos
+
+async def build_function_async(model, prompt, inner_func, query_string, args, kwargs, llm_args):
+    _model = model
+    _prompt = prompt
+
+    _infos = build_info(inner_func, query_string, *args, **kwargs)
+
+    prompt_data = gather_data_for_prompt_template(_infos)
+    prompt_data["PRE_FUNCTION_CALL"] = f"lambda_function('" + "', '".join(_infos.f_call) + "')"
+
+    _model, _prompt = DefaultModelPolicy.apply_policy(_model, _prompt, prompt_data)
+
+    prompt_rendered = _prompt.render(prompt_data)
+
+    logger = dialog_logger()
+    logger.set_sys_prompt(prompt_rendered)
+    logger.set_user_prompt(prompt_data["PRE_FUNCTION_CALL"])
+
+    response_dict = await _model.api_call_async([
+        {"role": "system", "content": prompt_rendered},
+        {"role": "user", "content": prompt_data["PRE_FUNCTION_CALL"]}
+    ],
+        llm_args
+    )
+
+    logger.set_response_dict(response_dict)
+
+    l_ret = _model.response_parser(response_dict, _infos)
+    l_data = _model.type_returned_data(l_ret, _infos)
+
+    logger.set_response_data(l_data)
+
+    return l_data
+
+def build_function(model, prompt, inner_func, query_string, args, kwargs, llm_args):
+    _model = model
+    _prompt = prompt
+
+    _infos = build_info(inner_func, query_string, *args, **kwargs)
+
+    prompt_data = gather_data_for_prompt_template(_infos)
+    prompt_data["PRE_FUNCTION_CALL"] = f"lambda_function('" + "', '".join(_infos.f_call) + "')"
+
+    _model, _prompt = DefaultModelPolicy.apply_policy(_model, _prompt, prompt_data)
+
+    prompt_rendered = _prompt.render(prompt_data)
+
+    logger = dialog_logger()
+    logger.set_sys_prompt(prompt_rendered)
+    logger.set_user_prompt(prompt_data["PRE_FUNCTION_CALL"])
+
+    response_dict = _model.api_call([
+        {"role": "system", "content": prompt_rendered},
+        {"role": "user", "content": prompt_data["PRE_FUNCTION_CALL"]}
+    ],
+        llm_args
+    )
+
+    logger.set_response_dict(response_dict)
+
+    l_ret = _model.response_parser(response_dict, _infos)
+    l_data = _model.type_returned_data(l_ret, _infos)
+
+    logger.set_response_data(l_data)
+
+    return l_data
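Beyond dropping `asyncio`, the rewrite splits `build_function` into sync/async variants and exposes `return_type`, which surfaces the Python type that `guess_type` inferred on the first call. A usage sketch (the task string and arguments are illustrative):

```python
from OpenHosta import thinkof, return_type

multiply = thinkof("multiply the two numbers")
print(multiply(6, 7))        # first call runs guess_type, then the LLM call

print(return_type(multiply)) # e.g. <class 'int'>; raises AttributeError
                             # if the function has never been called
```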
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/models/OpenAICompatible.py
@@ -3,6 +3,7 @@ from typing import Any, Dict
 
 import json
 import requests
+import os
 import re
 import sys
 import asyncio
@@ -71,9 +72,13 @@ class Model:
         if json_output is None:
             json_output = self.json_output
 
-        if self.api_key is None or not self.api_key:
-            raise ApiKeyError("[model.api_call] Empty API key.")
+        api_key = self.api_key
+        if api_key is None:
+            api_key = os.environ.get("OPENAI_API_KEY")
 
+        if api_key is None and "api.openai.com/v1" in self.base_url:
+            raise ApiKeyError("[model.api_call] Empty API key.")
+
         l_body = {
             "model": self.model,
             "messages": messages,
@@ -83,9 +88,9 @@ class Model:
         }
 
         if "azure.com" in self.base_url:
-            headers["api-key"] = f"{self.api_key}"
+            headers["api-key"] = f"{api_key}"
         else:
-            headers["Authorization"] = f"Bearer {self.api_key}"
+            headers["Authorization"] = f"Bearer {api_key}"
 
         for key, value in self.user_headers.items():
            headers[key] = value
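`api_call` now resolves the key lazily: an explicit `Model(api_key=...)` still wins, otherwise `OPENAI_API_KEY` is read from the environment at request time, and `ApiKeyError` only fires for `api.openai.com` endpoints, so key-less local servers keep working. Sketch (the key value is a placeholder):

```python
import os
from OpenHosta import Model

os.environ["OPENAI_API_KEY"] = "sk-placeholder"  # read at api_call time, not import time

m = Model(model="gpt-4o", base_url="https://api.openai.com/v1/chat/completions")
# m.api_call([...]) now authenticates via the environment variable;
# a model pointed at a local endpoint needs no key at all.
```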
{openhosta-2.2.0 → openhosta-2.2.2}/src/OpenHosta/utils/meta_prompt.py
@@ -1,4 +1,3 @@
-from typing import Callable
 from jinja2 import Template
 
 class Prompt(Template):
@@ -129,38 +128,3 @@ Expected response: {"type": "list"}
 {{ CTX_EXAMPLE }}
 {% endif %}
 """)
-
-def print_last_prompt(function_pointer:Callable):
-    """
-    Print the last prompt sent to the LLM when using function `function_pointer`.
-    """
-    if hasattr(function_pointer, "_last_request"):
-        if "sys_prompt" in function_pointer._last_request:
-            print("[SYSTEM PROMPT]")
-            print(function_pointer._last_request["sys_prompt"])
-        if "user_prompt" in function_pointer._last_request:
-            print("[USER PROMPT]")
-            print(function_pointer._last_request["user_prompt"])
-    else:
-        print("No prompt found for this function.")
-
-
-def print_last_response(function_pointer:Callable):
-    """
-    Print the last answer recived from the LLM when using function `function_pointer`.
-    """
-    if hasattr(function_pointer, "_last_response"):
-        if "rational" in function_pointer._last_response:
-            print("[THINKING]")
-            print(function_pointer._last_response["rational"])
-        if "answer" in function_pointer._last_response:
-            print("[ANSWER]")
-            print(function_pointer._last_response["answer"])
-        if "data" in function_pointer._last_response:
-            print("[Data]")
-            print(function_pointer._last_response["data"])
-        else:
-            print("[UNFINISHED]")
-            print("answer processing was interupted")
-    else:
-        print("No prompt found for this function.")
{openhosta-2.2.0 → openhosta-2.2.2/src/OpenHosta.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: OpenHosta
-Version: 2.2.0
+Version: 2.2.2
 Summary: A lightweight library integrating LLM natively into Python
 Author: Léandre Ramos, Merlin Devillard, William Jolivet, Emmanuel Batt
 License: MIT License
@@ -80,7 +80,7 @@ Requires-Dist: torch>=2.5.1; extra == "predict"
 Requires-Dist: numpy>=2.1.3; extra == "predict"
 
 # OpenHosta
-v2.2.0 - Open-Source Project
+v2.2.2 - Open-Source Project
 
 <a href="https://colab.research.google.com/github/hand-e-fr/OpenHosta/blob/main/docs/openhosta_mistral_small.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> Basic Usage - local LLM (Mistral-Small-2501)</a>
 <br/>
openhosta-2.2.0/src/OpenHosta/core/config.py (deleted)
@@ -1,43 +0,0 @@
-from __future__ import annotations
-
-import sys
-
-from ..models.OpenAICompatible import Model
-from ..utils.meta_prompt import EMULATE_PROMPT, Prompt
-
-class AlwaysDefaultPolicy:
-    """
-    This policy always use the default prompt with the default model
-    """
-
-    def __init__(self, default_model:Model=None, default_prompt:Prompt=EMULATE_PROMPT):
-        self.model = default_model
-        self.prompt = default_prompt
-
-    def set_default_model(self, new):
-        if isinstance(new, Model):
-            self.model = new
-        else:
-            sys.stderr.write("[CONFIG_ERROR] Invalid model instance.\n")
-
-    def set_default_apiKey(self, api_key=None):
-        if api_key is not None or isinstance(api_key, str):
-            self.model.api_key = api_key
-        else:
-            sys.stderr.write("[CONFIG_ERROR] Invalid API key.")
-
-    def get_model(self):
-        return self.model
-
-    def get_prompt(self):
-        return self.prompt
-
-DefaultModelPolicy = AlwaysDefaultPolicy()
-
-def set_default_model(new):
-    DefaultModelPolicy.set_default_model(new)
-
-
-def set_default_apiKey(api_key:str=None):
-    DefaultModelPolicy.set_default_apiKey(api_key)
-
openhosta-2.2.0/src/OpenHosta/exec/thinkof.py (deleted)
@@ -1,133 +0,0 @@
-from __future__ import annotations
-
-import json
-import asyncio
-
-from pydoc import locate
-
-from ..core.config import DefaultModelPolicy
-from ..core.hosta_inspector import FunctionMetadata
-from ..utils.errors import RequestError
-from ..utils.meta_prompt import THOUGHT_PROMPT
-
-def gather_data_for_prompt_template(
-    _infos: FunctionMetadata
-):
-    user_prompt_data = {
-        "PRE_DEF":_infos.f_def,
-        "PRE_TYPE": _infos.f_type[1],
-        "PRE_SCHEMA": _infos.f_schema,
-        "PRE_FUNCTION_CALL": _infos.f_call
-    }
-
-    return user_prompt_data
-
-async def guess_type(key: str, *args) -> object:
-    l_default = DefaultModelPolicy.get_model()
-
-    l_user_prompt = (
-        "Function behavior: "
-        + f"\"{key}\" applyed on "
-        + f"{', '.join([str(arg) for arg in args])}\n"
-    )
-
-    response = await l_default.api_call_async([
-        {"role": "system", "content": THOUGHT_PROMPT.render()},
-        {"role": "user", "content": l_user_prompt}
-    ],
-        llm_args={"temperature":0.2},
-    )
-
-    type_json = response["choices"][0]["message"]["content"]
-    type_dict = json.loads(type_json)
-    type_str = str(type_dict["type"])
-    type_object = locate(type_str)
-    return type_object
-
-
-def thinkof_async(key, model=None, prompt=None, llm_args={}):
-
-    async def inner_func(*args, **kwargs):
-        _model = model
-        _prompt = prompt
-        l_ret = await build_function(_model, _prompt, inner_func, key, args, kwargs, llm_args)
-        return l_ret
-
-    return inner_func
-
-
-def thinkof(query_string, model=None, prompt=None, llm_args={}):
-
-    def inner_func(*args, **kwargs):
-        _model = model
-        _prompt = prompt
-        l_ret = asyncio.run(build_function(_model, _prompt, inner_func, query_string, args, kwargs, llm_args))
-        return l_ret
-
-    return inner_func
-
-
-async def build_function(model, prompt, inner_func, query_string, args, kwargs, llm_args):
-    _model = model
-    _prompt = prompt
-
-    if not hasattr(inner_func, "_infos"):
-        return_type = await guess_type(query_string, *args, **kwargs)
-
-        _infos = FunctionMetadata()
-        _infos.f_schema = {"type": f"{return_type.__name__}"}
-        _infos.f_def = f'''
-```python
-def lambda_function(*argument)->{return_type.__name__}:
-    """
-    {query_string}
-    """
-    ...
-```
-'''
-        _infos.f_call = [str(arg) for arg in args]
-        _infos.f_type = ([], return_type)
-        setattr(inner_func, "_infos", _infos)
-    else:
-        _infos = getattr(inner_func, "_infos")
-
-    prompt_data = gather_data_for_prompt_template(_infos)
-    prompt_data["PRE_FUNCTION_CALL"] = f"lambda_function(\"{'", "'.join(_infos.f_call) }\")"
-
-    if _model is None:
-        _model = DefaultModelPolicy.get_model()
-
-    if _prompt is None:
-        _prompt = DefaultModelPolicy.get_prompt()
-
-    prompt_rendered = _prompt.render(prompt_data)
-
-    logging_object = {
-        "_last_request": {},
-        "_last_response": {}
-    }
-
-    setattr(inner_func, "_last_request", logging_object["_last_request"])
-
-    logging_object["_last_request"]['sys_prompt']=prompt_rendered
-    logging_object["_last_request"]['user_prompt']=prompt_data["PRE_FUNCTION_CALL"]
-
-    try:
-        response_dict = await _model.api_call_async([
-            {"role": "system", "content": prompt_rendered},
-            {"role": "user", "content": prompt_data["PRE_FUNCTION_CALL"]}
-        ],
-            llm_args
-        )
-
-        logging_object["_last_response"]["response_dict"] = response_dict
-
-        l_ret = _model.response_parser(response_dict, _infos)
-        l_data = _model.type_returned_data(l_ret, _infos)
-
-        logging_object["_last_response"]["data"] = l_data
-        setattr(inner_func, "_last_response", logging_object["_last_response"])
-
-    except Exception as e:
-        raise RequestError(f"[thinkof] Cannot emulate the function.\n{e}")
-    return l_data