llumo 0.2.16b1-py3-none-any.whl → 0.2.16b2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llumo/google.py +57 -25
- llumo/openai.py +1 -2
- {llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/METADATA +1 -1
- {llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/RECORD +7 -7
- {llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/WHEEL +0 -0
- {llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/licenses/LICENSE +0 -0
- {llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/top_level.txt +0 -0
llumo/google.py
CHANGED
@@ -1,34 +1,66 @@
 from google import generativeai as _genai
+from .client import LlumoClient
+
+
+def evaluate_multiple(data, api_key=None, evals=["Response Correctness"]):
+    client = LlumoClient(api_key=api_key)
+    results = client.evaluateMultiple(
+        data,
+        evals=evals,
+        createExperiment=False,
+        prompt_template="Give answer to the query: {{query}}, using context: {{context}}",
+        getDataFrame=False
+    )
+    return results
+
+
+class ChatCompletionWithEval:
+    def __init__(self, response, evaluation):
+        self._response = response
+        self.evaluation = evaluation
+
+    def __getattr__(self, name):
+        return getattr(self._response, name)
+
+    def __getitem__(self, key):
+        return self._response[key]
+
+    def __repr__(self):
+        return repr(self._response)
+
 
 class genai:
-
-
-    >>> from google import genai
-    >>> client = genai.Client(api_key=...)
-    """
-
-    class Client:
-        def __init__(self, api_key: str, default_model: str = "gemini-2.5-flash"):
+    class GenerativeModel:
+        def __init__(self, api_key: str, model: str = "gemini-2.5-flash"):
             _genai.configure(api_key=api_key)
-            self.
-            self.
+            self._api_key = api_key
+            self._model_name = model
+            self._model_instance = _genai.GenerativeModel(model_name=model)
 
-
-
-
+        def generate_content(self, contents: str | list[str], **kwargs):
+            context = kwargs.pop("context", None)
+            evals = kwargs.pop("evals", [])
+            llumo_key = kwargs.pop("llumo_key", None)
 
-
-
-
-            return model_instance.generate_content(contents=contents, **kwargs)
+            # Run Gemini generation
+            response = self._model_instance.generate_content(contents=contents, **kwargs)
+            output = response.text
 
-
+            eval_input = [{
+                "query": contents,
+                "context": context or contents,
+                "output": output,
+            }]
 
-
-
-
+            evaluation = None
+            try:
+                evaluation = evaluate_multiple(data=eval_input, evals=evals, api_key=llumo_key)
+            except Exception as e:
+                evaluation = None
+
+            if evaluation is None:
+                print("Cannot process your request, please check your api and try again later.")
+                return response
+
 
-
-        """Change the default model at runtime."""
-        self._defaultModel = model_name
-        self._defaultModelInstance = _genai.GenerativeModel(model_name=model_name)
+            return ChatCompletionWithEval(response, evaluation)
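The net effect is that google.py now mirrors the shape of the upstream google.generativeai API (a genai class wrapping a GenerativeModel) while attaching a LLUMO evaluation to each generation and delegating everything else to the raw Gemini response. A minimal usage sketch, assuming the module is importable as llumo.google and using placeholder credentials; the kwarg names (context, evals, llumo_key) come straight from the diff, but nothing else here is confirmed by it:

from llumo.google import genai

# Placeholder credentials; substitute real keys (assumption, not from the diff).
model = genai.GenerativeModel(api_key="GEMINI_API_KEY", model="gemini-2.5-flash")

# generate_content pops `context`, `evals`, and `llumo_key` off kwargs before
# forwarding the rest to the underlying Gemini model, then runs the LLUMO
# evaluation on {query, context, output}.
result = model.generate_content(
    "What is the capital of France?",
    context="A geography quiz about European capitals.",
    evals=["Response Correctness"],
    llumo_key="LLUMO_API_KEY",
)

print(result.text)        # attribute access is delegated to the Gemini response
print(result.evaluation)  # present only when the LLUMO evaluation succeeded

Note that when the evaluation fails, generate_content returns the bare Gemini response rather than the wrapper, so callers reading .evaluation should guard for its absence.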
llumo/openai.py
CHANGED
@@ -5,7 +5,6 @@ from .client import LlumoClient
 def evaluate_multiple(data, api_key=None,evals=["Response Correctness"]):
     client = LlumoClient(api_key=api_key)
     results= client.evaluateMultiple(data, evals=evals,createExperiment=False,prompt_template="Give answer to the query: {{query}}, using context: {{context}}",getDataFrame=False)
-    print(results)
     return results
 
 # Wrapper around ChatCompletion to allow custom fields like `.evaluation`
@@ -70,7 +69,7 @@ class openai(OpenAIClient):
 
         # If evaluation is None, just return normal response
         if evaluation is None:
-            print("
+            print("Cannot process your request for evaluation, please check your api and try again later.")
            return response
 
         # Otherwise wrap with evaluation attached
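The openai.py change is twofold: the shared evaluate_multiple helper no longer echoes its results to stdout before returning them, and the previously truncated error message is completed. A sketch of calling the helper directly, with the row keys matching the {{query}}/{{context}} slots of the prompt template in the diff; the credential and the row contents are placeholders:

from llumo.openai import evaluate_multiple

rows = [{
    "query": "What is the capital of France?",
    "context": "A geography quiz about European capitals.",
    "output": "Paris",
}]

# As of 0.2.16b2 the helper returns its results without also
# printing them to stdout.
results = evaluate_multiple(rows, api_key="LLUMO_API_KEY", evals=["Response Correctness"])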
{llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/RECORD
CHANGED
@@ -4,13 +4,13 @@ llumo/client.py,sha256=rOTbw8QGi5CnQ77QKS4rKh-dSBSVoyVAORrK1i_b5EQ,60339
 llumo/exceptions.py,sha256=Vp_MnanHbnd1Yjuoi6WLrKiwwZbJL3znCox2URMmGU4,2032
 llumo/execution.py,sha256=nWbJ7AvWuUPcOb6i-JzKRna_PvF-ewZTiK8skS-5n3w,1380
 llumo/functionCalling.py,sha256=D5jYapu1rIvdIJNUYPYMTyhQ1H-6nkwoOLMi6eekfUE,7241
-llumo/google.py,sha256=
+llumo/google.py,sha256=F-n7DCQ7mvORGyK_yGliL14N0AGyVGmjKgO60FqOOKI,2132
 llumo/helpingFuntions.py,sha256=0W2JNdLyOV92lgESgB_JyJmOUvW5ooRdZyjN5LKDSX0,25296
 llumo/models.py,sha256=aVEZsOOoQx5LeNtwSyBxqvrINq0izH3QWu_YjsMPE6o,2910
-llumo/openai.py,sha256=
+llumo/openai.py,sha256=DGhEwQIJIIycGpw3hYQnyxdj6RFVpZ-gay-fZGqtkhU,3013
 llumo/sockets.py,sha256=I2JO_eNEctRo_ikgvFVp5zDd-m0VDu04IEUhhsa1Tic,5950
-llumo-0.2.
-llumo-0.2.
-llumo-0.2.
-llumo-0.2.
-llumo-0.2.
+llumo-0.2.16b2.dist-info/licenses/LICENSE,sha256=tF9yAcfPV9xGT3ViWmC8hPvOo8BEk4ZICbUfcEo8Dlk,182
+llumo-0.2.16b2.dist-info/METADATA,sha256=5Nys3GS9XTD0fE9K4_1IxjD2YG1JpC1Fl2M1q93orf4,1521
+llumo-0.2.16b2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llumo-0.2.16b2.dist-info/top_level.txt,sha256=d5zUTMI99llPtLRB8rtSrqELm_bOqX-bNC5IcwlDk88,6
+llumo-0.2.16b2.dist-info/RECORD,,
{llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/WHEEL
File without changes

{llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/licenses/LICENSE
File without changes

{llumo-0.2.16b1.dist-info → llumo-0.2.16b2.dist-info}/top_level.txt
File without changes