llumo 0.2.16b1__tar.gz → 0.2.16b2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llumo-0.2.16b1/llumo.egg-info → llumo-0.2.16b2}/PKG-INFO +1 -1
- llumo-0.2.16b2/llumo/google.py +66 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/openai.py +1 -2
- {llumo-0.2.16b1 → llumo-0.2.16b2/llumo.egg-info}/PKG-INFO +1 -1
- llumo-0.2.16b1/llumo/google.py +0 -34
- {llumo-0.2.16b1 → llumo-0.2.16b2}/LICENSE +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/MANIFEST.in +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/README.md +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/__init__.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/chains.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/client.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/exceptions.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/execution.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/functionCalling.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/helpingFuntions.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/models.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/sockets.py +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo.egg-info/SOURCES.txt +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo.egg-info/dependency_links.txt +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo.egg-info/requires.txt +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/llumo.egg-info/top_level.txt +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/setup.cfg +0 -0
- {llumo-0.2.16b1 → llumo-0.2.16b2}/setup.py +0 -0
llumo-0.2.16b2/llumo/google.py
ADDED
@@ -0,0 +1,66 @@
+from google import generativeai as _genai
+from .client import LlumoClient
+
+
+def evaluate_multiple(data, api_key=None, evals=["Response Correctness"]):
+    client = LlumoClient(api_key=api_key)
+    results = client.evaluateMultiple(
+        data,
+        evals=evals,
+        createExperiment=False,
+        prompt_template="Give answer to the query: {{query}}, using context: {{context}}",
+        getDataFrame=False
+    )
+    return results
+
+
+class ChatCompletionWithEval:
+    def __init__(self, response, evaluation):
+        self._response = response
+        self.evaluation = evaluation
+
+    def __getattr__(self, name):
+        return getattr(self._response, name)
+
+    def __getitem__(self, key):
+        return self._response[key]
+
+    def __repr__(self):
+        return repr(self._response)
+
+
+class genai:
+    class GenerativeModel:
+        def __init__(self, api_key: str, model: str = "gemini-2.5-flash"):
+            _genai.configure(api_key=api_key)
+            self._api_key = api_key
+            self._model_name = model
+            self._model_instance = _genai.GenerativeModel(model_name=model)
+
+        def generate_content(self, contents: str | list[str], **kwargs):
+            context = kwargs.pop("context", None)
+            evals = kwargs.pop("evals", [])
+            llumo_key = kwargs.pop("llumo_key", None)
+
+            # Run Gemini generation
+            response = self._model_instance.generate_content(contents=contents, **kwargs)
+            output = response.text
+
+            eval_input = [{
+                "query": contents,
+                "context": context or contents,
+                "output": output,
+            }]
+
+            evaluation = None
+            try:
+                evaluation = evaluate_multiple(data=eval_input, evals=evals, api_key=llumo_key)
+            except Exception as e:
+                evaluation = None
+
+            if evaluation is None:
+                print("Cannot process your request, please check your api and try again later.")
+                return response
+
+
+            return ChatCompletionWithEval(response, evaluation)
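For orientation, a minimal usage sketch of the wrapper added above, assuming the published import path llumo.google; the key values, prompt, and context string are placeholders, not taken from the diff:

from llumo.google import genai

model = genai.GenerativeModel(api_key="GEMINI_API_KEY", model="gemini-2.5-flash")

# The wrapper pops `context`, `evals`, and `llumo_key` from kwargs before
# forwarding the call to Gemini, then sends {query, context, output} to LLUMO.
response = model.generate_content(
    "What is the capital of France?",
    context="A geography quiz about European capitals.",
    evals=["Response Correctness"],
    llumo_key="LLUMO_API_KEY",
)

print(response.text)        # attribute access is proxied to the Gemini response
print(response.evaluation)  # evaluation results; absent if the eval call failed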
{llumo-0.2.16b1 → llumo-0.2.16b2}/llumo/openai.py
@@ -5,7 +5,6 @@ from .client import LlumoClient
 def evaluate_multiple(data, api_key=None,evals=["Response Correctness"]):
     client = LlumoClient(api_key=api_key)
     results= client.evaluateMultiple(data, evals=evals,createExperiment=False,prompt_template="Give answer to the query: {{query}}, using context: {{context}}",getDataFrame=False)
-    print(results)
     return results
 
 # Wrapper around ChatCompletion to allow custom fields like `.evaluation`
@@ -70,7 +69,7 @@ class openai(OpenAIClient):
 
         # If evaluation is None, just return normal response
         if evaluation is None:
-            print("
+            print("Cannot process your request for evaluation, please check your api and try again later.")
             return response
 
         # Otherwise wrap with evaluation attached
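For completeness, a hedged sketch of calling evaluate_multiple directly; the record shape mirrors the eval_input built in google.py, and the key value is a placeholder:

from llumo.openai import evaluate_multiple

records = [{
    "query": "What is the capital of France?",
    "context": "A geography quiz about European capitals.",
    "output": "Paris",
}]

# As of 0.2.16b2 the results are returned without the debug print removed above.
results = evaluate_multiple(records, api_key="LLUMO_API_KEY",
                            evals=["Response Correctness"])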
llumo-0.2.16b1/llumo/google.py
DELETED
@@ -1,34 +0,0 @@
-from google import generativeai as _genai
-
-class genai:
-    """
-    Top-level wrapper module to mimic:
-    >>> from google import genai
-    >>> client = genai.Client(api_key=...)
-    """
-
-    class Client:
-        def __init__(self, api_key: str, default_model: str = "gemini-2.5-flash"):
-            _genai.configure(api_key=api_key)
-            self._defaultModel = default_model
-            self._defaultModelInstance = _genai.GenerativeModel(model_name=default_model)
-
-            class Models:
-                def __init__(self, outer):
-                    self._outer = outer
-
-                def generate_content(self, contents: str | list[str], model: str = None, **kwargs):
-                    model_name = model or self._outer._defaultModel
-                    model_instance = _genai.GenerativeModel(model_name=model_name)
-                    return model_instance.generate_content(contents=contents, **kwargs)
-
-            self.models = Models(self)
-
-        def generate(self, prompt: str | list[str], **kwargs):
-            """Convenience shortcut for single-line generation."""
-            return self._defaultModelInstance.generate_content(prompt, **kwargs)
-
-        def setDefaultModel(self, model_name: str):
-            """Change the default model at runtime."""
-            self._defaultModel = model_name
-            self._defaultModelInstance = _genai.GenerativeModel(model_name=model_name)
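Taken together with the added file above, call sites migrate roughly as follows; this is a sketch based only on the two versions of google.py, with placeholder values:

# 0.2.16b1 (removed): client-style wrapper
# client = genai.Client(api_key="GEMINI_API_KEY", default_model="gemini-2.5-flash")
# response = client.models.generate_content("Hello", model="gemini-2.5-flash")

# 0.2.16b2 (added): model-style wrapper with built-in LLUMO evaluation
model = genai.GenerativeModel(api_key="GEMINI_API_KEY", model="gemini-2.5-flash")
response = model.generate_content("Hello")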