ws-bom-robot-app 0.0.85__py3-none-any.whl → 0.0.86__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ws_bom_robot_app/config.py +3 -1
- ws_bom_robot_app/llm/api.py +65 -3
- ws_bom_robot_app/llm/evaluator.py +319 -0
- ws_bom_robot_app/llm/models/api.py +1 -1
- ws_bom_robot_app/llm/providers/llm_manager.py +27 -9
- ws_bom_robot_app/llm/utils/download.py +22 -22
- ws_bom_robot_app/task_manager.py +14 -10
- {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/METADATA +20 -20
- {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/RECORD +11 -10
- {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/WHEEL +0 -0
- {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/config.py
CHANGED
@@ -36,11 +36,12 @@ class Settings(BaseSettings):
   OLLAMA_API_URL: str = 'http://localhost:11434'
   GROQ_API_KEY: str = ''
   GOOGLE_API_KEY: str = ''
+  GOOGLE_APPLICATION_CREDENTIALS: str = '' # path to google credentials iam file, e.g. ./.secrets/google-credentials.json
   WATSONX_URL: str = ''
   WATSONX_APIKEY: str = ''
   WATSONX_PROJECTID: str = ''
   NEBULY_API_URL: str ='https://backend.nebuly.com/'
-
+  LANGSMITH_API_KEY: str = '' # app-wide api key to run evaluation
   model_config = ConfigDict(
     env_file='./.env',
     extra='ignore',
@@ -61,6 +62,7 @@ class Settings(BaseSettings):
     os.environ["WATSONX_APIKEY"] = self.WATSONX_APIKEY
     os.environ["WATSONX_PROJECTID"] = self.WATSONX_PROJECTID
     os.environ["NEBULY_API_URL"] = self.NEBULY_API_URL
+    os.environ["LANGSMITH_API_KEY"] = self.LANGSMITH_API_KEY
     # dir
     os.makedirs(self.robot_data_folder, exist_ok=True)
     for subfolder in [self.robot_data_db_folder, self.robot_data_attachment_folder, 'db']:
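
The two new settings follow the file's existing pattern: each field is read from the environment (or ./.env) by pydantic-settings and then mirrored into os.environ so downstream SDKs pick it up implicitly. A minimal sketch of that flow, assuming the mirroring runs at startup as the surrounding os.environ assignments suggest (the .env values are illustrative):

```python
# .env (illustrative values)
#   GOOGLE_APPLICATION_CREDENTIALS=./.secrets/google-credentials.json
#   LANGSMITH_API_KEY=lsv2_pt_...

import os
from ws_bom_robot_app.config import config  # module-level Settings instance, as imported by evaluator.py

# after settings initialization the key is available process-wide,
# which is how evaluator.py's bare `Client()` authenticates to LangSmith
assert os.environ["LANGSMITH_API_KEY"] == config.LANGSMITH_API_KEY
```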
ws_bom_robot_app/llm/api.py
CHANGED
@@ -1,7 +1,8 @@
-from typing import Annotated, Any, Mapping
+from typing import Annotated, Any, Mapping, Union
 from fastapi import APIRouter, HTTPException, Request, Header, Body
 from fastapi.responses import StreamingResponse
 from ws_bom_robot_app.llm.agent_description import AgentDescriptor
+from ws_bom_robot_app.llm.evaluator import EvaluatorRunRequest
 from ws_bom_robot_app.llm.models.api import InvokeRequest, StreamRequest, RulesRequest, KbRequest, VectorDbResponse
 from ws_bom_robot_app.llm.main import invoke, stream
 from ws_bom_robot_app.llm.models.base import IdentifiableEntity
@@ -52,7 +53,7 @@ async def _kb(rq: KbRequest) -> VectorDbResponse:
 
 @router.post("/kb/task")
 async def _kb_task(rq: KbRequest, headers: Annotated[TaskHeader, Header()]) -> IdentifiableEntity:
-  return task_manager.create_task(lambda: kb(rq),headers)
+  return task_manager.create_task(lambda: kb(rq),headers, queue="slow")
 
 @router.post("/rules")
 async def _rules(rq: RulesRequest) -> VectorDbResponse:
@@ -60,7 +61,7 @@ async def _rules(rq: RulesRequest) -> VectorDbResponse:
 
 @router.post("/rules/task")
 async def _rules_task(rq: RulesRequest, headers: Annotated[TaskHeader, Header()]) -> IdentifiableEntity:
-  return task_manager.create_task(lambda: rules(rq), headers)
+  return task_manager.create_task(lambda: rules(rq), headers, queue="fast")
 
 @router.get("/kb/file/{filename}")
 async def _kb_get_file(filename: str) -> StreamingResponse:
@@ -115,3 +116,64 @@ async def _send_feedback(feedback: FeedbackConfig):
   strategy: FeedbackInterface = strategy_cls(feedback)
   result = strategy.send_feedback()
   return {"result": result}
+
+#region evaluate
+@router.get("/evaluation/datasets", tags=["evaluation"])
+async def _evaluation_datasets():
+  from ws_bom_robot_app.llm.evaluator import EvaluatorDataSets
+  return [ds for ds in EvaluatorDataSets.all()]
+
+@router.post("/evaluation/datasets/find", tags=["evaluation"])
+async def _evaluation_find_datasets(project: str):
+  from ws_bom_robot_app.llm.evaluator import EvaluatorDataSets
+  return [ds for ds in EvaluatorDataSets.find(project)]
+
+@router.get("/evaluation/datasets/{id}", tags=["evaluation"])
+async def _evaluation_datasets_by_id(id: str):
+  from ws_bom_robot_app.llm.evaluator import EvaluatorDataSets
+  return EvaluatorDataSets.example(id)
+
+@router.get("/evaluation/evaluators", tags=["evaluation"])
+async def _evaluation_evaluators() -> list:
+  from ws_bom_robot_app.llm.evaluator import EvaluatorType
+  return EvaluatorType.all()
+
+@router.post("/evaluation/run", tags=["evaluation"])
+async def _evaluate(rq: EvaluatorRunRequest):
+  from ws_bom_robot_app.llm.evaluator import Evaluator, EvaluatorType
+  from langsmith.schemas import Dataset, Example
+
+  _data: Union[Dataset, list[Example]] = None
+  if rq.example and any(rq.example):
+    _examples: list[Example] = filter(lambda ex: str(ex.id) in [str(e.get("id")) for e in rq.example],
+      await _evaluation_datasets_by_id(rq.example[0].get("dataset_id"))
+      )
+
+    _data = list(_examples)
+  else:
+    _data = Dataset(**rq.dataset)
+  evaluator = Evaluator(
+    rq=rq.rq,
+    data=_data,
+    judge_model=rq.judge
+  )
+
+  if not rq.evaluators is None and any(rq.evaluators):
+    def __convert_evaluator_type(evaluator: str) -> EvaluatorType:
+      try:
+        return EvaluatorType[evaluator.upper()]
+      except KeyError:
+        pass
+    _evaluators = []
+    _evaluators.extend(__convert_evaluator_type(evaluator) for evaluator in rq.evaluators)
+    if not any(_evaluators):
+      _evaluators = None
+  else:
+    _evaluators = None
+  result = await evaluator.run(evaluators=_evaluators)
+  return result
+
+@router.post("/evaluation/run/task", tags=["evaluation"])
+async def _evaluate_task(rq: EvaluatorRunRequest, headers: Annotated[TaskHeader, Header()]) -> IdentifiableEntity:
+  return task_manager.create_task(lambda: _evaluate(rq), headers, queue="fast")
+#endregion evaluate
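
A sketch of driving the new run endpoint over HTTP. The /api/llm prefix is inferred from the stream/raw URL in evaluator.py's target_http, and all payload values are illustrative (the dataset dict must satisfy langsmith.schemas.Dataset, and rq a StreamRequest):

```python
import requests

payload = {
    # hydrated into langsmith.schemas.Dataset(**dataset) when no examples are given
    "dataset": {"id": "11111111-1111-1111-1111-111111111111", "name": "my-app"},
    # StreamRequest under test; its messages are replaced per example by _parse_rq
    "rq": {"provider": "openai", "model": "gpt-4o-mini", "messages": []},
    # mapped to EvaluatorType via EvaluatorType[name.upper()]
    "evaluators": ["correctness", "helpfulness"],
    "judge": "openai:o4-mini",
}
rs = requests.post("http://localhost:6001/api/llm/evaluation/run", json=payload)
print(rs.json())  # {"experiment": {...}, "overall_score": ..., "evaluator_scores": {...}}
```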
ws_bom_robot_app/llm/evaluator.py
ADDED
@@ -0,0 +1,319 @@
+from uuid import UUID
+import requests, base64
+from typing import Iterator, Optional, List, Union
+from enum import Enum
+from ws_bom_robot_app.config import config
+from ws_bom_robot_app.llm.models.api import LlmMessage, StreamRequest
+from langsmith import Client, traceable
+from langsmith.schemas import Dataset, Example, Feedback, Run
+from openevals.llm import create_llm_as_judge
+from openevals.prompts import CORRECTNESS_PROMPT, RAG_HELPFULNESS_PROMPT, CONCISENESS_PROMPT, RAG_GROUNDEDNESS_PROMPT, HALLUCINATION_PROMPT
+from pydantic import BaseModel
+
+ls_client = Client()
+
+class EvaluatorType(Enum):
+  """Available evaluator types"""
+  CORRECTNESS = "correctness"
+  HELPFULNESS = "helpfulness"
+  CONCISENESS = "conciseness"
+  RAG_GROUNDEDNESS = "rag_groundedness"
+  RAG_HALLUCINATION = "rag_hallucination"
+
+  @classmethod
+  def all(cls) -> List['EvaluatorType']:
+    """Get all available evaluator types"""
+    return list(cls)
+
+  @classmethod
+  def default(cls) -> List['EvaluatorType']:
+    """Get default evaluator types"""
+    return [cls.CORRECTNESS]
+
+class EvaluatorDataSets:
+
+  @classmethod
+  def all(cls) -> List[Dataset]:
+    return list(ls_client.list_datasets())
+  @classmethod
+  def find(cls, name: str) -> List[Dataset]:
+    return [d for d in cls.all() if d.name.lower().__contains__(name.lower())]
+  @classmethod
+  def get(cls, id: Union[str, UUID]) -> Optional[Dataset]:
+    return next((d for d in cls.all() if str(d.id) == str(id)), None)
+  @classmethod
+  def create(cls, name: str) -> Dataset:
+    return ls_client.create_dataset(name=name)
+  @classmethod
+  def delete(cls, id: str) -> None:
+    ls_client.delete_dataset(id=id)
+  @classmethod
+  def example(cls, id: str) -> List[Example]:
+    return list(ls_client.list_examples(dataset_id=id, include_attachments=True))
+  @classmethod
+  def add_example(cls, dataset_id: str, inputs: dict, outputs: dict) -> Example:
+    """Add an example to the dataset.
+    Args:
+      inputs (dict): The input data for the example.
+      outputs (dict): The output data for the example.
+    Sample:
+      - inputs: {"question": "What is the capital of France?"}
+        outputs: {"answer": "Paris"}
+    """
+    return ls_client.create_example(dataset_id=dataset_id, inputs=inputs, outputs=outputs)
+  @classmethod
+  def feedback(cls, experiment_name: str) -> Iterator[Feedback]:
+    return ls_client.list_feedback(
+      run_ids=[r.id for r in ls_client.list_runs(project_name=experiment_name)]
+    )
+
+class Evaluator:
+  def __init__(self, rq: StreamRequest, data: Union[Dataset,List[Example]], judge_model: Optional[str] = None):
+    """Evaluator class for assessing model performance.
+
+    Args:
+      rq (StreamRequest): The request object containing input data.
+      data (Union[Dataset, List[Example]]): The dataset to use for evaluation or a list of examples.
+      judge_model (Optional[str], optional): The model to use for evaluation, defaults to "openai:o4-mini".
+        For a list of available models, see the LangChain documentation:
+        https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html
+    """
+    self.judge_model: str = judge_model or "openai:o4-mini"
+    self.data = data
+    self.rq: StreamRequest = rq
+
+  #region evaluators
+
+  def _get_evaluator_function(self, evaluator_type: EvaluatorType):
+    """Get the evaluator function for a given type"""
+    evaluator_map = {
+      EvaluatorType.CORRECTNESS: self.correctness_evaluator,
+      EvaluatorType.HELPFULNESS: self.helpfulness_evaluator,
+      EvaluatorType.CONCISENESS: self.conciseness_evaluator,
+      EvaluatorType.RAG_GROUNDEDNESS: self.rag_groundedness_evaluator,
+      EvaluatorType.RAG_HALLUCINATION: self.rag_hallucination_evaluator,
+    }
+    return evaluator_map.get(evaluator_type)
+
+  def correctness_evaluator(self, inputs: dict, outputs: dict, reference_outputs: dict):
+    evaluator = create_llm_as_judge(
+      prompt=CORRECTNESS_PROMPT,
+      feedback_key="correctness",
+      model=self.judge_model,
+      continuous=True,
+      choices=[i/10 for i in range(11)]
+    )
+    return evaluator(
+      inputs=inputs,
+      outputs=outputs,
+      reference_outputs=reference_outputs
+    )
+
+  def helpfulness_evaluator(self, inputs: dict, outputs: dict):
+    evaluator = create_llm_as_judge(
+      prompt=RAG_HELPFULNESS_PROMPT,
+      feedback_key="helpfulness",
+      model=self.judge_model,
+      continuous=True,
+      choices=[i/10 for i in range(11)]
+    )
+    return evaluator(
+      inputs=inputs,
+      outputs=outputs,
+    )
+
+  def conciseness_evaluator(self, inputs: dict, outputs: dict, reference_outputs: dict):
+    evaluator = create_llm_as_judge(
+      prompt=CONCISENESS_PROMPT,
+      feedback_key="conciseness",
+      model=self.judge_model,
+      continuous=True,
+      choices=[i/10 for i in range(11)]
+    )
+    return evaluator(
+      inputs=inputs,
+      outputs=outputs,
+      reference_outputs=reference_outputs
+    )
+
+  def _find_retrievers(self, run: Run) -> List[Run]:
+    retrievers = []
+    for child in getattr(run, "child_runs", []):
+      if child.run_type == "retriever":
+        retrievers.append(child)
+      retrievers.extend(self._find_retrievers(child))
+    return retrievers
+
+  def _retriever_documents(self, retrievers_run: List[Run]) -> str:
+    unique_contents = set()
+    for r in retrievers_run:
+      for doc in r.outputs.get("documents", []):
+        unique_contents.add(doc.page_content)
+    return "\n\n".join(unique_contents)
+
+  def rag_groundedness_evaluator(self, run: Run):
+    evaluator = create_llm_as_judge(
+      prompt=RAG_GROUNDEDNESS_PROMPT,
+      feedback_key="rag_groundedness",
+      model=self.judge_model,
+      continuous=True,
+      choices=[i/10 for i in range(11)]
+    )
+    retrievers_run = self._find_retrievers(run)
+    if retrievers_run:
+      try:
+        return evaluator(
+          outputs=run.outputs["answer"],
+          context=self._retriever_documents(retrievers_run)
+        )
+      except Exception as e:
+        return 0.0
+    else:
+      return 0.0
+
+  def rag_hallucination_evaluator(self, inputs: dict, outputs: dict, reference_outputs: dict, run: Run):
+    evaluator = create_llm_as_judge(
+      prompt=HALLUCINATION_PROMPT,
+      feedback_key="rag_hallucination",
+      model=self.judge_model,
+      continuous=True,
+      choices=[i/10 for i in range(11)]
+    )
+    retrievers_run = self._find_retrievers(run)
+    if retrievers_run:
+      try:
+        return evaluator(
+          inputs=inputs['question'],
+          outputs=outputs['answer'],
+          reference_outputs=reference_outputs['answer'],
+          context=self._retriever_documents(retrievers_run)
+        )
+      except Exception as e:
+        return 0.0
+    else:
+      return 0.0
+
+  #endregion evaluators
+
+  #region target
+  def _parse_rq(self, inputs: dict, attachments: dict) -> StreamRequest:
+    _rq = self.rq.__deepcopy__()
+    if not attachments is None and len(attachments) > 0:
+      _content = []
+      _content.append({"type": "text", "text": inputs["question"]})
+      for k,v in attachments.items():
+        if isinstance(v, dict):
+          _content.append({"type": ("image" if "image" in v.get("mime_type","") else "file"), "url": v.get("presigned_url","")})
+      _rq.messages = [LlmMessage(role="user", content=_content)]
+    else:
+      _rq.messages = [LlmMessage(role="user", content=inputs["question"])]
+    return _rq
+
+  @traceable(run_type="chain",name="stream_internal")
+  async def target_internal(self,inputs: dict, attachments: dict) -> dict:
+    from ws_bom_robot_app.llm.main import stream
+    from unittest.mock import Mock
+    from fastapi import Request
+    _ctx = Mock(spec=Request)
+    _ctx.base_url.return_value = "http://evaluator"
+    _rq = self._parse_rq(inputs, attachments)
+    _chunks = []
+    async for chunk in stream(rq=_rq, ctx=_ctx, formatted=False):
+      _chunks.append(chunk)
+    _content = ''.join(_chunks) if _chunks else ""
+    del _rq, _chunks
+    return { "answer": _content.strip() }
+
+  @traceable(run_type="chain",name="stream_http")
+  async def target_http(self,inputs: dict, attachments: dict) -> dict:
+    _rq = self._parse_rq(inputs, attachments)
+    _host= "http://localhost:6001"
+    _endpoint = f"{_host}/api/llm/stream/raw"
+    _robot_auth =f"Basic {base64.b64encode((config.robot_user + ':' + config.robot_password).encode('utf-8')).decode('utf-8')}"
+    _rs = requests.post(_endpoint, data=_rq.model_dump_json(), stream=True, headers={"Authorization": _robot_auth}, verify=True)
+    _content = ''.join([chunk.decode('utf-8') for chunk in _rs.iter_content(chunk_size=1024, decode_unicode=False)])
+    del _rq, _rs
+    return { "answer": _content.strip() }
+  #endregion target
+
+  async def run(self,
+    evaluators: Optional[List[EvaluatorType]] = None,
+    target_method: str = "target_internal") -> dict:
+    """Run evaluation with specified evaluators
+
+    Args:
+      evaluators: List of evaluator types to use. If None, uses default (correctness only)
+      target_method: Method to use for target evaluation ("target_internal" or "target_http")
+
+    Returns:
+      dict: Evaluation results with scores
+
+    Usage:
+      ```
+      await evaluator.run() # Uses default (correctness only)
+      await evaluator.run([EvaluatorType.CORRECTNESS, EvaluatorType.HELPFULNESS])
+      await evaluator.run(EvaluatorType.all()) # Uses all available evaluators
+      ```
+    """
+    try:
+      # evaluator functions
+      evaluator_functions = []
+      if evaluators is None:
+        evaluators = EvaluatorType.default()
+      for eval_type in evaluators:
+        func = self._get_evaluator_function(eval_type)
+        if func:
+          evaluator_functions.append(func)
+        else:
+          print(f"Warning: Unknown evaluator type: {eval_type}")
+      if not evaluator_functions:
+        print("No valid evaluators provided, using default (correctness)")
+        evaluator_functions = [self.correctness_evaluator]
+
+      # target method
+      target_func = getattr(self, target_method, self.target_internal)
+
+      # run
+      _dataset: Dataset = self.data if isinstance(self.data, Dataset) else EvaluatorDataSets.get(self.data[0].dataset_id)
+      experiment = await ls_client.aevaluate(
+        target_func,
+        data=_dataset.name if isinstance(self.data, Dataset) else self.data,
+        evaluators=evaluator_functions,
+        experiment_prefix=_dataset.name,
+        upload_results=True,
+        max_concurrency=4,
+        metadata={
+          "app": _dataset.name,
+          "model": f"{self.rq.provider}:{self.rq.model}",
+          "judge": self.judge_model,
+          "evaluators": [e.value for e in evaluators]
+        }
+      )
+      feedback = list(EvaluatorDataSets.feedback(experiment.experiment_name))
+      scores = [f.score for f in feedback]
+      url = f"{ls_client._host_url}/o/{ls_client._tenant_id}/datasets/{_dataset.id}/compare?selectedSessions={feedback[0].session_id}"
+
+      # group scores by evaluator type
+      evaluator_scores = {}
+      for i, eval_type in enumerate(evaluators):
+        eval_scores = [f.score for f in feedback if f.key.lower() == eval_type.value.lower()]
+        if eval_scores:
+          evaluator_scores[eval_type.value] = sum(eval_scores) / len(eval_scores)
+
+      return {
+        "experiment": {"name": experiment.experiment_name, "url": url},
+        "overall_score": sum(scores) / len(scores) if scores else 0,
+        "evaluator_scores": evaluator_scores
+      }
+    except Exception as e:
+      from traceback import print_exc
+      print(f"Error occurred during evaluation: {e}")
+      print_exc()
+      return {"error": str(e)}
+
+class EvaluatorRunRequest(BaseModel):
+  dataset: dict
+  rq: StreamRequest
+  example: Optional[List[dict]] = None
+  evaluators: Optional[List[str]] = None
+  judge: Optional[str] = None
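
For in-process use, a sketch of the call pattern distilled from the run() docstring; the StreamRequest construction is an assumption (this module only relies on its provider, model, and messages fields):

```python
import asyncio
from ws_bom_robot_app.llm.evaluator import Evaluator, EvaluatorType, EvaluatorDataSets
from ws_bom_robot_app.llm.models.api import StreamRequest

async def main():
    # assumes a LangSmith dataset whose name contains "my-app" already exists
    dataset = EvaluatorDataSets.find("my-app")[0]
    rq = StreamRequest(provider="openai", model="gpt-4o-mini", messages=[])  # illustrative fields
    evaluator = Evaluator(rq=rq, data=dataset)  # judge_model defaults to "openai:o4-mini"
    result = await evaluator.run(evaluators=[EvaluatorType.CORRECTNESS, EvaluatorType.HELPFULNESS])
    print(result.get("overall_score"), result.get("evaluator_scores"))

asyncio.run(main())
```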
ws_bom_robot_app/llm/models/api.py
CHANGED
@@ -163,7 +163,7 @@ class LlmApp(BaseModel):
     return list(set(
       os.path.basename(db) for db in [self.vector_db] +
       ([self.rules.vector_db] if self.rules and self.rules.vector_db else []) +
-      [db for tool in (self.app_tools or []) for db in [tool.vector_db]]
+      [db for tool in (self.app_tools or []) for db in [tool.vector_db] if tool.is_active]
       if db is not None
     ))
   def __decompress_zip(self,zip_file_path, extract_to):
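
The one-line change excludes vector dbs belonging to deactivated tools. A self-contained sketch of the comprehension's behavior with hypothetical tool stubs:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Tool:  # hypothetical stand-in for an app_tools entry
    vector_db: Optional[str]
    is_active: bool

app_tools = [Tool("db_a.zip", True), Tool("db_b.zip", False), Tool(None, True)]
# mirrors the updated inner comprehension: inactive tools are now skipped
dbs = [db for tool in (app_tools or []) for db in [tool.vector_db] if tool.is_active]
print(dbs)  # ['db_a.zip', None]; the enclosing `if db is not None` then drops the None
```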
ws_bom_robot_app/llm/providers/llm_manager.py
CHANGED
@@ -1,4 +1,5 @@
 from typing import Optional
+from urllib.parse import urlparse
 from langchain_core.embeddings import Embeddings
 from langchain_core.language_models import BaseChatModel
 from pydantic import BaseModel, ConfigDict, Field
@@ -69,7 +70,8 @@ class Anthropic(LlmInterface):
       model=self.config.model,
       temperature=self.config.temperature,
       max_tokens=8192,
-      streaming=True
+      streaming=True,
+      #betas=["files-api-2025-04-14"] #https://docs.anthropic.com/en/docs/build-with-claude/files
     )
 
   """
@@ -86,15 +88,24 @@ class Anthropic(LlmInterface):
     response = client.models.list()
     return response.data
 
+  """
   async def _format_multimodal_image_message(self, message: dict) -> dict:
     file = await Base64File.from_url(message.get("url"))
     return { "type": "image_url", "image_url": { "url": file.base64_url }}
+  """
+
+  #https://python.langchain.com/docs/integrations/chat/anthropic/
+  #https://python.langchain.com/docs/how_to/multimodal_inputs/
   async def _format_multimodal_file_message(self, message: dict, file: Base64File = None) -> dict:
-
-    if
-    return {"type": "
+    _url = str(message.get("url", "")).lower()
+    if _url.startswith("http") and any(urlparse(_url).path.endswith(ext) for ext in [".pdf"]):
+      return {"type": "file", "source_type": "url", "url": _url}
     else:
-
+      _file = file or await Base64File.from_url(_url)
+      if _file.extension in ["pdf"]:
+        return {"type": "document", "source": {"type": "base64", "media_type": _file.mime_type, "data": _file.base64_content}}
+      else:
+        return await super()._format_multimodal_file_message(message, _file)
 
 class OpenAI(LlmInterface):
   def __init__(self, config: LlmConfig):
@@ -188,7 +199,7 @@ class Google(LlmInterface):
     else:
       return await super()._format_multimodal_file_message(message, _file)
 
-class
+class GoogleVertex(LlmInterface):
   def get_llm(self):
     from langchain_google_vertexai import ChatVertexAI
     return ChatVertexAI(
@@ -332,7 +343,7 @@ class Ollama(LlmInterface):
     from langchain_ollama.embeddings import OllamaEmbeddings
     return OllamaEmbeddings(
       base_url=self.__base_url,
-      model="
+      model="mxbai-embed-large" #nomic-embed-text
     )
   def get_models(self):
     import requests
@@ -354,13 +365,20 @@ class Ollama(LlmInterface):
     return { "type": "image_url", "image_url": { "url": file.base64_url }}
 
 class LlmManager:
+  """
+  Expose the available LLM providers.
+  Names are aligned with the LangChain documentation:
+  https://python.langchain.com/api_reference/langchain/chat_models/langchain.chat_models.base.init_chat_model.html
+  """
 
   #class variables (static)
   _list: dict[str,LlmInterface] = {
     "anthropic": Anthropic,
     "deepseek": DeepSeek,
-    "google": Google,
-    "
+    "google": Google, #deprecated
+    "google_genai": Google,
+    "gvertex": GoogleVertex,#deprecated
+    "google_vertexai": GoogleVertex,
     "groq": Groq,
     "ibm": IBM,
     "openai": OpenAI,
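
With the aliases in place, both the deprecated keys and the init_chat_model-aligned names resolve to the same provider class; a sketch using the registry directly:

```python
from ws_bom_robot_app.llm.providers.llm_manager import LlmManager

# deprecated and new names point at the same classes
assert LlmManager._list["google"] is LlmManager._list["google_genai"]       # Google
assert LlmManager._list["gvertex"] is LlmManager._list["google_vertexai"]   # GoogleVertex

provider_cls = LlmManager._list["google_vertexai"]  # construction then follows LlmConfig as elsewhere in the file
```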
ws_bom_robot_app/llm/utils/download.py
CHANGED
@@ -1,3 +1,4 @@
+import httpx
 from typing import List,Optional
 import os, logging, aiohttp, asyncio, hashlib, json
 import uuid
@@ -34,14 +35,13 @@ async def download_file(url: str, destination: str, chunk_size: int = 8192, auth
   # Ensure the destination directory exists
   os.makedirs(os.path.dirname(os.path.abspath(destination)), exist_ok=True)
 
-  async with
+  async with httpx.AsyncClient(timeout=30.0) as client:
     if authorization:
       headers = {'Authorization': authorization}
-
-    async with session.get(url) as response:
+    async with client.stream("GET", url, headers=headers) as response:
       # Check if the request was successful
-      if response.
-        logging.error(f"Failed to download file. Status code: {response.
+      if response.status_code != 200:
+        logging.error(f"Failed to download file. Status code: {response.status_code}")
         return None
 
       # Get the total file size if available
@@ -55,7 +55,7 @@ async def download_file(url: str, destination: str, chunk_size: int = 8192, auth
         unit_scale=True,
         unit_divisor=1024
       ) as pbar:
-        async for chunk in response.
+        async for chunk in response.aiter_bytes(chunk_size):
          if chunk:
            f.write(chunk)
            pbar.update(len(chunk))
@@ -63,7 +63,7 @@ async def download_file(url: str, destination: str, chunk_size: int = 8192, auth
     logging.info(f"File downloaded successfully to {destination}")
     return destination
 
-  except
+  except httpx.RequestError as e:
     logging.error(f"Network error occurred: {str(e)}")
     return None
   except asyncio.TimeoutError:
@@ -147,21 +147,21 @@ class Base64File(BaseModel):
     try:
       if _content := await from_cache(url):
         return _content
-      async with
+      async with httpx.AsyncClient(timeout=30.0) as client:
+        response = await client.get(url, headers={"User-Agent": "Mozilla/5.0"})
+        logging.info(f"Downloading {url} - Status: {response.status_code}")
+        response.raise_for_status()
+        content = response.read()
+        # mime type detection
+        mime_type = response.headers.get('content-type', '').split(';')[0]
+        if not mime_type:
+          mime_type, _ = mimetypes.guess_type(urlparse(url).path)
+        if not mime_type:
+          mime_type = 'application/octet-stream'
+        # to base64
+        base64_content = base64.b64encode(content).decode('utf-8')
+        name = url.split('/')[-1]
+        extension = name.split('.')[-1]
     except Exception as e:
       _error = f"Failed to download file from {url}: {e}"
       logging.error(_error)
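
Both helpers move from aiohttp to httpx. The streaming pattern they adopt is the standard httpx one; a self-contained sketch of it (URL and destination are placeholders):

```python
import asyncio
import httpx

async def fetch(url: str, destination: str, chunk_size: int = 8192):
    async with httpx.AsyncClient(timeout=30.0) as client:
        async with client.stream("GET", url) as response:
            if response.status_code != 200:
                return None
            with open(destination, "wb") as f:
                # aiter_bytes yields the body in roughly chunk_size byte pieces
                async for chunk in response.aiter_bytes(chunk_size):
                    f.write(chunk)
    return destination

asyncio.run(fetch("https://example.com/file.bin", "/tmp/file.bin"))
```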
ws_bom_robot_app/task_manager.py
CHANGED
@@ -79,6 +79,7 @@ class TaskEntry(IdentifiableEntity):
   coroutine: Any = None
   headers: TaskHeader | None = None
   status: Union[TaskStatus, None] = None
+  queue: Literal["slow", "fast"] | None = "slow"
   def _get_coroutine_name(self, func: Any) -> str:
     if inspect.iscoroutine(func):
       return func.cr_code.co_name
@@ -120,7 +121,7 @@ class TaskStatistics(BaseModel):
   class TaskStatisticExecutionInfo(BaseModel):
     retention_days: float = config.robot_task_retention_days
     max_parallelism: int
-    slot_available: int
+    slot_available: dict[str,int]
     pid: int = os.getpid()
     running: list[TaskStatus]
    slowest: list
@@ -144,12 +145,12 @@ class TaskManagerStrategy(ABC):
     workers = config.runtime_options().number_of_workers
     max_concurrent_tasks = max(1, floor(config.robot_task_max_total_parallelism / max(1, workers)))
     self.max_parallelism = max_concurrent_tasks
-    self.semaphore = asyncio.Semaphore(max_concurrent_tasks)
+    self.semaphore = {"slow": asyncio.Semaphore(max_concurrent_tasks), "fast": asyncio.Semaphore(max_concurrent_tasks*2)}
     self.running_tasks = dict[str, TaskEntry]()
     self.loop = asyncio.get_event_loop()
 
   @abstractmethod
-  def create_task(self, coroutine, headers: TaskHeader | None = None) -> IdentifiableEntity:
+  def create_task(self, coroutine, headers: TaskHeader | None = None, queue: Literal["slow", "fast"] | None = "slow") -> IdentifiableEntity:
     """Create a new task.
     Args:
       coroutine (_type_): coroutine or callable to be executed.
@@ -160,6 +161,7 @@ class TaskManagerStrategy(ABC):
       from ws_bom_robot_app.task_manager import task_manager
       task_manager.create_task(my_coroutine, headers=my_headers) -> coroutine executed in-process
       task_manager.create_task(lambda: my_coroutine, headers=my_headers) -> callable using subprocess
+      task_manager.create_task(lambda: my_coroutine, headers=my_headers, queue="fast") -> callable using subprocess with "fast" queue
     """
     pass
 
@@ -227,7 +229,7 @@ class TaskManagerStrategy(ABC):
     self._update_task_by_event(task_entry, "callback", None)
     return callback
 
-  def create_task_entry(self, coroutine_or_callable: Any, headers: TaskHeader | None = None) -> TaskEntry:
+  def create_task_entry(self, coroutine_or_callable: Any, headers: TaskHeader | None = None, queue: Literal["slow", "fast"] | None = "slow") -> TaskEntry:
     """Create a new task entry.
 
     Args:
@@ -251,7 +253,9 @@ class TaskManagerStrategy(ABC):
     task_entry = TaskEntry(
       id=_id,
       coroutine=coroutine_or_callable,
-      headers=headers
+      headers=headers,
+      queue=queue
+    )
     # Store hint for subprocess capability
     task_entry.status.metadata.extra = task_entry.status.metadata.extra or {}
     task_entry.status.metadata.extra["can_use_subprocess"] = can_use_subprocess
@@ -263,7 +267,7 @@ class TaskManagerStrategy(ABC):
 
   async def _run_task_with_semaphore(self, task_entry: TaskEntry):
     """Run a task with semaphore control to limit concurrency."""
-    async with self.semaphore:
+    async with self.semaphore[task_entry.queue]:
       await self._execute_task(task_entry)
 
   async def _monitor_subprocess(self, task_entry: TaskEntry, proc, conn):
@@ -291,7 +295,7 @@ class TaskManagerStrategy(ABC):
     _log.warning(f"Task {task_entry.id} failure, retrying {task_entry.status.retry}...")
     async def delayed_retry():
       _delay = config.robot_task_mp_retry_delay # help to backpressure when overloaded
-      if self.semaphore._value > 0: # free semaphore slots available
+      if self.semaphore[task_entry.queue]._value > 0: # free semaphore slots available
        _delay = 5 # small/no delay if retry can run immediately
       await asyncio.sleep(_delay) # delay in seconds
       await self._run_task_with_semaphore(task_entry)
@@ -392,7 +396,7 @@ class TaskManagerStrategy(ABC):
     exec_info=TaskStatistics.TaskStatisticExecutionInfo(
       retention_days=config.robot_task_retention_days,
       max_parallelism=self.max_parallelism,
-      slot_available=self.semaphore._value,
+      slot_available={queue: self.semaphore[queue]._value for queue in self.semaphore},
       running=[task.status for task in self.running_task()],
       slowest=_slowest
     )
@@ -406,8 +410,8 @@ class MemoryTaskManagerStrategy(TaskManagerStrategy):
     super().__init__(max_concurrent_tasks)
     self.tasks: Dict[str, TaskEntry] = {}
 
-  def create_task(self, coroutine: Any, headers: TaskHeader | None = None) -> IdentifiableEntity:
-    task = self.create_task_entry(coroutine, headers)
+  def create_task(self, coroutine: Any, headers: TaskHeader | None = None, queue: Literal["slow", "fast"] | None = "slow") -> IdentifiableEntity:
+    task = self.create_task_entry(coroutine, headers, queue)
     self.tasks[task.id] = task
     return IdentifiableEntity(id=task.id)
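
The net effect is two independent concurrency budgets: the default "slow" queue keeps the per-worker max_parallelism, while "fast" gets twice that. A sketch of dispatching onto each, reusing the create_task usage shown in the docstring above (the coroutines are placeholders):

```python
from ws_bom_robot_app.task_manager import task_manager

async def rebuild_kb():   # placeholder for a heavy job, e.g. a kb build
    ...

async def light_job():    # placeholder for a short-lived job
    ...

# heavy work stays on the default "slow" queue (capacity: max_parallelism)
slow = task_manager.create_task(lambda: rebuild_kb(), headers=None)
# light work goes to "fast" (capacity: max_parallelism * 2), as the api.py routes now do
fast = task_manager.create_task(lambda: light_job(), headers=None, queue="fast")
print(slow.id, fast.id)
```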
{ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ws_bom_robot_app
-Version: 0.0.85
+Version: 0.0.86
 Summary: A FastAPI application serving ws bom/robot/llm platform ai.
 Home-page: https://github.com/websolutespa/bom
 Author: Websolute Spa
@@ -17,26 +17,26 @@ Requires-Dist: pydantic==2.11.7
 Requires-Dist: pydantic-settings==2.10.1
 Requires-Dist: fastapi[standard]==0.116.1
 Requires-Dist: chevron==0.14.0
-Requires-Dist: langchain==0.3.
-Requires-Dist: langchain-community==0.3.
-Requires-Dist: langchain-core==0.3.
-Requires-Dist: langchain-openai==0.3.
-Requires-Dist: langchain-anthropic==0.3.
-Requires-Dist: langchain-ibm==0.3.
-Requires-Dist: langchain-google-genai==2.
-Requires-Dist: langchain-google-vertexai==2.0.
-Requires-Dist: langchain-groq==0.3.
-Requires-Dist: langchain-ollama==0.3.
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
-Requires-Dist:
+Requires-Dist: langchain==0.3.27
+Requires-Dist: langchain-community==0.3.29
+Requires-Dist: langchain-core==0.3.75
+Requires-Dist: langchain-openai==0.3.32
+Requires-Dist: langchain-anthropic==0.3.19
+Requires-Dist: langchain-ibm==0.3.17
+Requires-Dist: langchain-google-genai==2.1.10
+Requires-Dist: langchain-google-vertexai==2.0.28
+Requires-Dist: langchain-groq==0.3.7
+Requires-Dist: langchain-ollama==0.3.7
+Requires-Dist: openevals==0.1.0
+Requires-Dist: faiss-cpu==1.12.0
+Requires-Dist: chromadb==1.0.20
+Requires-Dist: langchain-chroma==0.2.5
 Requires-Dist: langchain-qdrant==0.2.0
-Requires-Dist: qdrant-client==1.15.
+Requires-Dist: qdrant-client[fastembed]==1.15.1
 Requires-Dist: lark==1.2.2
-Requires-Dist: unstructured==0.18.
+Requires-Dist: unstructured==0.18.14
 Requires-Dist: unstructured[image]
-Requires-Dist: unstructured-ingest==1.2.
+Requires-Dist: unstructured-ingest==1.2.11
 Requires-Dist: unstructured-ingest[azure]
 Requires-Dist: unstructured-ingest[confluence]
 Requires-Dist: unstructured-ingest[dropbox]
@@ -49,9 +49,9 @@ Requires-Dist: unstructured-ingest[sftp]
 Requires-Dist: unstructured-ingest[sharepoint]
 Requires-Dist: unstructured-ingest[slack]
 Requires-Dist: html5lib==1.1
-Requires-Dist: markdownify==1.
+Requires-Dist: markdownify==1.2.0
 Requires-Dist: duckduckgo-search==8.0.4
-Requires-Dist:
+Requires-Dist: langchain-google-community==2.0.7
 Requires-Dist: trafilatura==2.0.0
 Dynamic: author
 Dynamic: author-email
{ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/RECORD
CHANGED
@@ -1,29 +1,30 @@
 ws_bom_robot_app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/auth.py,sha256=84nIbmJsMrNs0sxIQGEHbjsjc2P6ZrZZGSn8dkiL6is,895
-ws_bom_robot_app/config.py,sha256=
+ws_bom_robot_app/config.py,sha256=TWnFPlPpzN-GWVNib2CXfzY8IYVHCypkxdDJ6rLOatk,5443
 ws_bom_robot_app/cron_manager.py,sha256=TOz7dsQhbGXzYMKW7GboKOSySg9aun4h0yLckj-5w4U,9372
 ws_bom_robot_app/main.py,sha256=5h4qwQ4Ghm6CCSjO5eWvMhWxDATzUayQfQ-__E1Mw1I,6936
 ws_bom_robot_app/subprocess_runner.py,sha256=N71HxPvgMP5TIRlO5w9UzHAEK-JKOA9i16QXM3anpjM,4195
-ws_bom_robot_app/task_manager.py,sha256=
+ws_bom_robot_app/task_manager.py,sha256=N2NzinjaxsRaLu78sREG9MCanMzygtKUU_yXo-aw2wA,24570
 ws_bom_robot_app/util.py,sha256=t1VS6JQNOZe6aenBmjPLxJ_A3ncm7WqTZE8_gR85sQo,5022
 ws_bom_robot_app/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/llm/agent_context.py,sha256=uatHJ8wcRly6h0S762BgfzDMpmcwCHwNzwo37aWjeE0,1305
 ws_bom_robot_app/llm/agent_description.py,sha256=yK4aVU3RNk1oP4bEneV3QPAi-208JwWk4R6qHlzqYIg,4656
 ws_bom_robot_app/llm/agent_handler.py,sha256=TnpfChHLWVQ-gCEHNQPW3UXiuS8AmiP8JYwRz9pqbCg,7203
 ws_bom_robot_app/llm/agent_lcel.py,sha256=tVa1JJOuL1CG0tXS5AwOB4gli0E2rGqSBD5oEehHvOY,2480
-ws_bom_robot_app/llm/api.py,sha256=
+ws_bom_robot_app/llm/api.py,sha256=jMoiKiD5HNxGu6gTb5_qZ5UU8d2uJ7UVrdLseDStI6o,7634
 ws_bom_robot_app/llm/defaut_prompt.py,sha256=LlCd_nSMkMmHESfiiiQYfnJyB6Pp-LSs4CEKdYW4vFk,1106
+ws_bom_robot_app/llm/evaluator.py,sha256=tUyPX1oGZEjSiO4JixwNlgv6BI9cUHSmcAsTCpBnIn4,13322
 ws_bom_robot_app/llm/main.py,sha256=U_zUcL51VazXUyEicWFoNGkqwV-55s3tn52BlVPINes,5670
 ws_bom_robot_app/llm/nebuly_handler.py,sha256=Z4_GS-N4vQYPLnlXlwhJrwpUvf2uG53diYSOcteXGTc,7978
 ws_bom_robot_app/llm/feedbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/llm/feedbacks/feedback_manager.py,sha256=WcKgzlOb8VFG7yqHoIOO_R6LAzdzE4YIRFCVOGBSgfM,2856
 ws_bom_robot_app/llm/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ws_bom_robot_app/llm/models/api.py,sha256=
+ws_bom_robot_app/llm/models/api.py,sha256=bahqx9rdP6jM9Kk9VGkqT-bhASJeuAzO_5Ir6tBxDIU,12212
 ws_bom_robot_app/llm/models/base.py,sha256=1TqxuTK3rjJEALn7lvgoen_1ba3R2brAgGx6EDTtDZo,152
 ws_bom_robot_app/llm/models/feedback.py,sha256=zh1jLqPRLzNlxInkCMoiJbfSu0-tiOEYHM7FhC46PkM,1692
 ws_bom_robot_app/llm/models/kb.py,sha256=oVSw6_dmNxikAHrPqcfxDXz9M0ezLIYuxpgvzfs_Now,9514
 ws_bom_robot_app/llm/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ws_bom_robot_app/llm/providers/llm_manager.py,sha256=
+ws_bom_robot_app/llm/providers/llm_manager.py,sha256=5XqQNRx0My-bXptCzOlsMTnjLTx3bcX9HRT3_l5IQ_A,16699
 ws_bom_robot_app/llm/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ws_bom_robot_app/llm/tools/tool_builder.py,sha256=QTRG1c-EnH4APP10IyfZxEkqK9KitUsutXUvDRKeAhU,3224
 ws_bom_robot_app/llm/tools/tool_manager.py,sha256=1IgRXxdB7DU3gbIlfT_aMUWZyWuanFTAFwu3VaYKxfE,14990
@@ -35,7 +36,7 @@ ws_bom_robot_app/llm/utils/agent.py,sha256=_CY5Dji3UeAIi2iuU7ttz4fml1q8aCFgVWOv9
 ws_bom_robot_app/llm/utils/chunker.py,sha256=zVXjRMloc3KbNEqiDcycYzy4N0Ey1g8XYeq6ftyvkyg,857
 ws_bom_robot_app/llm/utils/cleanup.py,sha256=ARLZTX4mLbkLCEnMdIWYDYEAPOjzfy1laLGkYnxZe30,3063
 ws_bom_robot_app/llm/utils/cms.py,sha256=XhrLQyHQ2JUOInDCCf_uvR4Jiud0YvH2FwwiiuCnnsg,6352
-ws_bom_robot_app/llm/utils/download.py,sha256=
+ws_bom_robot_app/llm/utils/download.py,sha256=CrPWoCwYY6TjpDR8uHI0Do-w7WQ0PtjMcbUaRoEDUbg,7110
 ws_bom_robot_app/llm/utils/print.py,sha256=IsPYEWRJqu-dqlJA3F9OnnIS4rOq_EYX1Ljp3BvDnww,774
 ws_bom_robot_app/llm/utils/secrets.py,sha256=-HtqLIDVIJrpvGC5YhPAVyLsq8P4ChVM5g3GOfdwqVk,878
 ws_bom_robot_app/llm/utils/webhooks.py,sha256=LAAZqyN6VhV13wu4X-X85TwdDgAV2rNvIwQFIIc0FJM,2114
@@ -68,7 +69,7 @@ ws_bom_robot_app/llm/vector_store/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5
 ws_bom_robot_app/llm/vector_store/loader/base.py,sha256=GjUS2oaz0LHOSal5pipBkomZtrYUNcKPSd8bzhUU5Dc,6889
 ws_bom_robot_app/llm/vector_store/loader/docling.py,sha256=IOv1A0HSIWiHWQFzI4fdApfxrKgXOqwmC3mPXlKplqQ,4012
 ws_bom_robot_app/llm/vector_store/loader/json_loader.py,sha256=qo9ejRZyKv_k6jnGgXnu1W5uqsMMtgqK_uvPpZQ0p74,833
-ws_bom_robot_app-0.0.85.dist-info/METADATA,sha256=
-ws_bom_robot_app-0.0.85.dist-info/WHEEL,sha256=
-ws_bom_robot_app-0.0.85.dist-info/top_level.txt,sha256=
-ws_bom_robot_app-0.0.85.dist-info/RECORD,,
+ws_bom_robot_app-0.0.86.dist-info/METADATA,sha256=SbiF6TLzZjAfbRdKRh9_A2yBLsHQsOLIUFpVHE0nMV0,9985
+ws_bom_robot_app-0.0.86.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ws_bom_robot_app-0.0.86.dist-info/top_level.txt,sha256=Yl0akyHVbynsBX_N7wx3H3ZTkcMLjYyLJs5zBMDAKcM,17
+ws_bom_robot_app-0.0.86.dist-info/RECORD,,
{ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/WHEEL
File without changes
{ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.86.dist-info}/top_level.txt
File without changes