ws-bom-robot-app 0.0.85__py3-none-any.whl → 0.0.87__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (29)
  1. ws_bom_robot_app/config.py +3 -1
  2. ws_bom_robot_app/llm/agent_description.py +123 -123
  3. ws_bom_robot_app/llm/agent_handler.py +174 -166
  4. ws_bom_robot_app/llm/agent_lcel.py +50 -50
  5. ws_bom_robot_app/llm/api.py +65 -3
  6. ws_bom_robot_app/llm/defaut_prompt.py +15 -15
  7. ws_bom_robot_app/llm/evaluator.py +319 -0
  8. ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -66
  9. ws_bom_robot_app/llm/main.py +158 -158
  10. ws_bom_robot_app/llm/models/api.py +1 -1
  11. ws_bom_robot_app/llm/models/feedback.py +30 -30
  12. ws_bom_robot_app/llm/nebuly_handler.py +185 -185
  13. ws_bom_robot_app/llm/providers/llm_manager.py +27 -9
  14. ws_bom_robot_app/llm/tools/tool_builder.py +68 -65
  15. ws_bom_robot_app/llm/tools/tool_manager.py +332 -330
  16. ws_bom_robot_app/llm/tools/utils.py +41 -41
  17. ws_bom_robot_app/llm/utils/agent.py +34 -34
  18. ws_bom_robot_app/llm/utils/cms.py +114 -114
  19. ws_bom_robot_app/llm/utils/download.py +183 -183
  20. ws_bom_robot_app/llm/utils/print.py +29 -29
  21. ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  22. ws_bom_robot_app/llm/vector_store/integration/shopify.py +143 -143
  23. ws_bom_robot_app/llm/vector_store/integration/thron.py +102 -102
  24. ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  25. ws_bom_robot_app/task_manager.py +14 -10
  26. {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.87.dist-info}/METADATA +20 -20
  27. {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.87.dist-info}/RECORD +29 -28
  28. {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.87.dist-info}/WHEEL +0 -0
  29. {ws_bom_robot_app-0.0.85.dist-info → ws_bom_robot_app-0.0.87.dist-info}/top_level.txt +0 -0
--- a/ws_bom_robot_app/llm/feedbacks/feedback_manager.py
+++ b/ws_bom_robot_app/llm/feedbacks/feedback_manager.py
@@ -1,66 +1,66 @@
from ws_bom_robot_app.llm.models.feedback import NebulyFeedbackPayload, NebulyFeedbackAction, NebulyFeedbackMetadata
from ws_bom_robot_app.config import config
from pydantic import BaseModel, Field
from typing import Optional
import requests

class FeedbackConfig(BaseModel):
    """
    FeedbackConfig is a model that represents the configuration for feedback management.
    It includes the API key and the URL for the feedback service.
    """
    api_key: str = Field(..., description="The API key for authentication")
    provider: str = Field(..., description="The provider of the feedback service")
    user_id: str = Field(..., description="The user ID for the feedback service")
    message_input: Optional[str] = Field(default=None, description="The input message to which the feedback refers")
    message_output: Optional[str] = Field(default=None, description="The output message to which the feedback refers")
    comment: str = Field(..., description="The comment provided by the user")
    rating: int = Field(..., description="The rating given by the user (from 1 to 5)", ge=1, le=5)
    anonymize: bool = Field(False, description="Boolean flag. If set to true, PII will be removed from the text field")
    timestamp: str = Field(..., description="The timestamp of the feedback event")
    message_id: Optional[str] = Field(default=None, description="The message ID for the feedback")

class FeedbackInterface:
    def __init__(self, config: FeedbackConfig):
        self.config = config

    def send_feedback(self):
        raise NotImplementedError

class NebulyFeedback(FeedbackInterface):
    def __init__(self, config: FeedbackConfig):
        super().__init__(config)
        self.config = config

    def send_feedback(self) -> str:
        if not self.config.api_key:
            return "Error sending feedback: API key is required for Nebuly feedback"
        headers = {
            "Authorization": f"Bearer {self.config.api_key}",
            "Content-Type": "application/json"
        }
        action = NebulyFeedbackAction(
            slug="rating",
            text=self.config.comment,
            value=self.config.rating
        )
        metadata = NebulyFeedbackMetadata(
            end_user=self.config.user_id,
            timestamp=self.config.timestamp,
            anonymize=self.config.anonymize
        )
        payload = NebulyFeedbackPayload(
            action=action,
            metadata=metadata
        )
        url = f"{config.NEBULY_API_URL}/event-ingestion/api/v1/events/feedback"
        response = requests.request("POST", url, json=payload.model_dump(), headers=headers)
        if response.status_code != 200:
            raise Exception(f"Error sending feedback: {response.status_code} - {response.text}")
        return response.text

class FeedbackManager:
    #class variables (static)
    _list: dict[str,FeedbackInterface] = {
        "nebuly": NebulyFeedback,
    }
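The hunk cuts off at line 66, inside FeedbackManager, so the class's dispatch entry point is not visible here. A minimal usage sketch of what the registry supports, assuming direct lookup by provider slug; the config values are placeholders, and reading the private _list is purely for illustration:

from datetime import datetime, timezone
from ws_bom_robot_app.llm.feedbacks.feedback_manager import FeedbackConfig, FeedbackManager

# Build a validated config; rating is constrained to 1..5 by the model.
cfg = FeedbackConfig(
    api_key="nb-xxxx",  # placeholder Nebuly API key
    provider="nebuly",
    user_id="user-123",
    comment="Helpful answer",
    rating=5,
    timestamp=datetime.now(timezone.utc).isoformat(),
)

# Look up the handler class registered under the provider slug and send.
handler_cls = FeedbackManager._list[cfg.provider]  # -> NebulyFeedback
print(handler_cls(cfg).send_feedback())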
--- a/ws_bom_robot_app/llm/main.py
+++ b/ws_bom_robot_app/llm/main.py
@@ -1,158 +1,158 @@
from asyncio import Queue
import asyncio, json, logging, os, traceback, re
from fastapi import Request
from langchain.callbacks.tracers import LangChainTracer
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
from langsmith import Client as LangSmithClient
from typing import AsyncGenerator, List
from ws_bom_robot_app.config import config
from ws_bom_robot_app.llm.agent_description import AgentDescriptor
from ws_bom_robot_app.llm.agent_handler import AgentHandler, RawAgentHandler
from ws_bom_robot_app.llm.agent_lcel import AgentLcel
from ws_bom_robot_app.llm.models.api import InvokeRequest, StreamRequest
from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
from ws_bom_robot_app.llm.tools.tool_builder import get_structured_tools
from ws_bom_robot_app.llm.nebuly_handler import NebulyHandler

async def invoke(rq: InvokeRequest) -> str:
    await rq.initialize()
    _msg: str = rq.messages[-1].content
    processor = AgentDescriptor(
        llm=rq.get_llm(),
        prompt=rq.system_message,
        mode = rq.mode,
        rules=rq.rules if rq.rules else None
    )
    result: AIMessage = await processor.run_agent(_msg)
    return {"result": result.content}

def _parse_formatted_message(message: str) -> str:
    try:
        text_fragments = []
        quoted_strings = re.findall(r'"([^"\\]*(?:\\.[^"\\]*)*)"', message)
        for string in quoted_strings:
            if not string.startswith(('threadId', 'type')) and len(string) > 1:
                text_fragments.append(string)
        result = ''.join(text_fragments)
        result = result.replace('\\n', '\n')
    except:
        result = message
    return result

async def __stream(rq: StreamRequest, ctx: Request, queue: Queue, formatted: bool = True) -> None:
    #os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    # rq initialization
    await rq.initialize()
    for tool in rq.app_tools:
        tool.thread_id = rq.thread_id

    #llm
    __llm: LlmInterface = rq.get_llm()

    #chat history
    chat_history: list[BaseMessage] = []
    for message in rq.messages:
        if message.role in ["human","user"]:
            _content = message.content
            # multimodal content parsing
            if isinstance(_content, list):
                try:
                    _content = await __llm.format_multimodal_content(_content)
                except Exception as e:
                    logging.warning(f"Error parsing multimodal content {_content[:100]}: {e}")
            chat_history.append(HumanMessage(content=_content))
        elif message.role in ["ai","assistant"]:
            message_content = ""
            if formatted:
                if '{\"type\":\"string\"' in message.content:
                    try:
                        json_msg = json.loads('[' + message.content[:-1] + ']')
                        for msg in json_msg:
                            if msg.get("content"):
                                message_content += msg["content"]
                    except:
                        message_content = _parse_formatted_message(message.content)
                elif '{\"type\":\"text\"' in message.content:
                    try:
                        json_msg = json.loads('[' + message.content[:-1] + ']')
                        for msg in json_msg:
                            if msg.get("text"):
                                message_content += msg["text"]
                    except:
                        message_content = _parse_formatted_message(message.content)
                else:
                    message_content = _parse_formatted_message(message.content)
            else:
                message_content = message.content
            if message_content:
                chat_history.append(AIMessage(content=message_content))


    #agent handler
    if formatted:
        agent_handler = AgentHandler(queue, rq.provider, rq.thread_id)
    else:
        agent_handler = RawAgentHandler(queue, rq.provider)
    #TODO: move from os.environ to rq
    os.environ["AGENT_HANDLER_FORMATTED"] = str(formatted)

    #callbacks
    ## agent
    callbacks: List[AsyncCallbackHandler] = [agent_handler]
    ## langchain tracing
    if rq.lang_chain_tracing:
        client = LangSmithClient(
            api_key= rq.secrets.get("langChainApiKey", "")
        )
        trace = LangChainTracer(project_name=rq.lang_chain_project,client=client,tags=[str(ctx.base_url) if ctx else ''])
        callbacks.append(trace)
    ## nebuly tracing
    if rq.secrets.get("nebulyApiKey","") != "":
        user_id = rq.system_context.user.id if rq.system_context and rq.system_context.user and rq.system_context.user.id else None
        nebuly_callback = NebulyHandler(
            llm_model=__llm.config.model,
            threadId=rq.thread_id,
            chat_history=chat_history,
            url=config.NEBULY_API_URL,
            api_key=rq.secrets.get("nebulyApiKey", None),
            user_id=user_id
        )
        callbacks.append(nebuly_callback)

    # chain
    processor = AgentLcel(
        llm=__llm,
        sys_message=rq.system_message,
        sys_context=rq.system_context,
        tools=get_structured_tools(__llm, tools=rq.app_tools, callbacks=[callbacks], queue=queue),
        rules=rq.rules
    )
    try:
        await processor.executor.ainvoke(
            {"chat_history": chat_history},
            {"callbacks": callbacks},
        )
    except Exception as e:
        _error = f"Agent invoke ex: {e}"
        logging.warning(_error)
        if config.runtime_options().debug:
            _error += f" | {traceback.format_exc()}"
        await queue.put(_error)
        await queue.put(None)

    # signal the end of streaming
    await queue.put(None)

async def stream(rq: StreamRequest, ctx: Request, formatted: bool = True) -> AsyncGenerator[str, None]:
    queue = Queue()
    task = asyncio.create_task(__stream(rq, ctx, queue, formatted))
    try:
        while True:
            token = await queue.get()
            if token is None: # None indicates the end of streaming
                break
            yield token
    finally:
        await task
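For context, a sketch of how the stream generator might be exposed over HTTP with FastAPI's StreamingResponse. The route path and media type are assumptions, not taken from this diff; the real wiring presumably lives in ws_bom_robot_app/llm/api.py:

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from ws_bom_robot_app.llm import main as llm_main
from ws_bom_robot_app.llm.models.api import StreamRequest

app = FastAPI()

@app.post("/llm/stream")  # hypothetical route, for illustration only
async def llm_stream(rq: StreamRequest, request: Request):
    # stream() drains the queue fed by __stream() and yields tokens
    # until the None sentinel signals the end of the run.
    return StreamingResponse(llm_main.stream(rq, request), media_type="text/plain")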
--- a/ws_bom_robot_app/llm/models/api.py
+++ b/ws_bom_robot_app/llm/models/api.py
@@ -163,7 +163,7 @@ class LlmApp(BaseModel):
     return list(set(
       os.path.basename(db) for db in [self.vector_db] +
       ([self.rules.vector_db] if self.rules and self.rules.vector_db else []) +
-      [db for tool in (self.app_tools or []) for db in [tool.vector_db]]
+      [db for tool in (self.app_tools or []) for db in [tool.vector_db] if tool.is_active]
       if db is not None
     ))
   def __decompress_zip(self,zip_file_path, extract_to):
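The net effect of the one-line change: vector stores attached to deactivated tools no longer count toward the databases the app resolves. A toy illustration with stand-in objects, where SimpleNamespace replaces the real tool model and the None check that actually lives in the enclosing set comprehension is folded into each list:

from types import SimpleNamespace as Tool

tools = [
    Tool(vector_db="faq.db", is_active=True),
    Tool(vector_db="legacy.db", is_active=False),
    Tool(vector_db=None, is_active=True),
]

# 0.0.85 behaviour: every tool's vector_db is collected
old = [db for t in tools for db in [t.vector_db] if db is not None]
# 0.0.87 behaviour: inactive tools are skipped
new = [db for t in tools for db in [t.vector_db] if t.is_active and db is not None]

print(old)  # ['faq.db', 'legacy.db']
print(new)  # ['faq.db']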
--- a/ws_bom_robot_app/llm/models/feedback.py
+++ b/ws_bom_robot_app/llm/models/feedback.py
@@ -1,30 +1,30 @@
from pydantic import BaseModel, Field

class NebulyFeedbackAction(BaseModel):
    """
    FeedbackAction is a model that represents the action taken by the user
    in response to the feedback provided by the LLM.
    """
    slug: str = Field("rating", description="A string identifier for the feedback action",
                      enum=["thumbs_up", "thumbs_down", "copy_input", "copy_output", "paste", "rating"])
    text: str = Field(..., description="The text content of the feedback")
    value: int = Field(..., description="A numeric value associated with the feedback")

class NebulyFeedbackMetadata(BaseModel):
    """
    FeedbackMetadata is a model that represents the metadata associated with user feedback.
    This includes information about the interaction and the user who provided feedback.
    """
    input: str = Field(None, description="The input of the interactions to which the action refers to")
    output: str = Field(None, description="The output of the interactions to which the action refers to")
    end_user: str = Field(..., description="The identifier used for the end-user")
    timestamp: str = Field(..., description="The timestamp of the action event")
    anonymize: bool = Field(False, description="Boolean flag. If set to true, PII will be removed from the text field")

class NebulyFeedbackPayload(BaseModel):
    """
    NebulyFeedback is a model that combines feedback action and metadata.
    It represents a complete feedback entry from a user interaction with the LLM.
    """
    action: NebulyFeedbackAction = Field(..., description="The action taken by the user as feedback")
    metadata: NebulyFeedbackMetadata = Field(..., description="Metadata associated with the feedback")
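For reference, a short sketch of the JSON these models produce via pydantic's model_dump, which is what NebulyFeedback posts to the event-ingestion endpoint; all values are placeholders:

import json
from ws_bom_robot_app.llm.models.feedback import (
    NebulyFeedbackAction,
    NebulyFeedbackMetadata,
    NebulyFeedbackPayload,
)

payload = NebulyFeedbackPayload(
    action=NebulyFeedbackAction(slug="rating", text="Helpful answer", value=5),
    metadata=NebulyFeedbackMetadata(
        end_user="user-123",
        timestamp="2025-01-01T00:00:00Z",
        anonymize=False,
    ),
)
print(json.dumps(payload.model_dump(), indent=2))
# {
#   "action": {"slug": "rating", "text": "Helpful answer", "value": 5},
#   "metadata": {"input": null, "output": null, "end_user": "user-123",
#                "timestamp": "2025-01-01T00:00:00Z", "anonymize": false}
# }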