ws-bom-robot-app 0.0.104__py3-none-any.whl → 0.0.105__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ws_bom_robot_app/llm/agent_description.py +123 -123
- ws_bom_robot_app/llm/agent_handler.py +176 -176
- ws_bom_robot_app/llm/agent_lcel.py +107 -107
- ws_bom_robot_app/llm/defaut_prompt.py +15 -15
- ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -66
- ws_bom_robot_app/llm/main.py +159 -159
- ws_bom_robot_app/llm/models/feedback.py +30 -30
- ws_bom_robot_app/llm/nebuly_handler.py +185 -185
- ws_bom_robot_app/llm/tools/tool_builder.py +68 -68
- ws_bom_robot_app/llm/tools/tool_manager.py +343 -343
- ws_bom_robot_app/llm/tools/utils.py +41 -41
- ws_bom_robot_app/llm/utils/agent.py +34 -34
- ws_bom_robot_app/llm/utils/cms.py +123 -123
- ws_bom_robot_app/llm/utils/download.py +183 -183
- ws_bom_robot_app/llm/utils/print.py +29 -29
- ws_bom_robot_app/llm/vector_store/generator.py +137 -137
- ws_bom_robot_app/llm/vector_store/integration/shopify.py +147 -143
- ws_bom_robot_app/llm/vector_store/integration/thron.py +236 -236
- ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
- {ws_bom_robot_app-0.0.104.dist-info → ws_bom_robot_app-0.0.105.dist-info}/METADATA +364 -364
- {ws_bom_robot_app-0.0.104.dist-info → ws_bom_robot_app-0.0.105.dist-info}/RECORD +23 -23
- {ws_bom_robot_app-0.0.104.dist-info → ws_bom_robot_app-0.0.105.dist-info}/WHEEL +0 -0
- {ws_bom_robot_app-0.0.104.dist-info → ws_bom_robot_app-0.0.105.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/llm/main.py
CHANGED
@@ -1,159 +1,159 @@ (the 159 removed lines and the 159 added lines are textually identical)

from asyncio import Queue
import asyncio, json, logging, os, traceback, re
from fastapi import Request
from langchain.callbacks.tracers import LangChainTracer
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.messages import BaseMessage, AIMessage, HumanMessage
from langsmith import Client as LangSmithClient
from typing import AsyncGenerator, List
from ws_bom_robot_app.config import config
from ws_bom_robot_app.llm.agent_description import AgentDescriptor
from ws_bom_robot_app.llm.agent_handler import AgentHandler, RawAgentHandler
from ws_bom_robot_app.llm.agent_lcel import AgentLcel
from ws_bom_robot_app.llm.models.api import InvokeRequest, StreamRequest
from ws_bom_robot_app.llm.providers.llm_manager import LlmInterface
from ws_bom_robot_app.llm.tools.tool_builder import get_structured_tools
from ws_bom_robot_app.llm.nebuly_handler import NebulyHandler

async def invoke(rq: InvokeRequest) -> str:
    await rq.initialize()
    _msg: str = rq.messages[-1].content
    processor = AgentDescriptor(
        llm=rq.get_llm(),
        prompt=rq.system_message,
        mode = rq.mode,
        rules=rq.rules if rq.rules else None
    )
    result: AIMessage = await processor.run_agent(_msg)
    return {"result": result.content}

def _parse_formatted_message(message: str) -> str:
    try:
        text_fragments = []
        quoted_strings = re.findall(r'"([^"\\]*(?:\\.[^"\\]*)*)"', message)
        for string in quoted_strings:
            if not string.startswith(('threadId', 'type')) and len(string) > 1:
                text_fragments.append(string)
        result = ''.join(text_fragments)
        result = result.replace('\\n', '\n')
    except:
        result = message
    return result

async def __stream(rq: StreamRequest, ctx: Request, queue: Queue, formatted: bool = True) -> None:
    #os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    # rq initialization
    await rq.initialize()
    for tool in rq.app_tools:
        tool.thread_id = rq.thread_id

    #llm
    __llm: LlmInterface = rq.get_llm()

    #chat history
    chat_history: list[BaseMessage] = []
    for message in rq.messages:
        if message.role in ["human","user"]:
            _content = message.content
            # multimodal content parsing
            if isinstance(_content, list):
                try:
                    _content = await __llm.format_multimodal_content(_content)
                except Exception as e:
                    logging.warning(f"Error parsing multimodal content {_content[:100]}: {e}")
            chat_history.append(HumanMessage(content=_content))
        elif message.role in ["ai","assistant"]:
            message_content = ""
            if formatted:
                if '{\"type\":\"string\"' in message.content:
                    try:
                        json_msg = json.loads('[' + message.content[:-1] + ']')
                        for msg in json_msg:
                            if msg.get("content"):
                                message_content += msg["content"]
                    except:
                        message_content = _parse_formatted_message(message.content)
                elif '{\"type\":\"text\"' in message.content:
                    try:
                        json_msg = json.loads('[' + message.content[:-1] + ']')
                        for msg in json_msg:
                            if msg.get("text"):
                                message_content += msg["text"]
                    except:
                        message_content = _parse_formatted_message(message.content)
                else:
                    message_content = _parse_formatted_message(message.content)
            else:
                message_content = message.content
            if message_content:
                chat_history.append(AIMessage(content=message_content))

    #agent handler
    if formatted:
        agent_handler = AgentHandler(queue, rq.provider, rq.thread_id)
    else:
        agent_handler = RawAgentHandler(queue, rq.provider)
    #TODO: move from os.environ to rq
    os.environ["AGENT_HANDLER_FORMATTED"] = str(formatted)

    #callbacks
    ## agent
    callbacks: List[AsyncCallbackHandler] = [agent_handler]
    ## langchain tracing
    if rq.lang_chain_tracing:
        client = LangSmithClient(
            api_key= rq.secrets.get("langChainApiKey", "")
        )
        trace = LangChainTracer(project_name=rq.lang_chain_project,client=client,tags=[str(ctx.base_url) if ctx else ''])
        callbacks.append(trace)
    ## nebuly tracing
    if rq.secrets.get("nebulyApiKey","") != "":
        user_id = rq.system_context.user.id if rq.system_context and rq.system_context.user and rq.system_context.user.id else None
        nebuly_callback = NebulyHandler(
            llm_model=__llm.config.model,
            threadId=rq.thread_id,
            chat_history=chat_history,
            url=config.NEBULY_API_URL,
            api_key=rq.secrets.get("nebulyApiKey", None),
            user_id=user_id
        )
        callbacks.append(nebuly_callback)

    # chain
    processor = AgentLcel(
        llm=__llm,
        sys_message=rq.system_message,
        sys_context=rq.system_context,
        tools=get_structured_tools(__llm, tools=rq.app_tools, callbacks=[callbacks], queue=queue),
        rules=rq.rules,
        json_schema=rq.output_structure.get("outputFormat") if rq.output_structure and rq.output_structure.get("outputType") == "json" else None
    )
    try:
        await processor.executor.ainvoke(
            {"chat_history": chat_history},
            {"callbacks": callbacks},
        )
    except Exception as e:
        _error = f"Agent invoke ex: {e}"
        logging.warning(_error)
        if config.runtime_options().debug:
            _error += f" | {traceback.format_exc()}"
        await queue.put(_error)
        await queue.put(None)

    # signal the end of streaming
    await queue.put(None)

async def stream(rq: StreamRequest, ctx: Request, formatted: bool = True) -> AsyncGenerator[str, None]:
    queue = Queue()
    task = asyncio.create_task(__stream(rq, ctx, queue, formatted))
    try:
        while True:
            token = await queue.get()
            if token is None:  # None indicates the end of streaming
                break
            yield token
    finally:
        await task
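For context on the streaming flow above: stream() runs __stream() as a background task that pushes tokens onto an asyncio.Queue, and a None item acts as the end-of-stream sentinel. Below is a minimal, self-contained sketch of that producer/consumer pattern; the names fake_producer, consume, and main are illustrative only and are not part of the package.

import asyncio
from typing import AsyncGenerator

async def fake_producer(queue: asyncio.Queue) -> None:
    # Stand-in for __stream(): in the real code, callback handlers push
    # formatted tokens onto the queue while the agent executes.
    for token in ["Hello", ", ", "world"]:
        await queue.put(token)
    await queue.put(None)  # None is the end-of-stream sentinel

async def consume() -> AsyncGenerator[str, None]:
    # Mirrors the shape of stream(): start the producer, drain the queue,
    # stop on the sentinel, and await the task so producer errors surface.
    queue: asyncio.Queue = asyncio.Queue()
    task = asyncio.create_task(fake_producer(queue))
    try:
        while True:
            token = await queue.get()
            if token is None:
                break
            yield token
    finally:
        await task

async def main() -> None:
    async for chunk in consume():
        print(chunk, end="")
    print()

if __name__ == "__main__":
    asyncio.run(main())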
ws_bom_robot_app/llm/models/feedback.py
CHANGED
@@ -1,30 +1,30 @@ (the 30 removed lines and the 30 added lines are textually identical)

from pydantic import BaseModel, Field

class NebulyFeedbackAction(BaseModel):
    """
    FeedbackAction is a model that represents the action taken by the user
    in response to the feedback provided by the LLM.
    """
    slug: str = Field("rating", description="A string identifier for the feedback action",
                      enum=["thumbs_up", "thumbs_down", "copy_input", "copy_output", "paste", "rating"])
    text: str = Field(..., description="The text content of the feedback")
    value: int = Field(..., description="A numeric value associated with the feedback")

class NebulyFeedbackMetadata(BaseModel):
    """
    FeedbackMetadata is a model that represents the metadata associated with user feedback.
    This includes information about the interaction and the user who provided feedback.
    """
    input: str = Field(None, description="The input of the interactions to which the action refers to")
    output: str = Field(None, description="The output of the interactions to which the action refers to")
    end_user: str = Field(..., description="The identifier used for the end-user")
    timestamp: str = Field(..., description="The timestamp of the action event")
    anonymize: bool = Field(False, description="Boolean flag. If set to true, PII will be removed from the text field")

class NebulyFeedbackPayload(BaseModel):
    """
    NebulyFeedback is a model that combines feedback action and metadata.
    It represents a complete feedback entry from a user interaction with the LLM.
    """
    action: NebulyFeedbackAction = Field(..., description="The action taken by the user as feedback")
    metadata: NebulyFeedbackMetadata = Field(..., description="Metadata associated with the feedback")
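The three models above together describe one feedback entry. As a rough illustration only, a payload could be assembled as follows; the field names come from the models in the diff, while the concrete values are invented and Pydantic v2 is assumed for model_dump_json (on Pydantic v1, payload.json() would be used instead).

from datetime import datetime, timezone

from ws_bom_robot_app.llm.models.feedback import (
    NebulyFeedbackAction,
    NebulyFeedbackMetadata,
    NebulyFeedbackPayload,
)

payload = NebulyFeedbackPayload(
    action=NebulyFeedbackAction(
        slug="thumbs_up",  # one of the enum values listed above
        text="Clear and helpful answer",
        value=1,
    ),
    metadata=NebulyFeedbackMetadata(
        input="What are your opening hours?",
        output="We are open Monday to Friday, 9:00-18:00.",
        end_user="user-123",  # identifier for the end user (example value)
        timestamp=datetime.now(timezone.utc).isoformat(),
        anonymize=False,
    ),
)
print(payload.model_dump_json(indent=2))  # Pydantic v2; use payload.json() on v1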