versionhq-1.1.4.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,472 @@
+ import os
+ import uuid
+ from abc import ABC
+ from typing import Any, Dict, List, Optional, TypeVar, Union
+ from dotenv import load_dotenv
+ from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator
+
+ from versionhq._utils.cache_handler import CacheHandler
+ from versionhq._utils.logger import Logger
+ from versionhq._utils.rpm_controller import RPMController
+ from versionhq._utils.usage_metrics import UsageMetrics
+ from versionhq.agent.parser import AgentAction
+ from versionhq.llm.llm_vars import LLM_VARS
+ from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW
+ from versionhq.task import TaskOutputFormat
+ from versionhq.task.model import ResponseField
+ from versionhq.tool.model import Tool, ToolCalled
+ from versionhq.tool.tool_handler import ToolHandler
+
+ load_dotenv(override=True)
+ T = TypeVar("T", bound="Agent")
+
+
+ # def _format_answer(agent, answer: str) -> Union[AgentAction, AgentFinish]:
+ #     return AgentParser(agent=agent).parse(answer)
+
+ # def mock_agent_ops_provider():
+ #     def track_agent(*args, **kwargs):
+ #         def noop(f):
+ #             return f
+ #         return noop
+ #     return track_agent
+
+ # track_agent = mock_agent_ops_provider()
+
+ # agentops = None
+ # if os.environ.get("AGENTOPS_API_KEY"):
+ #     try:
+ #         from agentops import track_agent
+ #     except ImportError:
+ #         track_agent = mock_agent_ops_provider()
+ # else:
+ #     track_agent = mock_agent_ops_provider()
+
+
+ class TokenProcess:
+     total_tokens: int = 0
+     prompt_tokens: int = 0
+     cached_prompt_tokens: int = 0
+     completion_tokens: int = 0
+     successful_requests: int = 0
+
+     def sum_prompt_tokens(self, tokens: int):
+         self.prompt_tokens = self.prompt_tokens + tokens
+         self.total_tokens = self.total_tokens + tokens
+
+     def sum_completion_tokens(self, tokens: int):
+         self.completion_tokens = self.completion_tokens + tokens
+         self.total_tokens = self.total_tokens + tokens
+
+     def sum_cached_prompt_tokens(self, tokens: int):
+         self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
+
+     def sum_successful_requests(self, requests: int):
+         self.successful_requests = self.successful_requests + requests
+
+     def get_summary(self) -> UsageMetrics:
+         return UsageMetrics(
+             total_tokens=self.total_tokens,
+             prompt_tokens=self.prompt_tokens,
+             cached_prompt_tokens=self.cached_prompt_tokens,
+             completion_tokens=self.completion_tokens,
+             successful_requests=self.successful_requests,
+         )
+
+
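TokenProcess is the running tally the agent keeps across LLM calls; get_summary() freezes it into a UsageMetrics record. A minimal sketch of the accounting, with made-up token counts:

    tp = TokenProcess()
    tp.sum_prompt_tokens(120)        # adds to prompt_tokens and total_tokens
    tp.sum_completion_tokens(80)     # adds to completion_tokens and total_tokens
    tp.sum_cached_prompt_tokens(40)  # tracked separately; does not touch total_tokens
    tp.sum_successful_requests(1)

    summary = tp.get_summary()       # UsageMetrics(total_tokens=200, prompt_tokens=120, ...)
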
+ # @track_agent()
+ class Agent(ABC, BaseModel):
+     """
+     Base class for the agents that run on an LLM.
+     The agent can execute tasks alone or in a team. When the agent belongs to a team, it needs to prioritize the team's tasks.
+     * (Temp) All optional fields except for the team and LLM settings are commented out for convenience.
+     """
+
+     __hash__ = object.__hash__
+     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
+     _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
+     _request_within_rpm_limit: Any = PrivateAttr(default=None)
+     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
+     _times_executed: int = PrivateAttr(default=0)
+
+     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
+     agent_ops_agent_name: Optional[str] = None
+     agent_ops_agent_id: Optional[str] = None
+     role: str = Field(description="role of the agent - used in summaries and logs")
+     goal: str = Field(
+         description="concise goal of the agent (details are set in the Task instance)"
+     )
+     backstory: str = Field(description="context passed to the LLM")
+
+     # tools
+     tools: Optional[List[Any]] = Field(default_factory=list)
+     tool_handler: Optional[InstanceOf[ToolHandler]] = Field(
+         default=None, description="handles the tool cache and the last used tool"
+     )
+
+     # team, rules of task execution
+     team: Optional[List[Any]] = Field(
+         default=None, description="team to which the agent belongs"
+     )
+     allow_delegation: bool = Field(
+         default=False,
+         description="enable agents to delegate tasks and ask questions among each other",
+     )
+     allow_code_execution: Optional[bool] = Field(
+         default=False, description="enable code execution for the agent"
+     )
+     max_retry_limit: int = Field(
+         default=2,
+         description="max. number of retries for the task execution when an error occurs; cascaded to the `invoke` function",
+     )
+     max_iter: Optional[int] = Field(
+         default=25,
+         description="max. number of iterations for an agent to execute a task",
+     )
+     step_callback: Optional[Any] = Field(
+         default=None,
+         description="callback to be executed after each step of the agent execution",
+     )
+
+     # llm settings cascaded to the LLM model
+     llm: Union[str, InstanceOf[LLM], Any] = Field(default=None)
+     function_calling_llm: Union[str, InstanceOf[LLM], Any] = Field(default=None)
+     respect_context_window: bool = Field(
+         default=True,
+         description="keep messages under the context window size by summarizing content",
+     )
+     max_tokens: Optional[int] = Field(
+         default=None, description="max. number of tokens for the agent's execution"
+     )
+     max_execution_time: Optional[int] = Field(
+         default=None, description="max. execution time for an agent to execute a task"
+     )
+     max_rpm: Optional[int] = Field(
+         default=None,
+         description="max. number of requests per minute for the agent execution",
+     )
+
+     # prompt rules
+     use_system_prompt: Optional[bool] = Field(
+         default=True, description="use the system prompt for the agent"
+     )
+     system_template: Optional[str] = Field(
+         default=None, description="system format for the agent"
+     )
+     prompt_template: Optional[str] = Field(
+         default=None, description="prompt format for the agent"
+     )
+     response_template: Optional[str] = Field(
+         default=None, description="response format for the agent"
+     )
+
+     # config, cache, error handling
+     config: Optional[Dict[str, Any]] = Field(
+         default=None, exclude=True, description="configuration for the agent"
+     )
+     cache: bool = Field(
+         default=True, description="whether the agent should use a cache for tool usage"
+     )
+     cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
+         default=None, description="an instance of the CacheHandler class"
+     )
+     formatting_errors: int = Field(
+         default=0, description="number of formatting errors"
+     )
+     verbose: bool = Field(
+         default=True, description="verbose mode for the agent execution"
+     )
+
+     def __repr__(self):
+         return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
+
+     @model_validator(mode="after")
+     def set_up_llm(self):
+         """
+         Set up the base model and the function calling model (if any) using the LLM class.
+         Pass the model config params `llm`, `max_tokens`, `max_execution_time`, `step_callback`, and `respect_context_window` to the LLM class.
+         The base model is selected on the client app; otherwise the default model is used.
+         """
+
+         self.agent_ops_agent_name = self.role
+         unaccepted_attributes = [
+             "AWS_ACCESS_KEY_ID",
+             "AWS_SECRET_ACCESS_KEY",
+             "AWS_REGION_NAME",
+         ]
+         callbacks = (
+             [
+                 self.step_callback,
+             ]
+             if self.step_callback is not None
+             else []
+         )
+
+         if isinstance(self.llm, LLM):
+             self.llm.timeout = self.max_execution_time
+             self.llm.max_tokens = self.max_tokens
+             self.llm.context_window_size = (
+                 self.llm.get_context_window_size()
+                 if self.respect_context_window
+                 else DEFAULT_CONTEXT_WINDOW
+             )
+             self.llm.callbacks = callbacks
+
+         elif isinstance(self.llm, str):
+             self.llm = LLM(
+                 model=self.llm,
+                 timeout=self.max_execution_time,
+                 max_tokens=self.max_tokens,
+                 callbacks=callbacks,
+             )
+
+             context_window_size = (
+                 self.llm.get_context_window_size()
+                 if self.respect_context_window
+                 else DEFAULT_CONTEXT_WINDOW
+             )
+             self.llm.context_window_size = context_window_size
+
+         elif self.llm is None:
+             model_name = os.environ.get(
+                 "LITELLM_MODEL_NAME", os.environ.get("MODEL", "gpt-4o-mini")
+             )
+             llm_params = {
+                 "model": model_name,
+                 "timeout": self.max_execution_time,
+                 "max_tokens": self.max_tokens,
+                 "callbacks": callbacks,
+             }
+             api_base = os.environ.get(
+                 "OPENAI_API_BASE", os.environ.get("OPENAI_BASE_URL", None)
+             )
+             if api_base:
+                 llm_params["base_url"] = api_base
+
+             set_provider = model_name.split("/")[0] if "/" in model_name else "openai"
+             for provider, env_vars in LLM_VARS.items():
+                 if provider == set_provider:
+                     for env_var in env_vars:
+                         key_name = env_var.get("key_name")
+
+                         if key_name and key_name not in unaccepted_attributes:
+                             env_value = os.environ.get(key_name)
+                             if env_value:
+                                 key_name = (
+                                     "api_key" if "API_KEY" in key_name else key_name
+                                 )
+                                 key_name = (
+                                     "api_base" if "API_BASE" in key_name else key_name
+                                 )
+                                 key_name = (
+                                     "api_version"
+                                     if "API_VERSION" in key_name
+                                     else key_name
+                                 )
+                                 llm_params[key_name] = env_value
+                         elif env_var.get("default", False):
+                             for key, value in env_var.items():
+                                 if key not in ["prompt", "key_name", "default"]:
+                                     if key in os.environ:
+                                         llm_params[key] = value
+             self.llm = LLM(**llm_params)
+             context_window_size = (
+                 self.llm.get_context_window_size()
+                 if self.respect_context_window
+                 else DEFAULT_CONTEXT_WINDOW
+             )
+             self.llm.context_window_size = context_window_size
+
+         else:
+             llm_params = {
+                 "model": (
+                     getattr(self.llm, "model_name", None)
+                     or getattr(self.llm, "deployment_name", None)
+                     or str(self.llm)
+                 ),
+                 "max_tokens": (
+                     getattr(self.llm, "max_tokens", None) or self.max_tokens or 3000
+                 ),
+                 "timeout": getattr(self.llm, "timeout", self.max_execution_time),
+                 "callbacks": getattr(self.llm, "callbacks", None) or callbacks,
+                 "temperature": getattr(self.llm, "temperature", None),
+                 "logprobs": getattr(self.llm, "logprobs", None),
+                 "api_key": getattr(self.llm, "api_key", None),
+                 "base_url": getattr(self.llm, "base_url", None),
+                 "organization": getattr(self.llm, "organization", None),
+             }
+             llm_params = {
+                 k: v for k, v in llm_params.items() if v is not None
+             }  # filter out None values
+             self.llm = LLM(**llm_params)
+
+         """
+         Set up the function_calling LLM as well. For the sake of convenience, use the same settings as the base LLM.
+         """
+         if self.function_calling_llm:
+             if isinstance(self.function_calling_llm, LLM):
+                 self.function_calling_llm.timeout = self.max_execution_time
+                 self.function_calling_llm.max_tokens = self.max_tokens
+                 self.function_calling_llm.callbacks = callbacks
+                 context_window_size = (
+                     self.function_calling_llm.get_context_window_size()
+                     if self.respect_context_window
+                     else DEFAULT_CONTEXT_WINDOW
+                 )
+                 self.function_calling_llm.context_window_size = context_window_size
+
+             elif isinstance(self.function_calling_llm, str):
+                 self.function_calling_llm = LLM(
+                     model=self.function_calling_llm,
+                     timeout=self.max_execution_time,
+                     max_tokens=self.max_tokens,
+                     callbacks=callbacks,
+                 )
+                 context_window_size = (
+                     self.function_calling_llm.get_context_window_size()
+                     if self.respect_context_window
+                     else DEFAULT_CONTEXT_WINDOW
+                 )
+                 self.function_calling_llm.context_window_size = context_window_size
+
+             else:
+                 model_name = getattr(
+                     self.function_calling_llm,
+                     "model_name",
+                     getattr(
+                         self.function_calling_llm,
+                         "deployment_name",
+                         str(self.function_calling_llm),
+                     ),
+                 )
+                 if model_name is not None and model_name != "":
+                     self.function_calling_llm = LLM(
+                         model=model_name,
+                         timeout=self.max_execution_time,
+                         max_tokens=self.max_tokens,
+                         callbacks=callbacks,
+                     )
+         return self
+
+     @model_validator(mode="after")
+     def set_up_tools(self):
+         """
+         Similar to the LLM setup, when the agent has tools, declare them using the Tool class.
+         """
+
+         if self.tools:
+             tools_in_class_format = []
+             for tool in self.tools:
+                 if isinstance(tool, Tool):
+                     tools_in_class_format.append(tool)
+                 elif isinstance(tool, str):
+                     tool_to_add = Tool(name=tool)
+                     tools_in_class_format.append(tool_to_add)
+                 else:
+                     pass
+             self.tools = tools_in_class_format
+
+         return self
+
+     def invoke(
+         self,
+         prompts: str,
+         output_formats: List[TaskOutputFormat],
+         response_fields: List[ResponseField],
+         **kwargs,
+     ) -> Dict[str, Any]:
+         """
+         Receive the system prompt as a string and create formatted prompts from it and the agent's backstory.
+         Then call the base model.
+         On errors, retry the task execution up to `self.max_retry_limit` times.
+         """
+
+         task_execution_counter = 0
+
+         messages = []
+         messages.append({"role": "user", "content": prompts})  #! REFINEME
+         messages.append({"role": "assistant", "content": self.backstory})
+         print("Messages sent to the model:", messages)
+
+         callbacks = kwargs.get("callbacks", None)
+
+         response = self.llm.call(
+             messages=messages,
+             output_formats=output_formats,
+             field_list=response_fields,
+             callbacks=callbacks,
+         )
+         task_execution_counter += 1
+         print("Agent's #1 res: ", response)
+
+         if (
+             response is None or response == ""
+         ) and task_execution_counter < self.max_retry_limit:
+             # Retry until a non-empty response is returned or the retry limit is reached.
+             while (
+                 response is None or response == ""
+             ) and task_execution_counter <= self.max_retry_limit:
+                 response = self.llm.call(
+                     messages=messages,
+                     output_formats=output_formats,
+                     field_list=response_fields,
+                     callbacks=callbacks,
+                 )
+                 task_execution_counter += 1
+                 print(f"Agent's #{task_execution_counter} res: ", response)
+
+         elif response is None or response == "":
+             print("Received None or empty response from the LLM call.")
+             raise ValueError("Invalid response from LLM call - None or empty.")
+
+         return {"output": response.output if hasattr(response, "output") else response}
+
+     def execute_task(self, task, context: Optional[str] = None) -> str:
+         """
+         Execute the task and return the output as a string.
+         To simplify, the tools are cascaded from `tools_called` on the given `task` Task instance, if any.
+         When tools are given, the agent must use them.
+         The agent must also consider the given context when executing the task.
+         """
+
+         task_prompt = task.prompt()
+         # if context:
+         #     task_prompt = self.i18n.slice("task_with_context").format(task=task_prompt, context=context)
+
+         tool_results = []
+         if task.tools_called:
+             for tool_called in task.tools_called:
+                 tool_result = tool_called.tool.run()
+                 tool_results.append(tool_result)
+
+         if task.take_tool_res_as_final:
+             return tool_results
+
+         # if self.team and self.team._train:
+         #     task_prompt = self._training_handler(task_prompt=task_prompt)
+         # else:
+         #     task_prompt = self._use_trained_data(task_prompt=task_prompt)
+
+         try:
+             result = self.invoke(
+                 prompts=task_prompt,
+                 output_formats=task.expected_output_formats,
+                 response_fields=task.output_field_list,
+             )["output"]
+
+         except Exception as e:
+             self._times_executed += 1
+             if self._times_executed > self.max_retry_limit:
+                 raise e
+             result = self.execute_task(task, context)  # retry with the same task and context
+
+         if self.max_rpm and self._rpm_controller:
+             self._rpm_controller.stop_rpm_counter()
+
+         # for tool_result in self.tools_results:
+         #     if tool_result.get("result_as_answer", False):
+         #         result = tool_result["result"]
+
+         return result
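Taken together, the validators mean an Agent can be built from just `role`, `goal`, `backstory`, and a model identifier; `set_up_llm` wraps a plain string (or environment defaults such as `LITELLM_MODEL_NAME` / `MODEL`) into an `LLM` instance. A minimal construction sketch, assuming the module path shown in this diff; the field values are illustrative:

    from versionhq.agent.model import Agent

    agent = Agent(
        role="market analyst",
        goal="summarize customer trends",
        backstory="You are a concise analyst for a small SaaS team.",
        llm="gpt-4o-mini",            # a plain string is wrapped into an LLM instance
        max_tokens=2048,
        respect_context_window=True,  # window size from the LLM, else DEFAULT_CONTEXT_WINDOW
    )

    print(agent)  # Agent(role=market analyst, goal=summarize customer trends, backstory=...)
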
@@ -0,0 +1,148 @@
+ import re
+ from typing import Any, Union
+ from json_repair import repair_json
+
+ from versionhq._utils.i18n import I18N
+
+
+ FINAL_ANSWER_ACTION = "Final Answer:"
+ MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do it right next time, and I will not use a tool I have already used.\n"
+ MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action Input:' after 'Action:'. I will do it right next time, and I will not use a tool I have already used.\n"
+ FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. I tried to both perform an Action and give a Final Answer at the same time; I must do one or the other."
+
+
+ class AgentAction:
+     thought: str
+     tool: str
+     tool_input: str
+     text: str
+     result: str
+
+     def __init__(self, thought: str, tool: str, tool_input: str, text: str):
+         self.thought = thought
+         self.tool = tool
+         self.tool_input = tool_input
+         self.text = text
+
+
+ class AgentFinish:
+     thought: str
+     output: str
+     text: str
+
+     def __init__(self, thought: str, output: str, text: str):
+         self.thought = thought
+         self.output = output
+         self.text = text
+
+
+ class OutputParserException(Exception):
+     error: str
+
+     def __init__(self, error: str):
+         self.error = error
+
+
+ class AgentParser:
+     """
+     Parses ReAct-style LLM calls that have a single tool input.
+
+     Expects output to be in one of two formats.
+
+     If the output signals that an action should be taken, it should be
+     in the format below. This will result in an AgentAction being returned.
+
+     Thought: agent thought here
+     Action: search
+     Action Input: what is the temperature in SF?
+
+     If the output signals that a final answer should be given, it should be
+     in the format below. This will result in an AgentFinish being returned.
+
+     Thought: agent thought here
+     Final Answer: The temperature is 100 degrees
+     """
+
+     agent: Any = None
+
+     def __init__(self, agent: Any):
+         self.agent = agent
+
+     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
+         thought = self._extract_thought(text)
+         includes_answer = FINAL_ANSWER_ACTION in text
+         regex = (
+             r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
+         )
+         action_match = re.search(regex, text, re.DOTALL)
+         if action_match:
+             if includes_answer:
+                 raise OutputParserException(
+                     f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}"
+                 )
+             action = action_match.group(1)
+             clean_action = self._clean_action(action)
+
+             action_input = action_match.group(2).strip()
+
+             tool_input = action_input.strip(" ").strip('"')
+             safe_tool_input = self._safe_repair_json(tool_input)
+
+             return AgentAction(thought, clean_action, safe_tool_input, text)
+
+         elif includes_answer:
+             final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
+             return AgentFinish(thought, final_answer, text)
+
+         if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
+             # self.agent.increment_formatting_errors()
+             raise OutputParserException(
+                 f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}",
+             )
+         elif not re.search(
+             r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
+         ):
+             # self.agent.increment_formatting_errors()
+             raise OutputParserException(MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE)
+         else:
+             _i18n = I18N()
+             error = _i18n.slice("format_without_tools")
+             # self.agent.increment_formatting_errors()
+             raise OutputParserException(error)
+
+     def _extract_thought(self, text: str) -> str:
+         regex = r"(.*?)(?:\n\nAction|\n\nFinal Answer)"
+         thought_match = re.search(regex, text, re.DOTALL)
+         if thought_match:
+             return thought_match.group(1).strip()
+         return ""
+
+     def _clean_action(self, text: str) -> str:
+         """Clean the action string by removing non-essential formatting characters."""
+         return re.sub(r"^\s*\*+\s*|\s*\*+\s*$", "", text).strip()
+
+     def _safe_repair_json(self, tool_input: str) -> str:
+         UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
+
+         # Skip repair if the input starts and ends with square brackets.
+         # The JSON parser has issues handling inputs enclosed in square brackets ('[]').
+         # These are typically valid JSON arrays or strings that do not require repair, and
+         # attempting to repair them might lead to unintended alterations, such as wrapping
+         # the input in additional layers or changing its structure and meaning. Skipping the
+         # repair preserves these valid JSON structures and avoids unnecessary modifications.
+         if tool_input.startswith("[") and tool_input.endswith("]"):
+             return tool_input
+
+         # Before repair, handle a common LLM issue:
+         # replace """ with " to avoid JSON parser errors.
+         tool_input = tool_input.replace('"""', '"')
+
+         result = repair_json(tool_input)
+         if result in UNABLE_TO_REPAIR_JSON_RESULTS:
+             return tool_input
+
+         return str(result)
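The parser's contract is easiest to see on concrete ReAct output: an Action / Action Input pair yields an AgentAction, while a Final Answer yields an AgentFinish. A short sketch using only the classes above; the tool name and input are made up:

    from versionhq.agent.parser import AgentAction, AgentFinish, AgentParser

    parser = AgentParser(agent=None)  # parse() itself does not touch the agent reference

    action = parser.parse(
        "Thought: I should look up the weather.\n\n"
        'Action: search\nAction Input: {"query": "temperature in SF"}'
    )
    assert isinstance(action, AgentAction)  # tool="search", tool_input passed through repair_json

    finish = parser.parse(
        "Thought: I now know the answer.\n\nFinal Answer: The temperature is 100 degrees"
    )
    assert isinstance(finish, AgentFinish)  # output="The temperature is 100 degrees"
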
@@ -0,0 +1,57 @@
+ import uuid
+ from abc import ABC
+ from typing import List, Optional
+ from pydantic import UUID4, BaseModel, Field, field_validator
+ from pydantic_core import PydanticCustomError
+
+ from versionhq.clients.product.model import Product, ProductProvider
+
+
+ class Customer(ABC, BaseModel):
+     """
+     Store the minimal information on the customer.
+     """
+
+     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
+     name: Optional[str] = Field(default=None, description="customer's name if any")
+     product_list: Optional[List[Product]] = Field(
+         default_factory=list, description="store products that the customer is associated with"
+     )
+     analysis: Optional[str] = Field(
+         default=None, description="store the latest analysis results on the customer"
+     )
+     on_workflow: bool = Field(
+         default=False, description="`True` if the customer is on a messaging workflow"
+     )
+     on: Optional[str] = Field(
+         default=None, description="destination service for this customer if any"
+     )
+
+     @field_validator("id", mode="before")
+     @classmethod
+     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
+         if v:
+             raise PydanticCustomError(
+                 "may_not_set_field", "This field is not to be set by the user.", {}
+             )
+
+     def customer_to(self) -> List[ProductProvider]:
+         """
+         Return a list of ProductProviders if the customer has a `product_list`.
+         """
+
+         res = []
+         if self.product_list:
+             for item in self.product_list:
+                 if item.provider not in res:
+                     res.append(item.provider)
+         return res
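customer_to() de-duplicates the providers behind the customer's products. A sketch of the intended behavior; the Product and ProductProvider constructors are not shown in this diff, so the arguments below are assumptions:

    from versionhq.clients.product.model import Product, ProductProvider

    provider = ProductProvider()                 # hypothetical: schema not in this diff
    products = [Product(provider=provider), Product(provider=provider)]

    customer = Customer(name="Jane Doe", product_list=products)
    assert customer.customer_to() == [provider]  # duplicate providers collapse to one entry
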