versionhq 1.2.4.1__py3-none-any.whl → 1.2.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -17,7 +17,7 @@ from versionhq.clients.workflow.model import MessagingWorkflow, MessagingCompone
 from versionhq.knowledge.model import Knowledge, KnowledgeStorage
 from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
 from versionhq.knowledge.source_docling import DoclingSource
-from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType
+from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType, ReformTriggerEvent
 from versionhq.task.model import Task, TaskOutput, ResponseField, TaskExecutionType
 from versionhq.task.evaluation import Evaluation, EvaluationItem
 from versionhq.tool.model import Tool, ToolSet
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow
 
 
-__version__ = "1.2.4.1"
+__version__ = "1.2.4.3"
 __all__ = [
     "Agent",
 
@@ -70,6 +70,7 @@ __all__ = [
     "DependencyType",
     "Condition",
     "ConditionType",
+    "ReformTriggerEvent",
 
     "Task",
     "TaskOutput",
@@ -0,0 +1,103 @@
+from typing import List, Optional, Dict
+from typing_extensions import Self
+
+from pydantic import InstanceOf, Field
+
+from versionhq.agent.model import Agent
+from versionhq.task.model import Task
+from versionhq.task_graph.model import TaskGraph, Node, DependencyType, ReformTriggerEvent
+from versionhq._prompt.model import Prompt
+from versionhq._prompt.constants import REFLECT, INTEGRATE, parameter_sets
+
+
+class PromptFeedbackGraph(TaskGraph):
+    """A Pydantic class to handle the auto prompt feedback cycle."""
+
+    _times_iteration: int = 0
+    user_prompts: Optional[Dict[str, str]] = Field(default_factory=dict)  # { "0": "...", "1": "..." }
+    dev_prompts: Optional[Dict[str, str]] = Field(default_factory=dict)
+    prompts: Optional[Dict[str, InstanceOf[Prompt]]] = Field(default_factory=dict)
+
+
+    def __init__(self, prompt: InstanceOf[Prompt] = None, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        if prompt:
+            user_prompt, dev_prompt, _ = prompt.format_core()
+            self.prompts = { self.key: prompt }
+            self.user_prompts = { self.key: user_prompt }
+            self.dev_prompts = { self.key: dev_prompt }
+
+
+    def _fetch_latest_prompt(self) -> InstanceOf[Prompt] | None:
+        return self.prompts[self.key] if self.key in self.prompts else None
+
+
+    def _generate_agents(self) -> List[Agent] | None:
+        agents = []
+        prompt = self._fetch_latest_prompt()
+
+        if not prompt:
+            return None
+
+        agent = prompt.agent
+        agent_params = agent.model_dump(exclude={"id", "llm", "llm_config", "self_learning"})
+        for params in parameter_sets:
+            agent = Agent(**agent_params, llm=agent.llm.model, llm_config={**params}, self_learning=True)
+            agents.append(agent)
+        return agents
+
+
+    def _reflect(self, original_response: str) -> Task:
+        description = REFLECT.format(original_prompt=self.original_prompt, original_response=original_response)
+        return Task(description=description)
+
+
+    def set_up_graph(self, **attributes) -> Self:
+        """Sets up a TaskGraph object with nodes and edges."""
+
+        prompt = self._fetch_latest_prompt()
+        base_task = prompt.task if prompt else None
+        base_agent = prompt.agent if prompt else None
+
+        if not base_task or not base_agent:
+            return None
+
+        agents = self._generate_agents()
+        if not agents:
+            return None
+
+        self.concl_template = base_task.pydantic_output if base_task.pydantic_output else base_task.response_fields if base_task.response_fields else None
+        base_agent.callbacks.append(self._reflect)
+        init_node = Node(task=base_task, assigned_to=base_agent)
+        self.add_node(init_node)
+
+        final_task = Task(description=INTEGRATE.format(original_prompt=self.original_prompt, responses=""))
+        final_node = Node(task=final_task, agent=base_agent)
+        self.add_node(node=final_node)
+
+        for agent in agents:
+            node = Node(task=base_task, assigned_to=agent)
+            self.add_node(node=node)
+            self.add_dependency(source=init_node.identifier, target=node.identifier, dependency_type=DependencyType.FINISH_TO_START, required=True)
+            self.add_dependency(source=node.identifier, target=final_node.identifier, dependency_type=DependencyType.FINISH_TO_START, required=True)
+
+        if attributes:
+            for k, v in attributes.items():
+                if hasattr(self, k):
+                    setattr(self, k, v)
+
+        return self
+
+    @property
+    def index(self) -> str:
+        """Returns an index to add a new item."""
+        return str(len([k for k in self.user_prompts.keys()]))
+
+    @property
+    def original_prompt(self) -> str:
+        return str(self.user_prompts["0"]) + str(self.dev_prompts["0"])
+
+    @property
+    def key(self):
+        return str(self._times_iteration)
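
This new module (its file path is not shown in the diff header) wires one base node, one variant node per entry in `parameter_sets`, and a final integration node into a feedback graph. A minimal usage sketch, assuming only the APIs visible above; executing the finished graph is left out, since that lives in `TaskGraph`:

```python
from versionhq.agent.model import Agent
from versionhq.task.model import Task
from versionhq._prompt.model import Prompt
# PromptFeedbackGraph comes from this new module; its import path is not shown in the diff.

agent = Agent(role="Researcher")  # illustrative values
task = Task(description="Summarize recent work on prompt optimization.")
prompt = Prompt(task=task, agent=agent, context="Audience: ML engineers")

pfg = PromptFeedbackGraph(prompt=prompt)
graph = pfg.set_up_graph()  # returns self on success; None if the prompt lacks a task or agent
```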
versionhq/_prompt/constants.py ADDED
@@ -0,0 +1,30 @@
+REFLECT = "Here is the original prompt: {original_prompt}\nHere is the original response: {original_response}\nAnalyze the original prompt and response, check for any potential issues, and create an improved response."
+
+INTEGRATE = "Here is the original prompt: {original_prompt}\nHere are responses: {responses}. Help integrate them as a single response."
+
+parameter_sets = [
+    {
+        "temperature": 0.2,
+        "top_p": 0.5,
+        "max_tokens": 5000,
+        "frequency_penalty": 0.5,
+        "presence_penalty": 0.5,
+        "stop": ["\n\n", "###"],
+    },
+    {
+        "temperature": 0.7,
+        "top_p": 0.8,
+        "max_tokens": 8000,
+        "frequency_penalty": 0.3,
+        "presence_penalty": 0.3,
+        "stop": ["\n\n"],
+    },
+    {
+        "temperature": 1.0,
+        "top_p": 0.95,
+        "max_tokens": 12000,
+        "frequency_penalty": 0.0,
+        "presence_penalty": 0.0,
+        "stop": [],
+    }
+]
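
The three sets sweep from conservative to creative sampling, and `_generate_agents` above builds one self-learning agent per set. The same override pattern in isolation, as a sketch (the role string is a placeholder):

```python
from versionhq.agent.model import Agent
from versionhq._prompt.constants import parameter_sets

# One variant agent per sampling regime, mirroring _generate_agents.
variants = [Agent(role="Reviewer", llm_config={**params}, self_learning=True) for params in parameter_sets]
```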
versionhq/_prompt/model.py CHANGED
@@ -1,34 +1,50 @@
-from typing import Any, Dict, List
+
+from typing import Dict, List, Tuple, Any
 from textwrap import dedent
 
+from pydantic import InstanceOf
+
+from versionhq._utils import is_valid_url
+
 
 class Prompt:
     """A class to format, store, and manage a prompt."""
 
     task: Any = None
     agent: Any = None
-    context: str = None
+    context: Any = None
 
 
-    def __init__(self, task, agent):
-        from versionhq.task.model import Task
+    def __init__(self, task, agent, context):
         from versionhq.agent.model import Agent
+        from versionhq.task.model import Task
 
         self.task = task if isinstance(task, Task) else Task(description=str(task))
         self.agent = agent if isinstance(agent, Agent) else Agent(role=str(agent))
+        self.context = context
 
 
     def _draft_output_prompt(self) -> str:
+        """Drafts prompt for output either from `pydantic_output` or `response_fields`"""
+
+        from versionhq.llm.model import DEFAULT_MODEL_PROVIDER_NAME
+
         output_prompt = ""
+        model_provider = self.agent.llm.provider if self.agent else DEFAULT_MODEL_PROVIDER_NAME
 
         if self.task.pydantic_output:
-            output_prompt = f"""Your response MUST STRICTLY follow the given repsonse format:
-            JSON schema: {str(self.task.pydantic_output)}
-            """
+            output_prompt, output_formats_to_follow = "", dict()
+            response_format = str(self.task._structure_response_format(model_provider=model_provider))
+            for k, v in self.task.pydantic_output.model_fields.items():
+                output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
 
+            output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
+Response format: {response_format}
+Ref. Output image: {output_formats_to_follow}
+"""
         elif self.task.response_fields:
             output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self.task._structure_response_format(model_provider=self.agent.llm.provider))
+            response_format = str(self.task._structure_response_format(model_provider=model_provider))
             for item in self.task.response_fields:
                 if item:
                     output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
@@ -38,43 +54,42 @@ Response format: {response_format}
 Ref. Output image: {output_formats_to_follow}
 """
         else:
-            output_prompt = "You MUST Return your response as a valid JSON serializable string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
+            output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
 
         return dedent(output_prompt)
 
 
-    def _draft_context_prompt(self) -> str:
+    def _draft_context_prompt(self, context: Any = None) -> str:
         """
         Create a context prompt from the given context in any format: a task object, task output object, list, dict.
         """
         from versionhq.task.model import Task, TaskOutput
 
         context_to_add = None
-        if not self.context:
-            # Logger().log(level="error", color="red", message="Missing a context to add to the prompt. We'll return ''.")
-            return
+        if not context:
+            return context_to_add
 
-        match self.context:
+        match context:
             case str():
-                context_to_add = self.context
+                context_to_add = context
 
             case Task():
-                if not self.context.output:
-                    res = self.context.execute()
+                if not context.output:
+                    res = context.execute()
                     context_to_add = res._to_context_prompt()
 
                 else:
-                    context_to_add = self.context.output.raw
+                    context_to_add = context.output.raw
 
             case TaskOutput():
-                context_to_add = self.context._to_context_prompt()
+                context_to_add = context._to_context_prompt()
 
 
             case dict():
-                context_to_add = str(self.context)
+                context_to_add = str(context)
 
             case list():
-                res = ", ".join([self._draft_context_prompt(context=item) for item in self.context])
+                res = ", ".join([self._draft_context_prompt(context=item) for item in context])
                 context_to_add = res
 
             case _:
@@ -83,65 +98,121 @@ Ref. Output image: {output_formats_to_follow}
         return dedent(context_to_add)
 
 
-    def _draft_user_prompt(self) -> str:
+    def _format_content_prompt(self) -> Dict[str, str]:
+        """Formats content (file, image, audio) prompt message."""
+
+        import base64
+        from pathlib import Path
+
+        content_messages = {}
+
+        if self.task.image:
+            with open(self.task.image, "rb") as file:
+                content = file.read()
+                if content:
+                    encoded_file = base64.b64encode(content).decode("utf-8")
+                    img_url = f"data:image/jpeg;base64,{encoded_file}"
+                    content_messages.update({ "type": "image_url", "image_url": { "url": img_url }})
+
+        if self.task.file:
+            if is_valid_url(self.task.file):
+                content_messages.update({ "type": "image_url", "image_url": self.file })
+
+        if self.task.audio and self.agent.llm.provider == "gemini":
+            audio_bytes = Path(self.task.audio).read_bytes()
+            encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
+            content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
+
+        return content_messages
+
+
+    def _find_rag_tools(self) -> List[InstanceOf[Any]]:
+        """Find RAG tools from the agent and task object."""
+
+        from versionhq.tool.rag_tool import RagTool
+
+        tools = []
+        if self.task.tools:
+            [tools.append(item) for item in self.task.tools if isinstance(item, RagTool)]
+
+        if self.agent.tools and self.task.can_use_agent_tools:
+            [tools.append(item) for item in self.agent.tools if isinstance(item, RagTool)]
+
+        return tools
+
+
+    def draft_user_prompt(self) -> str:
+        """Draft task prompts from its description and context."""
+
         output_prompt = self._draft_output_prompt()
         task_slices = [self.task.description, output_prompt, ]
 
         if self.context:
-            context_prompt = self._draft_context_prompt()
+            context_prompt = self._draft_context_prompt(context=self.context)
             task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")
 
         return "\n".join(task_slices)
 
 
-    def _format_content_prompt(self) -> Dict[str, str]:
-        """Formats and returns image_url content added to the messages."""
+    def format_core(self, rag_tools: List[Any] = None) -> Tuple[str, str, List[Dict[str, str]]]:
+        """Formats prompt messages sent to the LLM, then returns task prompt, developer prompt, and messages."""
 
-        from versionhq._utils import is_valid_url
+        from versionhq.knowledge._utils import extract_knowledge_context
+        from versionhq.memory.contextual_memory import ContextualMemory
 
-        content_messages = {}
+        user_prompt = self.draft_user_prompt()
+        rag_tools = rag_tools if rag_tools else self._find_rag_tools()
 
-        if self.task.image:
-            if is_valid_url(self.task.image):
-                content_messages.update({ "type": "image_url", "image_url": self.task.image })
-            else:
-                content_messages.update({ "type": "image_url", "image_url": { "url": self.task.image }})
+        if self.agent._knowledge:
+            agent_knowledge = self.agent._knowledge.query(query=[user_prompt,], limit=5)
+            if agent_knowledge:
+                agent_knowledge_context = extract_knowledge_context(knowledge_snippets=agent_knowledge)
+                if agent_knowledge_context:
+                    user_prompt += agent_knowledge_context
 
-        if self.task.file:
-            if is_valid_url(self.task.file):
-                content_messages.update({ "type": "image_url", "image_url": self.task.file })
-            else:
-                content_messages.update({ "type": "image_url", "image_url": { "url": self.task.file }})
+        if rag_tools:
+            for item in rag_tools:
+                rag_tool_context = item.run(agent=self.agent, query=self.task.description)
+                if rag_tool_context:
+                    user_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
 
-        if self.task.audio:
-            from pathlib import Path
-            import base64
+        if self.agent.with_memory == True:
+            contextual_memory = ContextualMemory(
+                memory_config=self.agent.memory_config, stm=self.agent.short_term_memory, ltm=self.agent.long_term_memory, um=self.agent.user_memory
+            )
+            context_str = self._draft_context_prompt(context=self.context)
+            query = f"{self.task.description} {context_str}".strip()
+            memory = contextual_memory.build_context_for_task(query=query)
+            if memory.strip() != "":
+                user_prompt += memory.strip()
 
-            audio_bytes = Path(self.audio_file_path).read_bytes()
-            encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
-            content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
 
-        return content_messages
+        ## comment out - training
+        # if self.agent.networks and self.agent.networks._train:
+        #     user_prompt = self.agent._training_handler(user_prompt=user_prompt)
+        # else:
+        #     user_prompt = self.agent._use_trained_data(user_prompt=user_prompt)
+
 
-    @property
-    def messages(self) -> List[Dict[str, str]]:
-        user_prompt = self._draft_user_prompt()
         content_prompt = self._format_content_prompt()
 
         messages = []
-        messages.append(
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": user_prompt
-                    },
-                    content_prompt,
-                ]
-            })
-
-        if self.use_developer_prompt:
-            messages.append({ "role": "developer", "content": self.backstory })
-
-        return messages
+        if content_prompt:
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": user_prompt
+                        },
+                        content_prompt,
+                    ]
+                })
+        else:
+            messages.append({ "role": "user", "content": user_prompt })
+
+        if self.agent.use_developer_prompt:
+            messages.append({ "role": "developer", "content": self.agent.backstory })
+
+        return user_prompt, self.agent.backstory if self.agent.use_developer_prompt else None, messages
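
The net effect of this refactor: the old `messages` property is replaced by `format_core`, which also folds knowledge, RAG-tool, and memory context into the user prompt. A minimal sketch of the new call shape, reusing the `task`/`agent` objects from the earlier example:

```python
prompt = Prompt(task=task, agent=agent, context={"region": "EU"})
user_prompt, dev_prompt, messages = prompt.format_core()

# messages is chat-completion-style input, e.g. [{"role": "user", "content": "..."}],
# plus a {"role": "developer", "content": agent.backstory} entry when
# agent.use_developer_prompt is set; dev_prompt is None otherwise.
```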
versionhq/_utils/__init__.py CHANGED
@@ -2,3 +2,4 @@ from versionhq._utils.logger import Logger
 from versionhq._utils.process_config import process_config
 from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
 from versionhq._utils.is_valid_url import is_valid_url
+from versionhq._utils.usage_metrics import UsageMetrics, ErrorType
versionhq/_utils/usage_metrics.py CHANGED
@@ -1,55 +1,72 @@
-from pydantic import BaseModel, Field
+import uuid
+import enum
+import datetime
+from typing import Dict, List
+from typing_extensions import Self
+
+from pydantic import BaseModel, UUID4, InstanceOf
+
+
+class ErrorType(enum.Enum):
+    FORMAT = 1
+    TOOL = 2
+    API = 3
+    OVERFITTING = 4
+    HUMAN_INTERACTION = 5
 
 
 class UsageMetrics(BaseModel):
-    """
-    Model to track usage
-    """
-
-    total_tokens: int = Field(default=0, description="total number of tokens used")
-    prompt_tokens: int = Field(default=0, description="number of tokens used in prompts")
-    cached_prompt_tokens: int = Field(default=0, description="number of cached prompt tokens used")
-    completion_tokens: int = Field(default=0, description="number of tokens used in completions")
-    successful_requests: int = Field(default=0, description="number of successful requests made")
-
-    def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
-        """
-        Add the usage metrics from another UsageMetrics object.
-        """
-        self.total_tokens += usage_metrics.total_tokens
-        self.prompt_tokens += usage_metrics.prompt_tokens
-        self.cached_prompt_tokens += usage_metrics.cached_prompt_tokens
-        self.completion_tokens += usage_metrics.completion_tokens
-        self.successful_requests += usage_metrics.successful_requests
-
-
-
-# class TokenProcess:
-#     total_tokens: int = 0
-#     prompt_tokens: int = 0
-#     cached_prompt_tokens: int = 0
-#     completion_tokens: int = 0
-#     successful_requests: int = 0
-
-#     def sum_prompt_tokens(self, tokens: int) -> None:
-#         self.prompt_tokens = self.prompt_tokens + tokens
-#         self.total_tokens = self.total_tokens + tokens
-
-#     def sum_completion_tokens(self, tokens: int) -> None:
-#         self.completion_tokens = self.completion_tokens + tokens
-#         self.total_tokens = self.total_tokens + tokens
-
-#     def sum_cached_prompt_tokens(self, tokens: int) -> None:
-#         self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
-
-#     def sum_successful_requests(self, requests: int) -> None:
-#         self.successful_requests = self.successful_requests + requests
-
-#     def get_summary(self) -> UsageMetrics:
-#         return UsageMetrics(
-#             total_tokens=self.total_tokens,
-#             prompt_tokens=self.prompt_tokens,
-#             cached_prompt_tokens=self.cached_prompt_tokens,
-#             completion_tokens=self.completion_tokens,
-#             successful_requests=self.successful_requests,
-#         )
+    """A Pydantic model to manage token usage, errors, job latency."""
+
+    id: UUID4 = uuid.uuid4()  # stores task id or task graph id
+    total_tokens: int = 0
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    successful_requests: int = 0
+    total_errors: int = 0
+    error_breakdown: Dict[ErrorType, int] = dict()
+    latency: float = 0.0  # in ms
+
+    def record_token_usage(self, token_usages: List[Dict[str, int]]) -> None:
+        """Records usage metrics from the raw response of the model."""
+
+        if token_usages:
+            for item in token_usages:
+                self.total_tokens += int(item["total_tokens"]) if "total_tokens" in item else 0
+                self.completion_tokens += int(item["completion_tokens"]) if "completion_tokens" in item else 0
+                self.prompt_tokens += int(item["prompt_tokens"]) if "prompt_tokens" in item else 0
+
+
+    def record_errors(self, type: ErrorType = None) -> None:
+        self.total_errors += 1
+        if type:
+            if type in self.error_breakdown:
+                self.error_breakdown[type] += 1
+            else:
+                self.error_breakdown[type] = 1
+
+
+    def record_latency(self, start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
+        self.latency += round((end_dt - start_dt).total_seconds() * 1000, 3)
+
+
+    def aggregate(self, metrics: InstanceOf["UsageMetrics"]) -> Self:
+        if not metrics:
+            return self
+
+        self.total_tokens += metrics.total_tokens if metrics.total_tokens else 0
+        self.prompt_tokens += metrics.prompt_tokens if metrics.prompt_tokens else 0
+        self.completion_tokens += metrics.completion_tokens if metrics.completion_tokens else 0
+        self.successful_requests += metrics.successful_requests if metrics.successful_requests else 0
+        self.total_errors += metrics.total_errors if metrics.total_errors else 0
+        self.latency += metrics.latency if metrics.latency else 0.0
+        self.latency = round(self.latency, 3)
+
+        if metrics.error_breakdown:
+            for k, v in metrics.error_breakdown.items():
+                if self.error_breakdown and k in self.error_breakdown:
+                    self.error_breakdown[k] += int(v)
+                else:
+                    self.error_breakdown.update({ k: v })
+
+        return self
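
The rewritten `UsageMetrics` folds error and latency tracking into the same object. A minimal sketch using only the methods added above (the token counts are illustrative):

```python
import datetime
from versionhq._utils import UsageMetrics, ErrorType

metrics = UsageMetrics()
start = datetime.datetime.now()
# ... model call happens here ...
end = datetime.datetime.now()

metrics.record_token_usage([{"prompt_tokens": 800, "completion_tokens": 400, "total_tokens": 1200}])
metrics.record_latency(start_dt=start, end_dt=end)  # accumulates milliseconds
metrics.record_errors(type=ErrorType.FORMAT)        # bumps total_errors and the per-type breakdown

rollup = UsageMetrics().aggregate(metrics)          # fold per-task metrics into a graph-level total
```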