annofabcli 1.102.1__py3-none-any.whl → 1.103.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. annofabcli/annotation/annotation_query.py +9 -29
  2. annofabcli/annotation/change_annotation_attributes.py +6 -14
  3. annofabcli/annotation/change_annotation_properties.py +5 -12
  4. annofabcli/annotation/copy_annotation.py +4 -10
  5. annofabcli/annotation/delete_annotation.py +10 -26
  6. annofabcli/annotation/dump_annotation.py +1 -4
  7. annofabcli/annotation/import_annotation.py +16 -40
  8. annofabcli/annotation/list_annotation.py +1 -4
  9. annofabcli/annotation/merge_segmentation.py +5 -15
  10. annofabcli/annotation/remove_segmentation_overlap.py +8 -29
  11. annofabcli/annotation/restore_annotation.py +3 -9
  12. annofabcli/annotation_specs/add_attribute_restriction.py +2 -8
  13. annofabcli/annotation_specs/attribute_restriction.py +2 -10
  14. annofabcli/annotation_specs/export_annotation_specs.py +1 -3
  15. annofabcli/annotation_specs/get_annotation_specs_with_attribute_id_replaced.py +3 -10
  16. annofabcli/annotation_specs/get_annotation_specs_with_choice_id_replaced.py +4 -10
  17. annofabcli/annotation_specs/get_annotation_specs_with_label_id_replaced.py +1 -3
  18. annofabcli/annotation_specs/list_annotation_specs_attribute.py +7 -18
  19. annofabcli/annotation_specs/list_annotation_specs_choice.py +3 -8
  20. annofabcli/annotation_specs/list_annotation_specs_history.py +0 -1
  21. annofabcli/annotation_specs/list_annotation_specs_label.py +3 -8
  22. annofabcli/annotation_specs/list_annotation_specs_label_attribute.py +4 -9
  23. annofabcli/annotation_specs/list_attribute_restriction.py +3 -9
  24. annofabcli/annotation_specs/put_label_color.py +1 -6
  25. annofabcli/comment/delete_comment.py +3 -9
  26. annofabcli/comment/list_all_comment.py +2 -4
  27. annofabcli/comment/list_comment.py +1 -4
  28. annofabcli/comment/put_comment.py +4 -13
  29. annofabcli/comment/put_comment_simply.py +2 -6
  30. annofabcli/comment/put_inspection_comment.py +2 -6
  31. annofabcli/comment/put_inspection_comment_simply.py +3 -6
  32. annofabcli/comment/put_onhold_comment.py +2 -6
  33. annofabcli/comment/put_onhold_comment_simply.py +2 -4
  34. annofabcli/common/cli.py +5 -43
  35. annofabcli/common/download.py +8 -25
  36. annofabcli/common/image.py +5 -9
  37. annofabcli/common/utils.py +1 -3
  38. annofabcli/common/visualize.py +2 -4
  39. annofabcli/filesystem/draw_annotation.py +8 -20
  40. annofabcli/filesystem/filter_annotation.py +7 -24
  41. annofabcli/filesystem/mask_user_info.py +3 -6
  42. annofabcli/filesystem/merge_annotation.py +2 -6
  43. annofabcli/input_data/change_input_data_name.py +3 -7
  44. annofabcli/input_data/copy_input_data.py +6 -14
  45. annofabcli/input_data/delete_input_data.py +7 -24
  46. annofabcli/input_data/delete_metadata_key_of_input_data.py +5 -16
  47. annofabcli/input_data/list_all_input_data.py +5 -14
  48. annofabcli/input_data/list_all_input_data_merged_task.py +8 -23
  49. annofabcli/input_data/list_input_data.py +5 -16
  50. annofabcli/input_data/put_input_data.py +7 -19
  51. annofabcli/input_data/update_metadata_of_input_data.py +6 -14
  52. annofabcli/instruction/list_instruction_history.py +0 -1
  53. annofabcli/instruction/upload_instruction.py +1 -4
  54. annofabcli/job/list_job.py +1 -2
  55. annofabcli/job/list_last_job.py +1 -3
  56. annofabcli/organization/list_organization.py +0 -1
  57. annofabcli/organization_member/change_organization_member.py +1 -3
  58. annofabcli/organization_member/delete_organization_member.py +2 -6
  59. annofabcli/organization_member/invite_organization_member.py +1 -3
  60. annofabcli/organization_member/list_organization_member.py +0 -1
  61. annofabcli/project/change_organization_of_project.py +257 -0
  62. annofabcli/project/change_project_status.py +2 -2
  63. annofabcli/project/copy_project.py +2 -7
  64. annofabcli/project/diff_projects.py +4 -16
  65. annofabcli/project/list_project.py +0 -1
  66. annofabcli/project/put_project.py +2 -6
  67. annofabcli/project/subcommand_project.py +2 -0
  68. annofabcli/project_member/change_project_members.py +2 -2
  69. annofabcli/project_member/copy_project_members.py +2 -7
  70. annofabcli/project_member/drop_project_members.py +1 -3
  71. annofabcli/project_member/invite_project_members.py +1 -3
  72. annofabcli/project_member/list_users.py +0 -1
  73. annofabcli/project_member/put_project_members.py +4 -12
  74. annofabcli/stat_visualization/mask_visualization_dir.py +6 -16
  75. annofabcli/stat_visualization/merge_visualization_dir.py +6 -18
  76. annofabcli/stat_visualization/summarize_whole_performance_csv.py +3 -7
  77. annofabcli/stat_visualization/write_graph.py +5 -15
  78. annofabcli/stat_visualization/write_performance_rating_csv.py +4 -12
  79. annofabcli/statistics/list_annotation_area.py +3 -7
  80. annofabcli/statistics/list_annotation_attribute.py +6 -15
  81. annofabcli/statistics/list_annotation_attribute_filled_count.py +9 -23
  82. annofabcli/statistics/list_annotation_count.py +18 -44
  83. annofabcli/statistics/list_annotation_duration.py +14 -40
  84. annofabcli/statistics/list_video_duration.py +2 -3
  85. annofabcli/statistics/list_worktime.py +0 -1
  86. annofabcli/statistics/scatter.py +3 -9
  87. annofabcli/statistics/summarize_task_count.py +7 -12
  88. annofabcli/statistics/summarize_task_count_by_task_id_group.py +3 -11
  89. annofabcli/statistics/summarize_task_count_by_user.py +1 -5
  90. annofabcli/statistics/visualization/dataframe/annotation_count.py +1 -3
  91. annofabcli/statistics/visualization/dataframe/cumulative_productivity.py +3 -9
  92. annofabcli/statistics/visualization/dataframe/productivity_per_date.py +11 -23
  93. annofabcli/statistics/visualization/dataframe/project_performance.py +1 -3
  94. annofabcli/statistics/visualization/dataframe/task.py +2 -5
  95. annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py +6 -20
  96. annofabcli/statistics/visualization/dataframe/user_performance.py +29 -88
  97. annofabcli/statistics/visualization/dataframe/whole_performance.py +4 -10
  98. annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +17 -49
  99. annofabcli/statistics/visualization/dataframe/worktime_per_date.py +3 -9
  100. annofabcli/statistics/visualization/filtering_query.py +2 -6
  101. annofabcli/statistics/visualization/project_dir.py +9 -26
  102. annofabcli/statistics/visualization/visualization_source_files.py +3 -10
  103. annofabcli/statistics/visualize_annotation_count.py +7 -21
  104. annofabcli/statistics/visualize_annotation_duration.py +7 -17
  105. annofabcli/statistics/visualize_statistics.py +17 -52
  106. annofabcli/statistics/visualize_video_duration.py +8 -19
  107. annofabcli/supplementary/delete_supplementary_data.py +7 -23
  108. annofabcli/supplementary/list_supplementary_data.py +1 -1
  109. annofabcli/supplementary/put_supplementary_data.py +5 -15
  110. annofabcli/task/cancel_acceptance.py +3 -4
  111. annofabcli/task/change_operator.py +3 -11
  112. annofabcli/task/change_status_to_break.py +1 -1
  113. annofabcli/task/change_status_to_on_hold.py +5 -18
  114. annofabcli/task/complete_tasks.py +8 -25
  115. annofabcli/task/copy_tasks.py +2 -3
  116. annofabcli/task/delete_metadata_key_of_task.py +2 -6
  117. annofabcli/task/delete_tasks.py +7 -25
  118. annofabcli/task/list_all_tasks.py +2 -4
  119. annofabcli/task/list_tasks.py +2 -6
  120. annofabcli/task/list_tasks_added_task_history.py +7 -21
  121. annofabcli/task/put_tasks.py +2 -3
  122. annofabcli/task/put_tasks_by_count.py +3 -7
  123. annofabcli/task/reject_tasks.py +7 -19
  124. annofabcli/task/update_metadata_of_task.py +1 -1
  125. annofabcli/task_history/list_all_task_history.py +2 -5
  126. annofabcli/task_history/list_task_history.py +0 -1
  127. annofabcli/task_history_event/list_all_task_history_event.py +4 -11
  128. annofabcli/task_history_event/list_worktime.py +4 -14
  129. {annofabcli-1.102.1.dist-info → annofabcli-1.103.0.dist-info}/METADATA +1 -1
  130. annofabcli-1.103.0.dist-info/RECORD +215 -0
  131. annofabcli-1.102.1.dist-info/RECORD +0 -214
  132. {annofabcli-1.102.1.dist-info → annofabcli-1.103.0.dist-info}/WHEEL +0 -0
  133. {annofabcli-1.102.1.dist-info → annofabcli-1.103.0.dist-info}/entry_points.txt +0 -0
  134. {annofabcli-1.102.1.dist-info → annofabcli-1.103.0.dist-info}/licenses/LICENSE +0 -0
annofabcli/statistics/summarize_task_count.py  +7 -12

@@ -64,15 +64,13 @@ def get_step_for_current_phase(task: Task, number_of_inspections: int) -> int:
 
     elif current_phase == TaskPhase.ANNOTATION:
         number_of_rejections_by_inspection = sum(
-            get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage)
-            for phase_stage in range(1, number_of_inspections + 1)
+            get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage) for phase_stage in range(1, number_of_inspections + 1)
         )
         return number_of_rejections_by_inspection + number_of_rejections_by_acceptance + 1
 
     elif current_phase == TaskPhase.INSPECTION:
         number_of_rejections_by_inspection = sum(
-            get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage)
-            for phase_stage in range(current_phase_stage, number_of_inspections + 1)
+            get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage) for phase_stage in range(current_phase_stage, number_of_inspections + 1)
         )
         return number_of_rejections_by_inspection + number_of_rejections_by_acceptance + 1
 
@@ -119,9 +117,7 @@ def create_task_count_summary(task_list: list[Task], number_of_inspections: int)
     # `observed=True`を指定する理由:以下の警告に対応するため
     # FutureWarning: The default value of observed=False is deprecated and will change to observed=True in a future version of pandas.
     # Specify observed=False to silence this warning and retain the current behavior
-    summary_df = df.pivot_table(
-        values="task_id", index=["step", "phase", "phase_stage", "simple_status"], aggfunc="count", observed=False
-    ).reset_index()
+    summary_df = df.pivot_table(values="task_id", index=["step", "phase", "phase_stage", "simple_status"], aggfunc="count", observed=False).reset_index()
     summary_df.rename(columns={"task_id": "task_count"}, inplace=True)
 
     summary_df.sort_values(["step", "phase", "phase_stage"])
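The comment kept in this hunk points at pandas' FutureWarning about the default value of `observed` for categorical group keys. A minimal, self-contained sketch of the same kind of `pivot_table(..., observed=False)` call, on toy data rather than annofabcli's actual task DataFrame:

```python
import pandas as pd

# Toy frame: "phase" is categorical, which is what triggers the FutureWarning on
# recent pandas when `observed` is not passed explicitly to pivot_table/groupby.
df = pd.DataFrame(
    {
        "task_id": ["t1", "t2", "t3"],
        "phase": pd.Categorical(["annotation", "annotation", "acceptance"], categories=["annotation", "inspection", "acceptance"]),
        "simple_status": ["completed", "working", "completed"],
    }
)

# observed=False keeps the long-standing grouping behavior for categorical keys
# and silences the warning, mirroring the call in the hunk above.
summary_df = df.pivot_table(values="task_id", index=["phase", "simple_status"], aggfunc="count", observed=False).reset_index()
print(summary_df)
```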
@@ -146,7 +142,7 @@ class SummarizeTaskCount(CommandLine):
         # タスク全件ファイルをダウンロードするので、オーナロールかアノテーションユーザロールであることを確認する。
         super().validate_project(project_id, project_member_roles=[ProjectMemberRole.OWNER, ProjectMemberRole.TRAINING_DATA_USER])
 
-        if is_execute_get_tasks_api:
+        if is_execute_get_tasks_api:  # noqa: SIM108
             task_list = self.service.wrapper.get_all_tasks(project_id)
         else:
             task_list = self.get_task_list_with_downloading_file(project_id, task_json_path, is_latest=is_latest)
@@ -157,7 +153,7 @@ class SummarizeTaskCount(CommandLine):
 
         number_of_inspections = self.get_number_of_inspections_for_project(project_id)
         task_count_df = create_task_count_summary(task_list, number_of_inspections=number_of_inspections)
-        annofabcli.common.utils.print_csv(task_count_df, output=self.output, to_csv_kwargs=self.csv_format)
+        annofabcli.common.utils.print_csv(task_count_df, output=self.output)
 
     def get_task_list_with_downloading_file(self, project_id: str, task_json_path: Optional[Path], is_latest: bool) -> list[Task]:  # noqa: FBT001
         if task_json_path is None:
@@ -207,10 +203,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
         "--execute_get_tasks_api",
         action="store_true",
-        help="[EXPERIMENTAL] ``getTasks`` APIを実行して、タスク情報を参照します。タスク数が少ないプロジェクトで、最新のタスク情報を参照したいときに利用できます。",  # noqa: E501
+        help="[EXPERIMENTAL] ``getTasks`` APIを実行して、タスク情報を参照します。タスク数が少ないプロジェクトで、最新のタスク情報を参照したいときに利用できます。",
     )
 
-    argument_parser.add_csv_format()
     argument_parser.add_output()
 
     parser.set_defaults(subcommand_func=main)
@@ -226,7 +221,7 @@ def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argpa
     subcommand_name = "summarize_task_count"
     subcommand_help = "タスクのフェーズ、ステータス、ステップごとにタスク数を出力します。"
     description = "タスクのフェーズ、ステータス、ステップごとにタスク数を、CSV形式で出力します。"
-    epilog = "アノテーションユーザまたはオーナロールを持つユーザで実行できます。ただし``--execute_get_tasks_api``を指定した場合は、どのロールでも実行できます。"  # noqa: E501
+    epilog = "アノテーションユーザまたはオーナロールを持つユーザで実行できます。ただし``--execute_get_tasks_api``を指定した場合は、どのロールでも実行できます。"
     parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description=description, epilog=epilog)
     parse_args(parser)
     return parser
annofabcli/statistics/summarize_task_count_by_task_id_group.py  +3 -11

@@ -97,9 +97,7 @@ def get_task_id_prefix(task_id: str, delimiter: str) -> str:
     return delimiter.join(tmp_list[0 : len(tmp_list) - 1])
 
 
-def create_task_count_summary_df(
-    task_list: list[Task], task_id_delimiter: Optional[str], task_id_groups: Optional[dict[str, list[str]]]
-) -> pandas.DataFrame:
+def create_task_count_summary_df(task_list: list[Task], task_id_delimiter: Optional[str], task_id_groups: Optional[dict[str, list[str]]]) -> pandas.DataFrame:
     """
     タスク数を集計したDataFrameを生成する。
 
@@ -131,16 +129,12 @@ def create_task_count_summary_df(
 
     df_task.fillna({"task_id_group": TASK_ID_GROUP_UNKNOWN}, inplace=True)
 
-    df_summary = df_task.pivot_table(
-        values="task_id", index=["task_id_group"], columns=["status_for_summary"], aggfunc="count", fill_value=0
-    ).reset_index()
+    df_summary = df_task.pivot_table(values="task_id", index=["task_id_group"], columns=["status_for_summary"], aggfunc="count", fill_value=0).reset_index()
 
     for status in TaskStatusForSummary:
         add_columns_if_not_exists(df_summary, status.value)
 
-    df_summary["sum"] = (
-        df_task.pivot_table(values="task_id", index=["task_id_group"], aggfunc="count", fill_value=0).reset_index().fillna(0)["task_id"]
-    )
+    df_summary["sum"] = df_task.pivot_table(values="task_id", index=["task_id_group"], aggfunc="count", fill_value=0).reset_index().fillna(0)["task_id"]
 
     return df_summary
 
@@ -152,7 +146,6 @@ class SummarizeTaskCountByTaskId(CommandLine):
             df[columns],
             format=FormatArgument(FormatArgument.CSV),
             output=self.output,
-            csv_format=self.csv_format,
         )
 
     def main(self) -> None:
@@ -207,7 +200,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
         help="最新のタスク一覧ファイルを参照します。このオプションを指定すると、タスク一覧ファイルを更新するのに数分待ちます。",
     )
 
-    argument_parser.add_csv_format()
     argument_parser.add_output()
 
     parser.set_defaults(subcommand_func=main)
annofabcli/statistics/summarize_task_count_by_user.py  +1 -5

@@ -80,9 +80,7 @@ def create_task_count_summary_df(task_list: list[Task]) -> pandas.DataFrame:
             df[column] = 0
 
     df_task = pandas.DataFrame([add_info_to_task(t) for t in task_list])
-    df_summary = df_task.pivot_table(
-        values="task_id", index=["account_id"], columns=["status_for_summary"], aggfunc="count", fill_value=0
-    ).reset_index()
+    df_summary = df_task.pivot_table(values="task_id", index=["account_id"], columns=["status_for_summary"], aggfunc="count", fill_value=0).reset_index()
     for status in TaskStatusForSummary:
         add_columns_if_not_exists(df_summary, status.value)
 
@@ -116,7 +114,6 @@ class SummarizeTaskCountByUser(CommandLine):
             target_df,
             format=FormatArgument(FormatArgument.CSV),
             output=self.output,
-            csv_format=self.csv_format,
         )
 
     def main(self) -> None:
@@ -160,7 +157,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
         help="最新のタスク一覧ファイルを参照します。このオプションを指定すると、タスク一覧ファイルを更新するのに数分待ちます。",
     )
 
-    argument_parser.add_csv_format()
     argument_parser.add_output()
 
     parser.set_defaults(subcommand_func=main)
annofabcli/statistics/visualization/dataframe/annotation_count.py  +1 -3

@@ -42,9 +42,7 @@ class AnnotationCount:
         self.df = df
 
     @classmethod
-    def from_annotation_zip(
-        cls, annotation_zip: Path, project_id: str, *, get_annotation_count_func: Optional[Callable[[dict[str, Any]], int]] = None
-    ) -> AnnotationCount:
+    def from_annotation_zip(cls, annotation_zip: Path, project_id: str, *, get_annotation_count_func: Optional[Callable[[dict[str, Any]], int]] = None) -> AnnotationCount:
         """
         アノテーションZIPファイルからインスタンスを生成します。
 
annofabcli/statistics/visualization/dataframe/cumulative_productivity.py  +3 -9

@@ -72,9 +72,7 @@ def _create_cumulative_dataframe(task_worktime_by_phase_user: TaskWorktimeByPhas
 class AbstractPhaseCumulativeProductivity(abc.ABC):
     """ロールごとの累積の生産性をプロットするための抽象クラス"""
 
-    def __init__(
-        self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> None:
+    def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> None:
         self.df = df
         self.phase = phase
         self.phase_name = self._get_phase_name(phase)
@@ -104,9 +102,7 @@ class AbstractPhaseCumulativeProductivity(abc.ABC):
             return False
 
         if len(self.default_user_id_list) == 0:
-            logger.info(
-                f"{self.phase_name}作業したタスクが0件なので('first_{self.phase.value}_user_id'がすべて空欄)、{output_file} を出力しません。"
-            )
+            logger.info(f"{self.phase_name}作業したタスクが0件なので('first_{self.phase.value}_user_id'がすべて空欄)、{output_file} を出力しません。")
             return False
 
         return True
@@ -138,9 +134,7 @@ class AbstractPhaseCumulativeProductivity(abc.ABC):
             たとえば、20000件をプロットする際、すべての列を出力すると、そうでないときに比べてファイルサイズが3倍以上になる
             """
             xy_columns = set(itertools.chain.from_iterable(columns for columns in columns_list))
-            tooltip_columns = set(
-                itertools.chain.from_iterable(line_graph.tooltip_columns for line_graph in line_graph_list if line_graph.tooltip_columns is not None)
-            )
+            tooltip_columns = set(itertools.chain.from_iterable(line_graph.tooltip_columns for line_graph in line_graph_list if line_graph.tooltip_columns is not None))
             return list(xy_columns | tooltip_columns)
 
         df = self.df
annofabcli/statistics/visualization/dataframe/productivity_per_date.py  +11 -23

@@ -41,9 +41,7 @@ def create_df_productivity_per_date(task_worktime_by_phase_user: TaskWorktimeByP
     df = df[df["phase"] == str_phase]
     df = df.rename(columns={"pointed_out_inspection_comment_count": "inspection_comment_count", "worktime_hour": f"{str_phase}_worktime_hour"})
 
-    df[f"first_{str_phase}_started_date"] = df["started_datetime"].map(
-        lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None
-    )
+    df[f"first_{str_phase}_started_date"] = df["started_datetime"].map(lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None)
 
     # first_annotation_user_id と first_annotation_usernameの両方を指定している理由:
     # first_annotation_username を取得するため
@@ -82,9 +80,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
     PLOT_WIDTH = 1200
     PLOT_HEIGHT = 600
 
-    def __init__(
-        self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> None:
+    def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> None:
         self.df = df
         self.phase = phase
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []
@@ -209,9 +205,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
             *self.production_volume_columns,
         ]
 
-        velocity_columns = [
-            f"{numerator}/{denominator}" for numerator in [f"{str_phase}_worktime_hour"] for denominator in self.production_volume_columns
-        ]
+        velocity_columns = [f"{numerator}/{denominator}" for numerator in [f"{str_phase}_worktime_hour"] for denominator in self.production_volume_columns]
 
         columns = production_columns + velocity_columns
 
@@ -337,15 +331,13 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
                 continue
 
             df_subset = self._get_df_sequential_date(df_subset)
-            df_subset[f"annotation_worktime_minute/{production_volume_column}"] = (
-                df_subset["annotation_worktime_hour"] * 60 / df_subset[production_volume_column]
-            )
+            df_subset[f"annotation_worktime_minute/{production_volume_column}"] = df_subset["annotation_worktime_hour"] * 60 / df_subset[production_volume_column]
             df_subset[f"annotation_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
                 get_weekly_sum(df_subset["annotation_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
             )
-            df_subset[f"inspection_comment_count/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
-                df_subset["inspection_comment_count"]
-            ) / get_weekly_sum(df_subset[production_volume_column])
+            df_subset[f"inspection_comment_count/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df_subset["inspection_comment_count"]) / get_weekly_sum(
+                df_subset[production_volume_column]
+            )
 
             source = ColumnDataSource(data=df_subset)
             color = get_color_from_palette(user_index)
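The weekly moving-average columns reflowed above are ratios of weekly sums, not averages of daily ratios. A hedged sketch of that idea on invented numbers, assuming `get_weekly_sum` is a trailing 7-day rolling sum (its implementation is not part of this diff):

```python
import pandas as pd


def get_weekly_sum(series: pd.Series) -> pd.Series:
    # Assumption for illustration only; annofabcli's helper may be defined differently.
    return series.rolling(7, min_periods=1).sum()


df_subset = pd.DataFrame(
    {
        "annotation_worktime_hour": [1.0, 2.0, 1.5, 0.5, 2.5, 1.0, 2.0, 1.0],
        "annotation_count": [10, 25, 15, 5, 30, 10, 20, 12],
    }
)

# Minutes per annotation smoothed over a week: divide the weekly worktime sum by
# the weekly annotation sum, as the joined lines do per production_volume_column.
df_subset["annotation_worktime_minute/annotation_count__weekly"] = (
    get_weekly_sum(df_subset["annotation_worktime_hour"]) * 60 / get_weekly_sum(df_subset["annotation_count"])
)
print(df_subset)
```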
@@ -414,7 +406,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
 
         df = self.df.copy()
 
-        if target_user_id_list is not None:
+        if target_user_id_list is not None:  # noqa: SIM108
             user_id_list = target_user_id_list
         else:
             user_id_list = df.sort_values(by="user_id", ascending=False)["user_id"].dropna().unique().tolist()
@@ -475,9 +467,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
                 continue
 
             df_subset = self._get_df_sequential_date(df_subset)
-            df_subset[f"inspection_worktime_minute/{production_volume_column}"] = (
-                df_subset["inspection_worktime_hour"] * 60 / df_subset[production_volume_column]
-            )
+            df_subset[f"inspection_worktime_minute/{production_volume_column}"] = df_subset["inspection_worktime_hour"] * 60 / df_subset[production_volume_column]
             df_subset[f"inspection_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
                 get_weekly_sum(df_subset["inspection_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
             )
@@ -554,7 +544,7 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
 
         df = self.df.copy()
 
-        if target_user_id_list is not None:
+        if target_user_id_list is not None:  # noqa: SIM108
             user_id_list = target_user_id_list
         else:
             user_id_list = df.sort_values(by="user_id", ascending=False)["user_id"].dropna().unique().tolist()
@@ -616,9 +606,7 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
                 continue
 
             df_subset = self._get_df_sequential_date(df_subset)
-            df_subset[f"acceptance_worktime_minute/{production_volume_column}"] = (
-                df_subset["acceptance_worktime_hour"] * 60 / df_subset[production_volume_column]
-            )
+            df_subset[f"acceptance_worktime_minute/{production_volume_column}"] = df_subset["acceptance_worktime_hour"] * 60 / df_subset[production_volume_column]
 
             df_subset[f"acceptance_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
                 get_weekly_sum(df_subset["acceptance_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
annofabcli/statistics/visualization/dataframe/project_performance.py  +1 -3

@@ -89,9 +89,7 @@ class ProjectPerformance:
         return [e.value for e in TaskPhase if e.value in tmp_set]
 
     @classmethod
-    def from_project_dirs(
-        cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> ProjectPerformance:
+    def from_project_dirs(cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> ProjectPerformance:
         row_list: list[pandas.Series] = [cls._get_series_from_project_dir(project_dir) for project_dir in project_dir_list]
         return cls(pandas.DataFrame(row_list), custom_production_volume_list=custom_production_volume_list)
 
annofabcli/statistics/visualization/dataframe/task.py  +2 -5

@@ -71,10 +71,7 @@ class Task:
             logger.warning("引数`df`に重複したキー(project_id, task_id)が含まれています。")
 
         if not self.required_columns_exist(df):
-            raise ValueError(
-                f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: "
-                f"次の列が必須です。{self.required_columns} の列が必要です。"
-            )
+            raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: 次の列が必須です。{self.required_columns} の列が必要です。")
 
         self.df = df
 
@@ -409,7 +406,7 @@ class Task:
 
         # タイムゾーンを指定している理由::
         # すべてがNaNのseriesをdatetimeに変換すると、型にタイムゾーンが指定されない。
-        # その状態で加算すると、`TypeError: DatetimeArray subtraction must have the same timezones or no timezones`というエラーが発生するため  # noqa: E501
+        # その状態で加算すると、`TypeError: DatetimeArray subtraction must have the same timezones or no timezones`というエラーが発生するため
         if not isinstance(dt1.dtype, pandas.DatetimeTZDtype):
             dt1 = dt1.dt.tz_localize(pytz.FixedOffset(540))
         if not isinstance(dt2.dtype, pandas.DatetimeTZDtype):
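The comment shortened in this hunk describes a real pandas pitfall: a column that is entirely NaN comes out of `to_datetime` timezone-naive, and subtracting it from a timezone-aware column raises a `TypeError`. A small self-contained sketch of the guard, with illustrative values only:

```python
import pandas as pd
import pytz

# dt2 is timezone-aware (localized to UTC+9, i.e. pytz.FixedOffset(540)).
dt2 = pd.to_datetime(pd.Series(["2024-01-01T09:00:00", "2024-01-02T18:30:00"])).dt.tz_localize(pytz.FixedOffset(540))

# dt1 is all NaN, so to_datetime() leaves it timezone-naive; subtracting it from dt2
# as-is would raise:
#   TypeError: DatetimeArray subtraction must have the same timezones or no timezones
dt1 = pd.to_datetime(pd.Series([None, None]))

# The guard from the hunk above: localize only when the dtype is not already tz-aware.
if not isinstance(dt1.dtype, pd.DatetimeTZDtype):
    dt1 = dt1.dt.tz_localize(pytz.FixedOffset(540))  # 540 minutes = UTC+9
if not isinstance(dt2.dtype, pd.DatetimeTZDtype):
    dt2 = dt2.dt.tz_localize(pytz.FixedOffset(540))

print(dt2 - dt1)  # timedelta Series (all NaT here, because dt1 has no values)
```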
annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py  +6 -20

@@ -107,9 +107,7 @@ class TaskWorktimeByPhaseUser:
             logger.warning("引数`df`に重複したキー(project_id, task_id, phase, phase_stage, account_id)が含まれています。")
 
         if not self.required_columns_exist(df):
-            raise ValueError(
-                f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。"
-            )
+            raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。")
 
         self.df = df
 
@@ -141,9 +139,7 @@ class TaskWorktimeByPhaseUser:
             project_id
         """
         df_task = task.df
-        df_worktime_ratio = cls._create_annotation_count_ratio_df(
-            task_history.df, task.df, custom_production_volume_columns=[e.value for e in task.custom_production_volume_list]
-        )
+        df_worktime_ratio = cls._create_annotation_count_ratio_df(task_history.df, task.df, custom_production_volume_columns=[e.value for e in task.custom_production_volume_list])
         if len(df_worktime_ratio) == 0:
             return cls.empty()
 
@@ -239,9 +235,7 @@ class TaskWorktimeByPhaseUser:
         return TaskWorktimeByPhaseUser(df, custom_production_volume_list=self.custom_production_volume_list)
 
     @staticmethod
-    def _create_annotation_count_ratio_df(
-        task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: Optional[list[str]]
-    ) -> pandas.DataFrame:
+    def _create_annotation_count_ratio_df(task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: Optional[list[str]]) -> pandas.DataFrame:
         """
         task_id, phase, (phase_index), user_idの作業時間比から、アノテーション数などの生産量を求める
 
@@ -273,11 +267,7 @@ class TaskWorktimeByPhaseUser:
 
         task_history_df = task_history_df[task_history_df["task_id"].isin(set(task_df["task_id"]))]
 
-        group_obj = (
-            task_history_df.sort_values("started_datetime")
-            .groupby(["task_id", "phase", "phase_stage", "account_id"])
-            .agg({"worktime_hour": "sum", "started_datetime": "first"})
-        )
+        group_obj = task_history_df.sort_values("started_datetime").groupby(["task_id", "phase", "phase_stage", "account_id"]).agg({"worktime_hour": "sum", "started_datetime": "first"})
         # 担当者だけ変更して作業していないケースを除外する
         group_obj = group_obj[group_obj["worktime_hour"] > 0]
 
@@ -285,9 +275,7 @@ class TaskWorktimeByPhaseUser:
             logger.warning("タスク履歴情報に作業しているタスクがありませんでした。タスク履歴全件ファイルが更新されていない可能性があります。")
             return pandas.DataFrame()
 
-        group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[["worktime_hour"]].apply(
-            lambda e: e / e["worktime_hour"].sum()
-        )
+        group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[["worktime_hour"]].apply(lambda e: e / e["worktime_hour"].sum())
 
         quantity_columns = [
             "annotation_count",
@@ -302,9 +290,7 @@ class TaskWorktimeByPhaseUser:
             group_obj[col] = group_obj.apply(sub_get_quantity_value, axis="columns")
 
         new_df = group_obj.reset_index()
-        new_df["pointed_out_inspection_comment_count"] = new_df["pointed_out_inspection_comment_count"] * new_df["phase"].apply(
-            lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0
-        )
+        new_df["pointed_out_inspection_comment_count"] = new_df["pointed_out_inspection_comment_count"] * new_df["phase"].apply(lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0)
         new_df["rejected_count"] = new_df["rejected_count"] * new_df["phase"].apply(lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0)
 
         return new_df
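The hunks above distribute task-level quantities (annotation counts, inspection comment counts, and so on) across users in proportion to each user's share of the worktime within a (task_id, phase, phase_stage) group. A simplified sketch of that ratio on toy data, using `groupby().transform()` instead of the index-level `groupby().apply()` that `_create_annotation_count_ratio_df` actually uses:

```python
import pandas as pd

# Toy task-history rows; column names follow the diff, the values are invented.
task_history_df = pd.DataFrame(
    {
        "task_id": ["t1", "t1", "t2"],
        "phase": ["annotation", "annotation", "annotation"],
        "phase_stage": [1, 1, 1],
        "account_id": ["alice", "bob", "alice"],
        "worktime_hour": [2.0, 1.0, 3.0],
    }
)

# Sum each user's worktime per (task_id, phase, phase_stage), then drop assignees
# who did no work, mirroring `group_obj[group_obj["worktime_hour"] > 0]` above.
grouped = task_history_df.groupby(["task_id", "phase", "phase_stage", "account_id"], as_index=False).agg({"worktime_hour": "sum"})
grouped = grouped[grouped["worktime_hour"] > 0]

# Each user's share of the task: their worktime divided by the group total.
# Task-level quantities can then be multiplied by this ratio to split them per user.
grouped["task_count"] = grouped.groupby(["task_id", "phase", "phase_stage"])["worktime_hour"].transform(lambda s: s / s.sum())

print(grouped)  # alice gets 2/3 of t1, bob 1/3; alice gets all of t2
```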