annofabcli 1.102.0__py3-none-any.whl → 1.103.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as published to their public registry, and is provided for informational purposes only.
Files changed (134)
  1. annofabcli/annotation/annotation_query.py +9 -29
  2. annofabcli/annotation/change_annotation_attributes.py +6 -14
  3. annofabcli/annotation/change_annotation_properties.py +5 -12
  4. annofabcli/annotation/copy_annotation.py +9 -11
  5. annofabcli/annotation/delete_annotation.py +21 -26
  6. annofabcli/annotation/dump_annotation.py +1 -4
  7. annofabcli/annotation/import_annotation.py +16 -40
  8. annofabcli/annotation/list_annotation.py +1 -4
  9. annofabcli/annotation/merge_segmentation.py +10 -16
  10. annofabcli/annotation/remove_segmentation_overlap.py +14 -30
  11. annofabcli/annotation/restore_annotation.py +3 -9
  12. annofabcli/annotation_specs/add_attribute_restriction.py +2 -8
  13. annofabcli/annotation_specs/attribute_restriction.py +2 -10
  14. annofabcli/annotation_specs/export_annotation_specs.py +1 -3
  15. annofabcli/annotation_specs/get_annotation_specs_with_attribute_id_replaced.py +3 -10
  16. annofabcli/annotation_specs/get_annotation_specs_with_choice_id_replaced.py +4 -10
  17. annofabcli/annotation_specs/get_annotation_specs_with_label_id_replaced.py +1 -3
  18. annofabcli/annotation_specs/list_annotation_specs_attribute.py +7 -18
  19. annofabcli/annotation_specs/list_annotation_specs_choice.py +3 -8
  20. annofabcli/annotation_specs/list_annotation_specs_history.py +0 -1
  21. annofabcli/annotation_specs/list_annotation_specs_label.py +3 -8
  22. annofabcli/annotation_specs/list_annotation_specs_label_attribute.py +4 -9
  23. annofabcli/annotation_specs/list_attribute_restriction.py +3 -9
  24. annofabcli/annotation_specs/put_label_color.py +1 -6
  25. annofabcli/comment/delete_comment.py +3 -9
  26. annofabcli/comment/list_all_comment.py +2 -4
  27. annofabcli/comment/list_comment.py +1 -4
  28. annofabcli/comment/put_comment.py +4 -13
  29. annofabcli/comment/put_comment_simply.py +2 -6
  30. annofabcli/comment/put_inspection_comment.py +2 -6
  31. annofabcli/comment/put_inspection_comment_simply.py +3 -6
  32. annofabcli/comment/put_onhold_comment.py +2 -6
  33. annofabcli/comment/put_onhold_comment_simply.py +2 -4
  34. annofabcli/common/cli.py +5 -43
  35. annofabcli/common/download.py +8 -25
  36. annofabcli/common/image.py +5 -9
  37. annofabcli/common/utils.py +1 -3
  38. annofabcli/common/visualize.py +2 -4
  39. annofabcli/filesystem/draw_annotation.py +8 -20
  40. annofabcli/filesystem/filter_annotation.py +7 -24
  41. annofabcli/filesystem/mask_user_info.py +3 -6
  42. annofabcli/filesystem/merge_annotation.py +2 -6
  43. annofabcli/input_data/change_input_data_name.py +3 -7
  44. annofabcli/input_data/copy_input_data.py +6 -14
  45. annofabcli/input_data/delete_input_data.py +7 -24
  46. annofabcli/input_data/delete_metadata_key_of_input_data.py +5 -16
  47. annofabcli/input_data/list_all_input_data.py +5 -14
  48. annofabcli/input_data/list_all_input_data_merged_task.py +8 -23
  49. annofabcli/input_data/list_input_data.py +5 -16
  50. annofabcli/input_data/put_input_data.py +7 -19
  51. annofabcli/input_data/update_metadata_of_input_data.py +6 -14
  52. annofabcli/instruction/list_instruction_history.py +0 -1
  53. annofabcli/instruction/upload_instruction.py +1 -4
  54. annofabcli/job/list_job.py +1 -2
  55. annofabcli/job/list_last_job.py +1 -3
  56. annofabcli/organization/list_organization.py +0 -1
  57. annofabcli/organization_member/change_organization_member.py +1 -3
  58. annofabcli/organization_member/delete_organization_member.py +32 -16
  59. annofabcli/organization_member/invite_organization_member.py +25 -14
  60. annofabcli/organization_member/list_organization_member.py +0 -1
  61. annofabcli/project/change_organization_of_project.py +257 -0
  62. annofabcli/project/change_project_status.py +2 -2
  63. annofabcli/project/copy_project.py +2 -7
  64. annofabcli/project/diff_projects.py +4 -16
  65. annofabcli/project/list_project.py +0 -1
  66. annofabcli/project/put_project.py +2 -6
  67. annofabcli/project/subcommand_project.py +2 -0
  68. annofabcli/project_member/change_project_members.py +2 -2
  69. annofabcli/project_member/copy_project_members.py +2 -7
  70. annofabcli/project_member/drop_project_members.py +1 -3
  71. annofabcli/project_member/invite_project_members.py +1 -3
  72. annofabcli/project_member/list_users.py +0 -1
  73. annofabcli/project_member/put_project_members.py +4 -12
  74. annofabcli/stat_visualization/mask_visualization_dir.py +6 -16
  75. annofabcli/stat_visualization/merge_visualization_dir.py +6 -18
  76. annofabcli/stat_visualization/summarize_whole_performance_csv.py +3 -7
  77. annofabcli/stat_visualization/write_graph.py +5 -15
  78. annofabcli/stat_visualization/write_performance_rating_csv.py +4 -12
  79. annofabcli/statistics/list_annotation_area.py +3 -7
  80. annofabcli/statistics/list_annotation_attribute.py +6 -15
  81. annofabcli/statistics/list_annotation_attribute_filled_count.py +9 -23
  82. annofabcli/statistics/list_annotation_count.py +18 -44
  83. annofabcli/statistics/list_annotation_duration.py +14 -40
  84. annofabcli/statistics/list_video_duration.py +2 -3
  85. annofabcli/statistics/list_worktime.py +0 -1
  86. annofabcli/statistics/scatter.py +3 -9
  87. annofabcli/statistics/summarize_task_count.py +7 -12
  88. annofabcli/statistics/summarize_task_count_by_task_id_group.py +3 -11
  89. annofabcli/statistics/summarize_task_count_by_user.py +1 -5
  90. annofabcli/statistics/visualization/dataframe/annotation_count.py +1 -3
  91. annofabcli/statistics/visualization/dataframe/cumulative_productivity.py +3 -9
  92. annofabcli/statistics/visualization/dataframe/productivity_per_date.py +11 -23
  93. annofabcli/statistics/visualization/dataframe/project_performance.py +1 -3
  94. annofabcli/statistics/visualization/dataframe/task.py +2 -5
  95. annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py +6 -20
  96. annofabcli/statistics/visualization/dataframe/user_performance.py +29 -88
  97. annofabcli/statistics/visualization/dataframe/whole_performance.py +4 -10
  98. annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +17 -49
  99. annofabcli/statistics/visualization/dataframe/worktime_per_date.py +3 -9
  100. annofabcli/statistics/visualization/filtering_query.py +2 -6
  101. annofabcli/statistics/visualization/project_dir.py +9 -26
  102. annofabcli/statistics/visualization/visualization_source_files.py +3 -10
  103. annofabcli/statistics/visualize_annotation_count.py +7 -21
  104. annofabcli/statistics/visualize_annotation_duration.py +7 -17
  105. annofabcli/statistics/visualize_statistics.py +17 -52
  106. annofabcli/statistics/visualize_video_duration.py +8 -19
  107. annofabcli/supplementary/delete_supplementary_data.py +7 -23
  108. annofabcli/supplementary/list_supplementary_data.py +1 -1
  109. annofabcli/supplementary/put_supplementary_data.py +5 -15
  110. annofabcli/task/cancel_acceptance.py +3 -4
  111. annofabcli/task/change_operator.py +3 -11
  112. annofabcli/task/change_status_to_break.py +1 -1
  113. annofabcli/task/change_status_to_on_hold.py +5 -18
  114. annofabcli/task/complete_tasks.py +8 -25
  115. annofabcli/task/copy_tasks.py +2 -3
  116. annofabcli/task/delete_metadata_key_of_task.py +2 -6
  117. annofabcli/task/delete_tasks.py +7 -25
  118. annofabcli/task/list_all_tasks.py +2 -4
  119. annofabcli/task/list_tasks.py +2 -6
  120. annofabcli/task/list_tasks_added_task_history.py +7 -21
  121. annofabcli/task/put_tasks.py +2 -3
  122. annofabcli/task/put_tasks_by_count.py +3 -7
  123. annofabcli/task/reject_tasks.py +7 -19
  124. annofabcli/task/update_metadata_of_task.py +1 -1
  125. annofabcli/task_history/list_all_task_history.py +2 -5
  126. annofabcli/task_history/list_task_history.py +0 -1
  127. annofabcli/task_history_event/list_all_task_history_event.py +4 -11
  128. annofabcli/task_history_event/list_worktime.py +4 -14
  129. {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/METADATA +1 -1
  130. annofabcli-1.103.0.dist-info/RECORD +215 -0
  131. annofabcli-1.102.0.dist-info/RECORD +0 -214
  132. {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/WHEEL +0 -0
  133. {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/entry_points.txt +0 -0
  134. {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/licenses/LICENSE +0 -0
@@ -219,13 +219,9 @@ class ListAnnotationCounterByInputData:
 
 annotation_count_by_label = collections.Counter([e["label"] for e in details])
 if self.target_labels is not None:
- annotation_count_by_label = collections.Counter(
- {label: count for label, count in annotation_count_by_label.items() if label in self.target_labels}
- )
+ annotation_count_by_label = collections.Counter({label: count for label, count in annotation_count_by_label.items() if label in self.target_labels})
 if self.non_target_labels is not None:
- annotation_count_by_label = collections.Counter(
- {label: count for label, count in annotation_count_by_label.items() if label not in self.non_target_labels}
- )
+ annotation_count_by_label = collections.Counter({label: count for label, count in annotation_count_by_label.items() if label not in self.non_target_labels})
 
 attributes_list: list[AttributeValueKey] = []
 for detail in details:
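The hunk above only reflows the label-filtering logic onto single lines; the behavior is unchanged. A minimal standalone sketch of the same Counter pattern, with made-up labels rather than anything from the package:

import collections

details = [{"label": "car"}, {"label": "car"}, {"label": "person"}, {"label": "bike"}]
target_labels = {"car", "person"}   # hypothetical allow-list
non_target_labels = {"person"}      # hypothetical deny-list

count_by_label = collections.Counter(e["label"] for e in details)
count_by_label = collections.Counter({label: n for label, n in count_by_label.items() if label in target_labels})
count_by_label = collections.Counter({label: n for label, n in count_by_label.items() if label not in non_target_labels})
print(count_by_label)  # Counter({'car': 2})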
@@ -413,10 +409,9 @@ class AttributeCountCsv:
 Args:
 selective_attribute_value_max_count: 選択肢系の属性の値の個数の上限。これを超えた場合は、非選択肢系属性(トラッキングIDやアノテーションリンクなど)とみなす
 
- """ # noqa: E501
+ """
 
- def __init__(self, selective_attribute_value_max_count: int = 20, csv_format: Optional[dict[str, Any]] = None) -> None:
- self.csv_format = csv_format
+ def __init__(self, selective_attribute_value_max_count: int = 20) -> None:
 self.selective_attribute_value_max_count = selective_attribute_value_max_count
 
 def _only_selective_attribute(self, columns: list[AttributeValueKey]) -> list[AttributeValueKey]:
@@ -429,23 +424,13 @@ class AttributeCountCsv:
 for label, attribute_name, _ in columns:
 attribute_name_list.append((label, attribute_name))
 
- non_selective_attribute_names = {
- key for key, value in collections.Counter(attribute_name_list).items() if value > self.selective_attribute_value_max_count
- }
+ non_selective_attribute_names = {key for key, value in collections.Counter(attribute_name_list).items() if value > self.selective_attribute_value_max_count}
 if len(non_selective_attribute_names) > 0:
- logger.debug(
- f"以下の属性は値の個数が{self.selective_attribute_value_max_count}を超えていたため、集計しません。 :: {non_selective_attribute_names}"
- )
+ logger.debug(f"以下の属性は値の個数が{self.selective_attribute_value_max_count}を超えていたため、集計しません。 :: {non_selective_attribute_names}")
 
- return [
- (label, attribute_name, attribute_value)
- for (label, attribute_name, attribute_value) in columns
- if (label, attribute_name) not in non_selective_attribute_names
- ]
+ return [(label, attribute_name, attribute_value) for (label, attribute_name, attribute_value) in columns if (label, attribute_name) not in non_selective_attribute_names]
 
- def _value_columns(
- self, counter_list: Collection[AnnotationCounter], prior_attribute_columns: Optional[list[AttributeValueKey]]
- ) -> list[AttributeValueKey]:
+ def _value_columns(self, counter_list: Collection[AnnotationCounter], prior_attribute_columns: Optional[list[AttributeValueKey]]) -> list[AttributeValueKey]:
 all_attr_key_set = {attr_key for c in counter_list for attr_key in c.annotation_count_by_attribute}
 if prior_attribute_columns is not None:
 remaining_columns = sorted(all_attr_key_set - set(prior_attribute_columns))
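The `_only_selective_attribute` hunk above implements a small heuristic: a (label, attribute) pair that appears with more distinct values than `selective_attribute_value_max_count` is treated as a non-choice attribute (such as a tracking ID) and its columns are dropped. A rough self-contained sketch of that idea, using invented data:

import collections

max_values = 2  # stands in for selective_attribute_value_max_count
columns = [
    ("car", "occluded", "true"),
    ("car", "occluded", "false"),
    ("car", "tracking_id", "t1"),
    ("car", "tracking_id", "t2"),
    ("car", "tracking_id", "t3"),
]
# Count how many distinct values each (label, attribute) pair has.
value_counts = collections.Counter((label, attr) for label, attr, _ in columns)
non_selective = {pair for pair, count in value_counts.items() if count > max_values}
selective_columns = [(label, attr, value) for label, attr, value in columns if (label, attr) not in non_selective]
print(selective_columns)  # the tracking_id columns exceed the limit and are dropped; the two occluded columns remain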
@@ -511,7 +496,7 @@ class AttributeCountCsv:
 # `task_id`列など`basic_columns`も`fillna`対象だが、nanではないはずので問題ない
 df.fillna(0, inplace=True)
 
- print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)
+ print_csv(df, output=str(output_file))
 
 def print_csv_by_input_data(
 self,
@@ -555,7 +540,7 @@ class AttributeCountCsv:
 value_columns = self._value_columns(counter_list, prior_attribute_columns)
 df = df.fillna(dict.fromkeys(value_columns, 0))
 
- print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)
+ print_csv(df, output=str(output_file))
 
 
 class LabelCountCsv:
@@ -565,9 +550,6 @@ class LabelCountCsv:
 
 """
 
- def __init__(self, csv_format: Optional[dict[str, Any]] = None) -> None:
- self.csv_format = csv_format
-
 def _value_columns(self, counter_list: Collection[AnnotationCounter], prior_label_columns: Optional[list[str]]) -> list[str]:
 all_attr_key_set = {attr_key for c in counter_list for attr_key in c.annotation_count_by_label}
 if prior_label_columns is not None:
@@ -615,7 +597,7 @@ class LabelCountCsv:
 # NaNを0に変換する
 # `basic_columns`は必ずnanではないので、すべての列に対してfillnaを実行しても問題ないはず
 df.fillna(0, inplace=True)
- print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)
+ print_csv(df, output=str(output_file))
 
 def print_csv_by_input_data(
 self,
@@ -658,7 +640,7 @@ class LabelCountCsv:
 value_columns = self._value_columns(counter_list, prior_label_columns)
 df = df.fillna(dict.fromkeys(value_columns, 0))
 
- print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)
+ print_csv(df, output=str(output_file))
 
 
 class AnnotationSpecs:
@@ -693,9 +675,7 @@ class AnnotationSpecs:
 result = [to_label_name(label) for label in self._labels_v1]
 duplicated_labels = [key for key, value in collections.Counter(result).items() if value > 1]
 if len(duplicated_labels) > 0:
- logger.warning(
- f"アノテーション仕様のラベル英語名が重複しています。アノテーション個数が正しく算出できない可能性があります。:: {duplicated_labels}"
- )
+ logger.warning(f"アノテーション仕様のラベル英語名が重複しています。アノテーション個数が正しく算出できない可能性があります。:: {duplicated_labels}")
 return result
 
 def attribute_name_keys(
@@ -734,9 +714,7 @@ class AnnotationSpecs:
 
 duplicated_attribute_names = [key for key, value in collections.Counter(result).items() if value > 1]
 if len(duplicated_attribute_names) > 0:
- logger.warning(
- f"アノテーション仕様の属性情報(ラベル英語名、属性英語名)が重複しています。アノテーション個数が正しく算出できない可能性があります。:: {duplicated_attribute_names}" # noqa: E501
- )
+ logger.warning(f"アノテーション仕様の属性情報(ラベル英語名、属性英語名)が重複しています。アノテーション個数が正しく算出できない可能性があります。:: {duplicated_attribute_names}")
 
 return result
 
@@ -776,7 +754,7 @@ class AnnotationSpecs:
 duplicated_attributes = [key for key, value in collections.Counter(target_attribute_value_keys).items() if value > 1]
 if len(duplicated_attributes) > 0:
 logger.warning(
- f"アノテーション仕様の属性情報(ラベル英語名、属性英語名、選択肢英語名)が重複しています。アノテーション個数が正しく算出できない可能性があります。:: {duplicated_attributes}" # noqa: E501
+ f"アノテーション仕様の属性情報(ラベル英語名、属性英語名、選択肢英語名)が重複しています。アノテーション個数が正しく算出できない可能性があります。:: {duplicated_attributes}"
 )
 
 return target_attribute_value_keys
@@ -874,9 +852,7 @@ class ListAnnotationCountMain:
 non_selective_attribute_name_keys = annotation_specs.non_selective_attribute_name_keys()
 
 frame_no_map = self.get_frame_no_map(task_json_path) if task_json_path is not None else None
- counter_by_input_data = ListAnnotationCounterByInputData(
- non_target_attribute_names=non_selective_attribute_name_keys, frame_no_map=frame_no_map
- )
+ counter_by_input_data = ListAnnotationCounterByInputData(non_target_attribute_names=non_selective_attribute_name_keys, frame_no_map=frame_no_map)
 counter_list_by_input_data = counter_by_input_data.get_annotation_counter_list(
 annotation_path,
 target_task_ids=target_task_ids,
@@ -957,9 +933,7 @@ class ListAnnotationCountMain:
 non_selective_attribute_name_keys = None
 
 frame_no_map = self.get_frame_no_map(task_json_path) if task_json_path is not None else None
- counter_list_by_input_data = ListAnnotationCounterByInputData(
- non_target_attribute_names=non_selective_attribute_name_keys, frame_no_map=frame_no_map
- ).get_annotation_counter_list(
+ counter_list_by_input_data = ListAnnotationCounterByInputData(non_target_attribute_names=non_selective_attribute_name_keys, frame_no_map=frame_no_map).get_annotation_counter_list(
 annotation_path,
 target_task_ids=target_task_ids,
 task_query=task_query,
@@ -1198,7 +1172,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 parser.add_argument(
 "--latest",
 action="store_true",
- help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。", # noqa: E501
+ help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。",
 )
 
 parser.set_defaults(subcommand_func=main)
@@ -203,14 +203,10 @@ class ListAnnotationDurationByInputData:
 annotation_duration_by_label[detail["label"]] += calculate_annotation_duration_second(detail)
 
 if self.target_labels is not None:
- annotation_duration_by_label = {
- label: duration for label, duration in annotation_duration_by_label.items() if label in self.target_labels
- }
+ annotation_duration_by_label = {label: duration for label, duration in annotation_duration_by_label.items() if label in self.target_labels}
 
 if self.non_target_labels is not None:
- annotation_duration_by_label = {
- label: duration for label, duration in annotation_duration_by_label.items() if label not in self.non_target_labels
- }
+ annotation_duration_by_label = {label: duration for label, duration in annotation_duration_by_label.items() if label not in self.non_target_labels}
 
 annotation_duration_by_attribute: dict[AttributeValueKey, float] = defaultdict(float)
 for detail in range_details:
@@ -307,7 +303,7 @@ class AnnotationDurationCsvByAttribute:
 Args:
 selective_attribute_value_max_count: 選択肢系の属性の値の個数の上限。これを超えた場合は、非選択肢系属性(トラッキングIDやアノテーションリンクなど)とみなす
 
- """ # noqa: E501
+ """
 
 def __init__(self, selective_attribute_value_max_count: int = 20) -> None:
 self.selective_attribute_value_max_count = selective_attribute_value_max_count
@@ -322,23 +318,13 @@ class AnnotationDurationCsvByAttribute:
 for label, attribute_name, _ in columns:
 attribute_name_list.append((label, attribute_name))
 
- non_selective_attribute_names = {
- key for key, value in collections.Counter(attribute_name_list).items() if value > self.selective_attribute_value_max_count
- }
+ non_selective_attribute_names = {key for key, value in collections.Counter(attribute_name_list).items() if value > self.selective_attribute_value_max_count}
 if len(non_selective_attribute_names) > 0:
- logger.debug(
- f"以下の属性は値の個数が{self.selective_attribute_value_max_count}を超えていたため、集計しません。 :: {non_selective_attribute_names}"
- )
+ logger.debug(f"以下の属性は値の個数が{self.selective_attribute_value_max_count}を超えていたため、集計しません。 :: {non_selective_attribute_names}")
 
- return [
- (label, attribute_name, attribute_value)
- for (label, attribute_name, attribute_value) in columns
- if (label, attribute_name) not in non_selective_attribute_names
- ]
+ return [(label, attribute_name, attribute_value) for (label, attribute_name, attribute_value) in columns if (label, attribute_name) not in non_selective_attribute_names]
 
- def _value_columns(
- self, annotation_duration_list: Collection[AnnotationDuration], prior_attribute_columns: Optional[list[AttributeValueKey]]
- ) -> list[AttributeValueKey]:
+ def _value_columns(self, annotation_duration_list: Collection[AnnotationDuration], prior_attribute_columns: Optional[list[AttributeValueKey]]) -> list[AttributeValueKey]:
 all_attr_key_set = {attr_key for c in annotation_duration_list for attr_key in c.annotation_duration_second_by_attribute}
 if prior_attribute_columns is not None:
 remaining_columns = sorted(all_attr_key_set - set(prior_attribute_columns))
@@ -482,9 +468,7 @@ class ListAnnotationDurationMain:
 def __init__(self, service: annofabapi.Resource) -> None:
 self.service = service
 
- def print_annotation_duration_csv(
- self, annotation_duration_list: list[AnnotationDuration], csv_type: CsvType, output_file: Path, *, annotation_specs: Optional[AnnotationSpecs]
- ) -> None:
+ def print_annotation_duration_csv(self, annotation_duration_list: list[AnnotationDuration], csv_type: CsvType, output_file: Path, *, annotation_specs: Optional[AnnotationSpecs]) -> None:
 if csv_type == CsvType.LABEL:
 # ラベル名の列順が、アノテーション仕様にあるラベル名の順番に対応するようにする。
 label_columns: Optional[list[str]] = None
@@ -522,9 +506,7 @@ class ListAnnotationDurationMain:
 annotation_specs = AnnotationSpecs(self.service, project_id, annotation_type=DefaultAnnotationType.RANGE.value)
 non_selective_attribute_name_keys = annotation_specs.non_selective_attribute_name_keys()
 
- annotation_duration_list = ListAnnotationDurationByInputData(
- non_target_attribute_names=non_selective_attribute_name_keys
- ).get_annotation_duration_list(
+ annotation_duration_list = ListAnnotationDurationByInputData(non_target_attribute_names=non_selective_attribute_name_keys).get_annotation_duration_list(
 annotation_path,
 input_data_json_path=input_data_json_path,
 target_task_ids=target_task_ids,
@@ -535,9 +517,7 @@ class ListAnnotationDurationMain:
 
 if arg_format == FormatArgument.CSV:
 assert csv_type is not None
- self.print_annotation_duration_csv(
- annotation_duration_list, output_file=output_file, csv_type=csv_type, annotation_specs=annotation_specs
- )
+ self.print_annotation_duration_csv(annotation_duration_list, output_file=output_file, csv_type=csv_type, annotation_specs=annotation_specs)
 
 elif arg_format in [FormatArgument.PRETTY_JSON, FormatArgument.JSON]:
 json_is_pretty = arg_format == FormatArgument.PRETTY_JSON
@@ -577,9 +557,7 @@ class ListAnnotationDuration(CommandLine):
 super().validate_project(project_id, project_member_roles=[ProjectMemberRole.OWNER, ProjectMemberRole.TRAINING_DATA_USER])
 project, _ = self.service.api.get_project(project_id)
 if project["input_data_type"] != InputDataType.MOVIE.value:
- logger.warning(
- f"project_id='{project_id}'であるプロジェクトは、動画プロジェクトでないので、出力される区間アノテーションの長さはすべて0秒になります。"
- )
+ logger.warning(f"project_id='{project_id}'であるプロジェクトは、動画プロジェクトでないので、出力される区間アノテーションの長さはすべて0秒になります。")
 
 annotation_path = Path(args.annotation) if args.annotation is not None else None
 
@@ -622,16 +600,12 @@ class ListAnnotationDuration(CommandLine):
 
 if project_id is not None:
 if args.temp_dir is not None:
- download_and_print_annotation_duration(
- project_id=project_id, temp_dir=args.temp_dir, is_latest=args.latest, annotation_path=annotation_path
- )
+ download_and_print_annotation_duration(project_id=project_id, temp_dir=args.temp_dir, is_latest=args.latest, annotation_path=annotation_path)
 else:
 # `NamedTemporaryFile`を使わない理由: Windowsで`PermissionError`が発生するため
 # https://qiita.com/yuji38kwmt/items/c6f50e1fc03dafdcdda0 参考
 with tempfile.TemporaryDirectory() as str_temp_dir:
- download_and_print_annotation_duration(
- project_id=project_id, temp_dir=Path(str_temp_dir), is_latest=args.latest, annotation_path=annotation_path
- )
+ download_and_print_annotation_duration(project_id=project_id, temp_dir=Path(str_temp_dir), is_latest=args.latest, annotation_path=annotation_path)
 else:
 assert annotation_path is not None
 main_obj.print_annotation_duration(
@@ -694,7 +668,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 parser.add_argument(
 "--latest",
 action="store_true",
- help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。", # noqa: E501
+ help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。",
 )
 
 parser.add_argument(
@@ -71,7 +71,7 @@ class ListVideoDuration(CommandLine):
 def validate(self, args: argparse.Namespace) -> bool:
 if args.project_id is None and (args.input_data_json is None or args.task_json is None):
 print( # noqa: T201
- f"{self.COMMON_MESSAGE} argument --project_id: '--input_data_json'または'--task_json'が未指定のときは、'--project_id' を指定してください。", # noqa: E501
+ f"{self.COMMON_MESSAGE} argument --project_id: '--input_data_json'または'--task_json'が未指定のときは、'--project_id' を指定してください。",
 file=sys.stderr,
 )
 return False
@@ -171,8 +171,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 "--input_data_json",
 type=Path,
 required=False,
- help="入力データ情報が記載されたJSONファイルのパスを指定します。\n"
- "JSONファイルは ``$ annofabcli input_data download`` コマンドで取得できます。",
+ help="入力データ情報が記載されたJSONファイルのパスを指定します。\nJSONファイルは ``$ annofabcli input_data download`` コマンドで取得できます。",
 )
 
 parser.add_argument(
@@ -156,7 +156,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 )
 
 argument_parser.add_output()
- argument_parser.add_csv_format()
 parser.set_defaults(subcommand_func=main)
 
 
@@ -142,15 +142,11 @@ class ScatterGraph:
 if legend_label == "":
 legend_label = "none"
 
- self._scatter_glyphs[legend_label] = self.figure.scatter(
- x=x_column_name, y=y_column_name, source=source, legend_label=legend_label, color=color, muted_alpha=0.2, size=6
- )
+ self._scatter_glyphs[legend_label] = self.figure.scatter(x=x_column_name, y=y_column_name, source=source, legend_label=legend_label, color=color, muted_alpha=0.2, size=6)
 
 # 1点ごとに`text`で名前を表示している理由:
 # `add_multi_choice_widget_for_searching_user`関数で追加したMultiChoice Widgetで、名前の表示スタイルを変更するため
- for x, y, username, user_id in zip(
- source.data[x_column_name], source.data[y_column_name], source.data[username_column_name], source.data[user_id_column_name]
- ):
+ for x, y, username, user_id in zip(source.data[x_column_name], source.data[y_column_name], source.data[username_column_name], source.data[user_id_column_name]):
 self.text_glyphs[user_id] = self.figure.text(
 x=x,
 y=y,
@@ -204,9 +200,7 @@ class ScatterGraph:
 
 # 1点ごとに`text`で名前を表示している理由:
 # `add_multi_choice_widget_for_searching_user`関数で追加したMultiChoice Widgetで、名前の表示スタイルを変更するため
- for x, y, username, user_id in zip(
- source.data[x_column_name], source.data[y_column_name], source.data[username_column_name], source.data[user_id_column_name]
- ):
+ for x, y, username, user_id in zip(source.data[x_column_name], source.data[y_column_name], source.data[username_column_name], source.data[user_id_column_name]):
 self.text_glyphs[user_id] = self.figure.text(
 x=x,
 y=y,
@@ -64,15 +64,13 @@ def get_step_for_current_phase(task: Task, number_of_inspections: int) -> int:
 
 elif current_phase == TaskPhase.ANNOTATION:
 number_of_rejections_by_inspection = sum(
- get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage)
- for phase_stage in range(1, number_of_inspections + 1)
+ get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage) for phase_stage in range(1, number_of_inspections + 1)
 )
 return number_of_rejections_by_inspection + number_of_rejections_by_acceptance + 1
 
 elif current_phase == TaskPhase.INSPECTION:
 number_of_rejections_by_inspection = sum(
- get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage)
- for phase_stage in range(current_phase_stage, number_of_inspections + 1)
+ get_number_of_rejections(histories_by_phase, phase=TaskPhase.INSPECTION, phase_stage=phase_stage) for phase_stage in range(current_phase_stage, number_of_inspections + 1)
 )
 return number_of_rejections_by_inspection + number_of_rejections_by_acceptance + 1
 
@@ -119,9 +117,7 @@ def create_task_count_summary(task_list: list[Task], number_of_inspections: int)
 # `observed=True`を指定する理由:以下の警告に対応するため
 # FutureWarning: The default value of observed=False is deprecated and will change to observed=True in a future version of pandas.
 # Specify observed=False to silence this warning and retain the current behavior
- summary_df = df.pivot_table(
- values="task_id", index=["step", "phase", "phase_stage", "simple_status"], aggfunc="count", observed=False
- ).reset_index()
+ summary_df = df.pivot_table(values="task_id", index=["step", "phase", "phase_stage", "simple_status"], aggfunc="count", observed=False).reset_index()
 summary_df.rename(columns={"task_id": "task_count"}, inplace=True)
 
 summary_df.sort_values(["step", "phase", "phase_stage"])
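The reflowed pivot above counts tasks per (step, phase, phase_stage, simple_status) group. For reference, a tiny self-contained illustration of that pandas pattern, with toy data rather than the real task list:

import pandas as pd

df = pd.DataFrame(
    {
        "task_id": ["t1", "t2", "t3", "t4"],
        "step": [1, 1, 2, 1],
        "phase": ["annotation", "annotation", "acceptance", "annotation"],
        "phase_stage": [1, 1, 1, 1],
        "simple_status": ["working", "working", "complete", "not_started"],
    }
)
# Count task_id per group; observed=False keeps the current pandas behavior for categorical keys.
summary = df.pivot_table(values="task_id", index=["step", "phase", "phase_stage", "simple_status"], aggfunc="count", observed=False).reset_index()
summary = summary.rename(columns={"task_id": "task_count"})
print(summary.sort_values(["step", "phase", "phase_stage"]))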
@@ -146,7 +142,7 @@ class SummarizeTaskCount(CommandLine):
 # タスク全件ファイルをダウンロードするので、オーナロールかアノテーションユーザロールであることを確認する。
 super().validate_project(project_id, project_member_roles=[ProjectMemberRole.OWNER, ProjectMemberRole.TRAINING_DATA_USER])
 
- if is_execute_get_tasks_api:
+ if is_execute_get_tasks_api: # noqa: SIM108
 task_list = self.service.wrapper.get_all_tasks(project_id)
 else:
 task_list = self.get_task_list_with_downloading_file(project_id, task_json_path, is_latest=is_latest)
@@ -157,7 +153,7 @@ class SummarizeTaskCount(CommandLine):
 
 number_of_inspections = self.get_number_of_inspections_for_project(project_id)
 task_count_df = create_task_count_summary(task_list, number_of_inspections=number_of_inspections)
- annofabcli.common.utils.print_csv(task_count_df, output=self.output, to_csv_kwargs=self.csv_format)
+ annofabcli.common.utils.print_csv(task_count_df, output=self.output)
 
 def get_task_list_with_downloading_file(self, project_id: str, task_json_path: Optional[Path], is_latest: bool) -> list[Task]: # noqa: FBT001
 if task_json_path is None:
@@ -207,10 +203,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 parser.add_argument(
 "--execute_get_tasks_api",
 action="store_true",
- help="[EXPERIMENTAL] ``getTasks`` APIを実行して、タスク情報を参照します。タスク数が少ないプロジェクトで、最新のタスク情報を参照したいときに利用できます。", # noqa: E501
+ help="[EXPERIMENTAL] ``getTasks`` APIを実行して、タスク情報を参照します。タスク数が少ないプロジェクトで、最新のタスク情報を参照したいときに利用できます。",
 )
 
- argument_parser.add_csv_format()
 argument_parser.add_output()
 
 parser.set_defaults(subcommand_func=main)
@@ -226,7 +221,7 @@ def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argpa
 subcommand_name = "summarize_task_count"
 subcommand_help = "タスクのフェーズ、ステータス、ステップごとにタスク数を出力します。"
 description = "タスクのフェーズ、ステータス、ステップごとにタスク数を、CSV形式で出力します。"
- epilog = "アノテーションユーザまたはオーナロールを持つユーザで実行できます。ただし``--execute_get_tasks_api``を指定した場合は、どのロールでも実行できます。" # noqa: E501
+ epilog = "アノテーションユーザまたはオーナロールを持つユーザで実行できます。ただし``--execute_get_tasks_api``を指定した場合は、どのロールでも実行できます。"
 parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description=description, epilog=epilog)
 parse_args(parser)
 return parser
@@ -97,9 +97,7 @@ def get_task_id_prefix(task_id: str, delimiter: str) -> str:
 return delimiter.join(tmp_list[0 : len(tmp_list) - 1])
 
 
- def create_task_count_summary_df(
- task_list: list[Task], task_id_delimiter: Optional[str], task_id_groups: Optional[dict[str, list[str]]]
- ) -> pandas.DataFrame:
+ def create_task_count_summary_df(task_list: list[Task], task_id_delimiter: Optional[str], task_id_groups: Optional[dict[str, list[str]]]) -> pandas.DataFrame:
 """
 タスク数を集計したDataFrameを生成する。
 
@@ -131,16 +129,12 @@ def create_task_count_summary_df(
 
 df_task.fillna({"task_id_group": TASK_ID_GROUP_UNKNOWN}, inplace=True)
 
- df_summary = df_task.pivot_table(
- values="task_id", index=["task_id_group"], columns=["status_for_summary"], aggfunc="count", fill_value=0
- ).reset_index()
+ df_summary = df_task.pivot_table(values="task_id", index=["task_id_group"], columns=["status_for_summary"], aggfunc="count", fill_value=0).reset_index()
 
 for status in TaskStatusForSummary:
 add_columns_if_not_exists(df_summary, status.value)
 
- df_summary["sum"] = (
- df_task.pivot_table(values="task_id", index=["task_id_group"], aggfunc="count", fill_value=0).reset_index().fillna(0)["task_id"]
- )
+ df_summary["sum"] = df_task.pivot_table(values="task_id", index=["task_id_group"], aggfunc="count", fill_value=0).reset_index().fillna(0)["task_id"]
 
 return df_summary
 
@@ -152,7 +146,6 @@ class SummarizeTaskCountByTaskId(CommandLine):
 df[columns],
 format=FormatArgument(FormatArgument.CSV),
 output=self.output,
- csv_format=self.csv_format,
 )
 
 def main(self) -> None:
@@ -207,7 +200,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 help="最新のタスク一覧ファイルを参照します。このオプションを指定すると、タスク一覧ファイルを更新するのに数分待ちます。",
 )
 
- argument_parser.add_csv_format()
 argument_parser.add_output()
 
 parser.set_defaults(subcommand_func=main)
@@ -80,9 +80,7 @@ def create_task_count_summary_df(task_list: list[Task]) -> pandas.DataFrame:
 df[column] = 0
 
 df_task = pandas.DataFrame([add_info_to_task(t) for t in task_list])
- df_summary = df_task.pivot_table(
- values="task_id", index=["account_id"], columns=["status_for_summary"], aggfunc="count", fill_value=0
- ).reset_index()
+ df_summary = df_task.pivot_table(values="task_id", index=["account_id"], columns=["status_for_summary"], aggfunc="count", fill_value=0).reset_index()
 for status in TaskStatusForSummary:
 add_columns_if_not_exists(df_summary, status.value)
 
@@ -116,7 +114,6 @@ class SummarizeTaskCountByUser(CommandLine):
 target_df,
 format=FormatArgument(FormatArgument.CSV),
 output=self.output,
- csv_format=self.csv_format,
 )
 
 def main(self) -> None:
@@ -160,7 +157,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
 help="最新のタスク一覧ファイルを参照します。このオプションを指定すると、タスク一覧ファイルを更新するのに数分待ちます。",
 )
 
- argument_parser.add_csv_format()
 argument_parser.add_output()
 
 parser.set_defaults(subcommand_func=main)
@@ -42,9 +42,7 @@ class AnnotationCount:
 self.df = df
 
 @classmethod
- def from_annotation_zip(
- cls, annotation_zip: Path, project_id: str, *, get_annotation_count_func: Optional[Callable[[dict[str, Any]], int]] = None
- ) -> AnnotationCount:
+ def from_annotation_zip(cls, annotation_zip: Path, project_id: str, *, get_annotation_count_func: Optional[Callable[[dict[str, Any]], int]] = None) -> AnnotationCount:
 """
 アノテーションZIPファイルからインスタンスを生成します。
 
@@ -72,9 +72,7 @@ def _create_cumulative_dataframe(task_worktime_by_phase_user: TaskWorktimeByPhas
 class AbstractPhaseCumulativeProductivity(abc.ABC):
 """ロールごとの累積の生産性をプロットするための抽象クラス"""
 
- def __init__(
- self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> None:
+ def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> None:
 self.df = df
 self.phase = phase
 self.phase_name = self._get_phase_name(phase)
@@ -104,9 +102,7 @@ class AbstractPhaseCumulativeProductivity(abc.ABC):
 return False
 
 if len(self.default_user_id_list) == 0:
- logger.info(
- f"{self.phase_name}作業したタスクが0件なので('first_{self.phase.value}_user_id'がすべて空欄)、{output_file} を出力しません。"
- )
+ logger.info(f"{self.phase_name}作業したタスクが0件なので('first_{self.phase.value}_user_id'がすべて空欄)、{output_file} を出力しません。")
 return False
 
 return True
@@ -138,9 +134,7 @@ class AbstractPhaseCumulativeProductivity(abc.ABC):
 たとえば、20000件をプロットする際、すべての列を出力すると、そうでないときに比べてファイルサイズが3倍以上になる
 """
 xy_columns = set(itertools.chain.from_iterable(columns for columns in columns_list))
- tooltip_columns = set(
- itertools.chain.from_iterable(line_graph.tooltip_columns for line_graph in line_graph_list if line_graph.tooltip_columns is not None)
- )
+ tooltip_columns = set(itertools.chain.from_iterable(line_graph.tooltip_columns for line_graph in line_graph_list if line_graph.tooltip_columns is not None))
 return list(xy_columns | tooltip_columns)
 
 df = self.df
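The reflowed `tooltip_columns` expression above unions the x/y columns with every line graph's tooltip columns so that only those columns are emitted (the surrounding docstring notes that writing all columns can triple the output size when plotting around 20,000 points). A rough sketch of that set union, with hypothetical column lists standing in for the real graph objects:

import itertools

columns_list = [["date", "task_count"], ["date", "worktime_hour"]]          # x/y column pairs (illustrative)
tooltip_columns_list = [["user_id", "username"], None, ["task_count"]]      # per-graph tooltip columns; None means no tooltips

xy_columns = set(itertools.chain.from_iterable(columns_list))
tooltip_columns = set(itertools.chain.from_iterable(cols for cols in tooltip_columns_list if cols is not None))
required_columns = xy_columns | tooltip_columns
print(sorted(required_columns))  # ['date', 'task_count', 'user_id', 'username', 'worktime_hour']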
@@ -41,9 +41,7 @@ def create_df_productivity_per_date(task_worktime_by_phase_user: TaskWorktimeByP
 df = df[df["phase"] == str_phase]
 df = df.rename(columns={"pointed_out_inspection_comment_count": "inspection_comment_count", "worktime_hour": f"{str_phase}_worktime_hour"})
 
- df[f"first_{str_phase}_started_date"] = df["started_datetime"].map(
- lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None
- )
+ df[f"first_{str_phase}_started_date"] = df["started_datetime"].map(lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None)
 
 # first_annotation_user_id と first_annotation_usernameの両方を指定している理由:
 # first_annotation_username を取得するため
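`datetime_to_date` itself is not shown in this diff; assuming it simply returns the date part of an ISO 8601 datetime string, and taking str_phase="annotation" as an example, the reflowed `map` call above behaves like this sketch:

import pandas as pd

def datetime_to_date(value: str) -> str:
    # Assumed behavior: keep only the date part of an ISO 8601 datetime string; the real helper may differ.
    return value[:10]

df = pd.DataFrame({"started_datetime": ["2024-05-01T09:00:00+09:00", None, "2024-05-02T10:30:00+09:00"]})
df["first_annotation_started_date"] = df["started_datetime"].map(lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None)
print(df)  # the None entry stays None; the datetimes become "2024-05-01" and "2024-05-02"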
@@ -82,9 +80,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
 PLOT_WIDTH = 1200
 PLOT_HEIGHT = 600
 
- def __init__(
- self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> None:
+ def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> None:
 self.df = df
 self.phase = phase
 self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []
@@ -209,9 +205,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
 *self.production_volume_columns,
 ]
 
- velocity_columns = [
- f"{numerator}/{denominator}" for numerator in [f"{str_phase}_worktime_hour"] for denominator in self.production_volume_columns
- ]
+ velocity_columns = [f"{numerator}/{denominator}" for numerator in [f"{str_phase}_worktime_hour"] for denominator in self.production_volume_columns]
 
 columns = production_columns + velocity_columns
 
@@ -337,15 +331,13 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
 continue
 
 df_subset = self._get_df_sequential_date(df_subset)
- df_subset[f"annotation_worktime_minute/{production_volume_column}"] = (
- df_subset["annotation_worktime_hour"] * 60 / df_subset[production_volume_column]
- )
+ df_subset[f"annotation_worktime_minute/{production_volume_column}"] = df_subset["annotation_worktime_hour"] * 60 / df_subset[production_volume_column]
 df_subset[f"annotation_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
 get_weekly_sum(df_subset["annotation_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
 )
- df_subset[f"inspection_comment_count/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
- df_subset["inspection_comment_count"]
- ) / get_weekly_sum(df_subset[production_volume_column])
+ df_subset[f"inspection_comment_count/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df_subset["inspection_comment_count"]) / get_weekly_sum(
+ df_subset[production_volume_column]
+ )
 
 source = ColumnDataSource(data=df_subset)
 color = get_color_from_palette(user_index)
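`get_weekly_sum` and `WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX` are not part of this diff; assuming the former is a trailing 7-row rolling sum over the per-date values and using "__lastweek" as a stand-in suffix, the weekly moving-average ratio computed above can be sketched like this:

import pandas as pd

def get_weekly_sum(series: pd.Series) -> pd.Series:
    # Assumption: a trailing 7-row rolling sum; the real helper may differ.
    return series.rolling(7, min_periods=1).sum()

df_subset = pd.DataFrame(
    {
        "annotation_worktime_hour": [1.0, 2.0, 1.5, 0.5, 2.5, 1.0, 3.0, 2.0],
        "annotation_count": [10, 25, 15, 5, 30, 10, 40, 20],
    }
)
# Per-date velocity: minutes of annotation work per produced annotation.
df_subset["annotation_worktime_minute/annotation_count"] = df_subset["annotation_worktime_hour"] * 60 / df_subset["annotation_count"]
# Weekly moving average of the same ratio: weekly sum of the numerator over weekly sum of the denominator.
df_subset["annotation_worktime_minute/annotation_count__lastweek"] = (
    get_weekly_sum(df_subset["annotation_worktime_hour"]) * 60 / get_weekly_sum(df_subset["annotation_count"])
)
print(df_subset)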
@@ -414,7 +406,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
 
 df = self.df.copy()
 
- if target_user_id_list is not None:
+ if target_user_id_list is not None: # noqa: SIM108
 user_id_list = target_user_id_list
 else:
 user_id_list = df.sort_values(by="user_id", ascending=False)["user_id"].dropna().unique().tolist()
@@ -475,9 +467,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
 continue
 
 df_subset = self._get_df_sequential_date(df_subset)
- df_subset[f"inspection_worktime_minute/{production_volume_column}"] = (
- df_subset["inspection_worktime_hour"] * 60 / df_subset[production_volume_column]
- )
+ df_subset[f"inspection_worktime_minute/{production_volume_column}"] = df_subset["inspection_worktime_hour"] * 60 / df_subset[production_volume_column]
 df_subset[f"inspection_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
 get_weekly_sum(df_subset["inspection_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
 )
@@ -554,7 +544,7 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
 
 df = self.df.copy()
 
- if target_user_id_list is not None:
+ if target_user_id_list is not None: # noqa: SIM108
 user_id_list = target_user_id_list
 else:
 user_id_list = df.sort_values(by="user_id", ascending=False)["user_id"].dropna().unique().tolist()
@@ -616,9 +606,7 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
 continue
 
 df_subset = self._get_df_sequential_date(df_subset)
- df_subset[f"acceptance_worktime_minute/{production_volume_column}"] = (
- df_subset["acceptance_worktime_hour"] * 60 / df_subset[production_volume_column]
- )
+ df_subset[f"acceptance_worktime_minute/{production_volume_column}"] = df_subset["acceptance_worktime_hour"] * 60 / df_subset[production_volume_column]
 
 df_subset[f"acceptance_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
 get_weekly_sum(df_subset["acceptance_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
@@ -89,9 +89,7 @@ class ProjectPerformance:
 return [e.value for e in TaskPhase if e.value in tmp_set]
 
 @classmethod
- def from_project_dirs(
- cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> ProjectPerformance:
+ def from_project_dirs(cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> ProjectPerformance:
 row_list: list[pandas.Series] = [cls._get_series_from_project_dir(project_dir) for project_dir in project_dir_list]
 return cls(pandas.DataFrame(row_list), custom_production_volume_list=custom_production_volume_list)