annofabcli 1.102.1__py3-none-any.whl → 1.104.0__py3-none-any.whl
This diff compares the contents of two publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in the public registry.
- annofabcli/__main__.py +1 -1
- annofabcli/annotation/annotation_query.py +9 -29
- annofabcli/annotation/change_annotation_attributes.py +6 -14
- annofabcli/annotation/change_annotation_properties.py +5 -12
- annofabcli/annotation/copy_annotation.py +4 -10
- annofabcli/annotation/delete_annotation.py +10 -26
- annofabcli/annotation/dump_annotation.py +1 -4
- annofabcli/annotation/import_annotation.py +15 -39
- annofabcli/annotation/list_annotation.py +1 -4
- annofabcli/annotation/merge_segmentation.py +5 -15
- annofabcli/annotation/remove_segmentation_overlap.py +8 -29
- annofabcli/annotation/restore_annotation.py +3 -9
- annofabcli/annotation_specs/add_attribute_restriction.py +2 -8
- annofabcli/annotation_specs/attribute_restriction.py +2 -10
- annofabcli/annotation_specs/export_annotation_specs.py +1 -3
- annofabcli/annotation_specs/get_annotation_specs_with_attribute_id_replaced.py +3 -10
- annofabcli/annotation_specs/get_annotation_specs_with_choice_id_replaced.py +4 -10
- annofabcli/annotation_specs/get_annotation_specs_with_label_id_replaced.py +1 -3
- annofabcli/annotation_specs/list_annotation_specs_attribute.py +7 -18
- annofabcli/annotation_specs/list_annotation_specs_choice.py +3 -8
- annofabcli/annotation_specs/list_annotation_specs_history.py +0 -1
- annofabcli/annotation_specs/list_annotation_specs_label.py +3 -8
- annofabcli/annotation_specs/list_annotation_specs_label_attribute.py +4 -9
- annofabcli/annotation_specs/list_attribute_restriction.py +3 -9
- annofabcli/annotation_specs/put_label_color.py +1 -6
- annofabcli/comment/delete_comment.py +3 -9
- annofabcli/comment/list_all_comment.py +15 -5
- annofabcli/comment/list_comment.py +46 -7
- annofabcli/comment/put_comment.py +4 -13
- annofabcli/comment/put_comment_simply.py +2 -6
- annofabcli/comment/put_inspection_comment.py +2 -6
- annofabcli/comment/put_inspection_comment_simply.py +3 -6
- annofabcli/comment/put_onhold_comment.py +2 -6
- annofabcli/comment/put_onhold_comment_simply.py +2 -4
- annofabcli/common/cli.py +5 -43
- annofabcli/common/download.py +8 -25
- annofabcli/common/image.py +3 -7
- annofabcli/common/utils.py +2 -4
- annofabcli/common/visualize.py +2 -4
- annofabcli/filesystem/draw_annotation.py +6 -18
- annofabcli/filesystem/filter_annotation.py +7 -24
- annofabcli/filesystem/mask_user_info.py +2 -5
- annofabcli/filesystem/merge_annotation.py +2 -6
- annofabcli/input_data/change_input_data_name.py +3 -7
- annofabcli/input_data/copy_input_data.py +6 -14
- annofabcli/input_data/delete_input_data.py +7 -24
- annofabcli/input_data/delete_metadata_key_of_input_data.py +5 -16
- annofabcli/input_data/list_all_input_data.py +5 -14
- annofabcli/input_data/list_all_input_data_merged_task.py +8 -23
- annofabcli/input_data/list_input_data.py +5 -16
- annofabcli/input_data/put_input_data.py +7 -19
- annofabcli/input_data/update_metadata_of_input_data.py +6 -14
- annofabcli/instruction/list_instruction_history.py +0 -1
- annofabcli/instruction/upload_instruction.py +4 -7
- annofabcli/job/list_job.py +2 -3
- annofabcli/job/list_last_job.py +1 -3
- annofabcli/organization/list_organization.py +0 -1
- annofabcli/organization_member/change_organization_member.py +1 -3
- annofabcli/organization_member/delete_organization_member.py +2 -6
- annofabcli/organization_member/invite_organization_member.py +1 -3
- annofabcli/organization_member/list_organization_member.py +0 -1
- annofabcli/project/change_organization_of_project.py +257 -0
- annofabcli/project/change_project_status.py +2 -2
- annofabcli/project/copy_project.py +2 -7
- annofabcli/project/diff_projects.py +4 -16
- annofabcli/project/list_project.py +0 -1
- annofabcli/project/put_project.py +2 -6
- annofabcli/project/subcommand_project.py +2 -0
- annofabcli/project_member/change_project_members.py +1 -1
- annofabcli/project_member/copy_project_members.py +2 -7
- annofabcli/project_member/drop_project_members.py +1 -3
- annofabcli/project_member/invite_project_members.py +2 -4
- annofabcli/project_member/list_users.py +0 -1
- annofabcli/project_member/put_project_members.py +4 -12
- annofabcli/stat_visualization/mask_visualization_dir.py +6 -16
- annofabcli/stat_visualization/merge_visualization_dir.py +7 -19
- annofabcli/stat_visualization/summarize_whole_performance_csv.py +3 -7
- annofabcli/stat_visualization/write_graph.py +5 -15
- annofabcli/stat_visualization/write_performance_rating_csv.py +4 -12
- annofabcli/statistics/list_annotation_area.py +3 -7
- annofabcli/statistics/list_annotation_attribute.py +6 -15
- annofabcli/statistics/list_annotation_attribute_filled_count.py +9 -23
- annofabcli/statistics/list_annotation_count.py +18 -44
- annofabcli/statistics/list_annotation_duration.py +14 -40
- annofabcli/statistics/list_video_duration.py +2 -3
- annofabcli/statistics/list_worktime.py +0 -1
- annofabcli/statistics/scatter.py +3 -9
- annofabcli/statistics/summarize_task_count.py +7 -12
- annofabcli/statistics/summarize_task_count_by_task_id_group.py +3 -11
- annofabcli/statistics/summarize_task_count_by_user.py +1 -5
- annofabcli/statistics/visualization/dataframe/annotation_count.py +2 -4
- annofabcli/statistics/visualization/dataframe/cumulative_productivity.py +6 -12
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py +10 -22
- annofabcli/statistics/visualization/dataframe/project_performance.py +1 -3
- annofabcli/statistics/visualization/dataframe/task.py +2 -5
- annofabcli/statistics/visualization/dataframe/task_history.py +1 -1
- annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py +6 -20
- annofabcli/statistics/visualization/dataframe/user_performance.py +29 -88
- annofabcli/statistics/visualization/dataframe/whole_performance.py +6 -12
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +17 -49
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py +4 -10
- annofabcli/statistics/visualization/filtering_query.py +2 -6
- annofabcli/statistics/visualization/project_dir.py +9 -26
- annofabcli/statistics/visualization/visualization_source_files.py +3 -10
- annofabcli/statistics/visualize_annotation_count.py +9 -23
- annofabcli/statistics/visualize_annotation_duration.py +5 -15
- annofabcli/statistics/visualize_statistics.py +18 -53
- annofabcli/statistics/visualize_video_duration.py +8 -19
- annofabcli/supplementary/delete_supplementary_data.py +7 -23
- annofabcli/supplementary/list_supplementary_data.py +1 -1
- annofabcli/supplementary/put_supplementary_data.py +5 -15
- annofabcli/task/cancel_acceptance.py +3 -4
- annofabcli/task/change_operator.py +3 -11
- annofabcli/task/change_status_to_break.py +1 -1
- annofabcli/task/change_status_to_on_hold.py +5 -18
- annofabcli/task/complete_tasks.py +8 -25
- annofabcli/task/copy_tasks.py +2 -3
- annofabcli/task/delete_metadata_key_of_task.py +2 -6
- annofabcli/task/delete_tasks.py +8 -26
- annofabcli/task/list_all_tasks.py +2 -4
- annofabcli/task/list_tasks.py +3 -7
- annofabcli/task/list_tasks_added_task_history.py +7 -21
- annofabcli/task/put_tasks.py +2 -3
- annofabcli/task/put_tasks_by_count.py +3 -7
- annofabcli/task/reject_tasks.py +7 -19
- annofabcli/task/update_metadata_of_task.py +2 -2
- annofabcli/task_history/list_all_task_history.py +2 -5
- annofabcli/task_history/list_task_history.py +0 -1
- annofabcli/task_history_event/list_all_task_history_event.py +4 -11
- annofabcli/task_history_event/list_worktime.py +4 -14
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/METADATA +1 -1
- annofabcli-1.104.0.dist-info/RECORD +215 -0
- annofabcli-1.102.1.dist-info/RECORD +0 -214
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/WHEEL +0 -0
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/entry_points.txt +0 -0
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/licenses/LICENSE +0 -0
@@ -141,9 +141,7 @@ class WholeProductivityPerCompletedDate:
         return pandas.DataFrame(index=[e.strftime("%Y-%m-%d") for e in pandas.date_range(start_date, end_date)])

     @classmethod
-    def from_df_wrapper(
-        cls, task: Task, worktime_per_date: WorktimePerDate, task_completion_criteria: TaskCompletionCriteria
-    ) -> WholeProductivityPerCompletedDate:
+    def from_df_wrapper(cls, task: Task, worktime_per_date: WorktimePerDate, task_completion_criteria: TaskCompletionCriteria) -> WholeProductivityPerCompletedDate:
         """
         完了日毎の全体の生産量、生産性を算出する。

@@ -250,9 +248,7 @@ class WholeProductivityPerCompletedDate:
             """速度情報の列を追加"""
             df[f"{numerator_column}/{denominator_column}"] = df[numerator_column] / df[denominator_column]
             # 1週間移動平均も出力
-            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
-                df[numerator_column]
-            ) / get_weekly_sum(df[denominator_column])
+            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[numerator_column]) / get_weekly_sum(df[denominator_column])

         # 累計情報を追加
         add_cumsum_column(df, column="task_count")
@@ -307,16 +303,10 @@ class WholeProductivityPerCompletedDate:
                 "unmonitored",
             ]:
                 df[f"{category}_worktime_minute/{denominator}"] = df[f"{category}_worktime_hour"] * 60 / df[denominator]
-                df[f"{category}_worktime_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
-                    get_weekly_sum(df[f"{category}_worktime_hour"]) * 60 / get_weekly_sum(df[denominator])
-                )
+                df[f"{category}_worktime_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[f"{category}_worktime_hour"]) * 60 / get_weekly_sum(df[denominator])

-        df[f"actual_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df["actual_worktime_hour"]) / get_weekly_sum(
-            df["task_count"]
-        )
-        df[f"monitored_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
-            df["monitored_worktime_hour"]
-        ) / get_weekly_sum(df["task_count"])
+        df[f"actual_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df["actual_worktime_hour"]) / get_weekly_sum(df["task_count"])
+        df[f"monitored_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df["monitored_worktime_hour"]) / get_weekly_sum(df["task_count"])

         for column in [
             "task_count",
@@ -349,9 +339,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["task_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -396,9 +384,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["input_data_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -460,7 +446,7 @@ class WholeProductivityPerCompletedDate:
             ("monitored_acceptance_worktime", "計測作業時間(受入)"),
         ]
         if df["actual_worktime_hour"].sum() > 0:
-            # 条件分岐の理由:実績作業時間がないときは、非計測作業時間がマイナス値になり、分かりづらいグラフになるため。必要なときのみ非計測作業時間をプロットする
+            # 条件分岐の理由:実績作業時間がないときは、非計測作業時間がマイナス値になり、分かりづらいグラフになるため。必要なときのみ非計測作業時間をプロットする
             phase_prefix.append(("unmonitored_worktime", "非計測作業時間"))

         fig_info_list = [
@@ -572,9 +558,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["cumsum_task_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -628,9 +612,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["cumsum_input_data_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -762,9 +744,7 @@ class WholeProductivityPerCompletedDate:
         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

     @classmethod
-    def empty(
-        cls, *, task_completion_criteria: TaskCompletionCriteria, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> WholeProductivityPerCompletedDate:
+    def empty(cls, *, task_completion_criteria: TaskCompletionCriteria, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> WholeProductivityPerCompletedDate:
         df = pandas.DataFrame(columns=cls.get_columns(custom_production_volume_list=custom_production_volume_list))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -868,9 +848,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         def add_velocity_column(df: pandas.DataFrame, numerator_column: str, denominator_column: str) -> None:
             df[f"{numerator_column}/{denominator_column}"] = df[numerator_column] / df[denominator_column]

-            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_moving_average(
-                df[numerator_column]
-            ) / get_weekly_moving_average(df[denominator_column])
+            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_moving_average(df[numerator_column]) / get_weekly_moving_average(df[denominator_column])

         # annofab 計測時間から算出したvelocityを追加
         for column in production_volume_columns:
@@ -907,9 +885,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
                 "acceptance_worktime_hour",
             ]
         ].copy()
-        df_sub_task["first_annotation_started_date"] = df_sub_task["first_annotation_started_datetime"].map(
-            lambda e: datetime_to_date(e) if not pandas.isna(e) else None
-        )
+        df_sub_task["first_annotation_started_date"] = df_sub_task["first_annotation_started_datetime"].map(lambda e: datetime_to_date(e) if not pandas.isna(e) else None)

         value_columns = [
             *production_volume_columns,
@@ -924,18 +900,14 @@ class WholeProductivityPerFirstAnnotationStartedDate:
             aggfunc="sum",
         ).fillna(0)
         if len(df_agg_sub_task) > 0:
-            df_agg_sub_task["task_count"] = df_sub_task.pivot_table(
-                values=["task_id"], index="first_annotation_started_date", aggfunc="count"
-            ).fillna(0)
+            df_agg_sub_task["task_count"] = df_sub_task.pivot_table(values=["task_id"], index="first_annotation_started_date", aggfunc="count").fillna(0)
         else:
             # 列だけ作る
             df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(value_columns, 0), task_count=0)

         # 日付の一覧を生成
         if len(df_agg_sub_task) > 0:
-            df_date_base = pandas.DataFrame(
-                index=[e.strftime("%Y-%m-%d") for e in pandas.date_range(start=df_agg_sub_task.index.min(), end=df_agg_sub_task.index.max())]
-            )
+            df_date_base = pandas.DataFrame(index=[e.strftime("%Y-%m-%d") for e in pandas.date_range(start=df_agg_sub_task.index.min(), end=df_agg_sub_task.index.max())])
         else:
             df_date_base = pandas.DataFrame()

@@ -953,9 +925,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         return True

     @classmethod
-    def empty(
-        cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> WholeProductivityPerFirstAnnotationStartedDate:
+    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> WholeProductivityPerFirstAnnotationStartedDate:
         df = pandas.DataFrame(columns=cls.get_columns(custom_production_volume_list=custom_production_volume_list))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -1018,9 +988,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         for denominator in [e.value for e in production_volume_list]:
             for numerator in ["worktime", "annotation_worktime", "inspection_worktime", "acceptance_worktime"]:
                 df[f"{numerator}_minute/{denominator}"] = df[f"{numerator}_hour"] * 60 / df[denominator]
-                df[f"{numerator}_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
-                    get_weekly_sum(df[f"{numerator}_hour"]) * 60 / get_weekly_sum(df[denominator])
-                )
+                df[f"{numerator}_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[f"{numerator}_hour"]) * 60 / get_weekly_sum(df[denominator])

         def create_div_element() -> Div:
             """
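All of the hunks above are pure reformatting: statements that previously wrapped across several physical lines are collapsed onto one line, and the computation of the weekly moving-average velocity columns is unchanged. As a minimal sketch of that pattern — assuming `get_weekly_sum` is a trailing 7-day rolling sum over a pandas Series and that `WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX` is a plain string suffix (neither definition is part of this diff) — the added columns amount to:

```python
import pandas

# Assumed values for illustration only; the real definitions live elsewhere in annofabcli.
WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX = "__lastweek"


def get_weekly_sum(series: pandas.Series) -> pandas.Series:
    # Assumed behaviour: trailing 7-day rolling sum.
    return series.rolling(7).sum()


def add_velocity_column(df: pandas.DataFrame, numerator_column: str, denominator_column: str) -> None:
    # Daily velocity, e.g. actual_worktime_hour / task_count.
    df[f"{numerator_column}/{denominator_column}"] = df[numerator_column] / df[denominator_column]
    # Weekly moving average of the velocity: a ratio of 7-day sums, i.e. a weekly
    # aggregate ratio rather than an average of the daily ratios.
    df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[numerator_column]) / get_weekly_sum(df[denominator_column])
```

The same sum-of-sums construction appears in both `WholeProductivityPerCompletedDate` and `WholeProductivityPerFirstAnnotationStartedDate`; the latter also builds one variant from `get_weekly_moving_average` instead of `get_weekly_sum`, as the hunks above show.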
@@ -110,9 +110,7 @@ class WorktimePerDate:
         """
         df = self.df.copy()
         df["monitored_acceptance_worktime_hour"] = 0
-        df["monitored_worktime_hour"] = (
-            df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]
-        )
+        df["monitored_worktime_hour"] = df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]
         return WorktimePerDate(df)

     def is_empty(self) -> bool:
@@ -180,9 +178,7 @@ class WorktimePerDate:
             if f"monitored_{phase}_worktime_hour" not in df.columns:
                 df[f"monitored_{phase}_worktime_hour"] = 0

-        df["monitored_worktime_hour"] = (
-            df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]
-        )
+        df["monitored_worktime_hour"] = df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]

         if not actual_worktime.is_empty():
             df = df.merge(
@@ -374,7 +370,7 @@ class WorktimePerDate:

         logger.debug(f"{output_file} を出力します。")

-        if target_user_id_list is not None:
+        if target_user_id_list is not None:
             user_id_list = target_user_id_list
         else:
             user_id_list = self._get_default_user_id_list()
@@ -450,9 +446,7 @@ class WorktimePerDate:
         # またpandas.NAを持つDataFrameも描画できないので、numpy.nanに変換する
         # TODO この問題が解決されたら、削除する
         # https://qiita.com/yuji38kwmt/items/b5da6ed521e827620186
-        df_cumulative = df_cumulative.astype(
-            {"date": "object", "account_id": "object", "user_id": "object", "username": "object", "biography": "object"}
-        )
+        df_cumulative = df_cumulative.astype({"date": "object", "account_id": "object", "user_id": "object", "username": "object", "biography": "object"})
         df_cumulative.replace(pandas.NA, numpy.nan, inplace=True)

         line_count = 0
@@ -31,9 +31,7 @@ def _get_first_annotation_started_datetime(sub_task_history_list: list[dict[str,
     1個のタスクのタスク履歴一覧から、最初に教師付フェーズを作業した日時を取得します。
     """
     task_history_list_with_annotation_phase = [
-        e
-        for e in sub_task_history_list
-        if e["phase"] == "annotation" and e["account_id"] is not None and isoduration_to_hour(e["accumulated_labor_time_milliseconds"]) > 0
+        e for e in sub_task_history_list if e["phase"] == "annotation" and e["account_id"] is not None and isoduration_to_hour(e["accumulated_labor_time_milliseconds"]) > 0
     ]
     if len(task_history_list_with_annotation_phase) == 0:
         return None
@@ -41,9 +39,7 @@ def _get_first_annotation_started_datetime(sub_task_history_list: list[dict[str,
     return task_history_list_with_annotation_phase[0]["started_datetime"]


-def filter_task_histories(
-    task_histories: dict[str, list[dict[str, Any]]], *, start_date: Optional[str] = None, end_date: Optional[str] = None
-) -> dict[str, list[dict[str, Any]]]:
+def filter_task_histories(task_histories: dict[str, list[dict[str, Any]]], *, start_date: Optional[str] = None, end_date: Optional[str] = None) -> dict[str, list[dict[str, Any]]]:
     """
     タスク履歴を絞り込みます。

@@ -173,9 +173,7 @@ class ProjectDir(DataClassJsonMixin):
         phase_name = self.get_phase_name_for_filename(phase)
         obj.to_csv(self.project_dir / Path(f"{phase_name}者_{phase_name}開始日list.csv"))

-    def write_performance_line_graph_per_date(
-        self, obj: AbstractPhaseProductivityPerDate, phase: TaskPhase, *, user_id_list: Optional[list[str]] = None
-    ) -> None:
+    def write_performance_line_graph_per_date(self, obj: AbstractPhaseProductivityPerDate, phase: TaskPhase, *, user_id_list: Optional[list[str]] = None) -> None:
         """
         指定したフェーズの開始日ごとの作業時間や生産性情報を、折れ線グラフとして出力します。
         """
@@ -199,8 +197,7 @@ class ProjectDir(DataClassJsonMixin):
             obj.plot_production_volume_metrics(
                 production_volume_column=custom_production_volume.value,
                 production_volume_name=custom_production_volume.name,
-                output_file=output_dir
-                / Path(f"{phase_name}者用/折れ線-横軸_{phase_name}開始日-縦軸_{custom_production_volume.name}単位の指標-{phase_name}者用.html"),
+                output_file=output_dir / Path(f"{phase_name}者用/折れ線-横軸_{phase_name}開始日-縦軸_{custom_production_volume.name}単位の指標-{phase_name}者用.html"),
                 target_user_id_list=user_id_list,
                 metadata=self.metadata,
             )
@@ -209,14 +206,10 @@ class ProjectDir(DataClassJsonMixin):
         """`全体の生産性と品質.csv`を読み込む。"""
         file = self.project_dir / self.FILENAME_WHOLE_PERFORMANCE
         if file.exists():
-            return WholePerformance.from_csv(
-                file, custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria
-            )
+            return WholePerformance.from_csv(file, custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria)
         else:
             logger.warning(f"'{file!s}'を読み込もうとしましたが、ファイルは存在しません。")
-            return WholePerformance.empty(
-                custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria
-            )
+            return WholePerformance.empty(custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria)

     def write_whole_performance(self, whole_performance: WholePerformance) -> None:
         """`全体の生産性と品質.csv`を出力します。"""
@@ -234,9 +227,7 @@ class ProjectDir(DataClassJsonMixin):
                 custom_production_volume_list=self.custom_production_volume_list,
             )
         else:
-            return WholeProductivityPerCompletedDate.empty(
-                task_completion_criteria=self.task_completion_criteria, custom_production_volume_list=self.custom_production_volume_list
-            )
+            return WholeProductivityPerCompletedDate.empty(task_completion_criteria=self.task_completion_criteria, custom_production_volume_list=self.custom_production_volume_list)

     def write_whole_productivity_per_date(self, obj: WholeProductivityPerCompletedDate) -> None:
         """
@@ -263,9 +254,7 @@ class ProjectDir(DataClassJsonMixin):
                 custom_production_volume_list=self.custom_production_volume_list,
             )
         else:
-            return WholeProductivityPerFirstAnnotationStartedDate.empty(
-                self.task_completion_criteria, custom_production_volume_list=self.custom_production_volume_list
-            )
+            return WholeProductivityPerFirstAnnotationStartedDate.empty(self.task_completion_criteria, custom_production_volume_list=self.custom_production_volume_list)

     def write_whole_productivity_per_first_annotation_started_date(self, obj: WholeProductivityPerFirstAnnotationStartedDate) -> None:
         """
@@ -285,14 +274,10 @@ class ProjectDir(DataClassJsonMixin):
         """
         file = self.project_dir / self.FILENAME_USER_PERFORMANCE
         if file.exists():
-            return UserPerformance.from_csv(
-                file, custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria
-            )
+            return UserPerformance.from_csv(file, custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria)
         else:
             logger.warning(f"'{file!s}'を読み込もうとしましたが、ファイルは存在しません。")
-            return UserPerformance.empty(
-                custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria
-            )
+            return UserPerformance.empty(custom_production_volume_list=self.custom_production_volume_list, task_completion_criteria=self.task_completion_criteria)

     def write_user_performance(self, user_performance: UserPerformance) -> None:
         """
@@ -407,9 +392,7 @@ class ProjectDir(DataClassJsonMixin):

     def write_worktime_line_graph(self, obj: WorktimePerDate, user_id_list: Optional[list[str]] = None) -> None:
         """横軸が日付、縦軸がユーザごとの作業時間である折れ線グラフを出力します。"""
-        obj.plot_cumulatively(
-            self.project_dir / "line-graph/累積折れ線-横軸_日-縦軸_作業時間.html", target_user_id_list=user_id_list, metadata=self.metadata
-        )
+        obj.plot_cumulatively(self.project_dir / "line-graph/累積折れ線-横軸_日-縦軸_作業時間.html", target_user_id_list=user_id_list, metadata=self.metadata)

     def read_project_info(self) -> ProjectInfo:
         """
@@ -70,9 +70,7 @@ class VisualizationSourceFiles:
         with open(str(self.task_history_json_path), encoding="utf-8") as f: # noqa: PTH123
             task_histories_dict = json.load(f)

-        logger.debug(
-            f"{self.logging_prefix}: '{self.task_history_json_path}'を読み込みました。{len(task_histories_dict)}件のタスクの履歴が含まれています。"
-        )
+        logger.debug(f"{self.logging_prefix}: '{self.task_history_json_path}'を読み込みました。{len(task_histories_dict)}件のタスクの履歴が含まれています。")
         return task_histories_dict

     def read_task_history_events_json(self) -> list[dict[str, Any]]:
@@ -85,10 +83,7 @@ class VisualizationSourceFiles:
         with self.task_history_event_json_path.open(encoding="utf-8") as f:
             task_history_event_list = json.load(f)

-        logger.debug(
-            f"{self.logging_prefix}: '{self.task_history_event_json_path}'を読み込みました。"
-            f"{len(task_history_event_list)}件のタスク履歴イベントが含まれています。"
-        )
+        logger.debug(f"{self.logging_prefix}: '{self.task_history_event_json_path}'を読み込みました。{len(task_history_event_list)}件のタスク履歴イベントが含まれています。")
         return task_history_event_list

     def read_comments_json(self) -> list[dict[str, Any]]:
@@ -104,9 +99,7 @@ class VisualizationSourceFiles:
         logger.debug(f"{self.logging_prefix}: '{self.comment_json_path}'を読み込みました。{len(comment_list)}件のコメントが含まれています。")
         return comment_list

-    def write_files(
-        self, *, is_latest: bool = False, should_get_task_histories_one_of_each: bool = False, should_download_annotation_zip: bool = True
-    ) -> None:
+    def write_files(self, *, is_latest: bool = False, should_get_task_histories_one_of_each: bool = False, should_download_annotation_zip: bool = True) -> None:
         """
         可視化に必要なファイルを作成します。
         原則、全件ファイルをダウンロードしてファイルを作成します。必要に応じて個別にAPIを実行してファイルを作成します。
@@ -88,20 +88,12 @@ def get_only_selective_attribute(columns: list[AttributeValueKey]) -> list[Attri
     for label, attribute_name, _ in columns:
         attribute_name_list.append((label, attribute_name))

-    non_selective_attribute_names = {
-        key for key, value in collections.Counter(attribute_name_list).items() if value > SELECTIVE_ATTRIBUTE_VALUE_MAX_COUNT
-    }
+    non_selective_attribute_names = {key for key, value in collections.Counter(attribute_name_list).items() if value > SELECTIVE_ATTRIBUTE_VALUE_MAX_COUNT}

     if len(non_selective_attribute_names) > 0:
-        logger.debug(
-            f"以下の属性は値の個数が{SELECTIVE_ATTRIBUTE_VALUE_MAX_COUNT}を超えていたため、集計しません。 :: {non_selective_attribute_names}"
-        )
+        logger.debug(f"以下の属性は値の個数が{SELECTIVE_ATTRIBUTE_VALUE_MAX_COUNT}を超えていたため、集計しません。 :: {non_selective_attribute_names}")

-    return [
-        (label, attribute_name, attribute_value)
-        for (label, attribute_name, attribute_value) in columns
-        if (label, attribute_name) not in non_selective_attribute_names
-    ]
+    return [(label, attribute_name, attribute_value) for (label, attribute_name, attribute_value) in columns if (label, attribute_name) not in non_selective_attribute_names]


 def plot_label_histogram(
@@ -140,7 +132,7 @@ def plot_label_histogram(

     df = create_df()

-    if arrange_bin_edge:
+    if arrange_bin_edge:
         histogram_range = (
             df.min(numeric_only=True).min(),
             df.max(numeric_only=True).max(),
@@ -154,9 +146,7 @@ def plot_label_histogram(
     # すべての値が0である列を除外する
     columns = [col for col in df.columns if df[col].sum() > 0]
     if len(columns) < len(df.columns):
-        logger.debug(
-            f"以下のラベルは、すべてのタスクでアノテーション数が0であるためヒストグラムを描画しません。 :: {set(df.columns) - set(columns)}"
-        )
+        logger.debug(f"以下のラベルは、すべてのタスクでアノテーション数が0であるためヒストグラムを描画しません。 :: {set(df.columns) - set(columns)}")
     else:
         columns = df.columns

@@ -248,7 +238,7 @@ def plot_attribute_histogram( # noqa: PLR0915
     df = create_df()
     y_axis_label = _get_y_axis_label(group_by)

-    if arrange_bin_edge:
+    if arrange_bin_edge:
         histogram_range = (
             df.min(numeric_only=True).min(),
             df.max(numeric_only=True).max(),
@@ -260,9 +250,7 @@ def plot_attribute_histogram( # noqa: PLR0915
     # すべての値が0である列を除外する
     columns = [col for col in df.columns if df[col].sum() > 0]
    if len(columns) < len(df.columns):
-        logger.debug(
-            f"以下の属性値は、すべてのタスクでアノテーション数が0であるためヒストグラムを描画しません。 :: {set(df.columns) - set(columns)}"
-        )
+        logger.debug(f"以下の属性値は、すべてのタスクでアノテーション数が0であるためヒストグラムを描画しません。 :: {set(df.columns) - set(columns)}")
     else:
         columns = df.columns

@@ -387,9 +375,7 @@ class VisualizeAnnotationCount(CommandLine):
         metadata = {
             "project_id": project_id,
             "project_title": project_title,
-            "task_query": {k: v for k, v in task_query.to_dict(encode_json=True).items() if v is not None and v is not False}
-            if task_query is not None
-            else None,
+            "task_query": {k: v for k, v in task_query.to_dict(encode_json=True).items() if v is not None and v is not False} if task_query is not None else None,
             "target_task_ids": target_task_ids,
         }

@@ -530,7 +516,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
         "--latest",
         action="store_true",
-        help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。",
+        help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。",
     )

     parser.add_argument(
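In the `get_only_selective_attribute` hunk above, only the comprehensions were joined onto single lines; the filtering logic is unchanged. A self-contained restatement of that logic, with the threshold constant and the `AttributeValueKey` alias assumed here for illustration (their real definitions are not part of this diff):

```python
import collections

SELECTIVE_ATTRIBUTE_VALUE_MAX_COUNT = 20  # assumed threshold, for illustration only
AttributeValueKey = tuple[str, str, str]  # (label, attribute_name, attribute_value)


def get_only_selective_attribute(columns: list[AttributeValueKey]) -> list[AttributeValueKey]:
    """Drop attributes with more distinct values than the threshold (e.g. free-text attributes)."""
    attribute_name_list = [(label, attribute_name) for label, attribute_name, _ in columns]
    # Assuming `columns` holds one entry per distinct attribute value, the Counter
    # gives the number of distinct values observed for each (label, attribute) pair.
    non_selective_attribute_names = {key for key, value in collections.Counter(attribute_name_list).items() if value > SELECTIVE_ATTRIBUTE_VALUE_MAX_COUNT}
    return [(label, attribute_name, attribute_value) for (label, attribute_name, attribute_value) in columns if (label, attribute_name) not in non_selective_attribute_names]
```

As the debug message in the hunk indicates, attributes dropped this way are logged and excluded from the aggregation.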
@@ -114,10 +114,7 @@ def plot_annotation_duration_histogram_by_label( # noqa: PLR0915
     # すべての値が0である列を除外する
     columns = [col for col in df.columns if df[col].sum() > 0]
     if len(columns) < len(df.columns):
-        logger.debug(
-            f"以下の属性値は、すべてのタスクで区間アノテーションの長さが0であるためヒストグラムを描画しません。 :: "
-            f"{set(df.columns) - set(columns)}"
-        )
+        logger.debug(f"以下の属性値は、すべてのタスクで区間アノテーションの長さが0であるためヒストグラムを描画しません。 :: {set(df.columns) - set(columns)}")
     df = df[columns]

     if bin_width is not None: # noqa: SIM102
@@ -220,10 +217,7 @@ def plot_annotation_duration_histogram_by_attribute( # noqa: PLR0915
     # すべての値が0である列を除外する
     columns = [col for col in df.columns if df[col].sum() > 0]
     if len(columns) < len(df.columns):
-        logger.debug(
-            f"以下のラベルは、すべてのタスクで区間アノテーションの長さが0であるためヒストグラムを描画しません。 :: "
-            f"{set(df.columns) - set(columns)}"
-        )
+        logger.debug(f"以下のラベルは、すべてのタスクで区間アノテーションの長さが0であるためヒストグラムを描画しません。 :: {set(df.columns) - set(columns)}")
     df = df[columns]

     histogram_range = get_histogram_range(df)
@@ -335,9 +329,7 @@ class VisualizeAnnotationDuration(CommandLine):
         metadata = {
             "project_id": project_id,
             "project_title": project_title,
-            "task_query": {k: v for k, v in task_query.to_dict(encode_json=True).items() if v is not None and v is not False}
-            if task_query is not None
-            else None,
+            "task_query": {k: v for k, v in task_query.to_dict(encode_json=True).items() if v is not None and v is not False} if task_query is not None else None,
             "target_task_ids": target_task_ids,
         }
         plot_annotation_duration_histogram_by_label(
@@ -372,9 +364,7 @@ class VisualizeAnnotationDuration(CommandLine):
         super().validate_project(project_id, project_member_roles=[ProjectMemberRole.OWNER, ProjectMemberRole.TRAINING_DATA_USER])
         project, _ = self.service.api.get_project(project_id)
         if project["input_data_type"] != InputDataType.MOVIE.value:
-            logger.warning(
-                f"project_id='{project_id}'であるプロジェクトは、動画プロジェクトでないので、出力される区間アノテーションの長さはすべて0秒になります。"
-            )
+            logger.warning(f"project_id='{project_id}'であるプロジェクトは、動画プロジェクトでないので、出力される区間アノテーションの長さはすべて0秒になります。")

         output_dir: Path = args.output_dir
         annotation_path = Path(args.annotation) if args.annotation is not None else None
@@ -479,7 +469,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
         "--latest",
         action="store_true",
-        help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。",
+        help="``--annotation`` を指定しないとき、最新のアノテーションzipを参照します。このオプションを指定すると、アノテーションzipを更新するのに数分待ちます。",
     )

     parser.add_argument(