annofabcli 1.111.2__py3-none-any.whl → 1.112.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- annofabcli/__main__.py +1 -2
- annofabcli/annotation/annotation_query.py +10 -10
- annofabcli/annotation/change_annotation_attributes.py +9 -9
- annofabcli/annotation/change_annotation_attributes_per_annotation.py +3 -4
- annofabcli/annotation/change_annotation_properties.py +13 -13
- annofabcli/annotation/copy_annotation.py +5 -5
- annofabcli/annotation/create_classification_annotation.py +6 -6
- annofabcli/annotation/delete_annotation.py +8 -8
- annofabcli/annotation/download_annotation_zip.py +1 -3
- annofabcli/annotation/dump_annotation.py +7 -7
- annofabcli/annotation/import_annotation.py +12 -12
- annofabcli/annotation/list_annotation.py +8 -8
- annofabcli/annotation/list_annotation_count.py +1 -2
- annofabcli/annotation/merge_segmentation.py +5 -5
- annofabcli/annotation/remove_segmentation_overlap.py +4 -4
- annofabcli/annotation/restore_annotation.py +6 -6
- annofabcli/annotation/subcommand_annotation.py +1 -2
- annofabcli/annotation_specs/add_attribute_restriction.py +4 -5
- annofabcli/annotation_specs/attribute_restriction.py +8 -8
- annofabcli/annotation_specs/export_annotation_specs.py +4 -5
- annofabcli/annotation_specs/get_annotation_specs_with_attribute_id_replaced.py +3 -4
- annofabcli/annotation_specs/get_annotation_specs_with_choice_id_replaced.py +3 -4
- annofabcli/annotation_specs/get_annotation_specs_with_label_id_replaced.py +3 -4
- annofabcli/annotation_specs/list_annotation_specs_attribute.py +9 -10
- annofabcli/annotation_specs/list_annotation_specs_choice.py +9 -10
- annofabcli/annotation_specs/list_annotation_specs_history.py +2 -2
- annofabcli/annotation_specs/list_annotation_specs_label.py +8 -9
- annofabcli/annotation_specs/list_annotation_specs_label_attribute.py +10 -11
- annofabcli/annotation_specs/list_attribute_restriction.py +2 -4
- annofabcli/annotation_specs/list_label_color.py +2 -3
- annofabcli/annotation_specs/put_label_color.py +3 -4
- annofabcli/annotation_specs/subcommand_annotation_specs.py +1 -3
- annofabcli/annotation_zip/list_annotation_3d_bounding_box.py +365 -0
- annofabcli/annotation_zip/list_annotation_bounding_box_2d.py +11 -12
- annofabcli/annotation_zip/list_range_annotation.py +24 -14
- annofabcli/annotation_zip/list_single_point_annotation.py +11 -12
- annofabcli/annotation_zip/subcommand_annotation_zip.py +3 -2
- annofabcli/annotation_zip/validate_annotation.py +8 -7
- annofabcli/comment/delete_comment.py +4 -6
- annofabcli/comment/download_comment_json.py +4 -6
- annofabcli/comment/list_all_comment.py +5 -6
- annofabcli/comment/list_comment.py +3 -4
- annofabcli/comment/put_comment.py +9 -10
- annofabcli/comment/put_comment_simply.py +5 -6
- annofabcli/comment/put_inspection_comment.py +1 -3
- annofabcli/comment/put_inspection_comment_simply.py +1 -3
- annofabcli/comment/put_onhold_comment.py +1 -3
- annofabcli/comment/put_onhold_comment_simply.py +1 -3
- annofabcli/comment/subcommand_comment.py +1 -3
- annofabcli/common/bokeh.py +4 -4
- annofabcli/common/cli.py +17 -17
- annofabcli/common/download.py +28 -29
- annofabcli/common/facade.py +37 -38
- annofabcli/common/image.py +14 -14
- annofabcli/common/utils.py +8 -8
- annofabcli/common/visualize.py +13 -13
- annofabcli/experimental/list_out_of_range_annotation_for_movie.py +3 -4
- annofabcli/experimental/subcommand_experimental.py +1 -3
- annofabcli/filesystem/draw_annotation.py +26 -26
- annofabcli/filesystem/filter_annotation.py +9 -10
- annofabcli/filesystem/mask_user_info.py +14 -14
- annofabcli/filesystem/merge_annotation.py +8 -8
- annofabcli/filesystem/subcommand_filesystem.py +1 -3
- annofabcli/input_data/copy_input_data.py +8 -9
- annofabcli/input_data/delete_input_data.py +2 -2
- annofabcli/input_data/delete_metadata_key_of_input_data.py +3 -5
- annofabcli/input_data/download_input_data_json.py +4 -6
- annofabcli/input_data/list_all_input_data.py +8 -8
- annofabcli/input_data/list_all_input_data_merged_task.py +4 -4
- annofabcli/input_data/list_input_data.py +4 -4
- annofabcli/input_data/put_input_data.py +5 -5
- annofabcli/input_data/put_input_data_with_zip.py +2 -3
- annofabcli/input_data/subcommand_input_data.py +1 -3
- annofabcli/input_data/update_input_data.py +6 -8
- annofabcli/input_data/update_metadata_of_input_data.py +3 -5
- annofabcli/instruction/copy_instruction.py +4 -5
- annofabcli/instruction/download_instruction.py +4 -5
- annofabcli/instruction/list_instruction_history.py +2 -2
- annofabcli/instruction/subcommand_instruction.py +1 -3
- annofabcli/instruction/upload_instruction.py +2 -3
- annofabcli/job/delete_job.py +1 -2
- annofabcli/job/list_job.py +4 -4
- annofabcli/job/list_last_job.py +3 -3
- annofabcli/job/subcommand_job.py +1 -3
- annofabcli/job/wait_job.py +4 -5
- annofabcli/my_account/get_my_account.py +1 -2
- annofabcli/my_account/subcommand_my_account.py +1 -3
- annofabcli/organization/list_organization.py +1 -2
- annofabcli/organization/subcommand_organization.py +1 -3
- annofabcli/organization_member/change_organization_member.py +3 -4
- annofabcli/organization_member/delete_organization_member.py +3 -4
- annofabcli/organization_member/invite_organization_member.py +1 -3
- annofabcli/organization_member/list_organization_member.py +2 -2
- annofabcli/organization_member/subcommand_organization_member.py +1 -3
- annofabcli/project/change_organization_of_project.py +3 -3
- annofabcli/project/change_project_status.py +3 -3
- annofabcli/project/copy_project.py +4 -4
- annofabcli/project/create_project.py +7 -7
- annofabcli/project/diff_projects.py +4 -5
- annofabcli/project/list_project.py +4 -4
- annofabcli/project/put_project.py +1 -2
- annofabcli/project/subcommand_project.py +1 -2
- annofabcli/project/update_configuration.py +3 -3
- annofabcli/project/update_project.py +6 -8
- annofabcli/project_member/change_project_members.py +7 -7
- annofabcli/project_member/copy_project_members.py +3 -3
- annofabcli/project_member/drop_project_members.py +1 -2
- annofabcli/project_member/invite_project_members.py +1 -3
- annofabcli/project_member/list_users.py +1 -2
- annofabcli/project_member/put_project_members.py +5 -5
- annofabcli/project_member/subcommand_project_member.py +1 -3
- annofabcli/stat_visualization/mask_visualization_dir.py +8 -9
- annofabcli/stat_visualization/merge_visualization_dir.py +6 -7
- annofabcli/stat_visualization/subcommand_stat_visualization.py +1 -2
- annofabcli/stat_visualization/summarize_whole_performance_csv.py +1 -2
- annofabcli/stat_visualization/write_graph.py +2 -3
- annofabcli/stat_visualization/write_performance_rating_csv.py +20 -27
- annofabcli/statistics/histogram.py +5 -6
- annofabcli/statistics/linegraph.py +13 -14
- annofabcli/statistics/list_annotation_area.py +38 -13
- annofabcli/statistics/list_annotation_attribute.py +9 -10
- annofabcli/statistics/list_annotation_attribute_filled_count.py +30 -31
- annofabcli/statistics/list_annotation_count.py +57 -58
- annofabcli/statistics/list_annotation_duration.py +33 -34
- annofabcli/statistics/list_video_duration.py +4 -5
- annofabcli/statistics/list_worktime.py +3 -3
- annofabcli/statistics/scatter.py +9 -8
- annofabcli/statistics/subcommand_statistics.py +1 -4
- annofabcli/statistics/summarize_task_count.py +4 -6
- annofabcli/statistics/summarize_task_count_by_task_id_group.py +2 -4
- annofabcli/statistics/summarize_task_count_by_user.py +1 -3
- annofabcli/statistics/visualization/dataframe/annotation_count.py +5 -4
- annofabcli/statistics/visualization/dataframe/annotation_duration.py +2 -3
- annofabcli/statistics/visualization/dataframe/cumulative_productivity.py +15 -17
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py +17 -19
- annofabcli/statistics/visualization/dataframe/project_performance.py +3 -12
- annofabcli/statistics/visualization/dataframe/task.py +11 -12
- annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py +9 -10
- annofabcli/statistics/visualization/dataframe/user_performance.py +21 -19
- annofabcli/statistics/visualization/dataframe/whole_performance.py +3 -4
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +12 -14
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py +11 -13
- annofabcli/statistics/visualization/filtering_query.py +7 -7
- annofabcli/statistics/visualization/project_dir.py +27 -14
- annofabcli/statistics/visualize_annotation_count.py +22 -23
- annofabcli/statistics/visualize_annotation_duration.py +21 -22
- annofabcli/statistics/visualize_statistics.py +36 -33
- annofabcli/statistics/visualize_video_duration.py +18 -20
- annofabcli/supplementary/delete_supplementary_data.py +4 -4
- annofabcli/supplementary/list_supplementary_data.py +3 -3
- annofabcli/supplementary/put_supplementary_data.py +8 -8
- annofabcli/supplementary/subcommand_supplementary.py +1 -3
- annofabcli/task/cancel_acceptance.py +16 -17
- annofabcli/task/change_operator.py +10 -12
- annofabcli/task/change_status_to_break.py +7 -9
- annofabcli/task/change_status_to_on_hold.py +10 -12
- annofabcli/task/complete_tasks.py +17 -18
- annofabcli/task/copy_tasks.py +3 -5
- annofabcli/task/delete_metadata_key_of_task.py +4 -6
- annofabcli/task/delete_tasks.py +6 -6
- annofabcli/task/download_task_json.py +4 -6
- annofabcli/task/list_all_tasks.py +7 -7
- annofabcli/task/list_all_tasks_added_task_history.py +12 -12
- annofabcli/task/list_tasks.py +6 -6
- annofabcli/task/list_tasks_added_task_history.py +9 -9
- annofabcli/task/put_tasks.py +4 -5
- annofabcli/task/put_tasks_by_count.py +1 -2
- annofabcli/task/reject_tasks.py +18 -20
- annofabcli/task/subcommand_task.py +1 -3
- annofabcli/task/update_metadata_of_task.py +5 -6
- annofabcli/task_history/download_task_history_json.py +4 -6
- annofabcli/task_history/list_all_task_history.py +5 -6
- annofabcli/task_history/list_task_history.py +3 -4
- annofabcli/task_history/subcommand_task_history.py +1 -3
- annofabcli/task_history_event/download_task_history_event_json.py +4 -6
- annofabcli/task_history_event/list_all_task_history_event.py +6 -6
- annofabcli/task_history_event/list_worktime.py +15 -15
- annofabcli/task_history_event/subcommand_task_history_event.py +1 -2
- {annofabcli-1.111.2.dist-info → annofabcli-1.112.0.dist-info}/METADATA +9 -15
- annofabcli-1.112.0.dist-info/RECORD +229 -0
- {annofabcli-1.111.2.dist-info → annofabcli-1.112.0.dist-info}/WHEEL +1 -1
- annofabcli-1.111.2.dist-info/RECORD +0 -228
- {annofabcli-1.111.2.dist-info → annofabcli-1.112.0.dist-info}/entry_points.txt +0 -0
- {annofabcli-1.111.2.dist-info → annofabcli-1.112.0.dist-info}/licenses/LICENSE +0 -0

annofabcli/statistics/visualization/dataframe/productivity_per_date.py

@@ -7,11 +7,9 @@ from __future__ import annotations
 import abc
 import logging
 from pathlib import Path
-from typing import Any
+from typing import Any

-import bokeh
 import bokeh.layouts
-import bokeh.palettes
 import pandas
 from annofabapi.models import TaskPhase
 from bokeh.models.ui import UIElement

@@ -80,7 +78,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
     PLOT_WIDTH = 1200
     PLOT_HEIGHT = 600

-    def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         self.df = df
         self.phase = phase
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []

@@ -97,7 +95,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
         line_graph_list: list[LineGraph],
         plotted_users: list[tuple[str, str]],
         output_file: Path,
-        metadata:
+        metadata: dict[str, Any] | None,
     ) -> None:
         """
         折れ線グラフを、HTMLファイルに出力します。

@@ -139,8 +137,8 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
         production_volume_name: str,
         output_file: Path,
         *,
-        target_user_id_list:
-        metadata:
+        target_user_id_list: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         raise NotImplementedError()

@@ -220,7 +218,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
 class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
     """教師付開始日ごとの教師付者の生産性に関する情報"""

-    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         super().__init__(df, phase=TaskPhase.ANNOTATION, custom_production_volume_list=custom_production_volume_list)

     @classmethod

@@ -234,8 +232,8 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
         production_volume_name: str,
         output_file: Path,
         *,
-        target_user_id_list:
-        metadata:
+        target_user_id_list: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """
         生産性を教師付作業者ごとにプロットする。

@@ -344,7 +342,7 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
             username = df_subset.iloc[0]["username"]

             line_count += 1
-            for line_graph, (x_column, y_column) in zip(line_graph_list, columns_list):
+            for line_graph, (x_column, y_column) in zip(line_graph_list, columns_list, strict=False):
                 if y_column.endswith(WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX):
                     line_graph.add_moving_average_line(
                         source=source,

@@ -380,7 +378,7 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
 class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
     """検査開始日ごとの検査者の生産性に関する情報"""

-    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         super().__init__(df, phase=TaskPhase.INSPECTION, custom_production_volume_list=custom_production_volume_list)

     @classmethod

@@ -394,8 +392,8 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
         production_volume_name: str,
         output_file: Path,
         *,
-        target_user_id_list:
-        metadata:
+        target_user_id_list: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """
         アノテーション単位の生産性を受入作業者ごとにプロットする。

@@ -477,7 +475,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
             username = df_subset.iloc[0]["username"]

             line_count += 1
-            for line_graph, (x_column, y_column) in zip(line_graph_list, columns_list):
+            for line_graph, (x_column, y_column) in zip(line_graph_list, columns_list, strict=False):
                 if y_column.endswith(WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX):
                     line_graph.add_moving_average_line(
                         source=source,

@@ -517,7 +515,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
 class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
     """受入開始日ごとの受入者の生産性に関する情報"""

-    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         super().__init__(df, phase=TaskPhase.ACCEPTANCE, custom_production_volume_list=custom_production_volume_list)

     @classmethod

@@ -531,8 +529,8 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
         production_volume_name: str,
         output_file: Path,
         *,
-        target_user_id_list:
-        metadata:
+        target_user_id_list: list[str] | None = None,
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """
         アノテーション単位の生産性を受入作業者ごとにプロットする。

@@ -617,7 +615,7 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
             username = df_subset.iloc[0]["username"]

             line_count += 1
-            for line_graph, (x_column, y_column) in zip(line_graph_list, columns_list):
+            for line_graph, (x_column, y_column) in zip(line_graph_list, columns_list, strict=False):
                 if y_column.endswith(WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX):
                     line_graph.add_moving_average_line(
                         source=source,
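Two idioms recur throughout the hunks in this release: parameter annotations previously written with `typing.Optional[...]` are rewritten in PEP 604 union syntax (`X | None`), which is why the `Optional` imports disappear below, and every `zip()` call gains an explicit `strict=False`, which keeps the old stop-at-the-shortest behavior while satisfying linters such as Ruff's B905 check. A minimal sketch of both idioms, assuming Python 3.10 or newer (the names here are illustrative, not taken from annofabcli):

```python
from __future__ import annotations


def head(values: list[str] | None = None) -> str | None:
    """PEP 604 syntax: `list[str] | None` replaces `Optional[list[str]]`."""
    return values[0] if values else None


# zip() accepts a `strict` keyword since Python 3.10.
# strict=False keeps the historical behavior (stop at the shortest iterable);
# strict=True would raise ValueError when the iterables differ in length.
pairs = list(zip(["a", "b", "c"], [1, 2], strict=False))
print(pairs)             # [('a', 1), ('b', 2)]
print(head(["x", "y"]))  # x
```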
annofabcli/statistics/visualization/dataframe/project_performance.py

@@ -2,7 +2,6 @@ from __future__ import annotations

 import logging
 from pathlib import Path
-from typing import Optional

 import numpy
 import pandas

@@ -23,7 +22,7 @@ class ProjectPerformance:
     プロジェクトごとの生産性と品質
     """

-    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         self.df = df
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []

@@ -89,7 +88,7 @@ class ProjectPerformance:
         return [e.value for e in TaskPhase if e.value in tmp_set]

     @classmethod
-    def from_project_dirs(cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list:
+    def from_project_dirs(cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> ProjectPerformance:
         row_list: list[pandas.Series] = [cls._get_series_from_project_dir(project_dir) for project_dir in project_dir_list]
         return cls(pandas.DataFrame(row_list), custom_production_volume_list=custom_production_volume_list)

@@ -139,15 +138,7 @@ class ProjectWorktimePerMonth:
         new_index = [str(dt)[0:7] for dt in series.index]
         result = pandas.Series(series.values, index=new_index)
         result["dirname"] = project_dir.project_dir.name
-
-        try:
-            project_info = project_dir.read_project_info()
-            project_title = project_info.project_title
-        except Exception:
-            # 複数のプロジェクトをマージして生産性情報を出力した場合は、`project_info.json`は存在しないので、このブロックに入る
-            logger.info(f"'{project_dir}'からプロジェクト情報を読み込むのに失敗しました。project_titleは空文字にします。", exc_info=True)
-            project_title = ""
-        result["project_title"] = project_title
+        result["project_title"] = project_dir.get_project_title()
         return result

     @classmethod
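The last hunk above drops an inline try/except around `project_dir.read_project_info()` in favor of a single call to `project_dir.get_project_title()`. The body of that method is not shown in this diff (it belongs to `annofabcli/statistics/visualization/project_dir.py`, which also changed in this release); based on the removed lines, a plausible sketch of what it encapsulates is:

```python
# Hypothetical sketch of ProjectDir.get_project_title(); the actual
# implementation in project_dir.py may differ.
def get_project_title(self) -> str:
    try:
        return self.read_project_info().project_title
    except Exception:
        # project_info.json does not exist when visualization results from
        # multiple projects were merged, so fall back to an empty title.
        logger.info(f"Failed to read project info from '{self}'. Using an empty project_title.", exc_info=True)
        return ""
```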
annofabcli/statistics/visualization/dataframe/task.py

@@ -2,12 +2,11 @@ from __future__ import annotations

 import logging
 from pathlib import Path
-from typing import Any
+from typing import Any

 import annofabapi
 import bokeh
 import bokeh.layouts
-import bokeh.palettes
 import numpy
 import pandas
 import pytz

@@ -64,7 +63,7 @@ class Task:
         """
         return list(set(self.required_columns) - set(df.columns))

-    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []

         if self._duplicated_keys(df):

@@ -184,8 +183,8 @@ class Task:
         project_id: str,
         annofab_service: annofabapi.Resource,
         *,
-        input_data_count:
-        custom_production_volume:
+        input_data_count: InputDataCount | None = None,
+        custom_production_volume: CustomProductionVolume | None = None,
     ) -> Task:
         """
         APIから取得した情報と、DataFrameのラッパーからインスタンスを生成します。

@@ -255,7 +254,7 @@ class Task:
         return len(self.df) == 0

     @classmethod
-    def empty(cls, *, custom_production_volume_list:
+    def empty(cls, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> Task:
         """空のデータフレームを持つインスタンスを生成します。"""

         bool_columns = [

@@ -316,12 +315,12 @@ class Task:
         return cls(df, custom_production_volume_list=custom_production_volume_list)

     @classmethod
-    def from_csv(cls, csv_file: Path, *, custom_production_volume_list:
+    def from_csv(cls, csv_file: Path, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> Task:
         df = pandas.read_csv(str(csv_file))
         return cls(df, custom_production_volume_list=custom_production_volume_list)

     @staticmethod
-    def merge(*obj: Task, custom_production_volume_list:
+    def merge(*obj: Task, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> Task:
         """
         複数のインスタンスをマージします。

@@ -332,7 +331,7 @@ class Task:
         df_merged = pandas.concat(df_list)
         return Task(df_merged, custom_production_volume_list=custom_production_volume_list)

-    def plot_histogram_of_worktime(self, output_file: Path, *, metadata:
+    def plot_histogram_of_worktime(self, output_file: Path, *, metadata: dict[str, Any] | None = None) -> None:
         """作業時間に関する情報をヒストグラムでプロットする。

         Args:

@@ -413,7 +412,7 @@ class Task:
         bokeh.plotting.save(bokeh_obj)
         logger.debug(f"'{output_file}'を出力しました。")

-    def plot_histogram_of_others(self, output_file: Path, *, metadata:
+    def plot_histogram_of_others(self, output_file: Path, *, metadata: dict[str, Any] | None = None) -> None:
         """アノテーション数や、検査コメント数など、作業時間以外の情報をヒストグラムで表示する。

         Args:

@@ -539,8 +538,8 @@ class Task:

     def mask_user_info(
         self,
-        to_replace_for_user_id:
-        to_replace_for_username:
+        to_replace_for_user_id: dict[str, str] | None = None,
+        to_replace_for_username: dict[str, str] | None = None,
     ) -> Task:
         """
         引数から渡された情報を元に、インスタンス変数`df`内のユーザー情報をマスクして、新しいインスタンスを返します。
annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py

@@ -3,7 +3,6 @@ from __future__ import annotations
 import logging
 from functools import partial
 from pathlib import Path
-from typing import Optional

 import pandas
 from annofabapi.models import TaskPhase

@@ -113,7 +112,7 @@ class TaskWorktimeByPhaseUser:
         duplicated = df.duplicated(subset=["project_id", "task_id", "phase", "phase_stage", "account_id"])
         return duplicated.any()

-    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list:
+    def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> None:
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []

         if self._duplicated_keys(df):

@@ -168,7 +167,7 @@ class TaskWorktimeByPhaseUser:
         print_csv(self.df[self.columns], str(output_file))

     @staticmethod
-    def merge(*obj: TaskWorktimeByPhaseUser, custom_production_volume_list:
+    def merge(*obj: TaskWorktimeByPhaseUser, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> TaskWorktimeByPhaseUser:
         """
         複数のインスタンスをマージします。

@@ -179,7 +178,7 @@ class TaskWorktimeByPhaseUser:
         return TaskWorktimeByPhaseUser(df_merged, custom_production_volume_list=custom_production_volume_list)

     @classmethod
-    def empty(cls, *, custom_production_volume_list:
+    def empty(cls, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> TaskWorktimeByPhaseUser:
         """空のデータフレームを持つインスタンスを生成します。"""

         df_dtype: dict[str, str] = {

@@ -216,17 +215,17 @@ class TaskWorktimeByPhaseUser:
         return len(self.df) == 0

     @classmethod
-    def from_csv(cls, csv_file: Path, *, custom_production_volume_list:
+    def from_csv(cls, csv_file: Path, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> TaskWorktimeByPhaseUser:
         df = pandas.read_csv(str(csv_file))
         return cls(df, custom_production_volume_list=custom_production_volume_list)

     def mask_user_info(
         self,
         *,
-        to_replace_for_user_id:
-        to_replace_for_username:
-        to_replace_for_account_id:
-        to_replace_for_biography:
+        to_replace_for_user_id: dict[str, str] | None = None,
+        to_replace_for_username: dict[str, str] | None = None,
+        to_replace_for_account_id: dict[str, str] | None = None,
+        to_replace_for_biography: dict[str, str] | None = None,
     ) -> TaskWorktimeByPhaseUser:
         """
         引数から渡された情報を元に、インスタンス変数`df`内のユーザー情報をマスクして、新しいインスタンスを返します。

@@ -248,7 +247,7 @@ class TaskWorktimeByPhaseUser:
         return TaskWorktimeByPhaseUser(df, custom_production_volume_list=self.custom_production_volume_list)

     @staticmethod
-    def _create_annotation_count_ratio_df(task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns:
+    def _create_annotation_count_ratio_df(task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: list[str] | None) -> pandas.DataFrame:
         """
         task_id, phase, (phase_index), user_idの作業時間比から、アノテーション数などの生産量を求める
annofabcli/statistics/visualization/dataframe/user_performance.py

@@ -10,11 +10,9 @@ import math
 from collections.abc import Sequence
 from enum import Enum
 from pathlib import Path
-from typing import Any, Literal
+from typing import Any, Literal

-import bokeh
 import bokeh.layouts
-import bokeh.palettes
 import numpy
 import pandas
 from annofabapi.models import TaskPhase

@@ -93,7 +91,7 @@ class UserPerformance:
         df: pandas.DataFrame,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> None:
         self.task_completion_criteria = task_completion_criteria
         phase_list = self.get_phase_list(df.columns)

@@ -184,13 +182,13 @@ class UserPerformance:
         csv_file: Path,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> UserPerformance:
         df = read_multiheader_csv(str(csv_file))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

     @classmethod
-    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list:
+    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> UserPerformance:
         """空のデータフレームを持つインスタンスを生成します。"""
         production_volume_columns = ["input_data_count", "annotation_count"]
         if custom_production_volume_list is not None:

@@ -440,7 +438,7 @@ class UserPerformance:
         ("working_days", "acceptance")
         """

-        def _create_df_first_last_working_date(phase:
+        def _create_df_first_last_working_date(phase: str | None) -> pandas.DataFrame:
             """
             指定したフェーズに対応する作業開始日、作業終了日、作業日数を算出する

@@ -730,7 +728,7 @@ class UserPerformance:
         print_csv(self.df[self.columns], str(output_file))

     @staticmethod
-    def _get_average_value(df: pandas.DataFrame, numerator_column: tuple[str, str], denominator_column: tuple[str, str]) ->
+    def _get_average_value(df: pandas.DataFrame, numerator_column: tuple[str, str], denominator_column: tuple[str, str]) -> float | None:
         numerator = df[numerator_column].sum()
         denominator = df[denominator_column].sum()
         if denominator > 0:

@@ -831,7 +829,7 @@ class UserPerformance:
         worktime_type: WorktimeType,
         production_volume_column: str,
         *,
-        metadata:
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """作業時間と生産性の関係をメンバごとにプロットする。

@@ -885,7 +883,7 @@ class UserPerformance:
             df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", phase)] = df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", phase)] * 60

         for biography_index, biography in enumerate(sorted(set(df["biography"]))):
-            for scatter_obj, phase in zip(scatter_obj_list, self.phase_list):
+            for scatter_obj, phase in zip(scatter_obj_list, self.phase_list, strict=False):
                 filtered_df = df[(df["biography"] == biography) & df[(x_column, phase)].notna() & df[(y_column, phase)].notna()]
                 if len(filtered_df) == 0:
                     continue

@@ -900,7 +898,7 @@ class UserPerformance:
                     color=get_color_from_palette(biography_index),
                 )

-        for scatter_obj, phase in zip(scatter_obj_list, self.phase_list):
+        for scatter_obj, phase in zip(scatter_obj_list, self.phase_list, strict=False):
             average_hour = self._get_average_value(
                 df,
                 numerator_column=(f"{worktime_type.value}_worktime_hour", phase),

@@ -913,7 +911,7 @@ class UserPerformance:
             quartile = self._get_quartile_value(df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", phase)])
             scatter_obj.plot_quartile_line(quartile, dimension="width")

-            scatter_obj.add_multi_choice_widget_for_searching_user(list(zip(df[("user_id", "")], df[("username", "")])))
+            scatter_obj.add_multi_choice_widget_for_searching_user(list(zip(df[("user_id", "")], df[("username", "")], strict=False)))
             scatter_obj.process_after_adding_glyphs()

         div_element = self._create_div_element()

@@ -923,7 +921,7 @@ class UserPerformance:

         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

-    def plot_quality(self, output_file: Path, *, metadata:
+    def plot_quality(self, output_file: Path, *, metadata: dict[str, Any] | None = None) -> None:
         """
         メンバごとに品質を散布図でプロットする

@@ -981,7 +979,7 @@ class UserPerformance:
         ]

         for biography_index, biography in enumerate(sorted(set(df["biography"]))):
-            for column_pair, scatter_obj in zip(column_pair_list, scatter_obj_list):
+            for column_pair, scatter_obj in zip(column_pair_list, scatter_obj_list, strict=False):
                 x_column = column_pair[0]
                 y_column = column_pair[1]
                 filtered_df = df[(df["biography"] == biography) & df[(x_column, PHASE)].notna() & df[(y_column, PHASE)].notna()]

@@ -1002,6 +1000,7 @@ class UserPerformance:
         for column_pair, scatter_obj in zip(
             [("rejected_count", "task_count"), ("pointed_out_inspection_comment_count", "annotation_count")],
             scatter_obj_list,
+            strict=False,
         ):
             average_value = self._get_average_value(df, numerator_column=(column_pair[0], PHASE), denominator_column=(column_pair[1], PHASE))
             if average_value is not None:

@@ -1010,12 +1009,13 @@ class UserPerformance:
         for column, scatter_obj in zip(
             ["rejected_count/task_count", "pointed_out_inspection_comment_count/annotation_count"],
             scatter_obj_list,
+            strict=False,
         ):
             quartile = self._get_quartile_value(df[(column, PHASE)])
             scatter_obj.plot_quartile_line(quartile, dimension="width")

         for scatter_obj in scatter_obj_list:
-            scatter_obj.add_multi_choice_widget_for_searching_user(list(zip(df[("user_id", "")], df[("username", "")])))
+            scatter_obj.add_multi_choice_widget_for_searching_user(list(zip(df[("user_id", "")], df[("username", "")], strict=False)))
             scatter_obj.process_after_adding_glyphs()

         div_element = self._create_div_element()

@@ -1025,7 +1025,7 @@ class UserPerformance:

         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

-    def plot_quality_and_productivity(self, output_file: Path, worktime_type: WorktimeType, production_volume_column: str, *, metadata:
+    def plot_quality_and_productivity(self, output_file: Path, worktime_type: WorktimeType, production_volume_column: str, *, metadata: dict[str, Any] | None = None) -> None:
         """
         作業時間を元に算出した生産性と品質の関係を、メンバごとにプロットする
         """

@@ -1079,6 +1079,7 @@ class UserPerformance:
         for column_pair, scatter_obj in zip(
             [("rejected_count", "task_count"), ("pointed_out_inspection_comment_count", production_volume_column)],
             scatter_obj_list,
+            strict=False,
         ):
             if x_average_minute is not None:
                 scatter_obj.plot_average_line(x_average_minute, dimension="height")

@@ -1095,6 +1096,7 @@ class UserPerformance:
         for column, scatter_obj in zip(
             ["rejected_count/task_count", f"pointed_out_inspection_comment_count/{production_volume_column}"],
             scatter_obj_list,
+            strict=False,
         ):
             scatter_obj.plot_quartile_line(x_quartile, dimension="height")
             y_quartile = self._get_quartile_value(df[(column, PHASE)])

@@ -1133,7 +1135,7 @@ class UserPerformance:
         ]

         for biography_index, biography in enumerate(sorted(set(df["biography"]))):
-            for scatter_obj, column_pair in zip(scatter_obj_list, column_pair_list):
+            for scatter_obj, column_pair in zip(scatter_obj_list, column_pair_list, strict=False):
                 x_column, y_column = column_pair
                 filtered_df = df[(df["biography"] == biography) & df[(x_column, PHASE)].notna() & df[(y_column, PHASE)].notna()]
                 if len(filtered_df) == 0:

@@ -1155,11 +1157,11 @@ class UserPerformance:
             plot_average_and_quartile_line()

         for scatter_obj in scatter_obj_list:
-            scatter_obj.add_multi_choice_widget_for_searching_user(list(zip(df[("user_id", "")], df[("username", "")])))
+            scatter_obj.add_multi_choice_widget_for_searching_user(list(zip(df[("user_id", "")], df[("username", "")], strict=False)))
             scatter_obj.process_after_adding_glyphs()

         div_element = self._create_div_element()
-        div_element.text = div_element.text + """円の大きさ:作業時間<br>"""
+        div_element.text = div_element.text + """円の大きさ:作業時間<br>"""

         element_list = [div_element, *[e.layout for e in scatter_obj_list]]
         if metadata is not None:
annofabcli/statistics/visualization/dataframe/whole_performance.py

@@ -7,7 +7,6 @@ from __future__ import annotations
 import logging
 from collections.abc import Sequence
 from pathlib import Path
-from typing import Optional

 import numpy
 import pandas

@@ -37,7 +36,7 @@ class WholePerformance:
         series: pandas.Series,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> None:
         self.series = series
         self.task_completion_criteria = task_completion_criteria

@@ -138,7 +137,7 @@ class WholePerformance:
         return cls(df_all.iloc[0], task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list)

     @classmethod
-    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list:
+    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> WholePerformance:
         """空のデータフレームを持つインスタンスを生成します。"""

         production_volume_columns = ["input_data_count", "annotation_count"]

@@ -202,7 +201,7 @@ class WholePerformance:
         csv_file: Path,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> WholePerformance:
         """CSVファイルからインスタンスを生成します。"""
         df = pandas.read_csv(str(csv_file), header=None, index_col=[0, 1])
annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py

@@ -8,11 +8,9 @@ from __future__ import annotations
 import logging
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Any
+from typing import Any

-import bokeh
 import bokeh.layouts
-import bokeh.palettes
 import pandas
 from annofabapi.models import TaskPhase, TaskStatus
 from bokeh.models import DataRange1d

@@ -99,7 +97,7 @@ class WholeProductivityPerCompletedDate:
         df: pandas.DataFrame,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> None:
         self.df = df
         self.task_completion_criteria = task_completion_criteria

@@ -117,7 +115,7 @@ class WholeProductivityPerCompletedDate:
         csv_file: Path,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> WholeProductivityPerCompletedDate:
         """CSVファイルからインスタンスを生成します。"""
         df = pandas.read_csv(str(csv_file))

@@ -298,7 +296,7 @@ class WholeProductivityPerCompletedDate:
         self,
         output_file: Path,
         *,
-        metadata:
+        metadata: dict[str, Any] | None = None,
     ) -> None:
         """
         全体の生産量や生産性をプロットする

@@ -537,7 +535,7 @@ class WholeProductivityPerCompletedDate:

         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

-    def plot_cumulatively(self, output_file: Path, *, metadata:
+    def plot_cumulatively(self, output_file: Path, *, metadata: dict[str, Any] | None = None) -> None:
         """
         全体の生産量や作業時間の累積折れ線グラフを出力する
         """

@@ -756,7 +754,7 @@ class WholeProductivityPerCompletedDate:
         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

     @classmethod
-    def empty(cls, *, task_completion_criteria: TaskCompletionCriteria, custom_production_volume_list:
+    def empty(cls, *, task_completion_criteria: TaskCompletionCriteria, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> WholeProductivityPerCompletedDate:
         df = pandas.DataFrame(columns=cls.get_columns(custom_production_volume_list=custom_production_volume_list))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -765,7 +763,7 @@ class WholeProductivityPerCompletedDate:
         return self.get_columns(custom_production_volume_list=self.custom_production_volume_list)

     @staticmethod
-    def get_columns(custom_production_volume_list:
+    def get_columns(custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> list[str]:
         production_volume_columns = ["input_data_count", "annotation_count"]
         if custom_production_volume_list is not None:
             production_volume_columns.extend([e.value for e in custom_production_volume_list])

@@ -833,7 +831,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         df: pandas.DataFrame,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> None:
         self.task_completion_criteria = task_completion_criteria
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []

@@ -845,7 +843,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         csv_file: Path,
         task_completion_criteria: TaskCompletionCriteria,
         *,
-        custom_production_volume_list:
+        custom_production_volume_list: list[ProductionVolumeColumn] | None = None,
     ) -> WholeProductivityPerFirstAnnotationStartedDate:
         """CSVファイルからインスタンスを生成します。"""
         df = pandas.read_csv(str(csv_file))

@@ -945,12 +943,12 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         return True

     @classmethod
-    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list:
+    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> WholeProductivityPerFirstAnnotationStartedDate:
         df = pandas.DataFrame(columns=cls.get_columns(custom_production_volume_list=custom_production_volume_list))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

     @staticmethod
-    def get_columns(*, custom_production_volume_list:
+    def get_columns(*, custom_production_volume_list: list[ProductionVolumeColumn] | None = None) -> list[str]:
         production_volume_columns = ["input_data_count", "annotation_count"]
         if custom_production_volume_list is not None:
             production_volume_columns.extend([e.value for e in custom_production_volume_list])

@@ -989,7 +987,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:

         print_csv(self.df[self.columns], str(output_file))

-    def plot(self, output_file: Path, *, metadata:
+    def plot(self, output_file: Path, *, metadata: dict[str, Any] | None = None) -> None:  # noqa: PLR0915
         """
         全体の生産量や生産性をプロットする
         """