annofabcli 1.102.1__py3-none-any.whl → 1.104.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- annofabcli/__main__.py +1 -1
- annofabcli/annotation/annotation_query.py +9 -29
- annofabcli/annotation/change_annotation_attributes.py +6 -14
- annofabcli/annotation/change_annotation_properties.py +5 -12
- annofabcli/annotation/copy_annotation.py +4 -10
- annofabcli/annotation/delete_annotation.py +10 -26
- annofabcli/annotation/dump_annotation.py +1 -4
- annofabcli/annotation/import_annotation.py +15 -39
- annofabcli/annotation/list_annotation.py +1 -4
- annofabcli/annotation/merge_segmentation.py +5 -15
- annofabcli/annotation/remove_segmentation_overlap.py +8 -29
- annofabcli/annotation/restore_annotation.py +3 -9
- annofabcli/annotation_specs/add_attribute_restriction.py +2 -8
- annofabcli/annotation_specs/attribute_restriction.py +2 -10
- annofabcli/annotation_specs/export_annotation_specs.py +1 -3
- annofabcli/annotation_specs/get_annotation_specs_with_attribute_id_replaced.py +3 -10
- annofabcli/annotation_specs/get_annotation_specs_with_choice_id_replaced.py +4 -10
- annofabcli/annotation_specs/get_annotation_specs_with_label_id_replaced.py +1 -3
- annofabcli/annotation_specs/list_annotation_specs_attribute.py +7 -18
- annofabcli/annotation_specs/list_annotation_specs_choice.py +3 -8
- annofabcli/annotation_specs/list_annotation_specs_history.py +0 -1
- annofabcli/annotation_specs/list_annotation_specs_label.py +3 -8
- annofabcli/annotation_specs/list_annotation_specs_label_attribute.py +4 -9
- annofabcli/annotation_specs/list_attribute_restriction.py +3 -9
- annofabcli/annotation_specs/put_label_color.py +1 -6
- annofabcli/comment/delete_comment.py +3 -9
- annofabcli/comment/list_all_comment.py +15 -5
- annofabcli/comment/list_comment.py +46 -7
- annofabcli/comment/put_comment.py +4 -13
- annofabcli/comment/put_comment_simply.py +2 -6
- annofabcli/comment/put_inspection_comment.py +2 -6
- annofabcli/comment/put_inspection_comment_simply.py +3 -6
- annofabcli/comment/put_onhold_comment.py +2 -6
- annofabcli/comment/put_onhold_comment_simply.py +2 -4
- annofabcli/common/cli.py +5 -43
- annofabcli/common/download.py +8 -25
- annofabcli/common/image.py +3 -7
- annofabcli/common/utils.py +2 -4
- annofabcli/common/visualize.py +2 -4
- annofabcli/filesystem/draw_annotation.py +6 -18
- annofabcli/filesystem/filter_annotation.py +7 -24
- annofabcli/filesystem/mask_user_info.py +2 -5
- annofabcli/filesystem/merge_annotation.py +2 -6
- annofabcli/input_data/change_input_data_name.py +3 -7
- annofabcli/input_data/copy_input_data.py +6 -14
- annofabcli/input_data/delete_input_data.py +7 -24
- annofabcli/input_data/delete_metadata_key_of_input_data.py +5 -16
- annofabcli/input_data/list_all_input_data.py +5 -14
- annofabcli/input_data/list_all_input_data_merged_task.py +8 -23
- annofabcli/input_data/list_input_data.py +5 -16
- annofabcli/input_data/put_input_data.py +7 -19
- annofabcli/input_data/update_metadata_of_input_data.py +6 -14
- annofabcli/instruction/list_instruction_history.py +0 -1
- annofabcli/instruction/upload_instruction.py +4 -7
- annofabcli/job/list_job.py +2 -3
- annofabcli/job/list_last_job.py +1 -3
- annofabcli/organization/list_organization.py +0 -1
- annofabcli/organization_member/change_organization_member.py +1 -3
- annofabcli/organization_member/delete_organization_member.py +2 -6
- annofabcli/organization_member/invite_organization_member.py +1 -3
- annofabcli/organization_member/list_organization_member.py +0 -1
- annofabcli/project/change_organization_of_project.py +257 -0
- annofabcli/project/change_project_status.py +2 -2
- annofabcli/project/copy_project.py +2 -7
- annofabcli/project/diff_projects.py +4 -16
- annofabcli/project/list_project.py +0 -1
- annofabcli/project/put_project.py +2 -6
- annofabcli/project/subcommand_project.py +2 -0
- annofabcli/project_member/change_project_members.py +1 -1
- annofabcli/project_member/copy_project_members.py +2 -7
- annofabcli/project_member/drop_project_members.py +1 -3
- annofabcli/project_member/invite_project_members.py +2 -4
- annofabcli/project_member/list_users.py +0 -1
- annofabcli/project_member/put_project_members.py +4 -12
- annofabcli/stat_visualization/mask_visualization_dir.py +6 -16
- annofabcli/stat_visualization/merge_visualization_dir.py +7 -19
- annofabcli/stat_visualization/summarize_whole_performance_csv.py +3 -7
- annofabcli/stat_visualization/write_graph.py +5 -15
- annofabcli/stat_visualization/write_performance_rating_csv.py +4 -12
- annofabcli/statistics/list_annotation_area.py +3 -7
- annofabcli/statistics/list_annotation_attribute.py +6 -15
- annofabcli/statistics/list_annotation_attribute_filled_count.py +9 -23
- annofabcli/statistics/list_annotation_count.py +18 -44
- annofabcli/statistics/list_annotation_duration.py +14 -40
- annofabcli/statistics/list_video_duration.py +2 -3
- annofabcli/statistics/list_worktime.py +0 -1
- annofabcli/statistics/scatter.py +3 -9
- annofabcli/statistics/summarize_task_count.py +7 -12
- annofabcli/statistics/summarize_task_count_by_task_id_group.py +3 -11
- annofabcli/statistics/summarize_task_count_by_user.py +1 -5
- annofabcli/statistics/visualization/dataframe/annotation_count.py +2 -4
- annofabcli/statistics/visualization/dataframe/cumulative_productivity.py +6 -12
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py +10 -22
- annofabcli/statistics/visualization/dataframe/project_performance.py +1 -3
- annofabcli/statistics/visualization/dataframe/task.py +2 -5
- annofabcli/statistics/visualization/dataframe/task_history.py +1 -1
- annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py +6 -20
- annofabcli/statistics/visualization/dataframe/user_performance.py +29 -88
- annofabcli/statistics/visualization/dataframe/whole_performance.py +6 -12
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +17 -49
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py +4 -10
- annofabcli/statistics/visualization/filtering_query.py +2 -6
- annofabcli/statistics/visualization/project_dir.py +9 -26
- annofabcli/statistics/visualization/visualization_source_files.py +3 -10
- annofabcli/statistics/visualize_annotation_count.py +9 -23
- annofabcli/statistics/visualize_annotation_duration.py +5 -15
- annofabcli/statistics/visualize_statistics.py +18 -53
- annofabcli/statistics/visualize_video_duration.py +8 -19
- annofabcli/supplementary/delete_supplementary_data.py +7 -23
- annofabcli/supplementary/list_supplementary_data.py +1 -1
- annofabcli/supplementary/put_supplementary_data.py +5 -15
- annofabcli/task/cancel_acceptance.py +3 -4
- annofabcli/task/change_operator.py +3 -11
- annofabcli/task/change_status_to_break.py +1 -1
- annofabcli/task/change_status_to_on_hold.py +5 -18
- annofabcli/task/complete_tasks.py +8 -25
- annofabcli/task/copy_tasks.py +2 -3
- annofabcli/task/delete_metadata_key_of_task.py +2 -6
- annofabcli/task/delete_tasks.py +8 -26
- annofabcli/task/list_all_tasks.py +2 -4
- annofabcli/task/list_tasks.py +3 -7
- annofabcli/task/list_tasks_added_task_history.py +7 -21
- annofabcli/task/put_tasks.py +2 -3
- annofabcli/task/put_tasks_by_count.py +3 -7
- annofabcli/task/reject_tasks.py +7 -19
- annofabcli/task/update_metadata_of_task.py +2 -2
- annofabcli/task_history/list_all_task_history.py +2 -5
- annofabcli/task_history/list_task_history.py +0 -1
- annofabcli/task_history_event/list_all_task_history_event.py +4 -11
- annofabcli/task_history_event/list_worktime.py +4 -14
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/METADATA +1 -1
- annofabcli-1.104.0.dist-info/RECORD +215 -0
- annofabcli-1.102.1.dist-info/RECORD +0 -214
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/WHEEL +0 -0
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/entry_points.txt +0 -0
- {annofabcli-1.102.1.dist-info → annofabcli-1.104.0.dist-info}/licenses/LICENSE +0 -0
annofabcli/statistics/visualization/dataframe/productivity_per_date.py
@@ -41,9 +41,7 @@ def create_df_productivity_per_date(task_worktime_by_phase_user: TaskWorktimeByP
  df = df[df["phase"] == str_phase]
  df = df.rename(columns={"pointed_out_inspection_comment_count": "inspection_comment_count", "worktime_hour": f"{str_phase}_worktime_hour"})

- df[f"first_{str_phase}_started_date"] = df["started_datetime"].map(
- lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None
- )
+ df[f"first_{str_phase}_started_date"] = df["started_datetime"].map(lambda e: datetime_to_date(e) if e is not None and isinstance(e, str) else None)

  # first_annotation_user_id と first_annotation_usernameの両方を指定している理由:
  # first_annotation_username を取得するため
@@ -82,9 +80,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
  PLOT_WIDTH = 1200
  PLOT_HEIGHT = 600

- def __init__(
- self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> None:
+ def __init__(self, df: pandas.DataFrame, phase: TaskPhase, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> None:
  self.df = df
  self.phase = phase
  self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []
@@ -209,9 +205,7 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
  *self.production_volume_columns,
  ]

- velocity_columns = [
- f"{numerator}/{denominator}" for numerator in [f"{str_phase}_worktime_hour"] for denominator in self.production_volume_columns
- ]
+ velocity_columns = [f"{numerator}/{denominator}" for numerator in [f"{str_phase}_worktime_hour"] for denominator in self.production_volume_columns]

  columns = production_columns + velocity_columns

@@ -259,7 +253,7 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):

  df = self.df.copy()

- if target_user_id_list is not None:
+ if target_user_id_list is not None:
  user_id_list = target_user_id_list
  else:
  user_id_list = df.sort_values(by="user_id")["user_id"].dropna().unique().tolist()
@@ -337,15 +331,13 @@ class AnnotatorProductivityPerDate(AbstractPhaseProductivityPerDate):
  continue

  df_subset = self._get_df_sequential_date(df_subset)
- df_subset[f"annotation_worktime_minute/{production_volume_column}"] = (
- df_subset["annotation_worktime_hour"] * 60 / df_subset[production_volume_column]
- )
+ df_subset[f"annotation_worktime_minute/{production_volume_column}"] = df_subset["annotation_worktime_hour"] * 60 / df_subset[production_volume_column]
  df_subset[f"annotation_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
  get_weekly_sum(df_subset["annotation_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
  )
- df_subset[f"inspection_comment_count/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
- df_subset["inspection_comment_count"]
- ) / get_weekly_sum(df_subset[production_volume_column])
+ df_subset[f"inspection_comment_count/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df_subset["inspection_comment_count"]) / get_weekly_sum(
+ df_subset[production_volume_column]
+ )

  source = ColumnDataSource(data=df_subset)
  color = get_color_from_palette(user_index)
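The weekly moving-average columns in the hunk above divide one weekly sum by another. A minimal sketch of the pattern on invented data follows; `get_weekly_sum` is assumed here to be a 7-row rolling sum (its definition is not shown in this diff), and the column names are illustrative only.

# Sketch only: get_weekly_sum is assumed to be a 7-row rolling sum; the real helper is not shown in this diff.
import pandas

def get_weekly_sum(series: pandas.Series) -> pandas.Series:
    # assumed behavior: sum over the current row and the 6 preceding rows
    return series.rolling(window=7, min_periods=1).sum()

df_subset = pandas.DataFrame(
    {
        "annotation_worktime_hour": [1.0, 2.0, 1.5, 0.5, 2.5, 1.0, 2.0, 3.0],
        "annotation_count": [10, 25, 15, 5, 30, 10, 20, 35],
    }
)
# Per-row productivity in minutes per annotation, as in the single-line form above
df_subset["annotation_worktime_minute/annotation_count"] = df_subset["annotation_worktime_hour"] * 60 / df_subset["annotation_count"]
# Weekly moving average of the same ratio, mirroring the get_weekly_sum(...) / get_weekly_sum(...) lines above
df_subset["annotation_worktime_minute/annotation_count__weekly"] = (
    get_weekly_sum(df_subset["annotation_worktime_hour"]) * 60 / get_weekly_sum(df_subset["annotation_count"])
)
print(df_subset)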
@@ -475,9 +467,7 @@ class InspectorProductivityPerDate(AbstractPhaseProductivityPerDate):
  continue

  df_subset = self._get_df_sequential_date(df_subset)
- df_subset[f"inspection_worktime_minute/{production_volume_column}"] = (
- df_subset["inspection_worktime_hour"] * 60 / df_subset[production_volume_column]
- )
+ df_subset[f"inspection_worktime_minute/{production_volume_column}"] = df_subset["inspection_worktime_hour"] * 60 / df_subset[production_volume_column]
  df_subset[f"inspection_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
  get_weekly_sum(df_subset["inspection_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
  )
@@ -616,9 +606,7 @@ class AcceptorProductivityPerDate(AbstractPhaseProductivityPerDate):
  continue

  df_subset = self._get_df_sequential_date(df_subset)
- df_subset[f"acceptance_worktime_minute/{production_volume_column}"] = (
- df_subset["acceptance_worktime_hour"] * 60 / df_subset[production_volume_column]
- )
+ df_subset[f"acceptance_worktime_minute/{production_volume_column}"] = df_subset["acceptance_worktime_hour"] * 60 / df_subset[production_volume_column]

  df_subset[f"acceptance_worktime_minute/{production_volume_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
  get_weekly_sum(df_subset["acceptance_worktime_hour"]) * 60 / get_weekly_sum(df_subset[production_volume_column])
annofabcli/statistics/visualization/dataframe/project_performance.py
@@ -89,9 +89,7 @@ class ProjectPerformance:
  return [e.value for e in TaskPhase if e.value in tmp_set]

  @classmethod
- def from_project_dirs(
- cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> ProjectPerformance:
+ def from_project_dirs(cls, project_dir_list: list[ProjectDir], *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> ProjectPerformance:
  row_list: list[pandas.Series] = [cls._get_series_from_project_dir(project_dir) for project_dir in project_dir_list]
  return cls(pandas.DataFrame(row_list), custom_production_volume_list=custom_production_volume_list)
annofabcli/statistics/visualization/dataframe/task.py
@@ -71,10 +71,7 @@ class Task:
  logger.warning("引数`df`に重複したキー(project_id, task_id)が含まれています。")

  if not self.required_columns_exist(df):
- raise ValueError(
- f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: "
- f"次の列が必須です。{self.required_columns} の列が必要です。"
- )
+ raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: 次の列が必須です。{self.required_columns} の列が必要です。")

  self.df = df
@@ -409,7 +406,7 @@ class Task:

  # タイムゾーンを指定している理由::
  # すべてがNaNのseriesをdatetimeに変換すると、型にタイムゾーンが指定されない。
- # その状態で加算すると、`TypeError: DatetimeArray subtraction must have the same timezones or no timezones`というエラーが発生するため
+ # その状態で加算すると、`TypeError: DatetimeArray subtraction must have the same timezones or no timezones`というエラーが発生するため
  if not isinstance(dt1.dtype, pandas.DatetimeTZDtype):
  dt1 = dt1.dt.tz_localize(pytz.FixedOffset(540))
  if not isinstance(dt2.dtype, pandas.DatetimeTZDtype):
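The comment in this hunk points at a pandas pitfall: an all-NaN Series converted with to_datetime gets a timezone-naive dtype, and arithmetic that mixes it with timezone-aware values raises the quoted TypeError. A minimal standalone illustration (not package code) of the same check-and-localize guard:

# Minimal reproduction of the issue described in the comment above (not package code).
import pandas
import pytz

dt_aware = pandas.to_datetime(pandas.Series(["2024-01-01T09:00:00+09:00"]))  # timezone-aware dtype
dt_naive = pandas.to_datetime(pandas.Series([None]))  # all NaT -> datetime64[ns], no timezone

# Same guard as above: localize to UTC+9 (540 minutes) only when the dtype is timezone-naive.
if not isinstance(dt_naive.dtype, pandas.DatetimeTZDtype):
    dt_naive = dt_naive.dt.tz_localize(pytz.FixedOffset(540))

print(dt_aware - dt_naive)  # works; without the localization the subtraction raises TypeError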
annofabcli/statistics/visualization/dataframe/task_history.py
@@ -67,7 +67,7 @@ class TaskHistory:
  new_task_history["worktime_hour"] = isoduration_to_hour(task_history["accumulated_labor_time_milliseconds"])
  all_task_history_list.append(new_task_history)

- if len(all_task_history_list) > 0:
+ if len(all_task_history_list) > 0:
  df = pandas.DataFrame(all_task_history_list)
  else:
  df = cls.empty()
annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py
@@ -107,9 +107,7 @@ class TaskWorktimeByPhaseUser:
  logger.warning("引数`df`に重複したキー(project_id, task_id, phase, phase_stage, account_id)が含まれています。")

  if not self.required_columns_exist(df):
- raise ValueError(
- f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。"
- )
+ raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。")

  self.df = df
@@ -141,9 +139,7 @@ class TaskWorktimeByPhaseUser:
  project_id
  """
  df_task = task.df
- df_worktime_ratio = cls._create_annotation_count_ratio_df(
- task_history.df, task.df, custom_production_volume_columns=[e.value for e in task.custom_production_volume_list]
- )
+ df_worktime_ratio = cls._create_annotation_count_ratio_df(task_history.df, task.df, custom_production_volume_columns=[e.value for e in task.custom_production_volume_list])
  if len(df_worktime_ratio) == 0:
  return cls.empty()

@@ -239,9 +235,7 @@ class TaskWorktimeByPhaseUser:
  return TaskWorktimeByPhaseUser(df, custom_production_volume_list=self.custom_production_volume_list)

  @staticmethod
- def _create_annotation_count_ratio_df(
- task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: Optional[list[str]]
- ) -> pandas.DataFrame:
+ def _create_annotation_count_ratio_df(task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: Optional[list[str]]) -> pandas.DataFrame:
  """
  task_id, phase, (phase_index), user_idの作業時間比から、アノテーション数などの生産量を求める
@@ -273,11 +267,7 @@ class TaskWorktimeByPhaseUser:

  task_history_df = task_history_df[task_history_df["task_id"].isin(set(task_df["task_id"]))]

- group_obj = (
- task_history_df.sort_values("started_datetime")
- .groupby(["task_id", "phase", "phase_stage", "account_id"])
- .agg({"worktime_hour": "sum", "started_datetime": "first"})
- )
+ group_obj = task_history_df.sort_values("started_datetime").groupby(["task_id", "phase", "phase_stage", "account_id"]).agg({"worktime_hour": "sum", "started_datetime": "first"})
  # 担当者だけ変更して作業していないケースを除外する
  group_obj = group_obj[group_obj["worktime_hour"] > 0]

@@ -285,9 +275,7 @@ class TaskWorktimeByPhaseUser:
  logger.warning("タスク履歴情報に作業しているタスクがありませんでした。タスク履歴全件ファイルが更新されていない可能性があります。")
  return pandas.DataFrame()

- group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[["worktime_hour"]].apply(
- lambda e: e / e["worktime_hour"].sum()
- )
+ group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[["worktime_hour"]].apply(lambda e: e / e["worktime_hour"].sum())

  quantity_columns = [
  "annotation_count",
@@ -302,9 +290,7 @@ class TaskWorktimeByPhaseUser:
  group_obj[col] = group_obj.apply(sub_get_quantity_value, axis="columns")

  new_df = group_obj.reset_index()
- new_df["pointed_out_inspection_comment_count"] = new_df["pointed_out_inspection_comment_count"] * new_df["phase"].apply(
- lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0
- )
+ new_df["pointed_out_inspection_comment_count"] = new_df["pointed_out_inspection_comment_count"] * new_df["phase"].apply(lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0)
  new_df["rejected_count"] = new_df["rejected_count"] * new_df["phase"].apply(lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0)

  return new_df
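The groupby/apply lines in the hunks above split each task's production volume among workers in proportion to their worktime within (task_id, phase, phase_stage). A toy illustration of that ratio step, with invented data and a simplified schema:

# Toy data (not the package's real schema) illustrating the worktime-ratio allocation above.
import pandas

task_history_df = pandas.DataFrame(
    {
        "task_id": ["t1", "t1", "t1"],
        "phase": ["annotation", "annotation", "acceptance"],
        "phase_stage": [1, 1, 1],
        "account_id": ["alice", "bob", "carol"],
        "worktime_hour": [3.0, 1.0, 0.5],
        "started_datetime": ["2024-01-01T09:00", "2024-01-01T10:00", "2024-01-02T09:00"],
    }
)

group_obj = (
    task_history_df.sort_values("started_datetime")
    .groupby(["task_id", "phase", "phase_stage", "account_id"])
    .agg({"worktime_hour": "sum", "started_datetime": "first"})
)
# Each worker's share within (task_id, phase, phase_stage):
# alice 0.75 and bob 0.25 of the annotation phase, carol 1.0 of the acceptance phase.
group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[
    ["worktime_hour"]
].apply(lambda e: e / e["worktime_hour"].sum())
print(group_obj)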
annofabcli/statistics/visualization/dataframe/user_performance.py
@@ -100,9 +100,7 @@ class UserPerformance:
  self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []
  self.phase_list = phase_list
  if not self.required_columns_exist(df):
- raise ValueError(
- f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。"
- )
+ raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。")

  self.df = df
@@ -116,18 +114,14 @@ class UserPerformance:
  return len(self.df) == 0

  @staticmethod
- def _add_ratio_column_for_productivity_per_user(
- df: pandas.DataFrame, phase_list: Sequence[TaskPhaseString], production_volume_columns: list[str]
- ) -> None:
+ def _add_ratio_column_for_productivity_per_user(df: pandas.DataFrame, phase_list: Sequence[TaskPhaseString], production_volume_columns: list[str]) -> None:
  """
  ユーザーの生産性に関する列を、DataFrameに追加します。
  """

  # 集計対象タスクから算出した計測作業時間(`monitored_worktime_hour`)に対応する実績作業時間を推定で算出する
  # 具体的には、実際の計測作業時間と十先作業時間の比(`real_monitored_worktime_hour/real_actual_worktime_hour`)になるように按分する
- df[("actual_worktime_hour", "sum")] = (
- df[("monitored_worktime_hour", "sum")] / df[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")]
- )
+ df[("actual_worktime_hour", "sum")] = df[("monitored_worktime_hour", "sum")] / df[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")]

  for phase in phase_list:
@@ -156,23 +150,15 @@ class UserPerformance:
  # 生産性を算出
  ratio__actual_vs_monitored_worktime = df[("actual_worktime_hour", phase)] / df[("monitored_worktime_hour", phase)]
  for production_volume_column in production_volume_columns:
- df[(f"monitored_worktime_hour/{production_volume_column}", phase)] = (
- df[("monitored_worktime_hour", phase)] / df[(production_volume_column, phase)]
- )
- df[(f"actual_worktime_hour/{production_volume_column}", phase)] = (
- df[("actual_worktime_hour", phase)] / df[(production_volume_column, phase)]
- )
+ df[(f"monitored_worktime_hour/{production_volume_column}", phase)] = df[("monitored_worktime_hour", phase)] / df[(production_volume_column, phase)]
+ df[(f"actual_worktime_hour/{production_volume_column}", phase)] = df[("actual_worktime_hour", phase)] / df[(production_volume_column, phase)]

- df[(f"stdev__actual_worktime_hour/{production_volume_column}", phase)] = (
- df[(f"stdev__monitored_worktime_hour/{production_volume_column}", phase)] * ratio__actual_vs_monitored_worktime
- )
+ df[(f"stdev__actual_worktime_hour/{production_volume_column}", phase)] = df[(f"stdev__monitored_worktime_hour/{production_volume_column}", phase)] * ratio__actual_vs_monitored_worktime

  # 品質に関する情報
  phase = TaskPhase.ANNOTATION.value
  for production_volume_column in production_volume_columns:
- df[(f"pointed_out_inspection_comment_count/{production_volume_column}", phase)] = (
- df[("pointed_out_inspection_comment_count", phase)] / df[(production_volume_column, phase)]
- )
+ df[(f"pointed_out_inspection_comment_count/{production_volume_column}", phase)] = df[("pointed_out_inspection_comment_count", phase)] / df[(production_volume_column, phase)]

  df[("rejected_count/task_count", phase)] = df[("rejected_count", phase)] / df[("task_count", phase)]
@@ -204,9 +190,7 @@ class UserPerformance:
  return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

  @classmethod
- def empty(
- cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> UserPerformance:
+ def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> UserPerformance:
  """空のデータフレームを持つインスタンスを生成します。"""
  production_volume_columns = ["input_data_count", "annotation_count"]
  if custom_production_volume_list is not None:
@@ -321,10 +305,7 @@ class UserPerformance:
  # `to_csv()`で出力したときにKeyErrorが発生内容にするため、事前に列を追加しておく
  phase = TaskPhase.ANNOTATION.value
  columns = pandas.MultiIndex.from_tuples(
- [
- (f"stdev__monitored_worktime_hour/{production_volume_column}", phase)
- for production_volume_column in task_worktime_by_phase_user.production_volume_columns
- ]
+ [(f"stdev__monitored_worktime_hour/{production_volume_column}", phase) for production_volume_column in task_worktime_by_phase_user.production_volume_columns]
  )
  df_empty = pandas.DataFrame(columns=columns, index=pandas.Index([], name="account_id"), dtype="float64")
  return df_empty
@@ -340,9 +321,7 @@ class UserPerformance:
  df_stdev_per_volume_count_list = []
  for production_volume_column in task_worktime_by_phase_user.production_volume_columns:
  df_stdev_per_input_data_count = (
- df2[df2[f"worktime_hour/{production_volume_column}"] != float("inf")]
- .groupby(["account_id", "phase"])[[f"worktime_hour/{production_volume_column}"]]
- .std(ddof=0)
+ df2[df2[f"worktime_hour/{production_volume_column}"] != float("inf")].groupby(["account_id", "phase"])[[f"worktime_hour/{production_volume_column}"]].std(ddof=0)
  )
  df_stdev_per_volume_count_list.append(df_stdev_per_input_data_count)
  df_stdev = pandas.concat(df_stdev_per_volume_count_list, axis=1)
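std(ddof=0) in the hunk above computes the population standard deviation (divide by N) instead of pandas' default sample standard deviation (ddof=1). A tiny standalone check with toy numbers:

# Population vs. sample standard deviation, matching the ddof=0 call above (toy numbers).
import pandas

s = pandas.Series([1.0, 2.0, 3.0, 4.0])
print(s.std(ddof=0))  # 1.118..., square root of the mean squared deviation
print(s.std())        # 1.290..., default ddof=1 divides by N - 1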
@@ -351,9 +330,7 @@ class UserPerformance:
  # 前述の処理でinfを除外しているので、NaNが含まれることはないはず
  df_stdev2 = pandas.pivot_table(
  df_stdev,
- values=[
- f"worktime_hour/{production_volume_column}" for production_volume_column in task_worktime_by_phase_user.production_volume_columns
- ],
+ values=[f"worktime_hour/{production_volume_column}" for production_volume_column in task_worktime_by_phase_user.production_volume_columns],
  index="account_id",
  columns="phase",
  dropna=False,
@@ -501,10 +478,7 @@ class UserPerformance:

  df = worktime_per_date.df

- df4_list = [
- _create_df_first_last_working_date(phase)
- for phase in [None, TaskPhase.ANNOTATION.value, TaskPhase.INSPECTION.value, TaskPhase.ACCEPTANCE.value]
- ]
+ df4_list = [_create_df_first_last_working_date(phase) for phase in [None, TaskPhase.ANNOTATION.value, TaskPhase.INSPECTION.value, TaskPhase.ACCEPTANCE.value]]

  # joinしない理由: レベル1の列名が空文字のDataFrameをjoinすると、Python3.12のpandas2.2.0で、列名が期待通りにならないため
  # https://github.com/pandas-dev/pandas/issues/57500
@@ -546,7 +520,7 @@ class UserPerformance:
  task_worktime_by_phase_user: タスク、フェーズ、ユーザーごとの作業時間や生産量が格納されたオブジェクト。生産量やタスクにかかった作業時間の取得に利用します。


- """
+ """

  def drop_unnecessary_columns(df: pandas.DataFrame) -> pandas.DataFrame:
  """
@@ -593,9 +567,7 @@ class UserPerformance:
  df = df.join(cls._create_df_stdev_monitored_worktime(task_worktime_by_phase_user))

  # 比例関係の列を計算して追加する
- cls._add_ratio_column_for_productivity_per_user(
- df, phase_list=phase_list, production_volume_columns=task_worktime_by_phase_user.production_volume_columns
- )
+ cls._add_ratio_column_for_productivity_per_user(df, phase_list=phase_list, production_volume_columns=task_worktime_by_phase_user.production_volume_columns)

  # 出力に不要な列を削除する
  df = drop_unnecessary_columns(df)
@@ -611,9 +583,7 @@ class UserPerformance:

  df = df.sort_values(["user_id"])
  # `df.reset_index()`を実行する理由:indexである`account_id`を列にするため
- return cls(
- df.reset_index(), task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list
- )
+ return cls(df.reset_index(), task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list)

  @classmethod
  def _convert_column_dtypes(cls, df: pandas.DataFrame) -> pandas.DataFrame:
@@ -663,11 +633,7 @@ class UserPerformance:
  ("real_monitored_worktime_hour", "acceptance"),
  ]

- monitored_worktime_columns = (
- [("monitored_worktime_hour", "sum")]
- + [("monitored_worktime_hour", phase) for phase in phase_list]
- + [("monitored_worktime_ratio", phase) for phase in phase_list]
- )
+ monitored_worktime_columns = [("monitored_worktime_hour", "sum")] + [("monitored_worktime_hour", phase) for phase in phase_list] + [("monitored_worktime_ratio", phase) for phase in phase_list]
  production_columns = [("task_count", phase) for phase in phase_list]
  for production_volume_column in production_volume_columns:
  production_columns.extend([(production_volume_column, phase) for phase in phase_list])
@@ -685,10 +651,7 @@ class UserPerformance:

  inspection_comment_columns = [
  ("pointed_out_inspection_comment_count", TaskPhase.ANNOTATION.value),
- *[
- (f"pointed_out_inspection_comment_count/{production_volume_column}", TaskPhase.ANNOTATION.value)
- for production_volume_column in production_volume_columns
- ],
+ *[(f"pointed_out_inspection_comment_count/{production_volume_column}", TaskPhase.ANNOTATION.value) for production_volume_column in production_volume_columns],
  ]

  rejected_count_columns = [
@@ -815,48 +778,32 @@ class UserPerformance:
  """
  # ゼロ割の警告を無視する
  with numpy.errstate(divide="ignore", invalid="ignore"):
- series[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")] = (
- series[("real_monitored_worktime_hour", "sum")] / series[("real_actual_worktime_hour", "sum")]
- )
+ series[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")] = series[("real_monitored_worktime_hour", "sum")] / series[("real_actual_worktime_hour", "sum")]

  for phase in phase_list:
  # Annofab時間の比率を算出
- # 計測作業時間の合計値が0により、monitored_worktime_ratioはnanになる場合は、教師付の実績作業時間を実績作業時間の合計値になるようなmonitored_worktime_ratioに変更する
+ # 計測作業時間の合計値が0により、monitored_worktime_ratioはnanになる場合は、教師付の実績作業時間を実績作業時間の合計値になるようなmonitored_worktime_ratioに変更する
  if series[("monitored_worktime_hour", "sum")] == 0:
  if phase == TaskPhase.ANNOTATION.value:
  series[("monitored_worktime_ratio", phase)] = 1
  else:
  series[("monitored_worktime_ratio", phase)] = 0
  else:
- series[("monitored_worktime_ratio", phase)] = (
- series[("monitored_worktime_hour", phase)] / series[("monitored_worktime_hour", "sum")]
- )
+ series[("monitored_worktime_ratio", phase)] = series[("monitored_worktime_hour", phase)] / series[("monitored_worktime_hour", "sum")]

  # Annofab時間の比率から、Annowork時間を予測する
  series[("actual_worktime_hour", phase)] = series[("actual_worktime_hour", "sum")] * series[("monitored_worktime_ratio", phase)]

  # 生産性を算出
- series[("monitored_worktime_hour/input_data_count", phase)] = (
- series[("monitored_worktime_hour", phase)] / series[("input_data_count", phase)]
- )
- series[("actual_worktime_hour/input_data_count", phase)] = (
- series[("actual_worktime_hour", phase)] / series[("input_data_count", phase)]
- )
+ series[("monitored_worktime_hour/input_data_count", phase)] = series[("monitored_worktime_hour", phase)] / series[("input_data_count", phase)]
+ series[("actual_worktime_hour/input_data_count", phase)] = series[("actual_worktime_hour", phase)] / series[("input_data_count", phase)]

- series[("monitored_worktime_hour/annotation_count", phase)] = (
- series[("monitored_worktime_hour", phase)] / series[("annotation_count", phase)]
- )
- series[("actual_worktime_hour/annotation_count", phase)] = (
- series[("actual_worktime_hour", phase)] / series[("annotation_count", phase)]
- )
+ series[("monitored_worktime_hour/annotation_count", phase)] = series[("monitored_worktime_hour", phase)] / series[("annotation_count", phase)]
+ series[("actual_worktime_hour/annotation_count", phase)] = series[("actual_worktime_hour", phase)] / series[("annotation_count", phase)]

  phase = TaskPhase.ANNOTATION.value
- series[("pointed_out_inspection_comment_count/annotation_count", phase)] = (
- series[("pointed_out_inspection_comment_count", phase)] / series[("annotation_count", phase)]
- )
- series[("pointed_out_inspection_comment_count/input_data_count", phase)] = (
- series[("pointed_out_inspection_comment_count", phase)] / series[("input_data_count", phase)]
- )
+ series[("pointed_out_inspection_comment_count/annotation_count", phase)] = series[("pointed_out_inspection_comment_count", phase)] / series[("annotation_count", phase)]
+ series[("pointed_out_inspection_comment_count/input_data_count", phase)] = series[("pointed_out_inspection_comment_count", phase)] / series[("input_data_count", phase)]
  series[("rejected_count/task_count", phase)] = series[("rejected_count", phase)] / series[("task_count", phase)]

  def get_production_volume_name(self, production_volume_column: str) -> str:
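The with numpy.errstate(divide="ignore", invalid="ignore") block above silences the floating-point warnings that the per-phase divisions would otherwise emit when a denominator is zero; the results are still inf for x/0 and NaN for 0/0. A minimal illustration with toy values (not package code):

# Minimal illustration of numpy.errstate as used above (toy values, not package code).
import numpy

worktime = numpy.array([2.0, 0.0, 1.5])
volume = numpy.array([4.0, 0.0, 0.0])

with numpy.errstate(divide="ignore", invalid="ignore"):
    productivity = worktime / volume  # no RuntimeWarning is emitted inside this block

print(productivity)  # [0.5 nan inf]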
@@ -929,9 +876,7 @@ class UserPerformance:
  y_column = f"{worktime_type.value}_worktime_minute/{production_volume_column}"
  # 分単位の生産性を算出する
  for phase in self.phase_list:
- df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", phase)] = (
- df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", phase)] * 60
- )
+ df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", phase)] = df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", phase)] * 60

  for biography_index, biography in enumerate(sorted(set(df["biography"]))):
  for scatter_obj, phase in zip(scatter_obj_list, self.phase_list):
@@ -1074,9 +1019,7 @@ class UserPerformance:

  write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

- def plot_quality_and_productivity(
- self, output_file: Path, worktime_type: WorktimeType, production_volume_column: str, *, metadata: Optional[dict[str, Any]] = None
- ) -> None:
+ def plot_quality_and_productivity(self, output_file: Path, worktime_type: WorktimeType, production_volume_column: str, *, metadata: Optional[dict[str, Any]] = None) -> None:
  """
  作業時間を元に算出した生産性と品質の関係を、メンバごとにプロットする
  """
@@ -1157,9 +1100,7 @@ class UserPerformance:
  df = self.convert_df_suitable_for_bokeh(self.df)
  PHASE = TaskPhase.ANNOTATION.value  # noqa: N806

- df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", PHASE)] = (
- df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", PHASE)] * 60
- )
+ df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", PHASE)] = df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", PHASE)] * 60
  logger.debug(f"{output_file} を出力します。")

  production_volume_name = self.get_production_volume_name(production_volume_column)
annofabcli/statistics/visualization/dataframe/whole_performance.py
@@ -84,9 +84,7 @@ class WholePerformance:
  df_task = df_task_worktime_by_phase_user[["project_id", "task_id", "status"]].drop_duplicates()

  unique_keys_for_worktime = ["project_id", "task_id", "phase", "phase_stage"]
- addable_columns_for_task = list(
- set(df_task_worktime_by_phase_user.columns) - set(user_info_columns) - set(unique_keys_for_worktime) - {"status"}
- )
+ addable_columns_for_task = list(set(df_task_worktime_by_phase_user.columns) - set(user_info_columns) - set(unique_keys_for_worktime) - {"status"})
  df_task_worktime_by_phase_user = df_task_worktime_by_phase_user.groupby(unique_keys_for_worktime)[addable_columns_for_task].sum()
  df_task_worktime_by_phase_user[user_info_columns] = PSEUDO_VALUE
  df_task_worktime_by_phase_user = df_task_worktime_by_phase_user.reset_index()
@@ -95,9 +93,7 @@ class WholePerformance:

  return UserPerformance.from_df_wrapper(
  worktime_per_date=WorktimePerDate(df_worktime_per_date),
- task_worktime_by_phase_user=TaskWorktimeByPhaseUser(
- df_task_worktime_by_phase_user, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list
- ),
+ task_worktime_by_phase_user=TaskWorktimeByPhaseUser(df_task_worktime_by_phase_user, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list),
  task_completion_criteria=task_completion_criteria,
  )

@@ -115,7 +111,7 @@ class WholePerformance:
  worktime_per_date: 日ごとの作業時間が記載されたDataFrameを格納したオブジェクト。ユーザー情報の取得や、実際の作業時間(集計タスクに影響しない)の算出に利用します。
  task_worktime_by_phase_user: タスク、フェーズ、ユーザーごとの作業時間や生産量が格納されたオブジェクト。生産量やタスクにかかった作業時間の取得に利用します。

- """
+ """
  # 1人が作業した場合のパフォーマンス情報を生成する
  all_user_performance = cls._create_all_user_performance(worktime_per_date, task_worktime_by_phase_user, task_completion_criteria)

@@ -142,9 +138,7 @@ class WholePerformance:
  return cls(df_all.iloc[0], task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list)

  @classmethod
- def empty(
- cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
- ) -> WholePerformance:
+ def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> WholePerformance:
  """空のデータフレームを持つインスタンスを生成します。"""

  production_volume_columns = ["input_data_count", "annotation_count"]

@@ -219,12 +213,12 @@ class WholePerformance:
  # CSVファイル読み込み直後では、数値も文字列として格納されているので、文字列情報以外は数値に変換する
  for key, value in series.items():
  # `first_working_date`など2列目が空欄の場合は、key[1]がnumpy.nanになるため、keyを変換する
- if isinstance(key[1], float) and numpy.isnan(key[1]):
+ if isinstance(key[1], float) and numpy.isnan(key[1]):
  key2 = (key[0], "")
  else:
  key2 = key

- if key2 in cls.STRING_KEYS:
+ if key2 in cls.STRING_KEYS:
  value2 = value
  else:
  value2 = float(value)