annofabcli 1.102.0__py3-none-any.whl → 1.103.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- annofabcli/annotation/annotation_query.py +9 -29
- annofabcli/annotation/change_annotation_attributes.py +6 -14
- annofabcli/annotation/change_annotation_properties.py +5 -12
- annofabcli/annotation/copy_annotation.py +9 -11
- annofabcli/annotation/delete_annotation.py +21 -26
- annofabcli/annotation/dump_annotation.py +1 -4
- annofabcli/annotation/import_annotation.py +16 -40
- annofabcli/annotation/list_annotation.py +1 -4
- annofabcli/annotation/merge_segmentation.py +10 -16
- annofabcli/annotation/remove_segmentation_overlap.py +14 -30
- annofabcli/annotation/restore_annotation.py +3 -9
- annofabcli/annotation_specs/add_attribute_restriction.py +2 -8
- annofabcli/annotation_specs/attribute_restriction.py +2 -10
- annofabcli/annotation_specs/export_annotation_specs.py +1 -3
- annofabcli/annotation_specs/get_annotation_specs_with_attribute_id_replaced.py +3 -10
- annofabcli/annotation_specs/get_annotation_specs_with_choice_id_replaced.py +4 -10
- annofabcli/annotation_specs/get_annotation_specs_with_label_id_replaced.py +1 -3
- annofabcli/annotation_specs/list_annotation_specs_attribute.py +7 -18
- annofabcli/annotation_specs/list_annotation_specs_choice.py +3 -8
- annofabcli/annotation_specs/list_annotation_specs_history.py +0 -1
- annofabcli/annotation_specs/list_annotation_specs_label.py +3 -8
- annofabcli/annotation_specs/list_annotation_specs_label_attribute.py +4 -9
- annofabcli/annotation_specs/list_attribute_restriction.py +3 -9
- annofabcli/annotation_specs/put_label_color.py +1 -6
- annofabcli/comment/delete_comment.py +3 -9
- annofabcli/comment/list_all_comment.py +2 -4
- annofabcli/comment/list_comment.py +1 -4
- annofabcli/comment/put_comment.py +4 -13
- annofabcli/comment/put_comment_simply.py +2 -6
- annofabcli/comment/put_inspection_comment.py +2 -6
- annofabcli/comment/put_inspection_comment_simply.py +3 -6
- annofabcli/comment/put_onhold_comment.py +2 -6
- annofabcli/comment/put_onhold_comment_simply.py +2 -4
- annofabcli/common/cli.py +5 -43
- annofabcli/common/download.py +8 -25
- annofabcli/common/image.py +5 -9
- annofabcli/common/utils.py +1 -3
- annofabcli/common/visualize.py +2 -4
- annofabcli/filesystem/draw_annotation.py +8 -20
- annofabcli/filesystem/filter_annotation.py +7 -24
- annofabcli/filesystem/mask_user_info.py +3 -6
- annofabcli/filesystem/merge_annotation.py +2 -6
- annofabcli/input_data/change_input_data_name.py +3 -7
- annofabcli/input_data/copy_input_data.py +6 -14
- annofabcli/input_data/delete_input_data.py +7 -24
- annofabcli/input_data/delete_metadata_key_of_input_data.py +5 -16
- annofabcli/input_data/list_all_input_data.py +5 -14
- annofabcli/input_data/list_all_input_data_merged_task.py +8 -23
- annofabcli/input_data/list_input_data.py +5 -16
- annofabcli/input_data/put_input_data.py +7 -19
- annofabcli/input_data/update_metadata_of_input_data.py +6 -14
- annofabcli/instruction/list_instruction_history.py +0 -1
- annofabcli/instruction/upload_instruction.py +1 -4
- annofabcli/job/list_job.py +1 -2
- annofabcli/job/list_last_job.py +1 -3
- annofabcli/organization/list_organization.py +0 -1
- annofabcli/organization_member/change_organization_member.py +1 -3
- annofabcli/organization_member/delete_organization_member.py +32 -16
- annofabcli/organization_member/invite_organization_member.py +25 -14
- annofabcli/organization_member/list_organization_member.py +0 -1
- annofabcli/project/change_organization_of_project.py +257 -0
- annofabcli/project/change_project_status.py +2 -2
- annofabcli/project/copy_project.py +2 -7
- annofabcli/project/diff_projects.py +4 -16
- annofabcli/project/list_project.py +0 -1
- annofabcli/project/put_project.py +2 -6
- annofabcli/project/subcommand_project.py +2 -0
- annofabcli/project_member/change_project_members.py +2 -2
- annofabcli/project_member/copy_project_members.py +2 -7
- annofabcli/project_member/drop_project_members.py +1 -3
- annofabcli/project_member/invite_project_members.py +1 -3
- annofabcli/project_member/list_users.py +0 -1
- annofabcli/project_member/put_project_members.py +4 -12
- annofabcli/stat_visualization/mask_visualization_dir.py +6 -16
- annofabcli/stat_visualization/merge_visualization_dir.py +6 -18
- annofabcli/stat_visualization/summarize_whole_performance_csv.py +3 -7
- annofabcli/stat_visualization/write_graph.py +5 -15
- annofabcli/stat_visualization/write_performance_rating_csv.py +4 -12
- annofabcli/statistics/list_annotation_area.py +3 -7
- annofabcli/statistics/list_annotation_attribute.py +6 -15
- annofabcli/statistics/list_annotation_attribute_filled_count.py +9 -23
- annofabcli/statistics/list_annotation_count.py +18 -44
- annofabcli/statistics/list_annotation_duration.py +14 -40
- annofabcli/statistics/list_video_duration.py +2 -3
- annofabcli/statistics/list_worktime.py +0 -1
- annofabcli/statistics/scatter.py +3 -9
- annofabcli/statistics/summarize_task_count.py +7 -12
- annofabcli/statistics/summarize_task_count_by_task_id_group.py +3 -11
- annofabcli/statistics/summarize_task_count_by_user.py +1 -5
- annofabcli/statistics/visualization/dataframe/annotation_count.py +1 -3
- annofabcli/statistics/visualization/dataframe/cumulative_productivity.py +3 -9
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py +11 -23
- annofabcli/statistics/visualization/dataframe/project_performance.py +1 -3
- annofabcli/statistics/visualization/dataframe/task.py +2 -5
- annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py +6 -20
- annofabcli/statistics/visualization/dataframe/user_performance.py +29 -88
- annofabcli/statistics/visualization/dataframe/whole_performance.py +4 -10
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +17 -49
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py +3 -9
- annofabcli/statistics/visualization/filtering_query.py +2 -6
- annofabcli/statistics/visualization/project_dir.py +9 -26
- annofabcli/statistics/visualization/visualization_source_files.py +3 -10
- annofabcli/statistics/visualize_annotation_count.py +7 -21
- annofabcli/statistics/visualize_annotation_duration.py +7 -17
- annofabcli/statistics/visualize_statistics.py +17 -52
- annofabcli/statistics/visualize_video_duration.py +8 -19
- annofabcli/supplementary/delete_supplementary_data.py +7 -23
- annofabcli/supplementary/list_supplementary_data.py +1 -1
- annofabcli/supplementary/put_supplementary_data.py +5 -15
- annofabcli/task/cancel_acceptance.py +3 -4
- annofabcli/task/change_operator.py +3 -11
- annofabcli/task/change_status_to_break.py +1 -1
- annofabcli/task/change_status_to_on_hold.py +5 -18
- annofabcli/task/complete_tasks.py +8 -25
- annofabcli/task/copy_tasks.py +2 -3
- annofabcli/task/delete_metadata_key_of_task.py +2 -6
- annofabcli/task/delete_tasks.py +7 -25
- annofabcli/task/list_all_tasks.py +2 -4
- annofabcli/task/list_tasks.py +2 -6
- annofabcli/task/list_tasks_added_task_history.py +7 -21
- annofabcli/task/put_tasks.py +2 -3
- annofabcli/task/put_tasks_by_count.py +3 -7
- annofabcli/task/reject_tasks.py +7 -19
- annofabcli/task/update_metadata_of_task.py +1 -1
- annofabcli/task_history/list_all_task_history.py +2 -5
- annofabcli/task_history/list_task_history.py +0 -1
- annofabcli/task_history_event/list_all_task_history_event.py +4 -11
- annofabcli/task_history_event/list_worktime.py +4 -14
- {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/METADATA +1 -1
- annofabcli-1.103.0.dist-info/RECORD +215 -0
- annofabcli-1.102.0.dist-info/RECORD +0 -214
- {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/WHEEL +0 -0
- {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/entry_points.txt +0 -0
- {annofabcli-1.102.0.dist-info → annofabcli-1.103.0.dist-info}/licenses/LICENSE +0 -0
@@ -71,10 +71,7 @@ class Task:
             logger.warning("引数`df`に重複したキー(project_id, task_id)が含まれています。")

         if not self.required_columns_exist(df):
-            raise ValueError(
-                f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: "
-                f"次の列が必須です。{self.required_columns} の列が必要です。"
-            )
+            raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: 次の列が必須です。{self.required_columns} の列が必要です。")

         self.df = df

@@ -409,7 +406,7 @@ class Task:

         # タイムゾーンを指定している理由::
         # すべてがNaNのseriesをdatetimeに変換すると、型にタイムゾーンが指定されない。
-        # その状態で加算すると、`TypeError: DatetimeArray subtraction must have the same timezones or no timezones`というエラーが発生するため
+        # その状態で加算すると、`TypeError: DatetimeArray subtraction must have the same timezones or no timezones`というエラーが発生するため
         if not isinstance(dt1.dtype, pandas.DatetimeTZDtype):
             dt1 = dt1.dt.tz_localize(pytz.FixedOffset(540))
         if not isinstance(dt2.dtype, pandas.DatetimeTZDtype):
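The comment in the hunk above explains the timezone handling in `Task`: a series that is all NaN becomes tz-naive when converted to datetime, and arithmetic mixing it with a tz-aware series raises `TypeError: DatetimeArray subtraction must have the same timezones or no timezones`. A minimal, self-contained sketch of the same guard; the column names are illustrative and not taken from the package:

```python
import pandas
import pytz

df = pandas.DataFrame(
    {
        # tz-aware column (all values carry a +09:00 offset)
        "first_started_datetime": pandas.to_datetime(["2024-01-01T09:00:00+09:00", "2024-01-02T10:00:00+09:00"]),
        # all-NaN column: converting it to datetime yields a tz-naive dtype
        "first_finished_datetime": pandas.to_datetime([None, None]),
    }
)

dt1 = df["first_started_datetime"]
dt2 = df["first_finished_datetime"]

# Localize only the tz-naive side before doing arithmetic. 540 minutes is UTC+09:00 (JST).
if not isinstance(dt1.dtype, pandas.DatetimeTZDtype):
    dt1 = dt1.dt.tz_localize(pytz.FixedOffset(540))
if not isinstance(dt2.dtype, pandas.DatetimeTZDtype):
    dt2 = dt2.dt.tz_localize(pytz.FixedOffset(540))

# Both series are now tz-aware, so the subtraction no longer raises TypeError.
print((dt2 - dt1).dt.total_seconds() / 3600)
```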
@@ -107,9 +107,7 @@ class TaskWorktimeByPhaseUser:
             logger.warning("引数`df`に重複したキー(project_id, task_id, phase, phase_stage, account_id)が含まれています。")

         if not self.required_columns_exist(df):
-            raise ValueError(
-                f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。"
-            )
+            raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。")

         self.df = df

@@ -141,9 +139,7 @@ class TaskWorktimeByPhaseUser:
                 project_id
         """
         df_task = task.df
-        df_worktime_ratio = cls._create_annotation_count_ratio_df(
-            task_history.df, task.df, custom_production_volume_columns=[e.value for e in task.custom_production_volume_list]
-        )
+        df_worktime_ratio = cls._create_annotation_count_ratio_df(task_history.df, task.df, custom_production_volume_columns=[e.value for e in task.custom_production_volume_list])
         if len(df_worktime_ratio) == 0:
             return cls.empty()

@@ -239,9 +235,7 @@ class TaskWorktimeByPhaseUser:
         return TaskWorktimeByPhaseUser(df, custom_production_volume_list=self.custom_production_volume_list)

     @staticmethod
-    def _create_annotation_count_ratio_df(
-        task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: Optional[list[str]]
-    ) -> pandas.DataFrame:
+    def _create_annotation_count_ratio_df(task_history_df: pandas.DataFrame, task_df: pandas.DataFrame, *, custom_production_volume_columns: Optional[list[str]]) -> pandas.DataFrame:
         """
         task_id, phase, (phase_index), user_idの作業時間比から、アノテーション数などの生産量を求める

@@ -273,11 +267,7 @@ class TaskWorktimeByPhaseUser:

         task_history_df = task_history_df[task_history_df["task_id"].isin(set(task_df["task_id"]))]

-        group_obj = (
-            task_history_df.sort_values("started_datetime")
-            .groupby(["task_id", "phase", "phase_stage", "account_id"])
-            .agg({"worktime_hour": "sum", "started_datetime": "first"})
-        )
+        group_obj = task_history_df.sort_values("started_datetime").groupby(["task_id", "phase", "phase_stage", "account_id"]).agg({"worktime_hour": "sum", "started_datetime": "first"})
         # 担当者だけ変更して作業していないケースを除外する
         group_obj = group_obj[group_obj["worktime_hour"] > 0]

@@ -285,9 +275,7 @@ class TaskWorktimeByPhaseUser:
             logger.warning("タスク履歴情報に作業しているタスクがありませんでした。タスク履歴全件ファイルが更新されていない可能性があります。")
             return pandas.DataFrame()

-        group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[["worktime_hour"]].apply(
-            lambda e: e / e["worktime_hour"].sum()
-        )
+        group_obj["task_count"] = group_obj.groupby(level=["task_id", "phase", "phase_stage"], group_keys=False)[["worktime_hour"]].apply(lambda e: e / e["worktime_hour"].sum())

         quantity_columns = [
             "annotation_count",
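The collapsed `groupby(...).apply(...)` above apportions a task among the members who worked on it: within each (task_id, phase, phase_stage) group, every account's `worktime_hour` is divided by the group total, and the resulting share is stored as a fractional `task_count`. A small sketch of that apportioning on toy data, written with `transform("sum")`, which yields the same shares:

```python
import pandas

df = pandas.DataFrame(
    {
        "task_id": ["t1", "t1", "t2"],
        "phase": ["annotation", "annotation", "annotation"],
        "phase_stage": [1, 1, 1],
        "account_id": ["alice", "bob", "alice"],
        "worktime_hour": [3.0, 1.0, 2.0],
    }
)

# Sum worktime per (task, phase, phase_stage, account) before apportioning.
group_obj = df.groupby(["task_id", "phase", "phase_stage", "account_id"]).agg({"worktime_hour": "sum"})

# Each member's share of the task: their worktime divided by the per-task total.
group_obj["task_count"] = group_obj["worktime_hour"] / group_obj.groupby(level=["task_id", "phase", "phase_stage"])["worktime_hour"].transform("sum")

print(group_obj)
# t1: alice 0.75, bob 0.25 / t2: alice 1.0
```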
@@ -302,9 +290,7 @@ class TaskWorktimeByPhaseUser:
             group_obj[col] = group_obj.apply(sub_get_quantity_value, axis="columns")

         new_df = group_obj.reset_index()
-        new_df["pointed_out_inspection_comment_count"] = new_df["pointed_out_inspection_comment_count"] * new_df["phase"].apply(
-            lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0
-        )
+        new_df["pointed_out_inspection_comment_count"] = new_df["pointed_out_inspection_comment_count"] * new_df["phase"].apply(lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0)
         new_df["rejected_count"] = new_df["rejected_count"] * new_df["phase"].apply(lambda e: 1 if e == TaskPhase.ANNOTATION.value else 0)

         return new_df
@@ -100,9 +100,7 @@ class UserPerformance:
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []
         self.phase_list = phase_list
         if not self.required_columns_exist(df):
-            raise ValueError(
-                f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。"
-            )
+            raise ValueError(f"引数'df'の'columns'に次の列が存在していません。 {self.missing_columns(df)} :: 次の列が必須です。{self.columns}の列が必要です。")

         self.df = df

@@ -116,18 +114,14 @@ class UserPerformance:
         return len(self.df) == 0

     @staticmethod
-    def _add_ratio_column_for_productivity_per_user(
-        df: pandas.DataFrame, phase_list: Sequence[TaskPhaseString], production_volume_columns: list[str]
-    ) -> None:
+    def _add_ratio_column_for_productivity_per_user(df: pandas.DataFrame, phase_list: Sequence[TaskPhaseString], production_volume_columns: list[str]) -> None:
         """
         ユーザーの生産性に関する列を、DataFrameに追加します。
         """

         # 集計対象タスクから算出した計測作業時間(`monitored_worktime_hour`)に対応する実績作業時間を推定で算出する
         # 具体的には、実際の計測作業時間と実績作業時間の比(`real_monitored_worktime_hour/real_actual_worktime_hour`)になるように按分する
-        df[("actual_worktime_hour", "sum")] = (
-            df[("monitored_worktime_hour", "sum")] / df[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")]
-        )
+        df[("actual_worktime_hour", "sum")] = df[("monitored_worktime_hour", "sum")] / df[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")]

         for phase in phase_list:

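The comment above this change describes how `_add_ratio_column_for_productivity_per_user` estimates actual worktime: the monitored worktime measured on the aggregated tasks is divided by the user's overall ratio of monitored to actual worktime, so the estimate preserves that ratio. A small numeric sketch with MultiIndex columns like the ones used here; the values are made up:

```python
import pandas

df = pandas.DataFrame(
    {
        ("monitored_worktime_hour", "sum"): [4.0],
        ("real_monitored_worktime_hour", "sum"): [8.0],
        ("real_actual_worktime_hour", "sum"): [10.0],
    },
    index=pandas.Index(["user_1"], name="account_id"),
)

# Ratio of measured to actual worktime over all of the user's work, not just the aggregated tasks.
df[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")] = (
    df[("real_monitored_worktime_hour", "sum")] / df[("real_actual_worktime_hour", "sum")]
)

# Estimated actual worktime for the aggregated tasks: 4h monitored / 0.8 = 5h actual.
df[("actual_worktime_hour", "sum")] = (
    df[("monitored_worktime_hour", "sum")] / df[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")]
)

print(df[("actual_worktime_hour", "sum")])  # 5.0
```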
@@ -156,23 +150,15 @@ class UserPerformance:
             # 生産性を算出
             ratio__actual_vs_monitored_worktime = df[("actual_worktime_hour", phase)] / df[("monitored_worktime_hour", phase)]
             for production_volume_column in production_volume_columns:
-                df[(f"monitored_worktime_hour/{production_volume_column}", phase)] = (
-                    df[("monitored_worktime_hour", phase)] / df[(production_volume_column, phase)]
-                )
-                df[(f"actual_worktime_hour/{production_volume_column}", phase)] = (
-                    df[("actual_worktime_hour", phase)] / df[(production_volume_column, phase)]
-                )
+                df[(f"monitored_worktime_hour/{production_volume_column}", phase)] = df[("monitored_worktime_hour", phase)] / df[(production_volume_column, phase)]
+                df[(f"actual_worktime_hour/{production_volume_column}", phase)] = df[("actual_worktime_hour", phase)] / df[(production_volume_column, phase)]

-                df[(f"stdev__actual_worktime_hour/{production_volume_column}", phase)] = (
-                    df[(f"stdev__monitored_worktime_hour/{production_volume_column}", phase)] * ratio__actual_vs_monitored_worktime
-                )
+                df[(f"stdev__actual_worktime_hour/{production_volume_column}", phase)] = df[(f"stdev__monitored_worktime_hour/{production_volume_column}", phase)] * ratio__actual_vs_monitored_worktime

         # 品質に関する情報
         phase = TaskPhase.ANNOTATION.value
         for production_volume_column in production_volume_columns:
-            df[(f"pointed_out_inspection_comment_count/{production_volume_column}", phase)] = (
-                df[("pointed_out_inspection_comment_count", phase)] / df[(production_volume_column, phase)]
-            )
+            df[(f"pointed_out_inspection_comment_count/{production_volume_column}", phase)] = df[("pointed_out_inspection_comment_count", phase)] / df[(production_volume_column, phase)]

         df[("rejected_count/task_count", phase)] = df[("rejected_count", phase)] / df[("task_count", phase)]

df[(f"pointed_out_inspection_comment_count/{production_volume_column}", phase)] = df[("pointed_out_inspection_comment_count", phase)] / df[(production_volume_column, phase)]
|
|
176
162
|
|
|
177
163
|
df[("rejected_count/task_count", phase)] = df[("rejected_count", phase)] / df[("task_count", phase)]
|
|
178
164
|
|
|
@@ -204,9 +190,7 @@ class UserPerformance:
|
|
|
204
190
|
return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)
|
|
205
191
|
|
|
206
192
|
@classmethod
|
|
207
|
-
def empty(
|
|
208
|
-
cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
|
|
209
|
-
) -> UserPerformance:
|
|
193
|
+
def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> UserPerformance:
|
|
210
194
|
"""空のデータフレームを持つインスタンスを生成します。"""
|
|
211
195
|
production_volume_columns = ["input_data_count", "annotation_count"]
|
|
212
196
|
if custom_production_volume_list is not None:
|
|
@@ -321,10 +305,7 @@ class UserPerformance:
         # `to_csv()`で出力したときにKeyErrorが発生しないようにするため、事前に列を追加しておく
         phase = TaskPhase.ANNOTATION.value
         columns = pandas.MultiIndex.from_tuples(
-            [
-                (f"stdev__monitored_worktime_hour/{production_volume_column}", phase)
-                for production_volume_column in task_worktime_by_phase_user.production_volume_columns
-            ]
+            [(f"stdev__monitored_worktime_hour/{production_volume_column}", phase) for production_volume_column in task_worktime_by_phase_user.production_volume_columns]
         )
         df_empty = pandas.DataFrame(columns=columns, index=pandas.Index([], name="account_id"), dtype="float64")
         return df_empty
@@ -340,9 +321,7 @@ class UserPerformance:
         df_stdev_per_volume_count_list = []
         for production_volume_column in task_worktime_by_phase_user.production_volume_columns:
             df_stdev_per_input_data_count = (
-                df2[df2[f"worktime_hour/{production_volume_column}"] != float("inf")]
-                .groupby(["account_id", "phase"])[[f"worktime_hour/{production_volume_column}"]]
-                .std(ddof=0)
+                df2[df2[f"worktime_hour/{production_volume_column}"] != float("inf")].groupby(["account_id", "phase"])[[f"worktime_hour/{production_volume_column}"]].std(ddof=0)
             )
             df_stdev_per_volume_count_list.append(df_stdev_per_input_data_count)
         df_stdev = pandas.concat(df_stdev_per_volume_count_list, axis=1)
@@ -351,9 +330,7 @@ class UserPerformance:
         # 前述の処理でinfを除外しているので、NaNが含まれることはないはず
         df_stdev2 = pandas.pivot_table(
             df_stdev,
-            values=[
-                f"worktime_hour/{production_volume_column}" for production_volume_column in task_worktime_by_phase_user.production_volume_columns
-            ],
+            values=[f"worktime_hour/{production_volume_column}" for production_volume_column in task_worktime_by_phase_user.production_volume_columns],
             index="account_id",
             columns="phase",
             dropna=False,
@@ -501,10 +478,7 @@ class UserPerformance:

         df = worktime_per_date.df

-        df4_list = [
-            _create_df_first_last_working_date(phase)
-            for phase in [None, TaskPhase.ANNOTATION.value, TaskPhase.INSPECTION.value, TaskPhase.ACCEPTANCE.value]
-        ]
+        df4_list = [_create_df_first_last_working_date(phase) for phase in [None, TaskPhase.ANNOTATION.value, TaskPhase.INSPECTION.value, TaskPhase.ACCEPTANCE.value]]

         # joinしない理由: レベル1の列名が空文字のDataFrameをjoinすると、Python3.12のpandas2.2.0で、列名が期待通りにならないため
         # https://github.com/pandas-dev/pandas/issues/57500
@@ -546,7 +520,7 @@ class UserPerformance:
             task_worktime_by_phase_user: タスク、フェーズ、ユーザーごとの作業時間や生産量が格納されたオブジェクト。生産量やタスクにかかった作業時間の取得に利用します。


-        """
+        """

         def drop_unnecessary_columns(df: pandas.DataFrame) -> pandas.DataFrame:
             """
@@ -593,9 +567,7 @@ class UserPerformance:
         df = df.join(cls._create_df_stdev_monitored_worktime(task_worktime_by_phase_user))

         # 比例関係の列を計算して追加する
-        cls._add_ratio_column_for_productivity_per_user(
-            df, phase_list=phase_list, production_volume_columns=task_worktime_by_phase_user.production_volume_columns
-        )
+        cls._add_ratio_column_for_productivity_per_user(df, phase_list=phase_list, production_volume_columns=task_worktime_by_phase_user.production_volume_columns)

         # 出力に不要な列を削除する
         df = drop_unnecessary_columns(df)
@@ -611,9 +583,7 @@ class UserPerformance:

         df = df.sort_values(["user_id"])
         # `df.reset_index()`を実行する理由:indexである`account_id`を列にするため
-        return cls(
-            df.reset_index(), task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list
-        )
+        return cls(df.reset_index(), task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list)

     @classmethod
     def _convert_column_dtypes(cls, df: pandas.DataFrame) -> pandas.DataFrame:
@@ -663,11 +633,7 @@ class UserPerformance:
             ("real_monitored_worktime_hour", "acceptance"),
         ]

-        monitored_worktime_columns = (
-            [("monitored_worktime_hour", "sum")]
-            + [("monitored_worktime_hour", phase) for phase in phase_list]
-            + [("monitored_worktime_ratio", phase) for phase in phase_list]
-        )
+        monitored_worktime_columns = [("monitored_worktime_hour", "sum")] + [("monitored_worktime_hour", phase) for phase in phase_list] + [("monitored_worktime_ratio", phase) for phase in phase_list]
         production_columns = [("task_count", phase) for phase in phase_list]
         for production_volume_column in production_volume_columns:
             production_columns.extend([(production_volume_column, phase) for phase in phase_list])
@@ -685,10 +651,7 @@ class UserPerformance:

         inspection_comment_columns = [
             ("pointed_out_inspection_comment_count", TaskPhase.ANNOTATION.value),
-            *[
-                (f"pointed_out_inspection_comment_count/{production_volume_column}", TaskPhase.ANNOTATION.value)
-                for production_volume_column in production_volume_columns
-            ],
+            *[(f"pointed_out_inspection_comment_count/{production_volume_column}", TaskPhase.ANNOTATION.value) for production_volume_column in production_volume_columns],
         ]

         rejected_count_columns = [
@@ -815,48 +778,32 @@ class UserPerformance:
         """
         # ゼロ割の警告を無視する
         with numpy.errstate(divide="ignore", invalid="ignore"):
-            series[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")] = (
-                series[("real_monitored_worktime_hour", "sum")] / series[("real_actual_worktime_hour", "sum")]
-            )
+            series[("real_monitored_worktime_hour/real_actual_worktime_hour", "sum")] = series[("real_monitored_worktime_hour", "sum")] / series[("real_actual_worktime_hour", "sum")]

             for phase in phase_list:
                 # Annofab時間の比率を算出
-                # 計測作業時間の合計値が0により、monitored_worktime_ratioはnanになる場合は、教師付の実績作業時間を実績作業時間の合計値になるようなmonitored_worktime_ratioに変更する
+                # 計測作業時間の合計値が0により、monitored_worktime_ratioはnanになる場合は、教師付の実績作業時間を実績作業時間の合計値になるようなmonitored_worktime_ratioに変更する
                 if series[("monitored_worktime_hour", "sum")] == 0:
                     if phase == TaskPhase.ANNOTATION.value:
                         series[("monitored_worktime_ratio", phase)] = 1
                     else:
                         series[("monitored_worktime_ratio", phase)] = 0
                 else:
-                    series[("monitored_worktime_ratio", phase)] = (
-                        series[("monitored_worktime_hour", phase)] / series[("monitored_worktime_hour", "sum")]
-                    )
+                    series[("monitored_worktime_ratio", phase)] = series[("monitored_worktime_hour", phase)] / series[("monitored_worktime_hour", "sum")]

                 # Annofab時間の比率から、Annowork時間を予測する
                 series[("actual_worktime_hour", phase)] = series[("actual_worktime_hour", "sum")] * series[("monitored_worktime_ratio", phase)]

                 # 生産性を算出
-                series[("monitored_worktime_hour/input_data_count", phase)] = (
-                    series[("monitored_worktime_hour", phase)] / series[("input_data_count", phase)]
-                )
-                series[("actual_worktime_hour/input_data_count", phase)] = (
-                    series[("actual_worktime_hour", phase)] / series[("input_data_count", phase)]
-                )
+                series[("monitored_worktime_hour/input_data_count", phase)] = series[("monitored_worktime_hour", phase)] / series[("input_data_count", phase)]
+                series[("actual_worktime_hour/input_data_count", phase)] = series[("actual_worktime_hour", phase)] / series[("input_data_count", phase)]

-                series[("monitored_worktime_hour/annotation_count", phase)] = (
-                    series[("monitored_worktime_hour", phase)] / series[("annotation_count", phase)]
-                )
-                series[("actual_worktime_hour/annotation_count", phase)] = (
-                    series[("actual_worktime_hour", phase)] / series[("annotation_count", phase)]
-                )
+                series[("monitored_worktime_hour/annotation_count", phase)] = series[("monitored_worktime_hour", phase)] / series[("annotation_count", phase)]
+                series[("actual_worktime_hour/annotation_count", phase)] = series[("actual_worktime_hour", phase)] / series[("annotation_count", phase)]

             phase = TaskPhase.ANNOTATION.value
-            series[("pointed_out_inspection_comment_count/annotation_count", phase)] = (
-                series[("pointed_out_inspection_comment_count", phase)] / series[("annotation_count", phase)]
-            )
-            series[("pointed_out_inspection_comment_count/input_data_count", phase)] = (
-                series[("pointed_out_inspection_comment_count", phase)] / series[("input_data_count", phase)]
-            )
+            series[("pointed_out_inspection_comment_count/annotation_count", phase)] = series[("pointed_out_inspection_comment_count", phase)] / series[("annotation_count", phase)]
+            series[("pointed_out_inspection_comment_count/input_data_count", phase)] = series[("pointed_out_inspection_comment_count", phase)] / series[("input_data_count", phase)]
             series[("rejected_count/task_count", phase)] = series[("rejected_count", phase)] / series[("task_count", phase)]

     def get_production_volume_name(self, production_volume_column: str) -> str:
@@ -929,9 +876,7 @@ class UserPerformance:
         y_column = f"{worktime_type.value}_worktime_minute/{production_volume_column}"
         # 分単位の生産性を算出する
         for phase in self.phase_list:
-            df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", phase)] = (
-                df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", phase)] * 60
-            )
+            df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", phase)] = df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", phase)] * 60

         for biography_index, biography in enumerate(sorted(set(df["biography"]))):
             for scatter_obj, phase in zip(scatter_obj_list, self.phase_list):
@@ -1074,9 +1019,7 @@ class UserPerformance:

         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

-    def plot_quality_and_productivity(
-        self, output_file: Path, worktime_type: WorktimeType, production_volume_column: str, *, metadata: Optional[dict[str, Any]] = None
-    ) -> None:
+    def plot_quality_and_productivity(self, output_file: Path, worktime_type: WorktimeType, production_volume_column: str, *, metadata: Optional[dict[str, Any]] = None) -> None:
         """
         作業時間を元に算出した生産性と品質の関係を、メンバごとにプロットする
         """
@@ -1157,9 +1100,7 @@ class UserPerformance:
         df = self.convert_df_suitable_for_bokeh(self.df)
         PHASE = TaskPhase.ANNOTATION.value  # noqa: N806

-        df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", PHASE)] = (
-            df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", PHASE)] * 60
-        )
+        df[(f"{worktime_type.value}_worktime_minute/{production_volume_column}", PHASE)] = df[(f"{worktime_type.value}_worktime_hour/{production_volume_column}", PHASE)] * 60
         logger.debug(f"{output_file} を出力します。")

         production_volume_name = self.get_production_volume_name(production_volume_column)
@@ -84,9 +84,7 @@ class WholePerformance:
         df_task = df_task_worktime_by_phase_user[["project_id", "task_id", "status"]].drop_duplicates()

         unique_keys_for_worktime = ["project_id", "task_id", "phase", "phase_stage"]
-        addable_columns_for_task = list(
-            set(df_task_worktime_by_phase_user.columns) - set(user_info_columns) - set(unique_keys_for_worktime) - {"status"}
-        )
+        addable_columns_for_task = list(set(df_task_worktime_by_phase_user.columns) - set(user_info_columns) - set(unique_keys_for_worktime) - {"status"})
         df_task_worktime_by_phase_user = df_task_worktime_by_phase_user.groupby(unique_keys_for_worktime)[addable_columns_for_task].sum()
         df_task_worktime_by_phase_user[user_info_columns] = PSEUDO_VALUE
         df_task_worktime_by_phase_user = df_task_worktime_by_phase_user.reset_index()
@@ -95,9 +93,7 @@ class WholePerformance:

         return UserPerformance.from_df_wrapper(
             worktime_per_date=WorktimePerDate(df_worktime_per_date),
-            task_worktime_by_phase_user=TaskWorktimeByPhaseUser(
-                df_task_worktime_by_phase_user, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list
-            ),
+            task_worktime_by_phase_user=TaskWorktimeByPhaseUser(df_task_worktime_by_phase_user, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list),
             task_completion_criteria=task_completion_criteria,
         )

@@ -115,7 +111,7 @@ class WholePerformance:
             worktime_per_date: 日ごとの作業時間が記載されたDataFrameを格納したオブジェクト。ユーザー情報の取得や、実際の作業時間(集計タスクに影響しない)の算出に利用します。
             task_worktime_by_phase_user: タスク、フェーズ、ユーザーごとの作業時間や生産量が格納されたオブジェクト。生産量やタスクにかかった作業時間の取得に利用します。

-        """
+        """
         # 1人が作業した場合のパフォーマンス情報を生成する
         all_user_performance = cls._create_all_user_performance(worktime_per_date, task_worktime_by_phase_user, task_completion_criteria)

@@ -142,9 +138,7 @@ class WholePerformance:
         return cls(df_all.iloc[0], task_completion_criteria, custom_production_volume_list=task_worktime_by_phase_user.custom_production_volume_list)

     @classmethod
-    def empty(
-        cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> WholePerformance:
+    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> WholePerformance:
         """空のデータフレームを持つインスタンスを生成します。"""

         production_volume_columns = ["input_data_count", "annotation_count"]
@@ -141,9 +141,7 @@ class WholeProductivityPerCompletedDate:
         return pandas.DataFrame(index=[e.strftime("%Y-%m-%d") for e in pandas.date_range(start_date, end_date)])

     @classmethod
-    def from_df_wrapper(
-        cls, task: Task, worktime_per_date: WorktimePerDate, task_completion_criteria: TaskCompletionCriteria
-    ) -> WholeProductivityPerCompletedDate:
+    def from_df_wrapper(cls, task: Task, worktime_per_date: WorktimePerDate, task_completion_criteria: TaskCompletionCriteria) -> WholeProductivityPerCompletedDate:
         """
         完了日毎の全体の生産量、生産性を算出する。

@@ -250,9 +248,7 @@ class WholeProductivityPerCompletedDate:
             """速度情報の列を追加"""
             df[f"{numerator_column}/{denominator_column}"] = df[numerator_column] / df[denominator_column]
             # 1週間移動平均も出力
-            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
-                df[numerator_column]
-            ) / get_weekly_sum(df[denominator_column])
+            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[numerator_column]) / get_weekly_sum(df[denominator_column])

         # 累計情報を追加
         add_cumsum_column(df, column="task_count")
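The velocity column above is the plain ratio of the two daily columns, and the weekly variant divides their weekly sums rather than averaging the daily ratios, which keeps days with zero work from distorting the figure. `get_weekly_sum` is not shown in this diff; the sketch below assumes it is a trailing 7-day rolling sum, and the `__lastweek` suffix is only a stand-in for `WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX`:

```python
import pandas


def get_weekly_sum(series: pandas.Series) -> pandas.Series:
    # Assumption: trailing 7-day window; min_periods=1 keeps the first days defined.
    return series.rolling(7, min_periods=1).sum()


df = pandas.DataFrame(
    {
        "actual_worktime_hour": [2.0, 3.0, 0.0, 4.0, 1.0, 2.0, 3.0, 5.0],
        "task_count": [1, 2, 0, 2, 1, 1, 2, 3],
    },
    index=pandas.date_range("2024-05-01", periods=8).strftime("%Y-%m-%d"),
)

# Daily velocity, then the weekly variant: a ratio of 7-day sums, not a mean of daily ratios.
df["actual_worktime_hour/task_count"] = df["actual_worktime_hour"] / df["task_count"]
df["actual_worktime_hour/task_count__lastweek"] = get_weekly_sum(df["actual_worktime_hour"]) / get_weekly_sum(df["task_count"])

print(df)
```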
@@ -307,16 +303,10 @@ class WholeProductivityPerCompletedDate:
             "unmonitored",
         ]:
             df[f"{category}_worktime_minute/{denominator}"] = df[f"{category}_worktime_hour"] * 60 / df[denominator]
-            df[f"{category}_worktime_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
-                get_weekly_sum(df[f"{category}_worktime_hour"]) * 60 / get_weekly_sum(df[denominator])
-            )
+            df[f"{category}_worktime_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[f"{category}_worktime_hour"]) * 60 / get_weekly_sum(df[denominator])

-    df[f"actual_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df["actual_worktime_hour"]) / get_weekly_sum(
-        df["task_count"]
-    )
-    df[f"monitored_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(
-        df["monitored_worktime_hour"]
-    ) / get_weekly_sum(df["task_count"])
+    df[f"actual_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df["actual_worktime_hour"]) / get_weekly_sum(df["task_count"])
+    df[f"monitored_worktime_hour/task_count{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df["monitored_worktime_hour"]) / get_weekly_sum(df["task_count"])

     for column in [
         "task_count",
@@ -349,9 +339,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["task_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -396,9 +384,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["actual_worktime_hour"].max(), df["monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["input_data_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -460,7 +446,7 @@ class WholeProductivityPerCompletedDate:
             ("monitored_acceptance_worktime", "計測作業時間(受入)"),
         ]
         if df["actual_worktime_hour"].sum() > 0:
-            # 条件分岐の理由:実績作業時間がないときは、非計測作業時間がマイナス値になり、分かりづらいグラフになるため。必要なときのみ非計測作業時間をプロットする
+            # 条件分岐の理由:実績作業時間がないときは、非計測作業時間がマイナス値になり、分かりづらいグラフになるため。必要なときのみ非計測作業時間をプロットする
             phase_prefix.append(("unmonitored_worktime", "非計測作業時間"))

         fig_info_list = [
@@ -572,9 +558,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["cumsum_task_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -628,9 +612,7 @@ class WholeProductivityPerCompletedDate:
         )
         line_graph.add_secondary_y_axis(
             "作業時間[時間]",
-            secondary_y_axis_range=DataRange1d(
-                end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO
-            ),
+            secondary_y_axis_range=DataRange1d(end=max(df["cumsum_actual_worktime_hour"].max(), df["cumsum_monitored_worktime_hour"].max()) * SECONDARY_Y_RANGE_RATIO),
             primary_y_axis_range=DataRange1d(end=df["cumsum_input_data_count"].max() * SECONDARY_Y_RANGE_RATIO),
         )

@@ -762,9 +744,7 @@ class WholeProductivityPerCompletedDate:
         write_bokeh_graph(bokeh.layouts.column(element_list), output_file)

     @classmethod
-    def empty(
-        cls, *, task_completion_criteria: TaskCompletionCriteria, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> WholeProductivityPerCompletedDate:
+    def empty(cls, *, task_completion_criteria: TaskCompletionCriteria, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> WholeProductivityPerCompletedDate:
         df = pandas.DataFrame(columns=cls.get_columns(custom_production_volume_list=custom_production_volume_list))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -868,9 +848,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         def add_velocity_column(df: pandas.DataFrame, numerator_column: str, denominator_column: str) -> None:
             df[f"{numerator_column}/{denominator_column}"] = df[numerator_column] / df[denominator_column]

-            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_moving_average(
-                df[numerator_column]
-            ) / get_weekly_moving_average(df[denominator_column])
+            df[f"{numerator_column}/{denominator_column}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_moving_average(df[numerator_column]) / get_weekly_moving_average(df[denominator_column])

         # annofab 計測時間から算出したvelocityを追加
         for column in production_volume_columns:
@@ -907,9 +885,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
                 "acceptance_worktime_hour",
             ]
         ].copy()
-        df_sub_task["first_annotation_started_date"] = df_sub_task["first_annotation_started_datetime"].map(
-            lambda e: datetime_to_date(e) if not pandas.isna(e) else None
-        )
+        df_sub_task["first_annotation_started_date"] = df_sub_task["first_annotation_started_datetime"].map(lambda e: datetime_to_date(e) if not pandas.isna(e) else None)

         value_columns = [
             *production_volume_columns,
@@ -924,18 +900,14 @@ class WholeProductivityPerFirstAnnotationStartedDate:
             aggfunc="sum",
         ).fillna(0)
         if len(df_agg_sub_task) > 0:
-            df_agg_sub_task["task_count"] = df_sub_task.pivot_table(
-                values=["task_id"], index="first_annotation_started_date", aggfunc="count"
-            ).fillna(0)
+            df_agg_sub_task["task_count"] = df_sub_task.pivot_table(values=["task_id"], index="first_annotation_started_date", aggfunc="count").fillna(0)
         else:
             # 列だけ作る
             df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(value_columns, 0), task_count=0)

         # 日付の一覧を生成
         if len(df_agg_sub_task) > 0:
-            df_date_base = pandas.DataFrame(
-                index=[e.strftime("%Y-%m-%d") for e in pandas.date_range(start=df_agg_sub_task.index.min(), end=df_agg_sub_task.index.max())]
-            )
+            df_date_base = pandas.DataFrame(index=[e.strftime("%Y-%m-%d") for e in pandas.date_range(start=df_agg_sub_task.index.min(), end=df_agg_sub_task.index.max())])
         else:
             df_date_base = pandas.DataFrame()

@@ -953,9 +925,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         return True

     @classmethod
-    def empty(
-        cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None
-    ) -> WholeProductivityPerFirstAnnotationStartedDate:
+    def empty(cls, task_completion_criteria: TaskCompletionCriteria, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> WholeProductivityPerFirstAnnotationStartedDate:
         df = pandas.DataFrame(columns=cls.get_columns(custom_production_volume_list=custom_production_volume_list))
         return cls(df, task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -1018,9 +988,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
         for denominator in [e.value for e in production_volume_list]:
             for numerator in ["worktime", "annotation_worktime", "inspection_worktime", "acceptance_worktime"]:
                 df[f"{numerator}_minute/{denominator}"] = df[f"{numerator}_hour"] * 60 / df[denominator]
-                df[f"{numerator}_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = (
-                    get_weekly_sum(df[f"{numerator}_hour"]) * 60 / get_weekly_sum(df[denominator])
-                )
+                df[f"{numerator}_minute/{denominator}{WEEKLY_MOVING_AVERAGE_COLUMN_SUFFIX}"] = get_weekly_sum(df[f"{numerator}_hour"]) * 60 / get_weekly_sum(df[denominator])

         def create_div_element() -> Div:
             """
@@ -110,9 +110,7 @@ class WorktimePerDate:
         """
         df = self.df.copy()
         df["monitored_acceptance_worktime_hour"] = 0
-        df["monitored_worktime_hour"] = (
-            df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]
-        )
+        df["monitored_worktime_hour"] = df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]
         return WorktimePerDate(df)

     def is_empty(self) -> bool:
@@ -180,9 +178,7 @@ class WorktimePerDate:
             if f"monitored_{phase}_worktime_hour" not in df.columns:
                 df[f"monitored_{phase}_worktime_hour"] = 0

-        df["monitored_worktime_hour"] = (
-            df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]
-        )
+        df["monitored_worktime_hour"] = df["monitored_annotation_worktime_hour"] + df["monitored_inspection_worktime_hour"] + df["monitored_acceptance_worktime_hour"]

         if not actual_worktime.is_empty():
             df = df.merge(
@@ -450,9 +446,7 @@ class WorktimePerDate:
         # またpandas.NAを持つDataFrameも描画できないので、numpy.nanに変換する
         # TODO この問題が解決されたら、削除する
         # https://qiita.com/yuji38kwmt/items/b5da6ed521e827620186
-        df_cumulative = df_cumulative.astype(
-            {"date": "object", "account_id": "object", "user_id": "object", "username": "object", "biography": "object"}
-        )
+        df_cumulative = df_cumulative.astype({"date": "object", "account_id": "object", "user_id": "object", "username": "object", "biography": "object"})
         df_cumulative.replace(pandas.NA, numpy.nan, inplace=True)

         line_count = 0
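The workaround above exists because the plotting layer cannot draw a DataFrame containing `pandas.NA`, so the nullable columns are cast to plain `object` dtype and missing values are swapped for `numpy.nan` before rendering. A self-contained sketch of the same conversion on a toy frame:

```python
import numpy
import pandas

df_cumulative = pandas.DataFrame(
    {
        # Nullable "string" columns hold pandas.NA for missing values.
        "user_id": pandas.array(["alice", None], dtype="string"),
        "cumulative_worktime_hour": [1.5, 3.0],
    }
)

# Cast to object dtype, then replace pandas.NA with numpy.nan so downstream plotting can handle it.
df_cumulative = df_cumulative.astype({"user_id": "object"})
df_cumulative.replace(pandas.NA, numpy.nan, inplace=True)

print(df_cumulative["user_id"].tolist())  # ['alice', nan]
```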
@@ -31,9 +31,7 @@ def _get_first_annotation_started_datetime(sub_task_history_list: list[dict[str,
    1個のタスクのタスク履歴一覧から、最初に教師付フェーズを作業した日時を取得します。
    """
    task_history_list_with_annotation_phase = [
-        e
-        for e in sub_task_history_list
-        if e["phase"] == "annotation" and e["account_id"] is not None and isoduration_to_hour(e["accumulated_labor_time_milliseconds"]) > 0
+        e for e in sub_task_history_list if e["phase"] == "annotation" and e["account_id"] is not None and isoduration_to_hour(e["accumulated_labor_time_milliseconds"]) > 0
    ]
    if len(task_history_list_with_annotation_phase) == 0:
        return None
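`_get_first_annotation_started_datetime` keeps only the histories that are in the annotation phase, have an assignee, and have a non-zero accumulated labor time, then returns the `started_datetime` of the first remaining entry (the list is assumed to be in chronological order). A standalone sketch of that selection; the simplified duration parser is only a stand-in for the package's `isoduration_to_hour`:

```python
from typing import Any, Optional


def isoduration_to_hour(duration: str) -> float:
    # Simplified stand-in: handles only "PT<hours>H" / "PT<minutes>M" style ISO 8601 durations.
    value = duration.removeprefix("PT")
    if value.endswith("H"):
        return float(value[:-1])
    if value.endswith("M"):
        return float(value[:-1]) / 60
    return 0.0


def get_first_annotation_started_datetime(sub_task_history_list: list[dict[str, Any]]) -> Optional[str]:
    # Keep annotation-phase histories that were actually worked on by someone.
    histories = [
        e
        for e in sub_task_history_list
        if e["phase"] == "annotation" and e["account_id"] is not None and isoduration_to_hour(e["accumulated_labor_time_milliseconds"]) > 0
    ]
    if len(histories) == 0:
        return None
    return histories[0]["started_datetime"]


task_histories = [
    {"phase": "annotation", "account_id": None, "accumulated_labor_time_milliseconds": "PT0M", "started_datetime": "2024-05-01T09:00:00+09:00"},
    {"phase": "annotation", "account_id": "alice", "accumulated_labor_time_milliseconds": "PT30M", "started_datetime": "2024-05-02T09:00:00+09:00"},
]
print(get_first_annotation_started_datetime(task_histories))  # 2024-05-02T09:00:00+09:00
```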
@@ -41,9 +39,7 @@ def _get_first_annotation_started_datetime(sub_task_history_list: list[dict[str,
    return task_history_list_with_annotation_phase[0]["started_datetime"]


-def filter_task_histories(
-    task_histories: dict[str, list[dict[str, Any]]], *, start_date: Optional[str] = None, end_date: Optional[str] = None
-) -> dict[str, list[dict[str, Any]]]:
+def filter_task_histories(task_histories: dict[str, list[dict[str, Any]]], *, start_date: Optional[str] = None, end_date: Optional[str] = None) -> dict[str, list[dict[str, Any]]]:
    """
    タスク履歴を絞り込みます。
