annofabcli 1.96.1__py3-none-any.whl → 1.98.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- annofabcli/__version__.py +1 -1
- annofabcli/annotation/merge_segmentation.py +390 -0
- annofabcli/annotation/remove_segmentation_overlap.py +343 -0
- annofabcli/annotation/subcommand_annotation.py +4 -0
- annofabcli/input_data/change_input_data_name.py +5 -7
- annofabcli/input_data/update_metadata_of_input_data.py +2 -1
- annofabcli/project_member/put_project_members.py +32 -44
- annofabcli/statistics/list_annotation_count.py +2 -2
- annofabcli/statistics/list_annotation_duration.py +2 -2
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py +5 -5
- annofabcli/statistics/visualization/dataframe/task.py +26 -7
- annofabcli/statistics/visualization/dataframe/user_performance.py +3 -3
- annofabcli/statistics/visualization/dataframe/whole_performance.py +2 -2
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py +2 -2
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py +1 -1
- annofabcli/supplementary/delete_supplementary_data.py +11 -18
- annofabcli/supplementary/put_supplementary_data.py +58 -81
- annofabcli/task/list_tasks_added_task_history.py +91 -8
- annofabcli/task/update_metadata_of_task.py +2 -1
- {annofabcli-1.96.1.dist-info → annofabcli-1.98.0.dist-info}/METADATA +3 -3
- {annofabcli-1.96.1.dist-info → annofabcli-1.98.0.dist-info}/RECORD +24 -22
- {annofabcli-1.96.1.dist-info → annofabcli-1.98.0.dist-info}/LICENSE +0 -0
- {annofabcli-1.96.1.dist-info → annofabcli-1.98.0.dist-info}/WHEEL +0 -0
- {annofabcli-1.96.1.dist-info → annofabcli-1.98.0.dist-info}/entry_points.txt +0 -0
@@ -55,14 +55,14 @@ class Task:
         Returns:
             必須の列が存在するかどうか
         """
-        return len(set(self.
+        return len(set(self.required_columns) - set(df.columns)) == 0

-    def
+    def missing_required_columns(self, df: pandas.DataFrame) -> list[str]:
         """
-
+        欠損している必須の列名を取得します。

         """
-        return list(set(self.
+        return list(set(self.required_columns) - set(df.columns))

     def __init__(self, df: pandas.DataFrame, *, custom_production_volume_list: Optional[list[ProductionVolumeColumn]] = None) -> None:
         self.custom_production_volume_list = custom_production_volume_list if custom_production_volume_list is not None else []
@@ -72,7 +72,8 @@ class Task:

         if not self.required_columns_exist(df):
             raise ValueError(
-                f"引数'df'の'columns'に次の列が存在していません。 {self.
+                f"引数'df'の'columns'に次の列が存在していません。 {self.missing_required_columns(df)} :: "
+                f"次の列が必須です。{self.required_columns} の列が必要です。"
             )

         self.df = df
@@ -97,7 +98,19 @@ class Task:
         return Task(df, custom_production_volume_list=self.custom_production_volume_list)

     @property
-    def
+    def optional_columns(self) -> list[str]:
+        return [
+            # 抜取検査または抜取受入によりスキップされたか
+            "inspection_is_skipped",
+            "acceptance_is_skipped",
+            # 差し戻し後の作業時間
+            "post_rejection_annotation_worktime_hour",
+            "post_rejection_inspection_worktime_hour",
+            "post_rejection_acceptance_worktime_hour",
+        ]
+
+    @property
+    def required_columns(self) -> list[str]:
         return [
             # 基本的な情報
             "project_id",
@@ -144,6 +157,10 @@ class Task:
             "acceptance_is_skipped",
         ]

+    @property
+    def columns(self) -> list[str]:
+        return self.required_columns + self.optional_columns
+
     @classmethod
     def from_api_content(
         cls,
@@ -502,7 +519,9 @@ class Task:
         if not self._validate_df_for_output(output_file):
             return

-
+        existing_optional_columns = [col for col in self.optional_columns if col in set(self.df.columns)]
+        columns = self.required_columns + existing_optional_columns
+        print_csv(self.df[columns], str(output_file))

     def mask_user_info(
         self,
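
The five task.py hunks above share one pattern: the required and optional column names live in two properties, validation is a set difference against `df.columns`, and CSV output takes the required columns plus whichever optional columns are actually present. The following is a minimal, hypothetical sketch of that pattern (the class and column names are invented for illustration, not annofabcli code):

    import pandas

    class MiniTask:
        """Hypothetical stand-in for the Task wrapper above; column names are illustrative."""

        @property
        def required_columns(self) -> list[str]:
            return ["project_id", "task_id", "status"]

        @property
        def optional_columns(self) -> list[str]:
            return ["post_rejection_annotation_worktime_hour"]

        @property
        def columns(self) -> list[str]:
            return self.required_columns + self.optional_columns

        def missing_required_columns(self, df: pandas.DataFrame) -> list[str]:
            # Required columns that the DataFrame does not provide.
            return list(set(self.required_columns) - set(df.columns))

        def __init__(self, df: pandas.DataFrame) -> None:
            missing = self.missing_required_columns(df)
            if missing:
                raise ValueError(f"df is missing required columns: {missing}")
            self.df = df

        def to_csv(self, output_file: str) -> None:
            # Output required columns plus whichever optional columns actually exist in df.
            existing_optional = [c for c in self.optional_columns if c in set(self.df.columns)]
            self.df[self.required_columns + existing_optional].to_csv(output_file, index=False)

    df = pandas.DataFrame({"project_id": ["p1"], "task_id": ["t1"], "status": ["complete"]})
    MiniTask(df).to_csv("tasks.csv")
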
@@ -571,7 +571,7 @@ class UserPerformance:
            level0_columns = ["monitored_worktime_hour", *task_worktime_by_phase_user.quantity_columns]
            columns = [(c0, c1) for c0, c1 in df.columns if c0 in level0_columns]

-            return df.fillna(
+            return df.fillna(dict.fromkeys(columns, 0))

        if task_completion_criteria == TaskCompletionCriteria.ACCEPTANCE_REACHED:
            # 受入フェーズに到達したらタスクの作業が完了したとみなす場合、
@@ -634,8 +634,8 @@ class UserPerformance:
         ]

         value_columns = columns - set(basic_columns)
-        dtypes =
-        dtypes.update(
+        dtypes = dict.fromkeys(basic_columns, "string")
+        dtypes.update(dict.fromkeys(value_columns, "float64"))
         return df.astype(dtypes)

     def _validate_df_for_output(self, output_file: Path) -> bool:
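
This hunk and several of the following ones (WholePerformance, WholeProductivityPer*, WorktimePerDate) replace dict comprehensions such as `{c: 0 for c in columns}` with `dict.fromkeys(columns, 0)`. The two are equivalent for scalar values, and `dict.fromkeys` is the spelling linters commonly suggest; the only caveat is that a mutable value would be shared by every key, which does not apply here. A short, self-contained illustration (not package code):

    import pandas

    df = pandas.DataFrame({"a": [1.0, None], "b": [None, 2.0], "note": ["x", None]})
    value_columns = ["a", "b"]

    # Same mapping, two spellings.
    assert dict.fromkeys(value_columns, 0) == {c: 0 for c in value_columns}

    # Fill only the numeric columns with 0, as in the fillna() hunks.
    df = df.fillna(dict.fromkeys(value_columns, 0))

    # Build a dtype mapping the same way, as in the astype() hunk above.
    dtypes = dict.fromkeys(["note"], "string")
    dtypes.update(dict.fromkeys(value_columns, "float64"))
    print(df.astype(dtypes).dtypes)
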
@@ -197,8 +197,8 @@ class WholePerformance:
             ("working_days", ""),
         ]

-        data: dict[tuple[str, str], float] =
-        data.update(
+        data: dict[tuple[str, str], float] = dict.fromkeys(worktime_columns + count_columns, 0)
+        data.update(dict.fromkeys(ratio_columns + stdev_columns + date_columns, numpy.nan))

         return cls(pandas.Series(data), task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -187,7 +187,7 @@ class WholeProductivityPerCompletedDate:
             df_agg_sub_task["task_count"] = df_sub_task.pivot_table(values=["task_id"], index=date_column, aggfunc="count").fillna(0)
         else:
             # 列だけ作る
-            df_agg_sub_task = df_agg_sub_task.assign(**
+            df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(production_volume_columns, 0), task_count=0)

         df_worktime = worktime_per_date.df
         if len(df_worktime) > 0:
@@ -929,7 +929,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
             ).fillna(0)
         else:
             # 列だけ作る
-            df_agg_sub_task = df_agg_sub_task.assign(**
+            df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(value_columns, 0), task_count=0)

         # 日付の一覧を生成
         if len(df_agg_sub_task) > 0:
@@ -200,7 +200,7 @@ class WorktimePerDate:
             "monitored_inspection_worktime_hour",
             "monitored_acceptance_worktime_hour",
         ]
-        df.fillna(
+        df.fillna(dict.fromkeys(value_columns, 0), inplace=True)

         df = df.merge(df_member, how="left", on="account_id")
         return df[
@@ -31,18 +31,11 @@ key: input_data_id, value: supplementary_data_idのList


 def get_input_data_supplementary_data_dict_from_csv(csv_path: Path) -> InputDataSupplementaryDataDict:
-    df = pandas.read_csv(
-
-
-        header=None,
-        names=[
-            "input_data_id",
-            "supplementary_data_id",
-        ],
-        # IDは必ず文字列として読み込むようにする
-        dtype={"input_data_id": str, "supplementary_data_id": str},
+    df: pandas.DataFrame = pandas.read_csv(
+        csv_path,
+        dtype={"input_data_id": "string", "supplementary_data_id": "string"},
     )
-    input_data_dict = defaultdict(list)
+    input_data_dict: InputDataSupplementaryDataDict = defaultdict(list)
     for input_data_id, supplementary_data_id in zip(df["input_data_id"], df["supplementary_data_id"]):
         input_data_dict[input_data_id].append(supplementary_data_id)
     return input_data_dict
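
The hunk above switches `supplementary delete --csv` from a headerless CSV with hard-coded column names to a headered CSV read with pandas' nullable "string" dtype, so IDs such as `001` keep their leading zeros, and then groups the rows into `input_data_id -> [supplementary_data_id, ...]` with a `defaultdict(list)`. A hypothetical, self-contained version of the same flow:

    import io
    from collections import defaultdict

    import pandas

    csv_text = "input_data_id,supplementary_data_id\n001,s1\n001,s2\n002,s3\n"

    # "string" dtype keeps IDs like "001" as text; default parsing would turn them into the integer 1.
    df = pandas.read_csv(
        io.StringIO(csv_text),
        dtype={"input_data_id": "string", "supplementary_data_id": "string"},
    )

    input_data_dict: dict[str, list[str]] = defaultdict(list)
    for input_data_id, supplementary_data_id in zip(df["input_data_id"], df["supplementary_data_id"]):
        input_data_dict[input_data_id].append(supplementary_data_id)

    print(dict(input_data_dict))  # {'001': ['s1', 's2'], '002': ['s3']}
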
@@ -58,7 +51,7 @@ def get_input_data_supplementary_data_dict_from_list(supplementary_data_list: li


 class DeleteSupplementaryDataMain(CommandLineWithConfirm):
-    def __init__(self, service: annofabapi.Resource, all_yes: bool = False) -> None:
+    def __init__(self, service: annofabapi.Resource, *, all_yes: bool = False) -> None:
         self.service = service
         self.facade = AnnofabApiFacade(service)
         CommandLineWithConfirm.__init__(self, all_yes)
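
The `*` inserted into `__init__` here (and into `SubPutSupplementaryData.__init__` further down) makes `all_yes` keyword-only, so call sites must spell the flag out. A two-line illustration with a hypothetical class:

    class Example:
        def __init__(self, service: str, *, all_yes: bool = False) -> None:
            self.service = service
            self.all_yes = all_yes

    Example("svc", all_yes=True)  # OK: passed by keyword
    # Example("svc", True)        # TypeError: takes 2 positional arguments but 3 were given
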
@@ -82,7 +75,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):

         input_data = self.service.wrapper.get_input_data_or_none(project_id, input_data_id)
         if input_data is None:
-            logger.warning(f"input_data_id={input_data_id} の入力データは存在しないのでスキップします。")
+            logger.warning(f"input_data_id='{input_data_id}' の入力データは存在しないのでスキップします。")
             return 0

         supplementary_data_list, _ = self.service.api.get_supplementary_data_list(project_id, input_data_id)
@@ -122,7 +115,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
                 continue
         return deleted_count

-    def delete_supplementary_data_list(self, project_id: str, input_data_dict: InputDataSupplementaryDataDict)
+    def delete_supplementary_data_list(self, project_id: str, input_data_dict: InputDataSupplementaryDataDict) -> None:
         deleted_count = 0
         total_count = sum(len(e) for e in input_data_dict.values())
         for input_data_id, supplementary_data_id_list in input_data_dict.items():
@@ -168,7 +161,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):

         return deleted_count

-    def delete_supplementary_data_list_by_input_data_id(self, project_id: str, input_data_id_list: list[str])
+    def delete_supplementary_data_list_by_input_data_id(self, project_id: str, input_data_id_list: list[str]) -> None:
         dict_deleted_count: dict[str, int] = {}
         for input_data_id in input_data_id_list:
             input_data = self.service.wrapper.get_input_data_or_none(project_id, input_data_id)
@@ -261,9 +254,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
             "削除する補助情報が記載されたCSVファイルのパスを指定してください。\n"
             "CSVのフォーマットは以下の通りです。"
             "詳細は https://annofab-cli.readthedocs.io/ja/latest/command_reference/supplementary/delete.html を参照してください。\n"
-            " *
-            " *
-            " *
+            " * ヘッダ行あり, カンマ区切り\n"
+            " * input_data_id (required)\n"
+            " * supplementary_data_id (required)\n"
         ),
     )

@@ -41,17 +41,17 @@ def convert_supplementary_data_name_to_supplementary_data_id(supplementary_data_


 @dataclass
-class
+class CliSupplementaryData(DataClassJsonMixin):
     """
-
+    コマンドラインから指定された(`--csv`または`--json`)補助情報
     """

     input_data_id: str
-    supplementary_data_number: int
     supplementary_data_name: str
     supplementary_data_path: str
     supplementary_data_id: Optional[str] = None
     supplementary_data_type: Optional[str] = None
+    supplementary_data_number: Optional[int] = None


 @dataclass
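
In the renamed `CliSupplementaryData` dataclass, `supplementary_data_number` becomes `Optional[int] = None` and moves below the other defaulted fields. That placement is forced by the dataclass rules: a field without a default may not follow a field with one. A small example of the rule, unrelated to annofabcli:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Ok:
        name: str                      # fields without defaults come first
        number: Optional[int] = None   # defaulted (optional) fields go last

    # @dataclass
    # class Broken:
    #     number: Optional[int] = None
    #     name: str  # TypeError: non-default argument 'name' follows default argument
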
@@ -75,15 +75,13 @@ class SubPutSupplementaryData:

     Args:
         service:
-        facade:
         all_yes:
     """

-    def __init__(self, service: annofabapi.Resource,
+    def __init__(self, service: annofabapi.Resource, *, all_yes: bool = False) -> None:
         self.service = service
-        self.facade = facade
         self.all_yes = all_yes
-        self.supplementary_data_cache: dict[str, list[SupplementaryData]] = {}
+        self.supplementary_data_cache: dict[tuple[str, str], list[SupplementaryData]] = {}

     def put_supplementary_data(self, project_id: str, supplementary_data: SupplementaryDataForPut) -> None:
         file_path = get_file_scheme_path(supplementary_data.supplementary_data_path)
@@ -155,7 +153,7 @@ class SubPutSupplementaryData:
         return yes

     def confirm_put_supplementary_data(
-        self, csv_supplementary_data:
+        self, csv_supplementary_data: CliSupplementaryData, supplementary_data_id: str, *, already_exists: bool = False
     ) -> bool:
         if already_exists:
             message_for_confirm = (
@@ -170,66 +168,63 @@ class SubPutSupplementaryData:

         return self.confirm_processing(message_for_confirm)

-    def
-        key = f"{project_id},{input_data_id}"
-        if key not in self.supplementary_data_cache:
-            supplementary_data_list, _ = self.service.api.get_supplementary_data_list(project_id, input_data_id)
-            self.supplementary_data_cache[key] = supplementary_data_list if supplementary_data_list is not None else []
-        return self.supplementary_data_cache[key]
-
-    def get_supplementary_data_by_id(self, project_id: str, input_data_id: str, supplementary_data_id: str) -> Optional[SupplementaryData]:
-        cached_list = self.get_supplementary_data_list_cached(project_id, input_data_id)
-        return first_true(cached_list, pred=lambda e: e["supplementary_data_id"] == supplementary_data_id)
-
-    def put_supplementary_data_main(self, project_id: str, csv_supplementary_data: CsvSupplementaryData, *, overwrite: bool = False) -> bool:
+    def put_supplementary_data_main(self, project_id: str, csv_data: CliSupplementaryData, *, overwrite: bool = False) -> bool:
         last_updated_datetime = None
-        input_data_id =
+        input_data_id = csv_data.input_data_id
         supplementary_data_id = (
-
-            if
-            else convert_supplementary_data_name_to_supplementary_data_id(
+            csv_data.supplementary_data_id
+            if csv_data.supplementary_data_id is not None
+            else convert_supplementary_data_name_to_supplementary_data_id(csv_data.supplementary_data_name)
         )
-        supplementary_data_path = csv_supplementary_data.supplementary_data_path

-
-        if
+        supplementary_data_list = self.service.wrapper.get_supplementary_data_list_or_none(project_id, input_data_id)
+        if supplementary_data_list is None:
+            # 入力データが存在しない場合は、`supplementary_data_list`はNoneになる
             logger.warning(f"input_data_id='{input_data_id}'である入力データは存在しないため、補助情報の登録をスキップします。")
             return False

-        old_supplementary_data =
+        old_supplementary_data = first_true(supplementary_data_list, pred=lambda e: e["supplementary_data_id"] == supplementary_data_id)
+
+        # 補助情報numberが未指定の場合は、既存の補助情報numberの最大値+1にする
+        max_supplementary_data_number = max((e["supplementary_data_number"] for e in supplementary_data_list), default=0)
+        if csv_data.supplementary_data_number is not None:
+            supplementary_data_number = csv_data.supplementary_data_number
+        elif old_supplementary_data is not None:
+            supplementary_data_number = old_supplementary_data["supplementary_data_number"]
+        else:
+            supplementary_data_number = max_supplementary_data_number + 1

         if old_supplementary_data is not None:
             if overwrite:
                 logger.debug(
                     f"supplementary_data_id='{supplementary_data_id}'である補助情報がすでに存在します。 :: "
-                    f"input_data_id='{input_data_id}', supplementary_data_name='{
+                    f"input_data_id='{input_data_id}', supplementary_data_name='{csv_data.supplementary_data_name}'"
                 )
                 last_updated_datetime = old_supplementary_data["updated_datetime"]
             else:
                 logger.debug(
                     f"supplementary_data_id='{supplementary_data_id}'である補助情報がすでに存在するので、補助情報の登録をスキップします。 :: "
-                    f"input_data_id='{input_data_id}', supplementary_data_name='{
+                    f"input_data_id='{input_data_id}', supplementary_data_name='{csv_data.supplementary_data_name}'"
                 )
                 return False

-        file_path = get_file_scheme_path(supplementary_data_path)
-        logger.debug(f"csv_supplementary_data='{csv_supplementary_data}'")
+        file_path = get_file_scheme_path(csv_data.supplementary_data_path)
         if file_path is not None:  # noqa: SIM102
             if not Path(file_path).exists():
-                logger.warning(f"'{supplementary_data_path}'
+                logger.warning(f"'{csv_data.supplementary_data_path}' は存在しません。補助情報の登録をスキップします。")
                 return False

-        if not self.confirm_put_supplementary_data(
+        if not self.confirm_put_supplementary_data(csv_data, supplementary_data_id, already_exists=last_updated_datetime is not None):
             return False

         # 補助情報を登録
         supplementary_data_for_put = SupplementaryDataForPut(
-            input_data_id=
+            input_data_id=csv_data.input_data_id,
             supplementary_data_id=supplementary_data_id,
-            supplementary_data_name=
-            supplementary_data_path=
-            supplementary_data_type=
-            supplementary_data_number=
+            supplementary_data_name=csv_data.supplementary_data_name,
+            supplementary_data_path=csv_data.supplementary_data_path,
+            supplementary_data_type=csv_data.supplementary_data_type,
+            supplementary_data_number=supplementary_data_number,
             last_updated_datetime=last_updated_datetime,
         )
         try:
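
The core of the rewritten `put_supplementary_data_main` is how `supplementary_data_number` is resolved now that the CSV/JSON field is optional: an explicit value wins, otherwise the number of the existing record with the same supplementary_data_id is reused, otherwise it becomes max(existing numbers) + 1. A stripped-down sketch of that decision, using plain dicts instead of API responses:

    from typing import Optional

    def resolve_number(existing: list[dict], requested: Optional[int], old: Optional[dict]) -> int:
        """Pick a supplementary_data_number: explicit value > existing record > max + 1."""
        if requested is not None:
            return requested
        if old is not None:
            return old["supplementary_data_number"]
        return max((e["supplementary_data_number"] for e in existing), default=0) + 1

    existing = [{"supplementary_data_id": "s1", "supplementary_data_number": 3}]
    assert resolve_number(existing, requested=7, old=None) == 7
    assert resolve_number(existing, requested=None, old=existing[0]) == 3
    assert resolve_number(existing, requested=None, old=None) == 4
    assert resolve_number([], requested=None, old=None) == 1
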
@@ -261,7 +256,7 @@ class PutSupplementaryData(CommandLine):
     def put_supplementary_data_list(
         self,
         project_id: str,
-        supplementary_data_list: list[
+        supplementary_data_list: list[CliSupplementaryData],
         *,
         overwrite: bool = False,
         parallelism: Optional[int] = None,
@@ -282,7 +277,7 @@ class PutSupplementaryData(CommandLine):

         count_put_supplementary_data = 0

-        obj = SubPutSupplementaryData(service=self.service,
+        obj = SubPutSupplementaryData(service=self.service, all_yes=self.all_yes)
         if parallelism is not None:
             partial_func = partial(obj.put_supplementary_data_main, project_id, overwrite=overwrite)
             with Pool(parallelism) as pool:
@@ -291,46 +286,29 @@ class PutSupplementaryData(CommandLine):

         else:
             for csv_supplementary_data in supplementary_data_list:
-                result = obj.put_supplementary_data_main(project_id,
+                result = obj.put_supplementary_data_main(project_id, csv_data=csv_supplementary_data, overwrite=overwrite)
                 if result:
                     count_put_supplementary_data += 1

         logger.info(f"{project_title} に、{count_put_supplementary_data} / {len(supplementary_data_list)} 件の補助情報を登録しました。")

     @staticmethod
-    def get_supplementary_data_list_from_dict(supplementary_data_dict_list: list[dict[str, Any]]) -> list[
-        return [
+    def get_supplementary_data_list_from_dict(supplementary_data_dict_list: list[dict[str, Any]]) -> list[CliSupplementaryData]:
+        return [CliSupplementaryData.from_dict(e) for e in supplementary_data_dict_list]

     @staticmethod
-    def get_supplementary_data_list_from_csv(csv_path: Path) -> list[
-        def create_supplementary_data(e: Any) -> CsvSupplementaryData:  # noqa: ANN401
-            supplementary_data_id = e.supplementary_data_id if not pandas.isna(e.supplementary_data_id) else None
-            supplementary_data_type = e.supplementary_data_type if not pandas.isna(e.supplementary_data_type) else None
-            return CsvSupplementaryData(
-                input_data_id=e.input_data_id,
-                supplementary_data_number=e.supplementary_data_number,
-                supplementary_data_name=e.supplementary_data_name,
-                supplementary_data_path=e.supplementary_data_path,
-                supplementary_data_id=supplementary_data_id,
-                supplementary_data_type=supplementary_data_type,
-            )
-
+    def get_supplementary_data_list_from_csv(csv_path: Path) -> list[CliSupplementaryData]:
         df = pandas.read_csv(
             str(csv_path),
-
-
-
-            "
-            "
-            "
-
-                "supplementary_data_id",
-                "supplementary_data_type",
-            ),
-            # IDは必ず文字列として読み込むようにする
-            dtype={"input_data_id": str, "supplementary_data_id": str, "supplementary_data_name": str},
+            dtype={
+                "input_data_id": "string",
+                "supplementary_data_id": "string",
+                "supplementary_data_name": "string",
+                "supplementary_data_path": "string",
+                "supplementary_data_number": "Int64",
+            },
         )
-        supplementary_data_list = [
+        supplementary_data_list = [CliSupplementaryData.from_dict(e) for e in df.to_dict("records")]
         return supplementary_data_list

 COMMON_MESSAGE = "annofabcli supplementary_data put: error:"
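
`get_supplementary_data_list_from_csv` now reads every column with an explicit pandas dtype, including the nullable "Int64" (capital I) for `supplementary_data_number`: a column with empty cells then stays an integer column whose gaps are `<NA>`, instead of being upcast to float. A small demonstration, independent of the command:

    import io

    import pandas

    csv_text = "input_data_id,supplementary_data_name,supplementary_data_number\n001,foo,1\n002,bar,\n"

    df = pandas.read_csv(
        io.StringIO(csv_text),
        dtype={
            "input_data_id": "string",
            "supplementary_data_name": "string",
            "supplementary_data_number": "Int64",  # nullable integer: the empty cell becomes <NA>
        },
    )

    print(df.dtypes)                                 # string, string, Int64
    print(df["supplementary_data_number"].tolist())  # [1, <NA>]
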
@@ -393,15 +371,15 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
         "--csv",
         type=str,
         help=(
-            "補助情報が記載された
+            "補助情報が記載されたCSVファイルのパスを指定してください。CSVのフォーマットは、以下の通りです。\n"
             "\n"
-            " *
-            " *
-            " *
-            " *
-            " *
-            " *
-            " *
+            " * ヘッダ行あり, カンマ区切り\n"
+            " * input_data_id (required)\n"
+            " * supplementary_data_name (required)\n"
+            " * supplementary_data_path (required)\n"
+            " * supplementary_data_id\n"
+            " * supplementary_data_type\n"
+            " * supplementary_data_number\n"
             "\n"
             "各項目の詳細は https://annofab-cli.readthedocs.io/ja/latest/command_reference/supplementary/put.html を参照してください。"
         ),
@@ -410,7 +388,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
     JSON_SAMPLE = [  # noqa: N806
         {
             "input_data_id": "input1",
-            "supplementary_data_number": 1,
             "supplementary_data_name": "foo",
             "supplementary_data_path": "file://foo.jpg",
         }
@@ -431,7 +408,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
         "--overwrite",
         action="store_true",
-        help="指定した場合、supplementary_data_id
+        help="指定した場合、supplementary_data_idがすでに存在していたら上書きします。指定しなければ、スキップします。",
     )

     parser.add_argument(
@@ -448,7 +425,7 @@ def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argpa
     subcommand_name = "put"
     subcommand_help = "補助情報を登録します。"
     description = "補助情報を登録します。"
-    epilog = "
+    epilog = "オーナーロールを持つユーザで実行してください。"

     parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
     parse_args(parser)
@@ -9,19 +9,90 @@ import annofabapi
 import more_itertools
 import pandas
 from annofabapi.models import Task, TaskHistory, TaskPhase, TaskStatus
+from annofabapi.util.task_history import find_rejected_task_history_indices
 from annofabapi.utils import get_task_history_index_skipped_acceptance, get_task_history_index_skipped_inspection

 import annofabcli
 from annofabcli.common.cli import ArgumentParser, CommandLine, build_annofabapi_resource_and_login
 from annofabcli.common.enums import FormatArgument
 from annofabcli.common.facade import AnnofabApiFacade
-from annofabcli.common.utils import print_csv, print_json
+from annofabcli.common.utils import isoduration_to_hour, print_csv, print_json
 from annofabcli.common.visualize import AddProps
 from annofabcli.task.list_tasks import ListTasksMain

 logger = logging.getLogger(__name__)


+def get_post_rejection_annotation_worktime_hour(task_histories: list[TaskHistory]) -> float:
+    """
+    検査/受入フェーズでの差し戻し後の教師付作業時間を算出します。
+    指摘による修正にかかった時間を把握するのに利用できます。
+
+    Args:
+        task_histories: タスク履歴
+
+    """
+    rejected_task_history_indices = find_rejected_task_history_indices(task_histories)
+    if len(rejected_task_history_indices) == 0:
+        return 0.0
+
+    # 差し戻された履歴の直後で、教師付フェーズの作業時間を算出する
+    min_rejected_task_history_index = min(rejected_task_history_indices)
+    return sum(
+        isoduration_to_hour(history["accumulated_labor_time_milliseconds"])
+        for history in task_histories[min_rejected_task_history_index + 1 :]
+        if history["phase"] == TaskPhase.ANNOTATION.value
+    )
+
+
+def get_post_rejection_inspection_worktime_hour(task_histories: list[TaskHistory]) -> float:
+    """
+    検査/受入フェーズでの差し戻し後の検査作業時間を算出します。
+
+    Args:
+        task_histories: タスク履歴
+
+    """
+    rejected_task_history_indices = find_rejected_task_history_indices(task_histories)
+    if len(rejected_task_history_indices) == 0:
+        return 0.0
+
+    # 差し戻された履歴の直後で、検査フェーズの作業時間を算出する
+    min_rejected_task_history_index = min(rejected_task_history_indices)
+    return sum(
+        isoduration_to_hour(history["accumulated_labor_time_milliseconds"])
+        for history in task_histories[min_rejected_task_history_index + 1 :]
+        if history["phase"] == TaskPhase.INSPECTION.value
+    )
+
+
+def get_post_rejection_acceptance_worktime_hour(task_histories: list[TaskHistory]) -> float:
+    """
+    受入フェーズでの差し戻し後の受入作業時間を算出します。
+
+
+    Args:
+        task_histories: タスク履歴
+
+    """
+    rejected_task_history_indices = find_rejected_task_history_indices(task_histories)
+
+    # 検査フェーズでの差し戻しは除外する
+    # 検査フェーズでの差し戻しは、受入作業の回数に影響しないため
+    acceptance_rejected_indices = [index for index in rejected_task_history_indices if task_histories[index]["phase"] == TaskPhase.ACCEPTANCE.value]
+    if len(acceptance_rejected_indices) == 0:
+        return 0.0
+
+    min_rejected_acceptance_task_history_index = min(acceptance_rejected_indices)
+
+    # 差し戻された履歴の直後以降で、受入フェーズの作業時間を算出する
+    return sum(
+        isoduration_to_hour(history["accumulated_labor_time_milliseconds"])
+        for history in task_histories[min_rejected_acceptance_task_history_index + 1 :]
+        if history["phase"] == TaskPhase.ACCEPTANCE.value
+    )
+
+
 class AddingAdditionalInfoToTask:
     """タスクに付加的な情報を追加するためのクラス

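
The three new module-level functions share one shape: locate the task-history entries where the task was rejected (`find_rejected_task_history_indices` from annofabapi), take the earliest such index, and sum the ISO 8601 durations in `accumulated_labor_time_milliseconds` for the matching phase in every later entry; the acceptance variant additionally keeps only rejections that happened in the acceptance phase. The sketch below reproduces that arithmetic on plain dicts, with a simplified local duration parser standing in for annofabcli's `isoduration_to_hour`, so the logic can be checked without the API:

    import isodate  # already a dependency of annofabcli

    def iso_duration_to_hour(duration: str) -> float:
        """e.g. 'PT1H30M' -> 1.5 (simplified stand-in for isoduration_to_hour)."""
        return isodate.parse_duration(duration).total_seconds() / 3600

    def post_rejection_worktime_hour(task_histories: list[dict], rejected_indices: list[int], phase: str) -> float:
        """Sum the worktime of `phase` in every history entry after the first rejection."""
        if not rejected_indices:
            return 0.0
        first_rejected = min(rejected_indices)
        return sum(
            iso_duration_to_hour(h["accumulated_labor_time_milliseconds"])
            for h in task_histories[first_rejected + 1 :]
            if h["phase"] == phase
        )

    histories = [
        {"phase": "annotation", "accumulated_labor_time_milliseconds": "PT1H"},
        {"phase": "acceptance", "accumulated_labor_time_milliseconds": "PT30M"},  # rejected here (index 1)
        {"phase": "annotation", "accumulated_labor_time_milliseconds": "PT45M"},  # rework after the rejection
        {"phase": "acceptance", "accumulated_labor_time_milliseconds": "PT15M"},
    ]
    assert post_rejection_worktime_hour(histories, rejected_indices=[1], phase="annotation") == 0.75
    assert post_rejection_worktime_hour(histories, rejected_indices=[1], phase="acceptance") == 0.25
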
@@ -210,12 +281,12 @@ class AddingAdditionalInfoToTask:
                 }
             )

-
-        if
+        member = self.visualize.get_project_member_from_account_id(account_id)
+        if member is not None:
             task.update(
                 {
-                    f"{column_prefix}_user_id":
-                    f"{column_prefix}_username":
+                    f"{column_prefix}_user_id": member["user_id"],
+                    f"{column_prefix}_username": member["username"],
                 }
             )
         else:
@@ -243,7 +314,7 @@ class AddingAdditionalInfoToTask:

         return task

-    def add_additional_info_to_task(self, task: dict[str, Any])
+    def add_additional_info_to_task(self, task: dict[str, Any]) -> None:
         """タスクの付加的情報を、タスクに追加する。
         以下の列を追加する。
         * user_id
@@ -298,6 +369,10 @@ class AddingAdditionalInfoToTask:
         task["inspection_is_skipped"] = self.is_inspection_phase_skipped(task_histories)
         task["acceptance_is_skipped"] = self.is_acceptance_phase_skipped(task_histories)

+        task["post_rejection_annotation_worktime_hour"] = get_post_rejection_annotation_worktime_hour(task_histories)
+        task["post_rejection_inspection_worktime_hour"] = get_post_rejection_inspection_worktime_hour(task_histories)
+        task["post_rejection_acceptance_worktime_hour"] = get_post_rejection_acceptance_worktime_hour(task_histories)
+

 class ListTasksAddedTaskHistoryMain:
     def __init__(self, service: annofabapi.Resource, project_id: str) -> None:
@@ -373,9 +448,17 @@ class TasksAddedTaskHistoryOutput:
             for info in ["user_id", "username", "started_datetime", "worktime_hour"]
         ]

-        return
+        return (
+            base_columns
+            + task_history_columns
+            + [
+                "post_rejection_annotation_worktime_hour",
+                "post_rejection_inspection_worktime_hour",
+                "post_rejection_acceptance_worktime_hour",
+            ]
+        )

-    def output(self, output_path: Path, output_format: FormatArgument)
+    def output(self, output_path: Path, output_format: FormatArgument) -> None:
         task_list = self.task_list
         if len(task_list) == 0:
             logger.info("タスク一覧の件数が0件のため、出力しません。")
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import argparse
+import copy
 import json
 import logging
 import multiprocessing
@@ -212,7 +213,7 @@ class UpdateMetadataOfTask(CommandLine):
         if args.metadata is not None:
             metadata = annofabcli.common.cli.get_json_from_args(args.metadata)
             assert task_id_list is not None, "'--metadata'を指定したときは'--task_id'は必須です。"
-            metadata_by_task_id = {task_id: metadata for task_id in task_id_list}
+            metadata_by_task_id = {task_id: copy.deepcopy(metadata) for task_id in task_id_list}
         elif args.metadata_by_task_id is not None:
             metadata_by_task_id = annofabcli.common.cli.get_json_from_args(args.metadata_by_task_id)
             if task_id_list is not None:
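
The `copy.deepcopy` wrapper added above matters because the old comprehension placed the same dict object behind every task_id, so mutating the metadata of one task would silently change it for all of them. A minimal illustration of the difference:

    import copy

    metadata = {"priority": "high", "tags": ["a"]}
    task_id_list = ["t1", "t2"]

    shared = {task_id: metadata for task_id in task_id_list}            # every key points at the same dict
    independent = {task_id: copy.deepcopy(metadata) for task_id in task_id_list}

    shared["t1"]["tags"].append("b")
    independent["t1"]["tags"].append("b")

    print(shared["t2"]["tags"])       # ['a', 'b']  -- the change leaked to every task
    print(independent["t2"]["tags"])  # ['a']       -- each task has its own copy
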
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: annofabcli
-Version: 1.96.1
+Version: 1.98.0
 Summary: Utility Command Line Interface for AnnoFab
 Home-page: https://github.com/kurusugawa-computer/annofab-cli
 License: MIT
@@ -19,8 +19,8 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Utilities
 Requires-Dist: Pillow
-Requires-Dist: annofabapi (>=1.1,<2.0)
-Requires-Dist: bokeh (>=3.3,<
+Requires-Dist: annofabapi (>=1.4.1,<2.0.0)
+Requires-Dist: bokeh (>=3.3,<3.7)
 Requires-Dist: dictdiffer
 Requires-Dist: isodate
 Requires-Dist: jmespath