annofabcli 1.96.1__py3-none-any.whl → 1.97.0__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
annofabcli/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "1.96.1" # `poetry-dynamic-versioning`を使ってGitHubのバージョンタグを取得している。変更不要
+ __version__ = "1.97.0" # `poetry-dynamic-versioning`を使ってGitHubのバージョンタグを取得している。変更不要
@@ -143,11 +143,9 @@ def create_changed_input_data_list_from_csv(csv_file: Path) -> list[ChangedInput
  変更対象の入力データのlist
  """
  df_input_data = pandas.read_csv(
- str(csv_file),
- header=None,
- names=("input_data_id", "input_data_name"),
+ csv_file,
  # 文字列として読み込むようにする
- dtype={"input_data_id": str, "input_data_name": str},
+ dtype={"input_data_id": "string", "input_data_name": "string"},
  )

  input_data_dict_list = df_input_data.to_dict("records")
@@ -211,9 +209,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
  "変更対象の入力データが記載されたCSVファイルのパスを指定してください。\n"
  "CSVのフォーマットは以下の通りです。"
  "\n"
- " * ヘッダ行なし, カンマ区切り\n"
- " * 1列目: input_data_id (required)\n"
- " * 2列目: input_data_name (required)\n"
+ " * ヘッダ行あり, カンマ区切り\n"
+ " * input_data_id (required)\n"
+ " * input_data_name (required)\n"
  ),
  )

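The two hunks above change `annofabcli input_data change_input_data_name` so that the `--csv` file now needs a header row, and the ID columns are read with pandas' nullable `"string"` dtype instead of `str`. Below is a minimal sketch (the sample rows are invented here, not taken from the package) of how such a CSV is read:

```python
import io

import pandas

# Hypothetical CSV content with the new header row.
csv_text = """input_data_id,input_data_name
data1,movie_01.mp4
0002,movie_02.mp4
"""

df = pandas.read_csv(
    io.StringIO(csv_text),
    # "string" is pandas' nullable string dtype, so IDs such as "0002" keep their leading zero.
    dtype={"input_data_id": "string", "input_data_name": "string"},
)
print(df.to_dict("records"))
```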
@@ -1,6 +1,7 @@
  from __future__ import annotations

  import argparse
+ import copy
  import json
  import logging
  import multiprocessing
@@ -182,7 +183,7 @@ class UpdateMetadata(CommandLine):
  sys.exit(COMMAND_LINE_ERROR_STATUS_CODE)

  assert input_data_id_list is not None, "'--metadata'を指定したときは'--input_data_id'は必須です。"
- metadata_by_input_data_id = {input_data_id: metadata for input_data_id in input_data_id_list}
+ metadata_by_input_data_id = {input_data_id: copy.deepcopy(metadata) for input_data_id in input_data_id_list}

  elif args.metadata_by_input_data_id is not None:
  metadata_by_input_data_id = annofabcli.common.cli.get_json_from_args(args.metadata_by_input_data_id)
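The `copy.deepcopy` above avoids aliasing: with the old comprehension every `input_data_id` was mapped to the same `metadata` dict object, so an in-place change to one entry would silently change them all. A small self-contained illustration (not code from the package):

```python
import copy

metadata = {"category": "sample"}
input_data_id_list = ["id1", "id2"]

# New behavior: each id gets its own copy, so entries can be modified independently.
independent = {i: copy.deepcopy(metadata) for i in input_data_id_list}
independent["id1"]["category"] = "changed"
print(independent["id2"]["category"])  # -> "sample"

# Old behavior: every value is the same dict object, so the change leaks to id2.
shared = {i: metadata for i in input_data_id_list}
shared["id1"]["category"] = "changed"
print(shared["id2"]["category"])  # -> "changed"
```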
@@ -5,7 +5,6 @@ from pathlib import Path
  from typing import Any, Optional

  import more_itertools
- import numpy
  import pandas
  import requests
  from annofabapi.models import ProjectMemberRole, ProjectMemberStatus
@@ -26,8 +25,8 @@ class Member(DataClassJsonMixin):

  user_id: str
  member_role: ProjectMemberRole
- sampling_inspection_rate: Optional[int]
- sampling_acceptance_rate: Optional[int]
+ sampling_inspection_rate: Optional[int] = None
+ sampling_acceptance_rate: Optional[int] = None


  class PutProjectMembers(CommandLine):
@@ -44,7 +43,7 @@ class PutProjectMembers(CommandLine):
  def member_exists(members: list[dict[str, Any]], user_id: str) -> bool:
  return PutProjectMembers.find_member(members, user_id) is not None

- def invite_project_member(self, project_id: str, member: Member, old_project_members: list[dict[str, Any]]): # noqa: ANN201
+ def invite_project_member(self, project_id: str, member: Member, old_project_members: list[dict[str, Any]]) -> dict[str, Any]:
  old_member = self.find_member(old_project_members, member.user_id)
  last_updated_datetime = old_member["updated_datetime"] if old_member is not None else None

@@ -58,7 +57,7 @@ class PutProjectMembers(CommandLine):
  updated_project_member = self.service.api.put_project_member(project_id, member.user_id, request_body=request_body)[0]
  return updated_project_member

- def delete_project_member(self, project_id: str, deleted_member: dict[str, Any]): # noqa: ANN201
+ def delete_project_member(self, project_id: str, deleted_member: dict[str, Any]) -> dict[str, Any]:
  request_body = {
  "member_status": ProjectMemberStatus.INACTIVE.value,
  "member_role": deleted_member["member_role"],
@@ -67,7 +66,7 @@ class PutProjectMembers(CommandLine):
  updated_project_member = self.service.api.put_project_member(project_id, deleted_member["user_id"], request_body=request_body)[0]
  return updated_project_member

- def put_project_members(self, project_id: str, members: list[Member], delete: bool = False): # noqa: ANN201, FBT001, FBT002
+ def put_project_members(self, project_id: str, members: list[Member], *, delete: bool = False) -> None:
  """
  プロジェクトメンバを一括で登録する。

@@ -88,7 +87,7 @@ class PutProjectMembers(CommandLine):

  count_invite_members = 0
  # プロジェクトメンバを登録
- logger.info(f"{project_title} に、{len(members)} 件のプロジェクトメンバを登録します。")
+ logger.info(f"プロジェクト '{project_title}' に、{len(members)} 件のプロジェクトメンバを登録します。")
  for member in members:
  if member.user_id == self.service.api.login_user_id:
  logger.debug(f"ユーザ '{member.user_id}'は自分自身なので、登録しません。")
@@ -99,7 +98,7 @@ class PutProjectMembers(CommandLine):
  continue

  message_for_confirm = (
- f"ユーザ '{member.user_id}'を、{project_title} プロジェクトのメンバに登録しますか?member_role={member.member_role.value}"
+ f"ユーザ '{member.user_id}'を、プロジェクト'{project_title}'のメンバーに登録しますか? member_role='{member.member_role.value}'"
  )
  if not self.confirm_processing(message_for_confirm):
  continue
@@ -107,14 +106,15 @@ class PutProjectMembers(CommandLine):
  # メンバを登録
  try:
  self.invite_project_member(project_id, member, old_project_members)
- logger.debug(f"user_id = {member.user_id}, member_role = {member.member_role.value} のユーザをプロジェクトメンバに登録しました。")
+ logger.debug(f"user_id = '{member.user_id}', member_role = '{member.member_role.value}' のユーザをプロジェクトメンバに登録しました。")
  count_invite_members += 1

- except requests.exceptions.HTTPError as e:
- logger.warning(e)
- logger.warning(f"プロジェクトメンバの登録に失敗しました。user_id = {member.user_id}, member_role = {member.member_role.value}")
+ except requests.exceptions.HTTPError:
+ logger.warning(
+ f"プロジェクトメンバの登録に失敗しました。user_id = '{member.user_id}', member_role = '{member.member_role.value}'", exc_info=True
+ )

- logger.info(f"{project_title} に、{count_invite_members} / {len(members)} 件のプロジェクトメンバを登録しました。")
+ logger.info(f"プロジェクト'{project_title}' に、{count_invite_members} / {len(members)} 件のプロジェクトメンバを登録しました。")

  # プロジェクトメンバを削除
  if delete:
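The error handling above replaces two `logger.warning` calls with a single call that passes `exc_info=True`, which attaches the active exception and its traceback to the warning record. A short illustration (the raised exception here is a stand-in, not an actual `requests` error):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

try:
    raise RuntimeError("simulated HTTP error from put_project_member")
except RuntimeError:
    # One log record carries both the message and the traceback.
    logger.warning("プロジェクトメンバの登録に失敗しました。user_id = 'alice'", exc_info=True)
```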
@@ -125,7 +125,7 @@ class PutProjectMembers(CommandLine):
  ]

  count_delete_members = 0
- logger.info(f"{project_title} から、{len(deleted_members)} 件のプロジェクトメンバを削除します。")
+ logger.info(f"プロジェクト '{project_title}' から、{len(deleted_members)} 件のプロジェクトメンバを削除します。")
  for deleted_member in deleted_members:
  message_for_confirm = f"ユーザ '{deleted_member['user_id']}'を、{project_title} のプロジェクトメンバから削除しますか?"
  if not self.confirm_processing(message_for_confirm):
@@ -135,31 +135,18 @@ class PutProjectMembers(CommandLine):
  self.delete_project_member(project_id, deleted_member)
  logger.debug(f"ユーザ '{deleted_member['user_id']}' をプロジェクトメンバから削除しました。")
  count_delete_members += 1
- except requests.exceptions.HTTPError as e:
- logger.warning(e)
- logger.warning(f"プロジェクトメンバの削除に失敗しました。user_id = '{deleted_member['user_id']}' ")
+ except requests.exceptions.HTTPError:
+ logger.warning(f"プロジェクトメンバの削除に失敗しました。user_id = '{deleted_member['user_id']}' ", exc_info=True)

- logger.info(f"{project_title} から {count_delete_members} / {len(deleted_members)} 件のプロジェクトメンバを削除しました。")
+ logger.info(f"プロジェクト '{project_title}' から {count_delete_members} / {len(deleted_members)} 件のプロジェクトメンバを削除しました。")

  @staticmethod
  def get_members_from_csv(csv_path: Path) -> list[Member]:
- def create_member(e): # noqa: ANN001, ANN202
- return Member(
- user_id=e.user_id,
- member_role=ProjectMemberRole(e.member_role),
- sampling_inspection_rate=e.sampling_inspection_rate,
- sampling_acceptance_rate=e.sampling_acceptance_rate,
- )
-
  df = pandas.read_csv(
- str(csv_path),
- sep=",",
- header=None,
- names=("user_id", "member_role", "sampling_inspection_rate", "sampling_acceptance_rate"),
- # IDは必ず文字列として読み込むようにする
- dtype={"user_id": str},
- ).replace({numpy.nan: None})
- members = [create_member(e) for e in df.itertuples()]
+ csv_path,
+ dtype={"user_id": "string", "member_role": "string", "sampling_inspection_rate": "Int64", "sampling_acceptance_rate": "Int64"},
+ )
+ members = [Member.from_dict(e) for e in df.to_dict("records")]
  return members

  def main(self) -> None:
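`get_members_from_csv` now relies on the CSV header row: `df.to_dict("records")` yields one dict per row keyed by column name, and the package maps each record onto the `Member` dataclass with `DataClassJsonMixin.from_dict`. The sketch below shows the same pattern with a hypothetical stand-alone dataclass and plain keyword construction, so it runs without annofabapi:

```python
import io
from dataclasses import dataclass
from typing import Optional

import pandas


@dataclass
class MemberRow:  # hypothetical simplified stand-in for the package's Member class
    user_id: str
    member_role: str
    sampling_inspection_rate: Optional[int] = None
    sampling_acceptance_rate: Optional[int] = None


csv_text = """user_id,member_role,sampling_inspection_rate,sampling_acceptance_rate
alice,owner,,
bob,worker,50,80
"""

df = pandas.read_csv(
    io.StringIO(csv_text),
    # "Int64" is pandas' nullable integer dtype, so empty cells stay missing instead of becoming floats.
    dtype={"user_id": "string", "sampling_inspection_rate": "Int64", "sampling_acceptance_rate": "Int64"},
)
members = [MemberRow(**row) for row in df.to_dict("records")]
print(members)
```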
@@ -184,12 +171,14 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
  type=str,
  required=True,
  help=(
- "プロジェクトメンバが記載されたCVファイルのパスを指定してください。"
- "CSVのフォーマットは、「1列目:user_id(required), 2列目:member_role(required), "
- "3列目:sampling_inspection_rate, 4列目:sampling_acceptance_rate, ヘッダ行なし, カンマ区切り」です。"
- "member_roleは ``owner``, ``worker``, ``accepter``, ``training_data_user`` のいずれかです。"
- "sampling_inspection_rate, sampling_acceptance_rate を省略した場合は未設定になります。"
- "ただし自分自身は登録しません。"
+ "プロジェクトメンバが記載されたCSVファイルのパスを指定してください。"
+ "CSVのフォーマットは、ヘッダあり、カンマ区切りです。\n"
+ " * user_id (required)\n"
+ " * member_role (required)\n"
+ " * sampling_inspection_rate\n"
+ " * sampling_acceptance_rate\n"
+ "member_roleには ``owner``, ``worker``, ``accepter``, ``training_data_user`` のいずれかを指定します。\n"
+ "自分自身は登録できません。"
  ),
  )

@@ -202,10 +191,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:

  def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argparse.ArgumentParser:
  subcommand_name = "put"
- subcommand_help = "プロジェクトメンバを登録する。"
- description = "プロジェクトメンバを登録する。"
- epilog = "オーナロールを持つユーザで実行してください。"
+ subcommand_help = "プロジェクトメンバを登録します。"
+ epilog = "オーナーロールを持つユーザで実行してください。"

- parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
+ parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, epilog=epilog)
  parse_args(parser)
  return parser
@@ -552,7 +552,7 @@ class AttributeCountCsv:

  # アノテーション数の列のNaNを0に変換する
  value_columns = self._value_columns(counter_list, prior_attribute_columns)
- df = df.fillna({column: 0 for column in value_columns})
+ df = df.fillna(dict.fromkeys(value_columns, 0))

  print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)

@@ -655,7 +655,7 @@ class LabelCountCsv:

  # アノテーション数列のNaNを0に変換する
  value_columns = self._value_columns(counter_list, prior_label_columns)
- df = df.fillna({column: 0 for column in value_columns})
+ df = df.fillna(dict.fromkeys(value_columns, 0))

  print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)

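The `dict.fromkeys` rewrites in this and the following hunks are purely mechanical: `dict.fromkeys(keys, 0)` builds the same mapping as `{col: 0 for col in keys}`, so the `fillna` calls behave exactly as before. A short sketch with made-up column names:

```python
import pandas

df = pandas.DataFrame({"task_id": ["t1", "t2"], "label_a": [1, None], "label_b": [None, 2]})
value_columns = ["label_a", "label_b"]

assert dict.fromkeys(value_columns, 0) == {column: 0 for column in value_columns}

# NaN only in the annotation-count columns is replaced with 0; task_id is untouched.
df = df.fillna(dict.fromkeys(value_columns, 0))
print(df)
```

Because `dict.fromkeys` reuses the same value object for every key, the shortcut is only appropriate for immutable fill values such as `0`, which is the case at all of these call sites.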
@@ -411,7 +411,7 @@ class AnnotationDurationCsvByAttribute:

  # アノテーション数の列のNaNを0に変換する
  value_columns = self._value_columns(annotation_duration_list, prior_attribute_columns)
- df = df.fillna({column: 0 for column in value_columns})
+ df = df.fillna(dict.fromkeys(value_columns, 0))
  return df


@@ -473,7 +473,7 @@ class AnnotationDurationCsvByLabel:

  # アノテーション数列のNaNを0に変換する
  value_columns = self._value_columns(annotation_duration_list, prior_label_columns)
- df = df.fillna({column: 0 for column in value_columns})
+ df = df.fillna(dict.fromkeys(value_columns, 0))

  return df

@@ -181,17 +181,17 @@ class AbstractPhaseProductivityPerDate(abc.ABC):

  # その他の欠損値(作業時間や生産量)を0で埋める
  df2 = df2.fillna(
- {
- col: 0
- for col in [
+ dict.fromkeys(
+ [
  "annotation_worktime_hour",
  "inspection_worktime_hour",
  "acceptance_worktime_hour",
  "task_count",
  "inspection_comment_count",
  *self.production_volume_columns,
- ]
- }
+ ],
+ 0,
+ )
  )

  return df2
@@ -571,7 +571,7 @@ class UserPerformance:
  level0_columns = ["monitored_worktime_hour", *task_worktime_by_phase_user.quantity_columns]
  columns = [(c0, c1) for c0, c1 in df.columns if c0 in level0_columns]

- return df.fillna({col: 0 for col in columns})
+ return df.fillna(dict.fromkeys(columns, 0))

  if task_completion_criteria == TaskCompletionCriteria.ACCEPTANCE_REACHED:
  # 受入フェーズに到達したらタスクの作業が完了したとみなす場合、
@@ -634,8 +634,8 @@ class UserPerformance:
  ]

  value_columns = columns - set(basic_columns)
- dtypes = {col: "string" for col in basic_columns}
- dtypes.update({col: "float64" for col in value_columns})
+ dtypes = dict.fromkeys(basic_columns, "string")
+ dtypes.update(dict.fromkeys(value_columns, "float64"))
  return df.astype(dtypes)

  def _validate_df_for_output(self, output_file: Path) -> bool:
@@ -197,8 +197,8 @@ class WholePerformance:
  ("working_days", ""),
  ]

- data: dict[tuple[str, str], float] = {key: 0 for key in worktime_columns + count_columns}
- data.update({key: numpy.nan for key in ratio_columns + stdev_columns + date_columns})
+ data: dict[tuple[str, str], float] = dict.fromkeys(worktime_columns + count_columns, 0)
+ data.update(dict.fromkeys(ratio_columns + stdev_columns + date_columns, numpy.nan))

  return cls(pandas.Series(data), task_completion_criteria, custom_production_volume_list=custom_production_volume_list)

@@ -187,7 +187,7 @@ class WholeProductivityPerCompletedDate:
  df_agg_sub_task["task_count"] = df_sub_task.pivot_table(values=["task_id"], index=date_column, aggfunc="count").fillna(0)
  else:
  # 列だけ作る
- df_agg_sub_task = df_agg_sub_task.assign(**{key: 0 for key in production_volume_columns}, task_count=0)
+ df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(production_volume_columns, 0), task_count=0)

  df_worktime = worktime_per_date.df
  if len(df_worktime) > 0:
@@ -929,7 +929,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
  ).fillna(0)
  else:
  # 列だけ作る
- df_agg_sub_task = df_agg_sub_task.assign(**{key: 0 for key in value_columns}, task_count=0)
+ df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(value_columns, 0), task_count=0)

  # 日付の一覧を生成
  if len(df_agg_sub_task) > 0:
@@ -200,7 +200,7 @@ class WorktimePerDate:
  "monitored_inspection_worktime_hour",
  "monitored_acceptance_worktime_hour",
  ]
- df.fillna({c: 0 for c in value_columns}, inplace=True)
+ df.fillna(dict.fromkeys(value_columns, 0), inplace=True)

  df = df.merge(df_member, how="left", on="account_id")
  return df[
@@ -31,18 +31,11 @@ key: input_data_id, value: supplementary_data_idのList


  def get_input_data_supplementary_data_dict_from_csv(csv_path: Path) -> InputDataSupplementaryDataDict:
- df = pandas.read_csv(
- str(csv_path),
- sep=",",
- header=None,
- names=[
- "input_data_id",
- "supplementary_data_id",
- ],
- # IDは必ず文字列として読み込むようにする
- dtype={"input_data_id": str, "supplementary_data_id": str},
+ df: pandas.DataFrame = pandas.read_csv(
+ csv_path,
+ dtype={"input_data_id": "string", "supplementary_data_id": "string"},
  )
- input_data_dict = defaultdict(list)
+ input_data_dict: InputDataSupplementaryDataDict = defaultdict(list)
  for input_data_id, supplementary_data_id in zip(df["input_data_id"], df["supplementary_data_id"]):
  input_data_dict[input_data_id].append(supplementary_data_id)
  return input_data_dict
@@ -58,7 +51,7 @@ def get_input_data_supplementary_data_dict_from_list(supplementary_data_list: li


  class DeleteSupplementaryDataMain(CommandLineWithConfirm):
- def __init__(self, service: annofabapi.Resource, all_yes: bool = False) -> None: # noqa: FBT001, FBT002
+ def __init__(self, service: annofabapi.Resource, *, all_yes: bool = False) -> None:
  self.service = service
  self.facade = AnnofabApiFacade(service)
  CommandLineWithConfirm.__init__(self, all_yes)
@@ -82,7 +75,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):

  input_data = self.service.wrapper.get_input_data_or_none(project_id, input_data_id)
  if input_data is None:
- logger.warning(f"input_data_id={input_data_id} の入力データは存在しないのでスキップします。")
+ logger.warning(f"input_data_id='{input_data_id}' の入力データは存在しないのでスキップします。")
  return 0

  supplementary_data_list, _ = self.service.api.get_supplementary_data_list(project_id, input_data_id)
@@ -122,7 +115,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
  continue
  return deleted_count

- def delete_supplementary_data_list(self, project_id: str, input_data_dict: InputDataSupplementaryDataDict): # noqa: ANN201
+ def delete_supplementary_data_list(self, project_id: str, input_data_dict: InputDataSupplementaryDataDict) -> None:
  deleted_count = 0
  total_count = sum(len(e) for e in input_data_dict.values())
  for input_data_id, supplementary_data_id_list in input_data_dict.items():
@@ -168,7 +161,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):

  return deleted_count

- def delete_supplementary_data_list_by_input_data_id(self, project_id: str, input_data_id_list: list[str]): # noqa: ANN201
+ def delete_supplementary_data_list_by_input_data_id(self, project_id: str, input_data_id_list: list[str]) -> None:
  dict_deleted_count: dict[str, int] = {}
  for input_data_id in input_data_id_list:
  input_data = self.service.wrapper.get_input_data_or_none(project_id, input_data_id)
@@ -261,9 +254,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
  "削除する補助情報が記載されたCSVファイルのパスを指定してください。\n"
  "CSVのフォーマットは以下の通りです。"
  "詳細は https://annofab-cli.readthedocs.io/ja/latest/command_reference/supplementary/delete.html を参照してください。\n"
- " * ヘッダ行なし, カンマ区切り\n"
- " * 1列目: input_data_id (required)\n"
- " * 2列目: supplementary_data_id (required)\n"
+ " * ヘッダ行あり, カンマ区切り\n"
+ " * input_data_id (required)\n"
+ " * supplementary_data_id (required)\n"
  ),
  )

@@ -41,17 +41,17 @@ def convert_supplementary_data_name_to_supplementary_data_id(supplementary_data_


  @dataclass
- class CsvSupplementaryData(DataClassJsonMixin):
+ class CliSupplementaryData(DataClassJsonMixin):
  """
- CSVに記載されている補助情報
+ コマンドラインから指定された(`--csv`または`--json`)補助情報
  """

  input_data_id: str
- supplementary_data_number: int
  supplementary_data_name: str
  supplementary_data_path: str
  supplementary_data_id: Optional[str] = None
  supplementary_data_type: Optional[str] = None
+ supplementary_data_number: Optional[int] = None


  @dataclass
@@ -75,15 +75,13 @@ class SubPutSupplementaryData:

  Args:
  service:
- facade:
  all_yes:
  """

- def __init__(self, service: annofabapi.Resource, facade: AnnofabApiFacade, *, all_yes: bool = False) -> None:
+ def __init__(self, service: annofabapi.Resource, *, all_yes: bool = False) -> None:
  self.service = service
- self.facade = facade
  self.all_yes = all_yes
- self.supplementary_data_cache: dict[str, list[SupplementaryData]] = {}
+ self.supplementary_data_cache: dict[tuple[str, str], list[SupplementaryData]] = {}

  def put_supplementary_data(self, project_id: str, supplementary_data: SupplementaryDataForPut) -> None:
  file_path = get_file_scheme_path(supplementary_data.supplementary_data_path)
@@ -155,7 +153,7 @@ class SubPutSupplementaryData:
  return yes

  def confirm_put_supplementary_data(
- self, csv_supplementary_data: CsvSupplementaryData, supplementary_data_id: str, *, already_exists: bool = False
+ self, csv_supplementary_data: CliSupplementaryData, supplementary_data_id: str, *, already_exists: bool = False
  ) -> bool:
  if already_exists:
  message_for_confirm = (
@@ -170,66 +168,63 @@ class SubPutSupplementaryData:

  return self.confirm_processing(message_for_confirm)

- def get_supplementary_data_list_cached(self, project_id: str, input_data_id: str) -> list[SupplementaryData]:
- key = f"{project_id},{input_data_id}"
- if key not in self.supplementary_data_cache:
- supplementary_data_list, _ = self.service.api.get_supplementary_data_list(project_id, input_data_id)
- self.supplementary_data_cache[key] = supplementary_data_list if supplementary_data_list is not None else []
- return self.supplementary_data_cache[key]
-
- def get_supplementary_data_by_id(self, project_id: str, input_data_id: str, supplementary_data_id: str) -> Optional[SupplementaryData]:
- cached_list = self.get_supplementary_data_list_cached(project_id, input_data_id)
- return first_true(cached_list, pred=lambda e: e["supplementary_data_id"] == supplementary_data_id)
-
- def put_supplementary_data_main(self, project_id: str, csv_supplementary_data: CsvSupplementaryData, *, overwrite: bool = False) -> bool:
+ def put_supplementary_data_main(self, project_id: str, csv_data: CliSupplementaryData, *, overwrite: bool = False) -> bool:
  last_updated_datetime = None
- input_data_id = csv_supplementary_data.input_data_id
+ input_data_id = csv_data.input_data_id
  supplementary_data_id = (
- csv_supplementary_data.supplementary_data_id
- if csv_supplementary_data.supplementary_data_id is not None
- else convert_supplementary_data_name_to_supplementary_data_id(csv_supplementary_data.supplementary_data_name)
+ csv_data.supplementary_data_id
+ if csv_data.supplementary_data_id is not None
+ else convert_supplementary_data_name_to_supplementary_data_id(csv_data.supplementary_data_name)
  )
- supplementary_data_path = csv_supplementary_data.supplementary_data_path

- # input_data_idの存在確認
- if self.service.wrapper.get_input_data_or_none(project_id, input_data_id) is None:
+ supplementary_data_list = self.service.wrapper.get_supplementary_data_list_or_none(project_id, input_data_id)
+ if supplementary_data_list is None:
+ # 入力データが存在しない場合は、`supplementary_data_list`はNoneになる
  logger.warning(f"input_data_id='{input_data_id}'である入力データは存在しないため、補助情報の登録をスキップします。")
  return False

- old_supplementary_data = self.get_supplementary_data_by_id(project_id, input_data_id, supplementary_data_id)
+ old_supplementary_data = first_true(supplementary_data_list, pred=lambda e: e["supplementary_data_id"] == supplementary_data_id)
+
+ # 補助情報numberが未指定の場合は、既存の補助情報numberの最大値+1にする
+ max_supplementary_data_number = max((e["supplementary_data_number"] for e in supplementary_data_list), default=0)
+ if csv_data.supplementary_data_number is not None:
+ supplementary_data_number = csv_data.supplementary_data_number
+ elif old_supplementary_data is not None:
+ supplementary_data_number = old_supplementary_data["supplementary_data_number"]
+ else:
+ supplementary_data_number = max_supplementary_data_number + 1

  if old_supplementary_data is not None:
  if overwrite:
  logger.debug(
  f"supplementary_data_id='{supplementary_data_id}'である補助情報がすでに存在します。 :: "
- f"input_data_id='{input_data_id}', supplementary_data_name='{csv_supplementary_data.supplementary_data_name}'"
+ f"input_data_id='{input_data_id}', supplementary_data_name='{csv_data.supplementary_data_name}'"
  )
  last_updated_datetime = old_supplementary_data["updated_datetime"]
  else:
  logger.debug(
  f"supplementary_data_id='{supplementary_data_id}'である補助情報がすでに存在するので、補助情報の登録をスキップします。 :: "
- f"input_data_id='{input_data_id}', supplementary_data_name='{csv_supplementary_data.supplementary_data_name}'"
+ f"input_data_id='{input_data_id}', supplementary_data_name='{csv_data.supplementary_data_name}'"
  )
  return False

- file_path = get_file_scheme_path(supplementary_data_path)
- logger.debug(f"csv_supplementary_data='{csv_supplementary_data}'")
+ file_path = get_file_scheme_path(csv_data.supplementary_data_path)
  if file_path is not None: # noqa: SIM102
  if not Path(file_path).exists():
- logger.warning(f"'{supplementary_data_path}' は存在しません。")
+ logger.warning(f"'{csv_data.supplementary_data_path}' は存在しません。補助情報の登録をスキップします。")
  return False

- if not self.confirm_put_supplementary_data(csv_supplementary_data, supplementary_data_id, already_exists=last_updated_datetime is not None):
+ if not self.confirm_put_supplementary_data(csv_data, supplementary_data_id, already_exists=last_updated_datetime is not None):
  return False

  # 補助情報を登録
  supplementary_data_for_put = SupplementaryDataForPut(
- input_data_id=csv_supplementary_data.input_data_id,
+ input_data_id=csv_data.input_data_id,
  supplementary_data_id=supplementary_data_id,
- supplementary_data_name=csv_supplementary_data.supplementary_data_name,
- supplementary_data_path=csv_supplementary_data.supplementary_data_path,
- supplementary_data_type=csv_supplementary_data.supplementary_data_type,
- supplementary_data_number=csv_supplementary_data.supplementary_data_number,
+ supplementary_data_name=csv_data.supplementary_data_name,
+ supplementary_data_path=csv_data.supplementary_data_path,
+ supplementary_data_type=csv_data.supplementary_data_type,
+ supplementary_data_number=supplementary_data_number,
  last_updated_datetime=last_updated_datetime,
  )
  try:
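The hunk above also introduces a fallback for `supplementary_data_number`, which is now optional in the `--csv`/`--json` input: an explicitly supplied number wins, otherwise the number of the existing supplementary data with the same ID is kept, otherwise the maximum existing number plus one is used. Restated as a stand-alone sketch (not package code):

```python
from typing import Any, Optional


def resolve_supplementary_data_number(
    requested: Optional[int],
    old_entry: Optional[dict[str, Any]],
    existing_entries: list[dict[str, Any]],
) -> int:
    """Mirror of the fallback order used in put_supplementary_data_main."""
    if requested is not None:
        return requested
    if old_entry is not None:
        return old_entry["supplementary_data_number"]
    return max((e["supplementary_data_number"] for e in existing_entries), default=0) + 1


existing = [{"supplementary_data_number": 1}, {"supplementary_data_number": 3}]
print(resolve_supplementary_data_number(None, None, existing))         # -> 4
print(resolve_supplementary_data_number(None, existing[0], existing))  # -> 1
print(resolve_supplementary_data_number(7, None, existing))            # -> 7
```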
@@ -261,7 +256,7 @@ class PutSupplementaryData(CommandLine):
  def put_supplementary_data_list(
  self,
  project_id: str,
- supplementary_data_list: list[CsvSupplementaryData],
+ supplementary_data_list: list[CliSupplementaryData],
  *,
  overwrite: bool = False,
  parallelism: Optional[int] = None,
@@ -282,7 +277,7 @@ class PutSupplementaryData(CommandLine):

  count_put_supplementary_data = 0

- obj = SubPutSupplementaryData(service=self.service, facade=self.facade, all_yes=self.all_yes)
+ obj = SubPutSupplementaryData(service=self.service, all_yes=self.all_yes)
  if parallelism is not None:
  partial_func = partial(obj.put_supplementary_data_main, project_id, overwrite=overwrite)
  with Pool(parallelism) as pool:
@@ -291,46 +286,29 @@ class PutSupplementaryData(CommandLine):

  else:
  for csv_supplementary_data in supplementary_data_list:
- result = obj.put_supplementary_data_main(project_id, csv_supplementary_data=csv_supplementary_data, overwrite=overwrite)
+ result = obj.put_supplementary_data_main(project_id, csv_data=csv_supplementary_data, overwrite=overwrite)
  if result:
  count_put_supplementary_data += 1

  logger.info(f"{project_title} に、{count_put_supplementary_data} / {len(supplementary_data_list)} 件の補助情報を登録しました。")

  @staticmethod
- def get_supplementary_data_list_from_dict(supplementary_data_dict_list: list[dict[str, Any]]) -> list[CsvSupplementaryData]:
- return [CsvSupplementaryData.from_dict(e) for e in supplementary_data_dict_list]
+ def get_supplementary_data_list_from_dict(supplementary_data_dict_list: list[dict[str, Any]]) -> list[CliSupplementaryData]:
+ return [CliSupplementaryData.from_dict(e) for e in supplementary_data_dict_list]

  @staticmethod
- def get_supplementary_data_list_from_csv(csv_path: Path) -> list[CsvSupplementaryData]:
- def create_supplementary_data(e: Any) -> CsvSupplementaryData: # noqa: ANN401
- supplementary_data_id = e.supplementary_data_id if not pandas.isna(e.supplementary_data_id) else None
- supplementary_data_type = e.supplementary_data_type if not pandas.isna(e.supplementary_data_type) else None
- return CsvSupplementaryData(
- input_data_id=e.input_data_id,
- supplementary_data_number=e.supplementary_data_number,
- supplementary_data_name=e.supplementary_data_name,
- supplementary_data_path=e.supplementary_data_path,
- supplementary_data_id=supplementary_data_id,
- supplementary_data_type=supplementary_data_type,
- )
-
+ def get_supplementary_data_list_from_csv(csv_path: Path) -> list[CliSupplementaryData]:
  df = pandas.read_csv(
  str(csv_path),
- sep=",",
- header=None,
- names=(
- "input_data_id",
- "supplementary_data_number",
- "supplementary_data_name",
- "supplementary_data_path",
- "supplementary_data_id",
- "supplementary_data_type",
- ),
- # IDは必ず文字列として読み込むようにする
- dtype={"input_data_id": str, "supplementary_data_id": str, "supplementary_data_name": str},
+ dtype={
+ "input_data_id": "string",
+ "supplementary_data_id": "string",
+ "supplementary_data_name": "string",
+ "supplementary_data_path": "string",
+ "supplementary_data_number": "Int64",
+ },
  )
- supplementary_data_list = [create_supplementary_data(e) for e in df.itertuples()]
+ supplementary_data_list = [CliSupplementaryData.from_dict(e) for e in df.to_dict("records")]
  return supplementary_data_list

  COMMON_MESSAGE = "annofabcli supplementary_data put: error:"
@@ -393,15 +371,15 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
  "--csv",
  type=str,
  help=(
- "補助情報が記載されたCVファイルのパスを指定してください。CSVのフォーマットは、以下の通りです。\n"
+ "補助情報が記載されたCSVファイルのパスを指定してください。CSVのフォーマットは、以下の通りです。\n"
  "\n"
- " * ヘッダ行なし, カンマ区切り\n"
- " * 1列目: input_data_id (required)\n"
- " * 2列目: supplementary_data_number (required)\n"
- " * 3列目: supplementary_data_name (required)\n"
- " * 4列目: supplementary_data_path (required)\n"
- " * 5列目: supplementary_data_id\n"
- " * 6列目: supplementary_data_type\n"
+ " * ヘッダ行あり, カンマ区切り\n"
+ " * input_data_id (required)\n"
+ " * supplementary_data_name (required)\n"
+ " * supplementary_data_path (required)\n"
+ " * supplementary_data_id\n"
+ " * supplementary_data_type\n"
+ " * supplementary_data_number\n"
  "\n"
  "各項目の詳細は https://annofab-cli.readthedocs.io/ja/latest/command_reference/supplementary/put.html を参照してください。"
  ),
@@ -410,7 +388,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
  JSON_SAMPLE = [ # noqa: N806
  {
  "input_data_id": "input1",
- "supplementary_data_number": 1,
  "supplementary_data_name": "foo",
  "supplementary_data_path": "file://foo.jpg",
  }
@@ -431,7 +408,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
  parser.add_argument(
  "--overwrite",
  action="store_true",
- help="指定した場合、supplementary_data_id(省略時はsupplementary_data_number)がすでに存在していたら上書きします。指定しなければ、スキップします。",
+ help="指定した場合、supplementary_data_idがすでに存在していたら上書きします。指定しなければ、スキップします。",
  )

  parser.add_argument(
@@ -448,7 +425,7 @@ def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argpa
  subcommand_name = "put"
  subcommand_help = "補助情報を登録します。"
  description = "補助情報を登録します。"
- epilog = "オーナロールを持つユーザで実行してください。"
+ epilog = "オーナーロールを持つユーザで実行してください。"

  parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
  parse_args(parser)
@@ -1,6 +1,7 @@
  from __future__ import annotations

  import argparse
+ import copy
  import json
  import logging
  import multiprocessing
@@ -212,7 +213,7 @@ class UpdateMetadataOfTask(CommandLine):
  if args.metadata is not None:
  metadata = annofabcli.common.cli.get_json_from_args(args.metadata)
  assert task_id_list is not None, "'--metadata'を指定したときは'--task_id'は必須です。"
- metadata_by_task_id = {task_id: metadata for task_id in task_id_list}
+ metadata_by_task_id = {task_id: copy.deepcopy(metadata) for task_id in task_id_list}
  elif args.metadata_by_task_id is not None:
  metadata_by_task_id = annofabcli.common.cli.get_json_from_args(args.metadata_by_task_id)
  if task_id_list is not None:
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: annofabcli
- Version: 1.96.1
+ Version: 1.97.0
  Summary: Utility Command Line Interface for AnnoFab
  Home-page: https://github.com/kurusugawa-computer/annofab-cli
  License: MIT
@@ -20,7 +20,7 @@ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Utilities
  Requires-Dist: Pillow
  Requires-Dist: annofabapi (>=1.1,<2.0)
- Requires-Dist: bokeh (>=3.3,<4.0)
+ Requires-Dist: bokeh (>=3.3,<3.7)
  Requires-Dist: dictdiffer
  Requires-Dist: isodate
  Requires-Dist: jmespath
@@ -1,6 +1,6 @@
  annofabcli/__init__.py,sha256=NMA7kFxmLlCiILQPHJa9mEuqXxtLALw_dwyXYsvz4VM,71
  annofabcli/__main__.py,sha256=JzfycqVG9ENhWOCxTouZwpHwWTSrI-grLsaMudxjyBM,5283
- annofabcli/__version__.py,sha256=fRMeiNR_DG_Pyx4KmlvY_dwyMYU17Xi-HyO9fioFt3g,132
+ annofabcli/__version__.py,sha256=dqx_NARSo9UcLPvxc6zJBGKIFxXd0yc57SlNZJUxSbg,132
  annofabcli/annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/annotation/annotation_query.py,sha256=ke3W3RT1-WfFzwt-TXcQwGmghG34vcKJkM_jxgbNKjU,15922
  annofabcli/annotation/change_annotation_attributes.py,sha256=zHXyENZfbMGL_15xiK7Cy4cQ2sV0GjSVmKuPm3sOX7Y,17173
@@ -67,7 +67,7 @@ annofabcli/filesystem/mask_user_info.py,sha256=Evmr9QhSpMG900bbOXbJNHwXHapUlNfvV
  annofabcli/filesystem/merge_annotation.py,sha256=MkGy1T9F-1tHq_BS_L_mTtgKpOMmXscrydzfSc0JKAo,10588
  annofabcli/filesystem/subcommand_filesystem.py,sha256=ZM2td5iZYIQ3TCI-9xAue8LugFlIc3WMRXrJqnjJ8-s,1186
  annofabcli/input_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- annofabcli/input_data/change_input_data_name.py,sha256=8SMgSZbwYFl5Qvs7H6knQXi8yOc_dQuwd7xUvEcHYac,9751
+ annofabcli/input_data/change_input_data_name.py,sha256=vhii3jHaEE_J8jcJ2bW66VxC9k-jtWN4tSD9vnatON4,9665
  annofabcli/input_data/copy_input_data.py,sha256=Lbyq2aW5lmJCPB8-WHdOI93a2ND5E0qOWrhBHRluQyw,14656
  annofabcli/input_data/delete_input_data.py,sha256=GLg58XNz9_Njq9eq2sc2BAzfLy9tJefcDYL6J4TOim4,8836
  annofabcli/input_data/delete_metadata_key_of_input_data.py,sha256=PVv9HXQVFLKQh-4RSHas_ckFxDxtqAzWnXnWYFFYy08,8464
@@ -78,7 +78,7 @@ annofabcli/input_data/list_input_data.py,sha256=RBxsHyKg1bVIEQUFDkfrq-nJmEdEYNoC
  annofabcli/input_data/put_input_data.py,sha256=x54C-rLJVzr1YF2GlMR0w0HJReOE3E7YKiBeuh0RsTI,17934
  annofabcli/input_data/put_input_data_with_zip.py,sha256=SA4aMAwMBFgc9Lh0zmRCbmkXG4AMrcBqd5zeTSdr8lc,5566
  annofabcli/input_data/subcommand_input_data.py,sha256=X8EoxsF6PMiKrvk_r7PIe2D0WZuaPlgLJRuTiljPIdM,2048
- annofabcli/input_data/update_metadata_of_input_data.py,sha256=txdliirBrtoTabGEyWYLPclZC_DarEpRAY3MTsbViPA,11556
+ annofabcli/input_data/update_metadata_of_input_data.py,sha256=_cZh0GYGK6Lx5arKuTjblolkXsRdTlwuKcIHa8Nm5yQ,11583
  annofabcli/input_data/utils.py,sha256=F3mYbWmRwXfEtiIo9dvSysfuZUdyQzJmmq94eM-C5IY,723
  annofabcli/instruction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/instruction/copy_instruction.py,sha256=24zBMkelSI2mpJq7Esgzo3uB_f1Z3hitBeX68HFqV00,6704
@@ -117,7 +117,7 @@ annofabcli/project_member/copy_project_members.py,sha256=-ybJ6ftcbRq0Pl092aag5ku
  annofabcli/project_member/drop_project_members.py,sha256=t_Jqoc89JtnrIpZc1fECjO6s-qttDmsgHHTz03zJTF0,5856
  annofabcli/project_member/invite_project_members.py,sha256=bxciqakeSrAX5xjE6KwmhcHj4EU1haETcQ_0BYkR6O4,6377
  annofabcli/project_member/list_users.py,sha256=BoavUdDRoRIQjI7q_sQEZjDMcLmUwWOCZdJsR7JimJ8,4844
- annofabcli/project_member/put_project_members.py,sha256=0YZIUJYeblL-cHETnCC9R1oWUm_bLNN6LIIIlXH8G7c,9741
+ annofabcli/project_member/put_project_members.py,sha256=-2T-XlZw-I6Am4HDf9dsNDEmaflMDvmJvMzStiya_Fc,9183
  annofabcli/project_member/subcommand_project_member.py,sha256=6cOoMiZJ82j94rSSp1GzmYggGSm1egk3S9e_YYgzhe8,1466
  annofabcli/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/stat_visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -131,8 +131,8 @@ annofabcli/statistics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
  annofabcli/statistics/histogram.py,sha256=CvzDxT2cKLSnBGSqkZE6p92PayGxYYja1YyB24M4ALU,3245
  annofabcli/statistics/linegraph.py,sha256=0kr7jVBNMiM2ECYhv3Ry5RitElKerSl9ZKxbKzfiplI,12494
  annofabcli/statistics/list_annotation_attribute.py,sha256=87jjNCOXJUbWnmswMCLN7GTjGsBfqpFJ6hViWmnj8Y4,12557
- annofabcli/statistics/list_annotation_count.py,sha256=xsqXxRO21MW7Wbs96PEa8gHb3i7bxjQvoMHLB2eT-0g,50360
- annofabcli/statistics/list_annotation_duration.py,sha256=9PCg1IA_g4HoHHSfsMVb2zBMLfLJ3u9Id8Qa9L8Qhko,31923
+ annofabcli/statistics/list_annotation_count.py,sha256=GVlYYubWqjNLJD7GGxv1WivCYv0rMgYxRLeZsM3Y8hA,50344
+ annofabcli/statistics/list_annotation_duration.py,sha256=1OFvhi5QQJDcUO4iHi3lV2fDFK8ZFaAP8vlbGpVR2s0,31907
  annofabcli/statistics/list_video_duration.py,sha256=uNeMteRBX2JG_AWmcgMJj0Jzbq_qF7bvAwr25GmeIiw,9124
  annofabcli/statistics/list_worktime.py,sha256=C7Yu3IOW2EvhkJJv6gY3hNdS9_TOLmT_9LZsB7vLJ1o,6493
  annofabcli/statistics/scatter.py,sha256=IUCwXix9GbZb6V82wjjb5q2eamrT5HQsU_bzDTjAFnM,11011
@@ -148,16 +148,16 @@ annofabcli/statistics/visualization/dataframe/cumulative_productivity.py,sha256=
  annofabcli/statistics/visualization/dataframe/custom_production_volume.py,sha256=5ELLiQJ5sNKdVKmYYVeZW4nedDg1CVGxMDdF5TUUX5c,2142
  annofabcli/statistics/visualization/dataframe/input_data_count.py,sha256=wDRFtoIWw_Gy2bPZ7LBx3eMO3LdUdjbQKS9mncXav6I,1654
  annofabcli/statistics/visualization/dataframe/inspection_comment_count.py,sha256=RxpQzRy4U2hKEpgbksUXotcxH2sKz__NO20mxpMqK1w,4382
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py,sha256=KH4vUBLpC-M41EnGroeaOXdwVZz8j0u4HpvLitFbtaA,27250
+ annofabcli/statistics/visualization/dataframe/productivity_per_date.py,sha256=tMap7E3z7hibon1zJnZRJnbMmtzqh04ocoV0oxBpssU,27249
  annofabcli/statistics/visualization/dataframe/project_performance.py,sha256=hdTMPvLfGDMZFjpIl58GtTEOopsOvitbdaj5hQAEp8o,8496
  annofabcli/statistics/visualization/dataframe/task.py,sha256=KanuLy67ZGORdLry21eN7uSNzkoJvIre1JN7Bq-fRlg,23452
  annofabcli/statistics/visualization/dataframe/task_history.py,sha256=3b9e4ok6yKE5x647KzRqvp01P33XMAHLEEbLJ5GCmRo,2760
  annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py,sha256=AtlbeNIkttjLtuxtZYCyZin4eVKRvcYEMnLzEZtZUlY,13134
  annofabcli/statistics/visualization/dataframe/user.py,sha256=EHn7nlf6D6UX-gsVXy8m_3QaCsHsUhr0iy2rbNozOgc,1707
- annofabcli/statistics/visualization/dataframe/user_performance.py,sha256=8o0BB4uPzW6O2rReXyjEZfmGYzYidoWZLEQBm12qnng,57029
- annofabcli/statistics/visualization/dataframe/whole_performance.py,sha256=u_fuRUMCwuLjmCSKHJVeThGcbwBFQFmm2XgeLqnTgEE,12565
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py,sha256=uECDfdRxt0d9iQRpX9TISSd0q9SZ4XVzuY5FMu7X2kw,52145
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py,sha256=2FnyZFSZBfedkIMmRuAdx026deblT7GT3r2qe2Fg71M,21319
+ annofabcli/statistics/visualization/dataframe/user_performance.py,sha256=JXJg0oNpURa9HCF3zFYqynOFtNRKD5h9YNuLOq0_h00,57023
+ annofabcli/statistics/visualization/dataframe/whole_performance.py,sha256=qcQALkflNSX6QvvtOc9QEZnmQ-TtkjsQkT1vmZ3k0_M,12561
+ annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py,sha256=W8g9_4WcUHxvPw31gUY_MVheBfllsHHY6g-M0FKScUk,52141
+ annofabcli/statistics/visualization/dataframe/worktime_per_date.py,sha256=56izC5flXOiu_lKPSk0s0aNGgqr-FiVA4t6pBWSNuwk,21321
  annofabcli/statistics/visualization/filtering_query.py,sha256=0_3QcS1weGIC9THH7NGqjYCb4rDLiftnmA-2MVgeNDI,4174
  annofabcli/statistics/visualization/model.py,sha256=5LSp0u63nqeILUl3XdYEhqqi_hq2rFbzyKHYlXZo2S0,896
  annofabcli/statistics/visualization/project_dir.py,sha256=ijwuGngRaK8TczAaDQoI2VnPvsmiaT-xoGWMHFCLVgo,24071
@@ -167,9 +167,9 @@ annofabcli/statistics/visualize_annotation_duration.py,sha256=J4Z3Ql1sdiTraMnRaR
  annofabcli/statistics/visualize_statistics.py,sha256=3bP9WRALADkAxPePa1Ix49apahi1V_qjAL6Dbrwtxd0,39550
  annofabcli/statistics/visualize_video_duration.py,sha256=A6Q91aWx7GO4F0TGxbiQI0za8BFr7IqiUaYyw3__BgY,16842
  annofabcli/supplementary/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- annofabcli/supplementary/delete_supplementary_data.py,sha256=QeOohZ6RnsxC9rKSPi0JJqJXmj99ZL17psulTqlEQFU,13738
+ annofabcli/supplementary/delete_supplementary_data.py,sha256=BZ5NzEQ7CdOIxfCHY7vTLG5vutueXNPh6Teyno9NY-U,13542
  annofabcli/supplementary/list_supplementary_data.py,sha256=F4iJnQi_4W7S_d7ahqxWFSFcnfiKYhNuysC28v0QUWA,7649
- annofabcli/supplementary/put_supplementary_data.py,sha256=FwFRszjcsJt-UrT2RCRAs4zzYea03CbDoiKBUZQW0zc,19581
+ annofabcli/supplementary/put_supplementary_data.py,sha256=-ZfqdmirgXFyTNZ8t38IWatPzIL7X4s805xFY5Szar8,18073
  annofabcli/supplementary/subcommand_supplementary.py,sha256=F8qfuNQzgW5HV1QKB4h0DWN7-kPVQcoFQwPfW_vjZVk,1079
  annofabcli/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/task/cancel_acceptance.py,sha256=rHKmAzQE1a25szVWb7dexokeW32V12bkyDIYM_oIl5U,13899
@@ -189,7 +189,7 @@ annofabcli/task/put_tasks.py,sha256=hT2xPowJmcNJhjxoAm-MFiKTw_RFcJUYlpeanegVrAU,
  annofabcli/task/put_tasks_by_count.py,sha256=MUHfWhqtSAXnB3O36p3bMSSgQ_3Zek9GT5qRvHGx8Lo,6041
  annofabcli/task/reject_tasks.py,sha256=5ByAN6VnKwvU5BT_cfsHwA1jLDl74bonqk3bwtnrkPU,23139
  annofabcli/task/subcommand_task.py,sha256=L_5Dwe58eblrtOrUYxjJAvkSmu6savRUxIqGjsFq-R4,2436
- annofabcli/task/update_metadata_of_task.py,sha256=3_BxVm9UPsJH9h3CiFoPaxQEx801Uehlsvzz1HOzKM4,12783
+ annofabcli/task/update_metadata_of_task.py,sha256=n8RmegF3A1r-ZDOzn6SzNoHuqNuzsSJM9xUJ6EXvTf0,12810
  annofabcli/task_history/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/task_history/download_task_history_json.py,sha256=xZnGNKkPYT6RXIUd_wUqZfPiVpGdW5MTBUSpNKbOAC4,2370
  annofabcli/task_history/list_all_task_history.py,sha256=VY99AEHTSG71O2HQXS81oPX6AAusbT1scika9QoUZEU,6780
@@ -200,8 +200,8 @@ annofabcli/task_history_event/download_task_history_event_json.py,sha256=hQLVbQ0
  annofabcli/task_history_event/list_all_task_history_event.py,sha256=JQEgwOIXbbTIfeX23AVaoySHViOR9UGm9uoXuhVEBqo,6446
  annofabcli/task_history_event/list_worktime.py,sha256=9jsRYa2C9bva8E1Aqxv9CCKDuCP0MvbiaIyQFTDpjqY,13150
  annofabcli/task_history_event/subcommand_task_history_event.py,sha256=mJVJoT4RXk4HWnY7-Nrsl4If-gtaIIEXd2z7eFZwM2I,1260
- annofabcli-1.96.1.dist-info/LICENSE,sha256=pcqWYfxFtxBzhvKp3x9MXNM4xciGb2eFewaRhXUNHlo,1081
- annofabcli-1.96.1.dist-info/METADATA,sha256=u94b-Jzx5b2TOxFlJeBqtJbcbkkBxEgCtLomabfLhP4,5626
- annofabcli-1.96.1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- annofabcli-1.96.1.dist-info/entry_points.txt,sha256=A8vlN9fiMhbYRcdBfSpl7piYzAwvkMhRXIPQUAvQFUo,55
- annofabcli-1.96.1.dist-info/RECORD,,
+ annofabcli-1.97.0.dist-info/LICENSE,sha256=pcqWYfxFtxBzhvKp3x9MXNM4xciGb2eFewaRhXUNHlo,1081
+ annofabcli-1.97.0.dist-info/METADATA,sha256=71xtnSNlS97JSBeUVYWYcoas9gqXjL9v735krW5zioc,5626
+ annofabcli-1.97.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ annofabcli-1.97.0.dist-info/entry_points.txt,sha256=A8vlN9fiMhbYRcdBfSpl7piYzAwvkMhRXIPQUAvQFUo,55
+ annofabcli-1.97.0.dist-info/RECORD,,