annofabcli 1.96.0__py3-none-any.whl → 1.97.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in one of the supported public registries. It is provided for informational purposes only.
annofabcli/__version__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "1.96.0" # `poetry-dynamic-versioning`を使ってGitHubのバージョンタグを取得している。変更不要
1
+ __version__ = "1.97.0" # `poetry-dynamic-versioning`を使ってGitHubのバージョンタグを取得している。変更不要
@@ -143,11 +143,9 @@ def create_changed_input_data_list_from_csv(csv_file: Path) -> list[ChangedInput
143
143
  A list of the input data to be changed
144
144
  """
145
145
  df_input_data = pandas.read_csv(
146
- str(csv_file),
147
- header=None,
148
- names=("input_data_id", "input_data_name"),
146
+ csv_file,
149
147
  # Read the values as strings
150
- dtype={"input_data_id": str, "input_data_name": str},
148
+ dtype={"input_data_id": "string", "input_data_name": "string"},
151
149
  )
152
150
 
153
151
  input_data_dict_list = df_input_data.to_dict("records")
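The dtype change above switches from plain `str` to pandas' nullable "string" dtype. A minimal, standalone sketch of the practical difference, with a made-up CSV that is not from the package:

```python
import io

import pandas

csv_text = "input_data_id,input_data_name\nid1,name1\nid2,\n"
df = pandas.read_csv(io.StringIO(csv_text), dtype={"input_data_id": "string", "input_data_name": "string"})

# The nullable "string" dtype guarantees every present value is a str and marks
# missing cells as pandas.NA, whereas dtype=str leaves missing cells as float NaN.
print(df.dtypes)              # input_data_id: string, input_data_name: string
print(df.to_dict("records"))  # the empty input_data_name cell comes back as pandas.NA
```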
@@ -211,9 +209,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
211
209
  "変更対象の入力データが記載されたCSVファイルのパスを指定してください。\n"
212
210
  "CSVのフォーマットは以下の通りです。"
213
211
  "\n"
214
- " * ヘッダ行なし, カンマ区切り\n"
215
- " * 1列目: input_data_id (required)\n"
216
- " * 2列目: input_data_name (required)\n"
212
+ " * ヘッダ行あり, カンマ区切り\n"
213
+ " * input_data_id (required)\n"
214
+ " * input_data_name (required)\n"
217
215
  ),
218
216
  )
219
217
 
@@ -1,7 +1,7 @@
1
1
  import argparse
2
2
  import logging
3
+ import re
3
4
  import sys
4
- import uuid
5
5
  from dataclasses import dataclass
6
6
  from functools import partial
7
7
  from multiprocessing import Pool
@@ -25,14 +25,11 @@ from annofabcli.common.cli import (
25
25
  get_json_from_args,
26
26
  prompt_yesnoall,
27
27
  )
28
- from annofabcli.common.dataclasses import WaitOptions
29
28
  from annofabcli.common.facade import AnnofabApiFacade
30
29
  from annofabcli.common.utils import get_file_scheme_path
31
30
 
32
31
  logger = logging.getLogger(__name__)
33
32
 
34
- DEFAULT_WAIT_OPTIONS = WaitOptions(interval=60, max_tries=360)
35
-
36
33
 
37
34
  @dataclass
38
35
  class CsvInputData(DataClassJsonMixin):
@@ -43,7 +40,6 @@ class CsvInputData(DataClassJsonMixin):
43
40
  input_data_name: str
44
41
  input_data_path: str
45
42
  input_data_id: Optional[str] = None
46
- sign_required: Optional[bool] = None
47
43
 
48
44
 
49
45
  @dataclass
@@ -55,7 +51,14 @@ class InputDataForPut(DataClassJsonMixin):
55
51
  input_data_name: str
56
52
  input_data_path: str
57
53
  input_data_id: str
58
- sign_required: Optional[bool]
54
+
55
+
56
+ def convert_input_data_name_to_input_data_id(input_data_name: str) -> str:
57
+ """
58
+ Generates an input data ID from the input data name.
59
+ * Characters that are not allowed in an ID are replaced with `__`.
60
+ """
61
+ return re.sub(r"[^a-zA-Z0-9_.-]", "__", input_data_name)
59
62
 
60
63
 
61
64
  def read_input_data_csv(csv_file: Path) -> pandas.DataFrame:
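The new helper replaces the `uuid.uuid4()` fallback removed further down. A quick illustration of the rule with a hypothetical input data name:

```python
import re

def convert_input_data_name_to_input_data_id(input_data_name: str) -> str:
    # Each character that is not allowed in an ID becomes "__".
    return re.sub(r"[^a-zA-Z0-9_.-]", "__", input_data_name)

# "/" and the space are not valid ID characters, so each is replaced.
assert convert_input_data_name_to_input_data_id("scene01/frame 001.png") == "scene01__frame__001.png"
```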
@@ -64,7 +67,6 @@ def read_input_data_csv(csv_file: Path) -> pandas.DataFrame:
64
67
  * input_data_name
65
68
  * input_data_path
66
69
  * input_data_id
67
- * sign_required
68
70
 
69
71
  Args:
70
72
  csv_file (Path): Path to the CSV file
@@ -76,6 +78,12 @@ def read_input_data_csv(csv_file: Path) -> pandas.DataFrame:
76
78
  str(csv_file),
77
79
  sep=",",
78
80
  header=None,
81
+ # Why "sign_required" is passed in the names argument:
82
+ # In v1.96.0 and earlier, the `sign_required` column was read, so some users have 4-column CSVs.
83
+ # When a CSV has more columns than `names` specifies, column names are assigned starting from the rightmost column.
84
+ # (https://qiita.com/yuji38kwmt/items/ac46c3d0ccac109410ba)
85
+ # Without "sign_required", a 4-column CSV therefore could not be read correctly.
86
+ # "sign_required" is kept so that 4-column CSVs remain supported for a while.
79
87
  names=("input_data_name", "input_data_path", "input_data_id", "sign_required"),
80
88
  # Always read the IDs and names as strings
81
89
  dtype={"input_data_id": str, "input_data_name": str},
@@ -126,8 +134,8 @@ class SubPutInputData:
126
134
 
127
135
  file_path = get_file_scheme_path(csv_input_data.input_data_path)
128
136
  if file_path is not None:
129
- request_body.update({"input_data_name": csv_input_data.input_data_name, "sign_required": csv_input_data.sign_required})
130
- logger.debug(f"'{file_path}'を入力データとして登録します。input_data_name={csv_input_data.input_data_name}")
137
+ request_body.update({"input_data_name": csv_input_data.input_data_name})
138
+ logger.debug(f"'{file_path}'を入力データとして登録します。input_data_name='{csv_input_data.input_data_name}'")
131
139
  self.service.wrapper.put_input_data_from_file(
132
140
  project_id, input_data_id=csv_input_data.input_data_id, file_path=file_path, request_body=request_body
133
141
  )
@@ -137,7 +145,6 @@ class SubPutInputData:
137
145
  {
138
146
  "input_data_name": csv_input_data.input_data_name,
139
147
  "input_data_path": csv_input_data.input_data_path,
140
- "sign_required": csv_input_data.sign_required,
141
148
  }
142
149
  )
143
150
 
@@ -181,12 +188,13 @@ class SubPutInputData:
181
188
 
182
189
  return self.confirm_processing(message_for_confirm)
183
190
 
184
- def put_input_data_main(self, project_id: str, csv_input_data: CsvInputData, overwrite: bool = False) -> bool: # noqa: FBT001, FBT002
191
+ def put_input_data_main(self, project_id: str, csv_input_data: CsvInputData, *, overwrite: bool = False) -> bool:
185
192
  input_data = InputDataForPut(
186
193
  input_data_name=csv_input_data.input_data_name,
187
194
  input_data_path=csv_input_data.input_data_path,
188
- input_data_id=csv_input_data.input_data_id if csv_input_data.input_data_id is not None else str(uuid.uuid4()),
189
- sign_required=csv_input_data.sign_required,
195
+ input_data_id=csv_input_data.input_data_id
196
+ if csv_input_data.input_data_id is not None
197
+ else convert_input_data_name_to_input_data_id(csv_input_data.input_data_name),
190
198
  )
191
199
 
192
200
  last_updated_datetime = None
@@ -194,10 +202,10 @@ class SubPutInputData:
194
202
 
195
203
  if dict_input_data is not None:
196
204
  if overwrite:
197
- logger.debug(f"input_data_id={input_data.input_data_id} はすでに存在します。")
205
+ logger.debug(f"input_data_id='{input_data.input_data_id}' はすでに存在します。")
198
206
  last_updated_datetime = dict_input_data["updated_datetime"]
199
207
  else:
200
- logger.debug(f"input_data_id={input_data.input_data_id} がすでに存在するのでスキップします。")
208
+ logger.debug(f"input_data_id='{input_data.input_data_id}' がすでに存在するのでスキップします。")
201
209
  return False
202
210
 
203
211
  file_path = get_file_scheme_path(input_data.input_data_path)
@@ -278,14 +286,12 @@ class PutInputData(CommandLine):
278
286
 
279
287
  @staticmethod
280
288
  def get_input_data_list_from_df(df: pandas.DataFrame) -> list[CsvInputData]:
281
- def create_input_data(e: Any): # noqa: ANN202, ANN401
289
+ def create_input_data(e: Any) -> CsvInputData: # noqa: ANN401
282
290
  input_data_id = e.input_data_id if not pandas.isna(e.input_data_id) else None
283
- sign_required: Optional[bool] = e.sign_required if pandas.notna(e.sign_required) else None
284
291
  return CsvInputData(
285
292
  input_data_name=e.input_data_name,
286
293
  input_data_path=e.input_data_path,
287
294
  input_data_id=input_data_id,
288
- sign_required=sign_required,
289
295
  )
290
296
 
291
297
  input_data_list = [create_input_data(e) for e in df.itertuples()]
@@ -381,7 +387,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
381
387
  " * 1列目: input_data_name (required)\n"
382
388
  " * 2列目: input_data_path (required)\n"
383
389
  " * 3列目: input_data_id\n"
384
- " * 4列目: sign_required (bool)\n"
385
390
  ),
386
391
  )
387
392
 
@@ -400,25 +405,20 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
400
405
  parser.add_argument(
401
406
  "--overwrite",
402
407
  action="store_true",
403
- help="指定した場合、input_data_idがすでに存在していたら上書きします。指定しなければ、スキップします。"
404
- " ``--csv`` , ``--json`` を指定したときのみ有効なオプションです。",
408
+ help="指定した場合、input_data_idがすでに存在していたら上書きします。指定しなければ、スキップします。",
405
409
  )
406
410
 
407
411
  parser.add_argument(
408
412
  "--allow_duplicated_input_data",
409
413
  action="store_true",
410
- help=(
411
- "``--csv`` , ``--json`` に渡した入力データの重複(input_data_name, input_data_path)を許可します。\n"
412
- "``--csv`` , ``--json`` を指定したときのみ有効なオプションです。"
413
- ),
414
+ help=("``--csv`` , ``--json`` に渡した入力データの重複(input_data_name, input_data_path)を許可します。\n"),
414
415
  )
415
416
 
416
417
  parser.add_argument(
417
418
  "--parallelism",
418
419
  type=int,
419
420
  choices=PARALLELISM_CHOICES,
420
- help="並列度。指定しない場合は、逐次的に処理します。"
421
- "``--csv`` , ``--json`` を指定したときのみ有効なオプションです。また、必ず ``--yes`` を指定してください。",
421
+ help="並列度。指定しない場合は、逐次的に処理します。指定する場合は、 ``--yes`` も指定してください。",
422
422
  )
423
423
 
424
424
  parser.set_defaults(subcommand_func=main)
@@ -427,9 +427,8 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
427
427
  def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argparse.ArgumentParser:
428
428
  subcommand_name = "put"
429
429
  subcommand_help = "入力データを登録します。"
430
- description = "CSVに記載された情報から、入力データを登録します。"
431
430
  epilog = "オーナロールを持つユーザで実行してください。"
432
431
 
433
- parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
432
+ parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, epilog=epilog)
434
433
  parse_args(parser)
435
434
  return parser
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  import argparse
4
+ import copy
4
5
  import json
5
6
  import logging
6
7
  import multiprocessing
@@ -182,7 +183,7 @@ class UpdateMetadata(CommandLine):
182
183
  sys.exit(COMMAND_LINE_ERROR_STATUS_CODE)
183
184
 
184
185
  assert input_data_id_list is not None, "'--input_data_id' is required when '--metadata' is specified."
185
- metadata_by_input_data_id = {input_data_id: metadata for input_data_id in input_data_id_list}
186
+ metadata_by_input_data_id = {input_data_id: copy.deepcopy(metadata) for input_data_id in input_data_id_list}
186
187
 
187
188
  elif args.metadata_by_input_data_id is not None:
188
189
  metadata_by_input_data_id = annofabcli.common.cli.get_json_from_args(args.metadata_by_input_data_id)
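Presumably the `copy.deepcopy` guards against every input_data_id sharing one mutable metadata dict. A minimal sketch of the difference:

```python
import copy

input_data_id_list = ["id1", "id2"]

# Without deepcopy every ID points at the same dict object, so a later mutation leaks everywhere.
metadata = {"category": "car"}
shared = {input_data_id: metadata for input_data_id in input_data_id_list}
shared["id1"]["category"] = "bus"
print(shared["id2"]["category"])  # "bus" -- the change leaked to the other ID

# With deepcopy each ID owns an independent dict.
metadata = {"category": "car"}
independent = {input_data_id: copy.deepcopy(metadata) for input_data_id in input_data_id_list}
independent["id1"]["category"] = "bus"
print(independent["id2"]["category"])  # "car"
```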
@@ -5,7 +5,6 @@ from pathlib import Path
5
5
  from typing import Any, Optional
6
6
 
7
7
  import more_itertools
8
- import numpy
9
8
  import pandas
10
9
  import requests
11
10
  from annofabapi.models import ProjectMemberRole, ProjectMemberStatus
@@ -26,8 +25,8 @@ class Member(DataClassJsonMixin):
26
25
 
27
26
  user_id: str
28
27
  member_role: ProjectMemberRole
29
- sampling_inspection_rate: Optional[int]
30
- sampling_acceptance_rate: Optional[int]
28
+ sampling_inspection_rate: Optional[int] = None
29
+ sampling_acceptance_rate: Optional[int] = None
31
30
 
32
31
 
33
32
  class PutProjectMembers(CommandLine):
@@ -44,7 +43,7 @@ class PutProjectMembers(CommandLine):
44
43
  def member_exists(members: list[dict[str, Any]], user_id: str) -> bool:
45
44
  return PutProjectMembers.find_member(members, user_id) is not None
46
45
 
47
- def invite_project_member(self, project_id: str, member: Member, old_project_members: list[dict[str, Any]]): # noqa: ANN201
46
+ def invite_project_member(self, project_id: str, member: Member, old_project_members: list[dict[str, Any]]) -> dict[str, Any]:
48
47
  old_member = self.find_member(old_project_members, member.user_id)
49
48
  last_updated_datetime = old_member["updated_datetime"] if old_member is not None else None
50
49
 
@@ -58,7 +57,7 @@ class PutProjectMembers(CommandLine):
58
57
  updated_project_member = self.service.api.put_project_member(project_id, member.user_id, request_body=request_body)[0]
59
58
  return updated_project_member
60
59
 
61
- def delete_project_member(self, project_id: str, deleted_member: dict[str, Any]): # noqa: ANN201
60
+ def delete_project_member(self, project_id: str, deleted_member: dict[str, Any]) -> dict[str, Any]:
62
61
  request_body = {
63
62
  "member_status": ProjectMemberStatus.INACTIVE.value,
64
63
  "member_role": deleted_member["member_role"],
@@ -67,7 +66,7 @@ class PutProjectMembers(CommandLine):
67
66
  updated_project_member = self.service.api.put_project_member(project_id, deleted_member["user_id"], request_body=request_body)[0]
68
67
  return updated_project_member
69
68
 
70
- def put_project_members(self, project_id: str, members: list[Member], delete: bool = False): # noqa: ANN201, FBT001, FBT002
69
+ def put_project_members(self, project_id: str, members: list[Member], *, delete: bool = False) -> None:
71
70
  """
72
71
  Registers project members in bulk.
73
72
 
@@ -88,7 +87,7 @@ class PutProjectMembers(CommandLine):
88
87
 
89
88
  count_invite_members = 0
90
89
  # プロジェクトメンバを登録
91
- logger.info(f"{project_title} に、{len(members)} 件のプロジェクトメンバを登録します。")
90
+ logger.info(f"プロジェクト '{project_title}' に、{len(members)} 件のプロジェクトメンバを登録します。")
92
91
  for member in members:
93
92
  if member.user_id == self.service.api.login_user_id:
94
93
  logger.debug(f"ユーザ '{member.user_id}'は自分自身なので、登録しません。")
@@ -99,7 +98,7 @@ class PutProjectMembers(CommandLine):
99
98
  continue
100
99
 
101
100
  message_for_confirm = (
102
- f"ユーザ '{member.user_id}'を、{project_title} プロジェクトのメンバに登録しますか?member_role={member.member_role.value}"
101
+ f"ユーザ '{member.user_id}'を、プロジェクト'{project_title}'のメンバーに登録しますか? member_role='{member.member_role.value}'"
103
102
  )
104
103
  if not self.confirm_processing(message_for_confirm):
105
104
  continue
@@ -107,14 +106,15 @@ class PutProjectMembers(CommandLine):
107
106
  # メンバを登録
108
107
  try:
109
108
  self.invite_project_member(project_id, member, old_project_members)
110
- logger.debug(f"user_id = {member.user_id}, member_role = {member.member_role.value} のユーザをプロジェクトメンバに登録しました。")
109
+ logger.debug(f"user_id = '{member.user_id}', member_role = '{member.member_role.value}' のユーザをプロジェクトメンバに登録しました。")
111
110
  count_invite_members += 1
112
111
 
113
- except requests.exceptions.HTTPError as e:
114
- logger.warning(e)
115
- logger.warning(f"プロジェクトメンバの登録に失敗しました。user_id = {member.user_id}, member_role = {member.member_role.value}")
112
+ except requests.exceptions.HTTPError:
113
+ logger.warning(
114
+ f"プロジェクトメンバの登録に失敗しました。user_id = '{member.user_id}', member_role = '{member.member_role.value}'", exc_info=True
115
+ )
116
116
 
117
- logger.info(f"{project_title} に、{count_invite_members} / {len(members)} 件のプロジェクトメンバを登録しました。")
117
+ logger.info(f"プロジェクト'{project_title}' に、{count_invite_members} / {len(members)} 件のプロジェクトメンバを登録しました。")
118
118
 
119
119
  # Delete project members
120
120
  if delete:
@@ -125,7 +125,7 @@ class PutProjectMembers(CommandLine):
125
125
  ]
126
126
 
127
127
  count_delete_members = 0
128
- logger.info(f"{project_title} から、{len(deleted_members)} 件のプロジェクトメンバを削除します。")
128
+ logger.info(f"プロジェクト '{project_title}' から、{len(deleted_members)} 件のプロジェクトメンバを削除します。")
129
129
  for deleted_member in deleted_members:
130
130
  message_for_confirm = f"ユーザ '{deleted_member['user_id']}'を、{project_title} のプロジェクトメンバから削除しますか?"
131
131
  if not self.confirm_processing(message_for_confirm):
@@ -135,31 +135,18 @@ class PutProjectMembers(CommandLine):
135
135
  self.delete_project_member(project_id, deleted_member)
136
136
  logger.debug(f"ユーザ '{deleted_member['user_id']}' をプロジェクトメンバから削除しました。")
137
137
  count_delete_members += 1
138
- except requests.exceptions.HTTPError as e:
139
- logger.warning(e)
140
- logger.warning(f"プロジェクトメンバの削除に失敗しました。user_id = '{deleted_member['user_id']}' ")
138
+ except requests.exceptions.HTTPError:
139
+ logger.warning(f"プロジェクトメンバの削除に失敗しました。user_id = '{deleted_member['user_id']}' ", exc_info=True)
141
140
 
142
- logger.info(f"{project_title} から {count_delete_members} / {len(deleted_members)} 件のプロジェクトメンバを削除しました。")
141
+ logger.info(f"プロジェクト '{project_title}' から {count_delete_members} / {len(deleted_members)} 件のプロジェクトメンバを削除しました。")
143
142
 
144
143
  @staticmethod
145
144
  def get_members_from_csv(csv_path: Path) -> list[Member]:
146
- def create_member(e): # noqa: ANN001, ANN202
147
- return Member(
148
- user_id=e.user_id,
149
- member_role=ProjectMemberRole(e.member_role),
150
- sampling_inspection_rate=e.sampling_inspection_rate,
151
- sampling_acceptance_rate=e.sampling_acceptance_rate,
152
- )
153
-
154
145
  df = pandas.read_csv(
155
- str(csv_path),
156
- sep=",",
157
- header=None,
158
- names=("user_id", "member_role", "sampling_inspection_rate", "sampling_acceptance_rate"),
159
- # Always read the ID as a string
160
- dtype={"user_id": str},
161
- ).replace({numpy.nan: None})
162
- members = [create_member(e) for e in df.itertuples()]
146
+ csv_path,
147
+ dtype={"user_id": "string", "member_role": "string", "sampling_inspection_rate": "Int64", "sampling_acceptance_rate": "Int64"},
148
+ )
149
+ members = [Member.from_dict(e) for e in df.to_dict("records")]
163
150
  return members
164
151
 
165
152
  def main(self) -> None:
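A standalone sketch of the new header-based reading path. `Member` is simplified here (`member_role` as a plain `str`; the real class uses `ProjectMemberRole`), and columns omitted from the CSV fall back to the dataclass defaults added above:

```python
import io
from dataclasses import dataclass
from typing import Optional

import pandas
from dataclasses_json import DataClassJsonMixin

@dataclass
class Member(DataClassJsonMixin):
    user_id: str
    member_role: str  # simplified; the real class uses the ProjectMemberRole enum
    sampling_inspection_rate: Optional[int] = None
    sampling_acceptance_rate: Optional[int] = None

csv_text = "user_id,member_role\nalice,owner\nbob,worker\n"
df = pandas.read_csv(io.StringIO(csv_text), dtype={"user_id": "string", "member_role": "string"})

# Keys absent from the CSV fall back to the dataclass defaults.
members = [Member.from_dict(e) for e in df.to_dict("records")]
print(members[0])  # Member(user_id='alice', member_role='owner', sampling_inspection_rate=None, sampling_acceptance_rate=None)
```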
@@ -184,12 +171,14 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
184
171
  type=str,
185
172
  required=True,
186
173
  help=(
187
- "プロジェクトメンバが記載されたCVファイルのパスを指定してください。"
188
- "CSVのフォーマットは、「1列目:user_id(required), 2列目:member_role(required), "
189
- "3列目:sampling_inspection_rate, 4列目:sampling_acceptance_rate, ヘッダ行なし, カンマ区切り」です。"
190
- "member_roleは ``owner``, ``worker``, ``accepter``, ``training_data_user`` のいずれかです。"
191
- "sampling_inspection_rate, sampling_acceptance_rate を省略した場合は未設定になります。"
192
- "ただし自分自身は登録しません。"
174
+ "プロジェクトメンバが記載されたCSVファイルのパスを指定してください。"
175
+ "CSVのフォーマットは、ヘッダあり、カンマ区切りです。\n"
176
+ " * user_id (required)\n"
177
+ " * member_role (required)\n"
178
+ " * sampling_inspection_rate\n"
179
+ " * sampling_acceptance_rate\n"
180
+ "member_roleには ``owner``, ``worker``, ``accepter``, ``training_data_user`` のいずれかを指定します。\n"
181
+ "自分自身は登録できません。"
193
182
  ),
194
183
  )
195
184
 
@@ -202,10 +191,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
202
191
 
203
192
  def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argparse.ArgumentParser:
204
193
  subcommand_name = "put"
205
- subcommand_help = "プロジェクトメンバを登録する。"
206
- description = "プロジェクトメンバを登録する。"
207
- epilog = "オーナロールを持つユーザで実行してください。"
194
+ subcommand_help = "プロジェクトメンバを登録します。"
195
+ epilog = "オーナーロールを持つユーザで実行してください。"
208
196
 
209
- parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
197
+ parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, epilog=epilog)
210
198
  parse_args(parser)
211
199
  return parser
@@ -142,19 +142,19 @@ def get_annotation_attribute_list_from_annotation_zipdir_path(
142
142
  def print_annotation_attribute_list_as_csv(annotation_attribute_list: list, output_file: Optional[Path]) -> None:
143
143
  df = pandas.json_normalize(annotation_attribute_list)
144
144
 
145
- # columns = [
146
- # "task_id",
147
- # "task_status"
148
- # "task_phase",
149
- # "task_phase_stage",
150
- # "input_data_id",
151
- # "input_data_name",
152
- # "annotation_id",
153
- # "label",
154
-
155
- # ]
156
-
157
- print_csv(df, output_file)
145
+ base_columns = [
146
+ "task_id",
147
+ "task_status",
148
+ "task_phase",
149
+ "task_phase_stage",
150
+ "input_data_id",
151
+ "input_data_name",
152
+ "annotation_id",
153
+ "label",
154
+ ]
155
+ attribute_columns = [col for col in df.columns if col.startswith("attributes.")]
156
+ columns = base_columns + attribute_columns
157
+ print_csv(df[columns], output_file)
158
158
 
159
159
 
160
160
  def print_annotation_attribute_list(
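The rebuilt column selection assumes `pandas.json_normalize` flattens the nested attributes into dotted column names. A small illustration with made-up annotation records:

```python
import pandas

annotation_attribute_list = [
    {"task_id": "t1", "label": "car", "attributes": {"occluded": True, "type": "sedan"}},
    {"task_id": "t2", "label": "bus", "attributes": {"occluded": False}},
]
df = pandas.json_normalize(annotation_attribute_list)
print(df.columns.tolist())
# ['task_id', 'label', 'attributes.occluded', 'attributes.type']

# The same selection rule as above: fixed columns first, then every attribute column.
attribute_columns = [col for col in df.columns if col.startswith("attributes.")]
print(df[["task_id", "label", *attribute_columns]])
```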
@@ -552,7 +552,7 @@ class AttributeCountCsv:
552
552
 
553
553
  # Convert NaN to 0 in the annotation-count columns
554
554
  value_columns = self._value_columns(counter_list, prior_attribute_columns)
555
- df = df.fillna({column: 0 for column in value_columns})
555
+ df = df.fillna(dict.fromkeys(value_columns, 0))
556
556
 
557
557
  print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)
558
558
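The repeated `{column: 0 for column in ...}` → `dict.fromkeys(..., 0)` rewrites in this release are behavior-preserving. A quick check, including the one caveat to keep in mind:

```python
value_columns = ["car_count", "bus_count"]

# Same mapping as the comprehension it replaces.
assert dict.fromkeys(value_columns, 0) == {column: 0 for column in value_columns}

# Caveat: fromkeys binds one shared object to every key, so avoid mutable defaults.
shared = dict.fromkeys(value_columns, [])
shared["car_count"].append(1)
print(shared["bus_count"])  # [1] -- the single list is shared by both keys
```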
 
@@ -655,7 +655,7 @@ class LabelCountCsv:
655
655
 
656
656
  # Convert NaN to 0 in the annotation-count columns
657
657
  value_columns = self._value_columns(counter_list, prior_label_columns)
658
- df = df.fillna({column: 0 for column in value_columns})
658
+ df = df.fillna(dict.fromkeys(value_columns, 0))
659
659
 
660
660
  print_csv(df, output=str(output_file), to_csv_kwargs=self.csv_format)
661
661
 
@@ -411,7 +411,7 @@ class AnnotationDurationCsvByAttribute:
411
411
 
412
412
  # Convert NaN to 0 in the annotation-count columns
413
413
  value_columns = self._value_columns(annotation_duration_list, prior_attribute_columns)
414
- df = df.fillna({column: 0 for column in value_columns})
414
+ df = df.fillna(dict.fromkeys(value_columns, 0))
415
415
  return df
416
416
 
417
417
 
@@ -473,7 +473,7 @@ class AnnotationDurationCsvByLabel:
473
473
 
474
474
  # Convert NaN to 0 in the annotation-count columns
475
475
  value_columns = self._value_columns(annotation_duration_list, prior_label_columns)
476
- df = df.fillna({column: 0 for column in value_columns})
476
+ df = df.fillna(dict.fromkeys(value_columns, 0))
477
477
 
478
478
  return df
479
479
 
@@ -181,17 +181,17 @@ class AbstractPhaseProductivityPerDate(abc.ABC):
181
181
 
182
182
  # Fill the remaining missing values (worktime and production volume) with 0
183
183
  df2 = df2.fillna(
184
- {
185
- col: 0
186
- for col in [
184
+ dict.fromkeys(
185
+ [
187
186
  "annotation_worktime_hour",
188
187
  "inspection_worktime_hour",
189
188
  "acceptance_worktime_hour",
190
189
  "task_count",
191
190
  "inspection_comment_count",
192
191
  *self.production_volume_columns,
193
- ]
194
- }
192
+ ],
193
+ 0,
194
+ )
195
195
  )
196
196
 
197
197
  return df2
@@ -571,7 +571,7 @@ class UserPerformance:
571
571
  level0_columns = ["monitored_worktime_hour", *task_worktime_by_phase_user.quantity_columns]
572
572
  columns = [(c0, c1) for c0, c1 in df.columns if c0 in level0_columns]
573
573
 
574
- return df.fillna({col: 0 for col in columns})
574
+ return df.fillna(dict.fromkeys(columns, 0))
575
575
 
576
576
  if task_completion_criteria == TaskCompletionCriteria.ACCEPTANCE_REACHED:
577
577
  # When a task is considered complete once it reaches the acceptance phase,
@@ -634,8 +634,8 @@ class UserPerformance:
634
634
  ]
635
635
 
636
636
  value_columns = columns - set(basic_columns)
637
- dtypes = {col: "string" for col in basic_columns}
638
- dtypes.update({col: "float64" for col in value_columns})
637
+ dtypes = dict.fromkeys(basic_columns, "string")
638
+ dtypes.update(dict.fromkeys(value_columns, "float64"))
639
639
  return df.astype(dtypes)
640
640
 
641
641
  def _validate_df_for_output(self, output_file: Path) -> bool:
@@ -197,8 +197,8 @@ class WholePerformance:
197
197
  ("working_days", ""),
198
198
  ]
199
199
 
200
- data: dict[tuple[str, str], float] = {key: 0 for key in worktime_columns + count_columns}
201
- data.update({key: numpy.nan for key in ratio_columns + stdev_columns + date_columns})
200
+ data: dict[tuple[str, str], float] = dict.fromkeys(worktime_columns + count_columns, 0)
201
+ data.update(dict.fromkeys(ratio_columns + stdev_columns + date_columns, numpy.nan))
202
202
 
203
203
  return cls(pandas.Series(data), task_completion_criteria, custom_production_volume_list=custom_production_volume_list)
204
204
 
@@ -187,7 +187,7 @@ class WholeProductivityPerCompletedDate:
187
187
  df_agg_sub_task["task_count"] = df_sub_task.pivot_table(values=["task_id"], index=date_column, aggfunc="count").fillna(0)
188
188
  else:
189
189
  # Create only the columns
190
- df_agg_sub_task = df_agg_sub_task.assign(**{key: 0 for key in production_volume_columns}, task_count=0)
190
+ df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(production_volume_columns, 0), task_count=0)
191
191
 
192
192
  df_worktime = worktime_per_date.df
193
193
  if len(df_worktime) > 0:
@@ -929,7 +929,7 @@ class WholeProductivityPerFirstAnnotationStartedDate:
929
929
  ).fillna(0)
930
930
  else:
931
931
  # Create only the columns
932
- df_agg_sub_task = df_agg_sub_task.assign(**{key: 0 for key in value_columns}, task_count=0)
932
+ df_agg_sub_task = df_agg_sub_task.assign(**dict.fromkeys(value_columns, 0), task_count=0)
933
933
 
934
934
  # Generate the list of dates
935
935
  if len(df_agg_sub_task) > 0:
@@ -200,7 +200,7 @@ class WorktimePerDate:
200
200
  "monitored_inspection_worktime_hour",
201
201
  "monitored_acceptance_worktime_hour",
202
202
  ]
203
- df.fillna({c: 0 for c in value_columns}, inplace=True)
203
+ df.fillna(dict.fromkeys(value_columns, 0), inplace=True)
204
204
 
205
205
  df = df.merge(df_member, how="left", on="account_id")
206
206
  return df[
@@ -31,18 +31,11 @@ key: input_data_id, value: list of supplementary_data_id
31
31
 
32
32
 
33
33
  def get_input_data_supplementary_data_dict_from_csv(csv_path: Path) -> InputDataSupplementaryDataDict:
34
- df = pandas.read_csv(
35
- str(csv_path),
36
- sep=",",
37
- header=None,
38
- names=[
39
- "input_data_id",
40
- "supplementary_data_id",
41
- ],
42
- # IDは必ず文字列として読み込むようにする
43
- dtype={"input_data_id": str, "supplementary_data_id": str},
34
+ df: pandas.DataFrame = pandas.read_csv(
35
+ csv_path,
36
+ dtype={"input_data_id": "string", "supplementary_data_id": "string"},
44
37
  )
45
- input_data_dict = defaultdict(list)
38
+ input_data_dict: InputDataSupplementaryDataDict = defaultdict(list)
46
39
  for input_data_id, supplementary_data_id in zip(df["input_data_id"], df["supplementary_data_id"]):
47
40
  input_data_dict[input_data_id].append(supplementary_data_id)
48
41
  return input_data_dict
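The grouping logic above, sketched with hypothetical rows in place of the CSV:

```python
from collections import defaultdict

# Hypothetical rows standing in for the two CSV columns.
rows = [("input1", "supp1"), ("input1", "supp2"), ("input2", "supp3")]

input_data_dict: dict[str, list[str]] = defaultdict(list)
for input_data_id, supplementary_data_id in rows:
    input_data_dict[input_data_id].append(supplementary_data_id)

print(dict(input_data_dict))  # {'input1': ['supp1', 'supp2'], 'input2': ['supp3']}
```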
@@ -58,7 +51,7 @@ def get_input_data_supplementary_data_dict_from_list(supplementary_data_list: li
58
51
 
59
52
 
60
53
  class DeleteSupplementaryDataMain(CommandLineWithConfirm):
61
- def __init__(self, service: annofabapi.Resource, all_yes: bool = False) -> None: # noqa: FBT001, FBT002
54
+ def __init__(self, service: annofabapi.Resource, *, all_yes: bool = False) -> None:
62
55
  self.service = service
63
56
  self.facade = AnnofabApiFacade(service)
64
57
  CommandLineWithConfirm.__init__(self, all_yes)
@@ -82,7 +75,7 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
82
75
 
83
76
  input_data = self.service.wrapper.get_input_data_or_none(project_id, input_data_id)
84
77
  if input_data is None:
85
- logger.warning(f"input_data_id={input_data_id} の入力データは存在しないのでスキップします。")
78
+ logger.warning(f"input_data_id='{input_data_id}' の入力データは存在しないのでスキップします。")
86
79
  return 0
87
80
 
88
81
  supplementary_data_list, _ = self.service.api.get_supplementary_data_list(project_id, input_data_id)
@@ -92,14 +85,14 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
92
85
  supplementary_data = _get_supplementary_data_list(supplementary_data_id)
93
86
  if supplementary_data is None:
94
87
  logger.warning(
95
- f"input_data_id={input_data_id} の入力データに、"
96
- f"supplementary_data_id={supplementary_data_id} の補助情報は存在しないのでスキップします。"
88
+ f"input_data_id='{input_data_id}' の入力データに、"
89
+ f"supplementary_data_id='{supplementary_data_id}' の補助情報は存在しないのでスキップします。"
97
90
  )
98
91
  continue
99
92
 
100
93
  message_for_confirm = (
101
- f"補助情報 supplementary_data_id={supplementary_data_id}, "
102
- f"supplementary_data_name={supplementary_data['supplementary_data_name']} を削除しますか?"
94
+ f"補助情報 supplementary_data_id='{supplementary_data_id}', "
95
+ f"supplementary_data_name='{supplementary_data['supplementary_data_name']}' を削除しますか?"
103
96
  )
104
97
  if not self.confirm_processing(message_for_confirm):
105
98
  continue
@@ -107,30 +100,29 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
107
100
  try:
108
101
  self.service.api.delete_supplementary_data(project_id, input_data_id=input_data_id, supplementary_data_id=supplementary_data_id)
109
102
  logger.debug(
110
- f"補助情報 supplementary_data_id={supplementary_data_id}, "
111
- f"supplementary_data_name={supplementary_data['supplementary_data_name']} を削除しました。"
112
- f"(入力データ input_data_id={input_data_id}, "
113
- f"input_data_name={input_data['input_data_name']} に紐付いている)"
103
+ f"補助情報 supplementary_data_id='{supplementary_data_id}', "
104
+ f"supplementary_data_name='{supplementary_data['supplementary_data_name']}' を削除しました。"
105
+ f"(入力データ input_data_id='{input_data_id}', "
106
+ f"input_data_name='{input_data['input_data_name']}' に紐付いている)"
114
107
  )
115
108
  deleted_count += 1
116
- except requests.HTTPError as e:
117
- logger.warning(e)
109
+ except requests.HTTPError:
118
110
  logger.warning(
119
- f"補助情報 supplementary_data_id={supplementary_data_id}, "
120
- f"supplementary_data_name={supplementary_data['supplementary_data_name']} の削除に失敗しました。"
111
+ f"補助情報 supplementary_data_id='{supplementary_data_id}', "
112
+ f"supplementary_data_name='{supplementary_data['supplementary_data_name']}' の削除に失敗しました。",
113
+ exc_info=True,
121
114
  )
122
115
  continue
123
116
  return deleted_count
124
117
 
125
- def delete_supplementary_data_list(self, project_id: str, input_data_dict: InputDataSupplementaryDataDict): # noqa: ANN201
118
+ def delete_supplementary_data_list(self, project_id: str, input_data_dict: InputDataSupplementaryDataDict) -> None:
126
119
  deleted_count = 0
127
120
  total_count = sum(len(e) for e in input_data_dict.values())
128
121
  for input_data_id, supplementary_data_id_list in input_data_dict.items():
129
122
  try:
130
123
  deleted_count += self.delete_supplementary_data_list_for_input_data(project_id, input_data_id, supplementary_data_id_list)
131
- except Exception as e: # pylint: disable=broad-except
132
- logger.warning(e)
133
- logger.warning(f"入力データ(input_data_id={input_data_id})配下の補助情報の削除に失敗しました。")
124
+ except Exception: # pylint: disable=broad-except
125
+ logger.warning(f"入力データ(input_data_id='{input_data_id}')配下の補助情報の削除に失敗しました。", exc_info=True)
134
126
 
135
127
  logger.info(f"{deleted_count} / {total_count} 件の補助情報を削除しました。")
136
128
 
@@ -155,26 +147,26 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
155
147
  try:
156
148
  self.service.api.delete_supplementary_data(project_id, input_data_id=input_data_id, supplementary_data_id=supplementary_data_id)
157
149
  logger.debug(
158
- f"補助情報を削除しました。input_data_id={input_data_id}, supplementary_data_id={supplementary_data_id}, "
150
+ f"補助情報を削除しました。input_data_id='{input_data_id}', supplementary_data_id='{supplementary_data_id}', "
159
151
  f"supplementary_data_name={supplementary_data['supplementary_data_name']}"
160
152
  )
161
153
  deleted_count += 1
162
- except requests.HTTPError as e:
163
- logger.warning(e)
154
+ except requests.HTTPError:
164
155
  logger.warning(
165
- f"補助情報の削除に失敗しました。input_data_id={input_data_id}, supplementary_data_id={supplementary_data_id}, "
166
- f"supplementary_data_name={supplementary_data['supplementary_data_name']}"
156
+ f"補助情報の削除に失敗しました。input_data_id='{input_data_id}', supplementary_data_id='{supplementary_data_id}', "
157
+ f"supplementary_data_name='{supplementary_data['supplementary_data_name']}'",
158
+ exc_info=True,
167
159
  )
168
160
  continue
169
161
 
170
162
  return deleted_count
171
163
 
172
- def delete_supplementary_data_list_by_input_data_id(self, project_id: str, input_data_id_list: list[str]): # noqa: ANN201
164
+ def delete_supplementary_data_list_by_input_data_id(self, project_id: str, input_data_id_list: list[str]) -> None:
173
165
  dict_deleted_count: dict[str, int] = {}
174
166
  for input_data_id in input_data_id_list:
175
167
  input_data = self.service.wrapper.get_input_data_or_none(project_id, input_data_id)
176
168
  if input_data is None:
177
- logger.warning(f"input_data_id={input_data_id} の入力データは存在しないので、補助情報の削除をスキップします。")
169
+ logger.warning(f"input_data_id='{input_data_id}' の入力データは存在しないので、補助情報の削除をスキップします。")
178
170
  continue
179
171
  input_data_name = input_data["input_data_name"]
180
172
 
@@ -202,9 +194,8 @@ class DeleteSupplementaryDataMain(CommandLineWithConfirm):
202
194
  f"input_data_name='{input_data_name}') "
203
195
  )
204
196
 
205
- except Exception as e: # pylint: disable=broad-except
206
- logger.warning(e)
207
- logger.warning(f"入力データ(input_data_id={input_data_id})配下の補助情報の削除に失敗しました。")
197
+ except Exception: # pylint: disable=broad-except
198
+ logger.warning(f"入力データ(input_data_id='{input_data_id}')配下の補助情報の削除に失敗しました。", exc_info=True)
208
199
 
209
200
  logger.info(f"{len(dict_deleted_count)} / {len(input_data_id_list)} 件の入力データに紐づく補助情報を削除しました。")
210
201
 
@@ -263,9 +254,9 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
263
254
  "削除する補助情報が記載されたCSVファイルのパスを指定してください。\n"
264
255
  "CSVのフォーマットは以下の通りです。"
265
256
  "詳細は https://annofab-cli.readthedocs.io/ja/latest/command_reference/supplementary/delete.html を参照してください。\n"
266
- " * ヘッダ行なし, カンマ区切り\n"
267
- " * 1列目: input_data_id (required)\n"
268
- " * 2列目: supplementary_data_id (required)\n"
257
+ " * ヘッダ行あり, カンマ区切り\n"
258
+ " * input_data_id (required)\n"
259
+ " * supplementary_data_id (required)\n"
269
260
  ),
270
261
  )
271
262
 
@@ -1,8 +1,8 @@
1
1
  import argparse
2
2
  import json
3
3
  import logging
4
+ import re
4
5
  import sys
5
- import uuid
6
6
  from dataclasses import dataclass
7
7
  from functools import partial
8
8
  from multiprocessing import Pool
@@ -32,18 +32,26 @@ from annofabcli.common.utils import get_file_scheme_path
32
32
  logger = logging.getLogger(__name__)
33
33
 
34
34
 
35
+ def convert_supplementary_data_name_to_supplementary_data_id(supplementary_data_name: str) -> str:
36
+ """
37
+ Generates a supplementary data ID from the supplementary data name.
38
+ * Characters that are not allowed in an ID are replaced with `__`.
39
+ """
40
+ return re.sub(r"[^a-zA-Z0-9_.-]", "__", supplementary_data_name)
41
+
42
+
35
43
  @dataclass
36
- class CsvSupplementaryData(DataClassJsonMixin):
44
+ class CliSupplementaryData(DataClassJsonMixin):
37
45
  """
38
- Supplementary data listed in the CSV
46
+ Supplementary data specified on the command line (via `--csv` or `--json`)
39
47
  """
40
48
 
41
49
  input_data_id: str
42
- supplementary_data_number: int
43
50
  supplementary_data_name: str
44
51
  supplementary_data_path: str
45
52
  supplementary_data_id: Optional[str] = None
46
53
  supplementary_data_type: Optional[str] = None
54
+ supplementary_data_number: Optional[int] = None
47
55
 
48
56
 
49
57
  @dataclass
@@ -67,17 +75,15 @@ class SubPutSupplementaryData:
67
75
 
68
76
  Args:
69
77
  service:
70
- facade:
71
78
  all_yes:
72
79
  """
73
80
 
74
- def __init__(self, service: annofabapi.Resource, facade: AnnofabApiFacade, all_yes: bool = False) -> None: # noqa: FBT001, FBT002
81
+ def __init__(self, service: annofabapi.Resource, *, all_yes: bool = False) -> None:
75
82
  self.service = service
76
- self.facade = facade
77
83
  self.all_yes = all_yes
78
- self.supplementary_data_cache: dict[str, list[SupplementaryData]] = {}
84
+ self.supplementary_data_cache: dict[tuple[str, str], list[SupplementaryData]] = {}
79
85
 
80
- def put_supplementary_data(self, project_id: str, supplementary_data: SupplementaryDataForPut): # noqa: ANN201
86
+ def put_supplementary_data(self, project_id: str, supplementary_data: SupplementaryDataForPut) -> None:
81
87
  file_path = get_file_scheme_path(supplementary_data.supplementary_data_path)
82
88
  if file_path is not None:
83
89
  request_body = {
@@ -89,7 +95,12 @@ class SubPutSupplementaryData:
89
95
  if supplementary_data.supplementary_data_type is not None:
90
96
  request_body.update({"supplementary_data_type": supplementary_data.supplementary_data_type})
91
97
 
92
- logger.debug(f"'{file_path}'を補助情報として登録します。supplementary_data_name='{supplementary_data.supplementary_data_name}'")
98
+ logger.debug(
99
+ f"'{file_path}'を補助情報として登録します。 :: "
100
+ f"input_data_id='{supplementary_data.input_data_id}', "
101
+ f"supplementary_data_id='{supplementary_data.supplementary_data_id}', "
102
+ f"supplementary_data_name='{supplementary_data.supplementary_data_name}'"
103
+ )
93
104
  self.service.wrapper.put_supplementary_data_from_file(
94
105
  project_id,
95
106
  input_data_id=supplementary_data.input_data_id,
@@ -141,80 +152,86 @@ class SubPutSupplementaryData:
141
152
 
142
153
  return yes
143
154
 
144
- def confirm_put_supplementary_data(self, csv_supplementary_data: CsvSupplementaryData, already_exists: bool = False) -> bool: # noqa: FBT001, FBT002
145
- message_for_confirm = f"supplementary_data_name='{csv_supplementary_data.supplementary_data_name}' の補助情報を登録しますか?"
155
+ def confirm_put_supplementary_data(
156
+ self, csv_supplementary_data: CliSupplementaryData, supplementary_data_id: str, *, already_exists: bool = False
157
+ ) -> bool:
146
158
  if already_exists:
147
- message_for_confirm += f"supplementary_data_id={csv_supplementary_data.supplementary_data_id} を上書きします。"
148
- return self.confirm_processing(message_for_confirm)
149
-
150
- def get_supplementary_data_list_cached(self, project_id: str, input_data_id: str) -> list[SupplementaryData]:
151
- key = f"{project_id},{input_data_id}"
152
- if key not in self.supplementary_data_cache:
153
- supplementary_data_list, _ = self.service.api.get_supplementary_data_list(project_id, input_data_id)
154
- self.supplementary_data_cache[key] = supplementary_data_list if supplementary_data_list is not None else []
155
- return self.supplementary_data_cache[key]
156
-
157
- def get_supplementary_data_by_id(self, project_id: str, input_data_id: str, supplementary_data_id: str) -> Optional[SupplementaryData]:
158
- cached_list = self.get_supplementary_data_list_cached(project_id, input_data_id)
159
- return first_true(cached_list, pred=lambda e: e["supplementary_data_id"] == supplementary_data_id)
159
+ message_for_confirm = (
160
+ f"supplementary_data_name='{csv_supplementary_data.supplementary_data_name}', "
161
+ f"supplementary_data_id='{supplementary_data_id}'の補助情報を更新しますか?"
162
+ )
163
+ else:
164
+ message_for_confirm = (
165
+ f"supplementary_data_name='{csv_supplementary_data.supplementary_data_name}', "
166
+ f"supplementary_data_id='{supplementary_data_id}'の補助情報を登録しますか?"
167
+ )
160
168
 
161
- def get_supplementary_data_by_number(self, project_id: str, input_data_id: str, supplementary_data_number: int) -> Optional[SupplementaryData]:
162
- cached_list = self.get_supplementary_data_list_cached(project_id, input_data_id)
163
- return first_true(cached_list, pred=lambda e: e["supplementary_data_number"] == supplementary_data_number)
169
+ return self.confirm_processing(message_for_confirm)
164
170
 
165
- def put_supplementary_data_main(self, project_id: str, csv_supplementary_data: CsvSupplementaryData, overwrite: bool = False) -> bool: # noqa: FBT001, FBT002
171
+ def put_supplementary_data_main(self, project_id: str, csv_data: CliSupplementaryData, *, overwrite: bool = False) -> bool:
166
172
  last_updated_datetime = None
167
- input_data_id = csv_supplementary_data.input_data_id
168
- supplementary_data_id = csv_supplementary_data.supplementary_data_id
169
- supplementary_data_path = csv_supplementary_data.supplementary_data_path
173
+ input_data_id = csv_data.input_data_id
174
+ supplementary_data_id = (
175
+ csv_data.supplementary_data_id
176
+ if csv_data.supplementary_data_id is not None
177
+ else convert_supplementary_data_name_to_supplementary_data_id(csv_data.supplementary_data_name)
178
+ )
170
179
 
171
- # Check that the input_data_id exists
172
- if self.service.wrapper.get_input_data_or_none(project_id, input_data_id) is None:
180
+ supplementary_data_list = self.service.wrapper.get_supplementary_data_list_or_none(project_id, input_data_id)
181
+ if supplementary_data_list is None:
182
+ # If the input data does not exist, `supplementary_data_list` is None
173
183
  logger.warning(f"input_data_id='{input_data_id}'である入力データは存在しないため、補助情報の登録をスキップします。")
174
184
  return False
175
185
 
176
- if supplementary_data_id is not None:
177
- old_supplementary_data_key = f"supplementary_data_id={supplementary_data_id}"
178
- old_supplementary_data = self.get_supplementary_data_by_id(project_id, input_data_id, supplementary_data_id)
186
+ old_supplementary_data = first_true(supplementary_data_list, pred=lambda e: e["supplementary_data_id"] == supplementary_data_id)
187
+
188
+ # If the supplementary data number is not specified, use the maximum existing supplementary data number + 1
189
+ max_supplementary_data_number = max((e["supplementary_data_number"] for e in supplementary_data_list), default=0)
190
+ if csv_data.supplementary_data_number is not None:
191
+ supplementary_data_number = csv_data.supplementary_data_number
192
+ elif old_supplementary_data is not None:
193
+ supplementary_data_number = old_supplementary_data["supplementary_data_number"]
179
194
  else:
180
- supplementary_data_number = csv_supplementary_data.supplementary_data_number
181
- old_supplementary_data_key = f"input_data_id={input_data_id}, supplementary_data_number={supplementary_data_number}"
182
- old_supplementary_data = self.get_supplementary_data_by_number(project_id, input_data_id, supplementary_data_number)
183
- supplementary_data_id = old_supplementary_data["supplementary_data_id"] if old_supplementary_data is not None else str(uuid.uuid4())
195
+ supplementary_data_number = max_supplementary_data_number + 1
184
196
 
185
197
  if old_supplementary_data is not None:
186
198
  if overwrite:
187
- logger.debug(f"'{old_supplementary_data_key}' はすでに存在します。")
199
+ logger.debug(
200
+ f"supplementary_data_id='{supplementary_data_id}'である補助情報がすでに存在します。 :: "
201
+ f"input_data_id='{input_data_id}', supplementary_data_name='{csv_data.supplementary_data_name}'"
202
+ )
188
203
  last_updated_datetime = old_supplementary_data["updated_datetime"]
189
204
  else:
190
- logger.debug(f"'{old_supplementary_data_key}' がすでに存在するのでスキップします。")
205
+ logger.debug(
206
+ f"supplementary_data_id='{supplementary_data_id}'である補助情報がすでに存在するので、補助情報の登録をスキップします。 :: "
207
+ f"input_data_id='{input_data_id}', supplementary_data_name='{csv_data.supplementary_data_name}'"
208
+ )
191
209
  return False
192
210
 
193
- file_path = get_file_scheme_path(supplementary_data_path)
194
- logger.debug(f"csv_supplementary_data='{csv_supplementary_data}'")
211
+ file_path = get_file_scheme_path(csv_data.supplementary_data_path)
195
212
  if file_path is not None: # noqa: SIM102
196
213
  if not Path(file_path).exists():
197
- logger.warning(f"'{supplementary_data_path}' は存在しません。")
214
+ logger.warning(f"'{csv_data.supplementary_data_path}' は存在しません。補助情報の登録をスキップします。")
198
215
  return False
199
216
 
200
- if not self.confirm_put_supplementary_data(csv_supplementary_data, already_exists=last_updated_datetime is not None):
217
+ if not self.confirm_put_supplementary_data(csv_data, supplementary_data_id, already_exists=last_updated_datetime is not None):
201
218
  return False
202
219
 
203
220
  # Register the supplementary data
204
221
  supplementary_data_for_put = SupplementaryDataForPut(
205
- input_data_id=csv_supplementary_data.input_data_id,
222
+ input_data_id=csv_data.input_data_id,
206
223
  supplementary_data_id=supplementary_data_id,
207
- supplementary_data_name=csv_supplementary_data.supplementary_data_name,
208
- supplementary_data_path=csv_supplementary_data.supplementary_data_path,
209
- supplementary_data_type=csv_supplementary_data.supplementary_data_type,
210
- supplementary_data_number=csv_supplementary_data.supplementary_data_number,
224
+ supplementary_data_name=csv_data.supplementary_data_name,
225
+ supplementary_data_path=csv_data.supplementary_data_path,
226
+ supplementary_data_type=csv_data.supplementary_data_type,
227
+ supplementary_data_number=supplementary_data_number,
211
228
  last_updated_datetime=last_updated_datetime,
212
229
  )
213
230
  try:
214
231
  self.put_supplementary_data(project_id, supplementary_data_for_put)
215
232
  logger.debug(
216
- f"補助情報を登録しました。"
217
- f"input_data_id='{supplementary_data_for_put.input_data_id}',"
233
+ f"補助情報を登録しました。 :: "
234
+ f"input_data_id='{supplementary_data_for_put.input_data_id}', "
218
235
  f"supplementary_data_id='{supplementary_data_for_put.supplementary_data_id}', "
219
236
  f"supplementary_data_name='{supplementary_data_for_put.supplementary_data_name}'"
220
237
  )
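The numbering rules introduced in this hunk, condensed into a standalone sketch (the helper and the plain lists are illustrative; the real code reads the existing entries from the Annofab API):

```python
from typing import Optional

def resolve_supplementary_data_number(
    specified_number: Optional[int],
    old_number: Optional[int],
    existing_numbers: list[int],
) -> int:
    # Priority: explicit value from --csv/--json > the existing entry's number > max + 1.
    if specified_number is not None:
        return specified_number
    if old_number is not None:
        return old_number
    return max(existing_numbers, default=0) + 1

assert resolve_supplementary_data_number(None, None, []) == 1      # first entry for the input data
assert resolve_supplementary_data_number(None, None, [1, 2]) == 3  # appended after the existing entries
assert resolve_supplementary_data_number(None, 2, [1, 2]) == 2     # overwriting keeps the old number
assert resolve_supplementary_data_number(5, None, [1, 2]) == 5     # an explicit number always wins
```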
@@ -222,7 +239,7 @@ class SubPutSupplementaryData:
222
239
 
223
240
  except requests.exceptions.HTTPError:
224
241
  logger.warning(
225
- f"補助情報の登録に失敗しました。"
242
+ f"補助情報の登録に失敗しました。 ::"
226
243
  f"input_data_id='{supplementary_data_for_put.input_data_id}',"
227
244
  f"supplementary_data_id='{supplementary_data_for_put.supplementary_data_id}', "
228
245
  f"supplementary_data_name='{supplementary_data_for_put.supplementary_data_name}'",
@@ -239,8 +256,9 @@ class PutSupplementaryData(CommandLine):
239
256
  def put_supplementary_data_list(
240
257
  self,
241
258
  project_id: str,
242
- supplementary_data_list: list[CsvSupplementaryData],
243
- overwrite: bool = False, # noqa: FBT001, FBT002
259
+ supplementary_data_list: list[CliSupplementaryData],
260
+ *,
261
+ overwrite: bool = False,
244
262
  parallelism: Optional[int] = None,
245
263
  ) -> None:
246
264
  """
@@ -259,7 +277,7 @@ class PutSupplementaryData(CommandLine):
259
277
 
260
278
  count_put_supplementary_data = 0
261
279
 
262
- obj = SubPutSupplementaryData(service=self.service, facade=self.facade, all_yes=self.all_yes)
280
+ obj = SubPutSupplementaryData(service=self.service, all_yes=self.all_yes)
263
281
  if parallelism is not None:
264
282
  partial_func = partial(obj.put_supplementary_data_main, project_id, overwrite=overwrite)
265
283
  with Pool(parallelism) as pool:
@@ -268,46 +286,29 @@ class PutSupplementaryData(CommandLine):
268
286
 
269
287
  else:
270
288
  for csv_supplementary_data in supplementary_data_list:
271
- result = obj.put_supplementary_data_main(project_id, csv_supplementary_data=csv_supplementary_data, overwrite=overwrite)
289
+ result = obj.put_supplementary_data_main(project_id, csv_data=csv_supplementary_data, overwrite=overwrite)
272
290
  if result:
273
291
  count_put_supplementary_data += 1
274
292
 
275
293
  logger.info(f"{project_title} に、{count_put_supplementary_data} / {len(supplementary_data_list)} 件の補助情報を登録しました。")
276
294
 
277
295
  @staticmethod
278
- def get_supplementary_data_list_from_dict(supplementary_data_dict_list: list[dict[str, Any]]) -> list[CsvSupplementaryData]:
279
- return [CsvSupplementaryData.from_dict(e) for e in supplementary_data_dict_list]
296
+ def get_supplementary_data_list_from_dict(supplementary_data_dict_list: list[dict[str, Any]]) -> list[CliSupplementaryData]:
297
+ return [CliSupplementaryData.from_dict(e) for e in supplementary_data_dict_list]
280
298
 
281
299
  @staticmethod
282
- def get_supplementary_data_list_from_csv(csv_path: Path) -> list[CsvSupplementaryData]:
283
- def create_supplementary_data(e: Any): # noqa: ANN202, ANN401
284
- supplementary_data_id = e.supplementary_data_id if not pandas.isna(e.supplementary_data_id) else None
285
- supplementary_data_type = e.supplementary_data_type if not pandas.isna(e.supplementary_data_type) else None
286
- return CsvSupplementaryData(
287
- input_data_id=e.input_data_id,
288
- supplementary_data_number=e.supplementary_data_number,
289
- supplementary_data_name=e.supplementary_data_name,
290
- supplementary_data_path=e.supplementary_data_path,
291
- supplementary_data_id=supplementary_data_id,
292
- supplementary_data_type=supplementary_data_type,
293
- )
294
-
300
+ def get_supplementary_data_list_from_csv(csv_path: Path) -> list[CliSupplementaryData]:
295
301
  df = pandas.read_csv(
296
302
  str(csv_path),
297
- sep=",",
298
- header=None,
299
- names=(
300
- "input_data_id",
301
- "supplementary_data_number",
302
- "supplementary_data_name",
303
- "supplementary_data_path",
304
- "supplementary_data_id",
305
- "supplementary_data_type",
306
- ),
307
- # Always read the IDs as strings
308
- dtype={"input_data_id": str, "supplementary_data_id": str, "supplementary_data_name": str},
303
+ dtype={
304
+ "input_data_id": "string",
305
+ "supplementary_data_id": "string",
306
+ "supplementary_data_name": "string",
307
+ "supplementary_data_path": "string",
308
+ "supplementary_data_number": "Int64",
309
+ },
309
310
  )
310
- supplementary_data_list = [create_supplementary_data(e) for e in df.itertuples()]
311
+ supplementary_data_list = [CliSupplementaryData.from_dict(e) for e in df.to_dict("records")]
311
312
  return supplementary_data_list
312
313
 
313
314
  COMMON_MESSAGE = "annofabcli supplementary_data put: error:"
@@ -370,15 +371,15 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
370
371
  "--csv",
371
372
  type=str,
372
373
  help=(
373
- "補助情報が記載されたCVファイルのパスを指定してください。CSVのフォーマットは、以下の通りです。\n"
374
+ "補助情報が記載されたCSVファイルのパスを指定してください。CSVのフォーマットは、以下の通りです。\n"
374
375
  "\n"
375
- " * ヘッダ行なし, カンマ区切り\n"
376
- " * 1列目: input_data_id (required)\n"
377
- " * 2列目: supplementary_data_number (required)\n"
378
- " * 3列目: supplementary_data_name (required)\n"
379
- " * 4列目: supplementary_data_path (required)\n"
380
- " * 5列目: supplementary_data_id\n"
381
- " * 6列目: supplementary_data_type\n"
376
+ " * ヘッダ行あり, カンマ区切り\n"
377
+ " * input_data_id (required)\n"
378
+ " * supplementary_data_name (required)\n"
379
+ " * supplementary_data_path (required)\n"
380
+ " * supplementary_data_id\n"
381
+ " * supplementary_data_type\n"
382
+ " * supplementary_data_number\n"
382
383
  "\n"
383
384
  "各項目の詳細は https://annofab-cli.readthedocs.io/ja/latest/command_reference/supplementary/put.html を参照してください。"
384
385
  ),
@@ -387,7 +388,6 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
387
388
  JSON_SAMPLE = [ # noqa: N806
388
389
  {
389
390
  "input_data_id": "input1",
390
- "supplementary_data_number": 1,
391
391
  "supplementary_data_name": "foo",
392
392
  "supplementary_data_path": "file://foo.jpg",
393
393
  }
@@ -408,7 +408,7 @@ def parse_args(parser: argparse.ArgumentParser) -> None:
408
408
  parser.add_argument(
409
409
  "--overwrite",
410
410
  action="store_true",
411
- help="指定した場合、supplementary_data_id(省略時はsupplementary_data_number)がすでに存在していたら上書きします。指定しなければ、スキップします。",
411
+ help="指定した場合、supplementary_data_idがすでに存在していたら上書きします。指定しなければ、スキップします。",
412
412
  )
413
413
 
414
414
  parser.add_argument(
@@ -425,7 +425,7 @@ def add_parser(subparsers: Optional[argparse._SubParsersAction] = None) -> argpa
425
425
  subcommand_name = "put"
426
426
  subcommand_help = "補助情報を登録します。"
427
427
  description = "補助情報を登録します。"
428
- epilog = "オーナロールを持つユーザで実行してください。"
428
+ epilog = "オーナーロールを持つユーザで実行してください。"
429
429
 
430
430
  parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
431
431
  parse_args(parser)
@@ -309,9 +309,10 @@ class ListTasksAddedTaskHistoryMain:
309
309
  task_list = list_task_obj.get_task_list(self.project_id, task_id_list=task_id_list, task_query=task_query)
310
310
 
311
311
  obj = AddingAdditionalInfoToTask(self.service, project_id=self.project_id)
312
+ logger.info(f"{len(task_list)} 件のタスクの履歴情報を取得します。")
312
313
 
313
314
  for index, task in enumerate(task_list):
314
- if (index + 1) % 1000 == 0:
315
+ if (index + 1) % 100 == 0:
315
316
  logger.debug(f"{index + 1} 件目のタスク履歴情報を取得します。")
316
317
 
317
318
  obj.add_additional_info_to_task(task)
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  import argparse
4
+ import copy
4
5
  import json
5
6
  import logging
6
7
  import multiprocessing
@@ -212,7 +213,7 @@ class UpdateMetadataOfTask(CommandLine):
212
213
  if args.metadata is not None:
213
214
  metadata = annofabcli.common.cli.get_json_from_args(args.metadata)
214
215
  assert task_id_list is not None, "'--task_id' is required when '--metadata' is specified."
215
- metadata_by_task_id = {task_id: metadata for task_id in task_id_list}
216
+ metadata_by_task_id = {task_id: copy.deepcopy(metadata) for task_id in task_id_list}
216
217
  elif args.metadata_by_task_id is not None:
217
218
  metadata_by_task_id = annofabcli.common.cli.get_json_from_args(args.metadata_by_task_id)
218
219
  if task_id_list is not None:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: annofabcli
3
- Version: 1.96.0
3
+ Version: 1.97.0
4
4
  Summary: Utility Command Line Interface for AnnoFab
5
5
  Home-page: https://github.com/kurusugawa-computer/annofab-cli
6
6
  License: MIT
@@ -20,7 +20,7 @@ Classifier: Programming Language :: Python :: 3.12
20
20
  Classifier: Topic :: Utilities
21
21
  Requires-Dist: Pillow
22
22
  Requires-Dist: annofabapi (>=1.1,<2.0)
23
- Requires-Dist: bokeh (>=3.3,<4.0)
23
+ Requires-Dist: bokeh (>=3.3,<3.7)
24
24
  Requires-Dist: dictdiffer
25
25
  Requires-Dist: isodate
26
26
  Requires-Dist: jmespath
@@ -1,6 +1,6 @@
1
1
  annofabcli/__init__.py,sha256=NMA7kFxmLlCiILQPHJa9mEuqXxtLALw_dwyXYsvz4VM,71
2
2
  annofabcli/__main__.py,sha256=JzfycqVG9ENhWOCxTouZwpHwWTSrI-grLsaMudxjyBM,5283
3
- annofabcli/__version__.py,sha256=vUosqVW-MeQrE_vPK0xlTVGwdsd_pcLlczOaCRqUXgg,132
3
+ annofabcli/__version__.py,sha256=dqx_NARSo9UcLPvxc6zJBGKIFxXd0yc57SlNZJUxSbg,132
4
4
  annofabcli/annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
5
  annofabcli/annotation/annotation_query.py,sha256=ke3W3RT1-WfFzwt-TXcQwGmghG34vcKJkM_jxgbNKjU,15922
6
6
  annofabcli/annotation/change_annotation_attributes.py,sha256=zHXyENZfbMGL_15xiK7Cy4cQ2sV0GjSVmKuPm3sOX7Y,17173
@@ -67,7 +67,7 @@ annofabcli/filesystem/mask_user_info.py,sha256=Evmr9QhSpMG900bbOXbJNHwXHapUlNfvV
67
67
  annofabcli/filesystem/merge_annotation.py,sha256=MkGy1T9F-1tHq_BS_L_mTtgKpOMmXscrydzfSc0JKAo,10588
68
68
  annofabcli/filesystem/subcommand_filesystem.py,sha256=ZM2td5iZYIQ3TCI-9xAue8LugFlIc3WMRXrJqnjJ8-s,1186
69
69
  annofabcli/input_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
70
- annofabcli/input_data/change_input_data_name.py,sha256=8SMgSZbwYFl5Qvs7H6knQXi8yOc_dQuwd7xUvEcHYac,9751
70
+ annofabcli/input_data/change_input_data_name.py,sha256=vhii3jHaEE_J8jcJ2bW66VxC9k-jtWN4tSD9vnatON4,9665
71
71
  annofabcli/input_data/copy_input_data.py,sha256=Lbyq2aW5lmJCPB8-WHdOI93a2ND5E0qOWrhBHRluQyw,14656
72
72
  annofabcli/input_data/delete_input_data.py,sha256=GLg58XNz9_Njq9eq2sc2BAzfLy9tJefcDYL6J4TOim4,8836
73
73
  annofabcli/input_data/delete_metadata_key_of_input_data.py,sha256=PVv9HXQVFLKQh-4RSHas_ckFxDxtqAzWnXnWYFFYy08,8464
@@ -75,10 +75,10 @@ annofabcli/input_data/download_input_data_json.py,sha256=vxGoeM3ZEggQbWiWsrDK0_G
75
75
  annofabcli/input_data/list_all_input_data.py,sha256=Kq261WCkma5UNKfMDT7O6Z-dzzqp-KP1wL0DvHe5fH8,9879
76
76
  annofabcli/input_data/list_all_input_data_merged_task.py,sha256=UaBJW-nMHytmQ4okg69Ew1jhC2vMNnRMgl2lEBabtUI,12850
77
77
  annofabcli/input_data/list_input_data.py,sha256=RBxsHyKg1bVIEQUFDkfrq-nJmEdEYNoCjJ2L2GgSfeU,11519
78
- annofabcli/input_data/put_input_data.py,sha256=bEbo6rmLkmYTUp-ynoGUXpZoYLdBDLaZYcrxqEm74tw,17951
78
+ annofabcli/input_data/put_input_data.py,sha256=x54C-rLJVzr1YF2GlMR0w0HJReOE3E7YKiBeuh0RsTI,17934
79
79
  annofabcli/input_data/put_input_data_with_zip.py,sha256=SA4aMAwMBFgc9Lh0zmRCbmkXG4AMrcBqd5zeTSdr8lc,5566
80
80
  annofabcli/input_data/subcommand_input_data.py,sha256=X8EoxsF6PMiKrvk_r7PIe2D0WZuaPlgLJRuTiljPIdM,2048
81
- annofabcli/input_data/update_metadata_of_input_data.py,sha256=txdliirBrtoTabGEyWYLPclZC_DarEpRAY3MTsbViPA,11556
81
+ annofabcli/input_data/update_metadata_of_input_data.py,sha256=_cZh0GYGK6Lx5arKuTjblolkXsRdTlwuKcIHa8Nm5yQ,11583
82
82
  annofabcli/input_data/utils.py,sha256=F3mYbWmRwXfEtiIo9dvSysfuZUdyQzJmmq94eM-C5IY,723
83
83
  annofabcli/instruction/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
84
84
  annofabcli/instruction/copy_instruction.py,sha256=24zBMkelSI2mpJq7Esgzo3uB_f1Z3hitBeX68HFqV00,6704
@@ -117,7 +117,7 @@ annofabcli/project_member/copy_project_members.py,sha256=-ybJ6ftcbRq0Pl092aag5ku
117
117
  annofabcli/project_member/drop_project_members.py,sha256=t_Jqoc89JtnrIpZc1fECjO6s-qttDmsgHHTz03zJTF0,5856
118
118
  annofabcli/project_member/invite_project_members.py,sha256=bxciqakeSrAX5xjE6KwmhcHj4EU1haETcQ_0BYkR6O4,6377
119
119
  annofabcli/project_member/list_users.py,sha256=BoavUdDRoRIQjI7q_sQEZjDMcLmUwWOCZdJsR7JimJ8,4844
120
- annofabcli/project_member/put_project_members.py,sha256=0YZIUJYeblL-cHETnCC9R1oWUm_bLNN6LIIIlXH8G7c,9741
120
+ annofabcli/project_member/put_project_members.py,sha256=-2T-XlZw-I6Am4HDf9dsNDEmaflMDvmJvMzStiya_Fc,9183
121
121
  annofabcli/project_member/subcommand_project_member.py,sha256=6cOoMiZJ82j94rSSp1GzmYggGSm1egk3S9e_YYgzhe8,1466
122
122
  annofabcli/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
123
123
  annofabcli/stat_visualization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -130,9 +130,9 @@ annofabcli/stat_visualization/write_performance_rating_csv.py,sha256=TDn7-poyFt2
130
130
  annofabcli/statistics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
131
131
  annofabcli/statistics/histogram.py,sha256=CvzDxT2cKLSnBGSqkZE6p92PayGxYYja1YyB24M4ALU,3245
132
132
  annofabcli/statistics/linegraph.py,sha256=0kr7jVBNMiM2ECYhv3Ry5RitElKerSl9ZKxbKzfiplI,12494
133
- annofabcli/statistics/list_annotation_attribute.py,sha256=WM0-Zw0Pm35olqrSXvbHnrb4mbC02qcjshLCrbPE3zg,12432
134
- annofabcli/statistics/list_annotation_count.py,sha256=xsqXxRO21MW7Wbs96PEa8gHb3i7bxjQvoMHLB2eT-0g,50360
135
- annofabcli/statistics/list_annotation_duration.py,sha256=9PCg1IA_g4HoHHSfsMVb2zBMLfLJ3u9Id8Qa9L8Qhko,31923
133
+ annofabcli/statistics/list_annotation_attribute.py,sha256=87jjNCOXJUbWnmswMCLN7GTjGsBfqpFJ6hViWmnj8Y4,12557
134
+ annofabcli/statistics/list_annotation_count.py,sha256=GVlYYubWqjNLJD7GGxv1WivCYv0rMgYxRLeZsM3Y8hA,50344
+ annofabcli/statistics/list_annotation_duration.py,sha256=1OFvhi5QQJDcUO4iHi3lV2fDFK8ZFaAP8vlbGpVR2s0,31907
  annofabcli/statistics/list_video_duration.py,sha256=uNeMteRBX2JG_AWmcgMJj0Jzbq_qF7bvAwr25GmeIiw,9124
  annofabcli/statistics/list_worktime.py,sha256=C7Yu3IOW2EvhkJJv6gY3hNdS9_TOLmT_9LZsB7vLJ1o,6493
  annofabcli/statistics/scatter.py,sha256=IUCwXix9GbZb6V82wjjb5q2eamrT5HQsU_bzDTjAFnM,11011
@@ -148,16 +148,16 @@ annofabcli/statistics/visualization/dataframe/cumulative_productivity.py,sha256=
  annofabcli/statistics/visualization/dataframe/custom_production_volume.py,sha256=5ELLiQJ5sNKdVKmYYVeZW4nedDg1CVGxMDdF5TUUX5c,2142
  annofabcli/statistics/visualization/dataframe/input_data_count.py,sha256=wDRFtoIWw_Gy2bPZ7LBx3eMO3LdUdjbQKS9mncXav6I,1654
  annofabcli/statistics/visualization/dataframe/inspection_comment_count.py,sha256=RxpQzRy4U2hKEpgbksUXotcxH2sKz__NO20mxpMqK1w,4382
- annofabcli/statistics/visualization/dataframe/productivity_per_date.py,sha256=KH4vUBLpC-M41EnGroeaOXdwVZz8j0u4HpvLitFbtaA,27250
+ annofabcli/statistics/visualization/dataframe/productivity_per_date.py,sha256=tMap7E3z7hibon1zJnZRJnbMmtzqh04ocoV0oxBpssU,27249
  annofabcli/statistics/visualization/dataframe/project_performance.py,sha256=hdTMPvLfGDMZFjpIl58GtTEOopsOvitbdaj5hQAEp8o,8496
  annofabcli/statistics/visualization/dataframe/task.py,sha256=KanuLy67ZGORdLry21eN7uSNzkoJvIre1JN7Bq-fRlg,23452
  annofabcli/statistics/visualization/dataframe/task_history.py,sha256=3b9e4ok6yKE5x647KzRqvp01P33XMAHLEEbLJ5GCmRo,2760
  annofabcli/statistics/visualization/dataframe/task_worktime_by_phase_user.py,sha256=AtlbeNIkttjLtuxtZYCyZin4eVKRvcYEMnLzEZtZUlY,13134
  annofabcli/statistics/visualization/dataframe/user.py,sha256=EHn7nlf6D6UX-gsVXy8m_3QaCsHsUhr0iy2rbNozOgc,1707
- annofabcli/statistics/visualization/dataframe/user_performance.py,sha256=8o0BB4uPzW6O2rReXyjEZfmGYzYidoWZLEQBm12qnng,57029
- annofabcli/statistics/visualization/dataframe/whole_performance.py,sha256=u_fuRUMCwuLjmCSKHJVeThGcbwBFQFmm2XgeLqnTgEE,12565
- annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py,sha256=uECDfdRxt0d9iQRpX9TISSd0q9SZ4XVzuY5FMu7X2kw,52145
- annofabcli/statistics/visualization/dataframe/worktime_per_date.py,sha256=2FnyZFSZBfedkIMmRuAdx026deblT7GT3r2qe2Fg71M,21319
+ annofabcli/statistics/visualization/dataframe/user_performance.py,sha256=JXJg0oNpURa9HCF3zFYqynOFtNRKD5h9YNuLOq0_h00,57023
+ annofabcli/statistics/visualization/dataframe/whole_performance.py,sha256=qcQALkflNSX6QvvtOc9QEZnmQ-TtkjsQkT1vmZ3k0_M,12561
+ annofabcli/statistics/visualization/dataframe/whole_productivity_per_date.py,sha256=W8g9_4WcUHxvPw31gUY_MVheBfllsHHY6g-M0FKScUk,52141
+ annofabcli/statistics/visualization/dataframe/worktime_per_date.py,sha256=56izC5flXOiu_lKPSk0s0aNGgqr-FiVA4t6pBWSNuwk,21321
  annofabcli/statistics/visualization/filtering_query.py,sha256=0_3QcS1weGIC9THH7NGqjYCb4rDLiftnmA-2MVgeNDI,4174
  annofabcli/statistics/visualization/model.py,sha256=5LSp0u63nqeILUl3XdYEhqqi_hq2rFbzyKHYlXZo2S0,896
  annofabcli/statistics/visualization/project_dir.py,sha256=ijwuGngRaK8TczAaDQoI2VnPvsmiaT-xoGWMHFCLVgo,24071
@@ -167,9 +167,9 @@ annofabcli/statistics/visualize_annotation_duration.py,sha256=J4Z3Ql1sdiTraMnRaR
  annofabcli/statistics/visualize_statistics.py,sha256=3bP9WRALADkAxPePa1Ix49apahi1V_qjAL6Dbrwtxd0,39550
  annofabcli/statistics/visualize_video_duration.py,sha256=A6Q91aWx7GO4F0TGxbiQI0za8BFr7IqiUaYyw3__BgY,16842
  annofabcli/supplementary/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- annofabcli/supplementary/delete_supplementary_data.py,sha256=StHGH7qB6mEtuD75FKRQRFdTY2Lfhnl6s4u1Jq8b-MA,13756
+ annofabcli/supplementary/delete_supplementary_data.py,sha256=BZ5NzEQ7CdOIxfCHY7vTLG5vutueXNPh6Teyno9NY-U,13542
  annofabcli/supplementary/list_supplementary_data.py,sha256=F4iJnQi_4W7S_d7ahqxWFSFcnfiKYhNuysC28v0QUWA,7649
- annofabcli/supplementary/put_supplementary_data.py,sha256=KLGFUIYfHmB39U7qhj6y6exU9D6XMDmYpYBKzkIxqiU,19130
+ annofabcli/supplementary/put_supplementary_data.py,sha256=-ZfqdmirgXFyTNZ8t38IWatPzIL7X4s805xFY5Szar8,18073
  annofabcli/supplementary/subcommand_supplementary.py,sha256=F8qfuNQzgW5HV1QKB4h0DWN7-kPVQcoFQwPfW_vjZVk,1079
  annofabcli/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/task/cancel_acceptance.py,sha256=rHKmAzQE1a25szVWb7dexokeW32V12bkyDIYM_oIl5U,13899
@@ -184,12 +184,12 @@ annofabcli/task/download_task_json.py,sha256=Ocjecmdf2WV_Sq3u1InfMLIsT3XSw0ojyJm
  annofabcli/task/list_all_tasks.py,sha256=F9GpzzgWffF3lUeGrFIvjweq-iEwJ1c-g8usskO_2dE,6506
  annofabcli/task/list_all_tasks_added_task_history.py,sha256=fkdiuo64iS7xxvIfGKzSiUPPEMiCVnJjjcAtMxe2Ngs,9551
  annofabcli/task/list_tasks.py,sha256=O4jjp_zdmurcGNWXFp9JXHJsH4nhlR5e3ok96YnD1SI,10237
- annofabcli/task/list_tasks_added_task_history.py,sha256=TVyHODV5lQuB6ALs8Xd4UUm4FYf0F6kycv7ajbm0MkQ,19583
+ annofabcli/task/list_tasks_added_task_history.py,sha256=7avkLYFErcdSNxGc8CQXr4FFIF1z9FtQJBSloD-tzBI,19675
  annofabcli/task/put_tasks.py,sha256=hT2xPowJmcNJhjxoAm-MFiKTw_RFcJUYlpeanegVrAU,13400
  annofabcli/task/put_tasks_by_count.py,sha256=MUHfWhqtSAXnB3O36p3bMSSgQ_3Zek9GT5qRvHGx8Lo,6041
  annofabcli/task/reject_tasks.py,sha256=5ByAN6VnKwvU5BT_cfsHwA1jLDl74bonqk3bwtnrkPU,23139
  annofabcli/task/subcommand_task.py,sha256=L_5Dwe58eblrtOrUYxjJAvkSmu6savRUxIqGjsFq-R4,2436
- annofabcli/task/update_metadata_of_task.py,sha256=3_BxVm9UPsJH9h3CiFoPaxQEx801Uehlsvzz1HOzKM4,12783
+ annofabcli/task/update_metadata_of_task.py,sha256=n8RmegF3A1r-ZDOzn6SzNoHuqNuzsSJM9xUJ6EXvTf0,12810
  annofabcli/task_history/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  annofabcli/task_history/download_task_history_json.py,sha256=xZnGNKkPYT6RXIUd_wUqZfPiVpGdW5MTBUSpNKbOAC4,2370
  annofabcli/task_history/list_all_task_history.py,sha256=VY99AEHTSG71O2HQXS81oPX6AAusbT1scika9QoUZEU,6780
@@ -200,8 +200,8 @@ annofabcli/task_history_event/download_task_history_event_json.py,sha256=hQLVbQ0
  annofabcli/task_history_event/list_all_task_history_event.py,sha256=JQEgwOIXbbTIfeX23AVaoySHViOR9UGm9uoXuhVEBqo,6446
  annofabcli/task_history_event/list_worktime.py,sha256=9jsRYa2C9bva8E1Aqxv9CCKDuCP0MvbiaIyQFTDpjqY,13150
  annofabcli/task_history_event/subcommand_task_history_event.py,sha256=mJVJoT4RXk4HWnY7-Nrsl4If-gtaIIEXd2z7eFZwM2I,1260
- annofabcli-1.96.0.dist-info/LICENSE,sha256=pcqWYfxFtxBzhvKp3x9MXNM4xciGb2eFewaRhXUNHlo,1081
- annofabcli-1.96.0.dist-info/METADATA,sha256=kHn5yPXYeuO3FK-MKUhhRcZ5vAhUakxbnLEOlqM6KsY,5626
- annofabcli-1.96.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- annofabcli-1.96.0.dist-info/entry_points.txt,sha256=A8vlN9fiMhbYRcdBfSpl7piYzAwvkMhRXIPQUAvQFUo,55
- annofabcli-1.96.0.dist-info/RECORD,,
+ annofabcli-1.97.0.dist-info/LICENSE,sha256=pcqWYfxFtxBzhvKp3x9MXNM4xciGb2eFewaRhXUNHlo,1081
+ annofabcli-1.97.0.dist-info/METADATA,sha256=71xtnSNlS97JSBeUVYWYcoas9gqXjL9v735krW5zioc,5626
+ annofabcli-1.97.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ annofabcli-1.97.0.dist-info/entry_points.txt,sha256=A8vlN9fiMhbYRcdBfSpl7piYzAwvkMhRXIPQUAvQFUo,55
+ annofabcli-1.97.0.dist-info/RECORD,,