botrun-flow-lang 5.12.264__py3-none-any.whl → 6.2.61__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/langgraph_api.py +8 -3
- botrun_flow_lang/api/langgraph_constants.py +11 -0
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +17 -11
- botrun_flow_lang/langgraph_agents/agents/util/custom_vertex_claude.py +406 -0
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +58 -16
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +112 -36
- botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
- botrun_flow_lang/mcp_server/default_mcp.py +152 -42
- {botrun_flow_lang-5.12.264.dist-info → botrun_flow_lang-6.2.61.dist-info}/METADATA +5 -6
- {botrun_flow_lang-5.12.264.dist-info → botrun_flow_lang-6.2.61.dist-info}/RECORD +11 -8
- {botrun_flow_lang-5.12.264.dist-info → botrun_flow_lang-6.2.61.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py

@@ -11,11 +11,13 @@ import asyncio
 import base64
 import httpx
 import os
-from typing import List, Dict, Any
+from typing import List, Dict, Any, Tuple
 
 from dotenv import load_dotenv
 from google.oauth2 import service_account
 
+from botrun_flow_lang.langgraph_agents.agents.util.usage_metadata import UsageMetadata
+
 load_dotenv()
 
 # File size threshold (MB)
@@ -30,16 +32,17 @@ MAX_CONCURRENT_CHUNKS = 5
 
 def analyze_pdf_with_claude(
     pdf_data: str, user_input: str, model_name: str = "claude-sonnet-4-5-20250929"
-):
+) -> Tuple[str, UsageMetadata]:
     """
     Analyze a PDF file using Claude API
 
     Args:
         pdf_data: Base64-encoded PDF data
         user_input: User's query about the PDF content
+        model_name: Claude model name to use
 
     Returns:
-        str: Claude's analysis
+        Tuple[str, UsageMetadata]: Claude's analysis and usage metadata
     """
     # Initialize Anthropic client
     client = anthropic.Anthropic()
@@ -66,15 +69,25 @@ def analyze_pdf_with_claude(
         ],
     )
 
+    # Extract usage metadata
+    usage = UsageMetadata(
+        prompt_tokens=message.usage.input_tokens,
+        completion_tokens=message.usage.output_tokens,
+        total_tokens=message.usage.input_tokens + message.usage.output_tokens,
+        cache_creation_input_tokens=getattr(message.usage, 'cache_creation_input_tokens', 0) or 0,
+        cache_read_input_tokens=getattr(message.usage, 'cache_read_input_tokens', 0) or 0,
+        model=model_name,
+    )
+
     print(
         f"analyze_pdf_with_claude============> input_token: {message.usage.input_tokens} output_token: {message.usage.output_tokens}",
     )
-    return message.content[0].text
+    return message.content[0].text, usage
 
 
 def analyze_pdf_with_gemini(
     pdf_data: str, user_input: str, model_name: str = "gemini-2.5-flash", pdf_url: str = ""
-):
+) -> Tuple[str, UsageMetadata]:
     """
     Analyze a PDF file using Gemini API
 
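Not part of the diff itself: a minimal caller sketch of the new `(text, usage)` return of `analyze_pdf_with_claude`, assuming the function is imported from the `pdf_analyzer` module listed above and that a base64-encoded PDF is already at hand (the file name and question are placeholders).

```python
import base64

# Illustrative only: import path taken from the file list above.
from botrun_flow_lang.langgraph_agents.agents.util.pdf_analyzer import analyze_pdf_with_claude

with open("example.pdf", "rb") as f:  # placeholder PDF
    pdf_b64 = base64.b64encode(f.read()).decode("utf-8")

answer, usage = analyze_pdf_with_claude(pdf_b64, "What are the key findings?")
print(answer)
print(usage.model, usage.prompt_tokens, usage.completion_tokens)
```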
@@ -82,9 +95,10 @@ def analyze_pdf_with_gemini(
         pdf_data: Base64-encoded PDF data
         user_input: User's query about the PDF content
         model_name: Gemini model name to use
+        pdf_url: Original PDF URL for logging
 
     Returns:
-        str: Gemini's analysis
+        Tuple[str, UsageMetadata]: Gemini's analysis and usage metadata
     """
     # Import only when needed, otherwise loading takes time
     from google import genai
@@ -112,14 +126,25 @@ def analyze_pdf_with_gemini(
             ),
         ],
     )
-
+
+    # Extract usage metadata
+    usage = UsageMetadata(model=model_name)
     if hasattr(response, "usage_metadata"):
+        usage_meta = response.usage_metadata
+        usage = UsageMetadata(
+            prompt_tokens=getattr(usage_meta, 'prompt_token_count', 0) or 0,
+            completion_tokens=getattr(usage_meta, 'candidates_token_count', 0) or 0,
+            total_tokens=getattr(usage_meta, 'total_token_count', 0) or 0,
+            cache_creation_input_tokens=0,
+            cache_read_input_tokens=getattr(usage_meta, 'cached_content_token_count', 0) or 0,
+            model=model_name,
+        )
         print(
-            f"analyze_pdf_with_gemini============> input_token: {
+            f"analyze_pdf_with_gemini============> input_token: {usage_meta.prompt_token_count} output_token: {usage_meta.candidates_token_count}",
         )
 
     print(f"{pdf_url} success")
-    return response.text
+    return response.text, usage
 
 
 def _analyze_single_chunk(
@@ -135,7 +160,7 @@ def _analyze_single_chunk(
         model_name: name of the model to use
 
     Returns:
-        Dict: {"page_range": str, "answer": str, "relevant": bool, "error": str|None}
+        Dict: {"page_range": str, "answer": str, "relevant": bool, "error": str|None, "usage": UsageMetadata}
     """
     # Build the chunk-specific prompt
     chunk_prompt = f"""你正在閱讀一份大型 PDF 文件的其中一部分({page_range})。
@@ -149,15 +174,16 @@ def _analyze_single_chunk(
 
     try:
         if model_name.startswith("gemini-"):
-            answer = analyze_pdf_with_gemini(chunk_data, chunk_prompt, model_name)
+            answer, usage = analyze_pdf_with_gemini(chunk_data, chunk_prompt, model_name)
         elif model_name.startswith("claude-"):
-            answer = analyze_pdf_with_claude(chunk_data, chunk_prompt, model_name)
+            answer, usage = analyze_pdf_with_claude(chunk_data, chunk_prompt, model_name)
         else:
             return {
                 "page_range": page_range,
                 "answer": "",
                 "relevant": False,
                 "error": f"Unknown model type: {model_name}",
+                "usage": UsageMetadata(),
             }
 
         # Determine whether the chunk is relevant
@@ -168,6 +194,7 @@ def _analyze_single_chunk(
             "answer": answer if is_relevant else "",
             "relevant": is_relevant,
             "error": None,
+            "usage": usage,
         }
 
     except Exception as e:
@@ -179,12 +206,13 @@ def _analyze_single_chunk(
             "answer": "",
             "relevant": False,
             "error": str(e),
+            "usage": UsageMetadata(model=model_name),
         }
 
 
 async def analyze_pdf_chunks_parallel(
     chunks: List[tuple], user_input: str, model_name: str, max_concurrent: int = 5
-) -> List[Dict[str, Any]]:
+) -> Tuple[List[Dict[str, Any]], List[UsageMetadata]]:
     """
     Ask questions over multiple PDF chunks in parallel
 
@@ -195,7 +223,7 @@ async def analyze_pdf_chunks_parallel(
         max_concurrent: maximum number of parallel calls
 
     Returns:
-        List[Dict]:
+        Tuple[List[Dict], List[UsageMetadata]]: per-chunk answer results and the usage list for each call
     """
     semaphore = asyncio.Semaphore(max_concurrent)
 
@@ -224,8 +252,9 @@
     # Run in parallel
     results = await asyncio.gather(*tasks, return_exceptions=True)
 
-    #
+    # Handle exceptions and collect the usage list
    processed_results = []
+    usage_list = []
     for i, result in enumerate(results):
         if isinstance(result, Exception):
             processed_results.append(
@@ -234,19 +263,24 @@
                     "answer": "",
                     "relevant": False,
                     "error": str(result),
+                    "usage": UsageMetadata(model=model_name),
                 }
             )
+            usage_list.append(UsageMetadata(model=model_name))
         else:
             processed_results.append(result)
+            # Collect usage
+            if "usage" in result and isinstance(result["usage"], UsageMetadata):
+                usage_list.append(result["usage"])
 
-    return processed_results
+    return processed_results, usage_list
 
 
 def merge_chunk_results(
     chunk_results: List[Dict[str, Any]],
     user_input: str,
     model_name: str = "gemini-2.5-flash",
-) -> str:
+) -> Tuple[str, UsageMetadata]:
     """
     Use an LLM to merge the answers from multiple chunks
 
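As a sketch of how the new second return value might be consumed (illustrative, not from the package; the chunk tuples are prepared by the module's splitting step and their exact shape is not shown in this diff), the per-chunk `UsageMetadata` objects can be totalled through the dataclass's `__add__`:

```python
from botrun_flow_lang.langgraph_agents.agents.util.pdf_analyzer import analyze_pdf_chunks_parallel
from botrun_flow_lang.langgraph_agents.agents.util.usage_metadata import UsageMetadata

async def ask_chunks(chunks, question):
    # chunks: the tuples produced upstream by the PDF splitting step; treated as opaque here.
    results, usage_list = await analyze_pdf_chunks_parallel(
        chunks, question, "gemini-2.5-flash", max_concurrent=5
    )
    total = sum(usage_list, UsageMetadata())  # field-wise totals via UsageMetadata.__add__
    return results, total
```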
@@ -256,7 +290,7 @@ def merge_chunk_results(
         model_name: model name to use for merging
 
     Returns:
-        str:
+        Tuple[str, UsageMetadata]: the merged answer and its usage metadata
     """
     # Filter for the relevant answers
     relevant_results = [r for r in chunk_results if r.get("relevant", False)]
@@ -266,12 +300,12 @@
         error_results = [r for r in chunk_results if r.get("error")]
         if error_results:
             error_msgs = [f"{r['page_range']}: {r['error']}" for r in error_results]
-            return f"分析 PDF 時發生錯誤:\n" + "\n".join(error_msgs)
-        return "在 PDF 文件中未找到與您問題相關的內容。"
+            return f"分析 PDF 時發生錯誤:\n" + "\n".join(error_msgs), UsageMetadata(model=model_name)
+        return "在 PDF 文件中未找到與您問題相關的內容。", UsageMetadata(model=model_name)
 
-    #
+    # Only one relevant result: return it directly (no extra LLM call needed)
     if len(relevant_results) == 1:
-        return relevant_results[0]["answer"]
+        return relevant_results[0]["answer"], UsageMetadata(model=model_name)
 
     # Multiple relevant results need to be merged
     combined_content = "\n\n".join(
@@ -310,22 +344,33 @@
             contents=[merge_prompt],
         )
 
+        # Extract usage metadata
+        usage = UsageMetadata(model=model_name)
         if hasattr(response, "usage_metadata"):
+            usage_meta = response.usage_metadata
+            usage = UsageMetadata(
+                prompt_tokens=getattr(usage_meta, 'prompt_token_count', 0) or 0,
+                completion_tokens=getattr(usage_meta, 'candidates_token_count', 0) or 0,
+                total_tokens=getattr(usage_meta, 'total_token_count', 0) or 0,
+                cache_creation_input_tokens=0,
+                cache_read_input_tokens=getattr(usage_meta, 'cached_content_token_count', 0) or 0,
+                model=model_name,
+            )
             print(
-                f"merge_chunk_results============> input_token: {
+                f"merge_chunk_results============> input_token: {usage_meta.prompt_token_count} output_token: {usage_meta.candidates_token_count}",
             )
 
-        return response.text
+        return response.text, usage
 
     except Exception as e:
         import traceback
 
         traceback.print_exc()
         # Merging failed: return the combined content directly
-        return f"統整時發生錯誤,以下是各部分的回答:\n\n{combined_content}"
+        return f"統整時發生錯誤,以下是各部分的回答:\n\n{combined_content}", UsageMetadata(model=model_name)
 
 
-async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
+async def analyze_pdf_async(pdf_url: str, user_input: str) -> Dict[str, Any]:
     """
     Analyze a PDF file asynchronously (smart processing strategy)
 
@@ -338,8 +383,13 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
         user_input: the user's question
 
     Returns:
-        str:
+        Dict[str, Any]: {
+            "result": str,  # the analysis result
+            "usage_metadata": List[Dict]  # usage info for each LLM call
+        }
     """
+    usage_list: List[UsageMetadata] = []
+
     try:
         # 1. Download the PDF
         print(f"[analyze_pdf_async] 下載 PDF: {pdf_url}")
@@ -364,9 +414,19 @@
             for model in models:
                 try:
                     if model.startswith("gemini-"):
-
+                        result, usage = analyze_pdf_with_gemini(pdf_data, user_input, model, pdf_url)
+                        usage_list.append(usage)
+                        return {
+                            "result": result,
+                            "usage_metadata": [u.to_dict() for u in usage_list],
+                        }
                     elif model.startswith("claude-"):
-
+                        result, usage = analyze_pdf_with_claude(pdf_data, user_input, model)
+                        usage_list.append(usage)
+                        return {
+                            "result": result,
+                            "usage_metadata": [u.to_dict() for u in usage_list],
+                        }
                 except Exception as e:
                     import traceback
 
@@ -374,7 +434,10 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
                     last_error = str(e)
                     continue
 
-            return
+            return {
+                "result": f"分析 PDF 時所有模型都失敗。最後錯誤: {last_error}",
+                "usage_metadata": [u.to_dict() for u in usage_list],
+            }
 
         # 3. Large file: compress → split → parallel Q&A → merge
         print(f"[analyze_pdf_async] 大檔模式 (>= {PDF_SIZE_THRESHOLD_MB}MB)")
@@ -427,9 +490,10 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
 
         # 3.3 Parallel Q&A
         print(f"[analyze_pdf_async] 開始平行問答 (最大並行: {MAX_CONCURRENT_CHUNKS})...")
-        chunk_results = await analyze_pdf_chunks_parallel(
+        chunk_results, chunk_usage_list = await analyze_pdf_chunks_parallel(
             chunks, user_input, primary_model, max_concurrent=MAX_CONCURRENT_CHUNKS
         )
+        usage_list.extend(chunk_usage_list)
 
         # Tally the results
         relevant_count = sum(1 for r in chunk_results if r.get("relevant", False))
@@ -441,19 +505,28 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
 
         # 3.4 Merge the results
         print("[analyze_pdf_async] 統整結果...")
-        result = merge_chunk_results(chunk_results, user_input, primary_model)
+        result, merge_usage = merge_chunk_results(chunk_results, user_input, primary_model)
+        # Only record merge_usage when it reflects actual token usage (avoid adding empty usage entries)
+        if merge_usage.prompt_tokens > 0 or merge_usage.completion_tokens > 0:
+            usage_list.append(merge_usage)
         print("[analyze_pdf_async] 完成")
 
-        return
+        return {
+            "result": result,
+            "usage_metadata": [u.to_dict() for u in usage_list],
+        }
 
     except Exception as e:
         import traceback
 
         traceback.print_exc()
-        return
+        return {
+            "result": f"分析 PDF {pdf_url} 時發生錯誤: {str(e)}",
+            "usage_metadata": [u.to_dict() for u in usage_list],
+        }
 
 
-def analyze_pdf(pdf_url: str, user_input: str) -> str:
+def analyze_pdf(pdf_url: str, user_input: str) -> Dict[str, Any]:
     """
     Analyze a PDF file (synchronous wrapper)
 
@@ -465,7 +538,10 @@ def analyze_pdf(pdf_url: str, user_input: str) -> str:
         user_input: the user's question
 
     Returns:
-        str:
+        Dict[str, Any]: {
+            "result": str,  # the analysis result
+            "usage_metadata": List[Dict]  # usage info for each LLM call
+        }
     """
     try:
         # Try to get the existing event loop
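Not part of the diff: a hedged sketch of reading the new dict-shaped result of `analyze_pdf` (the URL and question are placeholders).

```python
from botrun_flow_lang.langgraph_agents.agents.util.pdf_analyzer import analyze_pdf

response = analyze_pdf("https://example.com/sample.pdf", "Summarize the conclusions.")
print(response["result"])
for call in response["usage_metadata"]:
    # Each entry is UsageMetadata.to_dict(): prompt_tokens, completion_tokens, total_tokens, model, ...
    print(call["model"], call["total_tokens"])
```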
botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py (new file)

@@ -0,0 +1,34 @@
+"""
+Usage Metadata module
+
+Provides token usage tracking for LLM calls.
+"""
+
+from dataclasses import dataclass, asdict
+from typing import Dict, Any
+
+
+@dataclass
+class UsageMetadata:
+    """Token usage metadata that matches the expected parsing format."""
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    total_tokens: int = 0
+    cache_creation_input_tokens: int = 0
+    cache_read_input_tokens: int = 0
+    model: str = ""
+
+    def __add__(self, other: "UsageMetadata") -> "UsageMetadata":
+        """Combine two UsageMetadata objects."""
+        return UsageMetadata(
+            prompt_tokens=self.prompt_tokens + other.prompt_tokens,
+            completion_tokens=self.completion_tokens + other.completion_tokens,
+            total_tokens=self.total_tokens + other.total_tokens,
+            cache_creation_input_tokens=self.cache_creation_input_tokens + other.cache_creation_input_tokens,
+            cache_read_input_tokens=self.cache_read_input_tokens + other.cache_read_input_tokens,
+            model=self.model or other.model,
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary."""
+        return asdict(self)