flexllm-0.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. flexllm/__init__.py +224 -0
  2. flexllm/__main__.py +1096 -0
  3. flexllm/async_api/__init__.py +9 -0
  4. flexllm/async_api/concurrent_call.py +100 -0
  5. flexllm/async_api/concurrent_executor.py +1036 -0
  6. flexllm/async_api/core.py +373 -0
  7. flexllm/async_api/interface.py +12 -0
  8. flexllm/async_api/progress.py +277 -0
  9. flexllm/base_client.py +988 -0
  10. flexllm/batch_tools/__init__.py +16 -0
  11. flexllm/batch_tools/folder_processor.py +317 -0
  12. flexllm/batch_tools/table_processor.py +363 -0
  13. flexllm/cache/__init__.py +10 -0
  14. flexllm/cache/response_cache.py +293 -0
  15. flexllm/chain_of_thought_client.py +1120 -0
  16. flexllm/claudeclient.py +402 -0
  17. flexllm/client_pool.py +698 -0
  18. flexllm/geminiclient.py +563 -0
  19. flexllm/llm_client.py +523 -0
  20. flexllm/llm_parser.py +60 -0
  21. flexllm/mllm_client.py +559 -0
  22. flexllm/msg_processors/__init__.py +174 -0
  23. flexllm/msg_processors/image_processor.py +729 -0
  24. flexllm/msg_processors/image_processor_helper.py +485 -0
  25. flexllm/msg_processors/messages_processor.py +341 -0
  26. flexllm/msg_processors/unified_processor.py +1404 -0
  27. flexllm/openaiclient.py +256 -0
  28. flexllm/pricing/__init__.py +104 -0
  29. flexllm/pricing/data.json +1201 -0
  30. flexllm/pricing/updater.py +223 -0
  31. flexllm/provider_router.py +213 -0
  32. flexllm/token_counter.py +270 -0
  33. flexllm/utils/__init__.py +1 -0
  34. flexllm/utils/core.py +41 -0
  35. flexllm-0.3.3.dist-info/METADATA +573 -0
  36. flexllm-0.3.3.dist-info/RECORD +39 -0
  37. flexllm-0.3.3.dist-info/WHEEL +4 -0
  38. flexllm-0.3.3.dist-info/entry_points.txt +3 -0
  39. flexllm-0.3.3.dist-info/licenses/LICENSE +201 -0
flexllm/llm_client.py ADDED
@@ -0,0 +1,523 @@
+"""
+LLMClient - unified LLM client wrapper.
+
+Automatically selects OpenAIClient, GeminiClient, or ClaudeClient based on the configuration and exposes a single, unified interface.
+"""
+
+from typing import TYPE_CHECKING, List, Union, Optional, Literal
+
+from .base_client import LLMClientBase, ChatCompletionResult
+from .openaiclient import OpenAIClient
+from .geminiclient import GeminiClient
+from .claudeclient import ClaudeClient
+from .cache import ResponseCacheConfig
+
+if TYPE_CHECKING:
+    from .async_api.interface import RequestResult
+
+
+class LLMClient:
+    """
+    Unified LLM client supporting OpenAI-compatible APIs, the Gemini API, and the Claude API.
+
+    The underlying client is selected automatically based on the provider argument:
+    - "openai": uses OpenAIClient (for OpenAI, vLLM, Ollama, etc.)
+    - "gemini": uses GeminiClient (for Google Gemini)
+    - "claude": uses ClaudeClient (for Anthropic Claude)
+
+    All LLMClientBase methods can be called directly; they are delegated to the underlying client automatically.
+
+    Example (OpenAI-compatible):
+        >>> client = LLMClient(
+        ...     provider="openai",
+        ...     base_url="https://api.openai.com/v1",
+        ...     api_key="your-key",
+        ...     model="gpt-4",
+        ... )
+        >>> result = await client.chat_completions(messages)
+
+    Example (Gemini):
+        >>> client = LLMClient(
+        ...     provider="gemini",
+        ...     api_key="your-google-key",
+        ...     model="gemini-3-flash-preview",
+        ... )
+        >>> result = await client.chat_completions(messages)
+
+    Example (Claude):
+        >>> client = LLMClient(
+        ...     provider="claude",
+        ...     api_key="your-anthropic-key",
+        ...     model="claude-3-5-sonnet-20241022",
+        ... )
+        >>> result = await client.chat_completions(messages)
+
+    Example (thinking parameter - unified reasoning control):
+        >>> # Disable thinking (fast responses)
+        >>> result = client.chat_completions_sync(
+        ...     messages=[{"role": "user", "content": "1+1=?"}],
+        ...     thinking=False,
+        ... )
+        >>> # Enable thinking and retrieve the reasoning content
+        >>> result = client.chat_completions_sync(
+        ...     messages=[{"role": "user", "content": "1+1=?"}],
+        ...     thinking=True,
+        ...     return_raw=True,
+        ... )
+        >>> parsed = client.parse_thoughts(result.data)
+        >>> print("Thought:", parsed["thought"])
+        >>> print("Answer:", parsed["answer"])
+
+    thinking parameter values:
+        - False: disable thinking
+        - True: enable thinking and return the reasoning content
+        - "minimal"/"low"/"medium"/"high": set the reasoning depth (Gemini only)
+        - int: set budget_tokens (Claude only)
+        - None: use the model's default behavior
+    """
+
+    _client: LLMClientBase
+
+    def __init__(
+        self,
+        provider: Literal["openai", "gemini", "claude", "auto"] = "auto",
+        # Common parameters
+        base_url: str = None,
+        api_key: str = None,
+        model: str = None,
+        concurrency_limit: int = 10,
+        max_qps: int = None,  # the default differs per provider
+        timeout: int = 120,
+        retry_times: int = 3,
+        retry_delay: float = 1.0,
+        cache_image: bool = False,
+        cache_dir: str = "image_cache",
+        # Gemini/Vertex AI only
+        use_vertex_ai: bool = False,
+        project_id: str = None,
+        location: str = "us-central1",
+        credentials=None,
+        # Response cache configuration
+        cache: Optional[ResponseCacheConfig] = None,
+        **kwargs,
+    ):
+        """
+        Initialize the unified LLM client.
+
+        Args:
+            provider: Which provider to use
+                - "openai": OpenAI-compatible API
+                - "gemini": Google Gemini API
+                - "claude": Anthropic Claude API
+                - "auto": infer from base_url
+            base_url: API base URL
+            api_key: API key
+            model: Default model name
+            concurrency_limit: Maximum number of concurrent requests
+            max_qps: Maximum QPS (default 1000 for openai, 60 for gemini)
+            timeout: Request timeout
+            retry_times: Number of retries
+            retry_delay: Delay between retries
+            cache_image: Whether to cache images
+            cache_dir: Image cache directory
+            use_vertex_ai: Whether to use Vertex AI (Gemini only)
+            project_id: GCP project ID (Vertex AI only)
+            location: GCP region (Vertex AI only)
+            credentials: Google Cloud credentials (Vertex AI only)
+            cache: Response cache configuration; enabled by default (24-hour TTL)
+        """
+        # Infer the provider automatically
+        if provider == "auto":
+            provider = self._infer_provider(base_url, use_vertex_ai)
+
+        self._provider = provider
+        self._model = model
+
+        if provider == "gemini":
+            self._client = GeminiClient(
+                api_key=api_key,
+                model=model,
+                base_url=base_url,
+                concurrency_limit=concurrency_limit,
+                max_qps=max_qps if max_qps is not None else 60,
+                timeout=timeout,
+                retry_times=retry_times,
+                retry_delay=retry_delay,
+                cache_image=cache_image,
+                cache_dir=cache_dir,
+                cache=cache,
+                use_vertex_ai=use_vertex_ai,
+                project_id=project_id,
+                location=location,
+                credentials=credentials,
+                **kwargs,
+            )
+        elif provider == "claude":
+            if not api_key:
+                raise ValueError("The Claude provider requires an api_key")
+            self._client = ClaudeClient(
+                api_key=api_key,
+                model=model,
+                base_url=base_url,
+                concurrency_limit=concurrency_limit,
+                max_qps=max_qps if max_qps is not None else 60,
+                timeout=timeout,
+                retry_times=retry_times,
+                retry_delay=retry_delay,
+                cache_image=cache_image,
+                cache_dir=cache_dir,
+                cache=cache,
+                **kwargs,
+            )
+        else:  # openai
+            if not base_url:
+                raise ValueError("The OpenAI provider requires a base_url")
+            self._client = OpenAIClient(
+                base_url=base_url,
+                api_key=api_key or "EMPTY",
+                model=model,
+                concurrency_limit=concurrency_limit,
+                max_qps=max_qps if max_qps is not None else 1000,
+                timeout=timeout,
+                retry_times=retry_times,
+                retry_delay=retry_delay,
+                cache_image=cache_image,
+                cache_dir=cache_dir,
+                cache=cache,
+                **kwargs,
+            )
+
+    @staticmethod
+    def _infer_provider(base_url: str, use_vertex_ai: bool) -> str:
+        """Infer the provider from base_url."""
+        if use_vertex_ai:
+            return "gemini"
+        if base_url:
+            url_lower = base_url.lower()
+            if "generativelanguage.googleapis.com" in url_lower:
+                return "gemini"
+            if "aiplatform.googleapis.com" in url_lower:
+                return "gemini"
+            if "anthropic.com" in url_lower:
+                return "claude"
+        return "openai"
+
+    def __getattr__(self, name):
+        """Delegate any method not explicitly defined here to the underlying client."""
+        return getattr(self._client, name)
+
+    @property
+    def provider(self) -> str:
+        """Return the provider currently in use."""
+        return self._provider
+
+    @property
+    def client(self) -> LLMClientBase:
+        """Return the underlying client instance (for provider-specific features)."""
+        return self._client
+
+    # ========== Commonly used methods, defined explicitly for IDE autocompletion ==========
+
+    async def chat_completions(
+        self,
+        messages: List[dict],
+        model: str = None,
+        return_raw: bool = False,
+        return_usage: bool = False,
+        show_progress: bool = False,
+        preprocess_msg: bool = False,
+        **kwargs,
+    ) -> Union[str, ChatCompletionResult, "RequestResult"]:
+        """
+        Single chat completion.
+
+        Args:
+            messages: List of messages (OpenAI format)
+            model: Model name (optional; defaults to the value set at initialization)
+            return_raw: Whether to return the raw response (RequestResult)
+            return_usage: Whether to return a result that includes usage (ChatCompletionResult)
+            show_progress: Whether to display progress
+            preprocess_msg: Whether to preprocess messages (convert images to base64)
+            **kwargs: Additional parameters (max_tokens, temperature, thinking, etc.)
+
+        Returns:
+            - return_raw=True: the raw RequestResult
+            - return_usage=True: ChatCompletionResult (content, usage, reasoning_content)
+            - default: the content text as str
+
+        Note:
+            Caching is controlled by the cache argument given at initialization; it is skipped automatically when return_raw or return_usage is set.
+        """
+        return await self._client.chat_completions(
+            messages=messages,
+            model=model,
+            return_raw=return_raw,
+            return_usage=return_usage,
+            show_progress=show_progress,
+            preprocess_msg=preprocess_msg,
+            **kwargs,
+        )
+
+    def chat_completions_sync(
+        self,
+        messages: List[dict],
+        model: str = None,
+        return_raw: bool = False,
+        return_usage: bool = False,
+        **kwargs,
+    ) -> Union[str, ChatCompletionResult, "RequestResult"]:
+        """
+        Synchronous version of chat completion.
+
+        Args:
+            messages: List of messages (OpenAI format)
+            model: Model name
+            return_raw: Whether to return the raw response
+            return_usage: Whether to return a result that includes usage
+            **kwargs: Additional parameters (max_tokens, temperature, thinking, etc.)
+        """
+        return self._client.chat_completions_sync(
+            messages=messages,
+            model=model,
+            return_raw=return_raw,
+            return_usage=return_usage,
+            **kwargs,
+        )
+
+    async def chat_completions_batch(
+        self,
+        messages_list: List[List[dict]],
+        model: str = None,
+        return_raw: bool = False,
+        return_usage: bool = False,
+        show_progress: bool = True,
+        return_summary: bool = False,
+        preprocess_msg: bool = False,
+        output_jsonl: Optional[str] = None,
+        flush_interval: float = 1.0,
+        metadata_list: Optional[List[dict]] = None,
+        **kwargs,
+    ) -> Union[List[str], List[ChatCompletionResult], tuple]:
+        """
+        Batch chat completion (supports resuming interrupted runs).
+
+        Args:
+            messages_list: List of message lists
+            model: Model name
+            return_raw: Whether to return raw responses
+            return_usage: Whether to return results that include usage (a list of ChatCompletionResult)
+            show_progress: Whether to display a progress bar
+            return_summary: Whether to also return a statistics summary
+            preprocess_msg: Whether to preprocess messages
+            output_jsonl: Output file path (JSONL), used for resuming and persistence
+            flush_interval: File flush interval (seconds)
+            metadata_list: Metadata list, same length as messages_list; each item is stored with the corresponding output record
+            **kwargs: Additional parameters (max_tokens, temperature, thinking, etc.)
+
+        Returns:
+            - return_usage=True: List[ChatCompletionResult] or (List[ChatCompletionResult], summary)
+            - default: List[str] or (List[str], summary)
+
+        Note:
+            Caching is controlled by the cache argument given at initialization; it is skipped automatically when return_usage=True.
+        """
+        return await self._client.chat_completions_batch(
+            messages_list=messages_list,
+            model=model,
+            return_raw=return_raw,
+            return_usage=return_usage,
+            show_progress=show_progress,
+            return_summary=return_summary,
+            preprocess_msg=preprocess_msg,
+            output_jsonl=output_jsonl,
+            flush_interval=flush_interval,
+            metadata_list=metadata_list,
+            **kwargs,
+        )
+
+    def chat_completions_batch_sync(
+        self,
+        messages_list: List[List[dict]],
+        model: str = None,
+        return_raw: bool = False,
+        return_usage: bool = False,
+        show_progress: bool = True,
+        return_summary: bool = False,
+        output_jsonl: Optional[str] = None,
+        flush_interval: float = 1.0,
+        metadata_list: Optional[List[dict]] = None,
+        **kwargs,
+    ) -> Union[List[str], List[ChatCompletionResult], tuple]:
+        """Synchronous version of batch chat completion."""
+        return self._client.chat_completions_batch_sync(
+            messages_list=messages_list,
+            model=model,
+            return_raw=return_raw,
+            return_usage=return_usage,
+            show_progress=show_progress,
+            return_summary=return_summary,
+            output_jsonl=output_jsonl,
+            flush_interval=flush_interval,
+            metadata_list=metadata_list,
+            **kwargs,
+        )
+
+    async def iter_chat_completions_batch(
+        self,
+        messages_list: List[List[dict]],
+        model: str = None,
+        return_raw: bool = False,
+        return_usage: bool = False,
+        show_progress: bool = True,
+        preprocess_msg: bool = False,
+        output_jsonl: Optional[str] = None,
+        flush_interval: float = 1.0,
+        metadata_list: Optional[List[dict]] = None,
+        batch_size: int = None,
+        **kwargs,
+    ):
+        """
+        Iterative batch chat completion (yields results as requests finish).
+
+        Functionally the same as chat_completions_batch, but returns results one by one in a streaming fashion,
+        which saves memory when processing large batches.
+
+        Args:
+            messages_list: List of message lists
+            model: Model name
+            return_raw: Whether to return raw responses (affects what result.content contains)
+            return_usage: Whether to add a usage attribute to each result object
+            show_progress: Whether to display a progress bar
+            preprocess_msg: Whether to preprocess messages
+            output_jsonl: Output file path (JSONL)
+            flush_interval: File flush interval (seconds)
+            metadata_list: Metadata list, same length as messages_list; each item is stored with the corresponding output record
+            batch_size: Number of results returned per batch
+            **kwargs: Additional parameters (max_tokens, temperature, thinking, etc.)
+
+        Yields:
+            result: A result object with the following attributes
+                - content: the extracted content (str | dict)
+                - usage: token usage information (only when return_usage=True)
+                - original_idx: original index
+                - latency: request latency (seconds)
+                - status: status ('success', 'error', 'cached')
+                - error: error message (if any)
+                - data: raw response data
+                - summary: overall statistics (dict) on the last result, None on all others
+                    - total: total number of requests
+                    - success: number of successful requests
+                    - failed: number of failed requests
+                    - cached: number of cache hits
+                    - elapsed: total elapsed time (seconds)
+                    - avg_latency: average latency (seconds)
+
+        Note:
+            Caching is controlled by the cache argument given at initialization; it is skipped automatically when return_usage=True.
+        """
+        async for result in self._client.iter_chat_completions_batch(
+            messages_list=messages_list,
+            model=model,
+            return_raw=return_raw,
+            return_usage=return_usage,
+            show_progress=show_progress,
+            preprocess_msg=preprocess_msg,
+            output_jsonl=output_jsonl,
+            flush_interval=flush_interval,
+            metadata_list=metadata_list,
+            batch_size=batch_size,
+            **kwargs,
+        ):
+            yield result
+
+    async def chat_completions_stream(
+        self,
+        messages: List[dict],
+        model: str = None,
+        return_usage: bool = False,
+        preprocess_msg: bool = False,
+        timeout: int = None,
+        **kwargs,
+    ):
+        """
+        Streaming chat completion - yields the response token by token.
+
+        Args:
+            messages: List of messages
+            model: Model name
+            return_usage: Whether to return usage information. When True, dicts are yielded:
+                - {"type": "content", "content": "..."} for a content chunk
+                - {"type": "usage", "usage": {...}} for token usage (the final item)
+                When False (the default), plain str content chunks are yielded.
+            preprocess_msg: Whether to preprocess messages
+            timeout: Timeout (seconds)
+            **kwargs: Additional parameters (max_tokens, temperature, thinking, etc.)
+
+        Yields:
+            - return_usage=False: str content chunks
+            - return_usage=True: dicts containing a type field and the corresponding data
+        """
+        async for chunk in self._client.chat_completions_stream(
+            messages=messages,
+            model=model,
+            return_usage=return_usage,
+            preprocess_msg=preprocess_msg,
+            timeout=timeout,
+            **kwargs,
+        ):
+            yield chunk
+
+    def model_list(self) -> List[str]:
+        """Return the list of available models."""
+        return self._client.model_list()
+
+    def parse_thoughts(self, response_data: dict) -> dict:
+        """
+        Parse the reasoning content and the final answer out of a response.
+
+        The correct parsing method is chosen automatically based on the provider.
+
+        Args:
+            response_data: Raw response data (obtained with return_raw=True)
+
+        Returns:
+            dict: {
+                "thought": str,  # reasoning process (may be empty)
+                "answer": str,   # final answer
+            }
+
+        Example:
+            >>> result = client.chat_completions_sync(
+            ...     messages=[...],
+            ...     thinking=True,
+            ...     return_raw=True,
+            ... )
+            >>> parsed = client.parse_thoughts(result.data)
+            >>> print("Thought:", parsed["thought"])
+            >>> print("Answer:", parsed["answer"])
+        """
+        if self._provider == "gemini":
+            return GeminiClient.parse_thoughts(response_data)
+        elif self._provider == "claude":
+            return ClaudeClient.parse_thoughts(response_data)
+        else:
+            return OpenAIClient.parse_thoughts(response_data)
+
+    def close(self):
+        """Close the client and release resources."""
+        self._client.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        self.close()
+
+    def __repr__(self) -> str:
+        return f"LLMClient(provider='{self._provider}', model='{self._model}')"
flexllm/llm_parser.py ADDED
@@ -0,0 +1,60 @@
+import ast
+import re
+import json5
+from typing import Optional
+
+
+def extract_code_snippets(text, strict=True):
+    """Extract code snippets from text."""
+    # First handle code blocks fenced with ```
+    pattern = r"```(\w+)?\s*([\s\S]*?)```"
+    matches = re.findall(pattern, text)
+
+    code_snippets = []
+    for lang, code in matches:
+        code_snippets.append({
+            "language": lang.strip() if lang else "unknown",
+            "code": code.strip(),
+        })
+
+    if not strict:
+        # Remove the ``` ... ``` blocks that have already been handled
+        text = re.sub(pattern, "", text)
+
+        # Handle the remaining { ... } style blocks
+        pattern = r"\{[\s\S]*?\}"
+        matches = re.findall(pattern, text)
+
+        for code in matches:
+            code_snippets.append({
+                "language": "unknown",
+                "code": code.strip(),
+            })
+
+    return code_snippets
+
+
+def parse_to_obj(text: str, strict=False):
+    """Parse the last code snippet in text into a Python object."""
+    code_snippets = extract_code_snippets(text, strict=strict)
+    code_snippets = [code_snippet["code"] for code_snippet in code_snippets]
+    code_snippets = [code_snippet.strip() for code_snippet in code_snippets if code_snippet.strip()]
+    if not code_snippets:
+        return None
+    code_str = code_snippets[-1]
+    try:
+        return ast.literal_eval(code_str)
+    except Exception:
+        return json5.loads(code_str)
+
+
+def parse_to_code(text: str, strict=False) -> Optional[str]:
+    """Return the last code snippet found in text."""
+    code_snippets = extract_code_snippets(text, strict=strict)
+    code_snippets = [code_snippet["code"] for code_snippet in code_snippets]
+    code_snippets = [code_snippet.strip() for code_snippet in code_snippets if code_snippet.strip()]
+    if not code_snippets:
+        return None
+    code_str = code_snippets[-1]
+    return code_str
+
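
A short sketch of how the parsing helpers above behave on a typical model reply. The expected values in the comments follow from the regular expressions in extract_code_snippets and assume json5 is installed (it is imported by the module).

    from flexllm.llm_parser import extract_code_snippets, parse_to_code, parse_to_obj

    reply = (
        "Here is the result:\n"
        "```json\n"
        '{"score": 0.9, "label": "ok"}\n'
        "```"
    )

    # strict=True (default): only fenced ``` blocks are considered.
    print(extract_code_snippets(reply))
    # -> [{'language': 'json', 'code': '{"score": 0.9, "label": "ok"}'}]

    # parse_to_obj tries ast.literal_eval first, then falls back to json5.
    print(parse_to_obj(reply))   # -> {'score': 0.9, 'label': 'ok'}

    # parse_to_code returns the last snippet as raw text.
    print(parse_to_code(reply))  # the raw snippet text
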