pydantic-ai-slim 0.1.2__tar.gz → 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pydantic-ai-slim might be problematic.
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_agent_graph.py +13 -1
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/agent.py +1 -1
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/common_tools/duckduckgo.py +0 -2
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/common_tools/tavily.py +0 -2
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/messages.py +2 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/__init__.py +2 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/anthropic.py +1 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/bedrock.py +7 -8
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/gemini.py +1 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/groq.py +1 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/instrumented.py +6 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/openai.py +2 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/settings.py +10 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/.gitignore +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/README.md +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_pydantic.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/format_prompt.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/_json_schema.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.1.2 → pydantic_ai_slim-0.1.3}/pyproject.toml +0 -0
PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.1.2
+Version: 0.1.3
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>
 License-Expression: MIT
@@ -29,7 +29,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.1.2
+Requires-Dist: pydantic-graph==0.1.3
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: anthropic
@@ -45,7 +45,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.1.2; extra == 'evals'
+Requires-Dist: pydantic-evals==0.1.3; extra == 'evals'
 Provides-Extra: groq
 Requires-Dist: groq>=0.15.0; extra == 'groq'
 Provides-Extra: logfire
```
pydantic_ai/_agent_graph.py

```diff
@@ -427,6 +427,18 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 # No events are emitted during the handling of text responses, so we don't need to yield anything
                 self._next_node = await self._handle_text_response(ctx, texts)
             else:
+                # we've got an empty response, this sometimes happens with anthropic (and perhaps other models)
+                # when the model has already returned text along side tool calls
+                # in this scenario, if text responses are allowed, we return text from the most recent model
+                # response, if any
+                if allow_text_output(ctx.deps.output_schema):
+                    for message in reversed(ctx.state.message_history):
+                        if isinstance(message, _messages.ModelResponse):
+                            last_texts = [p.content for p in message.parts if isinstance(p, _messages.TextPart)]
+                            if last_texts:
+                                self._next_node = await self._handle_text_response(ctx, last_texts)
+                                return
+
                 raise exceptions.UnexpectedModelBehavior('Received empty model response')

         self._events_iterator = _run_stream()
@@ -530,6 +542,7 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):

         text = '\n\n'.join(texts)
         if allow_text_output(output_schema):
+            # The following cast is safe because we know `str` is an allowed result type
             result_data_input = cast(NodeRunEndT, text)
             try:
                 result_data = await _validate_output(result_data_input, ctx, None)
@@ -537,7 +550,6 @@ class CallToolsNode(AgentNode[DepsT, NodeRunEndT]):
                 ctx.state.increment_retries(ctx.deps.max_result_retries)
                 return ModelRequestNode[DepsT, NodeRunEndT](_messages.ModelRequest(parts=[e.tool_retry]))
             else:
-                # The following cast is safe because we know `str` is an allowed result type
                 return self._handle_final_result(ctx, result.FinalResult(result_data, None, None), [])
         else:
             ctx.state.increment_retries(ctx.deps.max_result_retries)
```
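In short, an empty model response is no longer always fatal: when plain-text output is allowed, the node falls back to the text parts of the most recent model response in the history. A minimal self-contained sketch of that scan, using simplified stand-ins for the real pydantic_ai.messages classes:

```python
from dataclasses import dataclass, field
from typing import Any

@dataclass
class TextPart:  # stand-in for pydantic_ai.messages.TextPart
    content: str

@dataclass
class ModelResponse:  # stand-in for pydantic_ai.messages.ModelResponse
    parts: list[Any] = field(default_factory=list)

def most_recent_texts(history: list[Any]) -> list[str]:
    """Walk the history newest-first, returning the first non-empty set of text parts."""
    for message in reversed(history):
        if isinstance(message, ModelResponse):
            texts = [p.content for p in message.parts if isinstance(p, TextPart)]
            if texts:
                return texts
    return []

history = [ModelResponse(parts=[TextPart('final answer')]), ModelResponse(parts=[])]
assert most_recent_texts(history) == ['final answer']
```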
pydantic_ai/agent.py

```diff
@@ -659,7 +659,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             start_node,
             state=state,
             deps=graph_deps,
-            span=use_span(run_span, end_on_exit=True),
+            span=use_span(run_span, end_on_exit=True) if run_span.is_recording() else None,
             infer_name=False,
         ) as graph_run:
             yield AgentRun(graph_run)
```
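The guard avoids entering use_span for spans that will never be exported. A quick illustration with only opentelemetry-api installed and no SDK configured (the tracer name is arbitrary):

```python
from opentelemetry import trace

# With no OpenTelemetry SDK configured, the default tracer provider hands out
# non-recording spans, so there is nothing for use_span to usefully wrap.
tracer = trace.get_tracer('demo')
span = tracer.start_span('agent run')
print(span.is_recording())  # False under the default no-op provider
span.end()  # no-op for a non-recording span
```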
pydantic_ai/common_tools/duckduckgo.py

```diff
@@ -54,8 +54,6 @@ class DuckDuckGoSearchTool:
         """
         search = functools.partial(self.client.text, max_results=self.max_results)
         results = await anyio.to_thread.run_sync(search, query)
-        if len(results) == 0:
-            raise RuntimeError('No search results found.')
         return duckduckgo_ta.validate_python(results)
 
 
```
pydantic_ai/common_tools/tavily.py

```diff
@@ -63,8 +63,6 @@ class TavilySearchTool:
             The search results.
         """
         results = await self.client.search(query, search_depth=search_deep, topic=topic, time_range=time_range)  # type: ignore[reportUnknownMemberType]
-        if not results['results']:
-            raise RuntimeError('No search results found.')
         return tavily_search_ta.validate_python(results['results'])
 
 
```
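Both search tools previously raised RuntimeError when a search came back empty; now an empty list flows through validation and is returned to the caller. A rough illustration of why that is safe, using a generic TypeAdapter rather than the tools' actual result models:

```python
from pydantic import TypeAdapter

# Stand-in for duckduckgo_ta / tavily_search_ta: an empty result list
# validates cleanly, so callers get [] instead of an exception.
results_ta = TypeAdapter(list[dict])
assert results_ta.validate_python([]) == []
```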
pydantic_ai/messages.py

```diff
@@ -508,6 +508,8 @@ class ToolCallPart:
         """
         if isinstance(self.args, dict):
             return self.args
+        if isinstance(self.args, str) and not self.args:
+            return {}
         args = pydantic_core.from_json(self.args)
         assert isinstance(args, dict), 'args should be a dict'
         return cast(dict[str, Any], args)
```
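The guard matters because pydantic_core.from_json rejects an empty string, which some providers emit for tool calls that take no arguments. A small repro of the failure mode the new branch avoids (assuming pydantic_core is installed):

```python
import pydantic_core

# Without the guard, an empty args string reaches from_json and raises.
try:
    pydantic_core.from_json('')
except ValueError as exc:
    print(f'from_json failed: {exc}')

# With the guard, empty-string args map to an empty dict instead.
args = ''
print({} if isinstance(args, str) and not args else pydantic_core.from_json(args))  # {}
```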
pydantic_ai/models/__init__.py

```diff
@@ -106,6 +106,7 @@ KnownModelName = TypeAliasType(
     'google-gla:gemini-2.0-flash',
     'google-gla:gemini-2.0-flash-lite-preview-02-05',
     'google-gla:gemini-2.0-pro-exp-02-05',
+    'google-gla:gemini-2.5-flash-preview-04-17',
     'google-gla:gemini-2.5-pro-exp-03-25',
     'google-gla:gemini-2.5-pro-preview-03-25',
     'google-vertex:gemini-1.0-pro',
@@ -118,6 +119,7 @@ KnownModelName = TypeAliasType(
     'google-vertex:gemini-2.0-flash',
     'google-vertex:gemini-2.0-flash-lite-preview-02-05',
     'google-vertex:gemini-2.0-pro-exp-02-05',
+    'google-vertex:gemini-2.5-flash-preview-04-17',
     'google-vertex:gemini-2.5-pro-exp-03-25',
     'google-vertex:gemini-2.5-pro-preview-03-25',
     'gpt-3.5-turbo',
```
pydantic_ai/models/anthropic.py

```diff
@@ -239,6 +239,7 @@ class AnthropicModel(Model):
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 metadata=model_settings.get('anthropic_metadata', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
```
pydantic_ai/models/bedrock.py

```diff
@@ -2,10 +2,11 @@ from __future__ import annotations
 
 import functools
 import typing
-from collections.abc import AsyncIterator, Iterable, Mapping
+from collections.abc import AsyncIterator, Iterable, Iterator, Mapping
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
 from datetime import datetime
+from itertools import count
 from typing import TYPE_CHECKING, Any, Generic, Literal, Union, cast, overload
 
 import anyio
@@ -369,13 +370,14 @@ class BedrockConverseModel(Model):
         """Just maps a `pydantic_ai.Message` to the Bedrock `MessageUnionTypeDef`."""
         system_prompt: list[SystemContentBlockTypeDef] = []
         bedrock_messages: list[MessageUnionTypeDef] = []
+        document_count: Iterator[int] = count(1)
         for m in messages:
             if isinstance(m, ModelRequest):
                 for part in m.parts:
                     if isinstance(part, SystemPromptPart):
                         system_prompt.append({'text': part.content})
                     elif isinstance(part, UserPromptPart):
-                        bedrock_messages.extend(await self._map_user_prompt(part))
+                        bedrock_messages.extend(await self._map_user_prompt(part, document_count))
                     elif isinstance(part, ToolReturnPart):
                         assert part.tool_call_id is not None
                         bedrock_messages.append(
@@ -430,20 +432,18 @@ class BedrockConverseModel(Model):
         return system_prompt, bedrock_messages
 
     @staticmethod
-    async def _map_user_prompt(part: UserPromptPart) -> list[MessageUnionTypeDef]:
+    async def _map_user_prompt(part: UserPromptPart, document_count: Iterator[int]) -> list[MessageUnionTypeDef]:
         content: list[ContentBlockUnionTypeDef] = []
         if isinstance(part.content, str):
             content.append({'text': part.content})
         else:
-            document_count = 0
             for item in part.content:
                 if isinstance(item, str):
                     content.append({'text': item})
                 elif isinstance(item, BinaryContent):
                     format = item.format
                     if item.is_document:
-                        document_count += 1
-                        name = f'Document {document_count}'
+                        name = f'Document {next(document_count)}'
                         assert format in ('pdf', 'txt', 'csv', 'doc', 'docx', 'xls', 'xlsx', 'html', 'md')
                         content.append({'document': {'name': name, 'format': format, 'source': {'bytes': item.data}}})
                     elif item.is_image:
@@ -464,8 +464,7 @@ class BedrockConverseModel(Model):
                     content.append({'image': image})
 
                 elif item.kind == 'document-url':
-                    document_count += 1
-                    name = f'Document {document_count}'
+                    name = f'Document {next(document_count)}'
                     data = response.content
                     content.append({'document': {'name': name, 'format': item.format, 'source': {'bytes': data}}})
```
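The effect of threading one itertools.count iterator through _map_user_prompt is that document names stay unique across the whole message history, where the old per-call counter restarted at 1 for every user prompt. In isolation:

```python
from itertools import count
from typing import Iterator

counter: Iterator[int] = count(1)  # one shared counter per conversation

def name_documents(n: int, document_count: Iterator[int]) -> list[str]:
    # mimics _map_user_prompt drawing names from the shared iterator
    return [f'Document {next(document_count)}' for _ in range(n)]

print(name_documents(2, counter))  # ['Document 1', 'Document 2']
print(name_documents(1, counter))  # ['Document 3'], not 'Document 1' again
```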
pydantic_ai/models/groq.py

```diff
@@ -218,6 +218,7 @@ class GroqModel(Model):
                 frequency_penalty=model_settings.get('frequency_penalty', NOT_GIVEN),
                 logit_bias=model_settings.get('logit_bias', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
```
pydantic_ai/models/instrumented.py

```diff
@@ -261,9 +261,11 @@ class InstrumentedModel(WrapperModel):
     @staticmethod
     def messages_to_otel_events(messages: list[ModelMessage]) -> list[Event]:
         events: list[Event] = []
+        last_model_request: ModelRequest | None = None
         for message_index, message in enumerate(messages):
             message_events: list[Event] = []
             if isinstance(message, ModelRequest):
+                last_model_request = message
                 for part in message.parts:
                     if hasattr(part, 'otel_event'):
                         message_events.append(part.otel_event())
@@ -275,6 +277,10 @@ class InstrumentedModel(WrapperModel):
                     **(event.attributes or {}),
                 }
             events.extend(message_events)
+        if last_model_request and last_model_request.instructions:
+            events.insert(
+                0, Event('gen_ai.system.message', body={'content': last_model_request.instructions, 'role': 'system'})
+            )
         for event in events:
             event.body = InstrumentedModel.serialize_any(event.body)
         return events
```
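The net effect: when the latest ModelRequest carries instructions, the OTel event stream gains one synthesized gen_ai.system.message at index 0, ahead of all per-part events. A plain-data illustration of the ordering (dicts standing in for real Event objects):

```python
# dicts stand in for opentelemetry Event objects
events = [
    {'name': 'gen_ai.user.message', 'body': {'content': 'Hi'}},
    {'name': 'gen_ai.assistant.message', 'body': {'content': 'Hello!'}},
]
instructions = 'Be concise.'
if instructions:
    events.insert(0, {'name': 'gen_ai.system.message', 'body': {'content': instructions, 'role': 'system'}})

print([e['name'] for e in events])
# ['gen_ai.system.message', 'gen_ai.user.message', 'gen_ai.assistant.message']
```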
pydantic_ai/models/openai.py

```diff
@@ -284,6 +284,7 @@ class OpenAIModel(Model):
                 reasoning_effort=model_settings.get('openai_reasoning_effort', NOT_GIVEN),
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
@@ -623,6 +624,7 @@ class OpenAIResponsesModel(Model):
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 extra_headers={'User-Agent': get_user_agent()},
+                extra_body=model_settings.get('extra_body'),
             )
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
```
pydantic_ai/settings.py

```diff
@@ -141,6 +141,16 @@ class ModelSettings(TypedDict, total=False):
     * Cohere
     """
 
+    extra_body: object
+    """Extra body to send to the model.
+
+    Supported by:
+
+    * OpenAI
+    * Anthropic
+    * Groq
+    """
+
 
 def merge_model_settings(base: ModelSettings | None, overrides: ModelSettings | None) -> ModelSettings | None:
     """Merge two sets of model settings, preferring the overrides.
```
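Putting it together, the new extra_body setting is supplied like any other model_settings entry and forwarded verbatim to the OpenAI, Anthropic, or Groq client. A usage sketch (the model name and payload are illustrative, and running it needs the matching provider extra and API key):

```python
from pydantic_ai import Agent

agent = Agent('openai:gpt-4o-mini')  # illustrative model choice
result = agent.run_sync(
    'Hello!',
    model_settings={
        # passed through untouched as the request's extra body
        'extra_body': {'metadata': {'team': 'search'}},
    },
)
```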