pydantic-ai-slim 0.4.4.tar.gz → 0.4.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/PKG-INFO +3 -3
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_a2a.py +3 -3
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/agent.py +9 -8
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/format_prompt.py +3 -6
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/gemini.py +0 -9
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/instrumented.py +6 -1
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/openai.py +13 -1
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/.gitignore +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/LICENSE +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/README.md +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/__main__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_agent_graph.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_cli.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_function_schema.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_griffe.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_mcp.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_output.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_parts_manager.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_run_context.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_system_prompt.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_thinking_part.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_tool_manager.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_utils.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/ag_ui.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/common_tools/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/common_tools/duckduckgo.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/common_tools/tavily.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/direct.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/exceptions.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/ext/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/ext/aci.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/ext/langchain.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/format_as_xml.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/mcp.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/messages.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/anthropic.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/bedrock.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/cohere.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/fallback.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/function.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/google.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/groq.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/huggingface.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/mcp_sampling.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/mistral.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/test.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/wrapper.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/output.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/_json_schema.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/amazon.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/anthropic.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/cohere.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/deepseek.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/google.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/grok.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/meta.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/mistral.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/moonshotai.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/openai.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/profiles/qwen.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/anthropic.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/azure.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/bedrock.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/cohere.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/deepseek.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/fireworks.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/github.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/google.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/google_gla.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/google_vertex.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/grok.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/groq.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/heroku.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/huggingface.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/mistral.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/openai.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/openrouter.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/providers/together.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/py.typed +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/result.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/settings.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/tools.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/__init__.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/abstract.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/combined.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/deferred.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/filtered.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/function.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/prefixed.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/prepared.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/renamed.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/toolsets/wrapper.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/usage.py +0 -0
- {pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pyproject.toml +0 -0
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.4.4
+Version: 0.4.5
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
@@ -30,7 +30,7 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.4.4
+Requires-Dist: pydantic-graph==0.4.5
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -51,7 +51,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.4.4; extra == 'evals'
+Requires-Dist: pydantic-evals==0.4.5; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.24.0; extra == 'google'
 Provides-Extra: groq
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/_a2a.py
@@ -59,12 +59,12 @@ except ImportError as _import_error:
 
 
 @asynccontextmanager
-async def worker_lifespan(app: FastA2A, worker: Worker) -> AsyncIterator[None]:
+async def worker_lifespan(app: FastA2A, worker: Worker, agent: Agent[AgentDepsT, OutputDataT]) -> AsyncIterator[None]:
     """Custom lifespan that runs the worker during application startup.
 
     This ensures the worker is started and ready to process tasks as soon as the application starts.
     """
-    async with app.task_manager:
+    async with app.task_manager, agent:
         async with worker.run():
             yield
 
@@ -93,7 +93,7 @@ def agent_to_a2a(
     broker = broker or InMemoryBroker()
     worker = AgentWorker(agent=agent, broker=broker, storage=storage)
 
-    lifespan = lifespan or partial(worker_lifespan, worker=worker)
+    lifespan = lifespan or partial(worker_lifespan, worker=worker, agent=agent)
 
     return FastA2A(
         storage=storage,
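
Taken together, the two _a2a.py hunks mean the agent's own async context is now entered for the lifetime of the A2A application, so the agent is ready before the first task is processed. A hedged sketch of how this surfaces to users, assuming the a2a extra is installed (the model string and instructions are placeholders):

from pydantic_ai import Agent

agent = Agent('openai:gpt-4o', instructions='Be concise.')

# to_a2a() builds the FastA2A app via agent_to_a2a(); with 0.4.5 the lifespan
# also does `async with agent`, so the agent is entered/exited with the app.
app = agent.to_a2a()

# Serve with an ASGI server, e.g.: uvicorn my_module:app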
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/agent.py
@@ -843,14 +843,15 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             agent_run = AgentRun(graph_run)
             yield agent_run
             if (final_result := agent_run.result) is not None and run_span.is_recording():
-
-
-
-
-
-
-
-
+                if instrumentation_settings and instrumentation_settings.include_content:
+                    run_span.set_attribute(
+                        'final_result',
+                        (
+                            final_result.output
+                            if isinstance(final_result.output, str)
+                            else json.dumps(InstrumentedModel.serialize_any(final_result.output))
+                        ),
+                    )
         finally:
             try:
                 if instrumentation_settings and run_span.is_recording():
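
This agent.py hunk gates the 'final_result' span attribute on include_content, so run output can be kept out of traces. A hedged sketch of turning that off (model name and prompt are placeholders; it assumes InstrumentationSettings accepts include_content as a constructor argument, as the hunks here and in instrumented.py suggest):

from pydantic_ai import Agent
from pydantic_ai.models.instrumented import InstrumentationSettings

# With include_content=False the run span is recorded without the
# 'final_result' payload (and message events omit their content too).
settings = InstrumentationSettings(include_content=False)
agent = Agent('openai:gpt-4o', instrument=settings)

result = agent.run_sync('What is the capital of France?')
print(result.output)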
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/format_prompt.py
@@ -13,9 +13,8 @@ __all__ = ('format_as_xml',)
 
 def format_as_xml(
     obj: Any,
-    root_tag: str =
-    item_tag: str = '
-    include_root_tag: bool = True,
+    root_tag: str | None = None,
+    item_tag: str = 'item',
     none_str: str = 'null',
     indent: str | None = ' ',
 ) -> str:
@@ -32,8 +31,6 @@ def format_as_xml(
         root_tag: Outer tag to wrap the XML in, use `None` to omit the outer tag.
         item_tag: Tag to use for each item in an iterable (e.g. list), this is overridden by the class name
             for dataclasses and Pydantic models.
-        include_root_tag: Whether to include the root tag in the output
-            (The root tag is always included if it includes a body - e.g. when the input is a simple value).
         none_str: String to use for `None` values.
         indent: Indentation string to use for pretty printing.
 
@@ -55,7 +52,7 @@ def format_as_xml(
     ```
     """
     el = _ToXml(item_tag=item_tag, none_str=none_str).to_xml(obj, root_tag)
-    if
+    if root_tag is None and el.text is None:
         join = '' if indent is None else '\n'
         return join.join(_rootless_xml_elements(el, indent))
     else:
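
With these format_prompt.py hunks, include_root_tag is gone and root_tag now defaults to None, so the wrapper element is simply omitted unless a root_tag is passed. A small sketch (the exact pretty-printed output is indicative only):

from pydantic_ai.format_prompt import format_as_xml

user = {'name': 'John', 'age': 30}

print(format_as_xml(user))
# e.g.:
# <name>John</name>
# <age>30</age>

print(format_as_xml(user, root_tag='user'))
# e.g.:
# <user>
#   <name>John</name>
#   <age>30</age>
# </user>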
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/gemini.py
@@ -91,15 +91,6 @@ class GeminiModelSettings(ModelSettings, total=False):
     See the [Gemini API docs](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls) for use cases and limitations.
     """
 
-    gemini_thinking_config: ThinkingConfig
-    """Thinking is on by default in both the API and AI Studio.
-
-    Being on by default doesn't mean the model will send back thoughts. For that, you need to set `include_thoughts`
-    to `True`. If you want to turn it off, set `thinking_budget` to `0`.
-
-    See more about it on <https://ai.google.dev/gemini-api/docs/thinking>.
-    """
-
 
 @dataclass(init=False)
 class GeminiModel(Model):
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/instrumented.py
@@ -156,7 +156,12 @@ class InstrumentationSettings:
         events: list[Event] = []
         instructions = InstrumentedModel._get_instructions(messages)  # pyright: ignore [reportPrivateUsage]
         if instructions is not None:
-            events.append(
+            events.append(
+                Event(
+                    'gen_ai.system.message',
+                    body={**({'content': instructions} if self.include_content else {}), 'role': 'system'},
+                )
+            )
 
         for message_index, message in enumerate(messages):
             message_events: list[Event] = []
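
The body expression added above is plain dict-building; a tiny self-contained sketch of what the gen_ai.system.message event body looks like with and without include_content:

instructions = 'You are a helpful assistant.'

for include_content in (True, False):
    body = {**({'content': instructions} if include_content else {}), 'role': 'system'}
    print(include_content, body)

# True {'content': 'You are a helpful assistant.', 'role': 'system'}
# False {'role': 'system'}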
{pydantic_ai_slim-0.4.4 → pydantic_ai_slim-0.4.5}/pydantic_ai/models/openai.py
@@ -8,6 +8,7 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from typing import Any, Literal, Union, cast, overload
 
+from pydantic import ValidationError
 from typing_extensions import assert_never
 
 from pydantic_ai._thinking_part import split_content_into_text_and_thinking
@@ -347,8 +348,19 @@ class OpenAIModel(Model):
             raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
         raise  # pragma: no cover
 
-    def _process_response(self, response: chat.ChatCompletion) -> ModelResponse:
+    def _process_response(self, response: chat.ChatCompletion | str) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
+        # Although the OpenAI SDK claims to return a Pydantic model (`ChatCompletion`) from the chat completions function:
+        # * it hasn't actually performed validation (presumably they're creating the model with `model_construct` or something?!)
+        # * if the endpoint returns plain text, the return type is a string
+        # Thus we validate it fully here.
+        if not isinstance(response, chat.ChatCompletion):
+            raise UnexpectedModelBehavior('Invalid response from OpenAI chat completions endpoint, expected JSON data')
+
+        try:
+            response = chat.ChatCompletion.model_validate(response.model_dump())
+        except ValidationError as e:
+            raise UnexpectedModelBehavior(f'Invalid response from OpenAI chat completions endpoint: {e}') from e
         timestamp = number_to_datetime(response.created)
        choice = response.choices[0]
        items: list[ModelResponsePart] = []
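
The _process_response change forces full validation of the SDK's ChatCompletion object by round-tripping it through model_dump()/model_validate(). A minimal sketch of that pattern in plain pydantic (the Completion model here is hypothetical, just to show the round trip):

from pydantic import BaseModel, ValidationError


class Completion(BaseModel):
    id: str
    created: int


# model_construct() skips validation, much like the unvalidated object the
# OpenAI SDK hands back; re-validating the dumped data surfaces bad payloads.
unvalidated = Completion.model_construct(id='chatcmpl-123', created='not-a-timestamp')

try:
    validated = Completion.model_validate(unvalidated.model_dump())
except ValidationError as exc:
    print(f'Invalid response: {exc}')
else:
    print(validated)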