opencode-llmstack 0.7.2__py3-none-any.whl → 0.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llmstack/models.ini +140 -78
- {opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/METADATA +1 -1
- {opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/RECORD +6 -6
- {opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/WHEEL +0 -0
- {opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/entry_points.txt +0 -0
- {opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/top_level.txt +0 -0
llmstack/models.ini CHANGED

@@ -51,7 +51,6 @@
 [DEFAULT]
 host = 127.0.0.1
 router_port = 10101 ; FastAPI auto-router (what opencode hits)
-swap_port = 10102 ; llama-swap manager UI + raw model endpoints
 n_gpu_layers = 999 ; offload everything to Metal on Apple Silicon
 flash_attn = on
 jinja = true
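The `[DEFAULT]` section above supplies the host, port, and GPU settings that every tier section below inherits. A minimal sketch of that inheritance, assuming models.ini is read with Python's stdlib `configparser` (illustrative only -- not necessarily how the package's own loader works):

```python
# Illustrative only -- shows how the [DEFAULT] keys above (host, router_port,
# n_gpu_layers, ...) are inherited by every tier section when models.ini is
# read with the stdlib parser.
import configparser

cfg = configparser.ConfigParser(inline_comment_prefixes=(";",))
cfg.read("llmstack/models.ini")        # path as the file ships inside the wheel

fast = cfg["code-fast"]                # tier section discussed in the next hunk
print(fast.get("host"))                # "127.0.0.1" -- inherited from [DEFAULT]
print(fast.getint("router_port"))      # 10101 -- inline "; ..." comment stripped
print(fast.get("sampler"))             # tier-specific value, not a DEFAULT key
```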
@@ -72,11 +71,26 @@ ctx_size = 131072 ; native 32k extended via YaRN (factor 4)
 rope_scaling = yarn (scale=4, orig_ctx=32768)
 size_gb = 2.5
 quant = Q5_K_M
-status = downloading ; queued by `llmstack.sh download`
-opencode_use = small_model + auto-fast tier
 sampler = temp=0.2, top_p=0.95, top_k=40, min_p=0.05 ; deterministic
 description = Qwen2.5-Coder 3B - autocomplete / FIM / quick Q&A
 
+; Bedrock alternative for code-fast -- comment out the [code-fast] block above
+; and uncomment the block below to swap to a hosted fast tier (Claude Haiku
+; 4.5: cheapest + fastest Anthropic model with tool calling, sub-second TTFT).
+; See "BEDROCK NOTES" at the bottom of this file for profile / sampler /
+; access-form details.
+;
+; [code-fast]
+; tier = code
+; role = fast
+; backend = bedrock
+; aws_model_id = eu.anthropic.claude-haiku-4-5-20251001-v1:0
+; aws_region = eu-central-1
+; aws_profile = bedrock-prod
+; ctx_size = 200000
+; sampler = temp=0.2 ; deterministic; Haiku 4.5 accepts ONE of temp / top_p
+; description = Claude Haiku 4.5 on Bedrock - hosted fast tier for autocomplete / FIM / quick Q&A
+
 [code-smart]
 tier = code
 role = agent
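At request time, the commented-out `[code-fast]` alternative above reduces to a single Bedrock Converse call. A hedged sketch of what such a call looks like with the block's values, using plain boto3 rather than the router's actual code path:

```python
# Rough sketch of what a Bedrock-backed tier boils down to at request time --
# not the router's actual code. Field values are taken from the commented-out
# [code-fast] alternative above; the Converse API call itself is standard boto3.
import boto3

session = boto3.Session(profile_name="bedrock-prod", region_name="eu-central-1")
brt = session.client("bedrock-runtime")

resp = brt.converse(
    modelId="eu.anthropic.claude-haiku-4-5-20251001-v1:0",
    messages=[{"role": "user", "content": [{"text": "Complete this function stub."}]}],
    inferenceConfig={"temperature": 0.2},  # sampler = temp=0.2 (Haiku: temp OR top_p, never both)
)
print(resp["output"]["message"]["content"][0]["text"])
```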
@@ -88,11 +102,25 @@ size_gb = 45
 size_gb_next = 50
 quant = Q4_K_M
 quant_next = UD-Q4_K_XL
-status = ready (Q4_K_M); UD-Q4_K_XL queued
-opencode_use = agent.build + auto-agent tier
 sampler = temp=0.5, top_p=0.85, top_k=20, min_p=0.05, rep_pen=1.05 ; balanced agent
 description = Qwen3-Coder-Next 80B-A3B MoE - heavy coder for agent loops
 
+; Bedrock alternative for code-smart -- comment out the [code-smart] block
+; above and uncomment the block below to swap to a hosted heavy coder
+; (Claude Sonnet 4.6: agent-loop workhorse, heavy tool calling, multi-file
+; edits). See "BEDROCK NOTES" at the bottom of this file.
+;
+; [code-smart]
+; tier = code
+; role = agent
+; backend = bedrock
+; aws_model_id = eu.anthropic.claude-sonnet-4-6
+; aws_region = eu-central-1
+; aws_profile = bedrock-prod
+; ctx_size = 200000
+; sampler = temp=0.5 ; Sonnet 4.6 accepts ONE of temp / top_p; pick `temp` for agent work
+; description = Claude Sonnet 4.6 on Bedrock - heavy coder for agent loops
+
 ; Top-tier hosted coder. Shipped disabled because it requires boto3 +
 ; AWS Bedrock access. `llmstack install` auto-uncomments the block
 ; below (by stripping the leading "; " from each line and dropping
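The comment above describes `llmstack install` activating a disabled block by stripping the leading "; " from each line. A rough sketch of that kind of transform, purely illustrative and not the package's implementation:

```python
# Illustrative sketch of the "strip the leading '; '" activation step described
# above -- NOT llmstack's actual implementation, which may handle more cases.
def uncomment_block(lines: list[str]) -> list[str]:
    """Turn a '; '-prefixed alternative block into live INI lines."""
    out = []
    for line in lines:
        if line.startswith("; "):
            out.append(line[2:])      # drop the comment marker
        elif line.strip() == ";":
            out.append("")            # bare ';' spacer becomes a blank line
        else:
            out.append(line)          # already-live lines pass through
    return out

print("\n".join(uncomment_block([
    "; [code-smart]",
    "; backend = bedrock",
    "; aws_model_id = eu.anthropic.claude-sonnet-4-6",
])))
```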
@@ -107,10 +135,9 @@ description = Qwen3-Coder-Next 80B-A3B MoE - heavy coder for agent loops
 ; role = ultra
 ; backend = bedrock
 ; aws_model_id = global.anthropic.claude-opus-4-7 ; global.* cross-region inference profile
-; aws_region =
-; aws_profile
+; aws_region = eu-central-1 ; API anchor region; global.* auto-routes inference cross-region (set EU as the anchor for residency)
+; aws_profile = bedrock-prod ; conventional profile name; configure once with `aws configure --profile bedrock-prod` (or change to your own and run `llmstack install`)
 ; ctx_size = 200000
-; opencode_use = on-demand top-tier coder for hard agent tasks
 ; ; NB: no `sampler =` line. Claude Opus 4.7 explicitly rejects all
 ; ; sampler params (temperature, top_p, top_k) -- per the Bedrock
 ; ; model card, "the recommended migration path is to omit these
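The commented-out ultra tier above deliberately omits a `sampler =` line because Opus 4.7+ rejects sampler parameters. A sketch of the resulting behaviour -- send no `inferenceConfig` at all when a tier carries no sampler -- again illustrative rather than the router's real code:

```python
# Sketch of the "no sampler line => pass requests through untouched" rule for
# Opus 4.7+ described above; illustrative, not the router's actual code.
import boto3

def converse(client, model_id: str, messages: list[dict], sampler: dict | None):
    kwargs = {"modelId": model_id, "messages": messages}
    if sampler:                           # e.g. {"temperature": 0.2}
        kwargs["inferenceConfig"] = sampler
    # An ultra tier with no `sampler =` line passes sampler=None, so the
    # request is sent with no inferenceConfig at all.
    return client.converse(**kwargs)

brt = boto3.Session(profile_name="bedrock-prod",
                    region_name="eu-central-1").client("bedrock-runtime")
resp = converse(brt, "global.anthropic.claude-opus-4-7",
                [{"role": "user", "content": [{"text": "Refactor this module."}]}],
                sampler=None)
```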
@@ -134,11 +161,26 @@ size_gb = 9.2
 size_gb_next = 12.1
 quant = Q4_K_M
 quant_next = Q6_K
-status = ready (Q4_K_M); Q6_K queued
-opencode_use = agent.plan + auto-plan tier
 sampler = temp=0.7, top_p=0.9, top_k=40, min_p=0.05 ; creative thinking
 description = Qwopus GLM 18B - planning, design discussions, architecture
 
+; Bedrock alternative for plan -- comment out the [plan] block above and
+; uncomment the block below to swap to a hosted planner (Claude Opus 4.6:
+; deep reasoning for design discussions and architecture). Opus 4.6 still
+; accepts both temperature and top_p (unlike 4.7), so the local sampler
+; maps over cleanly. See "BEDROCK NOTES" at the bottom of this file.
+;
+; [plan]
+; tier = chat
+; role = plan
+; backend = bedrock
+; aws_model_id = eu.anthropic.claude-opus-4-6-v1
+; aws_region = eu-central-1
+; aws_profile = bedrock-prod
+; ctx_size = 200000
+; sampler = temp=0.7, top_p=0.9 ; creative; Opus 4.6 accepts both
+; description = Claude Opus 4.6 on Bedrock - planning, design discussions, architecture
+
 [plan-uncensored]
 tier = chat
 role = plan-uncensored
@@ -150,11 +192,51 @@ size_gb = 13
 size_gb_next = 20
 quant = i1-Q4_K_M
 quant_next = i1-Q6_K
-status = ready (i1-Q4_K_M); i1-Q6_K queued
-opencode_use = agent.plan-nofilter + auto via [nofilter] trigger
 sampler = temp=0.85, top_p=0.95, top_k=50, min_p=0.05 ; max exploration
 description = Mistral-Small 3.2 24B Heretic - no-filter planning
 
+; Bedrock alternative for plan-uncensored -- comment out the [plan-uncensored]
+; block above and uncomment ONE of the blocks below. Anthropic models on
+; Bedrock are filtered, so for the uncensored slot we pick the largest
+; open-weights model on Bedrock: Llama 3.1 405B has minimal safety post-
+; training and matches the spirit of the local Heretic tier. NOTE: Meta
+; models do NOT require the AWS use-case form, so this swap unblocks
+; plan-uncensored on a fresh AWS account.
+;
+; REGION CAVEAT: unlike the other tiers above, Llama 3.1 405B has NO
+; cross-region inference profile (no eu.* / global.*) and is only
+; deployed in US regions. Pin to us-west-2 even when the rest of the
+; stack is anchored in eu-central-1. If EU residency is mandatory for
+; this tier, switch to one of the eu.anthropic.* IDs at the cost of
+; losing the "uncensored" property. See "BEDROCK NOTES" at the bottom
+; of this file.
+;
+; [plan-uncensored]
+; tier = chat
+; role = plan-uncensored
+; backend = bedrock
+; aws_model_id = meta.llama3-1-405b-instruct-v1:0
+; aws_region = us-west-2 ; Llama 405B has no EU deployment; keep on US
+; aws_profile = bedrock-prod
+; ctx_size = 128000
+; sampler = temp=0.85, top_p=0.95 ; max exploration
+; description = Llama 3.1 405B on Bedrock - no-filter planning
+;
+; ...or, if your org locks Bedrock access to a VPC endpoint, use this
+; variant instead (same model + sampler, with aws_endpoint_url set):
+;
+; [plan-uncensored]
+; tier = chat
+; role = plan-uncensored
+; backend = bedrock
+; aws_model_id = meta.llama3-1-405b-instruct-v1:0
+; aws_region = us-west-2 ; Llama 405B has no EU deployment
+; aws_profile = bedrock-prod
+; aws_endpoint_url = https://bedrock-runtime.us-west-2.vpce.amazonaws.com
+; ctx_size = 128000
+; sampler = temp=0.85, top_p=0.95
+; description = Llama 3.1 405B on Bedrock (VPC) - no-filter planning
+
 ;------------------------------------------------------------------------------
 [ROUTING]
 ; STEP-DOWN ladder: start at the top of the fidelity ladder for short
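The VPC variant above adds a single key, `aws_endpoint_url`, which maps onto boto3's standard `endpoint_url` client argument. A sketch with the block's values (the endpoint hostname is the placeholder from the config, and the Converse API is assumed as elsewhere):

```python
# Sketch of how the optional `aws_endpoint_url` key in the VPC variant above
# would be honoured -- illustrative only; `endpoint_url` is a standard boto3
# client argument for routing calls through a VPC interface endpoint.
import boto3

session = boto3.Session(profile_name="bedrock-prod", region_name="us-west-2")
brt = session.client(
    "bedrock-runtime",
    endpoint_url="https://bedrock-runtime.us-west-2.vpce.amazonaws.com",
)
resp = brt.converse(
    modelId="meta.llama3-1-405b-instruct-v1:0",
    messages=[{"role": "user", "content": [{"text": "Sketch three architectures."}]}],
    inferenceConfig={"temperature": 0.85, "topP": 0.95},  # sampler = temp=0.85, top_p=0.95
)
```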
@@ -203,30 +285,46 @@ uncensored_triggers = [nofilter], [uncensored], [heretic], "uncensored:", "nof
 ultra_triggers = [ultra], [opus], "ultra:", "opus:" (line start)
 
 ;------------------------------------------------------------------------------
-; BEDROCK
+; BEDROCK NOTES (referenced by the commented-out alternatives above)
 ;------------------------------------------------------------------------------
-;
-;
-;
-;
-;
+; Each tier section above carries a "Bedrock alternative for <tier>" block
+; directly underneath it (commented out by default). To swap a tier:
+;
+;   1. comment out the active local section (GGUF by default);
+;   2. uncomment the Bedrock-alternative block beneath it;
+;   3. run `llmstack install` (and `llmstack restart` if the tier was
+;      already loaded -- bedrock creds aren't picked up live).
 ;
-;
-;
+; The router auto-detects backend=bedrock from `aws_model_id`, but every
+; alternative block also sets `backend = bedrock` explicitly so the intent
+; is obvious. llama-swap won't load bedrock tiers; the router calls
+; Bedrock directly via boto3 (`pip install 'llmstack[bedrock]'`).
+;
+; PROFILE: every alternative uses `aws_profile = bedrock-prod`, the
+; conventional profile name for this stack. The actual keys / SSO /
+; role chaining live in the standard AWS config files (this file ONLY
+; names a profile -- never put credentials here). One-time setup:
 ;
 ;   aws configure --profile bedrock-prod
-;   #
-;   #
+;   # SSO: aws configure sso --profile bedrock-prod
+;   # role chaining: edit ~/.aws/config and add:
+;   #   [profile bedrock-prod]
 ;   #   role_arn = arn:aws:iam::123456789012:role/llmstack-bedrock
-;   #   source_profile = bedrock-prod
+;   #   source_profile = bedrock-prod-base
+;
+; To use a different profile name, edit the `aws_profile` line. To fall
+; back on boto3's default chain (env vars, default profile, instance
+; role), remove the line entirely.
 ;
-;
-;
-;
+; UPGRADE PRE-STAGING: optional `aws_model_id_next` (+ `aws_region_next`)
+; is the queued upgrade target -- mirrors gguf `hf_file_next`. The router
+; uses it only when `llmstack start --next` is in effect; permanent
+; promotion is the same as gguf: edit `aws_model_id` and re-run
+; `llmstack install`.
 ;
-; SAMPLER
-;
-;
+; SAMPLER: the `sampler = temp=..., top_p=..., top_k=..., ...` line on
+; each tier is the SINGLE SOURCE OF TRUTH for sampling, but how it gets
+; applied depends on the backend:
 ;
 ; * gguf tiers -- the llama-swap generator bakes the sampler keys
 ;   into the llama-server startup command line as `--temp`,
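The notes above pin down three behaviours: the router infers a Bedrock backend from `aws_model_id`, `aws_profile` is optional (absent means boto3's default credential chain), and `aws_model_id_next` / `aws_region_next` only apply under `llmstack start --next`. A compact, hedged sketch of those rules with `configparser` -- not the package's actual router code:

```python
# Illustrative sketch (not llmstack's code) of three rules from the BEDROCK
# NOTES above: bedrock backend is inferred from `aws_model_id`, `aws_profile`
# is optional (None falls back to boto3's default credential chain), and the
# *_next keys are only used when the --next upgrade flag is in effect.
import configparser
import boto3

cfg = configparser.ConfigParser(inline_comment_prefixes=(";",))
cfg.read("llmstack/models.ini")

def bedrock_client_and_model(tier: str, use_next: bool = False):
    sec = cfg[tier]
    if "aws_model_id" not in sec:
        raise ValueError(f"[{tier}] is a local gguf tier, not a Bedrock one")
    suffix = "_next" if use_next and "aws_model_id_next" in sec else ""
    model_id = sec["aws_model_id" + suffix]
    region = sec.get("aws_region" + suffix) or sec.get("aws_region")
    session = boto3.Session(profile_name=sec.get("aws_profile"),  # None -> default chain
                            region_name=region)
    return session.client("bedrock-runtime"), model_id
```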
@@ -248,57 +346,21 @@ ultra_triggers = [ultra], [opus], "ultra:", "opus:" (line start)
 ; opencode.json is sampler-free in both cases by design (the
 ; opencode.json generator never emits sampler params on agents).
 ;
-; Per-Bedrock-family rules (as of 2026):
+; Per-Bedrock-family sampler rules (as of 2026):
 ;
 ; * Claude Opus 4.7+ -- rejects all sampler params; OMIT `sampler =`
 ;   entirely (the router will then pass requests through untouched).
-; * Claude Sonnet 4.5 / Haiku 4.5 -- accept `temp` OR `top_p`,
-;   both; pick one.
+; * Claude Sonnet 4.5 / 4.6 / Haiku 4.5 -- accept `temp` OR `top_p`,
+;   never both; pick one.
 ; * Claude Opus 4.x (4.1, 4.5, 4.6) -- accept `temp` and `top_p`.
-; * Llama / Titan / Cohere / etc. -- accept `temp`
-;   the model card if in doubt.
+; * Llama / Titan / Mistral / Cohere / Nova / etc. -- accept `temp`
+;   + `top_p`; check the model card if in doubt.
 ;
-;
-;
-;
-;
-;
-;
-;
-;
-; role = agent
-; backend = bedrock
-; aws_model_id = anthropic.claude-sonnet-4-5-20250929-v1:0
-; aws_region = us-west-2
-; aws_model_id_next = anthropic.claude-sonnet-5-20260201-v1:0 ; queued
-; aws_region_next = us-east-1 ; (optional) different region for the new model
-; ctx_size = 200000
-; sampler = temp=0.5 ; Sonnet 4.5 accepts ONE of temp / top_p; pick `temp` for agent work
-; description = Claude Sonnet 4.5 on Bedrock - heavy coder for agent loops
-;
-; Example B: planner in a different AWS account, accessed via a named
-; profile that itself uses role-chaining + SSO under ~/.aws/config.
-; (Different tier => different profile name; different account/region.)
-;
-; [plan]
-; tier = chat
-; role = plan
-; aws_model_id = us.anthropic.claude-opus-4-1-20250805-v1:0
-; aws_region = us-east-1
-; aws_profile = bedrock-planning
-; ctx_size = 200000
-; sampler = temp=0.7, top_p=0.9
-; description = Claude Opus 4.1 on Bedrock - planning, design discussions
-;
-; Example C: large model behind a VPC endpoint.
-;
-; [plan-uncensored]
-; tier = chat
-; role = plan-uncensored
-; aws_model_id = meta.llama3-1-405b-instruct-v1:0
-; aws_region = us-west-2
-; aws_profile = bedrock-prod
-; aws_endpoint_url = https://bedrock-runtime.us-west-2.vpce.amazonaws.com
-; ctx_size = 128000
-; sampler = temp=0.85, top_p=0.95
-; description = Llama 3.1 405B on Bedrock - max-exploration planning
+; ACCESS: Anthropic Claude on Bedrock requires a one-time use-case-form
+; approval per AWS account (Bedrock console -> Model catalog -> pick the
+; model -> fill the form). Approval is account-level and persists; once
+; granted, every Claude variant works (bare ID, us./eu./global. cross-
+; region profile, application inference profile ARN). To skip the form
+; entirely, use the Llama 3.1 405B variant under [plan-uncensored] (Meta
+; models don't require the form) or pick another non-Anthropic family
+; (Amazon Nova, Mistral, Cohere, Titan).
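The per-family rules above can be expressed as a small mapping from the parsed `sampler =` line to a Converse `inferenceConfig`. A sketch; the family detection by model-ID substring is an assumption of this example, not something the config file specifies:

```python
# Sketch of the per-family sampler rules listed above, applied to a parsed
# `sampler =` line -- illustrative, not the router's implementation. Detecting
# the family by substring match on the model ID is an assumption, and only
# "claude-opus-4-7" is treated as the Opus 4.7+ case here.
def sampler_to_inference_config(model_id: str, sampler: dict) -> dict | None:
    """sampler e.g. {"temp": 0.7, "top_p": 0.9, "top_k": 40, "min_p": 0.05}"""
    if "claude-opus-4-7" in model_id:
        return None                                    # Opus 4.7+: omit everything
    cfg = {}
    if "temp" in sampler:
        cfg["temperature"] = sampler["temp"]
    if "top_p" in sampler:
        cfg["topP"] = sampler["top_p"]
    one_of = ("claude-sonnet-4-5", "claude-sonnet-4-6", "claude-haiku-4-5")
    if any(m in model_id for m in one_of):
        cfg.pop("topP", None)                          # temp OR top_p, never both
    return cfg or None                                 # top_k / min_p dropped for simplicity

print(sampler_to_inference_config("eu.anthropic.claude-haiku-4-5-20251001-v1:0",
                                  {"temp": 0.2, "top_p": 0.95}))
# -> {'temperature': 0.2}
```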
{opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/RECORD CHANGED

@@ -5,7 +5,7 @@ llmstack/_platform.py,sha256=eDY3T9krkaBigG5xXxqzIbH3MhdZqX3BWe7bozOsAso,13099
 llmstack/app.py,sha256=fPyjqJ_4td7qs-OKuDsE1JzBtvNzVV9XYKF2WXBzRas,25795
 llmstack/check_models.py,sha256=WvTS2Td4acp-Q0-yWXUgXAgAgFOmpxiaeSDuAoivirw,4559
 llmstack/cli.py,sha256=Om70PzHrmU81y2Mw1sB6eeUs1fRHP0PnsCEVNC0UNvI,11341
-llmstack/models.ini,sha256=
+llmstack/models.ini,sha256=kmfX_9WHEqnjRfF7srT6zesfC_YIp-0MmW0YbfFkXD8,18381
 llmstack/paths.py,sha256=A8q4-tpwIt5UMGG5ZDESKSuViMGLbPIAL1VoONopJqU,11512
 llmstack/shell_env.py,sha256=MJSW0PP15q-fsppIZ98WZ7XoqYMZmDy4k8N0gzEA6wU,39362
 llmstack/tiers.py,sha256=et738dWftsc74ZElZ3Vt9eEF_SzgJCDuH9kBhzH-scI,14697

@@ -30,8 +30,8 @@ llmstack/download/ggufs.py,sha256=2hCr-svUiPIV2I3ruwTbXo6lPn9m-VBOqa3DFbvdIcA,54
 llmstack/generators/__init__.py,sha256=LfbcReuyYBCdVuT9J5RKo7-f8n585YBU3Hus6DsxqTs,1189
 llmstack/generators/llama_swap.py,sha256=KdYH9N6TJECotZvyxvAjaa3kRyzn4YOi2T6D2UdyVKw,14785
 llmstack/generators/opencode.py,sha256=If7opOQyMWSSbHTj7M9dndsA3BmskSTUsTggMKV0VWM,10669
-opencode_llmstack-0.7.
-opencode_llmstack-0.7.
-opencode_llmstack-0.7.
-opencode_llmstack-0.7.
-opencode_llmstack-0.7.
+opencode_llmstack-0.7.3.dist-info/METADATA,sha256=sobMO1qeP8dsGlofz-odTUKS2jNzKHPDneQcy_WyHz4,34815
+opencode_llmstack-0.7.3.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
+opencode_llmstack-0.7.3.dist-info/entry_points.txt,sha256=soomjpqvl4KzFScgpQbu96vgcLriOtkB9MbiSC0rvZ8,47
+opencode_llmstack-0.7.3.dist-info/top_level.txt,sha256=tMv9sDWp8RW_DNNY8cuM4Uy4sND-KwTLcsScl5gdcEQ,9
+opencode_llmstack-0.7.3.dist-info/RECORD,,
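For reference, a RECORD entry like the new `llmstack/models.ini` line encodes the file's SHA-256 digest as unpadded urlsafe base64, followed by its size in bytes (the standard wheel RECORD format). A small sketch that reproduces such an entry from an installed file:

```python
# Sketch of how a RECORD entry like the new models.ini line above is formed:
# "path,sha256=<urlsafe-base64 digest, no padding>,<size in bytes>".
import base64, hashlib, pathlib

def record_entry(path: str) -> str:
    data = pathlib.Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Against the installed 0.7.3 wheel this should reproduce:
# llmstack/models.ini,sha256=kmfX_9WHEqnjRfF7srT6zesfC_YIp-0MmW0YbfFkXD8,18381
print(record_entry("llmstack/models.ini"))
```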
{opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/WHEEL: file without changes
{opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/entry_points.txt: file without changes
{opencode_llmstack-0.7.2.dist-info → opencode_llmstack-0.7.3.dist-info}/top_level.txt: file without changes