mcpbr 0.4.16__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. mcpbr/__init__.py +20 -1
  2. mcpbr/config.py +37 -1
  3. mcpbr/config_migration.py +470 -0
  4. mcpbr/config_wizard.py +647 -0
  5. mcpbr/dashboard.py +619 -0
  6. mcpbr/dataset_streaming.py +491 -0
  7. mcpbr/docker_cache.py +539 -0
  8. mcpbr/docker_env.py +2 -1
  9. mcpbr/docker_prewarm.py +370 -0
  10. mcpbr/dry_run.py +533 -0
  11. mcpbr/formatting.py +444 -0
  12. mcpbr/gpu_support.py +2 -1
  13. mcpbr/graceful_degradation.py +277 -0
  14. mcpbr/harness.py +38 -4
  15. mcpbr/languages.py +228 -0
  16. mcpbr/logging_config.py +207 -0
  17. mcpbr/models.py +66 -0
  18. mcpbr/preflight.py +2 -1
  19. mcpbr/pricing.py +72 -0
  20. mcpbr/providers.py +316 -3
  21. mcpbr/resource_limits.py +487 -0
  22. mcpbr/result_streaming.py +519 -0
  23. mcpbr/sdk.py +264 -0
  24. mcpbr/smoke_test.py +2 -1
  25. mcpbr/task_batching.py +403 -0
  26. mcpbr/task_scheduler.py +468 -0
  27. {mcpbr-0.4.16.dist-info → mcpbr-0.6.0.dist-info}/METADATA +8 -1
  28. {mcpbr-0.4.16.dist-info → mcpbr-0.6.0.dist-info}/RECORD +38 -22
  29. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/brave-search.yaml +0 -0
  30. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/filesystem.yaml +0 -0
  31. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/github.yaml +0 -0
  32. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/google-maps.yaml +0 -0
  33. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/postgres.yaml +0 -0
  34. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/slack.yaml +0 -0
  35. {mcpbr-0.4.16.data → mcpbr-0.6.0.data}/data/mcpbr/data/templates/sqlite.yaml +0 -0
  36. {mcpbr-0.4.16.dist-info → mcpbr-0.6.0.dist-info}/WHEEL +0 -0
  37. {mcpbr-0.4.16.dist-info → mcpbr-0.6.0.dist-info}/entry_points.txt +0 -0
  38. {mcpbr-0.4.16.dist-info → mcpbr-0.6.0.dist-info}/licenses/LICENSE +0 -0
mcpbr/providers.py CHANGED
@@ -1,6 +1,8 @@
1
1
  """Model provider abstractions for different LLM APIs."""
2
2
 
3
+ import json
3
4
  import os
5
+ import uuid
4
6
  from dataclasses import dataclass, field
5
7
  from typing import Any, Protocol, runtime_checkable
6
8
 
@@ -142,8 +144,6 @@ class AnthropicProvider:
142
144
  if block.type == "text":
143
145
  content_text = block.text
144
146
  elif block.type == "tool_use":
145
- import json
146
-
147
147
  tool_calls.append(
148
148
  ToolCall(
149
149
  id=block.id,
@@ -170,8 +170,321 @@ class AnthropicProvider:
170
170
  )
171
171
 
172
172
 
173
class OpenAIProvider:
    """Provider for OpenAI API (GPT models)."""

    def __init__(
        self,
        model: str,
        api_key: str | None = None,
    ) -> None:
        """Initialize OpenAI provider.

        Args:
            model: OpenAI model ID (e.g., 'gpt-4o', 'gpt-4-turbo').
            api_key: API key. If None, uses OPENAI_API_KEY env var.

        Raises:
            ValueError: If no key is given and OPENAI_API_KEY is unset.
        """
        self._model = model
        self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
        if not self._api_key:
            raise ValueError(
                "OpenAI API key required. Set OPENAI_API_KEY environment variable "
                "or pass api_key parameter."
            )
        # Imported lazily so the SDK is only required when this provider
        # is actually instantiated.
        import openai

        self._client = openai.OpenAI(api_key=self._api_key)

    @property
    def model(self) -> str:
        """The configured OpenAI model ID."""
        return self._model

    def get_tool_format(self) -> str:
        """Tool-definition format accepted by this provider."""
        return "openai"

    def chat(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        max_tokens: int = 4096,
    ) -> ChatResponse:
        """Send a chat completion request and normalize the response.

        Args:
            messages: OpenAI-style message dicts.
            tools: Optional OpenAI-style tool definitions.
            max_tokens: Maximum number of tokens to generate.

        Returns:
            Normalized ChatResponse for the first choice.

        Raises:
            RuntimeError: If the API returns no choices.
        """
        request: dict[str, Any] = {
            "model": self._model,
            "messages": messages,
            "max_tokens": max_tokens,
        }
        if tools:
            request["tools"] = tools

        response = self._client.chat.completions.create(**request)

        if not response.choices:
            raise RuntimeError("OpenAI API returned empty response choices")

        first = response.choices[0]
        calls = [
            ToolCall(
                id=tc.id,
                name=tc.function.name,
                arguments=tc.function.arguments,
            )
            for tc in (first.message.tool_calls or [])
        ]

        return ChatResponse(
            message=ChatMessage(
                role="assistant",
                content=first.message.content,
                tool_calls=calls,
            ),
            finish_reason=first.finish_reason,
            input_tokens=response.usage.prompt_tokens,
            output_tokens=response.usage.completion_tokens,
        )
246
+
247
+
248
class GeminiProvider:
    """Provider for Google Gemini API."""

    def __init__(
        self,
        model: str,
        api_key: str | None = None,
    ) -> None:
        """Initialize Gemini provider.

        Args:
            model: Gemini model ID (e.g., 'gemini-2.0-flash', 'gemini-1.5-pro').
            api_key: API key. If None, uses GOOGLE_API_KEY env var.

        Raises:
            ValueError: If no key is given and GOOGLE_API_KEY is unset.
        """
        self._model = model
        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
        if not self._api_key:
            raise ValueError(
                "Google API key required. Set GOOGLE_API_KEY environment variable "
                "or pass api_key parameter."
            )
        # Imported lazily so the SDK is only required when this provider is used.
        import google.generativeai as genai

        genai.configure(api_key=self._api_key)
        self._genai = genai
        self._client = genai.GenerativeModel(model)

    @property
    def model(self) -> str:
        """The configured Gemini model ID."""
        return self._model

    def get_tool_format(self) -> str:
        # Callers hand this provider OpenAI-format tools; _convert_tools
        # translates them to Gemini function declarations internally.
        return "openai"

    def _convert_messages(
        self, messages: list[dict[str, Any]]
    ) -> tuple[list[dict[str, Any]], str | None]:
        """Convert OpenAI-style messages to Gemini content format.

        Extracts system messages to use as system_instruction (Gemini's native
        system prompt support), and converts the remaining messages.

        Args:
            messages: List of OpenAI-style message dicts.

        Returns:
            Tuple of (contents, system_instruction). system_instruction is None
            if no system message was found.
        """
        contents: list[dict[str, Any]] = []
        system_instruction: str | None = None
        for msg in messages:
            role = msg.get("role", "user")
            if role == "system":
                # If multiple system messages exist, the last one wins.
                system_instruction = msg.get("content", "")
            elif role == "assistant":
                contents.append({"role": "model", "parts": [msg.get("content", "")]})
            else:
                # NOTE(review): roles other than user/assistant/system (e.g.
                # "tool") are passed through unchanged; Gemini may reject
                # them — confirm against callers.
                contents.append({"role": role, "parts": [msg.get("content", "")]})
        return contents, system_instruction

    def _convert_tools(self, tools: list[dict[str, Any]] | None) -> list[Any] | None:
        """Convert OpenAI-style tool definitions to Gemini function declarations.

        Args:
            tools: List of OpenAI-style tool dicts.

        Returns:
            List of Gemini Tool objects, or None.
        """
        if not tools:
            return None

        function_declarations = []
        for tool in tools:
            func = tool.get("function", {})
            function_declarations.append(
                self._genai.protos.FunctionDeclaration(
                    name=func.get("name", ""),
                    description=func.get("description", ""),
                    parameters=func.get("parameters"),
                )
            )
        return [self._genai.protos.Tool(function_declarations=function_declarations)]

    def chat(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        max_tokens: int = 4096,
    ) -> ChatResponse:
        """Send a generate_content request and normalize the response.

        Args:
            messages: OpenAI-style message dicts (converted internally).
            tools: Optional OpenAI-style tool definitions.
            max_tokens: Maximum output tokens to generate.

        Returns:
            Normalized ChatResponse for the first candidate.

        Raises:
            RuntimeError: If the API returns no candidates.
        """
        contents, system_instruction = self._convert_messages(messages)
        gemini_tools = self._convert_tools(tools)

        kwargs: dict[str, Any] = {
            "contents": contents,
            "generation_config": {"max_output_tokens": max_tokens},
        }
        if gemini_tools:
            kwargs["tools"] = gemini_tools
        if system_instruction:
            kwargs["system_instruction"] = system_instruction

        response = self._client.generate_content(**kwargs)

        if not response.candidates:
            raise RuntimeError("Gemini API returned empty candidates")

        text_parts: list[str] = []
        tool_calls = []
        candidate = response.candidates[0]

        for part in candidate.content.parts:
            if part.function_call and part.function_call.name:
                args_dict = dict(part.function_call.args) if part.function_call.args else {}
                tool_calls.append(
                    ToolCall(
                        # Gemini does not assign call IDs; synthesize an
                        # OpenAI-style one so downstream code can correlate
                        # tool results with calls.
                        id=f"call_{uuid.uuid4().hex[:24]}",
                        name=part.function_call.name,
                        arguments=json.dumps(args_dict),
                    )
                )
            elif part.text:
                # BUGFIX: accumulate every text part instead of keeping only
                # the last one — Gemini may split a reply across parts, and
                # the previous assignment dropped all but the final part.
                text_parts.append(part.text)

        content_text = "".join(text_parts)

        # Map Gemini finish reasons onto the OpenAI-style vocabulary used
        # by the rest of the codebase.
        finish_reason = "stop"
        if tool_calls:
            finish_reason = "tool_calls"
        elif hasattr(candidate.finish_reason, "name"):
            reason_name = candidate.finish_reason.name
            if reason_name == "STOP":
                finish_reason = "stop"
            elif reason_name == "MAX_TOKENS":
                finish_reason = "length"

        return ChatResponse(
            message=ChatMessage(
                role="assistant",
                content=content_text if content_text else None,
                tool_calls=tool_calls,
            ),
            finish_reason=finish_reason,
            input_tokens=getattr(response.usage_metadata, "prompt_token_count", 0)
            if response.usage_metadata
            else 0,
            output_tokens=getattr(response.usage_metadata, "candidates_token_count", 0)
            if response.usage_metadata
            else 0,
        )
397
+
398
+
399
class QwenProvider:
    """Provider for Alibaba Qwen API (OpenAI-compatible via DashScope).

    Qwen models are accessed through the DashScope international API endpoint
    which provides an OpenAI-compatible interface.
    """

    DASHSCOPE_BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"

    def __init__(
        self,
        model: str,
        api_key: str | None = None,
    ) -> None:
        """Initialize Qwen provider.

        Args:
            model: Qwen model ID (e.g., 'qwen-plus', 'qwen-turbo', 'qwen-max').
            api_key: API key. If None, uses DASHSCOPE_API_KEY env var.

        Raises:
            ValueError: If no key is given and DASHSCOPE_API_KEY is unset.
        """
        self._model = model
        self._api_key = api_key or os.environ.get("DASHSCOPE_API_KEY")
        if not self._api_key:
            raise ValueError(
                "DashScope API key required. Set DASHSCOPE_API_KEY environment variable "
                "or pass api_key parameter."
            )
        # Lazy import: only needed once a Qwen provider is constructed.
        import openai

        self._client = openai.OpenAI(
            api_key=self._api_key,
            base_url=self.DASHSCOPE_BASE_URL,
        )

    @property
    def model(self) -> str:
        """The configured Qwen model ID."""
        return self._model

    def get_tool_format(self) -> str:
        """Tool-definition format accepted by this provider (OpenAI-compatible)."""
        return "openai"

    def chat(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        max_tokens: int = 4096,
    ) -> ChatResponse:
        """Send a chat completion request via the DashScope endpoint.

        Args:
            messages: OpenAI-style message dicts.
            tools: Optional OpenAI-style tool definitions.
            max_tokens: Maximum number of tokens to generate.

        Returns:
            Normalized ChatResponse for the first choice.

        Raises:
            RuntimeError: If the API returns no choices.
        """
        payload: dict[str, Any] = {
            "model": self._model,
            "messages": messages,
            "max_tokens": max_tokens,
        }
        if tools:
            payload["tools"] = tools

        completion = self._client.chat.completions.create(**payload)

        if not completion.choices:
            raise RuntimeError("Qwen API returned empty response choices")

        top = completion.choices[0]
        extracted = []
        for call in top.message.tool_calls or []:
            extracted.append(
                ToolCall(
                    id=call.id,
                    name=call.function.name,
                    arguments=call.function.arguments,
                )
            )

        usage = completion.usage
        return ChatResponse(
            message=ChatMessage(
                role="assistant",
                content=top.message.content,
                tool_calls=extracted,
            ),
            finish_reason=top.finish_reason,
            input_tokens=usage.prompt_tokens,
            output_tokens=usage.completion_tokens,
        )
481
+
482
+
173
483
# Maps the provider name used in configuration to the class implementing it.
# Consumed by create_provider() to instantiate the requested backend.
PROVIDER_REGISTRY: dict[str, type] = {
    "anthropic": AnthropicProvider,
    "openai": OpenAIProvider,
    "gemini": GeminiProvider,
    "qwen": QwenProvider,
}
176
489
 
177
490
 
@@ -183,7 +496,7 @@ def create_provider(
183
496
  """Factory function to create a model provider.
184
497
 
185
498
  Args:
186
- provider_name: Name of the provider (currently only 'anthropic').
499
+ provider_name: Name of the provider ('anthropic', 'openai', 'gemini', 'qwen').
187
500
  model: Model identifier for the provider.
188
501
  api_key: Optional API key.
189
502