agentic-flow 1.2.6 → 1.3.0

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
@@ -26,7 +26,7 @@ function getModelForProvider(provider) {
            };
        case 'openrouter':
            return {
-               model: process.env.COMPLETION_MODEL || 'meta-llama/llama-3.1-8b-instruct',
+               model: process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat',
                apiKey: process.env.OPENROUTER_API_KEY || process.env.ANTHROPIC_API_KEY || '',
                baseURL: process.env.PROXY_URL || undefined
            };
@@ -103,13 +103,13 @@ export async function claudeAgent(agent, input, onStream, modelOverride) {
    try {
        // MCP server setup - enable in-SDK server and optional external servers
        const mcpServers = {};
-       // Enable in-SDK MCP server for custom tools
-       if (process.env.ENABLE_CLAUDE_FLOW_SDK === 'true') {
+       // Enable in-SDK MCP server for custom tools (enabled by default)
+       if (process.env.ENABLE_CLAUDE_FLOW_SDK !== 'false') {
            mcpServers['claude-flow-sdk'] = claudeFlowSdkServer;
        }
-       // Optional external MCP servers (disabled by default to avoid subprocess failures)
-       // Enable by setting ENABLE_CLAUDE_FLOW_MCP=true or ENABLE_FLOW_NEXUS_MCP=true
-       if (process.env.ENABLE_CLAUDE_FLOW_MCP === 'true') {
+       // External MCP servers (enabled by default for full 213-tool access)
+       // Disable by setting ENABLE_CLAUDE_FLOW_MCP=false
+       if (process.env.ENABLE_CLAUDE_FLOW_MCP !== 'false') {
            mcpServers['claude-flow'] = {
                type: 'stdio',
                command: 'npx',
@@ -121,7 +121,7 @@ export async function claudeAgent(agent, input, onStream, modelOverride) {
                }
            };
        }
-       if (process.env.ENABLE_FLOW_NEXUS_MCP === 'true') {
+       if (process.env.ENABLE_FLOW_NEXUS_MCP !== 'false') {
            mcpServers['flow-nexus'] = {
                type: 'stdio',
                command: 'npx',
@@ -132,7 +132,7 @@ export async function claudeAgent(agent, input, onStream, modelOverride) {
                }
            };
        }
-       if (process.env.ENABLE_AGENTIC_PAYMENTS_MCP === 'true') {
+       if (process.env.ENABLE_AGENTIC_PAYMENTS_MCP !== 'false') {
            mcpServers['agentic-payments'] = {
                type: 'stdio',
                command: 'npx',
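
Note: all four MCP toggles above (ENABLE_CLAUDE_FLOW_SDK, ENABLE_CLAUDE_FLOW_MCP, ENABLE_FLOW_NEXUS_MCP, ENABLE_AGENTIC_PAYMENTS_MCP) flip from opt-in to opt-out in 1.3.0: a server previously loaded only when its variable was exactly 'true'; it now loads unless the variable is exactly 'false'. A minimal sketch of the new predicate (the helper name is illustrative, not part of the package):

// Illustrative only: the opt-out semantics of the 1.3.0 checks above.
// Unset, empty, or anything other than the literal string 'false' enables the server.
const mcpEnabled = (flag) => process.env[flag] !== 'false';

mcpEnabled('ENABLE_CLAUDE_FLOW_MCP');        // true when the variable is unset
process.env.ENABLE_FLOW_NEXUS_MCP = 'false';
mcpEnabled('ENABLE_FLOW_NEXUS_MCP');         // false: explicit opt-out
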
@@ -250,7 +250,7 @@ export async function directApiAgent(agent, input, onStream) {
    const params = {
        model: provider === 'gemini'
            ? (process.env.COMPLETION_MODEL || 'gemini-2.0-flash-exp')
-           : (process.env.COMPLETION_MODEL || 'meta-llama/llama-3.1-8b-instruct'),
+           : (process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat'),
        messages: messagesWithSystem,
        maxTokens: 8192,
        temperature: 0.7
@@ -39,7 +39,7 @@ function getProxyConfig(provider, customPort) {
        provider: 'openrouter',
        port,
        baseUrl,
-       model: process.env.COMPLETION_MODEL || 'mistralai/mistral-small-3.1-24b-instruct',
+       model: process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat',
        apiKey: process.env.OPENROUTER_API_KEY || '',
        requiresProxy: true
    };
@@ -120,7 +120,7 @@ async function startProxyServer(config) {
        proxy = new AnthropicToOpenRouterProxy({
            openrouterApiKey: config.apiKey,
            openrouterBaseUrl: process.env.ANTHROPIC_PROXY_BASE_URL,
-           defaultModel: config.model || 'mistralai/mistral-small-3.1-24b-instruct'
+           defaultModel: config.model || 'deepseek/deepseek-chat'
        });
    }
    // Start proxy
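
Note: every default-model fallback in this release converges on deepseek/deepseek-chat, replacing both meta-llama/llama-3.1-8b-instruct and mistralai/mistral-small-3.1-24b-instruct. COMPLETION_MODEL still wins every `||` chain, so a minimal sketch of pinning the 1.2.6 behavior (the variable name is from the diff; where you set it, e.g. shell or process manager, is up to you):

// Hedged sketch: restore the old default by setting the env var that every
// fallback chain above consults before its hardcoded default.
process.env.COMPLETION_MODEL = 'meta-llama/llama-3.1-8b-instruct';
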
package/dist/cli-proxy.js CHANGED
@@ -32,6 +32,7 @@ import { claudeAgent } from "./agents/claudeAgent.js";
 import { handleConfigCommand } from "./cli/config-wizard.js";
 import { handleAgentCommand } from "./cli/agent-manager.js";
 import { ModelOptimizer } from "./utils/modelOptimizer.js";
+import { detectModelCapabilities } from "./utils/modelCapabilities.js";
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = dirname(__filename);
 const packageJson = JSON.parse(readFileSync(pathResolve(__dirname, '../package.json'), 'utf-8'));
@@ -264,11 +265,13 @@ class AgenticFlowCLI {
        const defaultModel = modelOverride ||
            process.env.COMPLETION_MODEL ||
            process.env.REASONING_MODEL ||
-           'meta-llama/llama-3.1-8b-instruct';
+           'deepseek/deepseek-chat';
+       const capabilities = detectModelCapabilities(defaultModel);
        const proxy = new AnthropicToOpenRouterProxy({
            openrouterApiKey: openrouterKey,
            openrouterBaseUrl: process.env.ANTHROPIC_PROXY_BASE_URL,
-           defaultModel
+           defaultModel,
+           capabilities: capabilities
        });
        // Start proxy in background
        proxy.start(this.proxyPort);
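
Note: modelCapabilities.js itself is not included in this diff, but the call sites here and below pin down the minimum contract detectModelCapabilities must satisfy. A hedged sketch of that shape (the two field names come from usage in this diff; the detection logic is pure assumption):

// Assumed return shape, inferred only from call sites in this diff:
// requiresEmulation drives request routing; emulationStrategy feeds the
// banner and ToolEmulator ('react' is the strategy the logs rate at 70-85%).
function detectModelCapabilitiesSketch(model) {
    // Hypothetical heuristic; the real module may consult a model registry.
    const hasNativeTools = /deepseek|gpt-4|claude|gemini/i.test(model);
    return {
        requiresEmulation: !hasNativeTools,
        emulationStrategy: 'react'
    };
}
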
@@ -281,7 +284,13 @@ class AgenticFlowCLI {
        }
        console.log(`šŸ”— Proxy Mode: OpenRouter`);
        console.log(`šŸ”§ Proxy URL: http://localhost:${this.proxyPort}`);
-       console.log(`šŸ¤– Default Model: ${defaultModel}\n`);
+       console.log(`šŸ¤– Default Model: ${defaultModel}`);
+       if (capabilities.requiresEmulation) {
+           console.log(`\nāš™ļø Detected: Model lacks native tool support`);
+           console.log(`šŸ”§ Using ${capabilities.emulationStrategy.toUpperCase()} emulation pattern`);
+           console.log(`šŸ“Š Expected reliability: ${capabilities.emulationStrategy === 'react' ? '70-85%' : '50-70%'}`);
+       }
+       console.log('');
        // Wait for proxy to be ready
        await new Promise(resolve => setTimeout(resolve, 1500));
    }
@@ -416,7 +425,7 @@ Get your key at: https://openrouter.ai/keys
 `);
     process.exit(1);
 }
-const finalModel = model || process.env.COMPLETION_MODEL || 'meta-llama/llama-3.1-8b-instruct';
+const finalModel = model || process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat';
 console.log(`šŸš€ Starting OpenRouter → Anthropic Proxy
 šŸ“ Port: ${port}
 šŸ¤– Model: ${finalModel}
@@ -551,9 +560,17 @@ EXAMPLES:
        console.log(`šŸ“ Description: ${agent.description}\n`);
        console.log(`šŸŽÆ Task: ${task}\n`);
        if (useOpenRouter) {
-           const model = options.model || process.env.COMPLETION_MODEL || 'meta-llama/llama-3.1-8b-instruct';
+           const model = options.model || process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat';
            console.log(`šŸ”§ Provider: OpenRouter (via proxy)`);
-           console.log(`šŸ”§ Model: ${model}\n`);
+           console.log(`šŸ”§ Model: ${model}`);
+           // Show tool capability information
+           const capabilities = detectModelCapabilities(model);
+           if (capabilities.requiresEmulation) {
+               console.log(`āš™ļø Tool Emulation: ${capabilities.emulationStrategy.toUpperCase()} pattern`);
+               console.log(`šŸ“Š Note: This model uses prompt-based tool emulation`);
+               console.log(`   Tools are handled by Claude Agent SDK (limited to SDK tools)`);
+           }
+           console.log('');
        }
        else if (useGemini) {
            const model = options.model || 'gemini-2.0-flash-exp';
@@ -153,7 +153,7 @@ Get your key at: https://openrouter.ai/keys
 `);
     process.exit(1);
 }
-const model = options.model || process.env.COMPLETION_MODEL || 'meta-llama/llama-3.1-8b-instruct';
+const model = options.model || process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat';
 console.log(`šŸš€ Starting OpenRouter → Anthropic Proxy
 šŸ“ Port: ${options.port}
 šŸ¤– Model: ${model}
@@ -3,16 +3,28 @@
 import express from 'express';
 import { logger } from '../utils/logger.js';
 import { getMaxTokensForModel } from './provider-instructions.js';
+import { detectModelCapabilities } from '../utils/modelCapabilities.js';
+import { ToolEmulator, executeEmulation } from './tool-emulation.js';
 export class AnthropicToOpenRouterProxy {
    app;
    openrouterApiKey;
    openrouterBaseUrl;
    defaultModel;
+   capabilities;
    constructor(config) {
        this.app = express();
        this.openrouterApiKey = config.openrouterApiKey;
        this.openrouterBaseUrl = config.openrouterBaseUrl || 'https://openrouter.ai/api/v1';
        this.defaultModel = config.defaultModel || 'meta-llama/llama-3.1-8b-instruct';
+       this.capabilities = config.capabilities;
+       // Debug logging
+       if (this.capabilities) {
+           logger.info('Proxy initialized with capabilities', {
+               model: this.defaultModel,
+               requiresEmulation: this.capabilities.requiresEmulation,
+               strategy: this.capabilities.emulationStrategy
+           });
+       }
        this.setupMiddleware();
        this.setupRoutes();
    }
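
Note: with the new capabilities field, callers can hand the proxy a precomputed capability profile instead of leaving detection to handleRequest. A usage sketch against the constructor and start() exactly as they appear in this diff (import paths and the proxy filename are assumptions, not confirmed by the diff):

import { detectModelCapabilities } from '../utils/modelCapabilities.js'; // path assumed
import { AnthropicToOpenRouterProxy } from './anthropic-to-openrouter.js'; // filename assumed

const model = process.env.COMPLETION_MODEL || 'deepseek/deepseek-chat';
const proxy = new AnthropicToOpenRouterProxy({
    openrouterApiKey: process.env.OPENROUTER_API_KEY,
    defaultModel: model,
    capabilities: detectModelCapabilities(model) // optional; handleRequest re-detects when absent
});
proxy.start(3000); // same call pattern as proxy.start(this.proxyPort) in cli-proxy.js
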
@@ -66,99 +78,10 @@ export class AnthropicToOpenRouterProxy {
                        : JSON.stringify(firstMsg.content).substring(0, 200)
                });
            }
-           // Convert Anthropic format to OpenAI format
-           const openaiReq = this.convertAnthropicToOpenAI(anthropicReq);
-           // VERBOSE LOGGING: Log converted OpenAI request
-           logger.info('=== CONVERTED OPENAI REQUEST ===', {
-               anthropicModel: anthropicReq.model,
-               openaiModel: openaiReq.model,
-               messageCount: openaiReq.messages.length,
-               systemPrompt: openaiReq.messages[0]?.content?.substring(0, 300),
-               toolCount: openaiReq.tools?.length || 0,
-               toolNames: openaiReq.tools?.map(t => t.function.name) || [],
-               maxTokens: openaiReq.max_tokens,
-               apiKeyPresent: !!this.openrouterApiKey,
-               apiKeyPrefix: this.openrouterApiKey?.substring(0, 10)
-           });
-           // Forward to OpenRouter
-           const response = await fetch(`${this.openrouterBaseUrl}/chat/completions`, {
-               method: 'POST',
-               headers: {
-                   'Authorization': `Bearer ${this.openrouterApiKey}`,
-                   'Content-Type': 'application/json',
-                   'HTTP-Referer': 'https://github.com/ruvnet/agentic-flow',
-                   'X-Title': 'Agentic Flow'
-               },
-               body: JSON.stringify(openaiReq)
-           });
-           if (!response.ok) {
-               const error = await response.text();
-               logger.error('OpenRouter API error', { status: response.status, error });
-               return res.status(response.status).json({
-                   error: {
-                       type: 'api_error',
-                       message: error
-                   }
-               });
-           }
-           // VERBOSE LOGGING: Log OpenRouter response status
-           logger.info('=== OPENROUTER RESPONSE RECEIVED ===', {
-               status: response.status,
-               statusText: response.statusText,
-               headers: Object.fromEntries(response.headers.entries())
-           });
-           // Handle streaming vs non-streaming
-           if (anthropicReq.stream) {
-               logger.info('Handling streaming response...');
-               // Stream response
-               res.setHeader('Content-Type', 'text/event-stream');
-               res.setHeader('Cache-Control', 'no-cache');
-               res.setHeader('Connection', 'keep-alive');
-               const reader = response.body?.getReader();
-               if (!reader) {
-                   throw new Error('No response body');
-               }
-               const decoder = new TextDecoder();
-               while (true) {
-                   const { done, value } = await reader.read();
-                   if (done)
-                       break;
-                   const chunk = decoder.decode(value);
-                   const anthropicChunk = this.convertOpenAIStreamToAnthropic(chunk);
-                   res.write(anthropicChunk);
-               }
-               res.end();
-           }
-           else {
-               logger.info('Handling non-streaming response...');
-               // Non-streaming response
-               const openaiRes = await response.json();
-               // VERBOSE LOGGING: Log raw OpenAI response
-               logger.info('=== RAW OPENAI RESPONSE ===', {
-                   id: openaiRes.id,
-                   model: openaiRes.model,
-                   choices: openaiRes.choices?.length,
-                   finishReason: openaiRes.choices?.[0]?.finish_reason,
-                   hasToolCalls: !!(openaiRes.choices?.[0]?.message?.tool_calls),
-                   toolCallCount: openaiRes.choices?.[0]?.message?.tool_calls?.length || 0,
-                   toolCallNames: openaiRes.choices?.[0]?.message?.tool_calls?.map((tc) => tc.function.name) || [],
-                   contentPreview: openaiRes.choices?.[0]?.message?.content?.substring(0, 300),
-                   usage: openaiRes.usage
-               });
-               const anthropicRes = this.convertOpenAIToAnthropic(openaiRes);
-               // VERBOSE LOGGING: Log converted Anthropic response
-               logger.info('=== CONVERTED ANTHROPIC RESPONSE ===', {
-                   id: anthropicRes.id,
-                   model: anthropicRes.model,
-                   role: anthropicRes.role,
-                   stopReason: anthropicRes.stop_reason,
-                   contentBlocks: anthropicRes.content?.length,
-                   contentTypes: anthropicRes.content?.map((c) => c.type),
-                   toolUseCount: anthropicRes.content?.filter((c) => c.type === 'tool_use').length,
-                   textPreview: anthropicRes.content?.find((c) => c.type === 'text')?.text?.substring(0, 200),
-                   usage: anthropicRes.usage
-               });
-               res.json(anthropicRes);
+           // Route to appropriate handler based on capabilities
+           const result = await this.handleRequest(anthropicReq, res);
+           if (result) {
+               res.json(result);
            }
        }
        catch (error) {
@@ -182,6 +105,168 @@ export class AnthropicToOpenRouterProxy {
            });
        });
    }
+   async handleRequest(anthropicReq, res) {
+       let model = anthropicReq.model || this.defaultModel;
+       // If SDK is requesting a Claude model but we're using OpenRouter with a different default,
+       // override to use the CLI-specified model
+       if (model.startsWith('claude-') && this.defaultModel && !this.defaultModel.startsWith('claude-')) {
+           logger.info(`Overriding SDK Claude model ${model} with CLI-specified ${this.defaultModel}`);
+           model = this.defaultModel;
+           anthropicReq.model = model;
+       }
+       const capabilities = this.capabilities || detectModelCapabilities(model);
+       // Check if emulation is required
+       if (capabilities.requiresEmulation && anthropicReq.tools && anthropicReq.tools.length > 0) {
+           logger.info(`Using tool emulation for model: ${model}`);
+           return this.handleEmulatedRequest(anthropicReq, capabilities);
+       }
+       return this.handleNativeRequest(anthropicReq, res);
+   }
+   async handleNativeRequest(anthropicReq, res) {
+       // Convert Anthropic format to OpenAI format
+       const openaiReq = this.convertAnthropicToOpenAI(anthropicReq);
+       // VERBOSE LOGGING: Log converted OpenAI request
+       logger.info('=== CONVERTED OPENAI REQUEST ===', {
+           anthropicModel: anthropicReq.model,
+           openaiModel: openaiReq.model,
+           messageCount: openaiReq.messages.length,
+           systemPrompt: openaiReq.messages[0]?.content?.substring(0, 300),
+           toolCount: openaiReq.tools?.length || 0,
+           toolNames: openaiReq.tools?.map(t => t.function.name) || [],
+           maxTokens: openaiReq.max_tokens,
+           apiKeyPresent: !!this.openrouterApiKey,
+           apiKeyPrefix: this.openrouterApiKey?.substring(0, 10)
+       });
+       // Forward to OpenRouter
+       const response = await fetch(`${this.openrouterBaseUrl}/chat/completions`, {
+           method: 'POST',
+           headers: {
+               'Authorization': `Bearer ${this.openrouterApiKey}`,
+               'Content-Type': 'application/json',
+               'HTTP-Referer': 'https://github.com/ruvnet/agentic-flow',
+               'X-Title': 'Agentic Flow'
+           },
+           body: JSON.stringify(openaiReq)
+       });
+       if (!response.ok) {
+           const error = await response.text();
+           logger.error('OpenRouter API error', { status: response.status, error });
+           res.status(response.status).json({
+               error: {
+                   type: 'api_error',
+                   message: error
+               }
+           });
+           return null;
+       }
+       // VERBOSE LOGGING: Log OpenRouter response status
+       logger.info('=== OPENROUTER RESPONSE RECEIVED ===', {
+           status: response.status,
+           statusText: response.statusText,
+           headers: Object.fromEntries(response.headers.entries())
+       });
+       // Handle streaming vs non-streaming
+       if (anthropicReq.stream) {
+           logger.info('Handling streaming response...');
+           // Stream response
+           res.setHeader('Content-Type', 'text/event-stream');
+           res.setHeader('Cache-Control', 'no-cache');
+           res.setHeader('Connection', 'keep-alive');
+           const reader = response.body?.getReader();
+           if (!reader) {
+               throw new Error('No response body');
+           }
+           const decoder = new TextDecoder();
+           while (true) {
+               const { done, value } = await reader.read();
+               if (done)
+                   break;
+               const chunk = decoder.decode(value);
+               const anthropicChunk = this.convertOpenAIStreamToAnthropic(chunk);
+               res.write(anthropicChunk);
+           }
+           res.end();
+           return null; // Already sent response
+       }
+       else {
+           logger.info('Handling non-streaming response...');
+           // Non-streaming response
+           const openaiRes = await response.json();
+           // VERBOSE LOGGING: Log raw OpenAI response
+           logger.info('=== RAW OPENAI RESPONSE ===', {
+               id: openaiRes.id,
+               model: openaiRes.model,
+               choices: openaiRes.choices?.length,
+               finishReason: openaiRes.choices?.[0]?.finish_reason,
+               hasToolCalls: !!(openaiRes.choices?.[0]?.message?.tool_calls),
+               toolCallCount: openaiRes.choices?.[0]?.message?.tool_calls?.length || 0,
+               toolCallNames: openaiRes.choices?.[0]?.message?.tool_calls?.map((tc) => tc.function.name) || [],
+               contentPreview: openaiRes.choices?.[0]?.message?.content?.substring(0, 300),
+               usage: openaiRes.usage
+           });
+           const anthropicRes = this.convertOpenAIToAnthropic(openaiRes);
+           // VERBOSE LOGGING: Log converted Anthropic response
+           logger.info('=== CONVERTED ANTHROPIC RESPONSE ===', {
+               id: anthropicRes.id,
+               model: anthropicRes.model,
+               role: anthropicRes.role,
+               stopReason: anthropicRes.stop_reason,
+               contentBlocks: anthropicRes.content?.length,
+               contentTypes: anthropicRes.content?.map((c) => c.type),
+               toolUseCount: anthropicRes.content?.filter((c) => c.type === 'tool_use').length,
+               textPreview: anthropicRes.content?.find((c) => c.type === 'text')?.text?.substring(0, 200),
+               usage: anthropicRes.usage
+           });
+           return anthropicRes;
+       }
+   }
+   async handleEmulatedRequest(anthropicReq, capabilities) {
+       const emulator = new ToolEmulator(anthropicReq.tools || [], capabilities.emulationStrategy);
+       const lastMessage = anthropicReq.messages[anthropicReq.messages.length - 1];
+       const userMessage = typeof lastMessage.content === 'string'
+           ? lastMessage.content
+           : (lastMessage.content.find(c => c.type === 'text')?.text || '');
+       const result = await executeEmulation(emulator, userMessage, async (prompt) => {
+           // Call model with emulation prompt
+           const openaiReq = {
+               model: anthropicReq.model || this.defaultModel,
+               messages: [{ role: 'user', content: prompt }],
+               temperature: anthropicReq.temperature,
+               max_tokens: anthropicReq.max_tokens
+           };
+           const response = await this.callOpenRouter(openaiReq);
+           return response.choices[0].message.content;
+       }, async (toolCall) => {
+           logger.warn(`Tool execution not yet implemented: ${toolCall.name}`);
+           return { error: 'Tool execution not implemented in Phase 2' };
+       }, { maxIterations: 5, verbose: process.env.VERBOSE === 'true' });
+       return {
+           id: `emulated_${Date.now()}`,
+           type: 'message',
+           role: 'assistant',
+           content: [{ type: 'text', text: result.finalAnswer || 'No response' }],
+           model: anthropicReq.model || this.defaultModel,
+           stop_reason: 'end_turn',
+           usage: { input_tokens: 0, output_tokens: 0 }
+       };
+   }
+   async callOpenRouter(openaiReq) {
+       const response = await fetch(`${this.openrouterBaseUrl}/chat/completions`, {
+           method: 'POST',
+           headers: {
+               'Authorization': `Bearer ${this.openrouterApiKey}`,
+               'Content-Type': 'application/json',
+               'HTTP-Referer': 'https://github.com/ruvnet/agentic-flow',
+               'X-Title': 'Agentic Flow'
+           },
+           body: JSON.stringify(openaiReq)
+       });
+       if (!response.ok) {
+           const error = await response.text();
+           throw new Error(`OpenRouter API error: ${error}`);
+       }
+       return response.json();
+   }
    convertAnthropicToOpenAI(anthropicReq) {
        logger.info('=== STARTING ANTHROPIC TO OPENAI CONVERSION ===');
        const messages = [];
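
Note: tool-emulation.js is likewise outside this diff, but handleEmulatedRequest above fixes its surface: a ToolEmulator built from (tools, strategy), and executeEmulation(emulator, userMessage, callModel, executeTool, { maxIterations, verbose }) resolving to an object with finalAnswer. A minimal ReAct-style loop consistent with that call site (internals and the emulator method names are guesses, not from the package):

// Guesswork internals; only the signature and option names come from the diff.
export async function executeEmulationSketch(emulator, userMessage, callModel, executeTool, opts = {}) {
    // buildPrompt/parseToolCall are hypothetical ToolEmulator methods.
    let transcript = emulator.buildPrompt(userMessage);
    for (let i = 0; i < (opts.maxIterations ?? 5); i++) {
        const reply = await callModel(transcript);
        const toolCall = emulator.parseToolCall(reply); // e.g. extract Action / Action Input
        if (!toolCall) {
            return { finalAnswer: reply }; // model answered without requesting a tool
        }
        const observation = await executeTool(toolCall);
        if (opts.verbose) console.log(`[emulation] ${toolCall.name} ->`, observation);
        transcript += `\n${reply}\nObservation: ${JSON.stringify(observation)}`;
    }
    return { finalAnswer: 'Stopped after max iterations without a final answer' };
}
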
@@ -507,7 +592,12 @@ export class AnthropicToOpenRouterProxy {
        });
        console.log(`\nāœ… Anthropic Proxy running at http://localhost:${port}`);
        console.log(`   OpenRouter Base URL: ${this.openrouterBaseUrl}`);
-       console.log(`   Default Model: ${this.defaultModel}\n`);
+       console.log(`   Default Model: ${this.defaultModel}`);
+       if (this.capabilities?.requiresEmulation) {
+           console.log(`\n   āš™ļø Tool Emulation: ${this.capabilities.emulationStrategy.toUpperCase()} pattern`);
+           console.log(`   šŸ“Š Expected reliability: ${this.capabilities.emulationStrategy === 'react' ? '70-85%' : '50-70%'}`);
+       }
+       console.log('');
        });
    }
 }