codecov-cli 0.3.2__tar.gz → 0.3.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {codecov-cli-0.3.2/codecov_cli.egg-info → codecov-cli-0.3.4}/PKG-INFO +1 -1
- codecov-cli-0.3.4/codecov_cli/__init__.py +4 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/labelanalysis.py +54 -11
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/upload.py +9 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/upload_process.py +3 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/runners/python_standard_runner.py +3 -115
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/__init__.py +93 -39
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/__init__.py +4 -1
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/python/__init__.py +5 -2
- codecov-cli-0.3.4/codecov_cli/services/staticanalysis/analyzers/python/node_wrappers.py +117 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload/__init__.py +2 -1
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload/upload_collector.py +3 -1
- {codecov-cli-0.3.2 → codecov-cli-0.3.4/codecov_cli.egg-info}/PKG-INFO +1 -1
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli.egg-info/requires.txt +0 -2
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/setup.py +2 -4
- codecov-cli-0.3.2/codecov_cli/__init__.py +0 -1
- codecov-cli-0.3.2/codecov_cli/services/staticanalysis/analyzers/python/node_wrappers.py +0 -52
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/LICENSE +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/MANIFEST.in +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/README.md +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/base_picking.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/commit.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/create_report_result.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/empty_upload.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/get_report_results.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/report.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/send_notifications.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/commands/staticanalysis.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/fallbacks.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/appveyor_ci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/azure_pipelines.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/base.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/bitbucket_ci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/bitrise_ci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/buildkite.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/circleci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/cirrus_ci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/codebuild.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/droneci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/github_actions.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/gitlab_ci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/heroku.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/jenkins.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/local.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/teamcity.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/travis_ci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/ci_adapters/woodpeckerci.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/config.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/encoder.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/folder_searcher.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/git.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/logging_utils.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/options.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/request.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/validators.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/helpers/versioning_systems.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/main.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/plugins/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/plugins/compress_pycoverage_contexts.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/plugins/gcov.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/plugins/pycoverage.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/plugins/types.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/plugins/xcode.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/runners/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/runners/dan_runner.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/runners/types.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/commit/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/commit/base_picking.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/empty_upload/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/report/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/general.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/javascript_es6/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/javascript_es6/node_wrappers.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/exceptions.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/finders.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/types.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload/coverage_file_finder.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload/legacy_upload_sender.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload/network_finder.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload/upload_sender.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/upload_completion/__init__.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/types.py +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli.egg-info/SOURCES.txt +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli.egg-info/dependency_links.txt +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli.egg-info/entry_points.txt +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli.egg-info/top_level.txt +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/languages.c +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterjavascript/src/parser.c +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterjavascript/src/scanner.c +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterjavascript/src/tree_sitter/parser.h +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterpython/src/parser.c +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterpython/src/scanner.cc +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterpython/src/tree_sitter/parser.h +0 -0
- {codecov-cli-0.3.2 → codecov-cli-0.3.4}/setup.cfg +0 -0
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import json
|
|
1
2
|
import logging
|
|
2
3
|
import pathlib
|
|
3
4
|
import time
|
|
@@ -55,13 +56,23 @@ logger = logging.getLogger("codecovcli")
|
|
|
55
56
|
@click.option(
|
|
56
57
|
"--dry-run",
|
|
57
58
|
"dry_run",
|
|
58
|
-
help=
|
|
59
|
+
help=(
|
|
60
|
+
"Print list of tests to run AND tests skipped (and options that need to be added to the test runner) to stdout. "
|
|
61
|
+
+ "Also prints the same information in JSON format. "
|
|
62
|
+
+ "JSON will have keys 'ats_tests_to_run', 'ats_tests_to_skip' and 'runner_options'. "
|
|
63
|
+
+ "List of tests to run is prefixed with ATS_TESTS_TO_RUN= "
|
|
64
|
+
+ "List of tests to skip is prefixed with ATS_TESTS_TO_SKIP="
|
|
65
|
+
),
|
|
59
66
|
is_flag=True,
|
|
60
67
|
)
|
|
61
68
|
@click.option(
|
|
62
69
|
"--dry-run-output-path",
|
|
63
70
|
"dry_run_output_path",
|
|
64
|
-
help=
|
|
71
|
+
help=(
|
|
72
|
+
"Prints the dry-run list (ATS_TESTS_TO_RUN) into dry_run_output_path (in addition to stdout)\n"
|
|
73
|
+
+ "AND prints ATS_TESTS_TO_SKIP into dry_run_output_path_skipped\n"
|
|
74
|
+
+ "AND prints dry-run JSON output into dry_run_output_path.json"
|
|
75
|
+
),
|
|
65
76
|
type=pathlib.Path,
|
|
66
77
|
default=None,
|
|
67
78
|
)
|
|
@@ -313,18 +324,50 @@ def _dry_run_output(
|
|
|
313
324
|
runner: LabelAnalysisRunnerInterface,
|
|
314
325
|
dry_run_output_path: Optional[pathlib.Path],
|
|
315
326
|
):
|
|
316
|
-
labels_to_run =
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
327
|
+
labels_to_run = set(
|
|
328
|
+
result.absent_labels + result.global_level_labels + result.present_diff_labels
|
|
329
|
+
)
|
|
330
|
+
labels_skipped = set(result.present_report_labels) - labels_to_run
|
|
331
|
+
# If the test label can contain spaces and dashes the test runner might
|
|
332
|
+
# interpret it as an option and not a label
|
|
333
|
+
# So we wrap it in doublequotes just to be extra sure
|
|
334
|
+
labels_run_wrapped_double_quotes = sorted(
|
|
335
|
+
map(lambda l: '"' + l + '"', labels_to_run)
|
|
336
|
+
)
|
|
337
|
+
labels_skip_wrapped_double_quotes = sorted(
|
|
338
|
+
map(lambda l: '"' + l + '"', labels_skipped)
|
|
339
|
+
)
|
|
340
|
+
|
|
341
|
+
output_as_dict = dict(
|
|
342
|
+
runner_options=runner.dry_run_runner_options,
|
|
343
|
+
ats_tests_to_run=labels_run_wrapped_double_quotes,
|
|
344
|
+
ats_tests_to_skip=labels_skip_wrapped_double_quotes,
|
|
322
345
|
)
|
|
323
|
-
output = runner.dry_run_runner_options + sorted(labels_to_run)
|
|
324
346
|
if dry_run_output_path is not None:
|
|
325
347
|
with open(dry_run_output_path, "w") as fd:
|
|
326
|
-
fd.write(
|
|
327
|
-
|
|
348
|
+
fd.write(
|
|
349
|
+
" ".join(
|
|
350
|
+
runner.dry_run_runner_options + labels_run_wrapped_double_quotes
|
|
351
|
+
)
|
|
352
|
+
+ "\n"
|
|
353
|
+
)
|
|
354
|
+
with open(str(dry_run_output_path) + "_skipped", "w") as fd:
|
|
355
|
+
fd.write(
|
|
356
|
+
" ".join(
|
|
357
|
+
runner.dry_run_runner_options + labels_skip_wrapped_double_quotes
|
|
358
|
+
)
|
|
359
|
+
+ "\n"
|
|
360
|
+
)
|
|
361
|
+
with open(str(dry_run_output_path) + ".json", "w") as fd:
|
|
362
|
+
fd.write(json.dumps(output_as_dict) + "\n")
|
|
363
|
+
|
|
364
|
+
click.echo(json.dumps(output_as_dict))
|
|
365
|
+
click.echo(
|
|
366
|
+
f"ATS_TESTS_TO_RUN={' '.join(runner.dry_run_runner_options + labels_run_wrapped_double_quotes)}"
|
|
367
|
+
)
|
|
368
|
+
click.echo(
|
|
369
|
+
f"ATS_TESTS_TO_SKIP={' '.join(runner.dry_run_runner_options + labels_skip_wrapped_double_quotes)}"
|
|
370
|
+
)
|
|
328
371
|
|
|
329
372
|
|
|
330
373
|
def _fallback_to_collected_labels(
|
|
@@ -65,6 +65,12 @@ _global_upload_options = [
|
|
|
65
65
|
is_flag=True,
|
|
66
66
|
default=False,
|
|
67
67
|
),
|
|
68
|
+
click.option(
|
|
69
|
+
"--disable-file-fixes",
|
|
70
|
+
help="Disable file fixes to ignore common lines from coverage (e.g. blank lines or empty brackets)",
|
|
71
|
+
is_flag=True,
|
|
72
|
+
default=False,
|
|
73
|
+
),
|
|
68
74
|
click.option(
|
|
69
75
|
"-b",
|
|
70
76
|
"--build",
|
|
@@ -178,6 +184,7 @@ def do_upload(
|
|
|
178
184
|
coverage_files_search_exclude_folders: typing.List[pathlib.Path],
|
|
179
185
|
coverage_files_search_explicitly_listed_files: typing.List[pathlib.Path],
|
|
180
186
|
disable_search: bool,
|
|
187
|
+
disable_file_fixes: bool,
|
|
181
188
|
token: typing.Optional[uuid.UUID],
|
|
182
189
|
plugin_names: typing.List[str],
|
|
183
190
|
branch: typing.Optional[str],
|
|
@@ -218,6 +225,7 @@ def do_upload(
|
|
|
218
225
|
git_service=git_service,
|
|
219
226
|
enterprise_url=enterprise_url,
|
|
220
227
|
disable_search=disable_search,
|
|
228
|
+
disable_file_fixes=disable_file_fixes,
|
|
221
229
|
handle_no_reports_found=handle_no_reports_found,
|
|
222
230
|
)
|
|
223
231
|
),
|
|
@@ -254,4 +262,5 @@ def do_upload(
|
|
|
254
262
|
enterprise_url=enterprise_url,
|
|
255
263
|
disable_search=disable_search,
|
|
256
264
|
handle_no_reports_found=handle_no_reports_found,
|
|
265
|
+
disable_file_fixes=disable_file_fixes,
|
|
257
266
|
)
|
|
@@ -37,6 +37,7 @@ def upload_process(
|
|
|
37
37
|
coverage_files_search_exclude_folders: typing.List[pathlib.Path],
|
|
38
38
|
coverage_files_search_explicitly_listed_files: typing.List[pathlib.Path],
|
|
39
39
|
disable_search: bool,
|
|
40
|
+
disable_file_fixes: bool,
|
|
40
41
|
token: typing.Optional[uuid.UUID],
|
|
41
42
|
plugin_names: typing.List[str],
|
|
42
43
|
branch: typing.Optional[str],
|
|
@@ -72,6 +73,7 @@ def upload_process(
|
|
|
72
73
|
pull_request_number=pull_request_number,
|
|
73
74
|
git_service=git_service,
|
|
74
75
|
disable_search=disable_search,
|
|
76
|
+
disable_file_fixes=disable_file_fixes,
|
|
75
77
|
fail_on_error=fail_on_error,
|
|
76
78
|
handle_no_reports_found=handle_no_reports_found,
|
|
77
79
|
)
|
|
@@ -123,4 +125,5 @@ def upload_process(
|
|
|
123
125
|
dry_run=dry_run,
|
|
124
126
|
git_service=git_service,
|
|
125
127
|
handle_no_reports_found=handle_no_reports_found,
|
|
128
|
+
disable_file_fixes=disable_file_fixes,
|
|
126
129
|
)
|
|
@@ -1,17 +1,11 @@
|
|
|
1
1
|
import logging
|
|
2
2
|
import random
|
|
3
3
|
import subprocess
|
|
4
|
-
from contextlib import redirect_stdout
|
|
5
|
-
from io import StringIO, TextIOWrapper
|
|
6
|
-
from multiprocessing import Process, Queue, get_context
|
|
7
|
-
from os import getcwd
|
|
8
|
-
from queue import Empty
|
|
9
4
|
from subprocess import CalledProcessError
|
|
10
|
-
from sys import
|
|
5
|
+
from sys import stdout
|
|
11
6
|
from typing import List, Optional
|
|
12
7
|
|
|
13
8
|
import click
|
|
14
|
-
import pytest
|
|
15
9
|
|
|
16
10
|
from codecov_cli.runners.types import (
|
|
17
11
|
LabelAnalysisRequestResult,
|
|
@@ -43,58 +37,6 @@ class PythonStandardRunnerConfigParams(dict):
|
|
|
43
37
|
"""
|
|
44
38
|
return self.get("coverage_root", "./")
|
|
45
39
|
|
|
46
|
-
@property
|
|
47
|
-
def strict_mode(self) -> bool:
|
|
48
|
-
"""
|
|
49
|
-
Run pytest from within Python instead of using subprocess.run
|
|
50
|
-
This is potentailly safer than using subprocess.run because it guarantees better that
|
|
51
|
-
the program running is indeed pytest.
|
|
52
|
-
But it might not work everytime due to import issues related to Python caching modules.
|
|
53
|
-
"""
|
|
54
|
-
return self.get("strict_mode", False)
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
def _include_curr_dir(method):
|
|
58
|
-
"""
|
|
59
|
-
Account for the difference 'pytest' vs 'python -m pytest'
|
|
60
|
-
https://docs.pytest.org/en/7.1.x/how-to/usage.html#calling-pytest-through-python-m-pytest
|
|
61
|
-
Used only in strict_mode
|
|
62
|
-
"""
|
|
63
|
-
|
|
64
|
-
def call_method(self, *args, **kwargs):
|
|
65
|
-
curr_dir = getcwd()
|
|
66
|
-
path.append(curr_dir)
|
|
67
|
-
|
|
68
|
-
result = method(self, *args, **kwargs)
|
|
69
|
-
|
|
70
|
-
path.remove(curr_dir)
|
|
71
|
-
return result
|
|
72
|
-
|
|
73
|
-
return call_method
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
def _execute_pytest_subprocess(
|
|
77
|
-
pytest_args: List[str],
|
|
78
|
-
queue: Queue,
|
|
79
|
-
parent_stdout: TextIOWrapper,
|
|
80
|
-
capture_output: bool = True,
|
|
81
|
-
):
|
|
82
|
-
"""Runs pytest from python in a subprocess.
|
|
83
|
-
This is because we call it twice in the label-analysis process,
|
|
84
|
-
so we might have import errors if calling it directly.
|
|
85
|
-
Check the warning: https://docs.pytest.org/en/7.1.x/how-to/usage.html#calling-pytest-from-python-code
|
|
86
|
-
|
|
87
|
-
Returns the output value and pytest exit code via queue
|
|
88
|
-
"""
|
|
89
|
-
subproces_stdout = parent_stdout
|
|
90
|
-
if capture_output:
|
|
91
|
-
subproces_stdout = StringIO()
|
|
92
|
-
with redirect_stdout(subproces_stdout):
|
|
93
|
-
result = pytest.main(pytest_args)
|
|
94
|
-
if capture_output:
|
|
95
|
-
queue.put({"output": subproces_stdout.getvalue()})
|
|
96
|
-
queue.put({"result": result})
|
|
97
|
-
|
|
98
40
|
|
|
99
41
|
class PythonStandardRunner(LabelAnalysisRunnerInterface):
|
|
100
42
|
|
|
@@ -106,54 +48,6 @@ class PythonStandardRunner(LabelAnalysisRunnerInterface):
|
|
|
106
48
|
config_params = {}
|
|
107
49
|
self.params = PythonStandardRunnerConfigParams(config_params)
|
|
108
50
|
|
|
109
|
-
def _wait_pytest(self, pytest_process: Process, queue: Queue):
|
|
110
|
-
pytest_process.start()
|
|
111
|
-
result = None
|
|
112
|
-
output = None
|
|
113
|
-
while pytest_process.exitcode == 0 or pytest_process.exitcode == None:
|
|
114
|
-
from_queue = None
|
|
115
|
-
try:
|
|
116
|
-
from_queue = queue.get(timeout=1)
|
|
117
|
-
except Empty:
|
|
118
|
-
pass
|
|
119
|
-
if from_queue and "output" in from_queue:
|
|
120
|
-
output = from_queue["output"]
|
|
121
|
-
if from_queue and "result" in from_queue:
|
|
122
|
-
result = from_queue["result"]
|
|
123
|
-
if result is not None:
|
|
124
|
-
break
|
|
125
|
-
pytest_process.join()
|
|
126
|
-
return result, output
|
|
127
|
-
|
|
128
|
-
@_include_curr_dir
|
|
129
|
-
def _execute_pytest_strict(
|
|
130
|
-
self, pytest_args: List[str], capture_output: bool = True
|
|
131
|
-
) -> str:
|
|
132
|
-
"""Handles calling pytest from Python in a subprocess.
|
|
133
|
-
Raises Exception if pytest fails
|
|
134
|
-
Returns the complete pytest output
|
|
135
|
-
"""
|
|
136
|
-
ctx = get_context(method="fork")
|
|
137
|
-
queue = ctx.Queue(2)
|
|
138
|
-
p = ctx.Process(
|
|
139
|
-
target=_execute_pytest_subprocess,
|
|
140
|
-
args=[pytest_args, queue, stdout, capture_output],
|
|
141
|
-
)
|
|
142
|
-
result, output = self._wait_pytest(p, queue)
|
|
143
|
-
|
|
144
|
-
if p.exitcode != 0 or (result != pytest.ExitCode.OK and result != 0):
|
|
145
|
-
message = f"Pytest exited with non-zero code {result}."
|
|
146
|
-
message += "\nThis is likely not a problem with label-analysis. Check pytest's output and options."
|
|
147
|
-
if capture_output:
|
|
148
|
-
# If pytest failed but we captured its output the user won't know what's wrong
|
|
149
|
-
# So we need to include that in the error message
|
|
150
|
-
message += "\nPYTEST OUTPUT:"
|
|
151
|
-
message += "\n" + output
|
|
152
|
-
else:
|
|
153
|
-
message += "\n(you can check pytest options on the logs before the test session start)"
|
|
154
|
-
raise click.ClickException(message)
|
|
155
|
-
return output
|
|
156
|
-
|
|
157
51
|
def parse_captured_output_error(self, exp: CalledProcessError) -> str:
|
|
158
52
|
result = ""
|
|
159
53
|
for out_stream in [exp.stdout, exp.stderr]:
|
|
@@ -202,10 +96,7 @@ class PythonStandardRunner(LabelAnalysisRunnerInterface):
|
|
|
202
96
|
),
|
|
203
97
|
)
|
|
204
98
|
|
|
205
|
-
|
|
206
|
-
output = self._execute_pytest_strict(options_to_use)
|
|
207
|
-
else:
|
|
208
|
-
output = self._execute_pytest(options_to_use)
|
|
99
|
+
output = self._execute_pytest(options_to_use)
|
|
209
100
|
lines = output.split(sep="\n")
|
|
210
101
|
test_names = list(line for line in lines if ("::" in line and "test" in line))
|
|
211
102
|
return test_names
|
|
@@ -254,10 +145,7 @@ class PythonStandardRunner(LabelAnalysisRunnerInterface):
|
|
|
254
145
|
"List of tests executed",
|
|
255
146
|
extra=dict(extra_log_attributes=dict(executed_tests=tests_to_run)),
|
|
256
147
|
)
|
|
257
|
-
|
|
258
|
-
output = self._execute_pytest_strict(command_array, capture_output=False)
|
|
259
|
-
else:
|
|
260
|
-
output = self._execute_pytest(command_array, capture_output=False)
|
|
148
|
+
output = self._execute_pytest(command_array, capture_output=False)
|
|
261
149
|
logger.info(f"Finished running {len(tests_to_run)} tests successfully")
|
|
262
150
|
logger.info(f" pytest options: \"{' '.join(default_options)}\"")
|
|
263
151
|
logger.debug(output)
|
|
@@ -36,24 +36,15 @@ async def run_analysis_entrypoint(
|
|
|
36
36
|
):
|
|
37
37
|
ff = select_file_finder(config)
|
|
38
38
|
files = list(ff.find_files(folder, pattern, folders_to_exclude))
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
file_results = pool.imap_unordered(mapped_func, files)
|
|
49
|
-
for x in file_results:
|
|
50
|
-
bar.update(1, x)
|
|
51
|
-
if x is not None:
|
|
52
|
-
res = x.asdict()["result"]
|
|
53
|
-
all_data[x.filename] = res
|
|
54
|
-
file_metadata.append(
|
|
55
|
-
{"filepath": x.filename, "file_hash": res["hash"]}
|
|
56
|
-
)
|
|
39
|
+
processing_results = await process_files(files, numberprocesses, config)
|
|
40
|
+
# Let users know if there were processing errors
|
|
41
|
+
# This is here and not in the funcition so we can add an option to ignore those (possibly)
|
|
42
|
+
# Also makes the function easier to test
|
|
43
|
+
processing_errors = processing_results["processing_errors"]
|
|
44
|
+
log_processing_errors(processing_errors)
|
|
45
|
+
# Upload results metadata to codecov to get list of files that we need to upload
|
|
46
|
+
file_metadata = processing_results["file_metadata"]
|
|
47
|
+
all_data = processing_results["all_data"]
|
|
57
48
|
try:
|
|
58
49
|
json_output = {"commit": commit, "filepaths": file_metadata}
|
|
59
50
|
logger.debug(
|
|
@@ -118,7 +109,14 @@ async def run_analysis_entrypoint(
|
|
|
118
109
|
for el in files_that_need_upload:
|
|
119
110
|
all_tasks.append(send_single_upload_put(client, all_data, el))
|
|
120
111
|
bar.update(1, all_data[el["filepath"]])
|
|
121
|
-
|
|
112
|
+
try:
|
|
113
|
+
resps = await asyncio.gather(*all_tasks)
|
|
114
|
+
except asyncio.CancelledError:
|
|
115
|
+
message = (
|
|
116
|
+
"Unknown error cancelled the upload tasks.\n"
|
|
117
|
+
+ f"Uploaded {len(uploaded_files)}/{len(files_that_need_upload)} files successfully."
|
|
118
|
+
)
|
|
119
|
+
raise click.ClickException(message)
|
|
122
120
|
for resp in resps:
|
|
123
121
|
if resp["succeeded"]:
|
|
124
122
|
uploaded_files.append(resp["filepath"])
|
|
@@ -150,37 +148,91 @@ async def run_analysis_entrypoint(
|
|
|
150
148
|
extra_log_attributes=dict(time_taken=response.elapsed.total_seconds())
|
|
151
149
|
),
|
|
152
150
|
)
|
|
151
|
+
log_processing_errors(processing_errors)
|
|
153
152
|
|
|
154
153
|
|
|
155
|
-
|
|
154
|
+
def log_processing_errors(processing_errors: typing.Dict[str, str]) -> None:
|
|
155
|
+
if len(processing_errors) > 0:
|
|
156
|
+
logger.error(
|
|
157
|
+
f"{len(processing_errors)} files have processing errors and have been IGNORED."
|
|
158
|
+
)
|
|
159
|
+
for file, error in processing_errors.items():
|
|
160
|
+
logger.error(f"-> {file}: ERROR {error}")
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
async def process_files(
|
|
164
|
+
files_to_analyze: typing.List[FileAnalysisRequest],
|
|
165
|
+
numberprocesses: int,
|
|
166
|
+
config: typing.Optional[typing.Dict],
|
|
167
|
+
):
|
|
168
|
+
logger.info(f"Running the analyzer on {len(files_to_analyze)} files")
|
|
169
|
+
mapped_func = partial(analyze_file, config)
|
|
170
|
+
all_data = {}
|
|
171
|
+
file_metadata = []
|
|
172
|
+
errors = {}
|
|
173
|
+
with click.progressbar(
|
|
174
|
+
length=len(files_to_analyze),
|
|
175
|
+
label="Analyzing files",
|
|
176
|
+
) as bar:
|
|
177
|
+
with get_context("fork").Pool(processes=numberprocesses) as pool:
|
|
178
|
+
file_results = pool.imap_unordered(mapped_func, files_to_analyze)
|
|
179
|
+
for result in file_results:
|
|
180
|
+
bar.update(1, result)
|
|
181
|
+
if result is not None:
|
|
182
|
+
if result.result:
|
|
183
|
+
all_data[result.filename] = result.result
|
|
184
|
+
file_metadata.append(
|
|
185
|
+
{
|
|
186
|
+
"filepath": result.filename,
|
|
187
|
+
"file_hash": result.result["hash"],
|
|
188
|
+
}
|
|
189
|
+
)
|
|
190
|
+
elif result.error:
|
|
191
|
+
errors[result.filename] = result.error
|
|
192
|
+
logger.info("All files have been processed")
|
|
193
|
+
return dict(
|
|
194
|
+
all_data=all_data, file_metadata=file_metadata, processing_errors=errors
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
async def send_single_upload_put(client, all_data, el) -> typing.Dict:
|
|
156
199
|
retryable_statuses = (429,)
|
|
157
200
|
presigned_put = el["raw_upload_location"]
|
|
158
201
|
number_retries = 5
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
202
|
+
try:
|
|
203
|
+
for current_retry in range(number_retries):
|
|
204
|
+
response = await client.put(
|
|
205
|
+
presigned_put, data=json.dumps(all_data[el["filepath"]])
|
|
206
|
+
)
|
|
207
|
+
if response.status_code < 300:
|
|
208
|
+
return {
|
|
209
|
+
"status_code": response.status_code,
|
|
210
|
+
"filepath": el["filepath"],
|
|
211
|
+
"succeeded": True,
|
|
212
|
+
}
|
|
213
|
+
if response.status_code in retryable_statuses:
|
|
214
|
+
await asyncio.sleep(2**current_retry)
|
|
215
|
+
status_code = response.status_code
|
|
216
|
+
message_to_warn = response.text
|
|
217
|
+
exception = None
|
|
218
|
+
except httpx.HTTPError as exp:
|
|
219
|
+
status_code = None
|
|
220
|
+
exception = type(exp)
|
|
221
|
+
message_to_warn = str(exp)
|
|
171
222
|
logger.warning(
|
|
172
|
-
"Unable to send
|
|
223
|
+
"Unable to send single_upload_put",
|
|
173
224
|
extra=dict(
|
|
174
225
|
extra_log_attributes=dict(
|
|
175
|
-
|
|
226
|
+
message=message_to_warn,
|
|
227
|
+
exception=exception,
|
|
176
228
|
filepath=el["filepath"],
|
|
177
|
-
|
|
178
|
-
latest_status_code=response.status_code,
|
|
229
|
+
latest_status_code=status_code,
|
|
179
230
|
)
|
|
180
231
|
),
|
|
181
232
|
)
|
|
182
233
|
return {
|
|
183
|
-
"status_code":
|
|
234
|
+
"status_code": status_code,
|
|
235
|
+
"exception": exception,
|
|
184
236
|
"filepath": el["filepath"],
|
|
185
237
|
"succeeded": False,
|
|
186
238
|
}
|
|
@@ -205,7 +257,9 @@ def send_finish_signal(response_json, upload_url: str, token: str):
|
|
|
205
257
|
return response
|
|
206
258
|
|
|
207
259
|
|
|
208
|
-
def analyze_file(
|
|
260
|
+
def analyze_file(
|
|
261
|
+
config, filename: FileAnalysisRequest
|
|
262
|
+
) -> typing.Optional[FileAnalysisResult]:
|
|
209
263
|
try:
|
|
210
264
|
with open(filename.actual_filepath, "rb") as file:
|
|
211
265
|
actual_code = file.read()
|
|
@@ -219,7 +273,7 @@ def analyze_file(config, filename: FileAnalysisRequest):
|
|
|
219
273
|
except AnalysisError as e:
|
|
220
274
|
error_dict = {
|
|
221
275
|
"filename": str(filename.result_filename),
|
|
222
|
-
"
|
|
276
|
+
"error": str(e),
|
|
223
277
|
}
|
|
224
278
|
return FileAnalysisResult(
|
|
225
279
|
filename=str(filename.result_filename), error=error_dict
|
{codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/__init__.py
RENAMED
|
@@ -1,9 +1,12 @@
|
|
|
1
1
|
from codecov_cli.services.staticanalysis.analyzers.general import BaseAnalyzer
|
|
2
2
|
from codecov_cli.services.staticanalysis.analyzers.javascript_es6 import ES6Analyzer
|
|
3
3
|
from codecov_cli.services.staticanalysis.analyzers.python import PythonAnalyzer
|
|
4
|
+
from codecov_cli.services.staticanalysis.types import FileAnalysisRequest
|
|
4
5
|
|
|
5
6
|
|
|
6
|
-
def get_best_analyzer(
|
|
7
|
+
def get_best_analyzer(
|
|
8
|
+
filename: FileAnalysisRequest, actual_code: bytes
|
|
9
|
+
) -> BaseAnalyzer:
|
|
7
10
|
if filename.actual_filepath.suffix == ".py":
|
|
8
11
|
return PythonAnalyzer(filename, actual_code)
|
|
9
12
|
if filename.actual_filepath.suffix == ".js":
|
|
@@ -7,6 +7,7 @@ from codecov_cli.services.staticanalysis.analyzers.general import BaseAnalyzer
|
|
|
7
7
|
from codecov_cli.services.staticanalysis.analyzers.python.node_wrappers import (
|
|
8
8
|
NodeVisitor,
|
|
9
9
|
)
|
|
10
|
+
from codecov_cli.services.staticanalysis.types import FileAnalysisRequest
|
|
10
11
|
|
|
11
12
|
_function_query_str = """
|
|
12
13
|
(function_definition
|
|
@@ -52,14 +53,16 @@ class PythonAnalyzer(BaseAnalyzer):
|
|
|
52
53
|
]
|
|
53
54
|
wrappers = ["class_definition", "function_definition"]
|
|
54
55
|
|
|
55
|
-
def __init__(
|
|
56
|
+
def __init__(
|
|
57
|
+
self, file_analysis_request: FileAnalysisRequest, actual_code: bytes, **options
|
|
58
|
+
):
|
|
56
59
|
self.actual_code = actual_code
|
|
57
60
|
self.lines = self.actual_code.split(b"\n")
|
|
58
61
|
self.statements = []
|
|
59
62
|
self.import_lines = set()
|
|
60
63
|
self.definitions_lines = set()
|
|
61
64
|
self.functions = []
|
|
62
|
-
self.path =
|
|
65
|
+
self.path = file_analysis_request.result_filename
|
|
63
66
|
self.PY_LANGUAGE = Language(staticcodecov_languages.__file__, "python")
|
|
64
67
|
self.parser = Parser()
|
|
65
68
|
self.parser.set_language(self.PY_LANGUAGE)
|
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
from tree_sitter import Node
|
|
2
|
+
|
|
3
|
+
from codecov_cli.services.staticanalysis.exceptions import AnalysisError
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class NodeVisitor(object):
    """Pre-order walker over a tree-sitter parse tree.

    Records results onto the analyzer passed at construction time:

    * ``analyzer.statements`` — one dict per tracked statement (line, column,
      code hash, span length).
    * ``analyzer.line_surety_ancestorship`` — maps a statement's line number
      to the line of the statement known to run immediately before it.
    """

    # Node types that produce an entry in ``analyzer.statements``.
    _TRACKED_STATEMENT_TYPES = (
        "expression_statement",
        "return_statement",
        "if_statement",
        "for_statement",
        "while_statement",
    )

    def __init__(self, analyzer):
        self.analyzer = analyzer

    def start_visit(self, node):
        self.visit(node)

    def visit(self, node: "Node"):
        # Pre-order traversal: process the node, then recurse into children.
        self.do_visit(node)
        for child in node.children:
            self.visit(child)

    def _is_function_docstring(self, node: "Node"):
        """Tell whether *node* is the docstring of a function, such as this one.

        Pytest doesn't include function docstrings in the report, so we don't
        either, at least for now.  Module and class docstrings DO show up in
        pytest results, so those are kept.
        """
        # Docstrings are parsed as an 'expression_statement' wrapping a string.
        if node.type != "expression_statement":
            return False
        single_child = len(node.children) == 1
        child_is_string = node.children[0].type == "string"
        # ...and a docstring must be the first statement of a function's block.
        in_block = node.parent.type == "block"
        leads_block = node.prev_named_sibling is None
        block_of_function = (
            in_block and node.parent.parent.type == "function_definition"
        )
        return (
            single_child
            and child_is_string
            and in_block
            and leads_block
            and block_of_function
        )

    def _get_previous_sibling_that_is_not_comment_not_func_docstring(self, node: "Node"):
        """Return the closest previous named sibling that is actual code,
        skipping comments and function docstrings; None if there is none."""
        sibling = node.prev_named_sibling
        while sibling is not None and (
            sibling.type == "comment" or self._is_function_docstring(sibling)
        ):
            sibling = sibling.prev_named_sibling
        return sibling

    def _first_statement_of_body(self, node: "Node", field_name, label):
        """Return the first statement of a compound statement's named field.

        ``field_name`` is "consequence" for if/elif and "body" for loops —
        tree-sitter attaches these field names to the executed branch.
        Unwraps the surrounding 'block' node when present.  Raises
        AnalysisError if the block has no children at all.
        """
        entry = node.child_by_field_name(field_name)
        try:
            if entry.type == "block":
                entry = entry.children[0]  # BUG
        except IndexError:
            raise AnalysisError(
                f"{label} is empty block @ {self.analyzer.path}:{entry.start_point[0] + 1}, column {entry.start_point[1]}"
            )
        return entry

    def do_visit(self, node: "Node"):
        if not node.is_named:
            return
        current_line_number = node.start_point[0] + 1
        if node.type in self._TRACKED_STATEMENT_TYPES:
            if self._is_function_docstring(node):
                # Function docstrings are deliberately ignored.
                return
            predecessor = (
                self._get_previous_sibling_that_is_not_comment_not_func_docstring(
                    node
                )
            )
            if predecessor:
                self.analyzer.line_surety_ancestorship[current_line_number] = (
                    predecessor.start_point[0] + 1
                )
            self.analyzer.statements.append(
                {
                    "current_line": current_line_number,
                    "start_column": node.start_point[1],
                    "line_hash": self.analyzer._get_code_hash(
                        node.start_byte, node.end_byte
                    ),
                    "len": node.end_point[0] + 1 - current_line_number,
                    "extra_connected_lines": tuple(),
                }
            )
        if node.type in ("if_statement", "elif_clause"):
            branch_entry = self._first_statement_of_body(
                node, "consequence", "if_statement consequence"
            )
            self.analyzer.line_surety_ancestorship[
                branch_entry.start_point[0] + 1
            ] = current_line_number
        if node.type in ("for_statement", "while_statement"):
            loop_entry = self._first_statement_of_body(
                node, "body", "loop_statement body"
            )
            self.analyzer.line_surety_ancestorship[
                loop_entry.start_point[0] + 1
            ] = current_line_number
|
|
@@ -50,6 +50,7 @@ def do_upload_logic(
|
|
|
50
50
|
enterprise_url: typing.Optional[str],
|
|
51
51
|
disable_search: bool = False,
|
|
52
52
|
handle_no_reports_found: bool = False,
|
|
53
|
+
disable_file_fixes: bool = False,
|
|
53
54
|
):
|
|
54
55
|
preparation_plugins = select_preparation_plugins(cli_config, plugin_names)
|
|
55
56
|
coverage_file_selector = select_coverage_file_finder(
|
|
@@ -60,7 +61,7 @@ def do_upload_logic(
|
|
|
60
61
|
)
|
|
61
62
|
network_finder = select_network_finder(versioning_system)
|
|
62
63
|
collector = UploadCollector(
|
|
63
|
-
preparation_plugins, network_finder, coverage_file_selector
|
|
64
|
+
preparation_plugins, network_finder, coverage_file_selector, disable_file_fixes
|
|
64
65
|
)
|
|
65
66
|
try:
|
|
66
67
|
upload_data = collector.generate_upload_data()
|
|
@@ -29,15 +29,17 @@ class UploadCollector(object):
|
|
|
29
29
|
preparation_plugins: typing.List[PreparationPluginInterface],
|
|
30
30
|
network_finder: NetworkFinder,
|
|
31
31
|
coverage_file_finder: CoverageFileFinder,
|
|
32
|
+
disable_file_fixes: bool = False,
|
|
32
33
|
):
|
|
33
34
|
self.preparation_plugins = preparation_plugins
|
|
34
35
|
self.network_finder = network_finder
|
|
35
36
|
self.coverage_file_finder = coverage_file_finder
|
|
37
|
+
self.disable_file_fixes = disable_file_fixes
|
|
36
38
|
|
|
37
39
|
def _produce_file_fixes_for_network(
|
|
38
40
|
self, network: typing.List[str]
|
|
39
41
|
) -> typing.List[UploadCollectionResultFileFixer]:
|
|
40
|
-
if not network:
|
|
42
|
+
if not network or self.disable_file_fixes:
|
|
41
43
|
return []
|
|
42
44
|
# patterns that we don't need to specify a reason for
|
|
43
45
|
empty_line_regex = re.compile(r"^\s*$")
|
|
@@ -8,8 +8,8 @@ here = path.abspath(path.dirname(__file__))
|
|
|
8
8
|
with open(path.join(here, "README.md"), encoding="utf-8") as f:
|
|
9
9
|
long_description = f.read()
|
|
10
10
|
|
|
11
|
-
with open(path.join(here, "
|
|
12
|
-
version = f.
|
|
11
|
+
with open(path.join(here, "VERSION"), encoding="utf-8") as f:
|
|
12
|
+
version = f.readline().strip()
|
|
13
13
|
|
|
14
14
|
setup(
|
|
15
15
|
name="codecov-cli",
|
|
@@ -24,8 +24,6 @@ setup(
|
|
|
24
24
|
"click==8.*",
|
|
25
25
|
"httpx==0.23.*",
|
|
26
26
|
"ijson==3.*",
|
|
27
|
-
"pytest==7.*",
|
|
28
|
-
"pytest-cov>=3",
|
|
29
27
|
"pyyaml==6.*",
|
|
30
28
|
"responses==0.21.*",
|
|
31
29
|
"smart-open==6.*",
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
__version__ = "0.3.2"
|
|
@@ -1,52 +0,0 @@
|
|
|
1
|
-
class NodeVisitor(object):
    """Pre-order walker over a tree-sitter parse tree that records, on the
    analyzer it is given, every tracked statement and which line precedes it.
    """

    # Node types that produce an entry in ``analyzer.statements``.
    _TRACKED_STATEMENT_TYPES = (
        "expression_statement",
        "return_statement",
        "if_statement",
        "for_statement",
        "while_statement",
    )

    def __init__(self, analyzer):
        self.analyzer = analyzer

    def start_visit(self, node):
        self.visit(node)

    def visit(self, node):
        # Pre-order traversal: process the node, then recurse into children.
        self.do_visit(node)
        for child in node.children:
            self.visit(child)

    def _link_body_entry(self, node, field_name, parent_line):
        """Point the first statement of a compound statement's body back at
        the compound statement's own line in ``line_surety_ancestorship``.

        ``field_name`` is "consequence" for if/elif and "body" for loops;
        the surrounding 'block' node is unwrapped when present.
        """
        entry = node.child_by_field_name(field_name)
        if entry.type == "block":
            entry = entry.children[0]
        self.analyzer.line_surety_ancestorship[
            entry.start_point[0] + 1
        ] = parent_line

    def do_visit(self, node):
        if not node.is_named:
            return
        current_line_number = node.start_point[0] + 1
        if node.type in self._TRACKED_STATEMENT_TYPES:
            if node.prev_named_sibling:
                self.analyzer.line_surety_ancestorship[current_line_number] = (
                    node.prev_named_sibling.start_point[0] + 1
                )
            self.analyzer.statements.append(
                {
                    "current_line": current_line_number,
                    "start_column": node.start_point[1],
                    "line_hash": self.analyzer._get_code_hash(
                        node.start_byte, node.end_byte
                    ),
                    "len": node.end_point[0] + 1 - current_line_number,
                    "extra_connected_lines": tuple(),
                }
            )
        if node.type in ("if_statement", "elif_clause"):
            self._link_body_entry(node, "consequence", current_line_number)
        if node.type in ("for_statement", "while_statement"):
            self._link_body_entry(node, "body", current_line_number)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{codecov-cli-0.3.2 → codecov-cli-0.3.4}/codecov_cli/services/staticanalysis/analyzers/general.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{codecov-cli-0.3.2 → codecov-cli-0.3.4}/languages/treesitterjavascript/src/tree_sitter/parser.h
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|