ruff: reformat the project files

This commit is contained in:
Laszlo Nagy
2025-11-01 15:51:56 +11:00
parent 99dfe00051
commit ea6d141e49
19 changed files with 1345 additions and 1383 deletions

View File

@@ -3,7 +3,7 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module compiles the intercept library. """
"""This module compiles the intercept library."""
import sys
import os
@@ -15,42 +15,39 @@ import contextlib
import logging
import subprocess
__all__ = ['build_libear', 'temporary_directory']
__all__ = ["build_libear", "temporary_directory"]
def build_libear(compiler, dst_dir):
""" Returns the full path to the 'libear' library. """
"""Returns the full path to the 'libear' library."""
try:
src_dir = os.path.dirname(os.path.realpath(__file__))
toolset = make_toolset(src_dir)
toolset.set_compiler(compiler)
toolset.set_language_standard('c99')
toolset.add_definitions(['-D_GNU_SOURCE'])
toolset.set_language_standard("c99")
toolset.add_definitions(["-D_GNU_SOURCE"])
configure = do_configure(toolset)
configure.check_function_exists('execve', 'HAVE_EXECVE')
configure.check_function_exists('execv', 'HAVE_EXECV')
configure.check_function_exists('execvpe', 'HAVE_EXECVPE')
configure.check_function_exists('execvp', 'HAVE_EXECVP')
configure.check_function_exists('execvP', 'HAVE_EXECVP2')
configure.check_function_exists('exect', 'HAVE_EXECT')
configure.check_function_exists('execl', 'HAVE_EXECL')
configure.check_function_exists('execlp', 'HAVE_EXECLP')
configure.check_function_exists('execle', 'HAVE_EXECLE')
configure.check_function_exists('posix_spawn', 'HAVE_POSIX_SPAWN')
configure.check_function_exists('posix_spawnp', 'HAVE_POSIX_SPAWNP')
configure.check_symbol_exists('_NSGetEnviron', 'crt_externs.h',
'HAVE_NSGETENVIRON')
configure.write_by_template(
os.path.join(src_dir, 'config.h.in'),
os.path.join(dst_dir, 'config.h'))
configure.check_function_exists("execve", "HAVE_EXECVE")
configure.check_function_exists("execv", "HAVE_EXECV")
configure.check_function_exists("execvpe", "HAVE_EXECVPE")
configure.check_function_exists("execvp", "HAVE_EXECVP")
configure.check_function_exists("execvP", "HAVE_EXECVP2")
configure.check_function_exists("exect", "HAVE_EXECT")
configure.check_function_exists("execl", "HAVE_EXECL")
configure.check_function_exists("execlp", "HAVE_EXECLP")
configure.check_function_exists("execle", "HAVE_EXECLE")
configure.check_function_exists("posix_spawn", "HAVE_POSIX_SPAWN")
configure.check_function_exists("posix_spawnp", "HAVE_POSIX_SPAWNP")
configure.check_symbol_exists("_NSGetEnviron", "crt_externs.h", "HAVE_NSGETENVIRON")
configure.write_by_template(os.path.join(src_dir, "config.h.in"), os.path.join(dst_dir, "config.h"))
target = create_shared_library('ear', toolset)
target = create_shared_library("ear", toolset)
target.add_include(dst_dir)
target.add_sources('ear.c')
target.add_sources("ear.c")
target.link_against(toolset.dl_libraries())
target.link_against(['pthread'])
target.link_against(["pthread"])
target.build_release(dst_dir)
return os.path.join(dst_dir, target.name)
@@ -61,9 +58,9 @@ def build_libear(compiler, dst_dir):
def execute(cmd, *args, **kwargs):
""" Make subprocess execution silent. """
"""Make subprocess execution silent."""
kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT})
kwargs.update({"stdout": subprocess.PIPE, "stderr": subprocess.STDOUT})
return subprocess.check_call(cmd, *args, **kwargs)
@@ -77,7 +74,7 @@ def temporary_directory(**kwargs):
class Toolset(object):
""" Abstract class to represent different toolset. """
"""Abstract class to represent different toolset."""
def __init__(self, src_dir):
self.src_dir = src_dir
@@ -85,15 +82,15 @@ class Toolset(object):
self.c_flags = []
def set_compiler(self, compiler):
""" part of public interface """
"""part of public interface"""
self.compiler = compiler
def set_language_standard(self, standard):
""" part of public interface """
self.c_flags.append('-std=' + standard)
"""part of public interface"""
self.c_flags.append("-std=" + standard)
def add_definitions(self, defines):
""" part of public interface """
"""part of public interface"""
self.c_flags.extend(defines)
def dl_libraries(self):
@@ -103,8 +100,8 @@ class Toolset(object):
raise NotImplementedError()
def shared_library_c_flags(self, release):
extra = ['-DNDEBUG', '-O3'] if release else []
return extra + ['-fPIC'] + self.c_flags
extra = ["-DNDEBUG", "-O3"] if release else []
return extra + ["-fPIC"] + self.c_flags
def shared_library_ld_flags(self, release, name):
raise NotImplementedError()
@@ -118,11 +115,11 @@ class DarwinToolset(Toolset):
return []
def shared_library_name(self, name):
return 'lib' + name + '.dylib'
return "lib" + name + ".dylib"
def shared_library_ld_flags(self, release, name):
extra = ['-dead_strip'] if release else []
return extra + ['-dynamiclib', '-install_name', '@rpath/' + name]
extra = ["-dead_strip"] if release else []
return extra + ["-dynamiclib", "-install_name", "@rpath/" + name]
class UnixToolset(Toolset):
@@ -133,11 +130,11 @@ class UnixToolset(Toolset):
return []
def shared_library_name(self, name):
return 'lib' + name + '.so'
return "lib" + name + ".so"
def shared_library_ld_flags(self, release, name):
extra = [] if release else []
return extra + ['-shared', '-Wl,-soname,' + name]
return extra + ["-shared", "-Wl,-soname," + name]
class LinuxToolset(UnixToolset):
@@ -145,16 +142,16 @@ class LinuxToolset(UnixToolset):
UnixToolset.__init__(self, src_dir)
def dl_libraries(self):
return ['dl']
return ["dl"]
def make_toolset(src_dir):
platform = sys.platform
if platform in {'win32', 'cygwin'}:
raise RuntimeError('not implemented on this platform')
elif platform == 'darwin':
if platform in {"win32", "cygwin"}:
raise RuntimeError("not implemented on this platform")
elif platform == "darwin":
return DarwinToolset(src_dir)
elif platform in {'linux', 'linux2'}:
elif platform in {"linux", "linux2"}:
return LinuxToolset(src_dir)
else:
return UnixToolset(src_dir)
@@ -163,17 +160,16 @@ def make_toolset(src_dir):
class Configure(object):
def __init__(self, toolset):
self.ctx = toolset
self.results = {'APPLE': sys.platform == 'darwin'}
self.results = {"APPLE": sys.platform == "darwin"}
def _try_to_compile_and_link(self, source):
try:
with temporary_directory() as work_dir:
src_file = 'check.c'
with open(os.path.join(work_dir, src_file), 'w') as handle:
src_file = "check.c"
with open(os.path.join(work_dir, src_file), "w") as handle:
handle.write(source)
execute([self.ctx.compiler, src_file] + self.ctx.c_flags,
cwd=work_dir)
execute([self.ctx.compiler, src_file] + self.ctx.c_flags, cwd=work_dir)
return True
except Exception:
return False
@@ -182,38 +178,35 @@ class Configure(object):
template = "int FUNCTION(); int main() { return FUNCTION(); }"
source = template.replace("FUNCTION", function)
logging.debug('Checking function %s', function)
logging.debug("Checking function %s", function)
found = self._try_to_compile_and_link(source)
logging.debug('Checking function %s -- %s', function,
'found' if found else 'not found')
logging.debug("Checking function %s -- %s", function, "found" if found else "not found")
self.results.update({name: found})
def check_symbol_exists(self, symbol, include, name):
template = """#include <INCLUDE>
int main() { return ((int*)(&SYMBOL))[0]; }"""
source = template.replace('INCLUDE', include).replace("SYMBOL", symbol)
source = template.replace("INCLUDE", include).replace("SYMBOL", symbol)
logging.debug('Checking symbol %s', symbol)
logging.debug("Checking symbol %s", symbol)
found = self._try_to_compile_and_link(source)
logging.debug('Checking symbol %s -- %s', symbol,
'found' if found else 'not found')
logging.debug("Checking symbol %s -- %s", symbol, "found" if found else "not found")
self.results.update({name: found})
def write_by_template(self, template, output):
def transform(line, definitions):
pattern = re.compile(r'^#cmakedefine\s+(\S+)')
pattern = re.compile(r"^#cmakedefine\s+(\S+)")
match = pattern.match(line)
if match:
key = match.group(1)
if key not in definitions or not definitions[key]:
return '/* #undef {0} */{1}'.format(key, os.linesep)
return '#define {0}{1}'.format(key, os.linesep)
return "/* #undef {0} */{1}".format(key, os.linesep)
return "#define {0}{1}".format(key, os.linesep)
return line
with open(template, 'r') as src_handle:
logging.debug('Writing config to %s', output)
with open(output, 'w') as dst_handle:
with open(template, "r") as src_handle:
logging.debug("Writing config to %s", output)
with open(output, "w") as dst_handle:
for line in src_handle:
dst_handle.write(transform(line, self.results))
@@ -231,28 +224,32 @@ class SharedLibrary(object):
self.lib = []
def add_include(self, directory):
self.inc.extend(['-I', directory])
self.inc.extend(["-I", directory])
def add_sources(self, source):
self.src.append(source)
def link_against(self, libraries):
self.lib.extend(['-l' + lib for lib in libraries])
self.lib.extend(["-l" + lib for lib in libraries])
def build_release(self, directory):
for src in self.src:
logging.debug('Compiling %s', src)
logging.debug("Compiling %s", src)
execute(
[self.ctx.compiler, '-c', os.path.join(self.ctx.src_dir, src),
'-o', src + '.o'] + self.inc +
self.ctx.shared_library_c_flags(True),
cwd=directory)
logging.debug('Linking %s', self.name)
[self.ctx.compiler, "-c", os.path.join(self.ctx.src_dir, src), "-o", src + ".o"]
+ self.inc
+ self.ctx.shared_library_c_flags(True),
cwd=directory,
)
logging.debug("Linking %s", self.name)
execute(
[self.ctx.compiler] + [src + '.o' for src in self.src] +
['-o', self.name] + self.lib +
self.ctx.shared_library_ld_flags(True, self.name),
cwd=directory)
[self.ctx.compiler]
+ [src + ".o" for src in self.src]
+ ["-o", self.name]
+ self.lib
+ self.ctx.shared_library_ld_flags(True, self.name),
cwd=directory,
)
def create_shared_library(name, toolset):

View File

@@ -3,7 +3,8 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is a collection of methods commonly used in this project. """
"""This module is a collection of methods commonly used in this project."""
import collections
import functools
import json
@@ -18,63 +19,60 @@ import pprint
from typing import List, Any, Dict, Callable # noqa: ignore=F401
ENVIRONMENT_KEY = 'INTERCEPT_BUILD'
ENVIRONMENT_KEY = "INTERCEPT_BUILD"
Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd'])
Execution = collections.namedtuple("Execution", ["pid", "cwd", "cmd"])
def shell_split(string):
# type: (str) -> List[str]
""" Takes a command string and returns as a list. """
"""Takes a command string and returns as a list."""
def unescape(arg):
# type: (str) -> str
""" Gets rid of the escaping characters. """
"""Gets rid of the escaping characters."""
if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"':
return re.sub(r'\\(["\\])', r'\1', arg[1:-1])
return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg)
return re.sub(r'\\(["\\])', r"\1", arg[1:-1])
return re.sub(r"\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])", r"\1", arg)
return [unescape(token) for token in shlex.split(string)]
def run_build(command, *args, **kwargs):
# type: (...) -> int
""" Run and report build command execution
"""Run and report build command execution
:param command: list of tokens
:return: exit code of the process
"""
environment = kwargs.get('env', os.environ)
logging.debug('run build %s, in environment:\n%s',
command,
pprint.pformat(environment, indent=1, width=79))
environment = kwargs.get("env", os.environ)
logging.debug("run build %s, in environment:\n%s", command, pprint.pformat(environment, indent=1, width=79))
exit_code = subprocess.call(command, *args, **kwargs)
logging.debug('build finished with exit code: %d', exit_code)
logging.debug("build finished with exit code: %d", exit_code)
return exit_code
def run_command(command, cwd=None):
# type: (List[str], str) -> List[str]
""" Run a given command and report the execution.
"""Run a given command and report the execution.
:param command: array of tokens
:param cwd: the working directory where the command will be executed
:return: output of the command
"""
def decode_when_needed(result):
# type: (Any) -> str
""" check_output returns bytes or string depend on python version """
"""check_output returns bytes or string depend on python version"""
if not isinstance(result, str):
return result.decode('utf-8')
return result.decode("utf-8")
return result
try:
directory = os.path.abspath(cwd) if cwd else os.getcwd()
logging.debug('exec command %s in %s', command, directory)
output = subprocess.check_output(command,
cwd=directory,
stderr=subprocess.STDOUT)
logging.debug("exec command %s in %s", command, directory)
output = subprocess.check_output(command, cwd=directory, stderr=subprocess.STDOUT)
return decode_when_needed(output).splitlines()
except subprocess.CalledProcessError as ex:
ex.output = decode_when_needed(ex.output).splitlines()
@@ -82,7 +80,7 @@ def run_command(command, cwd=None):
def reconfigure_logging(verbose_level):
""" Reconfigure logging level and format based on the verbose flag.
"""Reconfigure logging level and format based on the verbose flag.
:param verbose_level: number of `-v` flags received by the command
:return: no return value
@@ -97,9 +95,9 @@ def reconfigure_logging(verbose_level):
root.setLevel(level)
# be verbose with messages
if verbose_level <= 3:
fmt_string = '%(name)s: %(levelname)s: %(message)s'
fmt_string = "%(name)s: %(levelname)s: %(message)s"
else:
fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'
fmt_string = "%(name)s: %(levelname)s: %(funcName)s: %(message)s"
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(fmt=fmt_string))
root.handlers = [handler]
@@ -107,37 +105,33 @@ def reconfigure_logging(verbose_level):
def command_entry_point(function):
# type: (Callable[[], int]) -> Callable[[], int]
""" Decorator for command entry methods.
"""Decorator for command entry methods.
The decorator initialize/shutdown logging and guard on programming
errors (catch exceptions).
The decorated method can have arbitrary parameters, the return value will
be the exit code of the process. """
be the exit code of the process."""
@functools.wraps(function)
def wrapper():
# type: () -> int
""" Do housekeeping tasks and execute the wrapped method. """
"""Do housekeeping tasks and execute the wrapped method."""
try:
logging.basicConfig(format='%(name)s: %(message)s',
level=logging.WARNING,
stream=sys.stdout)
logging.basicConfig(format="%(name)s: %(message)s", level=logging.WARNING, stream=sys.stdout)
# this hack to get the executable name as %(name)
logging.getLogger().name = os.path.basename(sys.argv[0])
return function()
except KeyboardInterrupt:
logging.warning('Keyboard interrupt')
logging.warning("Keyboard interrupt")
return 130 # signal received exit code for bash
except (OSError, subprocess.CalledProcessError):
logging.exception('Internal error.')
logging.exception("Internal error.")
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.error("Please report this bug and attach the output "
"to the bug report")
logging.error("Please report this bug and attach the output to the bug report")
else:
logging.error("Please run this command again and turn on "
"verbose mode (add '-vvvv' as argument).")
logging.error("Please run this command again and turn on verbose mode (add '-vvvv' as argument).")
return 64 # some non used exit code for internal errors
finally:
logging.shutdown()
@@ -147,7 +141,7 @@ def command_entry_point(function):
def wrapper_entry_point(function):
# type: (Callable[[int, Execution], None]) -> Callable[[], int]
""" Implements compiler wrapper base functionality.
"""Implements compiler wrapper base functionality.
A compiler wrapper executes the real compiler, then implement some
functionality, then returns with the real compiler exit code.
@@ -160,48 +154,45 @@ def wrapper_entry_point(function):
The :param function: will receive the following arguments:
:result: the exit code of the compilation.
:execution: the command executed by the wrapper. """
:execution: the command executed by the wrapper."""
def is_cxx_wrapper():
# type: () -> bool
""" Find out was it a C++ compiler call. Compiler wrapper names
"""Find out was it a C++ compiler call. Compiler wrapper names
contain the compiler type. C++ compiler wrappers ends with `c++`,
but might have `.exe` extension on windows. """
but might have `.exe` extension on windows."""
wrapper_command = os.path.basename(sys.argv[0])
return True if re.match(r'(.+)c\+\+(.*)', wrapper_command) else False
return True if re.match(r"(.+)c\+\+(.*)", wrapper_command) else False
def run_compiler(executable):
# type: (List[str]) -> int
""" Execute compilation with the real compiler. """
"""Execute compilation with the real compiler."""
command = executable + sys.argv[1:]
logging.debug('compilation: %s', command)
logging.debug("compilation: %s", command)
result = subprocess.call(command)
logging.debug('compilation exit code: %d', result)
logging.debug("compilation exit code: %d", result)
return result
@functools.wraps(function)
def wrapper():
# type: () -> int
""" It executes the compilation and calls the wrapped method. """
"""It executes the compilation and calls the wrapped method."""
# get relevant parameters from environment
parameters = json.loads(os.environ[ENVIRONMENT_KEY])
reconfigure_logging(parameters['verbose'])
reconfigure_logging(parameters["verbose"])
# execute the requested compilation and crash if anything goes wrong
cxx = is_cxx_wrapper()
compiler = parameters['cxx'] if cxx else parameters['cc']
compiler = parameters["cxx"] if cxx else parameters["cc"]
result = run_compiler(compiler)
# call the wrapped method and ignore it's return value
try:
call = Execution(
pid=os.getpid(),
cwd=os.getcwd(),
cmd=['c++' if cxx else 'cc'] + sys.argv[1:])
call = Execution(pid=os.getpid(), cwd=os.getcwd(), cmd=["c++" if cxx else "cc"] + sys.argv[1:])
function(result, call)
except (OSError, subprocess.CalledProcessError):
logging.exception('Compiler wrapper failed complete.')
logging.exception("Compiler wrapper failed complete.")
# always return the real compiler exit code
return result
@@ -210,12 +201,6 @@ def wrapper_entry_point(function):
def wrapper_environment(args):
# type: (...) -> Dict[str, str]
""" Set up environment for interpose compiler wrapper."""
"""Set up environment for interpose compiler wrapper."""
return {
ENVIRONMENT_KEY: json.dumps({
'verbose': args.verbose,
'cc': shell_split(args.cc),
'cxx': shell_split(args.cxx)
})
}
return {ENVIRONMENT_KEY: json.dumps({"verbose": args.verbose, "cc": shell_split(args.cc), "cxx": shell_split(args.cxx)})}

View File

@@ -3,13 +3,13 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module implements the 'scan-build' command API.
"""This module implements the 'scan-build' command API.
To run the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
-- Report: create a cover report from the analyzer outputs."""
import re
import os
@@ -26,28 +26,25 @@ import datetime
import argparse # noqa: ignore=F401
from typing import Any, Dict, List, Callable, Iterable, Generator # noqa: ignore=F401
from libscanbuild import command_entry_point, wrapper_entry_point, \
wrapper_environment, run_build, run_command
from libscanbuild.arguments import parse_args_for_scan_build, \
parse_args_for_analyze_build
from libscanbuild import command_entry_point, wrapper_entry_point, wrapper_environment, run_build, run_command
from libscanbuild.arguments import parse_args_for_scan_build, parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import Compilation, classify_source, \
CompilationDatabase
from libscanbuild.compilation import Compilation, classify_source, CompilationDatabase
from libscanbuild.clang import get_version, get_arguments
from libscanbuild import Execution # noqa: ignore=F401
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
__all__ = ["scan_build", "analyze_build", "analyze_compiler_wrapper"]
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
ENVIRONMENT_KEY = 'ANALYZE_BUILD'
COMPILER_WRAPPER_CC = "analyze-cc"
COMPILER_WRAPPER_CXX = "analyze-c++"
ENVIRONMENT_KEY = "ANALYZE_BUILD"
@command_entry_point
def scan_build():
# type: () -> int
""" Entry point for scan-build command. """
"""Entry point for scan-build command."""
args = parse_args_for_scan_build()
# will re-assign the report directory as new output
@@ -75,7 +72,7 @@ def scan_build():
@command_entry_point
def analyze_build():
# type: () -> int
""" Entry point for analyze-build command. """
"""Entry point for analyze-build command."""
args = parse_args_for_analyze_build()
# will re-assign the report directory as new output
@@ -91,91 +88,89 @@ def analyze_build():
def need_analyzer(args):
# type: (str) -> bool
""" Check the intent of the build command.
"""Check the intent of the build command.
When static analyzer run against project configure step, it should be
silent and no need to run the analyzer or generate report.
To run `scan-build` against the configure step might be necessary,
when compiler wrappers are used. That's the moment when build setup
check the compiler and capture the location for the build process. """
check the compiler and capture the location for the build process."""
return len(args) > 0 and not re.search(r'configure|autogen', args[0])
return len(args) > 0 and not re.search(r"configure|autogen", args[0])
def analyze_parameters(args):
# type: (argparse.Namespace) -> Dict[str, Any]
""" Mapping between the command line parameters and the analyzer run
"""Mapping between the command line parameters and the analyzer run
method. The run method works with a plain dictionary, while the command
line parameters are in a named tuple.
The keys are very similar, and some values are preprocessed. """
The keys are very similar, and some values are preprocessed."""
def prefix_with(constant, pieces):
# type: (Any, List[Any]) -> List[Any]
""" From a sequence create another sequence where every second element
"""From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3]"""
return [elem for piece in pieces for elem in [constant, piece]]
def direct_args(args):
# type: (argparse.Namespace) -> List[str]
""" A group of command line arguments can mapped to command
line arguments of the analyzer. """
"""A group of command line arguments can mapped to command
line arguments of the analyzer."""
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
result.append("-analyzer-store={0}".format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
result.append("-analyzer-constraints={0}".format(args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
result.append("-analyzer-stats")
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
result.append("-analyzer-opt-analyze-headers")
if args.stats:
result.append('-analyzer-checker=debug.Stats')
result.append("-analyzer-checker=debug.Stats")
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
result.extend(["-analyzer-max-loop", str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
result.append("-analyzer-output={0}".format(args.output_format))
if args.analyzer_config:
result.extend(['-analyzer-config', args.analyzer_config])
result.extend(["-analyzer-config", args.analyzer_config])
if args.verbose >= 4:
result.append('-analyzer-display-progress')
result.append("-analyzer-display-progress")
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
result.extend(prefix_with("-load", args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
checkers = ",".join(args.enable_checker)
result.extend(["-analyzer-checker", checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
checkers = ",".join(args.disable_checker)
result.extend(["-analyzer-disable-checker", checkers])
return prefix_with('-Xclang', result)
return prefix_with("-Xclang", result)
return {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': direct_args(args),
'analyzer_target': args.analyzer_target,
'force_debug': args.force_debug,
'excludes': args.excludes
"clang": args.clang,
"output_dir": args.output,
"output_format": args.output_format,
"output_failures": args.output_failures,
"direct_args": direct_args(args),
"analyzer_target": args.analyzer_target,
"force_debug": args.force_debug,
"excludes": args.excludes,
}
def run_analyzer_parallel(compilations, args):
# type: (Iterable[Compilation], argparse.Namespace) -> None
""" Runs the analyzer against the given compilations. """
"""Runs the analyzer against the given compilations."""
logging.debug('run analyzer against compilation database')
logging.debug("run analyzer against compilation database")
consts = analyze_parameters(args)
parameters = (dict(compilation.as_dict(), **consts)
for compilation in compilations)
parameters = (dict(compilation.as_dict(), **consts) for compilation in compilations)
# when verbose output requested execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, parameters):
@@ -186,23 +181,18 @@ def run_analyzer_parallel(compilations, args):
def setup_environment(args):
# type: (argparse.Namespace) -> Dict[str, str]
""" Set up environment for build command to interpose compiler wrapper. """
"""Set up environment for build command to interpose compiler wrapper."""
environment = dict(os.environ)
# to run compiler wrappers
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX
})
environment.update({"CC": COMPILER_WRAPPER_CC, "CXX": COMPILER_WRAPPER_CXX})
# pass the relevant parameters to run the analyzer with condition.
# the presence of the environment value will control the run.
if need_analyzer(args.build):
environment.update({
ENVIRONMENT_KEY: json.dumps(analyze_parameters(args))
})
environment.update({ENVIRONMENT_KEY: json.dumps(analyze_parameters(args))})
else:
logging.debug('wrapper should not run analyzer')
logging.debug("wrapper should not run analyzer")
return environment
@@ -210,7 +200,7 @@ def setup_environment(args):
@wrapper_entry_point
def analyze_compiler_wrapper(result, execution):
# type: (int, Execution) -> None
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
"""Entry point for `analyze-cc` and `analyze-c++` compiler wrappers."""
# don't run analyzer when compilation fails. or when it's not requested.
if result or not os.getenv(ENVIRONMENT_KEY):
@@ -227,19 +217,19 @@ def analyze_compiler_wrapper(result, execution):
@contextlib.contextmanager
def report_directory(hint, keep):
# type: (str, bool) -> Generator[str, None, None]
""" Responsible for the report directory.
"""Responsible for the report directory.
hint -- could specify the parent directory of the output directory.
keep -- a boolean value to keep or delete the empty report directory. """
keep -- a boolean value to keep or delete the empty report directory."""
stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
stamp_format = "scan-build-%Y-%m-%d-%H-%M-%S-%f-"
stamp = datetime.datetime.now().strftime(stamp_format)
parent_dir = os.path.abspath(hint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
logging.info('Report directory created: %s', name)
logging.info("Report directory created: %s", name)
try:
yield name
@@ -259,16 +249,16 @@ def report_directory(hint, keep):
def require(required):
""" Decorator for checking the required values in state.
"""Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
any of those is missing."""
def decorator(method):
@functools.wraps(method)
def wrapper(*args, **kwargs):
for key in required:
assert key in args[0], '{} is missing'.format(key)
assert key in args[0], "{} is missing".format(key)
return method(*args, **kwargs)
@@ -277,21 +267,25 @@ def require(required):
return decorator
@require(['flags', # entry from compilation
'compiler', # entry from compilation
'directory', # entry from compilation
'source', # entry from compilation
'clang', # clang executable name (and path)
'direct_args', # arguments from command line
'excludes', # list of directories
'force_debug', # kill non debug macros
'output_dir', # where generated report files shall go
'output_format', # it's 'plist', 'html', 'plist-html',
# 'text' or 'plist-multi-file'
'output_failures']) # generate crash reports or not
@require(
[
"flags", # entry from compilation
"compiler", # entry from compilation
"directory", # entry from compilation
"source", # entry from compilation
"clang", # clang executable name (and path)
"direct_args", # arguments from command line
"excludes", # list of directories
"force_debug", # kill non debug macros
"output_dir", # where generated report files shall go
"output_format", # it's 'plist', 'html', 'plist-html',
# 'text' or 'plist-multi-file'
"output_failures",
]
) # generate crash reports or not
def run(opts):
# type: (Dict[str, Any]) -> Dict[str, Any]
""" Entry point to run (or not) static analyzer against a single entry
"""Entry point to run (or not) static analyzer against a single entry
of the compilation database.
This complex task is decomposed into smaller methods which are calling
@@ -301,44 +295,43 @@ def run(opts):
The passed parameter is a python dictionary. Each method first check
that the needed parameters received. (This is done by the 'require'
decorator. It's like an 'assert' to check the contract between the
caller and the called method.) """
caller and the called method.)"""
command = [opts['compiler'], '-c'] + opts['flags'] + [opts['source']]
command = [opts["compiler"], "-c"] + opts["flags"] + [opts["source"]]
logging.debug("Run analyzer against '%s'", command)
return exclude(opts)
def logging_analyzer_output(opts):
# type: (Dict[str, Any]) -> None
""" Display error message from analyzer. """
"""Display error message from analyzer."""
if opts and 'error_output' in opts:
for line in opts['error_output']:
if opts and "error_output" in opts:
for line in opts["error_output"]:
logging.info(line)
@require(['clang', 'directory', 'flags', 'source', 'output_dir', 'language',
'error_output', 'exit_code'])
@require(["clang", "directory", "flags", "source", "output_dir", "language", "error_output", "exit_code"])
def report_failure(opts):
# type: (Dict[str, Any]) -> None
""" Create report when analyzer failed.
"""Create report when analyzer failed.
The major report is the preprocessor output. The output filename generated
randomly. The compiler output also captured into '.stderr.txt' file.
And some more execution context also saved into '.info.txt' file. """
And some more execution context also saved into '.info.txt' file."""
def extension():
# type: () -> str
""" Generate preprocessor file extension. """
"""Generate preprocessor file extension."""
mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
return mapping.get(opts['language'], '.i')
mapping = {"objective-c++": ".mii", "objective-c": ".mi", "c++": ".ii"}
return mapping.get(opts["language"], ".i")
def destination():
# type: () -> str
""" Creates failures directory if not exits yet. """
"""Creates failures directory if not exits yet."""
failures_dir = os.path.join(opts['output_dir'], 'failures')
failures_dir = os.path.join(opts["output_dir"], "failures")
if not os.path.isdir(failures_dir):
os.makedirs(failures_dir)
return failures_dir
@@ -346,126 +339,117 @@ def report_failure(opts):
# Classify error type: when Clang terminated by a signal it's a 'Crash'.
# (python subprocess Popen.returncode is negative when child terminated
# by signal.) Everything else is 'Other Error'.
error = 'crash' if opts['exit_code'] < 0 else 'other_error'
error = "crash" if opts["exit_code"] < 0 else "other_error"
# Create preprocessor output file name. (This is blindly following the
# Perl implementation.)
(fd, name) = tempfile.mkstemp(suffix=extension(),
prefix='clang_' + error + '_',
dir=destination())
(fd, name) = tempfile.mkstemp(suffix=extension(), prefix="clang_" + error + "_", dir=destination())
os.close(fd)
# Execute Clang again, but run the syntax check only.
try:
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '-fsyntax-only', '-E'] +
opts['flags'] + [opts['source'], '-o', name], cwd)
cwd = opts["directory"]
cmd = get_arguments([opts["clang"], "-fsyntax-only", "-E"] + opts["flags"] + [opts["source"], "-o", name], cwd)
run_command(cmd, cwd=cwd)
# write general information about the crash
with open(name + '.info.txt', 'w') as handle:
handle.write(opts['source'] + os.linesep)
handle.write(error.title().replace('_', ' ') + os.linesep)
handle.write(' '.join(cmd) + os.linesep)
handle.write(' '.join(platform.uname()) + os.linesep)
handle.write(get_version(opts['clang']))
with open(name + ".info.txt", "w") as handle:
handle.write(opts["source"] + os.linesep)
handle.write(error.title().replace("_", " ") + os.linesep)
handle.write(" ".join(cmd) + os.linesep)
handle.write(" ".join(platform.uname()) + os.linesep)
handle.write(get_version(opts["clang"]))
handle.close()
# write the captured output too
with open(name + '.stderr.txt', 'w') as handle:
for line in opts['error_output']:
with open(name + ".stderr.txt", "w") as handle:
for line in opts["error_output"]:
handle.write(line)
handle.close()
except (OSError, subprocess.CalledProcessError):
logging.warning('failed to report failure', exc_info=True)
logging.warning("failed to report failure", exc_info=True)
@require(['clang', 'directory', 'flags', 'direct_args', 'source', 'output_dir',
'output_format'])
@require(["clang", "directory", "flags", "direct_args", "source", "output_dir", "output_format"])
def run_analyzer(opts, continuation=report_failure):
# type: (...) -> Dict[str, Any]
""" It assembles the analysis command line and executes it. Capture the
"""It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
requested, it calls the continuation to generate it."""
def target():
# type: () -> str
""" Creates output file name for reports. """
if opts['output_format'].startswith('plist'):
(handle, name) = tempfile.mkstemp(prefix='report-',
suffix='.plist',
dir=opts['output_dir'])
"""Creates output file name for reports."""
if opts["output_format"].startswith("plist"):
(handle, name) = tempfile.mkstemp(prefix="report-", suffix=".plist", dir=opts["output_dir"])
os.close(handle)
return name
return opts['output_dir']
return opts["output_dir"]
try:
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '--analyze'] +
opts['direct_args'] + opts['flags'] +
[opts['source'], '-o', target()],
cwd)
cwd = opts["directory"]
cmd = get_arguments(
[opts["clang"], "--analyze"] + opts["direct_args"] + opts["flags"] + [opts["source"], "-o", target()], cwd
)
output = run_command(cmd, cwd=cwd)
return {'error_output': output, 'exit_code': 0}
return {"error_output": output, "exit_code": 0}
except OSError:
message = 'failed to execute "{0}"'.format(opts['clang'])
return {'error_output': [message], 'exit_code': 127}
message = 'failed to execute "{0}"'.format(opts["clang"])
return {"error_output": [message], "exit_code": 127}
except subprocess.CalledProcessError as ex:
logging.warning('analysis failed', exc_info=True)
result = {'error_output': ex.output, 'exit_code': ex.returncode}
if opts.get('output_failures', False):
logging.warning("analysis failed", exc_info=True)
result = {"error_output": ex.output, "exit_code": ex.returncode}
if opts.get("output_failures", False):
opts.update(result)
continuation(opts)
return result
@require(['flags', 'force_debug'])
@require(["flags", "force_debug"])
def filter_debug_flags(opts, continuation=run_analyzer):
# type: (...) -> Dict[str, Any]
""" Filter out nondebug macros when requested. """
"""Filter out nondebug macros when requested."""
if opts.pop('force_debug'):
if opts.pop("force_debug"):
# lazy implementation just append an undefine macro at the end
opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
opts.update({"flags": opts["flags"] + ["-UNDEBUG"]})
return continuation(opts)
@require(['language', 'compiler', 'source', 'flags'])
@require(["language", "compiler", "source", "flags"])
def language_check(opts, continuation=filter_debug_flags):
# type: (...) -> Dict[str, Any]
""" Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation. """
"""Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation."""
accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
})
accepted = frozenset(
{"c", "c++", "objective-c", "objective-c++", "c-cpp-output", "c++-cpp-output", "objective-c-cpp-output"}
)
# language can be given as a parameter...
language = opts.pop('language')
compiler = opts.pop('compiler')
language = opts.pop("language")
compiler = opts.pop("compiler")
# ... or find out from source file extension
if language is None and compiler is not None:
language = classify_source(opts['source'], compiler == 'c')
language = classify_source(opts["source"], compiler == "c")
if language is None:
logging.debug('skip analysis, language not known')
logging.debug("skip analysis, language not known")
return dict()
elif language not in accepted:
logging.debug('skip analysis, language not supported')
logging.debug("skip analysis, language not supported")
return dict()
logging.debug('analysis, language: %s', language)
opts.update({'language': language,
'flags': ['-x', language] + opts['flags']})
logging.debug("analysis, language: %s", language)
opts.update({"language": language, "flags": ["-x", language] + opts["flags"]})
return continuation(opts)
@require(['arch_list', 'flags'])
@require(["arch_list", "flags"])
def arch_check(opts, continuation=language_check):
# type: (...) -> Dict[str, Any]
""" Do run analyzer through one of the given architectures. """
"""Do run analyzer through one of the given architectures."""
disabled = frozenset({'ppc', 'ppc64'})
disabled = frozenset({"ppc", "ppc64"})
received_list = opts.pop('arch_list')
received_list = opts.pop("arch_list")
if received_list:
# filter out disabled architectures and -arch switches
filtered_list = [a for a in received_list if a not in disabled]
@@ -475,24 +459,24 @@ def arch_check(opts, continuation=language_check):
# the same, those should not change the pre-processing step.
# But that's the only pass we have before run the analyzer.
current = filtered_list.pop()
logging.debug('analysis, on arch: %s', current)
logging.debug("analysis, on arch: %s", current)
opts.update({'flags': ['-arch', current] + opts['flags']})
opts.update({"flags": ["-arch", current] + opts["flags"]})
return continuation(opts)
logging.debug('skip analysis, found not supported arch')
logging.debug("skip analysis, found not supported arch")
return dict()
logging.debug('analysis, on default arch')
logging.debug("analysis, on default arch")
return continuation(opts)
@require(['analyzer_target', 'flags'])
@require(["analyzer_target", "flags"])
def target_check(opts, continuation=arch_check):
# type: (...) -> Dict[str, Any]
""" Do run analyzer through the given target triple """
"""Do run analyzer through the given target triple"""
target = opts.pop("analyzer_target")
if target is not None:
opts.update({'flags': ['-target', target] + opts['flags']})
opts.update({"flags": ["-target", target] + opts["flags"]})
logging.debug("analysis, target triple is %s", target)
else:
logging.debug("analysis, default target triple")
@@ -504,49 +488,49 @@ def target_check(opts, continuation=arch_check):
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
"-c": 0, # compile option will be overwritten
"-fsyntax-only": 0, # static analyzer option will be overwritten
"-o": 1, # will set up own output file
# flags below are inherited from the perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
"-g": 0,
"-save-temps": 0,
"-install_name": 1,
"-exported_symbols_list": 1,
"-current_version": 1,
"-compatibility_version": 1,
"-init": 1,
"-e": 1,
"-seg1addr": 1,
"-bundle_loader": 1,
"-multiply_defined": 1,
"-sectorder": 3,
"--param": 1,
"--serialize-diagnostics": 1,
} # type: Dict[str, int]
@require(['flags'])
@require(["flags"])
def classify_parameters(opts, continuation=target_check):
# type: (...) -> Dict[str, Any]
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
"""Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing."""
# the result of the method
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
"flags": [], # the filtered compiler flags
"arch_list": [], # list of architecture flags
"language": None, # compilation language, None, if not specified
} # type: Dict[str, Any]
# iterate on the compile options
args = iter(opts['flags'])
args = iter(opts["flags"])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
if arg == "-arch":
result["arch_list"].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
elif arg == "-x":
result["language"] = next(args)
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
@@ -554,32 +538,32 @@ def classify_parameters(opts, continuation=target_check):
next(args)
# we don't care about extra warnings, but we should suppress ones
# that we don't want to see.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
elif re.match(r"^-W.+", arg) and not re.match(r"^-Wno-.+", arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
result["flags"].append(arg)
opts.update(result)
return continuation(opts)
@require(['source', 'excludes'])
@require(["source", "excludes"])
def exclude(opts, continuation=classify_parameters):
# type: (...) -> Dict[str, Any]
""" Analysis might be skipped, when one of the requested excluded
directory contains the file. """
"""Analysis might be skipped, when one of the requested excluded
directory contains the file."""
def contains(directory, entry):
# type: (str, str) -> bool
""" Check is directory contains the given file. """
"""Check is directory contains the given file."""
# When a directory contains a file, then the relative path to the
# file from that directory does not start with a parent dir prefix.
relative = os.path.relpath(entry, directory).split(os.sep)
return len(relative) > 0 and relative[0] != os.pardir
if any(contains(entry, opts['source']) for entry in opts['excludes']):
logging.debug('skip analysis, file requested to exclude')
if any(contains(entry, opts["source"]) for entry in opts["excludes"]):
logging.debug("skip analysis, file requested to exclude")
return dict()
return continuation(opts)

View File

@@ -3,7 +3,7 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module parses and validates arguments for command-line interfaces.
"""This module parses and validates arguments for command-line interfaces.
It uses argparse module to create the command line parser. (This library is
in the standard python library since 3.2 and backported to 2.7, but not
@@ -26,69 +26,68 @@ from libscanbuild import reconfigure_logging
from libscanbuild.clang import get_checkers
__all__ = ['parse_args_for_intercept_build', 'parse_args_for_analyze_build',
'parse_args_for_scan_build']
__all__ = ["parse_args_for_intercept_build", "parse_args_for_analyze_build", "parse_args_for_scan_build"]
def parse_args_for_intercept_build():
# type: () -> argparse.Namespace
""" Parse and validate command-line arguments for intercept-build. """
"""Parse and validate command-line arguments for intercept-build."""
parser = create_intercept_parser()
args = parser.parse_args()
reconfigure_logging(args.verbose)
logging.debug('Raw arguments %s', sys.argv)
logging.debug("Raw arguments %s", sys.argv)
# short validation logic
if not args.build:
parser.error(message='missing build command')
parser.error(message="missing build command")
logging.debug('Parsed arguments: %s', args)
logging.debug("Parsed arguments: %s", args)
return args
def parse_args_for_analyze_build():
# type: () -> argparse.Namespace
""" Parse and validate command-line arguments for analyze-build. """
"""Parse and validate command-line arguments for analyze-build."""
from_build_command = False
parser = create_analyze_parser(from_build_command)
args = parser.parse_args()
reconfigure_logging(args.verbose)
logging.debug('Raw arguments %s', sys.argv)
logging.debug("Raw arguments %s", sys.argv)
normalize_args_for_analyze(args, from_build_command)
validate_args_for_analyze(parser, args, from_build_command)
logging.debug('Parsed arguments: %s', args)
logging.debug("Parsed arguments: %s", args)
return args
def parse_args_for_scan_build():
# type: () -> argparse.Namespace
""" Parse and validate command-line arguments for scan-build. """
"""Parse and validate command-line arguments for scan-build."""
from_build_command = True
parser = create_analyze_parser(from_build_command)
args = parser.parse_args()
reconfigure_logging(args.verbose)
logging.debug('Raw arguments %s', sys.argv)
logging.debug("Raw arguments %s", sys.argv)
normalize_args_for_analyze(args, from_build_command)
validate_args_for_analyze(parser, args, from_build_command)
logging.debug('Parsed arguments: %s', args)
logging.debug("Parsed arguments: %s", args)
return args
def normalize_args_for_analyze(args, from_build_command):
# type: (argparse.Namespace, bool) -> None
""" Normalize parsed arguments for analyze-build and scan-build.
"""Normalize parsed arguments for analyze-build and scan-build.
:param args: Parsed argument object. (Will be mutated.)
:param from_build_command: Boolean value tells is the command suppose
to run the analyzer against a build command or a compilation db. """
to run the analyzer against a build command or a compilation db."""
# make plugins always a list. (it might be None when not specified.)
if args.plugins is None:
@@ -105,12 +104,12 @@ def normalize_args_for_analyze(args, from_build_command):
# which have good default value.)
if from_build_command:
# add cdb parameter invisibly to make report module working.
args.cdb = 'compile_commands.json'
args.cdb = "compile_commands.json"
def validate_args_for_analyze(parser, args, from_build_command):
# type: (argparse.ArgumentParser, argparse.Namespace, bool) -> None
""" Command line parsing is done by the argparse module, but semantic
"""Command line parsing is done by the argparse module, but semantic
validation still needs to be done. This method is doing it for
analyze-build and scan-build commands.
@@ -119,7 +118,7 @@ def validate_args_for_analyze(parser, args, from_build_command):
:param from_build_command: Boolean value tells is the command suppose
to run the analyzer against a build command or a compilation db.
:return: No return value, but this call might throw when validation
fails. """
fails."""
if args.help_checkers_verbose:
print_checkers(get_checkers(args.clang, args.plugins))
@@ -128,14 +127,14 @@ def validate_args_for_analyze(parser, args, from_build_command):
print_active_checkers(get_checkers(args.clang, args.plugins))
parser.exit(status=0)
elif from_build_command and not args.build:
parser.error(message='missing build command')
parser.error(message="missing build command")
elif not from_build_command and not os.path.exists(args.cdb):
parser.error(message='compilation database is missing')
parser.error(message="compilation database is missing")
def create_intercept_parser():
# type: () -> argparse.ArgumentParser
""" Creates a parser for command-line arguments to 'intercept'. """
"""Creates a parser for command-line arguments to 'intercept'."""
parser = create_default_parser()
parser_add_cdb(parser)
@@ -143,23 +142,23 @@ def create_intercept_parser():
parser_add_prefer_wrapper(parser)
parser_add_compilers(parser)
advanced = parser.add_argument_group('advanced options')
advanced = parser.add_argument_group("advanced options")
advanced.add_argument(
'--append',
action='store_true',
"--append",
action="store_true",
help="""Extend existing compilation database with new entries.
Duplicate entries are detected and not present in the final output.
The output is not continuously updated, it's done when the build
command finished. """)
command finished. """,
)
parser.add_argument(
dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
parser.add_argument(dest="build", nargs=argparse.REMAINDER, help="""Command to run.""")
return parser
def create_analyze_parser(from_build_command):
# type: (bool) -> argparse.ArgumentParser
""" Creates a parser for command-line arguments to 'analyze'. """
"""Creates a parser for command-line arguments to 'analyze'."""
parser = create_default_parser()
@@ -168,148 +167,159 @@ def create_analyze_parser(from_build_command):
parser_add_compilers(parser)
parser.add_argument(
'--intercept-first',
action='store_true',
"--intercept-first",
action="store_true",
help="""Run the build commands first, intercept compiler
calls and then run the static analyzer afterwards.
Generally speaking it has better coverage on build commands.
With '--override-compiler' it use compiler wrapper, but does
not run the analyzer till the build is finished.""")
not run the analyzer till the build is finished.""",
)
else:
parser_add_cdb(parser)
parser.add_argument(
'--status-bugs',
action='store_true',
"--status-bugs",
action="store_true",
help="""The exit status of '%(prog)s' is the same as the executed
build command. This option ignores the build exit status and sets to
be non zero if it found potential bugs or zero otherwise.""")
be non zero if it found potential bugs or zero otherwise.""",
)
parser.add_argument(
'--exclude',
metavar='<directory>',
dest='excludes',
action='append',
"--exclude",
metavar="<directory>",
dest="excludes",
action="append",
default=[],
help="""Do not run static analyzer against files found in this
directory. (You can specify this option multiple times.)
Could be useful when project contains 3rd party libraries.""")
Could be useful when project contains 3rd party libraries.""",
)
output = parser.add_argument_group('output control options')
output = parser.add_argument_group("output control options")
output.add_argument(
'--output',
'-o',
metavar='<path>',
"--output",
"-o",
metavar="<path>",
default=tempfile.gettempdir(),
help="""Specifies the output directory for analyzer reports.
Subdirectory will be created if default directory is targeted.""")
Subdirectory will be created if default directory is targeted.""",
)
output.add_argument(
'--keep-empty',
action='store_true',
"--keep-empty",
action="store_true",
help="""Don't remove the build results directory even if no issues
were reported.""")
were reported.""",
)
output.add_argument(
'--html-title',
metavar='<title>',
"--html-title",
metavar="<title>",
help="""Specify the title used on generated HTML pages.
If not specified, a default title will be used.""")
If not specified, a default title will be used.""",
)
format_group = output.add_mutually_exclusive_group()
format_group.add_argument(
'--plist',
'-plist',
dest='output_format',
const='plist',
default='html',
action='store_const',
help="""Cause the results as a set of .plist files.""")
"--plist",
"-plist",
dest="output_format",
const="plist",
default="html",
action="store_const",
help="""Cause the results as a set of .plist files.""",
)
format_group.add_argument(
'--plist-html',
'-plist-html',
dest='output_format',
const='plist-html',
default='html',
action='store_const',
help="""Cause the results as a set of .html and .plist files.""")
"--plist-html",
"-plist-html",
dest="output_format",
const="plist-html",
default="html",
action="store_const",
help="""Cause the results as a set of .html and .plist files.""",
)
format_group.add_argument(
'--plist-multi-file',
'-plist-multi-file',
dest='output_format',
const='plist-multi-file',
default='html',
action='store_const',
"--plist-multi-file",
"-plist-multi-file",
dest="output_format",
const="plist-multi-file",
default="html",
action="store_const",
help="""Cause the results as a set of .plist files with extra
information on related files.""")
information on related files.""",
)
# TODO: implement '-view '
advanced = parser.add_argument_group('advanced options')
advanced = parser.add_argument_group("advanced options")
advanced.add_argument(
'--use-analyzer',
metavar='<path>',
dest='clang',
default='clang',
"--use-analyzer",
metavar="<path>",
dest="clang",
default="clang",
help="""'%(prog)s' uses the 'clang' executable relative to itself for
static analysis. One can override this behavior with this option by
using the 'clang' packaged with Xcode (on OS X) or from the PATH.""")
using the 'clang' packaged with Xcode (on OS X) or from the PATH.""",
)
advanced.add_argument(
'--analyzer-target',
dest='analyzer_target',
metavar='<target triple name for analysis>',
"--analyzer-target",
dest="analyzer_target",
metavar="<target triple name for analysis>",
help="""This provides target triple information to clang static
analyzer. It only changes the target for analysis but doesn't change
the target of a real compiler given by --use-cc and --use-c++
options.""")
options.""",
)
advanced.add_argument(
'--no-failure-reports',
'-no-failure-reports',
dest='output_failures',
action='store_false',
"--no-failure-reports",
"-no-failure-reports",
dest="output_failures",
action="store_false",
help="""Do not create a 'failures' subdirectory that includes analyzer
crash reports and preprocessed source files.""")
crash reports and preprocessed source files.""",
)
parser.add_argument(
'--analyze-headers',
action='store_true',
"--analyze-headers",
action="store_true",
help="""Also analyze functions in #included files. By default, such
functions are skipped unless they are called by functions within the
main source file.""")
main source file.""",
)
advanced.add_argument(
'--stats',
'-stats',
action='store_true',
help="""Generates visitation statistics for the project.""")
"--stats", "-stats", action="store_true", help="""Generates visitation statistics for the project."""
)
advanced.add_argument("--internal-stats", action="store_true", help="""Generate internal analyzer statistics.""")
advanced.add_argument(
'--internal-stats',
action='store_true',
help="""Generate internal analyzer statistics.""")
advanced.add_argument(
'--maxloop',
'-maxloop',
metavar='<loop count>',
"--maxloop",
"-maxloop",
metavar="<loop count>",
type=int,
help="""Specifiy the number of times a block can be visited before
giving up. Increase for more comprehensive coverage at a cost of
speed.""")
speed.""",
)
advanced.add_argument(
'--store',
'-store',
metavar='<model>',
dest='store_model',
choices=['region', 'basic'],
"--store",
"-store",
metavar="<model>",
dest="store_model",
choices=["region", "basic"],
help="""Specify the store model used by the analyzer. 'region'
specifies a field- sensitive store model. 'basic' which is far less
precise but can more quickly analyze code. 'basic' was the default
store model for checker-0.221 and earlier.""")
store model for checker-0.221 and earlier.""",
)
advanced.add_argument(
'--constraints',
'-constraints',
metavar='<model>',
dest='constraints_model',
choices=['range', 'basic'],
"--constraints",
"-constraints",
metavar="<model>",
dest="constraints_model",
choices=["range", "basic"],
help="""Specify the constraint engine used by the analyzer. Specifying
'basic' uses a simpler, less powerful constraint model used by
checker-0.160 and earlier.""")
checker-0.160 and earlier.""",
)
advanced.add_argument(
'--analyzer-config',
'-analyzer-config',
metavar='<options>',
"--analyzer-config",
"-analyzer-config",
metavar="<options>",
help="""Provide options to pass through to the analyzer's
-analyzer-config flag. Several options are separated with comma:
'key1=val1,key2=val2'
@@ -319,93 +329,94 @@ def create_analyze_parser(from_build_command):
Switch the page naming to:
report-<filename>-<function/method name>-<id>.html
instead of report-XXXXXX.html""")
instead of report-XXXXXX.html""",
)
advanced.add_argument(
'--force-analyze-debug-code',
dest='force_debug',
action='store_true',
"--force-analyze-debug-code",
dest="force_debug",
action="store_true",
help="""Tells analyzer to enable assertions in code even if they were
disabled during compilation, enabling more precise results.""")
disabled during compilation, enabling more precise results.""",
)
plugins = parser.add_argument_group('checker options')
plugins = parser.add_argument_group("checker options")
plugins.add_argument(
'--load-plugin',
'-load-plugin',
metavar='<plugin library>',
dest='plugins',
action='append',
help="""Loading external checkers using the clang plugin interface.""")
"--load-plugin",
"-load-plugin",
metavar="<plugin library>",
dest="plugins",
action="append",
help="""Loading external checkers using the clang plugin interface.""",
)
plugins.add_argument(
'--enable-checker',
'-enable-checker',
metavar='<checker name>',
"--enable-checker",
"-enable-checker",
metavar="<checker name>",
action=AppendCommaSeparated,
help="""Enable specific checker.""")
help="""Enable specific checker.""",
)
plugins.add_argument(
'--disable-checker',
'-disable-checker',
metavar='<checker name>',
"--disable-checker",
"-disable-checker",
metavar="<checker name>",
action=AppendCommaSeparated,
help="""Disable specific checker.""")
help="""Disable specific checker.""",
)
plugins.add_argument(
'--help-checkers',
action='store_true',
"--help-checkers",
action="store_true",
help="""A default group of checkers is run unless explicitly disabled.
Exactly which checkers constitute the default group is a function of
the operating system in use. These can be printed with this flag.""")
the operating system in use. These can be printed with this flag.""",
)
plugins.add_argument(
'--help-checkers-verbose',
action='store_true',
help="""Print all available checkers and mark the enabled ones.""")
"--help-checkers-verbose", action="store_true", help="""Print all available checkers and mark the enabled ones."""
)
if from_build_command:
parser.add_argument(
dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
parser.add_argument(dest="build", nargs=argparse.REMAINDER, help="""Command to run.""")
return parser
def create_default_parser():
# type: () -> argparse.ArgumentParser
""" Creates command line parser for all build wrapper commands. """
"""Creates command line parser for all build wrapper commands."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--verbose',
'-v',
action='count',
"--verbose",
"-v",
action="count",
default=0,
help="""Enable verbose output from '%(prog)s'. A second, third and
fourth flags increases verbosity.""")
fourth flags increases verbosity.""",
)
return parser
def parser_add_cdb(parser):
# type: (argparse.ArgumentParser) -> None
parser.add_argument(
'--cdb',
metavar='<file>',
default="compile_commands.json",
help="""The JSON compilation database.""")
parser.add_argument("--cdb", metavar="<file>", default="compile_commands.json", help="""The JSON compilation database.""")
def parser_add_prefer_wrapper(parser):
# type: (argparse.ArgumentParser) -> None
parser.add_argument(
'--override-compiler',
action='store_true',
"--override-compiler",
action="store_true",
help="""Always resort to the compiler wrapper even when better
intercept methods are available.""")
intercept methods are available.""",
)
def parser_add_compilers(parser):
# type: (argparse.ArgumentParser) -> None
parser.add_argument(
'--use-cc',
metavar='<path>',
dest='cc',
default=os.getenv('CC', 'cc'),
"--use-cc",
metavar="<path>",
dest="cc",
default=os.getenv("CC", "cc"),
help="""When '%(prog)s' analyzes a project by interposing a compiler
wrapper, which executes a real compiler for compilation and do other
tasks (record the compiler invocation). Because of this interposing,
@@ -419,17 +430,19 @@ def parser_add_compilers(parser):
If the given compiler is a cross compiler, you may also need to provide
--analyzer-target option to properly analyze the source code because
static analyzer runs as if the code is compiled for the host machine by
default.""")
default.""",
)
parser.add_argument(
'--use-c++',
metavar='<path>',
dest='cxx',
default=os.getenv('CXX', 'c++'),
help="""This is the same as "--use-cc" but for C++ code.""")
"--use-c++",
metavar="<path>",
dest="cxx",
default=os.getenv("CXX", "c++"),
help="""This is the same as "--use-cc" but for C++ code.""",
)
class AppendCommaSeparated(argparse.Action):
""" argparse Action class to support multiple comma separated lists. """
"""argparse Action class to support multiple comma separated lists."""
def __call__(self, __parser, namespace, values, __option_string=None):
# getattr(obj, attr, default) does not really returns default but none
@@ -437,34 +450,33 @@ class AppendCommaSeparated(argparse.Action):
setattr(namespace, self.dest, [])
# once it's fixed we can use as expected
actual = getattr(namespace, self.dest)
actual.extend(values.split(','))
actual.extend(values.split(","))
setattr(namespace, self.dest, actual)
def print_active_checkers(checkers):
# type: (Dict[str, Tuple[str, bool]]) -> None
""" Print active checkers to stdout. """
"""Print active checkers to stdout."""
for name in sorted(name for name, (_, active) in checkers.items()
if active):
for name in sorted(name for name, (_, active) in checkers.items() if active):
print(name)
def print_checkers(checkers):
# type: (Dict[str, Tuple[str, bool]]) -> None
""" Print verbose checker help to stdout. """
"""Print verbose checker help to stdout."""
print('')
print('available checkers:')
print('')
print("")
print("available checkers:")
print("")
for name in sorted(checkers.keys()):
description, active = checkers[name]
prefix = '+' if active else ' '
prefix = "+" if active else " "
if len(name) > 30:
print(' {0} {1}'.format(prefix, name))
print(' ' * 35 + description)
print(" {0} {1}".format(prefix, name))
print(" " * 35 + description)
else:
print(' {0} {1: <30} {2}'.format(prefix, name, description))
print('')
print(" {0} {1: <30} {2}".format(prefix, name, description))
print("")
print('NOTE: "+" indicates that an analysis is enabled by default.')
print('')
print("")

View File

@@ -3,58 +3,58 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible for the Clang executable.
"""This module is responsible for the Clang executable.
Since Clang command line interface is so rich, but this project is using only
a subset of that, it makes sense to create a function specific wrapper. """
a subset of that, it makes sense to create a function specific wrapper."""
import re
from typing import List, Set, FrozenSet, Callable # noqa: ignore=F401
from typing import Iterable, Tuple, Dict # noqa: ignore=F401
from typing import List, Set, FrozenSet, Callable # noqa: ignore=F401
from typing import Iterable, Tuple, Dict # noqa: ignore=F401
from libscanbuild import shell_split, run_command
__all__ = ['get_version', 'get_arguments', 'get_checkers']
__all__ = ["get_version", "get_arguments", "get_checkers"]
# regex for activated checker
ACTIVE_CHECKER_PATTERN = re.compile(r'^-analyzer-checker=(.*)$')
ACTIVE_CHECKER_PATTERN = re.compile(r"^-analyzer-checker=(.*)$")
def get_version(clang):
# type: (str) -> str
""" Returns the compiler version as string.
"""Returns the compiler version as string.
:param clang: the compiler we are using
:return: the version string printed to stderr """
:return: the version string printed to stderr"""
output = run_command([clang, '-v'])
output = run_command([clang, "-v"])
# the relevant version info is in the first line
return output[0]
def get_arguments(command, cwd):
# type: (List[str], str) -> List[str]
""" Capture Clang invocation.
"""Capture Clang invocation.
:param command: the compilation command
:param cwd: the current working directory
:return: the detailed front-end invocation command """
:return: the detailed front-end invocation command"""
cmd = command[:]
cmd.insert(1, '-###')
cmd.insert(1, "-###")
output = run_command(cmd, cwd=cwd)
# The relevant information is in the last line of the output.
# Don't check if finding last line fails, would throw exception anyway.
last_line = output[-1]
if re.search(r'clang(.*): error:', last_line):
if re.search(r"clang(.*): error:", last_line):
raise Exception(last_line)
return shell_split(last_line)
def get_active_checkers(clang, plugins):
# type: (str, List[str]) -> FrozenSet[str]
""" Get the active checker list.
"""Get the active checker list.
:param clang: the compiler we are using
:param plugins: list of plugins which was requested by the user
@@ -63,45 +63,44 @@ def get_active_checkers(clang, plugins):
To get the default checkers we execute Clang to print how this
compilation would be called. And take out the enabled checker from the
arguments. For input file we specify stdin and pass only language
information. """
information."""
def get_active_checkers_for(language):
# type: (str) -> List[str]
""" Returns a list of active checkers for the given language. """
"""Returns a list of active checkers for the given language."""
load_args = [arg
for plugin in plugins
for arg in ['-Xclang', '-load', '-Xclang', plugin]]
cmd = [clang, '--analyze'] + load_args + ['-x', language, '-']
return [candidate.group(1)
for candidate in (ACTIVE_CHECKER_PATTERN.match(arg)
for arg in get_arguments(cmd, '.'))
if candidate]
load_args = [arg for plugin in plugins for arg in ["-Xclang", "-load", "-Xclang", plugin]]
cmd = [clang, "--analyze"] + load_args + ["-x", language, "-"]
return [
candidate.group(1)
for candidate in (ACTIVE_CHECKER_PATTERN.match(arg) for arg in get_arguments(cmd, "."))
if candidate
]
result = set() # type: Set[str]
for language in ['c', 'c++', 'objective-c', 'objective-c++']:
for language in ["c", "c++", "objective-c", "objective-c++"]:
result.update(get_active_checkers_for(language))
return frozenset(result)
def is_active(checkers):
# type: (Iterable[str]) -> Callable[[str], bool]
""" Returns a method, which classifies the checker active or not,
based on the received checker name list. """
"""Returns a method, which classifies the checker active or not,
based on the received checker name list."""
def predicate(checker):
# type: (str) -> bool
""" Returns True if the given checker is active. """
"""Returns True if the given checker is active."""
return any(pattern.match(checker) for pattern in patterns)
patterns = [re.compile(r'^' + a + r'(\.|$)') for a in checkers]
patterns = [re.compile(r"^" + a + r"(\.|$)") for a in checkers]
return predicate
def parse_checkers(stream):
# type: (List[str]) -> Iterable[Tuple[str, str]]
""" Parse clang -analyzer-checker-help output.
"""Parse clang -analyzer-checker-help output.
Below the line 'CHECKERS:' are there the name description pairs.
Many of them are in one line, but some long named checker has the
@@ -115,51 +114,48 @@ def parse_checkers(stream):
:param stream: list of lines to parse
:return: generator of tuples
(<checker name>, <checker description>) """
(<checker name>, <checker description>)"""
lines = iter(stream)
# find checkers header
for line in lines:
if re.match(r'^CHECKERS:', line):
if re.match(r"^CHECKERS:", line):
break
# find entries
state = None
for line in lines:
if state and not re.match(r'^\s\s\S', line):
if state and not re.match(r"^\s\s\S", line):
yield (state, line.strip())
state = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
elif re.match(r"^\s\s\S+$", line.rstrip()):
state = line.strip()
else:
pattern = re.compile(r'^\s\s(?P<key>\S*)\s*(?P<value>.*)')
pattern = re.compile(r"^\s\s(?P<key>\S*)\s*(?P<value>.*)")
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
yield (current['key'], current['value'])
yield (current["key"], current["value"])
def get_checkers(clang, plugins):
# type: (str, List[str]) -> Dict[str, Tuple[str, bool]]
""" Get all the available checkers from default and from the plugins.
"""Get all the available checkers from default and from the plugins.
:param clang: the compiler we are using
:param plugins: list of plugins which was requested by the user
:return: a dictionary of all available checkers and its status
{<checker name>: (<checker description>, <is active by default>)} """
{<checker name>: (<checker description>, <is active by default>)}"""
load = [elem for plugin in plugins for elem in ['-load', plugin]]
cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help']
load = [elem for plugin in plugins for elem in ["-load", plugin]]
cmd = [clang, "-cc1"] + load + ["-analyzer-checker-help"]
lines = run_command(cmd)
is_active_checker = is_active(get_active_checkers(clang, plugins))
checkers = {
name: (description, is_active_checker(name))
for name, description in parse_checkers(lines)
}
checkers = {name: (description, is_active_checker(name)) for name, description in parse_checkers(lines)}
if not checkers:
raise Exception('Could not query Clang for available checkers.')
raise Exception("Could not query Clang for available checkers.")
return checkers

View File

@@ -3,7 +3,7 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible for parsing a compiler invocation. """
"""This module is responsible for parsing a compiler invocation."""
import re
import os
@@ -17,7 +17,7 @@ from typing import Optional # noqa: ignore=F401
from libscanbuild import Execution, shell_split, run_command
__all__ = ['classify_source', 'Compilation', 'CompilationDatabase']
__all__ = ["classify_source", "Compilation", "CompilationDatabase"]
# Map of ignored compiler option for the creation of a compilation database.
# This map is used in _split_command method, which classifies the parameters
@@ -29,85 +29,84 @@ __all__ = ['classify_source', 'Compilation', 'CompilationDatabase']
IGNORED_FLAGS = {
# compiling only flag, ignored because the creator of compilation
# database will explicitly set it.
'-c': 0,
"-c": 0,
# preprocessor macros, ignored because would cause duplicate entries in
# the output (the only difference would be these flags). this is actual
# finding from users, who suffered longer execution time caused by the
# duplicates.
'-MD': 0,
'-MMD': 0,
'-MG': 0,
'-MP': 0,
'-MF': 1,
'-MT': 1,
'-MQ': 1,
"-MD": 0,
"-MMD": 0,
"-MG": 0,
"-MP": 0,
"-MF": 1,
"-MT": 1,
"-MQ": 1,
# linker options, ignored because for compilation database will contain
# compilation commands only. so, the compiler would ignore these flags
# anyway. the benefit to get rid of them is to make the output more
# readable.
'-static': 0,
'-shared': 0,
'-s': 0,
'-rdynamic': 0,
'-l': 1,
'-L': 1,
'-u': 1,
'-z': 1,
'-T': 1,
'-Xlinker': 1,
"-static": 0,
"-shared": 0,
"-s": 0,
"-rdynamic": 0,
"-l": 1,
"-L": 1,
"-u": 1,
"-z": 1,
"-T": 1,
"-Xlinker": 1,
# clang-cl / msvc cl specific flags
# consider moving visual studio specific warning flags also
'-nologo': 0,
'-EHsc': 0,
'-EHa': 0
"-nologo": 0,
"-EHsc": 0,
"-EHa": 0,
} # type: Dict[str, int]
# Known C/C++ compiler wrapper name patterns.
COMPILER_PATTERN_WRAPPER = re.compile(r'^(distcc|ccache)$')
COMPILER_PATTERN_WRAPPER = re.compile(r"^(distcc|ccache)$")
# Known MPI compiler wrapper name patterns.
COMPILER_PATTERNS_MPI_WRAPPER = re.compile(r'^mpi(cc|cxx|CC|c\+\+)$')
COMPILER_PATTERNS_MPI_WRAPPER = re.compile(r"^mpi(cc|cxx|CC|c\+\+)$")
# Known C compiler executable name patterns.
COMPILER_PATTERNS_CC = (
re.compile(r'^([^-]*-)*[mg]cc(-\d+(\.\d+){0,2})?$'),
re.compile(r'^([^-]*-)*clang(-\d+(\.\d+){0,2})?$'),
re.compile(r'^(|i)cc$'),
re.compile(r'^(g|)xlc$'),
re.compile(r"^([^-]*-)*[mg]cc(-\d+(\.\d+){0,2})?$"),
re.compile(r"^([^-]*-)*clang(-\d+(\.\d+){0,2})?$"),
re.compile(r"^(|i)cc$"),
re.compile(r"^(g|)xlc$"),
)
# Known C++ compiler executable name patterns.
COMPILER_PATTERNS_CXX = (
re.compile(r'^(c\+\+|cxx|CC)$'),
re.compile(r'^([^-]*-)*[mg]\+\+(-\d+(\.\d+){0,2})?$'),
re.compile(r'^([^-]*-)*clang\+\+(-\d+(\.\d+){0,2})?$'),
re.compile(r'^icpc$'),
re.compile(r'^(g|)xl(C|c\+\+)$'),
re.compile(r"^(c\+\+|cxx|CC)$"),
re.compile(r"^([^-]*-)*[mg]\+\+(-\d+(\.\d+){0,2})?$"),
re.compile(r"^([^-]*-)*clang\+\+(-\d+(\.\d+){0,2})?$"),
re.compile(r"^icpc$"),
re.compile(r"^(g|)xl(C|c\+\+)$"),
)
CompilationCommand = collections.namedtuple(
'CompilationCommand', ['compiler', 'flags', 'files'])
CompilationCommand = collections.namedtuple("CompilationCommand", ["compiler", "flags", "files"])
class Compilation:
""" Represents a compilation of a single module. """
def __init__(self, # type: Compilation
compiler, # type: str
flags, # type: List[str]
source, # type: str
directory # type: str
):
# type: (...) -> None
""" Constructor for a single compilation.
"""Represents a compilation of a single module."""
This method just normalize the paths and initialize values. """
def __init__(
self, # type: Compilation
compiler, # type: str
flags, # type: List[str]
source, # type: str
directory, # type: str
):
# type: (...) -> None
"""Constructor for a single compilation.
This method just normalize the paths and initialize values."""
self.compiler = compiler
self.flags = flags
self.directory = os.path.normpath(directory)
self.source = source if os.path.isabs(source) else \
os.path.normpath(os.path.join(self.directory, source))
self.source = source if os.path.isabs(source) else os.path.normpath(os.path.join(self.directory, source))
def __hash__(self):
# type: (Compilation) -> int
@@ -119,76 +118,70 @@ class Compilation:
def as_dict(self):
# type: (Compilation) -> Dict[str, str]
""" This method dumps the object attributes into a dictionary. """
"""This method dumps the object attributes into a dictionary."""
return vars(self)
def as_db_entry(self):
# type: (Compilation) -> Dict[str, Any]
""" This method creates a compilation database entry. """
"""This method creates a compilation database entry."""
relative = os.path.relpath(self.source, self.directory)
compiler = 'cc' if self.compiler == 'c' else 'c++'
return {
'file': relative,
'arguments': [compiler, '-c'] + self.flags + [relative],
'directory': self.directory
}
compiler = "cc" if self.compiler == "c" else "c++"
return {"file": relative, "arguments": [compiler, "-c"] + self.flags + [relative], "directory": self.directory}
@classmethod
def from_db_entry(cls, entry):
# type: (Type[Compilation], Dict[str, str]) -> Iterable[Compilation]
""" Parser method for compilation entry.
"""Parser method for compilation entry.
From compilation database entry it creates the compilation object.
:param entry: the compilation database entry
:return: stream of CompilationDbEntry objects """
:return: stream of CompilationDbEntry objects"""
command = shell_split(entry['command']) if 'command' in entry else \
entry['arguments']
execution = Execution(cmd=command, cwd=entry['directory'], pid=0)
command = shell_split(entry["command"]) if "command" in entry else entry["arguments"]
execution = Execution(cmd=command, cwd=entry["directory"], pid=0)
return cls.iter_from_execution(execution)
@classmethod
def iter_from_execution(cls, # type: Type[Compilation]
execution, # type: Execution
cc='cc', # type: str
cxx='c++' # type: str
):
def iter_from_execution(
cls, # type: Type[Compilation]
execution, # type: Execution
cc="cc", # type: str
cxx="c++", # type: str
):
# type: (...) -> Iterable[Compilation]
""" Generator method for compilation entries.
"""Generator method for compilation entries.
From a single compiler call it can generate zero or more entries.
:param execution: executed command and working directory
:param cc: user specified C compiler name
:param cxx: user specified C++ compiler name
:return: stream of CompilationDbEntry objects """
:return: stream of CompilationDbEntry objects"""
candidate = cls._split_command(execution.cmd, cc, cxx)
for source in candidate.files if candidate else []:
result = Compilation(directory=execution.cwd,
source=source,
compiler=candidate.compiler,
flags=candidate.flags)
result = Compilation(directory=execution.cwd, source=source, compiler=candidate.compiler, flags=candidate.flags)
if os.path.isfile(result.source):
yield result
@classmethod
def _split_compiler(cls, # type: Type[Compilation]
command, # type: List[str]
cc, # type: str
cxx # type: str
):
def _split_compiler(
cls, # type: Type[Compilation]
command, # type: List[str]
cc, # type: str
cxx, # type: str
):
# type: (...) -> Optional[Tuple[str, List[str]]]
""" A predicate to decide whether the command is a compiler call.
"""A predicate to decide whether the command is a compiler call.
:param command: the command to classify
:param cc: user specified C compiler name
:param cxx: user specified C++ compiler name
:return: None if the command is not a compilation, or a tuple
(compiler_language, rest of the command) otherwise """
(compiler_language, rest of the command) otherwise"""
def is_wrapper(cmd):
# type: (str) -> bool
@@ -200,13 +193,11 @@ class Compilation:
def is_c_compiler(cmd):
# type: (str) -> bool
return os.path.basename(cc) == cmd or \
any(pattern.match(cmd) for pattern in COMPILER_PATTERNS_CC)
return os.path.basename(cc) == cmd or any(pattern.match(cmd) for pattern in COMPILER_PATTERNS_CC)
def is_cxx_compiler(cmd):
# type: (str) -> bool
return os.path.basename(cxx) == cmd or \
any(pattern.match(cmd) for pattern in COMPILER_PATTERNS_CXX)
return os.path.basename(cxx) == cmd or any(pattern.match(cmd) for pattern in COMPILER_PATTERNS_CXX)
if command: # not empty list will allow to index '0' and '1:'
executable = os.path.basename(command[0]) # type: str
@@ -217,7 +208,7 @@ class Compilation:
if is_wrapper(executable):
result = cls._split_compiler(parameters, cc, cxx)
# Compiler wrapper without compiler is a 'C' compiler.
return ('c', parameters) if result is None else result
return ("c", parameters) if result is None else result
# MPI compiler wrappers add extra parameters
elif is_mpi_wrapper(executable):
# Pass the executable with full path to avoid pick different
@@ -226,81 +217,79 @@ class Compilation:
return cls._split_compiler(mpi_call + parameters, cc, cxx)
# and 'compiler' 'parameters' is valid.
elif is_c_compiler(executable):
return 'c', parameters
return "c", parameters
elif is_cxx_compiler(executable):
return 'c++', parameters
return "c++", parameters
return None
@classmethod
def _split_command(cls, command, cc, cxx):
""" Returns a value when the command is a compilation, None otherwise.
"""Returns a value when the command is a compilation, None otherwise.
:param command: the command to classify
:param cc: user specified C compiler name
:param cxx: user specified C++ compiler name
:return: stream of CompilationCommand objects """
:return: stream of CompilationCommand objects"""
logging.debug('input was: %s', command)
logging.debug("input was: %s", command)
# quit right now, if the program was not a C/C++ compiler
compiler_and_arguments = cls._split_compiler(command, cc, cxx)
if compiler_and_arguments is None:
return None
# the result of this method
result = CompilationCommand(compiler=compiler_and_arguments[0],
flags=[],
files=[])
result = CompilationCommand(compiler=compiler_and_arguments[0], flags=[], files=[])
# iterate on the compile options
args = iter(compiler_and_arguments[1])
for arg in args:
# quit when compilation pass is not involved
if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}:
if arg in {"-E", "-S", "-cc1", "-M", "-MM", "-###"}:
return None
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
elif re.match(r'^-(l|L|Wl,).+', arg):
elif re.match(r"^-(l|L|Wl,).+", arg):
pass
# some parameters look like a filename, take those explicitly
elif arg in {'-D', '-I'}:
elif arg in {"-D", "-I"}:
result.flags.extend([arg, next(args)])
# parameter which looks source file is taken...
elif re.match(r'^[^-].+', arg) and classify_source(arg):
elif re.match(r"^[^-].+", arg) and classify_source(arg):
result.files.append(arg)
# and consider everything else as compile option.
else:
result.flags.append(arg)
logging.debug('output is: %s', result)
logging.debug("output is: %s", result)
# do extra check on number of source files
return result if result.files else None
class CompilationDatabase:
""" Compilation Database persistence methods. """
"""Compilation Database persistence methods."""
@staticmethod
def save(filename, iterator):
# type: (str, Iterable[Compilation]) -> None
""" Saves compilations to given file.
"""Saves compilations to given file.
:param filename: the destination file name
:param iterator: iterator of Compilation objects. """
:param iterator: iterator of Compilation objects."""
entries = [entry.as_db_entry() for entry in iterator]
with open(filename, 'w') as handle:
with open(filename, "w") as handle:
json.dump(entries, handle, sort_keys=True, indent=4)
@staticmethod
def load(filename):
# type: (str) -> Iterable[Compilation]
""" Load compilations from file.
"""Load compilations from file.
:param filename: the file to read from
:returns: iterator of Compilation objects. """
:returns: iterator of Compilation objects."""
with open(filename, 'r') as handle:
with open(filename, "r") as handle:
for entry in json.load(handle):
for compilation in Compilation.from_db_entry(entry):
yield compilation
@@ -308,30 +297,30 @@ class CompilationDatabase:
def classify_source(filename, c_compiler=True):
# type: (str, bool) -> Optional[str]
""" Classify source file names and returns the presumed language,
"""Classify source file names and returns the presumed language,
based on the file name extension.
:param filename: the source file name
:param c_compiler: indicate that the compiler is a C compiler,
:return: the language from file name extension. """
:return: the language from file name extension."""
mapping = {
'.c': 'c' if c_compiler else 'c++',
'.i': 'c-cpp-output' if c_compiler else 'c++-cpp-output',
'.ii': 'c++-cpp-output',
'.m': 'objective-c',
'.mi': 'objective-c-cpp-output',
'.mm': 'objective-c++',
'.mii': 'objective-c++-cpp-output',
'.C': 'c++',
'.cc': 'c++',
'.CC': 'c++',
'.cp': 'c++',
'.cpp': 'c++',
'.cxx': 'c++',
'.c++': 'c++',
'.C++': 'c++',
'.txx': 'c++'
".c": "c" if c_compiler else "c++",
".i": "c-cpp-output" if c_compiler else "c++-cpp-output",
".ii": "c++-cpp-output",
".m": "objective-c",
".mi": "objective-c-cpp-output",
".mm": "objective-c++",
".mii": "objective-c++-cpp-output",
".C": "c++",
".cc": "c++",
".CC": "c++",
".cp": "c++",
".cpp": "c++",
".cxx": "c++",
".c++": "c++",
".C++": "c++",
".txx": "c++",
}
__, extension = os.path.splitext(os.path.basename(filename))
@@ -340,10 +329,10 @@ def classify_source(filename, c_compiler=True):
def get_mpi_call(wrapper):
# type: (str) -> List[str]
""" Provide information on how the underlying compiler would have been
invoked without the MPI compiler wrapper. """
"""Provide information on how the underlying compiler would have been
invoked without the MPI compiler wrapper."""
for query_flags in [['-show'], ['--showme']]:
for query_flags in [["-show"], ["--showme"]]:
try:
output = run_command([wrapper] + query_flags)
if output:

View File

@@ -3,7 +3,7 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to capture the compiler invocation of any
"""This module is responsible to capture the compiler invocation of any
build process. The result of that should be a compilation database.
This implementation is using the LD_PRELOAD or DYLD_INSERT_LIBRARIES
@@ -18,7 +18,7 @@ files shall be placed. This parameter is passed as an environment variable.
The module also implements compiler wrappers to intercept the compiler calls.
The module implements the build command execution and the post-processing of
the output files, which will condensates into a compilation database. """
the output files, which will condensates into a compilation database."""
import itertools
import json
@@ -33,23 +33,30 @@ import argparse # noqa: ignore=F401
from typing import Iterable, Dict, Tuple, List # noqa: ignore=F401
from libear import build_libear, temporary_directory
from libscanbuild import command_entry_point, wrapper_entry_point, \
wrapper_environment, run_build, run_command, Execution, shell_split
from libscanbuild import (
command_entry_point,
wrapper_entry_point,
wrapper_environment,
run_build,
run_command,
Execution,
shell_split,
)
from libscanbuild.arguments import parse_args_for_intercept_build
from libscanbuild.compilation import Compilation, CompilationDatabase
__all__ = ['capture', 'intercept_build', 'intercept_compiler_wrapper']
__all__ = ["capture", "intercept_build", "intercept_compiler_wrapper"]
COMPILER_WRAPPER_CC = 'intercept-cc'
COMPILER_WRAPPER_CXX = 'intercept-c++'
TRACE_FILE_PREFIX = 'execution.' # same as in ear.c
WRAPPER_ONLY_PLATFORMS = ('win32', 'cygwin')
COMPILER_WRAPPER_CC = "intercept-cc"
COMPILER_WRAPPER_CXX = "intercept-c++"
TRACE_FILE_PREFIX = "execution." # same as in ear.c
WRAPPER_ONLY_PLATFORMS = ("win32", "cygwin")
@command_entry_point
def intercept_build():
# type: () -> int
""" Entry point for 'intercept-build' command. """
"""Entry point for 'intercept-build' command."""
args = parse_args_for_intercept_build()
exit_code, current = capture(args)
@@ -68,12 +75,12 @@ def intercept_build():
def capture(args):
# type: (argparse.Namespace) -> Tuple[int, Iterable[Compilation]]
""" Implementation of compilation database generation.
"""Implementation of compilation database generation.
:param args: the parsed and validated command line arguments
:return: the exit status of build process. """
:return: the exit status of build process."""
with temporary_directory(prefix='intercept-') as tmp_dir:
with temporary_directory(prefix="intercept-") as tmp_dir:
# run the build command
environment = setup_environment(args, tmp_dir)
exit_code = run_build(args.build, env=environment)
@@ -86,14 +93,14 @@ def capture(args):
def compilations(exec_calls, cc, cxx):
# type: (Iterable[Execution], str, str) -> Iterable[Compilation]
""" Needs to filter out commands which are not compiler calls. And those
"""Needs to filter out commands which are not compiler calls. And those
compiler calls shall be compilation (not pre-processing or linking) calls.
Plus needs to find the source file name from the arguments.
:param exec_calls: iterator of executions
:param cc: user specified C compiler name
:param cxx: user specified C++ compiler name
:return: stream of formatted compilation database entries """
:return: stream of formatted compilation database entries"""
for call in exec_calls:
for compilation in Compilation.iter_from_execution(call, cc, cxx):
@@ -102,7 +109,7 @@ def compilations(exec_calls, cc, cxx):
def setup_environment(args, destination):
# type: (argparse.Namespace, str) -> Dict[str, str]
""" Sets up the environment for the build command.
"""Sets up the environment for the build command.
In order to capture the sub-commands (executed by the build process),
it needs to prepare the environment. It's either the compiler wrappers
@@ -111,28 +118,27 @@ def setup_environment(args, destination):
:param args: command line arguments
:param destination: directory path for the execution trace files
:return: a prepared set of environment variables. """
:return: a prepared set of environment variables."""
use_wrapper = args.override_compiler or is_preload_disabled(sys.platform)
environment = dict(os.environ)
environment.update({'INTERCEPT_BUILD_TARGET_DIR': destination})
environment.update({"INTERCEPT_BUILD_TARGET_DIR": destination})
if use_wrapper:
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX,
})
environment.update(
{
"CC": COMPILER_WRAPPER_CC,
"CXX": COMPILER_WRAPPER_CXX,
}
)
else:
intercept_library = build_libear(args.cc, destination)
if sys.platform == 'darwin':
environment.update({
'DYLD_INSERT_LIBRARIES': intercept_library,
'DYLD_FORCE_FLAT_NAMESPACE': '1'
})
if sys.platform == "darwin":
environment.update({"DYLD_INSERT_LIBRARIES": intercept_library, "DYLD_FORCE_FLAT_NAMESPACE": "1"})
else:
environment.update({'LD_PRELOAD': intercept_library})
environment.update({"LD_PRELOAD": intercept_library})
return environment
@@ -141,30 +147,30 @@ def setup_environment(args, destination):
@wrapper_entry_point
def intercept_compiler_wrapper(_, execution):
# type: (int, Execution) -> None
""" Entry point for `intercept-cc` and `intercept-c++` compiler wrappers.
"""Entry point for `intercept-cc` and `intercept-c++` compiler wrappers.
It does generate execution report into target directory.
The target directory name is from environment variables. """
The target directory name is from environment variables."""
message_prefix = 'execution report might be incomplete: %s'
message_prefix = "execution report might be incomplete: %s"
target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
target_dir = os.getenv("INTERCEPT_BUILD_TARGET_DIR")
if not target_dir:
logging.warning(message_prefix, 'missing target directory')
logging.warning(message_prefix, "missing target directory")
return
# write current execution info to the pid file
try:
target_file_name = TRACE_FILE_PREFIX + str(uuid.uuid4())
target_file = os.path.join(target_dir, target_file_name)
logging.debug('writing execution report to: %s', target_file)
logging.debug("writing execution report to: %s", target_file)
write_exec_trace(target_file, execution)
except IOError:
logging.warning(message_prefix, 'io problem')
logging.warning(message_prefix, "io problem")
def expand_cmd_with_response_files(cmd):
# type: (List[str]) -> List[str]
""" Expand's response file parameters into actual parameters
"""Expand's response file parameters into actual parameters
MSVC's cl and clang-cl has functionality to prevent too long command lines
by reading options from so called temporary "response" files. These files
@@ -172,24 +178,24 @@ def expand_cmd_with_response_files(cmd):
compilation units.
For example, QT's qmake generates nmake based makefiles where the response
file contains all compilation units. """
file contains all compilation units."""
def is_response_file(param):
# type: (str) -> bool
""" Checks if the given command line argument is response file. """
return param[0] == '@' and os.path.isfile(param[1:])
"""Checks if the given command line argument is response file."""
return param[0] == "@" and os.path.isfile(param[1:])
def from_response_file(filename):
# type: (str) -> List[str]
""" Read and return command line argument list from response file.
"""Read and return command line argument list from response file.
Might throw IOException when file operations fails. """
with open(filename[1:], 'r') as file_handle:
Might throw IOException when file operations fails."""
with open(filename[1:], "r") as file_handle:
return [arg.strip() for arg in shell_split(file_handle.read())]
def update_if_needed(arg):
# type: (str) -> List[str]
""" Returns [n,] thats either read from response or has single arg """
"""Returns [n,] thats either read from response or has single arg"""
return from_response_file(arg) if is_response_file(arg) else [arg]
return [n for row in [update_if_needed(arg) for arg in cmd] for n in row]
@@ -197,45 +203,41 @@ def expand_cmd_with_response_files(cmd):
def write_exec_trace(filename, entry):
# type: (str, Execution) -> None
""" Write execution report file.
"""Write execution report file.
This method shall be sync with the execution report writer in interception
library. The entry in the file is a JSON objects.
:param filename: path to the output execution trace file,
:param entry: the Execution object to append to that file. """
:param entry: the Execution object to append to that file."""
call = {'pid': entry.pid, 'cwd': entry.cwd,
'cmd': expand_cmd_with_response_files(entry.cmd)}
with open(filename, 'w') as handler:
call = {"pid": entry.pid, "cwd": entry.cwd, "cmd": expand_cmd_with_response_files(entry.cmd)}
with open(filename, "w") as handler:
json.dump(call, handler)
def parse_exec_trace(filename):
# type: (str) -> Execution
""" Parse execution report file.
"""Parse execution report file.
Given filename points to a file which contains the basic report
generated by the interception library or compiler wrapper.
:param filename: path to an execution trace file to read from,
:return: an Execution object. """
:return: an Execution object."""
logging.debug('parse exec trace file: %s', filename)
with open(filename, 'r') as handler:
logging.debug("parse exec trace file: %s", filename)
with open(filename, "r") as handler:
entry = json.load(handler)
return Execution(
pid=entry['pid'],
cwd=entry['cwd'],
cmd=entry['cmd'])
return Execution(pid=entry["pid"], cwd=entry["cwd"], cmd=entry["cmd"])
def exec_trace_files(directory):
# type: (str) -> Iterable[str]
""" Generates exec trace file names.
"""Generates exec trace file names.
:param directory: path to directory which contains the trace files.
:return: a generator of file names (absolute path). """
:return: a generator of file names (absolute path)."""
for root, _, files in os.walk(directory):
for candidate in files:
@@ -245,20 +247,20 @@ def exec_trace_files(directory):
def is_preload_disabled(platform):
# type: (str) -> bool
""" Library-based interposition will fail silently if SIP is enabled,
"""Library-based interposition will fail silently if SIP is enabled,
so this should be detected. You can detect whether SIP is enabled on
Darwin by checking whether (1) there is a binary called 'csrutil' in
the path and, if so, (2) whether the output of executing 'csrutil status'
contains 'System Integrity Protection status: enabled'.
:param platform: name of the platform (returned by sys.platform),
:return: True if library preload will fail by the dynamic linker. """
:return: True if library preload will fail by the dynamic linker."""
if platform in WRAPPER_ONLY_PLATFORMS:
return True
elif platform == 'darwin':
command = ['csrutil', 'status']
pattern = re.compile(r'System Integrity Protection status:\s+enabled')
elif platform == "darwin":
command = ["csrutil", "status"]
pattern = re.compile(r"System Integrity Protection status:\s+enabled")
try:
return any(pattern.match(line) for line in run_command(command))
except (OSError, subprocess.CalledProcessError):

View File

@@ -3,10 +3,10 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to generate 'index.html' for the report.
"""This module is responsible to generate 'index.html' for the report.
The input for this step is the output directory, where individual reports
could be found. It parses those reports and generates 'index.html'. """
could be found. It parses those reports and generates 'index.html'."""
import re
import os
@@ -25,16 +25,16 @@ import argparse # noqa: ignore=F401
from typing import Dict, List, Tuple, Any, Set, Generator, Iterator, Optional # noqa: ignore=F401
from libscanbuild.clang import get_version
__all__ = ['document']
__all__ = ["document"]
def document(args):
# type: (argparse.Namespace) -> int
""" Generates cover report and returns the number of bugs/crashes. """
"""Generates cover report and returns the number of bugs/crashes."""
html_reports_available = args.output_format in {'html', 'plist-html'}
html_reports_available = args.output_format in {"html", "plist-html"}
logging.debug('count crashes and bugs')
logging.debug("count crashes and bugs")
crash_count = sum(1 for _ in Crash.read(args.output))
bug_counter = create_counters()
for bug in read_bugs(args.output, html_reports_available):
@@ -44,7 +44,7 @@ def document(args):
if html_reports_available and result:
use_cdb = os.path.exists(args.cdb)
logging.debug('generate index.html file')
logging.debug("generate index.html file")
# common prefix for source files to have sorter path
prefix = commonprefix_from(args.cdb) if use_cdb else os.getcwd()
# assemble the cover from multiple fragments
@@ -68,14 +68,16 @@ def document(args):
def assemble_cover(args, prefix, fragments):
# type: (argparse.Namespace, str, List[str]) -> None
""" Put together the fragments into a final report. """
"""Put together the fragments into a final report."""
if args.html_title is None:
args.html_title = os.path.basename(prefix) + ' - analyzer results'
args.html_title = os.path.basename(prefix) + " - analyzer results"
with open(os.path.join(args.output, 'index.html'), 'w') as handle:
with open(os.path.join(args.output, "index.html"), "w") as handle:
indent = 0
handle.write(reindent("""
handle.write(
reindent(
"""
|<!DOCTYPE html>
|<html>
| <head>
@@ -83,9 +85,14 @@ def assemble_cover(args, prefix, fragments):
| <link type="text/css" rel="stylesheet" href="scanview.css"/>
| <script type='text/javascript' src="sorttable.js"></script>
| <script type='text/javascript' src='selectable.js'></script>
| </head>""", indent).format(html_title=args.html_title))
handle.write(comment('SUMMARYENDHEAD'))
handle.write(reindent("""
| </head>""",
indent,
).format(html_title=args.html_title)
)
handle.write(comment("SUMMARYENDHEAD"))
handle.write(
reindent(
"""
| <body>
| <h1>{html_title}</h1>
| <table>
@@ -94,30 +101,41 @@ def assemble_cover(args, prefix, fragments):
| <tr><th>Command Line:</th><td>{cmd_args}</td></tr>
| <tr><th>Clang Version:</th><td>{clang_version}</td></tr>
| <tr><th>Date:</th><td>{date}</td></tr>
| </table>""", indent).format(html_title=args.html_title,
user_name=getpass.getuser(),
host_name=socket.gethostname(),
current_dir=prefix,
cmd_args=' '.join(sys.argv),
clang_version=get_version(args.clang),
date=datetime.datetime.today(
).strftime('%c')))
| </table>""",
indent,
).format(
html_title=args.html_title,
user_name=getpass.getuser(),
host_name=socket.gethostname(),
current_dir=prefix,
cmd_args=" ".join(sys.argv),
clang_version=get_version(args.clang),
date=datetime.datetime.today().strftime("%c"),
)
)
for fragment in fragments:
# copy the content of fragments
with open(fragment, 'r') as input_handle:
with open(fragment, "r") as input_handle:
shutil.copyfileobj(input_handle, handle)
handle.write(reindent("""
handle.write(
reindent(
"""
| </body>
|</html>""", indent))
|</html>""",
indent,
)
)
def bug_summary(output_dir, bug_counter):
""" Bug summary is a HTML table to give a better overview of the bugs. """
"""Bug summary is an HTML table that gives a better overview of the bugs."""
name = os.path.join(output_dir, 'summary.html.fragment')
with open(name, 'w') as handle:
name = os.path.join(output_dir, "summary.html.fragment")
with open(name, "w") as handle:
indent = 4
handle.write(reindent("""
handle.write(
reindent(
"""
|<h2>Bug Summary</h2>
|<table>
| <thead>
@@ -127,8 +145,13 @@ def bug_summary(output_dir, bug_counter):
| <td class="sorttable_nosort">Display?</td>
| </tr>
| </thead>
| <tbody>""", indent))
handle.write(reindent("""
| <tbody>""",
indent,
)
)
handle.write(
reindent(
"""
| <tr style="font-weight:bold">
| <td class="SUMM_DESC">All Bugs</td>
| <td class="Q">{0}</td>
@@ -138,14 +161,24 @@ def bug_summary(output_dir, bug_counter):
| onClick="CopyCheckedStateToCheckButtons(this);"/>
| </center>
| </td>
| </tr>""", indent).format(bug_counter.total))
| </tr>""",
indent,
).format(bug_counter.total)
)
for category, types in bug_counter.categories.items():
handle.write(reindent("""
handle.write(
reindent(
"""
| <tr>
| <th>{0}</th><th colspan=2></th>
| </tr>""", indent).format(category))
| </tr>""",
indent,
).format(category)
)
for bug_type in types.values():
handle.write(reindent("""
handle.write(
reindent(
"""
| <tr>
| <td class="SUMM_DESC">{bug_type}</td>
| <td class="Q">{bug_count}</td>
@@ -155,25 +188,35 @@ def bug_summary(output_dir, bug_counter):
| onClick="ToggleDisplay(this,'{bug_type_class}');"/>
| </center>
| </td>
| </tr>""", indent).format(**bug_type))
handle.write(reindent("""
| </tr>""",
indent,
).format(**bug_type)
)
handle.write(
reindent(
"""
| </tbody>
|</table>""", indent))
handle.write(comment('SUMMARYBUGEND'))
|</table>""",
indent,
)
)
handle.write(comment("SUMMARYBUGEND"))
return name
def bug_report(output_dir, prefix):
# type: (str, str) -> str
""" Creates a fragment from the analyzer reports. """
"""Creates a fragment from the analyzer reports."""
# pretty = prettify_bug(prefix, output_dir)
# bugs = (pretty(bug) for bug in read_bugs(output_dir, True))
name = os.path.join(output_dir, 'bugs.html.fragment')
with open(name, 'w') as handle:
name = os.path.join(output_dir, "bugs.html.fragment")
with open(name, "w") as handle:
indent = 4
handle.write(reindent("""
handle.write(
reindent(
"""
|<h2>Reports</h2>
|<table class="sortable" style="table-layout:automatic">
| <thead>
@@ -190,11 +233,16 @@ def bug_report(output_dir, prefix):
| <td class="sorttable_nosort"></td>
| </tr>
| </thead>
| <tbody>""", indent))
handle.write(comment('REPORTBUGCOL'))
| <tbody>""",
indent,
)
)
handle.write(comment("REPORTBUGCOL"))
for bug in read_bugs(output_dir, True):
current = bug.pretty(prefix, output_dir)
handle.write(reindent("""
handle.write(
reindent(
"""
| <tr class="{bug_type_class}">
| <td class="DESC">{bug_category}</td>
| <td class="DESC">{bug_type}</td>
@@ -203,23 +251,33 @@ def bug_report(output_dir, prefix):
| <td class="Q">{bug_line}</td>
| <td class="Q">{bug_path_length}</td>
| <td><a href="{report_file}#EndPath">View Report</a></td>
| </tr>""", indent).format(**current))
handle.write(comment('REPORTBUG', {'id': current['report_file']}))
handle.write(reindent("""
| </tr>""",
indent,
).format(**current)
)
handle.write(comment("REPORTBUG", {"id": current["report_file"]}))
handle.write(
reindent(
"""
| </tbody>
|</table>""", indent))
handle.write(comment('REPORTBUGEND'))
|</table>""",
indent,
)
)
handle.write(comment("REPORTBUGEND"))
return name
def crash_report(output_dir, prefix):
# type: (str, str) -> str
""" Creates a fragment from the compiler crashes. """
"""Creates a fragment from the compiler crashes."""
name = os.path.join(output_dir, 'crashes.html.fragment')
with open(name, 'w') as handle:
name = os.path.join(output_dir, "crashes.html.fragment")
with open(name, "w") as handle:
indent = 4
handle.write(reindent("""
handle.write(
reindent(
"""
|<h2>Analyzer Failures</h2>
|<p>The analyzer had problems processing the following files:</p>
|<table>
@@ -231,32 +289,46 @@ def crash_report(output_dir, prefix):
| <td>STDERR Output</td>
| </tr>
| </thead>
| <tbody>""", indent))
| <tbody>""",
indent,
)
)
for crash in Crash.read(output_dir):
current = crash.pretty(prefix, output_dir)
handle.write(reindent("""
handle.write(
reindent(
"""
| <tr>
| <td>{problem}</td>
| <td>{source}</td>
| <td><a href="{file}">preprocessor output</a></td>
| <td><a href="{stderr}">analyzer std err</a></td>
| </tr>""", indent).format(**current))
handle.write(comment('REPORTPROBLEM', current))
handle.write(reindent("""
| </tr>""",
indent,
).format(**current)
)
handle.write(comment("REPORTPROBLEM", current))
handle.write(
reindent(
"""
| </tbody>
|</table>""", indent))
handle.write(comment('REPORTCRASHES'))
|</table>""",
indent,
)
)
handle.write(comment("REPORTCRASHES"))
return name
class Crash:
def __init__(self,
source, # type: str
problem, # type: str
file, # type: str
info, # type: str
stderr # type: str
):
def __init__(
self,
source, # type: str
problem, # type: str
file, # type: str
info, # type: str
stderr, # type: str
):
# type: (...) -> None
self.source = source
self.problem = problem
@@ -266,20 +338,20 @@ class Crash:
def pretty(self, prefix, output_dir):
# type: (Crash, str, str) -> Dict[str, str]
""" Make safe this values to embed into HTML. """
"""Make these values safe to embed into HTML."""
return {
'source': escape(chop(prefix, self.source)),
'problem': escape(self.problem),
'file': escape(chop(output_dir, self.file)),
'info': escape(chop(output_dir, self.info)),
'stderr': escape(chop(output_dir, self.stderr))
"source": escape(chop(prefix, self.source)),
"problem": escape(self.problem),
"file": escape(chop(output_dir, self.file)),
"info": escape(chop(output_dir, self.info)),
"stderr": escape(chop(output_dir, self.stderr)),
}
@classmethod
def _parse_info_file(cls, filename):
# type: (str) -> Optional[Tuple[str, str]]
""" Parse out the crash information from the report file. """
"""Parse out the crash information from the report file."""
lines = list(safe_readlines(filename))
return None if len(lines) < 2 else (lines[0], lines[1])
@@ -287,11 +359,11 @@ class Crash:
@classmethod
def read(cls, output_dir):
# type: (str) -> Iterator[Crash]
""" Generate a unique sequence of crashes from given directory. """
"""Generate a unique sequence of crashes from given directory."""
pattern = os.path.join(output_dir, 'failures', '*.info.txt')
pattern = os.path.join(output_dir, "failures", "*.info.txt")
for info_filename in glob.iglob(pattern):
base_filename = info_filename[0:-len('.info.txt')]
base_filename = info_filename[0 : -len(".info.txt")]
stderr_filename = "{}.stderr.txt".format(base_filename)
source_and_problem = cls._parse_info_file(info_filename)
@@ -301,75 +373,76 @@ class Crash:
problem=source_and_problem[1],
file=base_filename,
info=info_filename,
stderr=stderr_filename)
stderr=stderr_filename,
)
class Bug:
def __init__(self,
report, # type: str
attributes # type: Dict[str, str]
):
def __init__(
self,
report, # type: str
attributes, # type: Dict[str, str]
):
# type: (...) -> None
self.file = attributes.get('bug_file', '')
self.line = int(attributes.get('bug_line', '0'))
self.path_length = int(attributes.get('bug_path_length', '1'))
self.category = attributes.get('bug_category', 'Other')
self.type = attributes.get('bug_type', '')
self.function = attributes.get('bug_function', 'n/a')
self.file = attributes.get("bug_file", "")
self.line = int(attributes.get("bug_line", "0"))
self.path_length = int(attributes.get("bug_path_length", "1"))
self.category = attributes.get("bug_category", "Other")
self.type = attributes.get("bug_type", "")
self.function = attributes.get("bug_function", "n/a")
self.report = report
def __eq__(self, o):
# type: (Bug, object) -> bool
return isinstance(o, Bug) and \
o.line == self.line and \
o.path_length == self.path_length and \
o.type == self.type and \
o.file == self.file
return (
isinstance(o, Bug)
and o.line == self.line
and o.path_length == self.path_length
and o.type == self.type
and o.file == self.file
)
def __hash__(self):
# type: (Bug) -> int
return hash(self.line) +\
hash(self.path_length) +\
hash(self.type) +\
hash(self.file)
return hash(self.line) + hash(self.path_length) + hash(self.type) + hash(self.file)
def type_class(self):
# type: (Bug) -> str
def smash(key):
# type: (str) -> str
""" Make value ready to be HTML attribute value. """
"""Make value ready to be HTML attribute value."""
return key.lower().replace(' ', '_').replace("'", '')
return key.lower().replace(" ", "_").replace("'", "")
return '_'.join(['bt', smash(self.category), smash(self.type)])
return "_".join(["bt", smash(self.category), smash(self.type)])
def pretty(self, prefix, output_dir):
# type: (Bug, str, str) -> Dict[str, str]
""" Make safe this values to embed into HTML. """
"""Make these values safe to embed into HTML."""
return {
'bug_file': escape(chop(prefix, self.file)),
'bug_line': str(self.line),
'bug_path_length': str(self.path_length),
'bug_category': escape(self.category),
'bug_type': escape(self.type),
'bug_type_class': escape(self.type_class()),
'bug_function': escape(self.function),
'report_file': escape(chop(output_dir, self.report))
"bug_file": escape(chop(prefix, self.file)),
"bug_line": str(self.line),
"bug_path_length": str(self.path_length),
"bug_category": escape(self.category),
"bug_type": escape(self.type),
"bug_type_class": escape(self.type_class()),
"bug_function": escape(self.function),
"report_file": escape(chop(output_dir, self.report)),
}
def read_bugs(output_dir, html):
# type: (str, bool) -> Generator[Bug, None, None]
""" Generate a unique sequence of bugs from given output directory.
"""Generate a unique sequence of bugs from given output directory.
Duplicates can be in a project if the same module was compiled multiple
times with different compiler options. These would be better to show in
the final report (cover) only once. """
the final report (cover) only once."""
def empty(file_name):
return os.stat(file_name).st_size == 0
@@ -377,16 +450,15 @@ def read_bugs(output_dir, html):
# get the right parser for the job.
parser = parse_bug_html if html else parse_bug_plist
# get the input files, which are not empty.
pattern = os.path.join(output_dir, '*.html' if html else '*.plist')
bug_generators = (parser(file)
for file in glob.iglob(pattern) if not empty(file))
pattern = os.path.join(output_dir, "*.html" if html else "*.plist")
bug_generators = (parser(file) for file in glob.iglob(pattern) if not empty(file))
return unique_bugs(itertools.chain.from_iterable(bug_generators))
def unique_bugs(generator):
# type: (Iterator[Bug]) -> Generator[Bug, None, None]
""" Remove duplicates from bug stream """
"""Remove duplicates from bug stream"""
state = set() # type: Set[Bug]
for item in generator:
@@ -397,36 +469,41 @@ def unique_bugs(generator):
def parse_bug_plist(filename):
# type: (str) -> Generator[Bug, None, None]
""" Returns the generator of bugs from a single .plist file. """
"""Returns the generator of bugs from a single .plist file."""
with open(filename, 'rb') as handle:
with open(filename, "rb") as handle:
content = plistlib.load(handle)
files = content.get('files', [])
for bug in content.get('diagnostics', []):
if len(files) <= int(bug['location']['file']):
files = content.get("files", [])
for bug in content.get("diagnostics", []):
if len(files) <= int(bug["location"]["file"]):
logging.warning('Parsing bug from "%s" failed', filename)
continue
yield Bug(filename, {
'bug_type': bug['type'],
'bug_category': bug['category'],
'bug_line': bug['location']['line'],
'bug_path_length': bug['location']['col'],
'bug_file': files[int(bug['location']['file'])]
})
yield Bug(
filename,
{
"bug_type": bug["type"],
"bug_category": bug["category"],
"bug_line": bug["location"]["line"],
"bug_path_length": bug["location"]["col"],
"bug_file": files[int(bug["location"]["file"])],
},
)
def parse_bug_html(filename):
# type: (str) -> Generator[Bug, None, None]
""" Parse out the bug information from HTML output. """
"""Parse out the bug information from HTML output."""
patterns = [re.compile(r'<!-- BUGTYPE (?P<bug_type>.*) -->$'),
re.compile(r'<!-- BUGFILE (?P<bug_file>.*) -->$'),
re.compile(r'<!-- BUGPATHLENGTH (?P<bug_path_length>.*) -->$'),
re.compile(r'<!-- BUGLINE (?P<bug_line>.*) -->$'),
re.compile(r'<!-- BUGCATEGORY (?P<bug_category>.*) -->$'),
re.compile(r'<!-- FUNCTIONNAME (?P<bug_function>.*) -->$')]
endsign = re.compile(r'<!-- BUGMETAEND -->')
patterns = [
re.compile(r"<!-- BUGTYPE (?P<bug_type>.*) -->$"),
re.compile(r"<!-- BUGFILE (?P<bug_file>.*) -->$"),
re.compile(r"<!-- BUGPATHLENGTH (?P<bug_path_length>.*) -->$"),
re.compile(r"<!-- BUGLINE (?P<bug_line>.*) -->$"),
re.compile(r"<!-- BUGCATEGORY (?P<bug_category>.*) -->$"),
re.compile(r"<!-- FUNCTIONNAME (?P<bug_function>.*) -->$"),
]
endsign = re.compile(r"<!-- BUGMETAEND -->")
bug = dict()
for line in safe_readlines(filename):
@@ -445,23 +522,21 @@ def parse_bug_html(filename):
def create_counters():
# type () -> Callable[[Bug], None]
""" Create counters for bug statistics.
"""Create counters for bug statistics.
Two entries are maintained: 'total' is an integer, represents the
number of bugs. The 'categories' is a two level categorisation of bug
counters. The first level is 'bug category' the second is 'bug type'.
Each entry in this classification is a dictionary of 'count', 'type'
and 'label'. """
and 'label'."""
def predicate(bug):
# type (Bug) -> None
current_category = predicate.categories.get(bug.category, dict())
current_type = current_category.get(bug.type, {
'bug_type': bug.type,
'bug_type_class': bug.type_class(),
'bug_count': 0
})
current_type.update({'bug_count': current_type['bug_count'] + 1})
current_type = current_category.get(
bug.type, {"bug_type": bug.type, "bug_type_class": bug.type_class(), "bug_count": 0}
)
current_type.update({"bug_count": current_type["bug_count"] + 1})
current_category.update({bug.type: current_type})
predicate.categories.update({bug.category: current_category})
predicate.total += 1
@@ -473,26 +548,26 @@ def create_counters():
def copy_resource_files(output_dir):
# type: (str) -> None
""" Copy the javascript and css files to the report directory. """
"""Copy the javascript and css files to the report directory."""
this_dir = os.path.dirname(os.path.realpath(__file__))
for resource in os.listdir(os.path.join(this_dir, 'resources')):
shutil.copy(os.path.join(this_dir, 'resources', resource), output_dir)
for resource in os.listdir(os.path.join(this_dir, "resources")):
shutil.copy(os.path.join(this_dir, "resources", resource), output_dir)
def safe_readlines(filename):
# type: (str) -> Iterator[str]
""" Read and return an iterator of lines from file. """
"""Read and return an iterator of lines from file."""
with open(filename, mode='rb') as handler:
with open(filename, mode="rb") as handler:
for line in handler.readlines():
# this is a workaround to fix windows read '\r\n' as new lines.
yield line.decode(errors='ignore').rstrip()
yield line.decode(errors="ignore").rstrip()
def chop(prefix, filename):
# type: (str, str) -> str
""" Create 'filename' from '/prefix/filename' """
"""Create 'filename' from '/prefix/filename'"""
result = filename
if prefix:
try:
@@ -504,55 +579,49 @@ def chop(prefix, filename):
def escape(text):
# type: (str) -> str
""" Paranoid HTML escape method. (Python version independent) """
"""Paranoid HTML escape method. (Python version independent)"""
escape_table = {
'&': '&amp;',
'"': '&quot;',
"'": '&apos;',
'>': '&gt;',
'<': '&lt;'
}
return ''.join(escape_table.get(c, c) for c in text)
escape_table = {"&": "&amp;", '"': "&quot;", "'": "&apos;", ">": "&gt;", "<": "&lt;"}
return "".join(escape_table.get(c, c) for c in text)
def reindent(text, indent):
# type: (str, int) -> str
""" Utility function to format html output and keep indentation. """
"""Utility function to format html output and keep indentation."""
result = ''
result = ""
for line in text.splitlines():
if line.strip():
result += ' ' * indent + line.split('|')[1] + os.linesep
result += " " * indent + line.split("|")[1] + os.linesep
return result
def comment(name, opts=None):
# type: (str, Dict[str, str]) -> str
""" Utility function to format meta information as comment. """
"""Utility function to format meta information as comment."""
attributes = ''
attributes = ""
if opts:
for key, value in opts.items():
attributes += ' {0}="{1}"'.format(key, value)
return '<!-- {0}{1} -->{2}'.format(name, attributes, os.linesep)
return "<!-- {0}{1} -->{2}".format(name, attributes, os.linesep)
def commonprefix_from(filename):
# type: (str) -> str
""" Create file prefix from a compilation database entries. """
"""Create file prefix from a compilation database entries."""
with open(filename, 'r') as handle:
return commonprefix(item['file'] for item in json.load(handle))
with open(filename, "r") as handle:
return commonprefix(item["file"] for item in json.load(handle))
def commonprefix(files):
# type: (Iterator[str]) -> str
""" Fixed version of os.path.commonprefix.
"""Fixed version of os.path.commonprefix.
:param files: list of file names.
:return: the longest path prefix that is a prefix of all files. """
:return: the longest path prefix that is a prefix of all files."""
result = None
for current in files:
if result is not None:
@@ -561,7 +630,7 @@ def commonprefix(files):
result = current
if result is None:
return ''
return ""
elif not os.path.isdir(result):
return os.path.dirname(result)
return os.path.abspath(result)

View File

@@ -50,7 +50,7 @@ dev = [
"pytest>=7.0",
"pytest-cov>=4.0",
"ruff>=0.1.0",
"ty>=0.1.0",
"ty>=0.0",
"lit>=17.0",
]
test = [
@@ -60,7 +60,7 @@ test = [
]
lint = [
"ruff>=0.1.0",
"ty>=0.1.0",
"ty>=0.0",
]
[tool.hatch.build.targets.wheel]

View File

@@ -6,19 +6,19 @@ import sys
import os.path
EXPECTED = frozenset(['far.cxx', 'bar.cc', 'foo.cpp', 'boo.c++'])
EXPECTED = frozenset(["far.cxx", "bar.cc", "foo.cpp", "boo.c++"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input', type=argparse.FileType('r'))
parser.add_argument("input", type=argparse.FileType("r"))
args = parser.parse_args()
# file is open, parse the json content
entries = json.load(args.input)
# just get file names
result = set([os.path.basename(entry['file']) for entry in entries])
result = set([os.path.basename(entry["file"]) for entry in entries])
return 0 if result == EXPECTED else 1
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())

View File

@@ -17,26 +17,24 @@ def diff(lhs, rhs):
right = {smooth(entry): entry for entry in rhs}
for key in left.keys():
if key not in right:
yield '> {}'.format(left[key])
yield "> {}".format(left[key])
for key in right.keys():
if key not in left:
yield '< {}'.format(right[key])
yield "< {}".format(right[key])
def smooth(entry):
directory = os.path.normpath(entry['directory'])
source = entry['file'] if os.path.isabs(entry['file']) else \
os.path.normpath(os.path.join(directory, entry['file']))
arguments = entry['command'].split() if 'command' in entry else \
entry['arguments']
return '-'.join([source[::-1]] + arguments)
directory = os.path.normpath(entry["directory"])
source = entry["file"] if os.path.isabs(entry["file"]) else os.path.normpath(os.path.join(directory, entry["file"]))
arguments = entry["command"].split() if "command" in entry else entry["arguments"]
return "-".join([source[::-1]] + arguments)
def main():
""" Semantically diff two compilation databases. """
"""Semantically diff two compilation databases."""
parser = argparse.ArgumentParser()
parser.add_argument('left', type=argparse.FileType('r'))
parser.add_argument('right', type=argparse.FileType('r'))
parser.add_argument("left", type=argparse.FileType("r"))
parser.add_argument("right", type=argparse.FileType("r"))
args = parser.parse_args()
# files are open, parse the json content
lhs = json.load(args.left)

View File

@@ -12,25 +12,21 @@ import json
def main():
""" append entry to a compilation database. """
"""Append an entry to a compilation database."""
parser = argparse.ArgumentParser()
parser.add_argument('--cdb', required=True)
parser.add_argument('--command', required=True)
parser.add_argument('--file', required=True)
parser.add_argument("--cdb", required=True)
parser.add_argument("--command", required=True)
parser.add_argument("--file", required=True)
args = parser.parse_args()
# read existing content from target file
entries = []
if os.path.exists(args.cdb):
with open(args.cdb, 'r') as handle:
with open(args.cdb, "r") as handle:
entries = json.load(handle)
# update with the current invocation
current = {
'directory': os.getcwd(),
'command': args.command,
'file': args.file
}
current = {"directory": os.getcwd(), "command": args.command, "file": args.file}
entries.append(current)
# write the result back
with open(args.cdb, 'w') as handle:
with open(args.cdb, "w") as handle:
json.dump(list(entries), handle, sort_keys=True, indent=4)
return 0

View File

@@ -12,7 +12,7 @@ import os.path
import glob
import platform
IS_WINDOWS = os.getenv('windows')
IS_WINDOWS = os.getenv("windows")
class Spy(object):
@@ -26,38 +26,31 @@ class Spy(object):
class FilteringFlagsTest(unittest.TestCase):
@staticmethod
def classify_parameters(flags):
spy = Spy()
opts = {'flags': flags}
opts = {"flags": flags}
sut.classify_parameters(opts, spy.call)
return spy.arg
def assertLanguage(self, expected, flags):
self.assertEqual(
expected,
FilteringFlagsTest.classify_parameters(flags)['language'])
self.assertEqual(expected, FilteringFlagsTest.classify_parameters(flags)["language"])
def test_language_captured(self):
self.assertLanguage(None, [])
self.assertLanguage('c', ['-x', 'c'])
self.assertLanguage('cpp', ['-x', 'cpp'])
self.assertLanguage("c", ["-x", "c"])
self.assertLanguage("cpp", ["-x", "cpp"])
def assertArch(self, expected, flags):
self.assertEqual(
expected,
FilteringFlagsTest.classify_parameters(flags)['arch_list'])
self.assertEqual(expected, FilteringFlagsTest.classify_parameters(flags)["arch_list"])
def test_arch(self):
self.assertArch([], [])
self.assertArch(['mips'], ['-arch', 'mips'])
self.assertArch(['mips', 'i386'], ['-arch', 'mips', '-arch', 'i386'])
self.assertArch(["mips"], ["-arch", "mips"])
self.assertArch(["mips", "i386"], ["-arch", "mips", "-arch", "i386"])
def assertFlagsChanged(self, expected, flags):
self.assertEqual(
expected,
FilteringFlagsTest.classify_parameters(flags)['flags'])
self.assertEqual(expected, FilteringFlagsTest.classify_parameters(flags)["flags"])
def assertFlagsUnchanged(self, flags):
self.assertFlagsChanged(flags, flags)
@@ -66,79 +59,78 @@ class FilteringFlagsTest(unittest.TestCase):
self.assertFlagsChanged([], flags)
def test_optimalizations_pass(self):
self.assertFlagsUnchanged(['-O'])
self.assertFlagsUnchanged(['-O1'])
self.assertFlagsUnchanged(['-Os'])
self.assertFlagsUnchanged(['-O2'])
self.assertFlagsUnchanged(['-O3'])
self.assertFlagsUnchanged(["-O"])
self.assertFlagsUnchanged(["-O1"])
self.assertFlagsUnchanged(["-Os"])
self.assertFlagsUnchanged(["-O2"])
self.assertFlagsUnchanged(["-O3"])
def test_include_pass(self):
self.assertFlagsUnchanged([])
self.assertFlagsUnchanged(['-include', '/usr/local/include'])
self.assertFlagsUnchanged(['-I.'])
self.assertFlagsUnchanged(['-I', '.'])
self.assertFlagsUnchanged(['-I/usr/local/include'])
self.assertFlagsUnchanged(['-I', '/usr/local/include'])
self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include'])
self.assertFlagsUnchanged(['-isystem', '/path'])
self.assertFlagsUnchanged(['-isystem=/path'])
self.assertFlagsUnchanged(["-include", "/usr/local/include"])
self.assertFlagsUnchanged(["-I."])
self.assertFlagsUnchanged(["-I", "."])
self.assertFlagsUnchanged(["-I/usr/local/include"])
self.assertFlagsUnchanged(["-I", "/usr/local/include"])
self.assertFlagsUnchanged(["-I/opt", "-I", "/opt/otp/include"])
self.assertFlagsUnchanged(["-isystem", "/path"])
self.assertFlagsUnchanged(["-isystem=/path"])
def test_define_pass(self):
self.assertFlagsUnchanged(['-DNDEBUG'])
self.assertFlagsUnchanged(['-UNDEBUG'])
self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2'])
self.assertFlagsUnchanged(["-DNDEBUG"])
self.assertFlagsUnchanged(["-UNDEBUG"])
self.assertFlagsUnchanged(["-Dvar1=val1", "-Dvar2=val2"])
self.assertFlagsUnchanged(['-Dvar="val ues"'])
def test_output_filtered(self):
self.assertFlagsFiltered(['-o', 'source.o'])
self.assertFlagsFiltered(["-o", "source.o"])
def test_some_warning_filtered(self):
self.assertFlagsFiltered(['-Wall'])
self.assertFlagsFiltered(['-Wnoexcept'])
self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef'])
self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused'])
self.assertFlagsFiltered(["-Wall"])
self.assertFlagsFiltered(["-Wnoexcept"])
self.assertFlagsFiltered(["-Wreorder", "-Wunused", "-Wundef"])
self.assertFlagsUnchanged(["-Wno-reorder", "-Wno-unused"])
def test_compile_only_flags_pass(self):
self.assertFlagsUnchanged(['-std=C99'])
self.assertFlagsUnchanged(['-nostdinc'])
self.assertFlagsUnchanged(['-isystem', '/image/debian'])
self.assertFlagsUnchanged(['-iprefix', '/usr/local'])
self.assertFlagsUnchanged(['-iquote=me'])
self.assertFlagsUnchanged(['-iquote', 'me'])
self.assertFlagsUnchanged(["-std=C99"])
self.assertFlagsUnchanged(["-nostdinc"])
self.assertFlagsUnchanged(["-isystem", "/image/debian"])
self.assertFlagsUnchanged(["-iprefix", "/usr/local"])
self.assertFlagsUnchanged(["-iquote=me"])
self.assertFlagsUnchanged(["-iquote", "me"])
def test_compile_and_link_flags_pass(self):
self.assertFlagsUnchanged(['-fsinged-char'])
self.assertFlagsUnchanged(['-fPIC'])
self.assertFlagsUnchanged(['-stdlib=libc++'])
self.assertFlagsUnchanged(['--sysroot', '/'])
self.assertFlagsUnchanged(['-isysroot', '/'])
self.assertFlagsUnchanged(["-fsinged-char"])
self.assertFlagsUnchanged(["-fPIC"])
self.assertFlagsUnchanged(["-stdlib=libc++"])
self.assertFlagsUnchanged(["--sysroot", "/"])
self.assertFlagsUnchanged(["-isysroot", "/"])
def test_some_flags_filtered(self):
self.assertFlagsFiltered(['-g'])
self.assertFlagsFiltered(['-fsyntax-only'])
self.assertFlagsFiltered(['-save-temps'])
self.assertFlagsFiltered(['-init', 'my_init'])
self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c'])
self.assertFlagsFiltered(["-g"])
self.assertFlagsFiltered(["-fsyntax-only"])
self.assertFlagsFiltered(["-save-temps"])
self.assertFlagsFiltered(["-init", "my_init"])
self.assertFlagsFiltered(["-sectorder", "a", "b", "c"])
class RunAnalyzerTest(unittest.TestCase):
@staticmethod
def run_analyzer(content, failures_report):
with libear.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.cpp')
with open(filename, 'w') as handle:
filename = os.path.join(tmpdir, "test.cpp")
with open(filename, "w") as handle:
handle.write(content)
opts = {
'clang': 'clang',
'directory': os.getcwd(),
'flags': [],
'direct_args': [],
'source': filename,
'output_dir': tmpdir,
'output_format': 'plist',
'output_failures': failures_report
"clang": "clang",
"directory": os.getcwd(),
"flags": [],
"direct_args": [],
"source": filename,
"output_dir": tmpdir,
"output_format": "plist",
"output_failures": failures_report,
}
spy = Spy()
result = sut.run_analyzer(opts, spy.call)
@@ -148,146 +140,137 @@ class RunAnalyzerTest(unittest.TestCase):
content = "int div(int n, int d) { return n / d; }"
(result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
self.assertEqual(None, fwds)
self.assertEqual(0, result['exit_code'])
self.assertEqual(0, result["exit_code"])
def test_run_analyzer_crash(self):
content = "int div(int n, int d) { return n / d }"
(result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
self.assertEqual(None, fwds)
self.assertEqual(1, result['exit_code'])
self.assertEqual(1, result["exit_code"])
def test_run_analyzer_crash_and_forwarded(self):
content = "int div(int n, int d) { return n / d }"
(_, fwds) = RunAnalyzerTest.run_analyzer(content, True)
self.assertEqual(1, fwds['exit_code'])
self.assertTrue(len(fwds['error_output']) > 0)
self.assertEqual(1, fwds["exit_code"])
self.assertTrue(len(fwds["error_output"]) > 0)
class ReportFailureTest(unittest.TestCase):
def assertUnderFailures(self, path):
self.assertEqual('failures', os.path.basename(os.path.dirname(path)))
self.assertEqual("failures", os.path.basename(os.path.dirname(path)))
def test_report_failure_create_files(self):
with libear.temporary_directory() as tmp_dir:
# create input file
filename = os.path.join(tmp_dir, 'test.c')
with open(filename, 'w') as handle:
handle.write('int main() { return 0')
uname_msg = ' '.join(platform.uname()).strip()
error_msg = 'this is my error output'
filename = os.path.join(tmp_dir, "test.c")
with open(filename, "w") as handle:
handle.write("int main() { return 0")
uname_msg = " ".join(platform.uname()).strip()
error_msg = "this is my error output"
# execute test
opts = {
'clang': 'clang',
'directory': os.getcwd(),
'flags': [],
'source': filename,
'output_dir': tmp_dir,
'language': 'c',
'error_output': [error_msg],
'exit_code': 13
"clang": "clang",
"directory": os.getcwd(),
"flags": [],
"source": filename,
"output_dir": tmp_dir,
"language": "c",
"error_output": [error_msg],
"exit_code": 13,
}
sut.report_failure(opts)
# find the info file
pp_files = glob.glob(os.path.join(tmp_dir, 'failures', '*.i'))
pp_files = glob.glob(os.path.join(tmp_dir, "failures", "*.i"))
self.assertIsNot(pp_files, [])
pp_file = pp_files[0]
# info file generated and content dumped
info_file = pp_file + '.info.txt'
info_file = pp_file + ".info.txt"
self.assertTrue(os.path.exists(info_file))
with open(info_file) as info_handler:
lines = [line.strip() for line in info_handler.readlines() if
line.strip()]
self.assertEqual('Other Error', lines[1])
lines = [line.strip() for line in info_handler.readlines() if line.strip()]
self.assertEqual("Other Error", lines[1])
self.assertEqual(uname_msg, lines[3])
# error file generated and content dumped
error_file = pp_file + '.stderr.txt'
error_file = pp_file + ".stderr.txt"
self.assertTrue(os.path.exists(error_file))
with open(error_file) as error_handle:
self.assertEqual([error_msg], error_handle.readlines())
class AnalyzerTest(unittest.TestCase):
def test_nodebug_macros_appended(self):
def test(flags):
spy = Spy()
opts = {'flags': flags, 'force_debug': True}
self.assertEqual(spy.success,
sut.filter_debug_flags(opts, spy.call))
return spy.arg['flags']
opts = {"flags": flags, "force_debug": True}
self.assertEqual(spy.success, sut.filter_debug_flags(opts, spy.call))
return spy.arg["flags"]
self.assertEqual(['-UNDEBUG'], test([]))
self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG']))
self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething']))
self.assertEqual(["-UNDEBUG"], test([]))
self.assertEqual(["-DNDEBUG", "-UNDEBUG"], test(["-DNDEBUG"]))
self.assertEqual(["-DSomething", "-UNDEBUG"], test(["-DSomething"]))
def test_set_language_fall_through(self):
def language(expected, input):
spy = Spy()
input.update({'compiler': 'c', 'source': 'test.c'})
input.update({"compiler": "c", "source": "test.c"})
self.assertEqual(spy.success, sut.language_check(input, spy.call))
self.assertEqual(expected, spy.arg['language'])
self.assertEqual(expected, spy.arg["language"])
language('c', {'language': 'c', 'flags': []})
language('c++', {'language': 'c++', 'flags': []})
language("c", {"language": "c", "flags": []})
language("c++", {"language": "c++", "flags": []})
def test_set_language_stops_on_not_supported(self):
spy = Spy()
input = {
'compiler': 'c',
'flags': [],
'source': 'test.java',
'language': 'java'
}
input = {"compiler": "c", "flags": [], "source": "test.java", "language": "java"}
self.assertEquals(dict(), sut.language_check(input, spy.call))
self.assertIsNone(spy.arg)
def test_set_language_sets_flags(self):
def flags(expected, input):
spy = Spy()
input.update({'compiler': 'c', 'source': 'test.c'})
input.update({"compiler": "c", "source": "test.c"})
self.assertEqual(spy.success, sut.language_check(input, spy.call))
self.assertEqual(expected, spy.arg['flags'])
self.assertEqual(expected, spy.arg["flags"])
flags(['-x', 'c'], {'language': 'c', 'flags': []})
flags(['-x', 'c++'], {'language': 'c++', 'flags': []})
flags(["-x", "c"], {"language": "c", "flags": []})
flags(["-x", "c++"], {"language": "c++", "flags": []})
def test_set_language_from_filename(self):
def language(expected, input):
spy = Spy()
input.update({'language': None, 'flags': []})
input.update({"language": None, "flags": []})
self.assertEqual(spy.success, sut.language_check(input, spy.call))
self.assertEqual(expected, spy.arg['language'])
self.assertEqual(expected, spy.arg["language"])
language('c', {'source': 'file.c', 'compiler': 'c'})
language('c++', {'source': 'file.c', 'compiler': 'c++'})
language('c++', {'source': 'file.cxx', 'compiler': 'c'})
language('c++', {'source': 'file.cxx', 'compiler': 'c++'})
language('c++', {'source': 'file.cpp', 'compiler': 'c++'})
language('c-cpp-output', {'source': 'file.i', 'compiler': 'c'})
language('c++-cpp-output', {'source': 'file.i', 'compiler': 'c++'})
language("c", {"source": "file.c", "compiler": "c"})
language("c++", {"source": "file.c", "compiler": "c++"})
language("c++", {"source": "file.cxx", "compiler": "c"})
language("c++", {"source": "file.cxx", "compiler": "c++"})
language("c++", {"source": "file.cpp", "compiler": "c++"})
language("c-cpp-output", {"source": "file.i", "compiler": "c"})
language("c++-cpp-output", {"source": "file.i", "compiler": "c++"})
def test_arch_loop_sets_flags(self):
def flags(archs):
spy = Spy()
input = {'flags': [], 'arch_list': archs}
input = {"flags": [], "arch_list": archs}
sut.arch_check(input, spy.call)
return spy.arg['flags']
return spy.arg["flags"]
self.assertEqual([], flags([]))
self.assertEqual(['-arch', 'i386'], flags(['i386']))
self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc']))
self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc']))
self.assertEqual(["-arch", "i386"], flags(["i386"]))
self.assertEqual(["-arch", "i386"], flags(["i386", "ppc"]))
self.assertEqual(["-arch", "sparc"], flags(["i386", "sparc"]))
def test_arch_loop_stops_on_not_supported(self):
def stop(archs):
spy = Spy()
input = {'flags': [], 'arch_list': archs}
input = {"flags": [], "arch_list": archs}
self.assertEqual(dict(), sut.arch_check(input, spy.call))
self.assertIsNone(spy.arg)
stop(['ppc'])
stop(['ppc64'])
stop(["ppc"])
stop(["ppc64"])
@sut.require([])
@@ -295,45 +278,45 @@ def method_without_expecteds(opts):
return 0
@sut.require(['this', 'that'])
@sut.require(["this", "that"])
def method_with_expecteds(opts):
return 0
@sut.require([])
def method_exception_from_inside(opts):
raise Exception('here is one')
raise Exception("here is one")
class RequireDecoratorTest(unittest.TestCase):
def test_method_without_expecteds(self):
self.assertEqual(method_without_expecteds(dict()), 0)
self.assertEqual(method_without_expecteds({}), 0)
self.assertEqual(method_without_expecteds({'this': 2}), 0)
self.assertEqual(method_without_expecteds({'that': 3}), 0)
self.assertEqual(method_without_expecteds({"this": 2}), 0)
self.assertEqual(method_without_expecteds({"that": 3}), 0)
def test_method_with_expecteds(self):
self.assertRaises(AssertionError, method_with_expecteds, dict())
self.assertRaises(AssertionError, method_with_expecteds, {})
self.assertRaises(AssertionError, method_with_expecteds, {'this': 2})
self.assertRaises(AssertionError, method_with_expecteds, {'that': 3})
self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0)
self.assertRaises(AssertionError, method_with_expecteds, {"this": 2})
self.assertRaises(AssertionError, method_with_expecteds, {"that": 3})
self.assertEqual(method_with_expecteds({"this": 0, "that": 3}), 0)
def test_method_exception_not_caught(self):
self.assertRaises(Exception, method_exception_from_inside, dict())
class ReportDirectoryTest(unittest.TestCase):
# Test that successive report directory names ascend in lexicographic
# order. This is required so that report directories from two runs of
# scan-build can be easily matched up to compare results.
@unittest.skipIf(IS_WINDOWS, 'windows has low resolution timer')
@unittest.skipIf(IS_WINDOWS, "windows has low resolution timer")
def test_directory_name_comparison(self):
with libear.temporary_directory() as tmp_dir, \
sut.report_directory(tmp_dir, False) as report_dir1, \
sut.report_directory(tmp_dir, False) as report_dir2, \
sut.report_directory(tmp_dir, False) as report_dir3:
with (
libear.temporary_directory() as tmp_dir,
sut.report_directory(tmp_dir, False) as report_dir1,
sut.report_directory(tmp_dir, False) as report_dir2,
sut.report_directory(tmp_dir, False) as report_dir3,
):
self.assertLess(report_dir1, report_dir2)
self.assertLess(report_dir2, report_dir3)

View File

@@ -12,40 +12,38 @@ import os.path
class ClangGetVersion(unittest.TestCase):
def test_get_version_is_not_empty(self):
self.assertTrue(sut.get_version('clang'))
self.assertTrue(sut.get_version("clang"))
def test_get_version_throws(self):
with self.assertRaises(OSError):
sut.get_version('notexists')
sut.get_version("notexists")
class ClangGetArgumentsTest(unittest.TestCase):
def test_get_clang_arguments(self):
with libear.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.c')
with open(filename, 'w') as handle:
handle.write('')
filename = os.path.join(tmpdir, "test.c")
with open(filename, "w") as handle:
handle.write("")
result = sut.get_arguments(
['clang', '-c', filename, '-DNDEBUG', '-Dvar="this is it"'],
tmpdir)
result = sut.get_arguments(["clang", "-c", filename, "-DNDEBUG", '-Dvar="this is it"'], tmpdir)
self.assertTrue('NDEBUG' in result)
self.assertTrue("NDEBUG" in result)
self.assertTrue('var="this is it"' in result)
def test_get_clang_arguments_fails(self):
with self.assertRaises(Exception):
sut.get_arguments(['clang', '-x', 'c', 'notexist.c'], '.')
sut.get_arguments(["clang", "-x", "c", "notexist.c"], ".")
def test_get_clang_arguments_fails_badly(self):
with self.assertRaises(OSError):
sut.get_arguments(['notexist'], '.')
sut.get_arguments(["notexist"], ".")
class ClangGetCheckersTest(unittest.TestCase):
def test_get_checkers(self):
# this test is only to see is not crashing
result = sut.get_checkers('clang', [])
result = sut.get_checkers("clang", [])
self.assertTrue(result)
# do check result types
for key, value in result.items():
@@ -55,38 +53,39 @@ class ClangGetCheckersTest(unittest.TestCase):
def test_get_active_checkers(self):
# this test is only to see is not crashing
result = sut.get_active_checkers('clang', [])
result = sut.get_active_checkers("clang", [])
self.assertTrue(len(result))
# do check result types
for value in result:
self.assertEqual(str, type(value))
def test_is_active(self):
test = sut.is_active(['a', 'b.b', 'c.c.c'])
test = sut.is_active(["a", "b.b", "c.c.c"])
self.assertTrue(test('a'))
self.assertTrue(test('a.b'))
self.assertTrue(test('b.b'))
self.assertTrue(test('b.b.c'))
self.assertTrue(test('c.c.c.p'))
self.assertTrue(test("a"))
self.assertTrue(test("a.b"))
self.assertTrue(test("b.b"))
self.assertTrue(test("b.b.c"))
self.assertTrue(test("c.c.c.p"))
self.assertFalse(test('ab'))
self.assertFalse(test('ba'))
self.assertFalse(test('bb'))
self.assertFalse(test('c.c'))
self.assertFalse(test('b'))
self.assertFalse(test('d'))
self.assertFalse(test("ab"))
self.assertFalse(test("ba"))
self.assertFalse(test("bb"))
self.assertFalse(test("c.c"))
self.assertFalse(test("b"))
self.assertFalse(test("d"))
def test_parse_checkers(self):
lines = [
'OVERVIEW: Clang Static Analyzer Checkers List',
'',
'CHECKERS:',
' checker.one Checker One description',
' checker.two',
' Checker Two description']
"OVERVIEW: Clang Static Analyzer Checkers List",
"",
"CHECKERS:",
" checker.one Checker One description",
" checker.two",
" Checker Two description",
]
result = dict(sut.parse_checkers(lines))
self.assertTrue('checker.one' in result)
self.assertEqual('Checker One description', result.get('checker.one'))
self.assertTrue('checker.two' in result)
self.assertEqual('Checker Two description', result.get('checker.two'))
self.assertTrue("checker.one" in result)
self.assertEqual("Checker One description", result.get("checker.one"))
self.assertTrue("checker.two" in result)
self.assertEqual("Checker Two description", result.get("checker.two"))

View File

@@ -9,47 +9,46 @@ import unittest
class CompilerTest(unittest.TestCase):
def assert_c_compiler(self, command, cc='nope', cxx='nope++'):
def assert_c_compiler(self, command, cc="nope", cxx="nope++"):
value = sut.Compilation._split_compiler(command, cc, cxx)
self.assertIsNotNone(value)
self.assertEqual(value[0], 'c')
self.assertEqual(value[0], "c")
def assert_cxx_compiler(self, command, cc='nope', cxx='nope++'):
def assert_cxx_compiler(self, command, cc="nope", cxx="nope++"):
value = sut.Compilation._split_compiler(command, cc, cxx)
self.assertIsNotNone(value)
self.assertEqual(value[0], 'c++')
self.assertEqual(value[0], "c++")
def assert_not_compiler(self, command):
value = sut.Compilation._split_compiler(command, 'nope', 'nope')
value = sut.Compilation._split_compiler(command, "nope", "nope")
self.assertIsNone(value)
def test_compiler_call(self):
self.assert_c_compiler(['cc'])
self.assert_cxx_compiler(['CC'])
self.assert_cxx_compiler(['c++'])
self.assert_cxx_compiler(['cxx'])
self.assert_c_compiler(["cc"])
self.assert_cxx_compiler(["CC"])
self.assert_cxx_compiler(["c++"])
self.assert_cxx_compiler(["cxx"])
def test_clang_compiler_call(self):
self.assert_c_compiler(['clang'])
self.assert_c_compiler(['clang-3.6'])
self.assert_cxx_compiler(['clang++'])
self.assert_cxx_compiler(['clang++-3.5.1'])
self.assert_c_compiler(["clang"])
self.assert_c_compiler(["clang-3.6"])
self.assert_cxx_compiler(["clang++"])
self.assert_cxx_compiler(["clang++-3.5.1"])
def test_gcc_compiler_call(self):
self.assert_c_compiler(['gcc'])
self.assert_cxx_compiler(['g++'])
self.assert_c_compiler(["gcc"])
self.assert_cxx_compiler(["g++"])
def test_intel_compiler_call(self):
self.assert_c_compiler(['icc'])
self.assert_cxx_compiler(['icpc'])
self.assert_c_compiler(["icc"])
self.assert_cxx_compiler(["icpc"])
def test_aix_compiler_call(self):
self.assert_c_compiler(['xlc'])
self.assert_cxx_compiler(['xlc++'])
self.assert_cxx_compiler(['xlC'])
self.assert_c_compiler(['gxlc'])
self.assert_cxx_compiler(['gxlc++'])
self.assert_c_compiler(["xlc"])
self.assert_cxx_compiler(["xlc++"])
self.assert_cxx_compiler(["xlC"])
self.assert_c_compiler(["gxlc"])
self.assert_cxx_compiler(["gxlc++"])
# def test_open_mpi_compiler_call(self):
# self.assert_c_compiler(['mpicc'])
@@ -58,103 +57,96 @@ class CompilerTest(unittest.TestCase):
# self.assert_cxx_compiler(['mpic++'])
def test_compiler_call_with_path(self):
self.assert_c_compiler(['/usr/local/bin/gcc'])
self.assert_cxx_compiler(['/usr/local/bin/g++'])
self.assert_c_compiler(['/usr/local/bin/clang'])
self.assert_c_compiler(["/usr/local/bin/gcc"])
self.assert_cxx_compiler(["/usr/local/bin/g++"])
self.assert_c_compiler(["/usr/local/bin/clang"])
def test_cross_compiler_call(self):
self.assert_cxx_compiler(['armv7_neno-linux-gnueabi-g++'])
self.assert_cxx_compiler(["armv7_neno-linux-gnueabi-g++"])
def test_compiler_wrapper_call(self):
self.assert_c_compiler(['distcc'])
self.assert_c_compiler(['distcc', 'cc'])
self.assert_cxx_compiler(['distcc', 'c++'])
self.assert_c_compiler(['ccache'])
self.assert_c_compiler(['ccache', 'cc'])
self.assert_cxx_compiler(['ccache', 'c++'])
self.assert_c_compiler(["distcc"])
self.assert_c_compiler(["distcc", "cc"])
self.assert_cxx_compiler(["distcc", "c++"])
self.assert_c_compiler(["ccache"])
self.assert_c_compiler(["ccache", "cc"])
self.assert_cxx_compiler(["ccache", "c++"])
def test_non_compiler_call(self):
self.assert_not_compiler([])
self.assert_not_compiler([''])
self.assert_not_compiler(['ld'])
self.assert_not_compiler(['as'])
self.assert_not_compiler(['/usr/local/bin/compiler'])
self.assert_not_compiler([""])
self.assert_not_compiler(["ld"])
self.assert_not_compiler(["as"])
self.assert_not_compiler(["/usr/local/bin/compiler"])
def test_specific_compiler_call(self):
self.assert_c_compiler(['nope'], cc='nope')
self.assert_c_compiler(['./nope'], cc='nope')
self.assert_c_compiler(['/path/nope'], cc='nope')
self.assert_cxx_compiler(['nope++'], cxx='nope++')
self.assert_cxx_compiler(['./nope++'], cxx='nope++')
self.assert_cxx_compiler(['/path/nope++'], cxx='nope++')
self.assert_c_compiler(["nope"], cc="nope")
self.assert_c_compiler(["./nope"], cc="nope")
self.assert_c_compiler(["/path/nope"], cc="nope")
self.assert_cxx_compiler(["nope++"], cxx="nope++")
self.assert_cxx_compiler(["./nope++"], cxx="nope++")
self.assert_cxx_compiler(["/path/nope++"], cxx="nope++")
def assert_arguments_equal(self, expected, command):
value = sut.Compilation._split_compiler(command, 'nope', 'nope')
value = sut.Compilation._split_compiler(command, "nope", "nope")
self.assertIsNotNone(value)
self.assertEqual(expected, value[1])
def test_argument_split(self):
arguments = ['-c', 'file.c']
self.assert_arguments_equal(arguments, ['distcc'] + arguments)
self.assert_arguments_equal(arguments, ['distcc', 'cc'] + arguments)
self.assert_arguments_equal(arguments, ['distcc', 'c++'] + arguments)
self.assert_arguments_equal(arguments, ['ccache'] + arguments)
self.assert_arguments_equal(arguments, ['ccache', 'cc'] + arguments)
self.assert_arguments_equal(arguments, ['ccache', 'c++'] + arguments)
arguments = ["-c", "file.c"]
self.assert_arguments_equal(arguments, ["distcc"] + arguments)
self.assert_arguments_equal(arguments, ["distcc", "cc"] + arguments)
self.assert_arguments_equal(arguments, ["distcc", "c++"] + arguments)
self.assert_arguments_equal(arguments, ["ccache"] + arguments)
self.assert_arguments_equal(arguments, ["ccache", "cc"] + arguments)
self.assert_arguments_equal(arguments, ["ccache", "c++"] + arguments)
class SplitTest(unittest.TestCase):
def assert_compilation(self, command):
result = sut.Compilation._split_command(command, 'nope', 'nope')
result = sut.Compilation._split_command(command, "nope", "nope")
self.assertIsNotNone(result)
def assert_non_compilation(self, command):
result = sut.Compilation._split_command(command, 'nope', 'nope')
result = sut.Compilation._split_command(command, "nope", "nope")
self.assertIsNone(result)
def test_action(self):
self.assert_compilation(['clang', 'source.c'])
self.assert_compilation(['clang', '-c', 'source.c'])
self.assert_compilation(['clang', '-c', 'source.c', '-MF', 'a.d'])
self.assert_compilation(["clang", "source.c"])
self.assert_compilation(["clang", "-c", "source.c"])
self.assert_compilation(["clang", "-c", "source.c", "-MF", "a.d"])
self.assert_non_compilation(['clang', '-E', 'source.c'])
self.assert_non_compilation(['clang', '-c', '-E', 'source.c'])
self.assert_non_compilation(['clang', '-c', '-M', 'source.c'])
self.assert_non_compilation(['clang', '-c', '-MM', 'source.c'])
self.assert_non_compilation(["clang", "-E", "source.c"])
self.assert_non_compilation(["clang", "-c", "-E", "source.c"])
self.assert_non_compilation(["clang", "-c", "-M", "source.c"])
self.assert_non_compilation(["clang", "-c", "-MM", "source.c"])
def assert_source_files(self, expected, command):
result = sut.Compilation._split_command(command, 'nope', 'nope')
result = sut.Compilation._split_command(command, "nope", "nope")
self.assertIsNotNone(result)
self.assertEqual(expected, result.files)
def test_source_file(self):
self.assert_source_files(['src.c'], ['clang', 'src.c'])
self.assert_source_files(['src.c'], ['clang', '-c', 'src.c'])
self.assert_source_files(['src.C'], ['clang', '-x', 'c', 'src.C'])
self.assert_source_files(['src.cpp'], ['clang++', '-c', 'src.cpp'])
self.assert_source_files(['s1.c', 's2.c'],
['clang', '-c', 's1.c', 's2.c'])
self.assert_source_files(['s1.c', 's2.c'],
['cc', 's1.c', 's2.c', '-ldp', '-o', 'a.out'])
self.assert_source_files(['src.c'],
['clang', '-c', '-I', './include', 'src.c'])
self.assert_source_files(['src.c'],
['clang', '-c', '-I', '/opt/inc', 'src.c'])
self.assert_source_files(['src.c'],
['clang', '-c', '-Dconfig=file.c', 'src.c'])
self.assert_source_files(["src.c"], ["clang", "src.c"])
self.assert_source_files(["src.c"], ["clang", "-c", "src.c"])
self.assert_source_files(["src.C"], ["clang", "-x", "c", "src.C"])
self.assert_source_files(["src.cpp"], ["clang++", "-c", "src.cpp"])
self.assert_source_files(["s1.c", "s2.c"], ["clang", "-c", "s1.c", "s2.c"])
self.assert_source_files(["s1.c", "s2.c"], ["cc", "s1.c", "s2.c", "-ldp", "-o", "a.out"])
self.assert_source_files(["src.c"], ["clang", "-c", "-I", "./include", "src.c"])
self.assert_source_files(["src.c"], ["clang", "-c", "-I", "/opt/inc", "src.c"])
self.assert_source_files(["src.c"], ["clang", "-c", "-Dconfig=file.c", "src.c"])
self.assert_non_compilation(['cc', 'this.o', 'that.o', '-o', 'a.out'])
self.assert_non_compilation(['cc', 'this.o', '-lthat', '-o', 'a.out'])
self.assert_non_compilation(["cc", "this.o", "that.o", "-o", "a.out"])
self.assert_non_compilation(["cc", "this.o", "-lthat", "-o", "a.out"])
def assert_flags(self, expected, flags):
command = ['clang', '-c', 'src.c'] + flags
result = sut.Compilation._split_command(command, 'nope', 'nope')
command = ["clang", "-c", "src.c"] + flags
result = sut.Compilation._split_command(command, "nope", "nope")
self.assertIsNotNone(result)
self.assertEqual(expected, result.flags)
def test_filter_flags(self):
def same(expected):
self.assert_flags(expected, expected)
@@ -162,50 +154,49 @@ class SplitTest(unittest.TestCase):
self.assert_flags([], flags)
same([])
same(['-I', '/opt/me/include', '-DNDEBUG', '-ULIMITS'])
same(['-O', '-O2'])
same(['-m32', '-mmms'])
same(['-Wall', '-Wno-unused', '-g', '-funroll-loops'])
same(["-I", "/opt/me/include", "-DNDEBUG", "-ULIMITS"])
same(["-O", "-O2"])
same(["-m32", "-mmms"])
same(["-Wall", "-Wno-unused", "-g", "-funroll-loops"])
filtered([])
filtered(['-lclien', '-L/opt/me/lib', '-L', '/opt/you/lib'])
filtered(['-static'])
filtered(['-MD', '-MT', 'something'])
filtered(['-MMD', '-MF', 'something'])
filtered(["-lclien", "-L/opt/me/lib", "-L", "/opt/you/lib"])
filtered(["-static"])
filtered(["-MD", "-MT", "something"])
filtered(["-MMD", "-MF", "something"])
class SourceClassifierTest(unittest.TestCase):
def assert_non_source(self, filename):
result = sut.classify_source(filename)
self.assertIsNone(result)
def assert_c_source(self, filename, force):
result = sut.classify_source(filename, force)
self.assertEqual('c', result)
self.assertEqual("c", result)
def assert_cxx_source(self, filename, force):
result = sut.classify_source(filename, force)
self.assertEqual('c++', result)
self.assertEqual("c++", result)
def test_sources(self):
self.assert_non_source('file.o')
self.assert_non_source('file.exe')
self.assert_non_source('/path/file.o')
self.assert_non_source('clang')
self.assert_non_source("file.o")
self.assert_non_source("file.exe")
self.assert_non_source("/path/file.o")
self.assert_non_source("clang")
self.assert_c_source('file.c', True)
self.assert_cxx_source('file.c', False)
self.assert_c_source("file.c", True)
self.assert_cxx_source("file.c", False)
self.assert_cxx_source('file.cxx', True)
self.assert_cxx_source('file.cxx', False)
self.assert_cxx_source('file.c++', True)
self.assert_cxx_source('file.c++', False)
self.assert_cxx_source('file.cpp', True)
self.assert_cxx_source('file.cpp', False)
self.assert_cxx_source("file.cxx", True)
self.assert_cxx_source("file.cxx", False)
self.assert_cxx_source("file.c++", True)
self.assert_cxx_source("file.c++", False)
self.assert_cxx_source("file.cpp", True)
self.assert_cxx_source("file.cpp", False)
self.assert_c_source('/path/file.c', True)
self.assert_c_source('./path/file.c', True)
self.assert_c_source('../path/file.c', True)
self.assert_c_source('/file.c', True)
self.assert_c_source('./file.c', True)
self.assert_c_source("/path/file.c", True)
self.assert_c_source("./path/file.c", True)
self.assert_c_source("../path/file.c", True)
self.assert_c_source("/file.c", True)
self.assert_c_source("./file.c", True)

View File

@@ -12,58 +12,51 @@ import libear
import libscanbuild.intercept as sut
from libscanbuild import Execution
IS_WINDOWS = os.getenv('windows')
IS_WINDOWS = os.getenv("windows")
class InterceptUtilTest(unittest.TestCase):
def test_read_write_exec_trace(self):
input_one = Execution(
pid=123,
cwd='/path/to/here',
cmd=['cc', '-c', 'this.c'])
input_one = Execution(pid=123, cwd="/path/to/here", cmd=["cc", "-c", "this.c"])
with libear.temporary_directory() as tmp_dir:
temp_file = os.path.join(tmp_dir, 'single_report.cmd')
temp_file = os.path.join(tmp_dir, "single_report.cmd")
sut.write_exec_trace(temp_file, input_one)
result = sut.parse_exec_trace(temp_file)
self.assertEqual(input_one, result)
def test_expand_cmd_with_response_files(self):
with libear.temporary_directory() as tmp_dir:
response_file = os.path.join(tmp_dir, 'response.jom')
with open(response_file, 'w') as response_file_handle:
response_file_handle.write(' Hello\n')
response_file_handle.write(' World!\n')
cmd_input = ['echo', '@'+response_file]
cmd_output = ['echo', 'Hello', 'World!']
self.assertEqual(cmd_output,
sut.expand_cmd_with_response_files(cmd_input))
response_file = os.path.join(tmp_dir, "response.jom")
with open(response_file, "w") as response_file_handle:
response_file_handle.write(" Hello\n")
response_file_handle.write(" World!\n")
cmd_input = ["echo", "@" + response_file]
cmd_output = ["echo", "Hello", "World!"]
self.assertEqual(cmd_output, sut.expand_cmd_with_response_files(cmd_input))
def test_write_exec_trace_with_response(self):
with libear.temporary_directory() as tmp_dir:
response_file_one = os.path.join(tmp_dir, 'response1.jom')
response_file_two = os.path.join(tmp_dir, 'response2.jom')
response_file_one = os.path.join(tmp_dir, "response1.jom")
response_file_two = os.path.join(tmp_dir, "response2.jom")
input_one = Execution(
pid=123,
cwd='/path/to/here',
cmd=['clang-cl', '-c', '@'+response_file_one,
'-Idoes_not_exists', '@'+response_file_two])
cwd="/path/to/here",
cmd=["clang-cl", "-c", "@" + response_file_one, "-Idoes_not_exists", "@" + response_file_two],
)
output_one = Execution(
pid=123,
cwd='/path/to/here',
cmd=['clang-cl', '-c', '-DSOMETHING_HERE',
'-Idoes_not_exists', 'that.cpp'])
with open(response_file_one, 'w') as response_file_one_handle:
response_file_one_handle.write(' -DSOMETHING_HERE\n')
with open(response_file_two, 'w') as response_file_two_handle:
response_file_two_handle.write(' that.cpp\n')
pid=123, cwd="/path/to/here", cmd=["clang-cl", "-c", "-DSOMETHING_HERE", "-Idoes_not_exists", "that.cpp"]
)
with open(response_file_one, "w") as response_file_one_handle:
response_file_one_handle.write(" -DSOMETHING_HERE\n")
with open(response_file_two, "w") as response_file_two_handle:
response_file_two_handle.write(" that.cpp\n")
temp_file = os.path.join(tmp_dir, 'single_report.cmd')
temp_file = os.path.join(tmp_dir, "single_report.cmd")
sut.write_exec_trace(temp_file, input_one)
result = sut.parse_exec_trace(temp_file)
self.assertEqual(output_one, result)
@unittest.skipIf(IS_WINDOWS, 'this code is not running on windows')
@unittest.skipIf(IS_WINDOWS, "this code is not running on windows")
def test_sip(self):
def create_status_report(filename, message):
content = """#!/usr/bin/env sh
@@ -74,24 +67,24 @@ class InterceptUtilTest(unittest.TestCase):
echo 'la-la-la'
""".format(message)
lines = [line.strip() for line in content.split(os.linesep)]
with open(filename, 'w') as handle:
with open(filename, "w") as handle:
handle.write(os.linesep.join(lines))
handle.close()
os.chmod(filename, 0x1ff)
os.chmod(filename, 0x1FF)
def create_csrutil(dest_dir, status):
filename = os.path.join(dest_dir, 'csrutil')
message = 'System Integrity Protection status: {0}'.format(status)
filename = os.path.join(dest_dir, "csrutil")
message = "System Integrity Protection status: {0}".format(status)
return create_status_report(filename, message)
enabled = 'enabled'
disabled = 'disabled'
osx = 'darwin'
enabled = "enabled"
disabled = "disabled"
osx = "darwin"
saved = os.environ['PATH']
saved = os.environ["PATH"]
with libear.temporary_directory() as tmp_dir:
try:
os.environ['PATH'] = os.pathsep.join([tmp_dir, saved])
os.environ["PATH"] = os.pathsep.join([tmp_dir, saved])
create_csrutil(tmp_dir, enabled)
self.assertTrue(sut.is_preload_disabled(osx))
@@ -99,13 +92,13 @@ class InterceptUtilTest(unittest.TestCase):
create_csrutil(tmp_dir, disabled)
self.assertFalse(sut.is_preload_disabled(osx))
finally:
os.environ['PATH'] = saved
os.environ["PATH"] = saved
try:
os.environ['PATH'] = ''
os.environ["PATH"] = ""
# shall be false when it's not in the path
self.assertFalse(sut.is_preload_disabled(osx))
self.assertFalse(sut.is_preload_disabled('unix'))
self.assertFalse(sut.is_preload_disabled("unix"))
finally:
os.environ['PATH'] = saved
os.environ["PATH"] = saved

View File

@@ -24,7 +24,7 @@ class TemporaryDirectoryTest(unittest.TestCase):
with sut.temporary_directory() as tmpdir:
self.assertTrue(os.path.isdir(tmpdir))
dir_name = tmpdir
raise RuntimeError('message')
raise RuntimeError("message")
except Exception:
self.assertIsNotNone(dir_name)
self.assertFalse(os.path.exists(dir_name))

View File

@@ -9,26 +9,17 @@ import unittest
class ShellSplitTest(unittest.TestCase):
def test_regular_commands(self):
self.assertEqual([], sut.shell_split(""))
self.assertEqual(['clang', '-c', 'file.c'],
sut.shell_split('clang -c file.c'))
self.assertEqual(['clang', '-c', 'file.c'],
sut.shell_split('clang -c file.c'))
self.assertEqual(['clang', '-c', 'file.c'],
sut.shell_split('clang -c\tfile.c'))
self.assertEqual(["clang", "-c", "file.c"], sut.shell_split("clang -c file.c"))
self.assertEqual(["clang", "-c", "file.c"], sut.shell_split("clang -c file.c"))
self.assertEqual(["clang", "-c", "file.c"], sut.shell_split("clang -c\tfile.c"))
def test_quoted_commands(self):
self.assertEqual(['clang', '-c', 'file.c'],
sut.shell_split('"clang" -c "file.c"'))
self.assertEqual(['clang', '-c', 'file.c'],
sut.shell_split("'clang' -c 'file.c'"))
self.assertEqual(["clang", "-c", "file.c"], sut.shell_split('"clang" -c "file.c"'))
self.assertEqual(["clang", "-c", "file.c"], sut.shell_split("'clang' -c 'file.c'"))
def test_shell_escaping(self):
self.assertEqual(['clang', '-c', 'file.c', '-Dv=space value'],
sut.shell_split(r'clang -c file.c -Dv="space value"'))
self.assertEqual(['clang', '-c', 'file.c', '-Dv="quote'],
sut.shell_split(r'clang -c file.c -Dv=\"quote'))
self.assertEqual(['clang', '-c', 'file.c', '-Dv=(word)'],
sut.shell_split(r'clang -c file.c -Dv=\(word\)'))
self.assertEqual(["clang", "-c", "file.c", "-Dv=space value"], sut.shell_split(r'clang -c file.c -Dv="space value"'))
self.assertEqual(["clang", "-c", "file.c", '-Dv="quote'], sut.shell_split(r"clang -c file.c -Dv=\"quote"))
self.assertEqual(["clang", "-c", "file.c", "-Dv=(word)"], sut.shell_split(r"clang -c file.c -Dv=\(word\)"))

View File

@@ -10,13 +10,13 @@ import unittest
import os
import os.path
IS_WINDOWS = os.getenv('windows')
IS_WINDOWS = os.getenv("windows")
def run_bug_parse(content):
with libear.temporary_directory() as tmp_dir:
file_name = os.path.join(tmp_dir, 'test.html')
with open(file_name, 'w') as handle:
file_name = os.path.join(tmp_dir, "test.html")
with open(file_name, "w") as handle:
lines = (line + os.linesep for line in content)
handle.writelines(lines)
for bug in sut.parse_bug_html(file_name):
@@ -24,7 +24,6 @@ def run_bug_parse(content):
class ParseFileTest(unittest.TestCase):
def test_parse_bug(self):
content = [
"some header",
@@ -37,30 +36,27 @@ class ParseFileTest(unittest.TestCase):
"<!-- BUGPATHLENGTH 4 -->",
"<!-- BUGMETAEND -->",
"<!-- REPORTHEADER -->",
"some tails"]
"some tails",
]
result = run_bug_parse(content)
self.assertEqual(result.category, 'Logic error')
self.assertEqual(result.category, "Logic error")
self.assertEqual(result.path_length, 4)
self.assertEqual(result.line, 5)
self.assertEqual(result.type, 'Division by zero')
self.assertEqual(result.file, 'xx')
self.assertEqual(result.type, "Division by zero")
self.assertEqual(result.file, "xx")
def test_parse_bug_empty(self):
content = []
result = run_bug_parse(content)
self.assertEqual(result.category, 'Other')
self.assertEqual(result.category, "Other")
self.assertEqual(result.path_length, 1)
self.assertEqual(result.line, 0)
def test_parse_crash(self):
content = [
"/some/path/file.c",
"Some very serious Error",
"bla",
"bla-bla"]
content = ["/some/path/file.c", "Some very serious Error", "bla", "bla-bla"]
with libear.temporary_directory() as tmp_dir:
file_name = os.path.join(tmp_dir, 'file.i.info.txt')
with open(file_name, 'w') as handle:
file_name = os.path.join(tmp_dir, "file.i.info.txt")
with open(file_name, "w") as handle:
handle.write(os.linesep.join(content))
source, problem = sut.Crash._parse_info_file(file_name)
self.assertEqual(source, content[0].rstrip())
@@ -68,20 +64,21 @@ class ParseFileTest(unittest.TestCase):
def test_parse_real_crash(self):
import libscanbuild.analyze as sut2
with libear.temporary_directory() as tmp_dir:
filename = os.path.join(tmp_dir, 'test.c')
with open(filename, 'w') as handle:
handle.write('int main() { return 0')
filename = os.path.join(tmp_dir, "test.c")
with open(filename, "w") as handle:
handle.write("int main() { return 0")
# produce failure report
opts = {
'clang': 'clang',
'directory': os.getcwd(),
'flags': [],
'source': filename,
'output_dir': tmp_dir,
'language': 'c',
'error_output': 'some output',
'exit_code': 13
"clang": "clang",
"directory": os.getcwd(),
"flags": [],
"source": filename,
"output_dir": tmp_dir,
"language": "c",
"error_output": "some output",
"exit_code": 13,
}
sut2.report_failure(opts)
# verify
@@ -89,94 +86,74 @@ class ParseFileTest(unittest.TestCase):
self.assertEqual(1, len(crashes))
crash = crashes[0]
self.assertEqual(filename, crash.source)
self.assertEqual('Other Error', crash.problem)
self.assertEqual(crash.file + '.info.txt', crash.info)
self.assertEqual(crash.file + '.stderr.txt', crash.stderr)
self.assertEqual("Other Error", crash.problem)
self.assertEqual(crash.file + ".info.txt", crash.info)
self.assertEqual(crash.file + ".stderr.txt", crash.stderr)
class ReportMethodTest(unittest.TestCase):
@unittest.skipIf(IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(IS_WINDOWS, "windows has different path patterns")
def test_chop(self):
self.assertEqual('file', sut.chop('/prefix', '/prefix/file'))
self.assertEqual('file', sut.chop('/prefix/', '/prefix/file'))
self.assertEqual('lib/file', sut.chop('/prefix/', '/prefix/lib/file'))
self.assertEqual('/prefix/file', sut.chop('', '/prefix/file'))
self.assertEqual("file", sut.chop("/prefix", "/prefix/file"))
self.assertEqual("file", sut.chop("/prefix/", "/prefix/file"))
self.assertEqual("lib/file", sut.chop("/prefix/", "/prefix/lib/file"))
self.assertEqual("/prefix/file", sut.chop("", "/prefix/file"))
@unittest.skipIf(IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(IS_WINDOWS, "windows has different path patterns")
def test_chop_when_cwd(self):
self.assertEqual('../src/file', sut.chop('/cwd', '/src/file'))
self.assertEqual('../src/file', sut.chop('/prefix/cwd',
'/prefix/src/file'))
self.assertEqual("../src/file", sut.chop("/cwd", "/src/file"))
self.assertEqual("../src/file", sut.chop("/prefix/cwd", "/prefix/src/file"))
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_chop_on_windows(self):
self.assertEqual('file', sut.chop('c:\\prefix', 'c:\\prefix\\file'))
self.assertEqual('file', sut.chop('c:\\prefix\\', 'c:\\prefix\\file'))
self.assertEqual('lib\\file',
sut.chop('c:\\prefix\\', 'c:\\prefix\\lib\\file'))
self.assertEqual('c:\\prefix\\file', sut.chop('', 'c:\\prefix\\file'))
self.assertEqual('c:\\prefix\\file',
sut.chop('e:\\prefix', 'c:\\prefix\\file'))
self.assertEqual("file", sut.chop("c:\\prefix", "c:\\prefix\\file"))
self.assertEqual("file", sut.chop("c:\\prefix\\", "c:\\prefix\\file"))
self.assertEqual("lib\\file", sut.chop("c:\\prefix\\", "c:\\prefix\\lib\\file"))
self.assertEqual("c:\\prefix\\file", sut.chop("", "c:\\prefix\\file"))
self.assertEqual("c:\\prefix\\file", sut.chop("e:\\prefix", "c:\\prefix\\file"))
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_chop_when_cwd_on_windows(self):
self.assertEqual('..\\src\\file',
sut.chop('c:\\cwd', 'c:\\src\\file'))
self.assertEqual('..\\src\\file',
sut.chop('z:\\prefix\\cwd', 'z:\\prefix\\src\\file'))
self.assertEqual("..\\src\\file", sut.chop("c:\\cwd", "c:\\src\\file"))
self.assertEqual("..\\src\\file", sut.chop("z:\\prefix\\cwd", "z:\\prefix\\src\\file"))
class GetPrefixFromCompilationDatabaseTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(
sut.commonprefix([]), '')
self.assertEqual(sut.commonprefix([]), "")
@unittest.skipIf(IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(IS_WINDOWS, "windows has different path patterns")
def test_with_different_filenames(self):
self.assertEqual(
sut.commonprefix(['/tmp/a.c', '/tmp/b.c']), '/tmp')
self.assertEqual(sut.commonprefix(["/tmp/a.c", "/tmp/b.c"]), "/tmp")
@unittest.skipIf(IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(IS_WINDOWS, "windows has different path patterns")
def test_with_different_dirnames(self):
self.assertEqual(
sut.commonprefix(['/tmp/abs/a.c', '/tmp/ack/b.c']), '/tmp')
self.assertEqual(sut.commonprefix(["/tmp/abs/a.c", "/tmp/ack/b.c"]), "/tmp")
@unittest.skipIf(IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(IS_WINDOWS, "windows has different path patterns")
def test_no_common_prefix(self):
self.assertEqual(
sut.commonprefix(['/tmp/abs/a.c', '/usr/ack/b.c']), '/')
self.assertEqual(sut.commonprefix(["/tmp/abs/a.c", "/usr/ack/b.c"]), "/")
@unittest.skipIf(IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(IS_WINDOWS, "windows has different path patterns")
def test_with_single_file(self):
self.assertEqual(
sut.commonprefix(['/tmp/a.c']), '/tmp')
self.assertEqual(sut.commonprefix(["/tmp/a.c"]), "/tmp")
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_with_different_filenames_on_windows(self):
self.assertEqual(
sut.commonprefix(['c:\\tmp\\a.c', 'c:\\tmp\\b.c']), 'c:\\tmp')
self.assertEqual(sut.commonprefix(["c:\\tmp\\a.c", "c:\\tmp\\b.c"]), "c:\\tmp")
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_with_different_dirnames_on_windows(self):
self.assertEqual(
sut.commonprefix(['c:\\tmp\\abs\\a.c', 'c:\\tmp\\ack\\b.c']),
'c:\\tmp')
self.assertEqual(sut.commonprefix(["c:\\tmp\\abs\\a.c", "c:\\tmp\\ack\\b.c"]), "c:\\tmp")
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_no_common_prefix_on_windows(self):
self.assertEqual(
sut.commonprefix(['z:\\tmp\\abs\\a.c', 'z:\\usr\\ack\\b.c']),
'z:\\')
self.assertEqual(sut.commonprefix(["z:\\tmp\\abs\\a.c", "z:\\usr\\ack\\b.c"]), "z:\\")
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_different_drive_on_windows(self):
self.assertEqual(
sut.commonprefix(['c:\\tmp\\abs\\a.c', 'z:\\usr\\ack\\b.c']),
'')
self.assertEqual(sut.commonprefix(["c:\\tmp\\abs\\a.c", "z:\\usr\\ack\\b.c"]), "")
@unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns')
@unittest.skipIf(not IS_WINDOWS, "windows has different path patterns")
def test_with_single_file_on_windows(self):
self.assertEqual(
sut.commonprefix(['z:\\tmp\\a.c']), 'z:\\tmp')
self.assertEqual(sut.commonprefix(["z:\\tmp\\a.c"]), "z:\\tmp")