Remove valgrind and include it using submodule

Change-Id: I4cfdb6bb908a386ad29b9932534cc5d187106f79
This commit is contained in:
Kongqun Yang 2014-03-14 12:00:08 -07:00 committed by KongQun Yang
parent b387b4083c
commit 6046cde3d8
98 changed files with 4 additions and 20752 deletions

3
.gitmodules vendored
View File

@ -23,3 +23,6 @@
[submodule "tools/gyp"]
	path = tools/gyp
	url = https://chromium.googlesource.com/external/gyp.git
[submodule "tools/valgrind"]
path = tools/valgrind
url = https://chromium.googlesource.com/chromium/src/tools/valgrind.git

1
tools/valgrind Submodule

@ -0,0 +1 @@
Subproject commit 6cd50460a2a01992f4494955d9f345f1e6139db5

View File

@ -1,5 +0,0 @@
set noparent
bruening@chromium.org
glider@chromium.org
thestig@chromium.org
timurrrr@chromium.org

View File

@ -1,24 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from third_party import asan_symbolize
import re
import sys
def fix_filename(file_name):
  """Strip configured path prefixes and canonicalize well-known frames.

  Each command-line argument is treated as a path prefix to cut from
  file_name; ASan runtime frames and crtstuff frames are then rewritten
  to stable placeholder names.
  """
  result = file_name
  for prefix in sys.argv[1:]:
    result = re.sub(".*" + prefix, "", result)
  result = re.sub(".*asan_[a-z_]*.cc:[0-9]*", "_asan_rtl_", result)
  return re.sub(".*crtstuff.c:0", "???:0", result)
def main():
  # Delegate the heavy lifting to the bundled LLVM symbolization loop;
  # our fix_filename hook rewrites binary paths before they are printed.
  loop = asan_symbolize.SymbolizationLoop(binary_name_filter=fix_filename)
  loop.process_stdin()

if __name__ == '__main__':
  main()

View File

@ -1,16 +0,0 @@
#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# A wrapper that runs the program and filters the output through
# asan_symbolize.py and c++filt
#
# TODO(glider): this should be removed once EmbeddedTool in valgrind_test.py
# starts supporting pipes.

# Locate sibling scripts relative to this wrapper's own directory.
export THISDIR=`dirname $0`

# Run the wrapped command with stderr merged into stdout, so ASan reports
# are symbolized (asan_symbolize.py) and demangled (c++filt) as one stream.
"$@" 2>&1 |
$THISDIR/asan_symbolize.py |
c++filt

View File

@ -1,6 +0,0 @@
Name: asan_symbolize.py
License: University of Illinois Open Source License.
Version: 183006
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/scripts/asan_symbolize.py?view=co&content-type=text%2Fplain
asan_symbolize.py is a verbatim copy of asan_symbolize.py in the LLVM trunk.

View File

@ -1,365 +0,0 @@
#!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import bisect
import getopt
import os
import re
import subprocess
import sys
# NOTE(review): this global appears unused below; SymbolizationLoop keeps
# its own llvm_symbolizer instance — confirm before removing.
llvm_symbolizer = None
# Cache of ChainSymbolizer objects, keyed by binary path.
symbolizers = {}
# Verbose tracing of symbolizer subprocess traffic.
DEBUG = False
# Whether backends should demangle C++ names; set by the --demangle flag.
demangle = False;
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
  """Normalize a source path for report output.

  Cuts every path prefix given on the command line, then maps ASan runtime
  frames and crtstuff frames to fixed placeholder names.
  """
  for cut in sys.argv[1:]:
    file_name = re.sub('.*' + cut, '', file_name)
  substitutions = [('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_'),
                   ('.*crtstuff.c:0', '???:0')]
  for pattern, replacement in substitutions:
    file_name = re.sub(pattern, replacement, file_name)
  return file_name
class Symbolizer(object):
  """Base class for all symbolizer back-ends.

  Subclasses override symbolize(); this base implementation always reports
  failure by returning None.
  """

  def __init__(self):
    pass

  def symbolize(self, addr, binary, offset):
    """Resolve one (binary, offset) pair to source locations.

    Args:
      addr: virtual address of an instruction.
      binary: path to the executable/shared object holding the instruction.
      offset: instruction offset inside binary.

    Returns:
      A list of strings, one per inlined frame (function name, file name,
      line and column numbers), or None when the address cannot be resolved.
    """
    return None
class LLVMSymbolizer(Symbolizer):
  """Symbolizer backed by a long-running llvm-symbolizer subprocess."""

  def __init__(self, symbolizer_path):
    super(LLVMSymbolizer, self).__init__()
    self.symbolizer_path = symbolizer_path
    # Pipe to the llvm-symbolizer process, or None if the binary is missing.
    self.pipe = self.open_llvm_symbolizer()

  def open_llvm_symbolizer(self):
    """Spawn llvm-symbolizer; returns its Popen object or None."""
    if not os.path.exists(self.symbolizer_path):
      return None
    cmd = [self.symbolizer_path,
           '--use-symbol-table=true',
           # 'demangle' is the module-level flag set by --demangle.
           '--demangle=%s' % demangle,
           '--functions=true',
           '--inlining=true']
    if DEBUG:
      print ' '.join(cmd)
    return subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if not self.pipe:
      return None
    result = []
    try:
      # llvm-symbolizer reads "<binary> <offset>" requests from stdin and
      # answers with alternating function-name / file-location lines,
      # terminated by an empty line.
      symbolizer_input = '%s %s' % (binary, offset)
      if DEBUG:
        print symbolizer_input
      print >> self.pipe.stdin, symbolizer_input
      while True:
        function_name = self.pipe.stdout.readline().rstrip()
        if not function_name:
          break
        file_name = self.pipe.stdout.readline().rstrip()
        file_name = fix_filename(file_name)
        if (not function_name.startswith('??') and
            not file_name.startswith('??')):
          # Append only valid frames.
          result.append('%s in %s %s' % (addr, function_name,
                                         file_name))
    except Exception:
      # Any pipe failure means this symbolizer produced nothing useful;
      # fall through to the None return so the chain can try the next one.
      result = []
    if not result:
      result = None
    return result
def LLVMSymbolizerFactory(system):
  """Build an LLVMSymbolizer, honoring $LLVM_SYMBOLIZER_PATH when set.

  Falls back to 'llvm-symbolizer', assuming it can be found in PATH.
  """
  symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH') or 'llvm-symbolizer'
  return LLVMSymbolizer(symbolizer_path)
class Addr2LineSymbolizer(Symbolizer):
  """Symbolizer that drives a per-binary addr2line subprocess."""

  def __init__(self, binary):
    super(Addr2LineSymbolizer, self).__init__()
    self.binary = binary
    self.pipe = self.open_addr2line()

  def open_addr2line(self):
    """Spawn 'addr2line -f [--demangle] -e <binary>' and return its Popen."""
    cmd = ['addr2line', '-f']
    if demangle:
      cmd += ['--demangle']
    cmd += ['-e', self.binary]
    if DEBUG:
      print ' '.join(cmd)
    return subprocess.Popen(cmd,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    # This addr2line instance is bound to one binary; refuse other requests.
    if self.binary != binary:
      return None
    try:
      # addr2line answers each offset with two lines: function, then file.
      print >> self.pipe.stdin, offset
      function_name = self.pipe.stdout.readline().rstrip()
      file_name = self.pipe.stdout.readline().rstrip()
    except Exception:
      function_name = ''
      file_name = ''
    file_name = fix_filename(file_name)
    return ['%s in %s %s' % (addr, function_name, file_name)]
class DarwinSymbolizer(Symbolizer):
  """Symbolizer that shells out to Mac OS 'atos', one process per request."""

  def __init__(self, addr, binary):
    super(DarwinSymbolizer, self).__init__()
    self.binary = binary
    # Guess which arch we're running. 10 = len('0x') + 8 hex digits.
    if len(addr) > 10:
      self.arch = 'x86_64'
    else:
      self.arch = 'i386'
    self.pipe = None

  def write_addr_to_pipe(self, offset):
    # atos expects a hex address on stdin.
    print >> self.pipe.stdin, '0x%x' % int(offset, 16)

  def open_atos(self):
    if DEBUG:
      print 'atos -o %s -arch %s' % (self.binary, self.arch)
    cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
    self.pipe = subprocess.Popen(cmdline,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    if self.binary != binary:
      return None
    # A fresh atos process is started for every request; its stdin is closed
    # after one address so it answers and exits.
    self.open_atos()
    self.write_addr_to_pipe(offset)
    self.pipe.stdin.close()
    atos_line = self.pipe.stdout.readline().rstrip()
    # A well-formed atos response looks like this:
    # foo(type1, type2) (in object.name) (filename.cc:80)
    match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
    if DEBUG:
      print 'atos_line: ', atos_line
    if match:
      function_name = match.group(1)
      # Drop the parenthesized argument list; only the name is reported.
      function_name = re.sub('\(.*?\)', '', function_name)
      file_name = fix_filename(match.group(3))
      return ['%s in %s %s' % (addr, function_name, file_name)]
    else:
      # Unparsable output: echo atos's raw answer after the address.
      return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
  """Delegates to an ordered list of symbolizers; the first success wins.

  None entries in the list are tolerated and skipped.
  """

  def __init__(self, symbolizer_list):
    super(ChainSymbolizer, self).__init__()
    self.symbolizer_list = symbolizer_list

  def symbolize(self, addr, binary, offset):
    """Overrides Symbolizer.symbolize."""
    for delegate in self.symbolizer_list:
      if not delegate:
        continue
      frames = delegate.symbolize(addr, binary, offset)
      if frames:
        return frames
    return None

  def append_symbolizer(self, symbolizer):
    # Extend the fallback chain with one more backend.
    self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
  """Return a BreakpadSymbolizer if a symbol file exists for this binary.

  The symbol file path is <binary> + $BREAKPAD_SUFFIX. Returns None when the
  environment variable is unset/empty or the file does not exist.
  """
  suffix = os.getenv('BREAKPAD_SUFFIX')
  if not suffix:
    return None
  filename = binary + suffix
  if not os.access(filename, os.F_OK):
    return None
  return BreakpadSymbolizer(filename)
def SystemSymbolizerFactory(system, addr, binary):
  """Pick the OS-native symbolizer: atos on Darwin, addr2line on Linux.

  Returns None for any other platform name.
  """
  builders = {'Darwin': lambda: DarwinSymbolizer(addr, binary),
              'Linux': lambda: Addr2LineSymbolizer(binary)}
  builder = builders.get(system)
  return builder() if builder else None
class BreakpadSymbolizer(Symbolizer):
  """Symbolizer that resolves addresses from a parsed Breakpad .sym file."""

  def __init__(self, filename):
    super(BreakpadSymbolizer, self).__init__()
    self.filename = filename
    lines = file(filename).readlines()
    self.files = []         # FILE records, indexed by file number.
    self.symbols = {}       # function start address -> symbol name.
    self.address_list = []  # sorted start addresses of line records.
    self.addresses = {}     # address -> (func addr, size, line, file no).
    # The first line is the module record, e.g.:
    # MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
    fragments = lines[0].rstrip().split()
    self.arch = fragments[2]
    self.debug_id = fragments[3]
    self.binary = ' '.join(fragments[4:])
    self.parse_lines(lines[1:])

  def parse_lines(self, lines):
    """Populate the symbol/file/line tables from the .sym records."""
    cur_function_addr = ''
    for line in lines:
      fragments = line.split()
      if fragments[0] == 'FILE':
        # File numbers are expected to appear in sequential order.
        assert int(fragments[1]) == len(self.files)
        self.files.append(' '.join(fragments[2:]))
      elif fragments[0] == 'PUBLIC':
        # fragments[1] is the hex address; the name starts at fragments[3].
        self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
      elif fragments[0] in ['CFI', 'STACK']:
        # Unwind info is irrelevant for symbolization.
        pass
      elif fragments[0] == 'FUNC':
        cur_function_addr = int(fragments[1], 16)
        if not cur_function_addr in self.symbols.keys():
          self.symbols[cur_function_addr] = ' '.join(fragments[4:])
      else:
        # Line starting with an address.
        addr = int(fragments[0], 16)
        self.address_list.append(addr)
        # Tuple of symbol address, size, line, file number.
        self.addresses[addr] = (cur_function_addr,
                                int(fragments[1], 16),
                                int(fragments[2]),
                                int(fragments[3]))
    self.address_list.sort()

  def get_sym_file_line(self, addr):
    """Return (symbol, filename, line) covering addr, or None."""
    key = None
    if addr in self.addresses.keys():
      key = addr
    else:
      # Fall back to the closest line record preceding addr.
      index = bisect.bisect_left(self.address_list, addr)
      if index == 0:
        return None
      else:
        key = self.address_list[index - 1]
    sym_id, size, line_no, file_no = self.addresses[key]
    symbol = self.symbols[sym_id]
    filename = self.files[file_no]
    if addr < key + size:
      return symbol, filename, line_no
    else:
      # addr lies past the record's extent: not covered by this file.
      return None

  def symbolize(self, addr, binary, offset):
    # Only answers for the module this .sym file describes.
    if self.binary != binary:
      return None
    res = self.get_sym_file_line(int(offset, 16))
    if res:
      function_name, file_name, line_no = res
      result = ['%s in %s %s:%d' % (
          addr, function_name, file_name, line_no)]
      # NOTE(review): unconditional print of the result list looks like
      # leftover debugging — it duplicates what the caller prints. Confirm
      # before removing.
      print result
      return result
    else:
      return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None):
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.system = os.uname()[0]
if self.system in ['Linux', 'Darwin']:
self.llvm_symbolizer = LLVMSymbolizerFactory(self.system)
else:
raise Exception('Unknown system')
def symbolize_address(self, addr, binary, offset):
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizer])
result = symbolizers[binary].symbolize(addr, binary, offset)
if result is None:
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def print_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
print self.current_line
else:
for symbolized_frame in symbolized_lines:
print ' #' + str(self.frame_no) + ' ' + symbolized_frame.rstrip()
self.frame_no += 1
def process_stdin(self):
self.frame_no = 0
while True:
line = sys.stdin.readline()
if not line:
break
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
print self.current_line
continue
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset)
self.print_symbolized_lines(symbolized_line)
if __name__ == '__main__':
  # -d / --demangle turns on name demangling in all symbolizer backends;
  # remaining positional args are path prefixes consumed by fix_filename().
  opts, args = getopt.getopt(sys.argv[1:], "d", ["demangle"])
  for o, a in opts:
    if o in ("-d", "--demangle"):
      demangle = True;
  loop = SymbolizationLoop()
  loop.process_stdin()

View File

@ -1,49 +0,0 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import glob
import os
import re
import sys
import subprocess

# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
# support layout_tests, remove Dr. Memory specific code and verify it works
# on a "clean" Mac.

# Pull the test name out of an optional --test-name=<name> argument;
# at most one such argument may be present.
testcase_name = None
for arg in sys.argv:
  m = re.match("\-\-test\-name=(.*)", arg)
  if m:
    assert testcase_name is None
    testcase_name = m.groups()[0]

# arg #0 is the path to this python script
cmd_to_run = sys.argv[1:]

# TODO(timurrrr): this is Dr. Memory-specific
# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
# To group reports per UI test, we want to put the reports for each test into a
# separate directory. This code can be simplified when we have
# http://code.google.com/p/drmemory/issues/detail?id=684 fixed.
logdir_idx = cmd_to_run.index("-logdir")
old_logdir = cmd_to_run[logdir_idx + 1]

wrapper_pid = str(os.getpid())

# On Windows, there is a chance of PID collision. We avoid it by appending the
# number of entries in the logdir at the end of wrapper_pid.
# This number is monotonic and we can't have two simultaneously running wrappers
# with the same PID.
wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))

# Redirect this test's logs into a per-test subdirectory of the logdir.
cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
os.makedirs(cmd_to_run[logdir_idx + 1])

# Record the human-readable test name next to its log directory.
if testcase_name:
  f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
  print >>f, testcase_name
  f.close()

# Run the wrapped command and propagate its exit status.
exit(subprocess.call(cmd_to_run))

View File

@ -1,70 +0,0 @@
@echo off
:: Copyright (c) 2011 The Chromium Authors. All rights reserved.
:: Use of this source code is governed by a BSD-style license that can be
:: found in the LICENSE file.

:: TODO(timurrrr): batch files 'export' all the variables to the parent shell
set THISDIR=%~dp0
set TOOL_NAME="unknown"

:: Get the tool name and put it into TOOL_NAME {{{1
:: NB: SHIFT command doesn't modify %*
:PARSE_ARGS_LOOP
if %1 == () GOTO:TOOLNAME_NOT_FOUND
if %1 == --tool GOTO:TOOLNAME_FOUND
SHIFT
goto :PARSE_ARGS_LOOP

:TOOLNAME_NOT_FOUND
echo "Please specify a tool (tsan or drmemory) by using --tool flag"
exit /B 1

:TOOLNAME_FOUND
SHIFT
set TOOL_NAME=%1
:: }}}

:: Dispatch on the parsed tool name; all drmemory variants share one setup.
if "%TOOL_NAME%" == "drmemory" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "drmemory_light" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "drmemory_full" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "drmemory_pattern" GOTO :SETUP_DRMEMORY
if "%TOOL_NAME%" == "tsan" GOTO :SETUP_TSAN
echo "Unknown tool: `%TOOL_NAME%`! Only tsan and drmemory are supported right now"
exit /B 1

:SETUP_DRMEMORY
:: Honor a pre-set DRMEMORY_COMMAND so callers can point at their own build.
if NOT "%DRMEMORY_COMMAND%"=="" GOTO :RUN_TESTS
:: Set up DRMEMORY_COMMAND to invoke Dr. Memory {{{1
set DRMEMORY_PATH=%THISDIR%..\..\third_party\drmemory
set DRMEMORY_SFX=%DRMEMORY_PATH%\drmemory-windows-sfx.exe
if EXIST %DRMEMORY_SFX% GOTO DRMEMORY_BINARY_OK
echo "Can't find Dr. Memory executables."
echo "See http://www.chromium.org/developers/how-tos/using-valgrind/dr-memory"
echo "for the instructions on how to get them."
exit /B 1

:DRMEMORY_BINARY_OK
:: Self-extract the archive and point DRMEMORY_COMMAND at the binary.
%DRMEMORY_SFX% -o%DRMEMORY_PATH%\unpacked -y
set DRMEMORY_COMMAND=%DRMEMORY_PATH%\unpacked\bin\drmemory.exe
:: }}}
goto :RUN_TESTS

:SETUP_TSAN
:: Set up PIN_COMMAND to invoke TSan {{{1
set TSAN_PATH=%THISDIR%..\..\third_party\tsan
set TSAN_SFX=%TSAN_PATH%\tsan-x86-windows-sfx.exe
if EXIST %TSAN_SFX% GOTO TSAN_BINARY_OK
echo "Can't find ThreadSanitizer executables."
echo "See http://www.chromium.org/developers/how-tos/using-valgrind/threadsanitizer/threadsanitizer-on-windows"
echo "for the instructions on how to get them."
exit /B 1

:TSAN_BINARY_OK
:: Self-extract the archive and point PIN_COMMAND at the tsan launcher.
%TSAN_SFX% -o%TSAN_PATH%\unpacked -y
set PIN_COMMAND=%TSAN_PATH%\unpacked\tsan-x86-windows\tsan.bat
:: }}}
goto :RUN_TESTS

:RUN_TESTS
:: Hand the original arguments (%*) through to the Python driver.
set PYTHONPATH=%THISDIR%../python/google
set RUNNING_ON_VALGRIND=yes
python %THISDIR%/chrome_tests.py %*

View File

@ -1,626 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception):
  """Raised when --test names a test absent from ChromeTests._test_list."""
  pass


class MultipleGTestFiltersSpecified(Exception):
  """Raised when a filter is given both via --gtest_filter and --test."""
  pass


class BuildDirNotFound(Exception):
  """Raised when no build directory was found or specified."""
  pass


class BuildDirAmbiguous(Exception):
  """Raised when several candidate build directories exist."""
  pass
class ChromeTests:
SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 500
  def __init__(self, options, args, test):
    """Parse the test spec ("name" or "name:gtest_filter") and find dirs.

    Raises:
      TestNotFound: test is not a key of _test_list.
      MultipleGTestFiltersSpecified: a filter was given both via
          --gtest_filter and embedded in the --test value.
      BuildDirAmbiguous: more than one default build directory exists.
    """
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter
    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)
    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)
    self._options = options
    self._args = args
    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives). We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # since this path is used for string matching, make sure it's always
    # an absolute Unix-style path
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    # NOTE(review): valgrind_test_script is computed but never used below.
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source_dir=%s" % (self._source_dir)]
    if not self._options.build_dir:
      # Probe the conventional build output locations.
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build_dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None
    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build_dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build_dir")
  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'
    cmd = list(self._command_preamble)
    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName();
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppression
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)
    # Forward raw tool flags and log retention to valgrind_test.py.
    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      if self._options.gtest_repeat:
        cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
    invocation.
    If the user passed his own filter mentioning only one test, just use it.
    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
    '''
    # A user-supplied filter naming exactly one test (no ':' list, no
    # wildcards) wins outright.
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return
    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info(" \"%s\" - not found" % readable_filename)
        continue
      logging.info(" \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        # Skip comments and blank lines in the exclude file.
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        # Append the exclusions to the user's filter, introducing the
        # negative section ('-') if it isn't there yet.
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)
  @staticmethod
  def ShowTests():
    """Print every known test name, grouping aliases of the same handler."""
    # Invert _test_list: handler function -> all names mapped to it.
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)
    # The shortest name is treated as canonical; the rest become aliases.
    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]
    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print " {} (aka {})".format(name, ', '.join(aliases))
      else:
        print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    """Run one gtest binary under the selected valgrind tool.

    module: source module name used for report bookkeeping by the tool.
    name: test executable name (``.exe`` is appended on Windows upstream).
    valgrind_test_args: extra flags for valgrind_test.py itself.
    cmd_args: extra flags for the test binary.
    """
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)
    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    """Run an arbitrary command line (the positional args) under the tool."""
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)
  # Thin wrappers: one method per test binary so each can be selected with
  # --test. All delegate to SimpleTest with a module and executable name.
  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    # IPC tests spawn children; trace them so they run under the tool too.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    # Remoting tests need longer UI-action timeouts than the default.
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0;
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
  # Shared argument sets for the interactive/browser suites below.
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    # Reliability tests replay a fixed URL list shipped next to this script.
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Run one slice of the layout tests under the configured tool."""
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests. Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    #
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkits_tests.py commandline
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    # Clear stale logs from a previous run of this chunk, if any.
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes. Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    jobs = int(multiprocessing.cpu_count() * 0.3)
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=200000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py. We aren't passed it directly,
    # so parse it out of build_dir. run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build_dir / --debug)
    if self._options.build_dir.endswith("Debug"):
      script_cmd.append("--debug");
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)
    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
def TestLayout(self):
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under valgrind rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("valgrind_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
str = f.read()
if len(str):
chunk_num = int(str)
# This should be enough so that we have a couple of complete runs
# of test data stored in the archive (although note that when we loop
# that we almost guaranteed won't be at the end of the test list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
# Save the new chunk size before running the tests. Otherwise if a
# particular chunk hangs the bot, the chunk number will never get
# incremented and the bot will be wedged.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
# Maps every name accepted by the --test flag to the bound ChromeTests
# method that runs it; most suites appear twice (short alias + binary name).
_test_list = {
  "cmdline" : RunCmdLine,
  "app_list": TestAppList,     "app_list_unittests": TestAppList,
  "ash": TestAsh,              "ash_unittests": TestAsh,
  "aura": TestAura,            "aura_unittests": TestAura,
  "automated_ui" : TestAutomatedUI,
  "base": TestBase,            "base_unittests": TestBase,
  "browser": TestBrowser,      "browser_tests": TestBrowser,
  "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
  "components": TestComponents,"components_unittests": TestComponents,
  "compositor": TestCompositor,"compositor_unittests": TestCompositor,
  "content": TestContent,      "content_unittests": TestContent,
  "content_browsertests": TestContentBrowser,
  "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
  "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
  "device": TestDevice,        "device_unittests": TestDevice,
  "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
  "ffmpeg_regression_tests": TestFFmpegRegressions,
  "gpu": TestGPU,              "gpu_unittests": TestGPU,
  "ipc": TestIpc,              "ipc_tests": TestIpc,
  "interactive_ui": TestInteractiveUI,
  "jingle": TestJingle,        "jingle_unittests": TestJingle,
  # "webkit" is a legacy alias for the layout-test runner.
  "layout": TestLayout,        "layout_tests": TestLayout,
  "webkit": TestLayout,
  "media": TestMedia,          "media_unittests": TestMedia,
  "message_center": TestMessageCenter,
  "message_center_unittests" : TestMessageCenter,
  "net": TestNet,              "net_unittests": TestNet,
  "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
  "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
  "printing": TestPrinting,    "printing_unittests": TestPrinting,
  "reliability": TestReliability, "reliability_tests": TestReliability,
  "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
  "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
  "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
  "sql": TestSql,              "sql_unittests": TestSql,
  "sync": TestSync,            "sync_unit_tests": TestSync,
  "sync_integration_tests": TestSyncIntegration,
  "sync_integration": TestSyncIntegration,
  "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
  "unit": TestUnit,            "unit_tests": TestUnit,
  "url": TestURL,              "url_unittests": TestURL,
  "views": TestViews,          "views_unittests": TestViews,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.disable_interspersed_args()
parser.add_option("", "--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build_dir",
help="the location of the compiler output")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("", "--baseline", action="store_true", default=False,
help="generate baseline data instead of validating")
parser.add_option("", "--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("", "--gtest_repeat",
help="argument for --gtest_repeat")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("", "--tool", dest="valgrind_tool", default="memcheck",
help="specify a valgrind tool to run the tests under")
parser.add_option("", "--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("", "--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
help="for layout tests: # of subtests per run. 0 for all.")
# TODO(thestig) Remove this if we can.
parser.add_option("", "--gtest_color", dest="gtest_color", default="no",
help="dummy compatibility flag for sharding_supervisor.")
options, args = parser.parse_args()
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
# Script entry point: exit with the status code returned by _main().
if __name__ == "__main__":
  sys.exit(_main())

View File

@ -1,122 +0,0 @@
#!/bin/bash

# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Set up some paths and re-direct the arguments to chrome_tests.py

export THISDIR=`dirname $0`
ARGV_COPY="$@"

# We need to set CHROME_VALGRIND iff using Memcheck or TSan-Valgrind:
#   tools/valgrind/chrome_tests.sh --tool memcheck
# or
#   tools/valgrind/chrome_tests.sh --tool=memcheck
# (same for "--tool=tsan")
tool="memcheck"  # Default to memcheck.
while (( "$#" ))
do
  if [[ "$1" == "--tool" ]]
  then
    tool="$2"
    shift
  elif [[ "$1" =~ --tool=(.*) ]]
  then
    tool="${BASH_REMATCH[1]}"
  fi
  shift
done

NEEDS_VALGRIND=0
NEEDS_DRMEMORY=0

case "$tool" in
  "memcheck")
    NEEDS_VALGRIND=1
    ;;
  "tsan" | "tsan_rv")
    # Bug fix: this used to be a single-bracket test ("[ ... == CYGWIN* ]"),
    # but the test command performs no glob matching, so the Cygwin branch
    # could never be taken.  [[ ]] does pattern-match the right-hand side.
    if [[ "$(uname -s)" == CYGWIN* ]]
    then
      NEEDS_PIN=1
    else
      NEEDS_VALGRIND=1
    fi
    ;;
  "drmemory" | "drmemory_light" | "drmemory_full" | "drmemory_pattern")
    NEEDS_DRMEMORY=1
    ;;
esac

if [ "$NEEDS_VALGRIND" == "1" ]
then
  CHROME_VALGRIND=`sh $THISDIR/locate_valgrind.sh`
  if [ "$CHROME_VALGRIND" = "" ]
  then
    # locate_valgrind.sh failed
    exit 1
  fi
  echo "Using valgrind binaries from ${CHROME_VALGRIND}"

  PATH="${CHROME_VALGRIND}/bin:$PATH"
  # We need to set these variables to override default lib paths hard-coded
  # into Valgrind binary.
  export VALGRIND_LIB="$CHROME_VALGRIND/lib/valgrind"
  export VALGRIND_LIB_INNER="$CHROME_VALGRIND/lib/valgrind"

  # Clean up some /tmp directories that might be stale due to interrupted
  # chrome_tests.py execution.
  # FYI:
  #   -mtime +1  <- only print files modified more than 24h ago,
  #   -print0/-0 are needed to handle possible newlines in the filenames.
  echo "Cleanup /tmp from Valgrind stuff"
  find /tmp -maxdepth 1 \(\
        -name "vgdb-pipe-*" -or -name "vg_logs_*" -or -name "valgrind.*" \
      \) -mtime +1 -print0 | xargs -0 rm -rf
fi

if [ "$NEEDS_DRMEMORY" == "1" ]
then
  if [ -z "$DRMEMORY_COMMAND" ]
  then
    DRMEMORY_PATH="$THISDIR/../../third_party/drmemory"
    DRMEMORY_SFX="$DRMEMORY_PATH/drmemory-windows-sfx.exe"
    if [ ! -f "$DRMEMORY_SFX" ]
    then
      echo "Can't find Dr. Memory executables."
      echo "See http://www.chromium.org/developers/how-tos/using-valgrind/dr-memory"
      echo "for the instructions on how to get them."
      exit 1
    fi

    chmod +x "$DRMEMORY_SFX"  # Cygwin won't run it without +x.
    "$DRMEMORY_SFX" -o"$DRMEMORY_PATH/unpacked" -y
    export DRMEMORY_COMMAND="$DRMEMORY_PATH/unpacked/bin/drmemory.exe"
  fi
fi

if [ "$NEEDS_PIN" == "1" ]
then
  if [ -z "$PIN_COMMAND" ]
  then
    # Set up PIN_COMMAND to invoke TSan.
    TSAN_PATH="$THISDIR/../../third_party/tsan"
    TSAN_SFX="$TSAN_PATH/tsan-x86-windows-sfx.exe"
    echo "$TSAN_SFX"
    # Quote the path so the test doesn't break on spaces.
    if [ ! -f "$TSAN_SFX" ]
    then
      echo "Can't find ThreadSanitizer executables."
      echo "See http://www.chromium.org/developers/how-tos/using-valgrind/threadsanitizer/threadsanitizer-on-windows"
      echo "for the instructions on how to get them."
      exit 1
    fi

    chmod +x "$TSAN_SFX"  # Cygwin won't run it without +x.
    "$TSAN_SFX" -o"$TSAN_PATH"/unpacked -y
    export PIN_COMMAND="$TSAN_PATH/unpacked/tsan-x86-windows/tsan.bat"
  fi
fi

PYTHONPATH=$THISDIR/../python/google python \
           "$THISDIR/chrome_tests.py" $ARGV_COPY

View File

@ -1,252 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import platform
import os
import signal
import subprocess
import sys
import time
class NotImplementedError(Exception):
  # Raised by PlatformNames() when the host platform is unrecognized.
  # NOTE(review): this shadows Python's builtin NotImplementedError within
  # this module -- presumably intentional, but worth confirming.
  pass
class TimeoutError(Exception):
  # Raised by RunSubprocess() when a child process exceeds its timeout and
  # has to be killed.  (Shadows the Python 3 builtin of the same name.)
  pass
def RunSubprocessInBackground(proc):
  """Runs a subprocess in the background. Returns a handle to the process."""
  cmd_text = " ".join(proc)
  logging.info("running %s in the background" % cmd_text)
  handle = subprocess.Popen(proc)
  return handle
def RunSubprocess(proc, timeout=0):
  """ Runs a subprocess, until it finishes or |timeout| is exceeded and the
  process is killed with taskkill.  A |timeout| <= 0 means no timeout.

  Args:
    proc: list of process components (exe + args)
    timeout: how long to wait before killing, <= 0 means wait forever

  Returns:
    The process exit status as reported by Popen.poll() (0 on success).

  Raises:
    TimeoutError: if |timeout| was exceeded and the process was killed.
  """
  logging.info("running %s, timeout %d sec" % (" ".join(proc), timeout))
  sys.stdout.flush()
  sys.stderr.flush()

  # Manually read and print out stdout and stderr.
  # By default, the subprocess is supposed to inherit these from its parent,
  # however when run under buildbot, it seems unable to read data from a
  # grandchild process, so we have to read the child and print the data as if
  # it came from us for buildbot to read it.  We're not sure why this is
  # necessary.
  # TODO(erikkay): should we buffer stderr and stdout separately?
  p = subprocess.Popen(proc, universal_newlines=True,
                       bufsize=0,  # unbuffered
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

  logging.info("started subprocess")

  did_timeout = False
  if timeout > 0:
    wait_until = time.time() + timeout
  # Pump the child's output line by line until it exits or times out.
  while p.poll() is None and not did_timeout:
    # Have to use readline rather than readlines() or "for line in p.stdout:",
    # otherwise we get buffered even with bufsize=0.
    line = p.stdout.readline()
    while line and not did_timeout:
      sys.stdout.write(line)
      sys.stdout.flush()
      line = p.stdout.readline()
    if timeout > 0:
      did_timeout = time.time() > wait_until

  if did_timeout:
    if IsWindows():
      subprocess.call(["taskkill", "/T", "/F", "/PID", str(p.pid)])
    else:
      # Does this kill all children, too?
      os.kill(p.pid, signal.SIGINT)
    logging.error("KILLED %d" % p.pid)
    # Give the process a chance to actually die before continuing
    # so that cleanup can happen safely.
    time.sleep(1.0)
    logging.error("TIMEOUT waiting for %s" % proc[0])
    raise TimeoutError(proc[0])
  else:
    # The child exited on its own; drain any remaining buffered output.
    for line in p.stdout:
      sys.stdout.write(line)
    if not IsMac():   # stdout flush fails on Mac
      logging.info("flushing stdout")
      sys.stdout.flush()

  logging.info("collecting result code")
  result = p.poll()
  if result:
    logging.error("%s exited with non-zero result code %d" % (proc[0], result))
  return result
def IsLinux():
  """True when running on a Linux host."""
  return sys.platform[:5] == 'linux'
def IsMac():
  """True when running on a Mac OS X host."""
  return sys.platform[:6] == 'darwin'
def IsWindows():
  """True when running on Windows, either native Python or Cygwin."""
  if sys.platform == 'cygwin':
    return True
  return sys.platform.startswith('win')
def WindowsVersionName():
  """Returns the name of the Windows version if it is known, or None.

  Possible return values are: xp, vista, 7, 8, or None
  """
  if sys.platform == 'cygwin':
    # Windows version number is hiding in system name. Looks like:
    # CYGWIN_NT-6.1-WOW64
    try:
      version_str = platform.uname()[0].split('-')[1]
    except:
      return None
  elif sys.platform.startswith('win'):
    # Normal Windows version string. Mine: 6.1.7601
    version_str = platform.version()
  else:
    return None

  try:
    major, minor = [int(piece) for piece in version_str.split('.')[:2]]
  except:
    return None  # Can't parse, unknown version.

  if major == 5:
    # Any 5.x release is XP-family.
    return 'xp'
  # Map known 6.x releases; anything else is unknown.
  return {(6, 0): 'vista', (6, 1): '7', (6, 2): '8'}.get((major, minor))
def PlatformNames():
  """Return an array of string to be used in paths for the platform
  (e.g. suppressions, gtest filters, ignore files etc.)
  The first element of the array describes the 'main' platform
  """
  if IsLinux():
    return ['linux']
  if IsMac():
    return ['mac']
  if not IsWindows():
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)
  # Windows: generic name first, then a version-specific name when known.
  names = ['win32']
  version_name = WindowsVersionName()
  if version_name is not None:
    names.append('win-%s' % version_name)
  return names
def PutEnvAndLog(env_name, env_value):
  """Sets an environment variable and logs the assignment."""
  logging.info('export %s=%s', env_name, env_value)
  os.putenv(env_name, env_value)
def BoringCallers(mangled, use_re_wildcards):
  """Return a list of 'boring' function names (optionally mangled)
  with */? wildcards (optionally .*/.).
  Boring = we drop off the bottom of stack traces below such functions.
  """
  # (demangled, mangled) name pairs for frames that carry no signal.
  need_mangling = [
    # Don't show our testing framework:
    ("testing::Test::Run",     "_ZN7testing4Test3RunEv"),
    ("testing::TestInfo::Run", "_ZN7testing8TestInfo3RunEv"),
    ("testing::internal::Handle*ExceptionsInMethodIfSupported*",
     "_ZN7testing8internal3?Handle*ExceptionsInMethodIfSupported*"),

    # Depend on scheduling:
    ("MessageLoop::Run",     "_ZN11MessageLoop3RunEv"),
    ("MessageLoop::RunTask", "_ZN11MessageLoop7RunTask*"),
    ("RunnableMethod*",      "_ZN14RunnableMethod*"),
    ("DispatchToMethod*",    "_Z*16DispatchToMethod*"),
    ("base::internal::Invoker*::DoInvoke*",
     "_ZN4base8internal8Invoker*DoInvoke*"),  # Invoker{1,2,3}
    ("base::internal::RunnableAdapter*::Run*",
     "_ZN4base8internal15RunnableAdapter*Run*"),
  ]

  column = 1 if mangled else 0
  ret = [pair[column] for pair in need_mangling]

  ret.extend([
    # Also don't show the internals of libc/pthread.
    "start_thread",
    "main",
    "BaseThreadInitThunk",
  ])

  if use_re_wildcards:
    # Turn the shell-style wildcards into regex equivalents.
    ret = [name.replace('*', '.*').replace('?', '.') for name in ret]

  return ret
def NormalizeWindowsPath(path):
  """If we're using Cygwin Python, turn the path into a Windows path.

  Forward slashes are kept (not turned into backslashes) for easier
  copy-pasting and escaping.

  TODO(rnk): If we ever want to cut out the subprocess invocation, we can use
  _winreg to get the root Cygwin directory from the registry key
  HKLM\\SOFTWARE\\Cygwin\\setup\\rootdir.
  """
  if not sys.platform.startswith("cygwin"):
    # Nothing to translate outside of Cygwin.
    return path
  cygpath_proc = subprocess.Popen(["cygpath", "-m", path],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
  out, err = cygpath_proc.communicate()
  if err:
    logging.warning("WARNING: cygpath error: %s", err)
  return out.strip()
############################
# Common output format code
def PrintUsedSuppressionsList(suppcounts):
  """ Prints out the list of used suppressions in a format common to all the
  memory tools. If the list is empty, prints nothing and returns False,
  otherwise True.

  suppcounts: a dictionary of used suppression counts,
              Key -> name, Value -> count.
  """
  if not suppcounts:
    return False

  print "-----------------------------------------------------"
  print "Suppressions used:"
  print " count name"
  # Sort by count first and name second so the noisiest suppressions are
  # printed last.  NOTE: the tuple-unpacking lambda (and the print
  # statements) are Python 2 only.
  for (name, count) in sorted(suppcounts.items(), key=lambda (k,v): (v,k)):
    print "%7d %s" % (count, name)
  print "-----------------------------------------------------"
  sys.stdout.flush()
  return True

View File

@ -1,5 +0,0 @@
@echo off
:: Copyright (c) 2011 The Chromium Authors. All rights reserved.
:: Use of this source code is governed by a BSD-style license that can be
:: found in the LICENSE file.
:: Convenience launcher: runs the "cmdline" suite under Dr. Memory via the
:: sibling chrome_tests.bat, forwarding any extra arguments (%*).
%~dp0\chrome_tests.bat -t cmdline --tool drmemory %*

View File

@ -1 +0,0 @@
*

View File

@ -1,35 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
  """Checks the DrMemory suppression files for bad suppressions."""

  # TODO(timurrrr): find out how to do relative imports
  # and remove this ugly hack. Also, the CheckChange function won't be needed.
  tools_vg_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..')
  import sys
  saved_path = sys.path
  try:
    # Temporarily extend sys.path so the shared suppressions module resolves.
    sys.path = saved_path + [tools_vg_path]
    import suppressions
    return suppressions.PresubmitCheck(input_api, output_api)
  finally:
    sys.path = saved_path
def CheckChangeOnUpload(input_api, output_api):
  # Upload-time presubmit hook: delegates to the shared suppression check.
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Commit-time presubmit hook: delegates to the shared suppression check.
  return CheckChange(input_api, output_api)
def GetPreferredTrySlaves():
  """Default try bots for changes under this directory."""
  preferred = ['win_drmemory']
  return preferred

View File

@ -1,456 +0,0 @@
# This file contains suppressions for the Dr.Memory tool, see
# http://dev.chromium.org/developers/how-tos/using-drmemory
#
# This file contains suppressions for the DrMemory reports happening
# in the 'light' mode (a.k.a. drmemory_light) as well as in the 'full' mode.
# Please use suppressions_full.txt for all the reports that can happen only
# in the full mode (drmemory_full).
############################
# Known reports on the third party we have no control over.
# Reports from Sophos antivirus
UNADDRESSABLE ACCESS
name=Sophos UNADDR
...
sophos*.dll!*
UNINITIALIZED READ
name=Sophos UNINIT
...
sophos*.dll!*
LEAK
name=Sophos LEAK
...
sophos*.dll!*
# Reports from Microsoft RDP ActiveX control (mstscax.dll)
GDI USAGE ERROR
name=crbug.com/177832: mstscax.dll causes "GDI USAGE ERROR" errors.
...
mstscax.dll!*
UNADDRESSABLE ACCESS
name=crbug.com/177832: mstscax.dll causes "UNADDRESSABLE ACCESS" errors.
...
mstscax.dll!*
############################
# Suppress some false reports due to bugs in Dr.Memory like wrong analysis
# assumptions or unhandled syscalls
# Please note: the following suppressions were written in the absence of
# private symbols so may need to be updated when we switch to auto-loading PDBs
UNADDRESSABLE ACCESS
name=http://code.google.com/p/drmemory/issues/detail?id=12 UNADDR
...
SHELL32.dll!SHFileOperation*
UNADDRESSABLE ACCESS
name=http://code.google.com/p/drmemory/issues/detail?id=40 UNADDR
...
WINSPOOL.DRV!*
INVALID HEAP ARGUMENT
name=http://code.google.com/p/drmemory/issues/detail?id=40 INVALID HEAP
...
WINSPOOL.DRV!*
UNADDRESSABLE ACCESS
name=http://code.google.com/p/drmemory/issues/detail?id=59
...
*!SetEnvironmentVariable*
UNADDRESSABLE ACCESS
name=http://code.google.com/p/drmemory/issues/detail?id=68 (UNADDR 1)
...
MSWSOCK.dll!WSPStartup
UNADDRESSABLE ACCESS
name=http://code.google.com/p/drmemory/issues/detail?id=68 (UNADDR 2)
...
ntdll.dll!RtlValidateUnicodeString
############################
# TODO(timurrrr): investigate these
UNADDRESSABLE ACCESS
name=TODO SHParseDisplayName
...
*!SHParseDisplayName
UNADDRESSABLE ACCESS
name=TODO GetCanonicalPathInfo
...
*!GetCanonicalPathInfo*
UNADDRESSABLE ACCESS
name=TODO CreateDC
...
GDI32.dll!CreateDC*
# This one looks interesting
INVALID HEAP ARGUMENT
name=TODO ExitProcess
...
KERNEL32.dll!ExitProcess
INVALID HEAP ARGUMENT
name=http://crbug.com/103365 (a)
ppapi_tests.dll!*
...
ppapi_tests.dll!*
*!base::internal::RunnableAdapter<*>::Run
INVALID HEAP ARGUMENT
name=http://crbug.com/103365 (b)
ppapi_tests.dll!*
...
ppapi_tests.dll!*
*!PP_RunCompletionCallback
...
*!base::internal::RunnableAdapter<*>::Run
INVALID HEAP ARGUMENT
name=http://crbug.com/107567 intentional mismatch in _DebugHeapDelete, no frame
*!std::numpunct<*>::_Tidy
*!std::numpunct<*>::~numpunct<*>
# TODO(rbultje): Investigate if code fix is required instead.
WARNING
name=http://crbug.com/223255 - prefetches in vp8
instruction=prefetch*
ffmpegsumo.dll!ff_prefetch_mmxext
ffmpegsumo.dll!vp8_decode_mb_row_no_filter
############################
# Intentional errors in Chromium tests (ToolsSanityTests)
LEAK
name=sanity test 01 (memory leak)
base_unittests.exe!operator new
base_unittests.exe!operator new[]
base_unittests.exe!base::ToolsSanityTest_MemoryLeak_Test::TestBody
# "..." is needed due to http://code.google.com/p/drmemory/issues/detail?id=666
UNADDRESSABLE ACCESS
name=sanity test 02 (malloc/read left)
base_unittests.exe!*ReadValueOutOfArrayBoundsLeft
...
base_unittests.exe!base::ToolsSanityTest_AccessesToMallocMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 03 (malloc/read right)
base_unittests.exe!*ReadValueOutOfArrayBoundsRight
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToMallocMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 04 (malloc/write left)
base_unittests.exe!*WriteValueOutOfArrayBoundsLeft
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToMallocMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 05 (malloc/write right)
base_unittests.exe!*WriteValueOutOfArrayBoundsRight
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToMallocMemory_Test::TestBody
# "..." is needed due to http://code.google.com/p/drmemory/issues/detail?id=666
UNADDRESSABLE ACCESS
name=sanity test 06 (new/read left)
base_unittests.exe!*ReadValueOutOfArrayBoundsLeft
...
base_unittests.exe!base::ToolsSanityTest_AccessesToNewMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 07 (new/read right)
base_unittests.exe!*ReadValueOutOfArrayBoundsRight
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToNewMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 08 (new/write left)
base_unittests.exe!*WriteValueOutOfArrayBoundsLeft
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToNewMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 09 (new/write right)
base_unittests.exe!*WriteValueOutOfArrayBoundsRight
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToNewMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 10 (write after free)
base_unittests.exe!base::ToolsSanityTest_AccessesToMallocMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=sanity test 11 (write after delete)
base_unittests.exe!base::ToolsSanityTest_AccessesToNewMemory_Test::TestBody
INVALID HEAP ARGUMENT
name=sanity test 12 (array deleted without [])
base_unittests.exe!base::ToolsSanityTest_ArrayDeletedWithoutBraces_Test::TestBody
INVALID HEAP ARGUMENT
name=sanity test 13 (single element deleted with [])
base_unittests.exe!base::ToolsSanityTest_SingleElementDeletedWithBraces_Test::TestBody
UNINITIALIZED READ
name=sanity test 14 (malloc/read uninit)
base_unittests.exe!*ReadUninitializedValue
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToMallocMemory_Test::TestBody
UNINITIALIZED READ
name=sanity test 15 (new/read uninit)
base_unittests.exe!*ReadUninitializedValue
base_unittests.exe!*MakeSomeErrors
base_unittests.exe!base::ToolsSanityTest_AccessesToNewMemory_Test::TestBody
UNADDRESSABLE ACCESS
name=AboutHandler::AboutCrash deliberate crash
# TODO(bruening): switch to annotation once have support for that
chrome.dll!AboutHandler::AboutCrash
UNADDRESSABLE ACCESS
name=RendererCrashTest.Crash deliberate crash
# function is small, little risk for false negative in rest of it
# TODO(bruening): switch to annotation once have support for that
chrome.dll!HandleRendererErrorTestParameters
UNADDRESSABLE ACCESS
name=NPAPITesterBase.NoHangIfInitCrashes deliberate crash
# function is small, little risk for false negative in rest of it
# TODO(bruening): switch to annotation once have support for that
npapi_test_plugin.dll!NPAPIClient::PluginClient::Initialize
# Deliberate NULL deref to crash the child process
UNADDRESSABLE ACCESS
name=CrashingChildProcess deliberate crash
*!CrashingChildProcess
UNADDRESSABLE ACCESS
name=::Crasher::Run deliberate crash
*!base::`anonymous namespace'::Crasher::Run
############################
# Benign issues in Chromium
WARNING
name=http://crbug.com/72463 - prefetches in generated MemCopy
instruction=prefetch*
<not in a module>
chrome.dll!v8::internal::CopyChars*
WARNING
name=prefetches in NVD3DUM.dll
instruction=prefetch*
NVD3DUM.dll!*
WARNING
name=prefetches in igdumd32.dll
instruction=prefetch*
igdumd32.dll!*
UNADDRESSABLE ACCESS
name=http://code.google.com/p/drmemory/issues/detail?id=582 bizarre cl-generated read-beyond-TOS
instruction=mov 0xfffffffc(%esp) -> %eax
chrome.dll!WebCore::RenderStyle::resetBorder*
INVALID HEAP ARGUMENT
name=http://crbug.com/101537
*!scoped_ptr<_TOKEN_USER>*
INVALID HEAP ARGUMENT
name=http://crbug.com/101717 (1)
*!scoped_ptr<_TOKEN_DEFAULT_DACL>*
INVALID HEAP ARGUMENT
name=http://crbug.com/101717 (2)
*!sandbox::PolicyBase::~PolicyBase
INVALID HEAP ARGUMENT
name=http://crbug.com/101717 (3)
*!scoped_ptr<_UNICODE_STRING>::~scoped_ptr<_UNICODE_STRING>
*!sandbox::GetHandleName
INVALID HEAP ARGUMENT
name=http://crbug.com/101717 (4)
*!scoped_ptr<_OBJECT_NAME_INFORMATION>::~scoped_ptr<_OBJECT_NAME_INFORMATION>
*!sandbox::GetPathFromHandle
GDI USAGE ERROR
name=http://code.google.com/p/drmemory/issues/detail?id=899 deleting bitmap which is probably safe
system call NtGdiDeleteObjectApp
*!skia::`anonymous namespace'::Bitmap::~Bitmap
*!skia::`anonymous namespace'::Bitmap::`scalar deleting destructor'
############################
# Real issues in Chromium
UNADDRESSABLE ACCESS
name=http://crbug.com/88213
*!base::win::ObjectWatcher::StopWatching
*!base::win::ObjectWatcher::WillDestroyCurrentMessageLoop
*!MessageLoop::~MessageLoop
UNADDRESSABLE ACCESS
name=http://crbug.com/96010
*!TestingProfile::FinishInit
*!TestingProfile::TestingProfile
*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody
UNADDRESSABLE ACCESS
name=http://crbug.com/106522
npapi_test_plugin.dll!NPAPIClient::PluginTest::id
npapi_test_plugin.dll!NPAPIClient::ExecuteGetJavascriptUrlTest::TimerProc
# Bad GDI teardown sequence.
GDI USAGE ERROR
name=http://crbug.com/109963 a
system call NtGdiDeleteObjectApp
# usually one or two GDI32.dll frames here but sometimes in light mode
# there are zero. still pretty narrow b/c of frames on either side.
...
*!skia::BitmapPlatformDevice::BitmapPlatformDeviceData::~BitmapPlatformDeviceData
GDI USAGE ERROR
name=http://crbug.com/109963 b
system call NtGdiDeleteObjectApp
# usually one or two GDI32.dll frames here but sometimes in light mode
# there are zero. still pretty narrow b/c of frames on either side.
...
*!skia::BitmapPlatformDevice::BitmapPlatformDeviceData::ReleaseBitmapDC
GDI USAGE ERROR
name=http://crbug.com/109963 c
system call NtGdiDeleteObjectApp
GDI32.dll!DeleteDC
content.dll!*
GDI USAGE ERROR
name=http://crbug.com/109963 d
system call NtGdiDeleteObjectApp
GDI32.dll!DeleteDC
*!base::internal::RunnableAdapter*
# GDI usage errors in 3rd-party components
GDI USAGE ERROR
name=http://crbug.com/119552 a
system call NtGdiDeleteObjectApp
...
*!OmniboxViewWin::*
GDI USAGE ERROR
name=http://crbug.com/119552 b
system call Nt*
...
*!ATL::*
GDI USAGE ERROR
name=http://crbug.com/119552 c
# optional gdi32.dll frame followed by user32.dll
# TODO(bruening): once have
# http://code.google.com/p/drmemory/issues/detail?id=846
# I would do "gdi32.dll!...\nuser32.dll!*"
*32.dll!*
...
shell32.dll!SHGetFileInfoW
*!IconLoader::ReadIcon
GDI USAGE ERROR
name=http://crbug.com/119552 d
system call NtGdiDeleteObjectApp
gdi32.dll!DeleteObject
riched20.dll!*
riched20.dll!*
riched20.dll!*
GDI USAGE ERROR
name=http://crbug.com/120157
# "ReleaseDC called from different thread than GetDC"
system call NtUserCallOneParam.RELEASEDC
*!*FontCache::CacheElement::~CacheElement
GDI USAGE ERROR
name=http://crbug.com/158090
# "DC created by one thread and used by another"
...
content.dll!content::*::FontCache::PreCacheFont
content.dll!content::FontCacheDispatcher::OnPreCacheFont
content.dll!DispatchToMethod<content::FontCacheDispatcher*
GDI USAGE ERROR
name=http://crbug.com/158090 c#4
# ReleaseDC for DC called from different thread than the thread that called GetDC
system call NtUserCallOneParam.RELEASEDC
ui.dll!gfx::ReadColorProfile
ui.dll!gfx::GetColorProfile
content.dll!content::RenderMessageFilter::OnGetMonitorColorProfile
content.dll!DispatchToMethod*
INVALID HEAP ARGUMENT
name=http://crbug.com/158350
# allocated with operator new[], freed with operator delete
*!*
*!*
*!*
*!*
*!*
content.dll!*
content.dll!*
content.dll!*
content.dll!*
content.dll!*
*!*
*!*
*!*
*!*
*!*
KERNEL32.dll!*
ntdll.dll!*
ntdll.dll!*
WARNING
name=Security test (new oveflow)
MSVCR100D.dll!operator new
*!operator new
*!operator new[]
*!`anonymous namespace'::SecurityTest_NewOverflow_Test::TestBody
*!testing::internal::HandleExceptionsInMethodIfSupported<testing::Test,void>
WARNING
name=Security test (calloc overflow)
*!`anonymous namespace'::CallocReturnsNull
*!`anonymous namespace'::SecurityTest_CallocOverflow_Test::TestBody
*!testing::internal::HandleExceptionsInMethodIfSupported<testing::Test,void>
GDI USAGE ERROR
name=http://crbug.com/234484
# "DC created by one thread and used by another"
...
*!chrome::`anonymous namespace'::SetOverlayIcon
INVALID HEAP ARGUMENT
name=http://crbug.com/262088
drmemorylib.dll!av_dup_packet
msvcrt.dll!wcsrchr
ntdll.dll!RtlIsCurrentThreadAttachExempt
ntdll.dll!LdrShutdownThread
ntdll.dll!RtlExitUserThread
GDI USAGE ERROR
name=http://crbug.com/266484
skia.dll!HDCOffscreen::draw
skia.dll!SkScalerContext_GDI::generateImage
skia.dll!SkScalerContext::getImage
skia.dll!SkGlyphCache::findImage
skia.dll!D1G_NoBounder_RectClip
skia.dll!SkDraw::drawText
skia.dll!SkDevice::drawText
skia.dll!SkCanvas::drawText
media.dll!media::FakeVideoCaptureDevice::OnCaptureTask

File diff suppressed because it is too large Load Diff

View File

@ -1,196 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# drmemory_analyze.py
''' Given a Dr. Memory output file, parses errors and uniques them.'''
from collections import defaultdict
import common
import hashlib
import logging
import optparse
import os
import re
import subprocess
import sys
import time
class DrMemoryError:
  """One unique Dr. Memory error report plus its sanitized suppression."""

  def __init__(self, report, suppression, testcase):
    """Args:
      report: the raw error report text.
      suppression: the Dr.Memory-generated suppression for this error.
      testcase: name of the test the report came from (may be falsy).
    """
    self._report = report
    self._testcase = testcase

    # Chromium-specific transformations of the suppressions:
    # Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
    # Dr.Memory-generated error ids from the name= lines as they don't
    # make sense in a multiprocess report.
    supp_lines = suppression.split("\n")
    # NOTE: xrange-based indexing replaced with enumerate so this class also
    # runs under Python 3; behavior is unchanged.
    for i, line in enumerate(supp_lines):
      if line.startswith("name="):
        line = "name=<insert_a_suppression_name_here>"
      if line.startswith("chrome.dll!"):
        line = line.replace("chrome.dll!", "*!")
      bang_index = line.find("!")
      d_exe_index = line.find(".exe!")
      # "something.exe!frame" -> "*!frame" (the ".exe" must immediately
      # precede the "!", and the module name must be non-trivial).
      if bang_index >= 4 and d_exe_index + 4 == bang_index:
        line = "*" + line[bang_index:]
      supp_lines[i] = line
    self._suppression = "\n".join(supp_lines)

  def __str__(self):
    output = self._report + "\n"
    if self._testcase:
      output += "The report came from the `%s` test.\n" % self._testcase
    output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
    output += ("  For more info on using suppressions see "
        "http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
    output += "{\n%s\n}\n" % self._suppression
    return output

  # This is a device-independent hash identifying the suppression.
  # By printing out this hash we can find duplicate reports between tests and
  # different shards running on multiple buildbots
  def ErrorHash(self):
    # md5 requires bytes under Python 3; encoding the (ASCII) suppression
    # yields the same digest Python 2 produced for the raw str.
    return int(hashlib.md5(self._suppression.encode("utf-8"))
               .hexdigest()[:16], 16)

  def __hash__(self):
    return hash(self._suppression)

  def __eq__(self, rhs):
    # NOTE(review): compares against a raw suppression string, not another
    # DrMemoryError -- callers rely on this, so it is kept as-is.
    return self._suppression == rhs
class DrMemoryAnalyzer:
  ''' Given a set of Dr.Memory output files, parse all the errors out of
  them, unique them and output the results.'''

  def __init__(self):
    # Reports (deduplicated by suppression text via DrMemoryError equality)
    # already printed in earlier tests; used to suppress cross-test dupes.
    self.known_errors = set()
    # Running total of reports printed across all Report() calls.
    self.error_count = 0;

  def ReadLine(self):
    # Advance the parse cursor: read one line of the currently open results
    # file into self.line_ ('' at EOF).
    self.line_ = self.cur_fd_.readline()

  def ReadSection(self):
    # Return the current line plus all following lines up to (but not
    # including) the first blank line; leaves the cursor on that blank line.
    result = [self.line_]
    self.ReadLine()
    while len(self.line_.strip()) > 0:
      result.append(self.line_)
      self.ReadLine()
    return result

  def ParseReportFile(self, filename, testcase):
    '''Parse one Dr.Memory "results" file and its sibling "suppress" file.

    Args:
      filename: path to a results file; the suppression file is derived by
          replacing "results" with "suppress" in the path.
      testcase: name of the test the file came from, or None.

    Returns:
      A list of DrMemoryError objects (plus raw strings for any
      "ASSERT FAILURE" lines).  Side effect: accumulates suppression-use
      counts into self.used_suppressions, which Report() initializes.
    '''
    ret = []

    # First, read the generated suppressions file so we can easily lookup a
    # suppression for a given error.
    supp_fd = open(filename.replace("results", "suppress"), 'r')
    generated_suppressions = {}  # Key -> Error #, Value -> Suppression text.
    for line in supp_fd:
      # NOTE: this regexp looks fragile. Might break if the generated
      # suppression format slightly changes.
      m = re.search("# Suppression for Error #([0-9]+)", line.strip())
      if not m:
        continue
      error_id = int(m.groups()[0])
      assert error_id not in generated_suppressions

      # OK, now read the next suppression:
      cur_supp = ""
      for supp_line in supp_fd:
        if supp_line.startswith("#") or supp_line.strip() == "":
          break
        cur_supp += supp_line
      generated_suppressions[error_id] = cur_supp.strip()
    supp_fd.close()

    self.cur_fd_ = open(filename, 'r')
    while True:
      self.ReadLine()
      if (self.line_ == ''): break

      # "Error #N: ..." starts a report section; pair it with the
      # suppression of the same id read above.
      match = re.search("^Error #([0-9]+): (.*)", self.line_)
      if match:
        error_id = int(match.groups()[0])
        self.line_ = match.groups()[1].strip() + "\n"
        report = "".join(self.ReadSection()).strip()
        suppression = generated_suppressions[error_id]
        ret.append(DrMemoryError(report, suppression, testcase))

      # Tally the "SUPPRESSIONS USED:" summary table, e.g. "  3x: name".
      if re.search("SUPPRESSIONS USED:", self.line_):
        self.ReadLine()
        while self.line_.strip() != "":
          line = self.line_.strip()
          (count, name) = re.match(" *([0-9]+)x(?: \(leaked .*\))?: (.*)",
                                   line).groups()
          count = int(count)
          self.used_suppressions[name] += count
          self.ReadLine()

      # Tool self-assertion failures are forwarded verbatim as report strings.
      if self.line_.startswith("ASSERT FAILURE"):
        ret.append(self.line_.strip())

    self.cur_fd_.close()
    return ret

  def Report(self, filenames, testcase, check_sanity):
    '''Parse all given results files and log the deduplicated reports.

    Args:
      filenames: list of Dr.Memory results file paths.
      testcase: name of the test they came from, or None.
      check_sanity: currently unused (see TODO below).

    Returns:
      0 if no error reports were found, -1 otherwise.
    '''
    sys.stdout.flush()
    # TODO(timurrrr): support positive tests / check_sanity==True
    self.used_suppressions = defaultdict(int)

    to_report = []
    reports_for_this_test = set()
    for f in filenames:
      cur_reports = self.ParseReportFile(f, testcase)

      # Filter out the reports that were there in previous tests.
      for r in cur_reports:
        if r in reports_for_this_test:
          # A similar report is about to be printed for this test.
          pass
        elif r in self.known_errors:
          # A similar report has already been printed in one of the prev tests.
          to_report.append("This error was already printed in some "
                           "other test, see 'hash=#%016X#'" % r.ErrorHash())
          reports_for_this_test.add(r)
        else:
          self.known_errors.add(r)
          reports_for_this_test.add(r)
          to_report.append(r)

    common.PrintUsedSuppressionsList(self.used_suppressions)

    if not to_report:
      logging.info("PASS: No error reports found")
      return 0

    sys.stdout.flush()
    sys.stderr.flush()
    logging.info("Found %i error reports" % len(to_report))
    for report in to_report:
      self.error_count += 1
      logging.info("Report #%d\n%s" % (self.error_count, report))
    logging.info("Total: %i error reports" % len(to_report))
    sys.stdout.flush()
    return -1
def main():
  '''For testing only. Import and use the DrMemoryAnalyzer class instead.'''
  parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
  parser.add_option("", "--source_dir",
                    help="path to top of source tree for this build"
                         "(used to normalize source paths in baseline)")
  options, args = parser.parse_args()
  if not args:
    parser.error("no filename specified")

  logging.getLogger().setLevel(logging.INFO)
  # All positional arguments are result files to analyze.
  return DrMemoryAnalyzer().Report(args, None, False)


if __name__ == '__main__':
  sys.exit(main())

View File

@ -1,87 +0,0 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' A bunch of helper functions for querying gdb.'''
import logging
import os
import re
import tempfile
# Matches gdb's 'info line' output, e.g. 'Line 12 of "foo.c" ...'.
GDB_LINE_RE = re.compile(r'Line ([0-9]*) of "([^"]*)".*')

def _GdbOutputToFileLine(output_line):
  '''Parse one line of gdb output.

  Returns:
    A (file_name, line_number_string) pair, or None when the line does not
    match gdb's 'Line N of "file"' format.
  '''
  parsed = GDB_LINE_RE.match(output_line)
  if not parsed:
    return None
  line_num, file_name = parsed.groups()
  return file_name, line_num
def ResolveAddressesWithinABinary(binary_name, load_address, address_list):
  ''' For each address, return a pair (file, line num) '''
  # Drive a batch gdb session: load the binary's symbols at its runtime load
  # address, then ask 'info line' for every address in one go.
  # NOTE(review): NamedTemporaryFile is opened in default mode; writing str
  # here assumes Python 2 (text mode) -- would need mode='w' on Python 3.
  commands = tempfile.NamedTemporaryFile()
  commands.write('add-symbol-file "%s" %s\n' % (binary_name, load_address))
  for addr in address_list:
    commands.write('info line *%s\n' % addr)
  commands.write('quit\n')
  commands.flush()
  gdb_commandline = 'gdb -batch -x %s 2>/dev/null' % commands.name
  gdb_pipe = os.popen(gdb_commandline)
  result = gdb_pipe.readlines()

  # gdb answers the queries in order, one 'Line ...' or 'No line ...' reply
  # per requested address, so pair replies with addresses positionally.
  address_count = 0
  ret = {}
  for line in result:
    if line.startswith('Line'):
      ret[address_list[address_count]] = _GdbOutputToFileLine(line)
      address_count += 1
    if line.startswith('No line'):
      # Address known to gdb but with no line info.
      ret[address_list[address_count]] = (None, None)
      address_count += 1
  gdb_pipe.close()
  commands.close()
  return ret
class AddressTable(object):
  ''' Object to do batched line number lookup. '''

  def __init__(self):
    # Binary path -> load address, as registered via AddBinaryAt().
    self._load_addresses = {}
    # Binary path -> list of addresses queued for lookup.
    self._binaries = {}
    # True only after ResolveAll() has run for the current set of requests.
    self._all_resolved = False

  def AddBinaryAt(self, binary, load_address):
    ''' Register a new shared library or executable. '''
    self._load_addresses[binary] = load_address

  def Add(self, binary, address):
    ''' Register a lookup request. '''
    if binary == '':
      # logging.warn is a deprecated alias (removed in Python 3.13);
      # use logging.warning.
      logging.warning('adding address %s in empty binary?' % address)
    if binary in self._binaries:
      self._binaries[binary].append(address)
    else:
      self._binaries[binary] = [address]
    self._all_resolved = False

  def ResolveAll(self):
    ''' Carry out all lookup requests. '''
    self._translation = {}
    for binary in self._binaries.keys():
      if binary != '' and binary in self._load_addresses:
        load_address = self._load_addresses[binary]
        addr = ResolveAddressesWithinABinary(
            binary, load_address, self._binaries[binary])
        self._translation[binary] = addr
    self._all_resolved = True

  def GetFileLine(self, binary, addr):
    ''' Get the (filename, linenum) result of a previously-registered lookup
    request.

    Returns (None, None) when the request was never resolved or the lookup
    produced nothing.
    '''
    if self._all_resolved:
      if binary in self._translation:
        if addr in self._translation[binary]:
          return self._translation[binary][addr]
    return (None, None)

View File

@ -1 +0,0 @@
*

View File

@ -1,26 +0,0 @@
# TODO(timurrrr) investigate the failures and enable these tests one-by-one.
RSA*
GmockTest.*
EtwTrace*
StatsTableTest.*
ProcessUtilTest.EnableLFH
ScopedNativeLibrary.Basic
# TODO(zhaoqin) investigate the failures and enable it later, 106043
ConditionVariableTest.LargeFastTaskTest
# Next test creates a child that crashes, which naturally generates an
# unaddressable report as well as a handful of leak reports that we don't need
# to see.
ProcessUtilTest.GetTerminationStatusCrash
# See crbug.com/130668
ProcessUtilTest.GetTerminationStatusKill
ProcessUtilTest.KillSlowChild
ProcessUtilTest.SpawnChild
ScopedProcessInformationTest.Duplicate
ScopedProcessInformationTest.Swap
ScopedProcessInformationTest.TakeBoth
ScopedProcessInformationTest.TakeProcess
ScopedProcessInformationTest.TakeWholeStruct
SharedMemoryProcessTest.Tasks
# crbug/144018
ScopedStartupInfoExTest.InheritStdOut

View File

@ -1,10 +0,0 @@
# Don't run this test under TSan, it takes ~1-2 minutes to pass.
ProcessUtilTest.GetAppOutputRestrictedNoZombies
# Don't run Memcheck sanity tests under ThreadSanitizer since they can
# corrupt memory.
ToolsSanityTest.*Memory*
ToolsSanityTest.*Delete*
# TSan doesn't understand SharedMemory locks, see http://crbug.com/45083
StatsTableTest.*MultipleThreads

View File

@ -1,2 +0,0 @@
# http://crbug.com/29855.
StackTrace.OutputToStream

View File

@ -1,23 +0,0 @@
# Occasionally fails under TSan, see http://crbug.com/54229
ProcessUtilTest.CalcFreeMemory
# This file is copied from Valgrind-on-Wine filter
# TODO(timurrrr): include/investigate the listed tests one-by-one
EtwTraceControllerTest.EnableDisable
EtwTraceConsumer*Test.*
EtwTraceProvider*Test.*
JSONReaderTest.Reading
TimeTicks.*
WMIUtilTest.*
# Too slow under TSan
ConditionVariableTest.LargeFastTaskTest
# Fails under TSan: http://crbug.com/93843
MessageLoopTest.RecursiveDenial3
# Crashes under TSan: http://crbug.com/115107
WorkerPoolTest.PostTask
# Times out on Win7, slow on Vista: http://crbug.com/106531
TraceEventTestFixture.DataCapturedManyThreads

View File

@ -1,26 +0,0 @@
# This test currently times out in valgrind, see http://crbug.com/9194
WatchdogTest.AlarmTest
# These tests occasionally hang under Valgrind on Mac. valgrind-darwin r9573
# Revisit with better valgrind.
# Valgrind bug: https://bugs.kde.org/show_bug.cgi?id=189661
TimerTest.RepeatingTimer
TimerTest.RepeatingTimer_Cancel
# Crashes occasionally, see http://crbug.com/7477
ObserverListThreadSafeTest.CrossThreadObserver
ObserverListThreadSafeTest.CrossThreadNotifications
# Hangs sometimes on linux, see http://crbug.com/22138
ClipboardTest.*
# These tests trigger a CHECK so they will leak memory. They don't test
# anything else, so just disable them on valgrind. Bug 28179.
OutOfMemoryDeathTest.*
# Flaky under slow tools or just when the VM is under load.
# See http://crbug.com/43972
ConditionVariableTest.LargeFastTaskTest
# Flaky under Valgrind, see http://crbug.com/55517
PlatformFile.TouchGetInfoPlatformFile

View File

@ -1,18 +0,0 @@
# Fails on Valgrind/Mac, see http://crbug.com/43972
ConditionVariableTest.LargeFastTaskTest
# Fails on Valgrind/Mac due to missing syscall wrapper
# for the symlink() syscall. See http://crbug.com/44001
FileUtilTest.NormalizeFilePathSymlinks
# Fails on Valgrind/Mac, see http://crbug.com/53196
CancellationFlagTest.SetOnDifferentThreadDeathTest
# Fails on Valgrind/Mac, see http://crbug.com/93722
ProcessMemoryTest.MacTerminateOnHeapCorruption
# Fails on Valgrind/Mac, see http://crbug.com/122080
ProcessMemoryTest.MacMallocFailureDoesNotTerminate
# Times out on Valgrind/Mac, see http://crbug.com/172044
MessageLoopTest.RecursivePosts

View File

@ -1,2 +0,0 @@
# Fails natively as well: http://crbug.com/251517
PEImageTest.EnumeratesPE

View File

@ -1,12 +0,0 @@
# Too slow under Valgrind/Wine and TSan/Windows
TimeTicks.WinRollover
# Very sensitive to slowdown
TimeTicks.Deltas
TimeTicks.HighResNow
TimerTest.RepeatingTimer*
# This Windows-native sampling profiler test does not work under our tools
# because it assumes the original code runs, not the modified version
# with instrumentation. See http://crbug.com/106829
SamplingProfilerTest.Sample

View File

@ -1,50 +0,0 @@
# TODO(zhaoqin): File bugs for those failing browser tests.
# Dr.Memory i#1052: http://code.google.com/p/drmemory/issues/detail?id=1052
#
# The list is too long for gtest_filter, so we exclude the whole
# test case if any of its tests failed.
*FLAKY*
# it takes too long to run all browser_tests with Dr.Memory,
# and we only select subset to run
# A*: ~70 tests
A*
# DrM-i#1052-c#52
# AutofillTest.*
# AcceleratedCompositingBlockedTest.*
# AppApiTest.*
# BrowserAccessibilityStateImplTest.*
B*
C*
D*
E*
F*
G*
# H*: ~35 tests
H*
# DrM-i#1052-c#53
# HistoryWebUITest.*
# I*: ~10 tests
# DrM-i#1052-c#53
InfoBarsTest.*
# J*: 0 tests
# K*: 1 tests
L*
M*
N*
O*
P*
Q*
R*
S*
T*
# U*: ~20 tests
# DrM-i#1052-c#53
UnloadTest.*
# V*: 5 tests
# W*: ~150 tests
W*
# X*: 0 tests
# Y*: 0 tests
# Z*: 0 tests

View File

@ -1,60 +0,0 @@
# Don't run FLAKY or FAILS ui tests under Valgrind.
# They tend to generate way too many flaky Valgrind reports.
*FLAKY_*
*FAILS_*
# NaCl tests fail with Data Execution Prevention error http://crbug.com/104517
NaClGdbTest.Empty
PPAPINaClGLibcTest.*
PPAPINaClNewlibTest.*
PPAPINaClTest*
# http://crbug.com/109336
OutOfProcessPPAPITest.View_PageHideShow
# TODO(thestig) File bugs for these failing browser tests.
AllUrlsApiTest.WhitelistedExtension
AppBackgroundPageApiTest.NoJsManifestBackgroundPage
BrowserCloseTest.DownloadsCloseCheck_2
BrowserCloseTest.DownloadsCloseCheck_5
BrowserEncodingTest.SLOW_TestEncodingAliasMapping
BrowserNavigatorTest.Disposition_Bookmarks_DoNothingIfIncognitoIsForced
BrowserNavigatorTest.Disposition_Incognito
BrowserNavigatorTest.Disposition_SyncPromo_DoNothingIfIncognitoIsForced
BrowserTest.ForwardDisabledOnForward
ClickToPlayPluginTest.Basic
ClickToPlayPluginTest.LoadAllBlockedPlugins
ClickToPlayPluginTest.NoCallbackAtLoad
DevToolsExperimentalExtensionTest.TestDevToolsExperimentalExtensionAPI
DevToolsExtensionTest.TestDevToolsExtensionMessaging
DownloadExtensionTest.DownloadExtensionTest_FileIcon_Active
DownloadExtensionTest.DownloadExtensionTest_FileIcon_History
DownloadExtensionTest.DownloadExtensionTest_SearchPauseResumeCancelGetFileIconIncognito
DownloadExtensionTestIncognito.DownloadExtensionTest_SearchPauseResumeCancelGetFileIconIncognito
ErrorPageTest.DNSError_Basic
ErrorPageTest.DNSError_GoBack1
ExecuteScriptApiTest.ExecuteScriptPermissions
ExtensionApiTest.FontSettingsIncognito
ExtensionApiTest.PopupBlockingExtension
ExtensionApiTest.PopupBlockingHostedApp
FastShutdown.SlowTermination
IndexedDBLayoutTest.IndexTests
NetInternalsTest.netInternalsPrerenderViewFail
NewTabUIBrowserTest.LoadNTPInExistingProcess
OutOfProcessPPAPITest.NetAddressPrivate_GetAnyAddress
OutOfProcessPPAPITest.NetAddressPrivate_ReplacePort
PageCyclerCachedBrowserTest.PlaybackMode
PageCyclerCachedBrowserTest.URLNotInCache
PPAPITest.ImeInputEvent
PrerenderBrowserTest.*
PrerenderBrowserTestWithNaCl.PrerenderNaClPluginEnabled
PrintPreviewWebUITest.TestPrinterList
PrintPreviewWebUITest.TestPrinterListCloudEmpty
PrintPreviewWebUITest.TestSectionsDisabled
PrintWebViewHelperTest.BlockScriptInitiatedPrinting
SafeBrowsingBlockingPageTest.MalwareDontProceed
SafeBrowsingBlockingPageTest.ProceedDisabled
SocketApiTest.SocketTCPExtension
SocketApiTest.SocketUDPExtension
SSLUITest.TestWSSInvalidCertAndGoForward
WebViewTest.Shim

View File

@ -1,2 +0,0 @@
# http://crbug.com/159234.
WebContentsVideoCaptureDeviceTest.*

View File

@ -1,3 +0,0 @@
# Flaky, see http://crbug.com/227278
WebContentsVideoCaptureDeviceTest.WebContentsDestroyed
CompositingIOSurfaceTransformerTest.*

View File

@ -1,52 +0,0 @@
# http://crbug.com/93245
GeolocationGatewayDataProviderCommonTest.*
GeolocationWifiDataProviderCommonTest.*
# Flaky, see http://crbug.com/131154
WebRTCAudioDeviceTest.FullDuplexAudioWithAGC
# Flaky, see http://crbug.com/155284
WebRTCAudioDeviceTest.StartRecording
WebRTCAudioDeviceTest.PlayLocalFile
# Fail/crash, see http://crbug.com/151939
WebDragDestTest.URL
WebDragDestTest.Data
WebDragSourceMacTest.DragInvalidlyEscapedBookmarklet
# Fail, see http://crbug.com/153007
MacSandboxTest.ClipboardAccess
# mach_override assertion, see http://crbug.com/162728
BlobURLRequestJobTest.*
# Fail, see http://crbug.com/159234
WebContentsVideoCaptureDeviceTest.GoesThroughAllTheMotions
WebContentsVideoCaptureDeviceTest.BadFramesGoodFrames
# Hang at arbitrary point, can't tell where exactly, see http://crbug.com/163314
RenderWidgetHostViewMacTest.*
WebContentsVideoCaptureDeviceTest.*
RenderViewHostTest.*
DeviceMotionEventPumpTest.*
# Speculative disable of hanging tests. http://crbug.com/241919
VideoCaptureControllerTest.*
VideoCaptureHostTest.*
# Hangs under Valgrind, see http://crbug.com/244257
SmoothScrollGestureControllerTest.Tick
# http://crbug.com/247163
VideoCaptureManagerTest.CloseWithoutStop
VideoCaptureManagerTest.CreateAndClose
VideoCaptureManagerTest.StartUsingId
WebRTCAudioDeviceTest.WebRtcPlayoutSetupTime
WebRTCAudioDeviceTest.WebRtcRecordingSetupTime
# http://crbug.com/247601
FontSerializationTest.StyledFonts
MacSandboxTest.FontLoadingTest
# http://crbug.com/270254
DeviceOrientationEventPumpTest.*

View File

@ -1,2 +0,0 @@
# Too slow under TSan
RSAPrivateKeyUnitTest.*

View File

@ -1,34 +0,0 @@
# These test fail due to mmap Valgrind failures, see http://crbug.com/66677
CollectedCookiesTest.DoubleDisplay
CollectedCookiesTest.NavigateAway
InfoBarsUITest.TestInfoBarsCloseOnNewTheme
FastShutdown.SlowTermination
MouseLeaveTest.TestOnMouseOut
NotificationsPermissionTest.TestNoUserGestureInfobar
NotificationsPermissionTest.TestUserGestureInfobar
# These test fail due to timeout or limited buildslave support;
# http://crbug.com/67301
BrowserFocusTest.InterstitialFocus
BrowserFocusTest.FindFocusTest
BrowserFocusTest.FocusTraversalOnInterstitial
# Don't run FLAKY or FAILS tests under Valgrind and TSan
# as they tend to generate too many reports, see http://crbug.com/67959
# NB: Can't use FAILS_/FLAKY_ as it will be turned into *.* by chrome_tests.py!
*.FLAKY*
*.FAILS*
# Fails under Valgrind, see http://crbug.com/68068
DevToolsSanityTest.TestPauseWhenScriptIsRunning
# These tests time out under Valgrind, see http://crbug.com/163880
BrowserFocusTest.FocusOnReload
CommandsApiTest.Basic
ExtensionApiTest.NotificationsHasPermissionManifest
ExtensionCrashRecoveryTest.ReloadTabsWithBackgroundPage
ExtensionCrashRecoveryTest.TwoExtensionsCrashBothAtOnce
ExtensionCrashRecoveryTest.TwoExtensionsCrashFirst
ExtensionCrashRecoveryTest.TwoExtensionsOneByOne
FullscreenControllerInteractiveTest.TestTabExitsMouseLockOnNavigation
OmniboxViewTest.Escape

View File

@ -1,2 +0,0 @@
# TODO(timurrrr): investigate
IPCSyncChannelTest.*

View File

@ -1,2 +0,0 @@
# Fails under TSan, see http://crbug.com/62511
IPCSyncChannelTest.BadMessage

View File

@ -1,6 +0,0 @@
# Takes 27-40 seconds to run.
IPCSyncChannelTest.ChattyServer
# Hangs on Linux sometimes. See http://crbug.com/22141
IPCChannelTest.ChannelTest
# Crashes under Valgrind. See http://crbug.com/46782
IPCSyncChannelTest.Multiple

View File

@ -1,4 +0,0 @@
# Hangs under Dr. Memory
# http://code.google.com/p/drmemory/issues/detail?id=978
WinAudioTest.SyncSocketBasic
AudioBusTest.CopyTo

View File

@ -1,3 +0,0 @@
# Win TSan disturbs ffmpeg's output, causing hash comparison assertion to fail.
# http://crbug.com/120396
PipelineIntegrationTest.BasicPlaybackHashed

View File

@ -1,3 +0,0 @@
# This test tries to record fake audio in real-time.
# This appears to be too sensitive to slowdown, see http://crbug.com/49497
FakeAudioInputTest.BasicCallbacks

View File

@ -1,3 +0,0 @@
# Fails http://crbug.com/256911
MessageCenterImplTest.PopupTimersControllerResetTimer
MessageCenterImplTest.PopupTimersControllerStartMultipleTimersPause

View File

@ -1,3 +0,0 @@
# http://code.google.com/p/drmemory/issues/detail?id=842
# Failing and then crashing.
HttpNetworkTransationSpdy21Test.HttpsProxySpdy*

View File

@ -1,24 +0,0 @@
# See http://crbug.com/82391
URLRequestTestHTTP.HTTPSToHTTPRedirectNoRefererTest
# Times out. See http://crbug.com/134313
URLRequestTestHTTP.GetTest_ManyCookies
# Dr. Memory hits an assertion:
# http://code.google.com/p/drmemory/issues/detail?id=422
HttpAuthTest.*
HttpAuthHandlerFactoryTest.*
X509CertificateTest.*
# Too many uninits and too slow. TODO(timurrrr): investigate uninits
ProxyResolverV8Test.*
# Slow
CookieMonsterTest.GarbageCollectionTriggers
# Hangs only when built in release mode.
# http://crbug.com/105762
ClientSocketPoolBaseTest.DisableCleanupTimer
# Flaky, see http://crbug.com/108422
SSLClientSocketTest.ConnectMismatched

View File

@ -1,26 +0,0 @@
# These tests leak data intentionally, so are inappropriate for Valgrind tests.
# Similar list in ../purify/net_unittests.exe.gtest.txt
# TODO(dkegel): either merge the two files or keep them in sync,
# see http://code.google.com/p/chromium/issues/detail?id=8951
DiskCacheBackendTest.AppCacheInvalidEntry
DiskCacheBackendTest.AppCacheInvalidEntryRead
DiskCacheBackendTest.AppCacheInvalidEntryWithLoad
DiskCacheBackendTest.InvalidEntry
DiskCacheBackendTest.InvalidEntryRead
DiskCacheBackendTest.InvalidEntryWithLoad
DiskCacheBackendTest.TrimInvalidEntry
DiskCacheBackendTest.TrimInvalidEntry2
DiskCacheBackendTest.InvalidEntryEnumeration
DiskCacheBackendTest.NewEvictionInvalidEntry
DiskCacheBackendTest.NewEvictionInvalidEntryRead
DiskCacheBackendTest.NewEvictionInvalidEntryWithLoad
DiskCacheBackendTest.NewEvictionTrimInvalidEntry
DiskCacheBackendTest.NewEvictionTrimInvalidEntry2
DiskCacheBackendTest.NewEvictionInvalidEntryEnumeration
DiskCacheBackendTest.ShutdownWithPendingCreate_Fast
DiskCacheBackendTest.ShutdownWithPendingFileIO_Fast
DiskCacheBackendTest.ShutdownWithPendingIO_Fast
# flaky failure on Linux Tests (valgrind)(2),
# see http://code.google.com/p/chromium/issues/detail?id=117196
SSLClientSocketTest.VerifyReturnChainProperlyOrdered

View File

@ -1,27 +0,0 @@
# These huge tests are flaky and sometimes crash the following tests.
# See http://crbug.com/50346
DiskCacheEntryTest.*HugeSparse*
# SPDY tests tend to crash on both Mac and Windows.
# See http://crbug.com/51144
Spdy/SpdyNetworkTransactionTest.SocketWriteReturnsZero*
# See http://crbug.com/50918
Spdy/SpdyNetworkTransactionTest.CancelledTransactionSendRst*
# See http://crbug.com/51087
Spdy*
# See http://crbug.com/44570
HttpNetworkTransactionTest.StopsReading204
# See http://crbug.com/51145
HttpNetworkTransactionTest.Incomplete100ThenEOF
HttpNetworkTransactionTest.UseAlternateProtocolForNpnSpdyWithExistingSpdySession
HttpNetworkTransactionTest.KeepAliveConnectionEOF
# Crashes silently, see http://crbug.com/76911
URLRequestTest.FileTest
# http://crbug.com/92439
ServerBoundCertServiceTest.*
# Flaky, see http://crbug.com/259781
EmbeddedTestServerTest.*

View File

@ -1,11 +0,0 @@
# WebSocketTest tests are extraordinary slow under ThreadSanitizer,
# (see http://crbug.com/25392)
# TODO(glider): investigate this.
WebSocketTest.*
# Strange reports from __NSThread__main__ appeared with the new TSan binaries
# See http://crbug.com/38926
DirectoryLister*
# Looks like http://crbug.com/78536 depends on this test.
CookieMonsterTest.GarbageCollectionTriggers

View File

@ -1,40 +0,0 @@
# These tests fail due to unknown reasons
# TODO(timurrrr): investigate
CookieMonsterTest.TestLastAccess
SpdyNetwork*Error*
SpdyNetwork*Get*
SpdyNetworkTransactionTest.SynReplyHeadersVary
X509CertificateTest.UnoSoftCertParsing
URLRequestTest.DoNotSaveCookies
URLRequestTest.QuitTest
# See http://crbug.com/47836
ClientSocketPoolBaseTest.CancelPendingSocketAtSocketLimit
# Single-threaded and relatively slow - no reason to test
# See http://crbug.com/59642
CookieMonsterTest.GarbageCollectionTriggers
# Time out, see http://crbug.com/68482
SSLServerSocketTest.*
# See http://crbug.com/102330
SSLClientSocketTest.*
# See http://crbug.com/104805
HostResolverImplTest.AbortOnlyExistingRequestsOnIPAddressChange
# Times out occasionally, http://crbug.com/124452
HostResolverImplTest.StartWithinCallback
# Crash. See crbug.com/234776.
DiskCacheEntryTest.EvictOldEntries
DiskCacheEntryTest.NewEvictionEvictOldEntries
# Hang. crbug.com/265647.
NetworkChangeNotifierWinTest.NetChangeWinBasic
NetworkChangeNotifierWinTest.NetChangeWinSignal
NetworkChangeNotifierWinTest.NetChangeWinFailSignal*

View File

@ -1,17 +0,0 @@
# Very slow under Valgrind.
KeygenHandlerTest.*SmokeTest
KeygenHandlerTest.*ConcurrencyTest
# Hangs, see http://crbug.com/61908
DirectoryListerTest.BigDirRecursiveTest
# http://crbug.com/88228
SSLClientSocketTest.Connect
SSLClientSocketTest.ConnectClientAuthSendNullCert
# These tests are broken http://crbug.com/118883
*SpdyNetworkTransactionSpdy*Test.*
*SpdyHttpStreamSpdy*Test.*
# Fails flakily. http://crbug.com/255775
SimpleIndexFileTest.WriteThenLoadIndex

View File

@ -1,5 +0,0 @@
# Flaky. crbug.com/234776
DiskCacheEntryTest.SimpleCacheStreamAccess
DiskCacheEntryTest.SimpleCacheGrowData
DiskCacheEntryTest.SimpleCacheSizeChanges

View File

@ -1,6 +0,0 @@
# Very slow under Valgrind, (see <http://crbug.com/37289>).
KeygenHandlerTest.SmokeTest
# These tests fail under Valgrind on Mac, see http://crbug.com/62314
SSLClientSocketTest.*
HTTPSRequestTest.*

View File

@ -1,3 +0,0 @@
# CreateDC returns NULL, see http://crbug.com/73652
PrintingContextTest.Base
PrintingContextTest.PrintAll

View File

@ -1,9 +0,0 @@
# This test fails on an assertion, see http://crbug.com/57266
EncoderVp8Test.TestEncoder
DecoderVp8Test.EncodeAndDecode
# These test intentionally generate exceptions to verify if a dump is generated
# during the crash.
BreakpadWinDeathTest.TestAccessViolation
BreakpadWinDeathTest.TestInvalidParameter
BreakpadWinDeathTest.TestDebugbreak

View File

@ -1,2 +0,0 @@
# These tests load mstscax.dll, which generates a bunch of race reports, see http://crbug.com/177832
RdpClientTest.*

View File

@ -1,2 +0,0 @@
# Fails natively as well: http://crbug.com/251517
RdpClientTest.Basic

View File

@ -1,2 +0,0 @@
# Fails on Valgrind/Mac, see http://crbug.com/69280
SafeBrowsingServiceTest.SafeBrowsingSystemTest

View File

@ -1,39 +0,0 @@
{
Test DiskCacheBackendTest.InvalidEntryEnumeration leaks.
Memcheck:Leak
fun:_Znwj
fun:_ZN10disk_cache12StorageBlockINS_12RankingsNodeEE12AllocateDataEv
fun:_ZN10disk_cache12StorageBlockINS_12RankingsNodeEE4LoadEv
fun:_ZN10disk_cache9EntryImpl15LoadNodeAddressEv
fun:_ZN10disk_cache11BackendImpl8NewEntryENS_4AddrEPPNS_9EntryImplEPb
fun:_ZN10disk_cache11BackendImpl10MatchEntryERKSsjb
fun:_ZN10disk_cache11BackendImpl9OpenEntryERKSsPPNS_5EntryE
fun:_ZN49DiskCacheBackendTest_InvalidEntryEnumeration_Test8TestBodyEv
fun:_ZN7testing4Test3RunEv
}
{
Test DiskCacheBackendTest.InvalidEntryRead leaks.
Memcheck:Leak
fun:_Znwj
fun:_ZN10disk_cache11BackendImpl8NewEntryENS_4AddrEPPNS_9EntryImplEPb
fun:_ZN10disk_cache11BackendImpl10MatchEntryERKSsjb
fun:_ZN10disk_cache11BackendImpl9OpenEntryERKSsPPNS_5EntryE
fun:_ZN42DiskCacheBackendTest_InvalidEntryRead_Test8TestBodyEv
fun:_ZN7testing4Test3RunEv
}
{
Test DiskCacheBackendTest.InvalidEntryWithLoad leaks.
Memcheck:Leak
fun:_Znwj
fun:_ZN10disk_cache11BackendImpl11CreateEntryERKSsPPNS_5EntryE
fun:_ZN46DiskCacheBackendTest_InvalidEntryWithLoad_Test8TestBodyEv
fun:_ZN7testing4Test3RunEv
}
{
Test FlipNetworkTransactionTest.WriteError Bug 29004
Memcheck:Leak
fun:_Znw*
...
fun:_ZN3net26FlipNetworkTransactionTest17TransactionHelperERKNS_15HttpRequestInfoEPNS_17DelayedSocketDataE
fun:_ZN3net42FlipNetworkTransactionTest_WriteError_Test8TestBodyEv
}

View File

@ -1,2 +0,0 @@
# Flaky, see http://crbug.com/118370
SyncSchedulerTest.TransientPollFailure

View File

@ -1,5 +0,0 @@
# Flaky, see http://crbug.com/118370
SyncSchedulerTest.TransientPollFailure
# Flaky, http://crbug.com/119467
InvalidationNotifierTest.Basic

View File

@ -1,2 +0,0 @@
# http://crbug.com/222606
RenderTextTest.DisplayRectShowsCursorLTR

View File

@ -1,2 +0,0 @@
# Hangs under TSAN, see http://crbug.com/28332
TextEliderTest.ElideTextLongStrings

View File

@ -1,7 +0,0 @@
# Crashing (!) since forever, needs analysis.
BookmarkNodeDataTest.*
# http://code.google.com/p/drmemory/issues/detail?id=842
# Fails assertion. App data corrupted by DrMemory?
JsonSchemaTest.TestType
JsonSchemaTest.TestNumber

View File

@ -1,69 +0,0 @@
##################################################
# known Dr. Memory bugs:
# http://code.google.com/p/drmemory/issues/detail?id=318
AudioRendererHostTest.*
##################################################
# un-analyzed Dr. Memory bugs:
# http://code.google.com/p/drmemory/issues/detail?id=548
DownloadManagerTest.StartDownload
# http://code.google.com/p/drmemory/issues/detail?id=979
FirefoxProfileImporterTest.Firefox35Importer
# http://code.google.com/p/drmemory/issues/detail?id=980
MetricsLogManagerTest.*
# http://code.google.com/p/drmemory/issues/detail?id=983
ProfileShortcutManagerTest.*
##################################################
# Chromium bugs:
# times out on the bot
# http://crbug.com/87887
VideoCaptureHostTest.*
# crashes due to use-after-free's, http://crbug.com/90980
FirefoxImporterTest.Firefox*NSS3Decryptor
# fails http://crbug.com/92144
ServiceProcessStateTest.ForceShutdown
# fails sporadically: http://crbug.com/108205
MultiProcessLockTest.RecursiveLock
# Poor isolation, DCHECKs when no MessageLoop exists. Breaks when sharded.
# http://crbug.com/117679
WebsiteSettingsModelTest.*
# fails to create thread
# http://crbug.com/144087
DesktopNotificationServiceTest.SettingsForSchemes
TemplateURLFetcherTest.*
# times out on the bot.
# http://crbug.com/148644
GAIAInfoUpdateServiceTest.*
ProfileManagerTest.*
ProfileInfoCacheTest.*
# Failing on the bot. http://crbug.com/167014
BrowserCommandControllerTest.AvatarMenuDisabledWhenOnlyOneProfile
# Failing on the bot. http://crbug.com/168882
UserCloudPolicyStoreTest.LoadWithInvalidFile
UserCloudPolicyStoreTest.LoadWithNoFile
UserCloudPolicyStoreTest.Store
UserCloudPolicyStoreTest.StoreThenClear
UserCloudPolicyStoreTest.StoreThenLoad
UserCloudPolicyStoreTest.StoreTwoTimes
UserCloudPolicyStoreTest.StoreValidationError
# Tests are timing out on the bot. crbug.com/248373.
PnaclTranslationCacheTest.*
# Failing on the bot. crbug.com/266972
OneClickSigninBubbleViewTest.ShowBubble

View File

@ -1,12 +0,0 @@
# Takes too long and may cause bots to time out. http://crbug.com/134400
# This test alone takes 10-15 minutes.
Convolver.SIMDVerification
# Timing issues. http://crbug.com/241051
ExtensionAlarmsTest.*
# SEGV_MAPERR. http://crbug.com/245797
ClientSideDetectionHostTest.NavigationCancelsShouldClassifyUrl
# Test fails on CrOS memcheck only. http://crbug.com/247440
NotificationAudioControllerTest.MultiProfiles

View File

@ -1,12 +0,0 @@
# This test has a possible data race detected by the TSAN bot
# see http://crbug.com/46840
ProfileManagerTest.CreateAndUseTwoProfiles
# Crashing - http://crbug.com/84536
HttpBridgeTest.*
# Takes too long and may cause TSAN bots to time out. http://crbug.com/134400
Convolver.SIMDVerification
# SEGV_MAPERR. http://crbug.com/245797
ClientSideDetectionHostTest.NavigationCancelsShouldClassifyUrl

View File

@ -1,5 +0,0 @@
# http://crbug.com/26214
ExtensionTest.InitFromValueInvalid
# http://crbug.com/38503
TabRestoreServiceTest.DontPersistPostData

View File

@ -1,36 +0,0 @@
# Hangs sometimes; see http://crbug.com/22146
VisitedLinkEventsTest.Coalescense
# Hangs sometimes; see http://crbug.com/22160
VisitedLinkRelayTest.Basics
# Hangs (or takes forever?) reliably on bots; see http://crbug.com/23580
RenderViewTest.ImeComposition
# Hangs sometimes; see http://crbug.com/52844
PredictorTest.MassiveConcurrentLookupTest
# Pure virtual method called: see http://crbug.com/50950
ConnectionTesterTest.RunAllTests
# Following tests fail under valgrind because libjingle has hardcoded
# timeouts for P2P connections, and it makes these tests fail under valgrind.
# TODO(sergeyu): Remove hardcoded timeouts from libjingle.
P2PTransportImplTest.Create
P2PTransportImplTest.ConnectUdp
P2PTransportImplTest.ConnectTcp
P2PTransportImplTest.SendDataUdp
P2PTransportImplTest.SendDataTcp
# Failing on CrOS, see http://crbug.com/79657
SignedSettingsTest.StorePolicyNoPolicyData
# Flaky and not very interesting under Valgrind http://crbug.com/93027
ProcessWatcherTest.ImmediateTermination
# Timing out all over the place. Disabling for now. http://crbug.com/149715
ExtensionWebRequestTest.*
# Timing out all over the place. Disabling for now. http://crbug.com/149882
NativeMessagingTest.*
# Timing out all over the place. Disabling for now. http://crbug.com/164589
StorageInfoProviderTest.*
# Fails under Valgrind, probably timing-related. http://crbug.com/259679
WhitelistManagerTest.DownloadWhitelistRetry

View File

@ -1,34 +0,0 @@
# Fails under Valgrind; see http://crbug.com/36770
URLFetcherBadHTTPSTest.BadHTTPSTest
# Fails under Valgrind; see http://crbug.com/44552
RenderViewTest.OnHandleKeyboardEvent
# http://crbug.com/88221
ConnectionTesterTest.DeleteWhileInProgress
# Crash on CrOS, see http://crbug.com/115979
ClientSideDetectionHostTest.OnPhishingDetectionDoneNotPhishing
ClientSideDetectionHostTest.OnPhishingDetectionDoneVerdictNotPhishing
ClientSideDetectionHostTest.OnPhishingDetectionDoneInvalidVerdict
ClientSideDetectionHostTest.OnPhishingDetectionDoneDisabled
# http://crbug.com/119610
ProfileSyncServiceSessionTest.WriteFilledSessionToNode
ProfileSyncServiceSessionTest.ValidTabs
# http://crbug.com/139652
BackgroundApplicationListModelTest.RandomTest
# http://crbug.com/179427
ExtensionPrefsDelayedInstallInfo.DelayedInstallInfo
ExtensionServiceTest.*
# http://crbug.com/180335
AutocompleteActionPredictorTest.RecommendActionURL
# http://crbug.com/180467
HttpPipeliningCompatibilityClientTest.*
# http://crbug.com/238964
CpuInfoProviderTest.*
# Fails flakily. http://crbug.com/255771
NetworkStatsTestUDP.UDPEcho*

View File

@ -1,39 +0,0 @@
# Times out too often
# crbug.com/15817
IPCSyncChannelTest.*
# Hangs
# http://crbug.com/21890
WebDropTargetTest.URL
WebDropTargetTest.Data
# http://crbug.com/69037
FirefoxImporterTest.Firefox3NSS3Decryptor
# http://crbug.com/69039
ProcessInfoSnapshotMacTest.EffectiveVsRealUserIDTest
# Following tests do not pass memcheck test.
# See http://crbug.com/30393.
NSMenuItemAdditionsTest.TestMOnDifferentLayouts
# Hangs
# See http://crbug.com/75733
BookmarkBarControllerTest.DeleteFromOffTheSideWhileItIsOpen
BookmarkBarControllerTest.HideWhenShowBookmarkBarTrueButDisabled
BookmarkBarControllerTest.HideWhenShowBookmarkBarFalse
# Crashes, see http://crbug.com/86656
MacSandboxTest.FileAccess
# http://crbug.com/87769
BalloonControllerTest.ShowAndCloseTest
BalloonControllerTest.SizesTest
# http://crbug.com/89030
ConnectionTesterTest.DeleteWhileInProgress
# http://crbug.com/93245
GeolocationWifiDataProviderCommonTest.*
# http://crbug.com/96298
FileSystemDirURLRequestJobTest.*
FileSystemURLRequestJobTest.*
FileSystemOperationWriteTest.*

View File

@ -1,77 +0,0 @@
#!/bin/bash

# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Prints a path to Valgrind binaries to be used for Chromium.
# Select the valgrind from third_party/valgrind by default,
# but allow users to override this default without editing scripts and
# without specifying a commandline option

export THISDIR=`dirname $0`

# User may use his own valgrind by giving its path with CHROME_VALGRIND env.
if [ "$CHROME_VALGRIND" = "" ]
then
  # Guess which binaries we should use by uname
  case "$(uname -a)" in
  *Linux*x86_64*)
    PLATFORM="linux_x64"
    ;;
  *Linux*86*)
    PLATFORM="linux_x86"
    ;;
  *Darwin*9.[678].[01]*i386*)
    # Didn't test other kernels.
    PLATFORM="mac"
    ;;
  *Darwin*10.[0-9].[0-9]*i386*)
    PLATFORM="mac_10.6"
    ;;
  *Darwin*10.[0-9].[0-9]*x86_64*)
    PLATFORM="mac_10.6"
    ;;
  *Darwin*11.[0-9].[0-9]*x86_64*)
    PLATFORM="mac_10.7"
    ;;
  *)
    # Fall through with an empty PLATFORM; the checks below will then look
    # for a system-wide valgrind instead of the bundled one.
    echo "Unknown platform:" >&2
    uname -a >&2
    echo "We'll try to search for valgrind binaries installed in /usr/local" >&2
    PLATFORM=
  esac

  if [ "$PLATFORM" != "" ]
  then
    # The binaries should be in third_party/valgrind
    # (checked out from deps/third_party/valgrind/binaries).
    CHROME_VALGRIND="$THISDIR/../../third_party/valgrind/$PLATFORM"

    # TODO(timurrrr): readlink -f is not present on Mac...
    if [ "$PLATFORM" != "mac" ] && \
      [ "$PLATFORM" != "mac_10.6" ] && \
      [ "$PLATFORM" != "mac_10.7" ]
    then
      # Get rid of all "../" dirs
      CHROME_VALGRIND=`readlink -f $CHROME_VALGRIND`
    fi

    if ! test -x $CHROME_VALGRIND/bin/valgrind
    then
      # We couldn't find the binaries in third_party/valgrind
      CHROME_VALGRIND=""
    fi
  fi
fi

# Final sanity check: whether user-supplied or auto-detected, the directory
# must contain an executable bin/valgrind.
if ! test -x $CHROME_VALGRIND/bin/valgrind
then
  echo "Oops, could not find Valgrind binaries in your checkout." >&2
  echo "Please see" >&2
  echo "  http://dev.chromium.org/developers/how-tos/using-valgrind/get-valgrind" >&2
  echo "for the instructions on how to download pre-built binaries." >&2
  exit 1
fi

echo $CHROME_VALGRIND

View File

@ -1 +0,0 @@
*

View File

@ -1,81 +0,0 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
import re
def CheckChange(input_api, output_api):
  """Checks the memcheck suppressions files for bad data.

  Validates every affected suppressions*.txt file: suppression names must be
  unique across all checked files, each suppression must declare a
  "Memcheck:..." error type right after its name, and only known line shapes
  (fun:/obj:/Memcheck:/.../{/}) are allowed.

  Returns:
    A list with a single output_api.PresubmitError describing every problem
    found, or an empty list if the files are clean.
  """
  sup_regex = re.compile(r'suppressions.*\.txt$')
  # Maps suppression name -> (file path, line number) of its first definition,
  # so duplicates can be reported with a useful location.
  suppressions = {}
  errors = []
  check_for_memcheck = False
  # skip_next_line has 3 possible values:
  # - False: don't skip the next line.
  # - 'skip_suppression_name': the next line is a suppression name, skip.
  # - 'skip_param': the next line is a system call parameter error, skip.
  skip_next_line = False
  for f in [x for x in input_api.AffectedFiles()
            if sup_regex.search(x.LocalPath())]:
    for line_num, line in enumerate(f.NewContents(), 1):
      line = line.lstrip()
      if line.startswith('#') or not line:
        continue

      if skip_next_line:
        if skip_next_line == 'skip_suppression_name':
          if 'insert_a_suppression_name_here' in line:
            errors.append('"insert_a_suppression_name_here" is not a valid '
                          'suppression name')
          if line in suppressions:
            # Store/compare the path (not the AffectedFile object) so the
            # message prints a readable location.
            prev_path, prev_line = suppressions[line]
            if f.LocalPath() == prev_path:
              errors.append('suppression with name "%s" at %s line %s '
                            'has already been defined at line %s' %
                            (line, f.LocalPath(), line_num, prev_line))
            else:
              errors.append('suppression with name "%s" at %s line %s '
                            'has already been defined at %s line %s' %
                            (line, f.LocalPath(), line_num,
                             prev_path, prev_line))
          else:
            suppressions[line] = (f.LocalPath(), line_num)
            # The line after the name must declare the Memcheck error kind.
            check_for_memcheck = True
        skip_next_line = False
        continue
      if check_for_memcheck:
        if not line.startswith('Memcheck:'):
          errors.append('"%s" should be "Memcheck:..." in %s line %s' %
                        (line, f.LocalPath(), line_num))
        check_for_memcheck = False
      if line == '{':
        skip_next_line = 'skip_suppression_name'
        continue
      if line == "Memcheck:Param":
        # The next line is the syscall parameter description, not a frame.
        skip_next_line = 'skip_param'
        continue
      if (line.startswith('fun:') or line.startswith('obj:') or
          line.startswith('Memcheck:') or line == '}' or
          line == '...'):
        continue
      errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
                                                            line_num))
  if errors:
    return [output_api.PresubmitError('\n'.join(errors))]
  return []
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook invoked on upload; delegates to CheckChange."""
  results = CheckChange(input_api, output_api)
  return results
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook invoked on commit; delegates to CheckChange."""
  results = CheckChange(input_api, output_api)
  return results
def GetPreferredTrySlaves():
  """Trybots that should be used by default for changes to these files."""
  slaves = ['linux_valgrind', 'mac_valgrind']
  return slaves

File diff suppressed because it is too large Load Diff

View File

@ -1,35 +0,0 @@
# There are three kinds of suppressions in this file:
# 1. Third party stuff we have no control over.
#
# 2. Intentional unit test errors, stuff that is somehow a false positive
# in our own code, or stuff that is so trivial it's not worth fixing.
#
# 3. Suppressions for real chromium bugs that are not yet fixed.
# These should all be in chromium's bug tracking system.
# Periodically we should sweep this file and the bug tracker clean by
# running overnight and removing outdated bugs/suppressions.
#
# TODO(rnk): Should we move all of the Linux-only system library suppressions
# over from suppressions.txt? We'd avoid wasting time parsing and matching
# suppressions on non-Linux, which is basically just Mac.
#
#-----------------------------------------------------------------------
# 1. Third party stuff we have no control over.
{
# The InvalidRead error in rc4_wordconv is intentional.
# https://bugzilla.mozilla.org/show_bug.cgi?id=341127
# TODO(wtc): This invalid read has been fixed in NSS 3.15. Remove this
# suppression when the system NSS libraries in Linux distributions are
# version 3.15 or later.
bug_43113 (Intentional)
Memcheck:Unaddressable
fun:rc4_wordconv
fun:RC4_Encrypt
}
# 2. Intentional unit test errors, stuff that is somehow a false positive
# in our own code, or stuff that is so trivial it's not worth fixing.
# 3. Suppressions for real chromium bugs that are not yet fixed.

File diff suppressed because it is too large Load Diff

View File

@ -1,634 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# memcheck_analyze.py
''' Given a valgrind XML file, parses errors and uniques them.'''
import gdb_helper
from collections import defaultdict
import hashlib
import logging
import optparse
import os
import re
import subprocess
import sys
import time
from xml.dom.minidom import parse
from xml.parsers.expat import ExpatError
import common
# Global symbol table (yuck)
# Set to a gdb_helper.AddressTable by MemcheckAnalyzer.Report() when gdb-based
# symbol resolution is enabled; None disables it.
TheAddressTable = None

# These are regexps that define functions (using C++ mangled names)
# we don't want to see in stack traces while pretty printing
# or generating suppressions.
# Just stop printing the stack/suppression frames when the current one
# matches any of these.
_BORING_CALLERS = common.BoringCallers(mangled=True, use_re_wildcards=True)
def getTextOf(top_node, name):
  ''' Returns the concatenated text content of every DOM child element of
  |top_node| whose tag is |name| (empty string when there are none).
  '''
  pieces = []
  for element in top_node.getElementsByTagName(name):
    for child in element.childNodes:
      if child.nodeType == child.TEXT_NODE:
        pieces.append(child.data)
  return "".join(pieces)
def getCDATAOf(top_node, name):
  ''' Returns the concatenated CDATA content of every DOM child element of
  |top_node| whose tag is |name|, or None when no CDATA text was found.
  '''
  chunks = []
  for element in top_node.getElementsByTagName(name):
    for child in element.childNodes:
      if child.nodeType == child.CDATA_SECTION_NODE:
        chunks.append(child.data)
  text = "".join(chunks)
  if text == "":
    return None
  return text
def shortenFilePath(source_dir, directory):
  '''Returns |directory| with everything up to and including the last
  occurrence of a known build-path prefix (or |source_dir|) removed.'''
  prefixes = ["build/src/", "valgrind/coregrind/", "out/Release/../../"]
  if source_dir:
    prefixes.append(source_dir)

  for prefix in prefixes:
    pos = directory.rfind(prefix)
    if pos != -1:
      directory = directory[pos + len(prefix):]
  return directory
# Constants that give real names to the abbreviations in valgrind XML output.
# Each one is the tag name of the corresponding element inside a <frame>.
INSTRUCTION_POINTER = "ip"    # code address of the frame
OBJECT_FILE = "obj"           # binary/shared object containing the address
FUNCTION_NAME = "fn"          # (mangled) function name
SRC_FILE_DIR = "dir"          # source directory (present only with debug info)
SRC_FILE_NAME = "file"        # source file name (present only with debug info)
SRC_LINE = "line"             # source line number (present only with debug info)
def gatherFrames(node, source_dir):
  '''Extracts the list of stack frames from a <stack> DOM |node|.

  Each frame becomes a dict keyed by the constants above. Frames from the
  first "boring" caller onward are dropped. Frames lacking line info are
  registered with the global TheAddressTable (when enabled) for later
  gdb-based resolution.
  '''
  frames = []
  for frame in node.getElementsByTagName("frame"):
    frame_dict = {
      INSTRUCTION_POINTER : getTextOf(frame, INSTRUCTION_POINTER),
      OBJECT_FILE : getTextOf(frame, OBJECT_FILE),
      FUNCTION_NAME : getTextOf(frame, FUNCTION_NAME),
      SRC_FILE_DIR : shortenFilePath(
        source_dir, getTextOf(frame, SRC_FILE_DIR)),
      SRC_FILE_NAME : getTextOf(frame, SRC_FILE_NAME),
      SRC_LINE : getTextOf(frame, SRC_LINE)
    }

    # Ignore this frame and all the following if it's a "boring" function.
    enough_frames = False
    for regexp in _BORING_CALLERS:
      if re.match("^%s$" % regexp, frame_dict[FUNCTION_NAME]):
        enough_frames = True
        break
    if enough_frames:
      break

    frames += [frame_dict]

    global TheAddressTable
    if TheAddressTable != None and frame_dict[SRC_LINE] == "":
      # Try using gdb
      TheAddressTable.Add(frame_dict[OBJECT_FILE],
                          frame_dict[INSTRUCTION_POINTER])
  return frames
class ValgrindError:
  ''' Takes a <DOM Element: error> node and reads all the data from it. A
  ValgrindError is immutable and is hashed on its pretty printed output.
  '''

  def __init__(self, source_dir, error_node, commandline, testcase):
    ''' Copies all the relevant information out of the DOM and into object
    properties.

    Args:
      error_node: The <error></error> DOM node we're extracting from.
      source_dir: Prefix that should be stripped from the <dir> node.
      commandline: The command that was run under valgrind
      testcase: The test case name, if known.
    '''

    # Valgrind errors contain one <what><stack> pair, plus an optional
    # <auxwhat><stack> pair, plus an optional <origin><what><stack></origin>,
    # plus (since 3.5.0) a <suppression></suppression> pair.
    # (Origin is nicely enclosed; too bad the other two aren't.)
    # The most common way to see all three in one report is
    # a syscall with a parameter that points to uninitialized memory, e.g.
    # Format:
    # <error>
    #   <unique>0x6d</unique>
    #   <tid>1</tid>
    #   <kind>SyscallParam</kind>
    #   <what>Syscall param write(buf) points to uninitialised byte(s)</what>
    #   <stack>
    #     <frame>
    #     ...
    #     </frame>
    #   </stack>
    #   <auxwhat>Address 0x5c9af4f is 7 bytes inside a block of ...</auxwhat>
    #   <stack>
    #     <frame>
    #     ...
    #     </frame>
    #   </stack>
    #   <origin>
    #   <what>Uninitialised value was created by a heap allocation</what>
    #   <stack>
    #     <frame>
    #     ...
    #     </frame>
    #   </stack>
    #   </origin>
    #   <suppression>
    #     <sname>insert_a_suppression_name_here</sname>
    #     <skind>Memcheck:Param</skind>
    #     <skaux>write(buf)</skaux>
    #     <sframe> <fun>__write_nocancel</fun> </sframe>
    #     ...
    #     <sframe> <fun>main</fun> </sframe>
    #     <rawtext>
    # <![CDATA[
    # {
    #    <insert_a_suppression_name_here>
    #    Memcheck:Param
    #    write(buf)
    #    fun:__write_nocancel
    #    ...
    #    fun:main
    # }
    # ]]>
    #     </rawtext>
    #   </suppression>
    # </error>
    #
    # Each frame looks like this:
    #  <frame>
    #    <ip>0x83751BC</ip>
    #    <obj>/data/dkegel/chrome-build/src/out/Release/base_unittests</obj>
    #    <fn>_ZN7testing8internal12TestInfoImpl7RunTestEPNS_8TestInfoE</fn>
    #    <dir>/data/dkegel/chrome-build/src/testing/gtest/src</dir>
    #    <file>gtest-internal-inl.h</file>
    #    <line>655</line>
    #  </frame>
    # although the dir, file, and line elements are missing if there is
    # no debug info.

    self._kind = getTextOf(error_node, "kind")
    self._backtraces = []      # list of [description, list-of-frame-dicts]
    self._suppression = None   # raw CDATA suppression text, if present
    self._commandline = commandline
    self._testcase = testcase
    self._additional = []      # descriptions that arrived without a stack

    # Iterate through the nodes, parsing <what|auxwhat><stack> pairs.
    description = None
    for node in error_node.childNodes:
      if node.localName == "what" or node.localName == "auxwhat":
        description = "".join([n.data for n in node.childNodes
                               if n.nodeType == n.TEXT_NODE])
      elif node.localName == "xwhat":
        description = getTextOf(node, "text")
      elif node.localName == "stack":
        # A stack always belongs to the description that preceded it.
        assert description
        self._backtraces.append([description, gatherFrames(node, source_dir)])
        description = None
      elif node.localName == "origin":
        description = getTextOf(node, "what")
        stack = node.getElementsByTagName("stack")[0]
        frames = gatherFrames(stack, source_dir)
        self._backtraces.append([description, frames])
        description = None
        stack = None
        frames = None
      elif description and node.localName != None:
        # The lastest description has no stack, e.g. "Address 0x28 is unknown"
        self._additional.append(description)
        description = None

      if node.localName == "suppression":
        self._suppression = getCDATAOf(node, "rawtext");

  def __str__(self):
    ''' Pretty print the type and backtrace(s) of this specific error,
    including suppression (which is just a mangled backtrace).'''
    output = ""
    if (self._commandline):
      output += self._commandline + "\n"

    output += self._kind + "\n"
    for backtrace in self._backtraces:
      output += backtrace[0] + "\n"
      # Demangle all the function names of this backtrace in one c++filt call.
      filter = subprocess.Popen("c++filt -n", stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                shell=True,
                                close_fds=True)
      buf = ""
      for frame in backtrace[1]:
        buf += (frame[FUNCTION_NAME] or frame[INSTRUCTION_POINTER]) + "\n"
      (stdoutbuf, stderrbuf) = filter.communicate(buf.encode('latin-1'))
      demangled_names = stdoutbuf.split("\n")

      i = 0
      for frame in backtrace[1]:
        output += ("  " + demangled_names[i])
        i = i + 1

        global TheAddressTable
        if TheAddressTable != None and frame[SRC_FILE_DIR] == "":
          # Try using gdb
          foo = TheAddressTable.GetFileLine(frame[OBJECT_FILE],
                                            frame[INSTRUCTION_POINTER])
          if foo[0] != None:
            output += (" (" + foo[0] + ":" + foo[1] + ")")
        elif frame[SRC_FILE_DIR] != "":
          output += (" (" + frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] +
                     ":" + frame[SRC_LINE] + ")")
        else:
          output += " (" + frame[OBJECT_FILE] + ")"
        output += "\n"

    for additional in self._additional:
      output += additional + "\n"

    assert self._suppression != None, "Your Valgrind doesn't generate " \
                                      "suppressions - is it too old?"

    if self._testcase:
      output += "The report came from the `%s` test.\n" % self._testcase
    output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
    output += ("  For more info on using suppressions see "
        "http://dev.chromium.org/developers/tree-sheriffs/sheriff-details-chromium/memory-sheriff#TOC-Suppressing-memory-reports")

    # Widen suppression slightly to make portable between mac and linux
    # TODO(timurrrr): Oops, these transformations should happen
    # BEFORE calculating the hash!
    supp = self._suppression;
    supp = supp.replace("fun:_Znwj", "fun:_Znw*")
    supp = supp.replace("fun:_Znwm", "fun:_Znw*")
    supp = supp.replace("fun:_Znaj", "fun:_Zna*")
    supp = supp.replace("fun:_Znam", "fun:_Zna*")

    # Make suppressions even less platform-dependent.
    for sz in [1, 2, 4, 8]:
      supp = supp.replace("Memcheck:Addr%d" % sz, "Memcheck:Unaddressable")
      supp = supp.replace("Memcheck:Value%d" % sz, "Memcheck:Uninitialized")
    supp = supp.replace("Memcheck:Cond", "Memcheck:Uninitialized")

    # Split into lines so we can enforce length limits
    supplines = supp.split("\n")
    supp = None  # to avoid re-use

    # Truncate at line 26 (VG_MAX_SUPP_CALLERS plus 2 for name and type)
    # or at the first 'boring' caller.
    # (https://bugs.kde.org/show_bug.cgi?id=199468 proposes raising
    # VG_MAX_SUPP_CALLERS, but we're probably fine with it as is.)
    newlen = min(26, len(supplines));

    # Drop boring frames and all the following.
    enough_frames = False
    for frameno in range(newlen):
      for boring_caller in _BORING_CALLERS:
        if re.match("^ +fun:%s$" % boring_caller, supplines[frameno]):
          newlen = frameno
          enough_frames = True
          break
      if enough_frames:
        break
    if (len(supplines) > newlen):
      # Re-close the suppression after truncation so it stays well-formed.
      supplines = supplines[0:newlen]
      supplines.append("}")

    for frame in range(len(supplines)):
      # Replace the always-changing anonymous namespace prefix with "*".
      m = re.match("( +fun:)_ZN.*_GLOBAL__N_.*\.cc_" +
                   "[0-9a-fA-F]{8}_[0-9a-fA-F]{8}(.*)",
                   supplines[frame])
      if m:
        supplines[frame] = "*".join(m.groups())

    output += "\n".join(supplines) + "\n"

    return output

  def UniqueString(self):
    ''' String to use for object identity. Don't print this, use str(obj)
    instead.'''
    rep = self._kind + " "
    for backtrace in self._backtraces:
      for frame in backtrace[1]:
        rep += frame[FUNCTION_NAME]

        if frame[SRC_FILE_DIR] != "":
          rep += frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME]
        else:
          rep += frame[OBJECT_FILE]

    return rep

  # This is a device-independent hash identifying the suppression.
  # By printing out this hash we can find duplicate reports between tests and
  # different shards running on multiple buildbots
  def ErrorHash(self):
    return int(hashlib.md5(self.UniqueString()).hexdigest()[:16], 16)

  def __hash__(self):
    return hash(self.UniqueString())

  def __eq__(self, rhs):
    # NOTE(review): compares against rhs directly, so equality only holds when
    # rhs is itself a UniqueString()-style string or another object whose
    # __eq__ accepts one — confirm before relying on symmetric comparison.
    return self.UniqueString() == rhs
def log_is_finished(f, force_finish):
  '''Returns True if the valgrind XML log in file object |f| is complete.

  With |force_finish|, an unfinished log that stops at a safe element
  boundary is repaired in place by appending the closing root tag.
  '''
  f.seek(0)
  last_seen = ""
  line = f.readline()
  while line != "":
    if '</valgrindoutput>' in line:
      # Valgrind often has garbage after </valgrindoutput> upon crash.
      f.truncate()
      return True
    last_seen = line
    line = f.readline()
  # Reached EOF without the closing tag: the log is not finished...
  if force_finish and last_seen.strip() in ("</error>", "</errorcounts>",
                                            "</status>"):
    # ...but we can make it up to be parseable.
    f.write("</valgrindoutput>\n")
    return True
  return False
class MemcheckAnalyzer:
  ''' Given a set of Valgrind XML files, parse all the errors out of them,
  unique them and output the results.'''

  # Names of the suppressions that the deliberate-error sanity test is
  # expected to hit, mapped to the exact expected hit count.
  SANITY_TEST_SUPPRESSIONS = {
    "Memcheck sanity test 01 (memory leak).": 1,
    "Memcheck sanity test 02 (malloc/read left).": 1,
    "Memcheck sanity test 03 (malloc/read right).": 1,
    "Memcheck sanity test 04 (malloc/write left).": 1,
    "Memcheck sanity test 05 (malloc/write right).": 1,
    "Memcheck sanity test 06 (new/read left).": 1,
    "Memcheck sanity test 07 (new/read right).": 1,
    "Memcheck sanity test 08 (new/write left).": 1,
    "Memcheck sanity test 09 (new/write right).": 1,
    "Memcheck sanity test 10 (write after free).": 1,
    "Memcheck sanity test 11 (write after delete).": 1,
    "Memcheck sanity test 12 (array deleted without []).": 1,
    "Memcheck sanity test 13 (single element deleted with []).": 1,
    "Memcheck sanity test 14 (malloc/read uninit).": 1,
    "Memcheck sanity test 15 (new/read uninit).": 1,
  }

  # Max time to wait for memcheck logs to complete.
  LOG_COMPLETION_TIMEOUT = 180.0

  def __init__(self, source_dir, show_all_leaks=False, use_gdb=False):
    '''Create a parser for Memcheck logs.

    Args:
      source_dir: Path to top of source tree for this build
      show_all_leaks: Whether to show even less important leaks
      use_gdb: Whether to use gdb to resolve source filenames and line numbers
               in the report stacktraces
    '''
    self._source_dir = source_dir
    self._show_all_leaks = show_all_leaks
    self._use_gdb = use_gdb

    # Contains the set of unique errors
    self._errors = set()

    # Contains the time when the we started analyzing the first log file.
    # This variable is used to skip incomplete logs after some timeout.
    self._analyze_start_time = None


  def Report(self, files, testcase, check_sanity=False):
    '''Reads in a set of files and prints Memcheck report.

    Args:
      files: A list of filenames.
      check_sanity: if true, search for SANITY_TEST_SUPPRESSIONS

    Returns 0 on success, -1 if errors were reported, -2 if a log could not
    be parsed, -3 if the sanity check failed.
    '''
    # Beyond the detailed errors parsed by ValgrindError above,
    # the xml file contain records describing suppressions that were used:
    # <suppcounts>
    #  <pair>
    #    <count>28</count>
    #    <name>pango_font_leak_todo</name>
    #  </pair>
    #  <pair>
    #    <count>378</count>
    #    <name>bug_13243</name>
    #  </pair>
    # </suppcounts
    # Collect these and print them at the end.
    #
    # With our patch for https://bugs.kde.org/show_bug.cgi?id=205000 in,
    # the file also includes records of the form
    # <load_obj><obj>/usr/lib/libgcc_s.1.dylib</obj><ip>0x27000</ip></load_obj>
    # giving the filename and load address of each binary that was mapped
    # into the process.

    global TheAddressTable
    if self._use_gdb:
      TheAddressTable = gdb_helper.AddressTable()
    else:
      TheAddressTable = None
    cur_report_errors = set()
    suppcounts = defaultdict(int)
    badfiles = set()

    if self._analyze_start_time == None:
      self._analyze_start_time = time.time()
    start_time = self._analyze_start_time

    parse_failed = False
    for file in files:
      # Wait up to three minutes for valgrind to finish writing all files,
      # but after that, just skip incomplete files and warn.
      f = open(file, "r+")
      pid = re.match(".*\.([0-9]+)$", file)
      if pid:
        pid = pid.groups()[0]
      found = False
      running = True
      firstrun = True
      skip = False
      origsize = os.path.getsize(file)
      while (running and not found and not skip and
             (firstrun or
              ((time.time() - start_time) < self.LOG_COMPLETION_TIMEOUT))):
        firstrun = False
        f.seek(0)
        if pid:
          # Make sure the process is still running so we don't wait for
          # 3 minutes if it was killed. See http://crbug.com/17453
          ps_out = subprocess.Popen("ps p %s" % pid, shell=True,
                                    stdout=subprocess.PIPE).stdout
          if len(ps_out.readlines()) < 2:
            running = False
        else:
          # No PID in the filename: poll once, then give up on this file.
          skip = True
          running = False
        found = log_is_finished(f, False)
        if not running and not found:
          logging.warn("Valgrind process PID = %s is not running but its "
                       "XML log has not been finished correctly.\n"
                       "Make it up by adding some closing tags manually." % pid)
          found = log_is_finished(f, not running)
        if running and not found:
          time.sleep(1)
      f.close()
      if not found:
        badfiles.add(file)
      else:
        newsize = os.path.getsize(file)
        if origsize > newsize+1:
          logging.warn(str(origsize - newsize) +
                       " bytes of junk were after </valgrindoutput> in %s!" %
                       file)
        try:
          parsed_file = parse(file);
        except ExpatError, e:
          # Print a few lines of context around the XML error to ease triage.
          parse_failed = True
          logging.warn("could not parse %s: %s" % (file, e))
          lineno = e.lineno - 1
          context_lines = 5
          context_start = max(0, lineno - context_lines)
          context_end = lineno + context_lines + 1
          context_file = open(file, "r")
          for i in range(0, context_start):
            context_file.readline()
          for i in range(context_start, context_end):
            context_data = context_file.readline().rstrip()
            if i != lineno:
              logging.warn("  %s" % context_data)
            else:
              logging.warn("> %s" % context_data)
          context_file.close()
          continue
        if TheAddressTable != None:
          load_objs = parsed_file.getElementsByTagName("load_obj")
          for load_obj in load_objs:
            obj = getTextOf(load_obj, "obj")
            ip = getTextOf(load_obj, "ip")
            TheAddressTable.AddBinaryAt(obj, ip)
        commandline = None
        preamble = parsed_file.getElementsByTagName("preamble")[0];
        for node in preamble.getElementsByTagName("line"):
          if node.localName == "line":
            for x in node.childNodes:
              if x.nodeType == node.TEXT_NODE and "Command" in x.data:
                commandline = x.data
                break
        raw_errors = parsed_file.getElementsByTagName("error")
        for raw_error in raw_errors:
          # Ignore "possible" leaks for now by default.
          if (self._show_all_leaks or
              getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
            error = ValgrindError(self._source_dir,
                                  raw_error, commandline, testcase)
            if error not in cur_report_errors:
              # We haven't seen such errors doing this report yet...
              if error in self._errors:
                # ... but we saw it in earlier reports, e.g. previous UI test
                cur_report_errors.add("This error was already printed in "
                                      "some other test, see 'hash=#%016X#'" % \
                                      error.ErrorHash())
              else:
                # ... and we haven't seen it in other tests as well
                self._errors.add(error)
                cur_report_errors.add(error)
        suppcountlist = parsed_file.getElementsByTagName("suppcounts")
        if len(suppcountlist) > 0:
          suppcountlist = suppcountlist[0]
          for node in suppcountlist.getElementsByTagName("pair"):
            count = getTextOf(node, "count");
            name = getTextOf(node, "name");
            suppcounts[name] += int(count)

    if len(badfiles) > 0:
      logging.warn("valgrind didn't finish writing %d files?!" % len(badfiles))
      for file in badfiles:
        logging.warn("Last 20 lines of %s :" % file)
        os.system("tail -n 20 '%s' 1>&2" % file)

    if parse_failed:
      logging.error("FAIL! Couldn't parse Valgrind output file")
      return -2

    common.PrintUsedSuppressionsList(suppcounts)

    retcode = 0
    if cur_report_errors:
      logging.error("FAIL! There were %s errors: " % len(cur_report_errors))

      if TheAddressTable != None:
        TheAddressTable.ResolveAll()

      for error in cur_report_errors:
        logging.error(error)

      retcode = -1

    # Report tool's insanity even if there were errors.
    if check_sanity:
      remaining_sanity_supp = MemcheckAnalyzer.SANITY_TEST_SUPPRESSIONS
      for (name, count) in suppcounts.iteritems():
        if (name in remaining_sanity_supp and
            remaining_sanity_supp[name] == count):
          del remaining_sanity_supp[name]
      if remaining_sanity_supp:
        logging.error("FAIL! Sanity check failed!")
        logging.info("The following test errors were not handled: ")
        for (name, count) in remaining_sanity_supp.iteritems():
          logging.info("  * %dx %s" % (count, name))
        retcode = -3

    if retcode != 0:
      return retcode

    logging.info("PASS! No errors found!")
    return 0
def _main():
  '''For testing only. The MemcheckAnalyzer class should be imported instead.'''
  parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
  parser.add_option("", "--source_dir",
                    help="path to top of source tree for this build"
                         "(used to normalize source paths in baseline)")
  options, args = parser.parse_args()
  if not args:
    parser.error("no filename specified")

  analyzer = MemcheckAnalyzer(options.source_dir, use_gdb=True)
  return analyzer.Report(args, None)


if __name__ == "__main__":
  sys.exit(_main())

View File

@ -1,138 +0,0 @@
#!/bin/sh
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Scrape errors from the valgrind bots, reproduce them locally,
# save logs as regrind-TESTNAME.log, and display any errors found.
# Also save files regrind-failed.txt listing failed tests,
# and regrind-failed-map.txt showing which bot URLs have which failed tests
# (handy when filing bugs).
#
# Only scrapes linux layout bot at the moment.
# TODO: handle layout tests that don't have obvious path to test file
# TODO: extend script to handle more kinds of errors and more tests

# where the valgrind layout bot results live
LAYOUT_URL="http://build.chromium.org/p/chromium.memory.fyi/builders/Webkit%20Linux%20(valgrind%20layout)"
# how many builds back to check
LAYOUT_COUNT=250

# regexp to match valgrind errors
PATTERN="are definitely|uninitialised|Unhandled exception|\
Invalid read|Invalid write|Invalid free|Source and desti|Mismatched free|\
unaddressable byte|vex x86|the 'impossible' happened|\
valgrind:.*: Assertion.*failed|VALGRIND INTERNAL ERROR"

usage() {
  echo "Usage: regrind.sh [--noscrape][--norepro][--keep]"
  echo "--noscrape: don't scrape bots, just use old regrind-failed.txt"
  echo "--norepro: don't reproduce locally"
  echo "--keep: keep temp files"
  exit 1
}

# Given a log on stdin, list all the tests that failed in that log.
layout_list_failed_tests() {
  grep "Command:.*LayoutTests" |
  sed 's/<.*>//' |
  sed 's/.*LayoutTests/LayoutTests/' |
  sort -u |
  tr -d '\015'
}

# Generate a list of failed tests in regrind-failed.txt by scraping bot.
# Scrape most recent first, so if user interrupts, he is left with fresh-ish data.
scrape_layout() {
  rm -f regrind-*.tmp* regrind-failed.txt regrind-failed-map.txt
  touch regrind-failed.txt

  # First, grab the number of the latest complete build.
  wget -q -O regrind-builds.html "$LAYOUT_URL"
  latest=`grep "<li><font .*" < regrind-builds.html | head -1 | sed 's/.*#//;s/<.*//'`

  echo "Fetching $LAYOUT_COUNT logs from bot"
  # Scrape the desired number of runs (150 is about one cycle)
  first=`expr $latest - $LAYOUT_COUNT`
  i=$latest
  while test $i -ge $first
  do
    url="$LAYOUT_URL/builds/$i/steps/valgrind%20test:%20layout/logs/stdio"
    wget -q -O regrind-$i.tmp "$url"
    # Did any tests fail in this file?
    layout_list_failed_tests < regrind-$i.tmp > regrind-$i.tmp.failed
    if test -s regrind-$i.tmp.failed
    then
      # Yes.  Log them to stdout,
      echo "$url"
      cat regrind-$i.tmp.failed
      # to the table regrind-failed-map.txt,
      cat regrind-$i.tmp.failed | sed "s,^,$url ," >> regrind-failed-map.txt
      # and, if not already there, to regrind-failed.txt.
      for test in `cat regrind-$i.tmp.failed`
      do
        fgrep "$test" regrind-failed.txt > /dev/null 2>&1 || echo "$test" >> regrind-failed.txt
      done
    else
      rm regrind-$i.tmp.failed
    fi
    # Sleep 1/3 sec per fetch to stay polite to the buildbot master.
    case $i in
    *[036]) sleep 1;;
    esac
    i=`expr $i - 1`
  done

  # Finally, munge the logs to identify tests that probably failed.
  sh c.sh -l regrind-*.tmp > regrind-errfiles.txt
  cat `cat regrind-errfiles.txt` | layout_list_failed_tests > regrind-failed.txt
}

# Run the tests identified in regrind-failed.txt locally under valgrind.
# Save logs in regrind-$TESTNAME.log.
repro_layout() {
  echo Running `wc -l < regrind-failed.txt` layout tests.
  for test in `cat regrind-failed.txt`
  do
    logname="`echo $test | tr / _`"
    echo "sh tools/valgrind/valgrind_webkit_tests.sh $test"
    sh tools/valgrind/valgrind_webkit_tests.sh "$test" > regrind-"$logname".log 2>&1
    egrep "$PATTERN" < regrind-"$logname".log | sed 's/==.*==//'
  done
}

# Parse command-line flags (see usage() above).
do_repro=1
do_scrape=1
do_cleanup=1
while test ! -z "$1"
do
  case "$1" in
  --noscrape) do_scrape=0;;
  --norepro) do_repro=0;;
  --keep) do_cleanup=0;;
  *) usage;;
  esac
  shift
done

echo "WARNING: This script is not supported and may be out of date"

if test $do_scrape = 0 && test $do_repro = 0
then
  usage
fi

if test $do_scrape = 1
then
  scrape_layout
fi

if test $do_repro = 1
then
  repro_layout
fi

if test $do_cleanup = 1
then
  rm -f regrind-errfiles.txt regrind-*.tmp*
fi

View File

@ -1,11 +0,0 @@
www.google.com
maps.google.com
news.google.com
www.youtube.com
build.chromium.org/p/chromium/waterfall
build.chromium.org/p/chromium.memory/console
build.chromium.org/f/chromium/perf/dashboard/overview.html
www.slashdot.org
www.ibanez.co.jp/japan/index.html
www.bbc.co.uk/arabic/
www.uni.edu/becker/chinese2.html

View File

@ -1,227 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import os
import re
import sys
import urllib
import urllib2
# Where all the data lives.
ROOT_URL = "http://build.chromium.org/p/chromium.memory.fyi/builders"

# TODO(groby) - support multi-line search from the command line. Useful when
# scanning for classes of failures, see below.
# Default multi-line marker for a failed memory test on the waterfall page.
SEARCH_STRING = """<p class=\"failure result\">
Failed memory test: content
</p>"""

# Location of the log cache.
CACHE_DIR = "buildlogs.tmp"

# If we don't find anything after searching |CUTOFF| logs, we're probably done.
CUTOFF = 100
def EnsurePath(path):
  """Create directory |path| (and any missing parents) if it does not exist."""
  try:
    os.makedirs(path)
  except OSError as err:
    # An already-existing path is fine; any other failure is a real error.
    if err.errno == errno.EEXIST:
      return
    raise
class Cache(object):
def __init__(self, root_dir):
self._root_dir = os.path.abspath(root_dir)
def _LocalName(self, name):
"""If name is a relative path, treat it as relative to cache root.
If it is absolute and under cache root, pass it through.
Otherwise, raise error.
"""
if os.path.isabs(name):
assert os.path.commonprefix([name, self._root_dir]) == self._root_dir
else:
name = os.path.join(self._root_dir, name)
return name
def _FetchLocal(self, local_name):
local_name = self._LocalName(local_name)
EnsurePath(os.path.dirname(local_name))
if os.path.exists(local_name):
f = open(local_name, 'r')
return f.readlines();
return None
def _FetchRemote(self, remote_name):
try:
response = urllib2.urlopen(remote_name)
except:
print "Could not fetch", remote_name
raise
return response.read()
def Update(self, local_name, remote_name):
local_name = self._LocalName(local_name)
EnsurePath(os.path.dirname(local_name))
blob = self._FetchRemote(remote_name)
f = open(local_name, "w")
f.write(blob)
return blob.splitlines()
def FetchData(self, local_name, remote_name):
result = self._FetchLocal(local_name)
if result:
return result
# If we get here, the local cache does not exist yet. Fetch, and store.
return self.Update(local_name, remote_name)
class Builder(object):
  """One builder (column) on the waterfall; knows how to fetch its logs."""

  def __init__(self, waterfall, name):
    # |waterfall| is the owning Waterfall; used for the root URL, the shared
    # log cache, and latest-build-number lookups.
    self._name = name
    self._waterfall = waterfall

  def Name(self):
    """Return this builder's display name."""
    return self._name

  def LatestBuild(self):
    """Return the most recent build number known for this builder."""
    return self._waterfall.GetLatestBuild(self._name)

  def GetBuildPath(self, build_num):
    """Return the URL of build |build_num| of this builder."""
    return "%s/%s/builds/%d" % (
        self._waterfall._root_url, urllib.quote(self._name), build_num)

  def _FetchBuildLog(self, build_num):
    # Cache key mirrors the remote layout: builds/<builder>/<num>.log.
    local_build_path = "builds/%s" % self._name
    local_build_file = os.path.join(local_build_path, "%d.log" % build_num)
    return self._waterfall._cache.FetchData(local_build_file,
                                            self.GetBuildPath(build_num))

  def _CheckLog(self, build_num, tester):
    # True if |tester| matches on any line of the build's log.
    log_lines = self._FetchBuildLog(build_num)
    return any(tester(line) for line in log_lines)

  def ScanLogs(self, tester):
    """Walk builds from newest to oldest, returning numbers whose log matches.

    Stops at build 0 or after CUTOFF non-matching logs.  Note the miss
    counter is cumulative over the whole scan, not consecutive misses.
    """
    occurrences = []
    build = self.LatestBuild()
    no_results = 0
    while build != 0 and no_results < CUTOFF:
      if self._CheckLog(build, tester):
        occurrences.append(build)
      else:
        no_results = no_results + 1
      build = build - 1
    return occurrences
class Waterfall(object):
  """Scrapes a buildbot waterfall page and exposes its builders via a cache."""

  def __init__(self, root_url, cache_dir):
    self._root_url = root_url
    # Maps builder name -> Builder object.
    self._builders = {}
    # Maps builder name -> latest build number seen on the waterfall page.
    self._top_revision = {}
    self._cache = Cache(cache_dir)

  def Builders(self):
    """Return all known Builder objects (populated by Update/FetchInfo)."""
    return self._builders.values()

  def Update(self):
    """Re-download the waterfall page into the cache, then parse it."""
    self._cache.Update("builders", self._root_url)
    self.FetchInfo()

  def FetchInfo(self):
    """Parse the (cached) waterfall page into builders and build numbers."""
    if self._top_revision:
      # Already parsed; the page is only read once per run.
      return
    html = self._cache.FetchData("builders", self._root_url)
    """ Search for both builders and latest build number in HTML
    <td class="box"><a href="builders/<builder-name>"> identifies a builder
    <a href="builders/<builder-name>/builds/<build-num>"> is the latest build.
    """
    box_matcher = re.compile('.*a href[^>]*>([^<]*)\<')
    build_matcher = re.compile('.*a href=\"builders/(.*)/builds/([0-9]+)\".*')
    last_builder = ""
    for line in html:
      if 'a href="builders/' in line:
        if 'td class="box"' in line:
          # Builder cell: remember the name and create the Builder object.
          last_builder = box_matcher.match(line).group(1)
          self._builders[last_builder] = Builder(self, last_builder)
        else:
          # Build link: belongs to the most recently seen builder cell.
          result = build_matcher.match(line)
          builder = result.group(1)
          assert builder == urllib.quote(last_builder)
          self._top_revision[last_builder] = int(result.group(2))

  def GetLatestBuild(self, name):
    """Return the latest build number for builder |name|."""
    self.FetchInfo()
    assert self._top_revision
    return self._top_revision[name]
class MultiLineChange(object):
  """Callable that detects a multi-line pattern fed to it one line at a time.

  Each tracked line matches by substring, in order, on consecutive calls.
  __call__ returns True exactly when the final tracked line has just been
  matched, False when a line breaks the sequence, and None while a partial
  match is still in progress.
  """

  def __init__(self, lines):
    # Substrings that must appear, in order, on consecutive input lines.
    self._expected = lines
    # Index of the tracked line we are waiting to see next.
    self._position = 0

  def __call__(self, line):
    """Advance the matcher with one line; see the class docstring."""
    if self._expected[self._position] not in line:
      # Sequence broken: start over from the beginning.
      self._position = 0
      return False
    self._position += 1
    if self._position < len(self._expected):
      return None  # partial match in progress
    self._position = 0
    return True
def main(argv):
  """Entry point: either refresh the local log cache or scan it for a term."""
  # Create argument parser.
  parser = argparse.ArgumentParser()
  commands = parser.add_mutually_exclusive_group(required=True)
  commands.add_argument("--update", action='store_true')
  commands.add_argument("--find", metavar='search term')
  args = parser.parse_args()
  # The cache lives next to this script.
  path = os.path.abspath(os.path.dirname(argv[0]))
  cache_path = os.path.join(path, CACHE_DIR)
  fyi = Waterfall(ROOT_URL, cache_path)
  if args.update:
    fyi.Update()
    for builder in fyi.Builders():
      print "Updating", builder.Name()
      # Scan with an always-false tester purely to pull every build log
      # into the local cache.
      builder.ScanLogs(lambda x:False)
  if args.find:
    # --find may contain embedded newlines for a multi-line pattern.
    tester = MultiLineChange(args.find.splitlines())
    fyi.FetchInfo()
    print "SCANNING FOR ", args.find
    for builder in fyi.Builders():
      print "Scanning", builder.Name()
      occurrences = builder.ScanLogs(tester)
      if occurrences:
        min_build = min(occurrences)
        path = builder.GetBuildPath(min_build)
        print "Earliest occurrence in build %d" % min_build
        print "Latest occurrence in build %d" % max(occurrences)
        print "Latest build: %d" % builder.LatestBuild()
        print path
        print "%d total" % len(occurrences)

if __name__ == "__main__":
  sys.exit(main(sys.argv))

View File

@ -1,128 +0,0 @@
#!/bin/sh
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to run tests under tools/valgrind/chrome_tests.sh
# in a loop looking for rare/flaky valgrind warnings, and
# generate suppressions for them, to be later filed as bugs
# and added to our suppressions file.
#
# FIXME: Layout tests are a bit funny - they have their own
# sharding control, and should probably be tweaked to obey
# GTEST_SHARD_INDEX/GTEST_TOTAL_SHARDS like the rest,
# but they take days and days to run, so they are left
# out of this script.
# Must be run from the directory that contains the chrome/ source tree.
if test ! -d chrome
then
  echo "Please run from parent directory of chrome and build directories"
  exit 1
fi
if test "$1" = ""
then
  echo "Usage: shard-all-tests.sh [BUILDTYPE=Release] target [target ...]"
  echo "Example: shard-all-tests.sh ui_tests"
  exit 1
fi
set -x
set -e
# Regexp to match any valgrind error
PATTERN="ERROR SUMMARY: [^0]|are definitely|uninitialised|Unhandled exception|\
Invalid read|Invalid write|Invalid free|Source and desti|Mismatched free|\
unaddressable byte|vex x86|impossible|Assertion|INTERNAL ERROR|finish writing|OUCH"
# Optional leading BUILDTYPE=... argument selects Debug (default) or Release.
BUILDTYPE=Debug
case "$1" in
BUILDTYPE=Debug) BUILDTYPE=Debug ; shift ;;
BUILDTYPE=Release) BUILDTYPE=Release ; shift ;;
BUILDTYPE=*) echo "unknown build type $1"; exit 1;;
*) ;;
esac
# Everything remaining on the command line is a test target.
TESTS="$@"
# Print the build targets needed for $TESTS, one per line: layout_tests
# needs test_shell, ui_tests needs chrome; everything else builds under
# its own name.
what_to_build() {
  echo $TESTS | tr ' ' '\012' | grep -v layout_tests || true
  echo $TESTS | grep -q layout_tests && echo test_shell || true
  echo $TESTS | grep -q ui_tests && echo chrome || true
}
# Wrap xcodebuild to take same arguments as our make, more or less
# (Mac build path; one xcodebuild invocation per known target).
xcodemake() {
  for target in $*
  do
    case $target in
    chrome) xcodebuild -configuration $BUILDTYPE -project chrome/chrome.xcodeproj -target chrome ;;
    ui_tests) xcodebuild -configuration $BUILDTYPE -project chrome/chrome.xcodeproj -target ui_tests ;;
    base_unittests) xcodebuild -configuration $BUILDTYPE -project base/base.xcodeproj -target base_unittests ;;
    net_unittests) xcodebuild -configuration $BUILDTYPE -project net/net.xcodeproj -target net_unittests ;;
    *) echo "dunno how to build $target yet"; exit 1 ;;
    esac
  done
}
# Build the given targets on the current OS.
# $1 is the build type (Debug/Release); remaining arguments are targets.
build_tests() {
  buildtype=$1
  shift
  OS=`uname`
  case $OS in
  Linux)
    # Lame way to autodetect whether 'make' or 'hammer' is in use
    if test -d out
    then
      # BUG FIX: after the shift above, $1 is the first *target*, not the
      # build type; use the saved $buildtype instead.
      make -j4 BUILDTYPE=$buildtype $@
    else
      # fixme: obey buildtype
      hammer $@
    fi
    ;;
  Darwin)
    xcodemake $@
    ;;
  *) echo "don't know how to build on os $OS"
    ;;
  esac
}
TESTS_BUILDABLE=`what_to_build`
echo building $TESTS_BUILDABLE
build_tests $BUILDTYPE $TESTS_BUILDABLE
# Divide each test suite up into 100 shards, as first step
# in tracking down exact source of errors.
export GTEST_TOTAL_SHARDS=100
rm -rf *.vlog *.vtmp || true
# Outer loop: repeat the whole sharded run up to 1000 times looking for
# rare/flaky warnings.
iter=0
while test $iter -lt 1000
do
  for testname in $TESTS
  do
    export GTEST_SHARD_INDEX=0
    while test $GTEST_SHARD_INDEX -lt $GTEST_TOTAL_SHARDS
    do
      i=$GTEST_SHARD_INDEX
      # Keep going even if a shard fails; the logs are inspected below.
      sh tools/valgrind/chrome_tests.sh -b xcodebuild/$BUILDTYPE -t ${testname} --tool_flags="--nocleanup_on_exit" > ${testname}_$i.vlog 2>&1 || true
      mv valgrind.tmp ${testname}_$i.vtmp
      GTEST_SHARD_INDEX=`expr $GTEST_SHARD_INDEX + 1`
    done
  done
  # Save any interesting log files from this iteration
  # Also show interesting lines on stdout, to make tail -f more interesting
  if egrep "$PATTERN" *.vlog
  then
    mkdir -p shard-results/$iter
    mv `egrep -l "$PATTERN" *.vlog` shard-results/$iter
    # ideally we'd only save the .vtmp's corresponding to the .vlogs we saved
    mv *.vtmp shard-results/$iter
  fi
  rm -rf *.vlog *.vtmp || true
  iter=`expr $iter + 1`
done

File diff suppressed because it is too large Load Diff

View File

@ -1,177 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from collections import defaultdict
import os
import re
import subprocess
import sys
import suppressions
def ReadReportsFromFile(filename):
  """Parse a memory-waterfall log file into suppression reports.

  Args:
    filename: path to a log containing "Suppression (error hash=#...#):"
        headers followed by "{ ... }" suppression bodies, with the URL of
        the report on the waterfall as the last line.

  Returns:
    A (reports, url) pair, where reports is a list of
    [error_hash, suppression_text] pairs and url is the last line of the
    file ("" for an empty file).
  """
  # reports is a list of (error hash, report) pairs.
  reports = []
  in_suppression = False
  cur_supp = []
  # This stores the last error hash found while reading the file.
  last_hash = ""
  # Most recently read line; pre-initialized so an empty input file does not
  # make the return statement hit an unbound local (bug in the original).
  line = ""
  # Context manager closes the handle even on error (the original used the
  # Python-2-only file() builtin and leaked the handle).
  with open(filename, 'r') as input_file:
    for line in input_file:
      line = line.strip()
      line = line.replace("</span><span class=\"stdout\">", "")
      line = line.replace("</span><span class=\"stderr\">", "")
      line = line.replace("&lt;", "<")
      line = line.replace("&gt;", ">")
      if in_suppression:
        if line == "}":
          cur_supp += ["}"]
          reports += [[last_hash, "\n".join(cur_supp)]]
          in_suppression = False
          cur_supp = []
          last_hash = ""
        else:
          # Body lines are re-indented by three spaces.
          cur_supp += [" "*3 + line]
      elif line == "{":
        in_suppression = True
        cur_supp = ["{"]
      elif line.find("Suppression (error hash=#") == 0:
        # The 16-character hash sits right after the 25-character prefix.
        last_hash = line[25:41]
  # The line at the end of the file is assumed to store the URL of the report.
  return reports, line
def Demangle(names):
  """Demangle a list of C++ symbols via one c++filt invocation.

  Returns a list of human-readable symbols, one per input name.
  """
  proc = subprocess.Popen(['c++filt', '-n'] + list(names),
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  output, _ = proc.communicate()
  lines = output.split("\n")
  # c++filt terminates every symbol with a newline, so the split always
  # leaves one trailing empty string, which we drop.
  assert len(lines) == len(names) + 1
  return lines[:-1]
def GetSymbolsFromReport(report):
  """Extract every function symbol (the text after "fun:") from a report."""
  marker = "fun:"
  # Keep only lines containing the marker; the symbol is everything after
  # its first occurrence on that line.
  return [line[line.find(marker) + len(marker):]
          for line in report.splitlines()
          if marker in line]
def PrintTopSymbols(symbol_reports, top_count):
"""Print the |top_count| symbols with the most occurrences."""
boring_symbols=['malloc', '_Znw*', 'TestBody']
sorted_reports = sorted(filter(lambda x:x[0] not in boring_symbols,
symbol_reports.iteritems()),
key=lambda x:len(x[1]), reverse=True)
symbols = symbol_reports.keys()
demangled = Demangle(symbols)
assert len(demangled) == len(symbols)
symboltable = dict(zip(symbols, demangled))
print "\n"
print "Top %d symbols" % top_count
for (symbol, suppressions) in sorted_reports[:top_count]:
print "%4d occurrences : %s" % (len(suppressions), symboltable[symbol])
def main(argv):
  """Match waterfall reports against the suppression files.

  Prints every report that no existing suppression matches (subject to the
  optional --symbol-filter) and, optionally, the most frequent symbols.
  """
  supp = suppressions.GetSuppressions()
  # all_reports is a map {report: list of urls containing this report}
  all_reports = defaultdict(list)
  report_hashes = {}
  # Maps mangled symbol -> list of hashes of reports it appears in.
  symbol_reports = defaultdict(list)
  # Create argument parser.
  parser = argparse.ArgumentParser()
  parser.add_argument('--top-symbols', type=int, default=0,
      help='Print a list of the top <n> symbols')
  parser.add_argument('--symbol-filter', action='append',
      help='Filter out all suppressions not containing the specified symbol(s). '
      'Matches against the mangled names')
  parser.add_argument('reports', metavar='report file', nargs='+',
      help='List of report files')
  args = parser.parse_args(argv)
  for f in args.reports:
    f_reports, url = ReadReportsFromFile(f)
    for (hash, report) in f_reports:
      all_reports[report] += [url]
      report_hashes[report] = hash
  reports_count = 0
  for r in all_reports:
    cur_supp = supp['common_suppressions']
    # Choose platform-specific suppression sets based on which bots the
    # report was seen on (identified by substrings of the waterfall URLs).
    if all([re.search("%20Mac%20|mac_valgrind", url)
            for url in all_reports[r]]):
      # Include mac suppressions if the report is only present on Mac
      cur_supp += supp['mac_suppressions']
    elif all([re.search("Windows%20", url) for url in all_reports[r]]):
      # Include win32 suppressions if the report is only present on Windows
      cur_supp += supp['win_suppressions']
    elif all([re.search("Linux%20", url) for url in all_reports[r]]):
      cur_supp += supp['linux_suppressions']
    elif all([re.search("%20Heapcheck", url)
              for url in all_reports[r]]):
      cur_supp += supp['heapcheck_suppressions']
    if all(["DrMemory" in url for url in all_reports[r]]):
      cur_supp += supp['drmem_suppressions']
    if all(["DrMemory%20full" in url for url in all_reports[r]]):
      cur_supp += supp['drmem_full_suppressions']
    # Test if this report is already suppressed
    skip = False
    for s in cur_supp:
      if s.Match(r.split("\n")):
        skip = True
        break
    # Skip reports if none of the symbols are in the report.
    if args.symbol_filter and all(not s in r for s in args.symbol_filter):
      skip = True
    if not skip:
      reports_count += 1
      print "==================================="
      print "This report observed at"
      for url in all_reports[r]:
        print " %s" % url
      print "didn't match any suppressions:"
      print "Suppression (error hash=#%s#):" % (report_hashes[r])
      print r
      print "==================================="
      if args.top_symbols > 0:
        symbols = GetSymbolsFromReport(r)
        for symbol in symbols:
          symbol_reports[symbol].append(report_hashes[r])
  if reports_count > 0:
    print ("%d unique reports don't match any of the suppressions" %
           reports_count)
    if args.top_symbols > 0:
      PrintTopSymbols(symbol_reports, args.top_symbols)
  else:
    print "Congratulations! All reports are suppressed!"
  # TODO(timurrrr): also make sure none of the old suppressions
  # were narrowed too much.

if __name__ == "__main__":
  main(sys.argv[1:])

View File

@ -1 +0,0 @@
*

View File

@ -1,35 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
  """Checks the TSan suppressions files for bad suppressions."""
  # TODO(timurrrr): find out how to do relative imports
  # and remove this ugly hack. Also, the CheckChange function won't be needed.
  # tools/valgrind lives one directory above this presubmit script.
  tools_vg_path = input_api.os_path.join(input_api.PresubmitLocalPath(), '..')
  import sys
  old_path = sys.path
  try:
    # Temporarily extend sys.path so tools/valgrind/suppressions.py can be
    # imported; restored in the finally block no matter what happens.
    sys.path = sys.path + [tools_vg_path]
    import suppressions
    return suppressions.PresubmitCheck(input_api, output_api)
  finally:
    sys.path = old_path
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run when a change is uploaded for review."""
  return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run when a change is committed."""
  return CheckChange(input_api, output_api)
def GetPreferredTrySlaves():
  """Ask the try server to run the Linux TSan bot for these files."""
  preferred = ['linux_tsan']
  return preferred

View File

@ -1,188 +0,0 @@
# This file lists the functions, object files and source files
# which should be ignored (i.e. not instrumented) by ThreadSanitizer.
# See http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores.
# ignore these libraries
obj:*/libfreetype*
obj:*/libdbus*
# we ignore the whole NSS library for now since
# its instrumentation is very slow.
# TODO(timurrrr): investigate whether we need to instrument it
obj:*/libnss*
obj:*/nss/*
# ignore pulseaudio - We don't have symbols there and it can be slow otherwise
obj:*/libpulse*.so*
# ignore this standard stuff
fun:clone
fun:fork
fun:pthread_*
fun_r:_pthread_exit
fun_r:_pthread_free_pthread_onstack
fun_r:random_r
fun_r:random
fun_r:rand
fun_r:srand
fun:__new_exitfn
fun:_dl_*
fun:__dl_*
fun:*_setjmp*
# dark magic with 'errno' here.
fun:sys_*
# ignore libc's printf functions
fun_r:_IO_*
fun:fwrite
fun:fflush
# False reports on std::string internals, see
# http://code.google.com/p/data-race-test/issues/detail?id=40
fun:*_M_mutateE*
fun_r:*_M_set_length_and_sharable*
fun:*_M_is_leaked*
fun:*_M_is_shared*
fun:*_M_set_leaked*
fun:*_M_set_sharable*
# Comparison of std::strings sometimes takes a lot of time but we don't really
# need precise stack traces there.
fun_hist:_ZStltIcSt11char_traitsIcESaIcEEbRKSbIT_T0_T1_ES8_
fun_hist:_ZNKSs7compareERKSs
# Don't instrument intercepts
src:*ts_valgrind_intercepts.c
##################################################################
# Don't instrument synchronization code
src:*base/threading/thread_local_storage*
src:*base/stats_counters*
src:*base/synchronization/condition_variable*
src:*base/synchronization/lock*
src:*base/synchronization/waitable_event*
# Don't instrument code dealing with atomics (base::subtle)
fun:*base*subtle*Release_Store*
fun:*base*subtle*NoBarrier_CompareAndSwap*
fun:*base*subtle*NoBarrier_Load*
# Keep some mangling so we don't match NoBarrier_AtomicIncrement
fun:*base*subtle23Barrier_AtomicIncrement*
# MD5 computations are very slow due since sums are computed by
# repeatedly calling tiny functions and is unlikely to race with
# anything.
src:*base/md5*
# Don't instrument tcmalloc
src:*/tcmalloc/*
# This function is heavy in net_unittests
fun_r:*disk_cache*BackendImpl*CheckAllEntries*
# V8 is a hot-spot under ThreadSanitizer.
# Lots of tiny functions there...
# TODO(timurrrr):
# Can we miss data races on V8 objects due to non thread-safe API calls
# if we don't instrument v8::internals?
fun_r:*v8*internal*
# unibrow namespace contains lots of tiny unicode conversion functions.
fun_hist:*unibrow*
# Histogram has tiny functions that can be called frequently
fun_hist:*Histogram*
# Recursively ignore Histogram::Add and friends, see http://crbug.com/62694.
fun_r:*4base*9Histogram*3Add*
fun_r:*4base*16HistogramSamples*3Add*
fun_r:*4base*13HistogramBase*7AddTime*
# TODO(timurrrr): SKIA - needs separate testing?
# SKIA unittest is single-threaded...
# SKIA uses un-annotated atomic refcount and other sync stuff
# some functions are HEAVY like png, jpeg decoding
src:*third_party/skia*
# WebKit hotspot
fun:*png_write*
# This function generates 25% of memory accesses in net_unittests
fun:*icu_4_2*UnicodeSet*add*
# SQLite has lots of tiny functions and produce too many segments on some tests.
# See http://crbug.com/56511
fun_hist:*sqlite*
# There's some weird failure test going on in this tiny test function in sqlite
fun_r:threadLockingTest
# Ignore accesses below GetCurrentThreadIdentifier.
# There is a benign race which is hard to suppress properly,
# see http://crbug.com/44580
fun_r:*BrowserThread*GetCurrentThreadIdentifier*
# BrowserThread accesses MessageLoop::current() in ::CurrentlyOn.
# We can't use suppressions to hide these reports since the concurrent stack
# is simply "base::Thread::ThreadMain"
# See http://crbug.com/63678
fun_r:*BrowserThread*CurrentlyOn*
# zlib is smarter than we are, see http://www.zlib.net/zlib_faq.html#faq36
fun_r:inflate
# zlib-related reports, not investigated yet. See http://crbug.com/70932
fun_r:*remoting*CompressorZlib*Process*
# X11 reads the _XErrorFunction callback in a racey way, see
# http://crbug.com/65278
fun:XSetErrorHandler
fun:*IPC*Logging*Enable*
fun:*IPC*Logging*Disable*
# TSan doesn't support lockf and hence shared memory locks in this function;
# http://crbug.com/45083
fun_r:*base*StatsTable*AddCounter*
# TSan doesn't understand internal libc locks, see http://crbug.com/71435
fun_r:mbsrtowcs
# gethostbyname2_r is thread-safe, however ThreadSanitizer reports races inside it and
# (sometimes) in __nss_* functions below it.
# This may be related to
# https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/59449
fun_r:gethostbyname2_r*
# TODO(timurrrr): remove this when TSan is updated past r3232
fun_r:gaih_inet
# Strange reports below _IO_getline, every time in "Concurrent access".
# Probably the reports are there since we're missing the libc internal locks
fun_r:_IO_getline*
# A benign race in glib on something called "contention_counter".
fun:g_slice_alloc
# A benign race in glibc on "random_time_bits".
fun:__gen_tempname
# A probably-benign race on '__have_o_cloexec' in opendir/__alloc_dir,
# see http://crbug.com/125928.
fun_r:__alloc_dir
fun_r:opendir
# The sqlite cache is racing against a few different stacktraces,
# so let's ignore it recursively. See http://crbug.com/84094
fun_r:pcache1Fetch
# "Suppress" a data race in TraceLog::GetCategory which has
# fun:MessageLoop::RunTask at the top of the "current" stack which we don't want
# to suppress. See http://crbug.com/98926
fun:*base*debug*TraceLog*GetCategoryInternal*
# libc threading on GCC 4.6
fun:arena_thread_freeres
# __strncasecmp_l_ssse3 overreads the buffer causing TSan to report a data race
# on another object. See http://crbug.com/177074
fun:*strncasecmp*

View File

@ -1,38 +0,0 @@
# This file lists the functions, object files and source files
# which should be ignored (i.e. not instrumented) by ThreadSanitizer on Mac OS.
# At the moment the Chromium binaries' debug info is not available to
# ThreadSanitizer, so we have to define fun:* rules for Mac OS complementing
# the src:* rules defined for Linux.
# we ignore the Security libraries for now since
# their instrumentation is very slow.
# TODO(timurrrr): investigate whether we need to instrument them
obj:*/Security*
obj:*/libcrypto*
# SensitiveAllocator::free is a part of the Security framework.
# It calls bzero (0xffff0633) which can't be resolved and thus should be
# ignored recursively.
fun_r:*SensitiveAllocator*free*
# The CFBag and CFDictionary operators should be thread-safe, but they are not
# annotated properly.
# TODO(glider): replace all the CoreFoundation suppressions with ignores.
fun_r:CFBag*
fun_r:CFDictionary*
fun_r:CFBasicDictionary*
#fun_r:CFBasicHash*
# see crbug.com/46138
fun_r:__CFRunLoopDeallocate
fun_r:__CFRunLoopRemoveAllSources
fun_r:__CFFinalizeRunLoop
# _cthread_fork_child() is called in the child process after the fork syscall.
# This function cleans up the cthread data structures created in the parent,
# so ThreadSanitizer might consider it racey.
fun_r:_cthread_fork_child
# False reports on Snow Leopard.
fun_r: _pthread_exit
fun_r: _dispatch_queue_drain

View File

@ -1,64 +0,0 @@
# This file lists the functions, object files and source files
# which should be ignored (i.e. not instrumented) by ThreadSanitizer on Windows.
# We ignore security libraries for now since their instrumentation is very slow.
# TODO(timurrrr): investigate whether we need to instrument them
obj:*CRYPT32.dll
obj:*RPCRT4.dll
fun_r:*SHA256*
fun_r:*BCryptGenerateSymmetricKey*
fun_r:*CryptAcquireContext*
obj:*WINHTTP.dll
obj:*imagehlp.dll
# Instrumenting IP Helper API causes crashes.
# TODO(szym): investigate http://crbug.com/146119
obj:*IPHLPAPI.dll
# Use less detailed instrumentation of STL
fun_hist:*std::*<*
# Don't instrument some stl internals - they shouldn't be useful
fun_r:*std::_Debug*
fun_r:*std::_Lockit*
# Benign race on mutex unlock
fun:_Mtxunlock
# Benign race during clock initialization
fun_r:*InitializeClock*
# Some unknown Windows guts
fun_r:Ordinal_*
fun:unnamedImageEntryPoint
fun_r:RtlDestroyQueryDebugBuffer
fun:_updatetlocinfoEx_nolock
# Strange reports on net_unittests, maybe related to raising
# a debug exception by PlatformThread
# TODO(timurrrr): investigate
fun_r:*PlatformThread*SetName*
# Recursively ignore Histogram::Add and friends, see http://crbug.com/62694.
fun_r:base::Histogram::Add
fun_r:base::HistogramSamples::Add
fun_r:base::HistogramBase::AddTime
# ffmpegsumo.dll appears to read a few bytes beyond the end of the buffer.
fun:_ff_prefetch_mmxext
# Shows up as a race in SHELL32.dll when deleting a directory while opening an
# unrelated file in another thread. Revealed by DiskCacheBackendTest.DeleteOld.
# See: https://code.google.com/p/data-race-test/issues/detail?id=114
fun_r:SHFileOperationW
# Ignore internal file I/O synchronization: crbug.com/146724
fun_r:_lock_file
fun_r:_lock_file2
fun_r:_lock
fun_r:_flsbuf
fun_r:_unlock_file
fun_r:_getstream
# http://crbug.com/272065
obj:*NLAapi.dll

File diff suppressed because it is too large Load Diff

View File

@ -1,270 +0,0 @@
# There are three kinds of suppressions in this file.
# 1. third party stuff we have no control over
#
# 2. Intentional unit test errors, or stuff that is somehow a false positive
# in our own code, or stuff that is so trivial it's not worth fixing
#
# 3. Suppressions for real chromium bugs that are not yet fixed.
# These should all be in chromium's bug tracking system (but a few aren't yet).
# Periodically we should sweep this file and the bug tracker clean by
# running overnight and removing outdated bugs/suppressions.
#-----------------------------------------------------------------------
# 1. third party stuff we have no control over
# Several Cocoa-specific races
{
Some Cocoa-specific race in NSRunLoop class
ThreadSanitizer:Race
...
fun:*CFRunLoop*
}
{
A race releasing NSObject
ThreadSanitizer:Race
...
fun:__CFDoExternRefOperation
fun:-[NSObject release]
}
{
A race retaining NSObject
ThreadSanitizer:Race
...
fun:CFBagAddValue
fun:__CFDoExternRefOperation
fun:-[NSObject retain]
}
{
A race retaining NSBundle
ThreadSanitizer:Race
...
fun:CFBagAddValue
fun:__CFDoExternRefOperation
fun:NSIncrementExtraRefCount
fun:-[NSBundle retain]
}
{
A race deallocating NSOperationQueue
ThreadSanitizer:Race
...
fun:_CFRelease
fun:-[NSOperationQueue dealloc]
}
{
Another race deallocating NSOperationQueue
ThreadSanitizer:Race
...
fun:-[NSIndexSet dealloc]
fun:-[NSOperationQueue dealloc]
}
# A related OpenRadar bug is at http://openradar.appspot.com/7396501.
{
A benign race on a debug counter in __NSAutoreleaseObject
ThreadSanitizer:Race
fun:__NSAutoreleaseObject
fun:-[NSObject(NSObject) autorelease]
}
# media_unittests depend on the Darwin libraries which have many reports in
# them. A related OpenRadar bug is at http://openradar.appspot.com/7223948
{
Warnings in the CoreAudio component
ThreadSanitizer:Race
...
obj:/System/Library/Components/CoreAudio.component*
}
{
Warnings in the CoreAudio framework
ThreadSanitizer:Race
...
obj:/System/Library/Frameworks/CoreAudio.framework*
}
{
A warning in CoreAudio framework
ThreadSanitizer:Race
...
fun:*HALRunLoop*
}
{
A warning in the AudioToolbox framework
ThreadSanitizer:Race
...
fun:*CAPThread*
}
{
Warnings inside AQServer_{Stop,EnqueueBuffer}
ThreadSanitizer:Race
...
fun:*AudioQueueObject*
...
fun:AQServer_*
}
{
Warnings inside AudioHardwareGetProperty
ThreadSanitizer:Race
...
fun:AudioHardwareGetProperty
}
{
Benign data race in CAMutex bug_23579
ThreadSanitizer:Race
fun:*CAMutex*ock*
}
{
A warning on destruction of third party ClientAudioQueue object (AudioToolbox)
ThreadSanitizer:Race
...
fun:*AQClient*CheckDisposal*
fun:*ClientAudioQueueD*
fun:AudioQueueDispose
}
{
Destroying invalid lock in krb5int_getaddrinfo while terminating Kerberos.
ThreadSanitizer:InvalidLock
fun:pthread_mutex_destroy
fun:krb5int_getaddrinfo
fun:ImageLoaderMachO::doTermination*
}
{
bug_55946
ThreadSanitizer:Race
...
fun:OSAtomicAdd32
fun:base::subtle::Barrier_AtomicIncrement*
}
#-----------------------------------------------------------------------
# 2. Intentional unit test errors, or stuff that is somehow a false positive
# in our own code, or stuff that is so trivial it's not worth fixing
{
Benign data race inside PCMQueueOutAudioOutputStream::Stop bug_24801
ThreadSanitizer:Race
fun:*PCMQueueOutAudioOutputStream*Stop*
}
{
bug_100313 TSan false positive
ThreadSanitizer:Race
...
fun:__sfp
fun:fopen
fun:file_util::OpenFile
fun:base::SharedMemory::CreateNamed
}
{
Benign race to access status during TrackedObject unittests
ThreadSanitizer:Race
...
fun:tracked_objects::ThreadData::ShutdownSingleThreadedCleanup
}
#-----------------------------------------------------------------------
# 3. Suppressions for real chromium bugs that are not yet fixed.
# These should all be in chromium's bug tracking system (but a few aren't yet).
# Periodically we should sweep this file and the bug tracker clean by
# running overnight and removing outdated bugs/suppressions.
{
bug_93932j
ThreadSanitizer:Race
fun:release_delayed_buffers
fun:frame_thread_free
fun:ff_thread_free
fun:avcodec_close
fun:avcodec_close
fun:media::FFmpegVideoDecoder::ReleaseFFmpegResources
fun:media::FFmpegVideoDecoder::Stop
fun:base::internal::RunnableAdapter::Run
}
{
bug_100772a
ThreadSanitizer:Race
fun:CAGuard::Wait
fun:MIO::DAL::RunLoop::StartOwnThread
fun:MIO::DAL::RunLoop::Start
fun:MIO::DAL::System::CheckOutInstance
fun:TundraObjectGetPropertyDataSize
fun:+[QTCaptureDALDevice _refreshDevices]
fun:+[QTCaptureDALDevice devicesWithIOType:]
fun:+[QTCaptureDevice devicesWithIOType:]
fun:+[QTCaptureDevice inputDevices]
fun:+[QTCaptureDevice inputDevicesWithMediaType:]
fun:+[VideoCaptureDeviceQTKit deviceNames]
fun:media::VideoCaptureDevice::GetDeviceNames
fun:media::VideoCaptureDeviceMac::Init
fun:media::VideoCaptureDevice::Create
fun:media::VideoCaptureDeviceTest_OpenInvalidDevice_Test::TestBody
}
{
bug_100772b
ThreadSanitizer:Race
fun:DVDeviceTerminate
}
{
bug_100772c
ThreadSanitizer:Race
fun:MIO::DAL::RunLoop::StopOwnThread
fun:MIO::DAL::RunLoop::Teardown
fun:MIO::DAL::System::TeardownShell
fun:MIO::DAL::System::AtExitHandler
fun:MIO::DAL::AtExit::AtExitHandler
}
{
bug_100772d
ThreadSanitizer:Race
fun:DVSignalSync
fun:DVDeviceTerminate
}
{
bug_106197
ThreadSanitizer:Race
...
fun:__sfp
fun:fopen
fun:file_util::OpenFile
fun:base::SharedMemory::Create
fun:base::SharedMemory::CreateNamed
fun:base::::MultipleThreadMain::ThreadMain
fun:base::::ThreadFunc
}
{
bug_123112
ThreadSanitizer:Race
fun:media::AUAudioOutputStream::Stop
fun:media::AudioOutputDispatcherImpl::StopStream
fun:media::AudioOutputProxy::Stop
fun:media::AudioOutputController::DoStopCloseAndClearStream
fun:media::AudioOutputController::DoClose
fun:base::internal::RunnableAdapter::Run
}
{
bug_133074_a
ThreadSanitizer:Race
fun:CAMutex::~CAMutex
fun:AudioQueueDispose
fun:media::PCMQueueOutAudioOutputStream::Close
}
{
bug_133074_b
ThreadSanitizer:Race
fun:media::AUAudioOutputStream::Stop
fun:media::AudioOutputMixer::ClosePhysicalStream
fun:media::AudioOutputMixer::Shutdown
fun:media::AudioManagerBase::ShutdownOnAudioThread
}

View File

@ -1,225 +0,0 @@
############################
# Reports on the guts of Windows
{
UuidCreate
ThreadSanitizer:Race
...
fun:UuidCreate
}
{
ILFindLastID
ThreadSanitizer:Race
...
fun:ILFindLastID
}
{
RpcServerUnregisterIf
ThreadSanitizer:Race
...
fun:RpcServerUnregisterIf
}
# http://code.google.com/p/data-race-test/issues/detail?id=45
{
accessing an invalid lock in unnamedImageEntryPoint
ThreadSanitizer:InvalidLock
fun:unnamedImageEntryPoint
}
{
accessing an invalid lock in CoFreeAllLibraries
ThreadSanitizer:InvalidLock
fun:CoFreeAllLibraries
}
{
bug_158099_mmdevice_endpoint_shutdown_too_fast
ThreadSanitizer:Race
fun:GetLocalIdFromEndpointId
...
}
############################
# Chromium
{
Benign race during InitializeClock
ThreadSanitizer:Race
...
fun:*InitializeClock*
}
{
bug_62560
ThreadSanitizer:Race
...
fun:_initterm
fun:doexit
}
{
accessing an invalid lock under exit/doexit
ThreadSanitizer:InvalidLock
fun:*~Lock*
...
fun:doexit
fun:exit
}
{
bug_81793a
ThreadSanitizer:Race
...
fun:NetTestSuite::InitializeTestThread
}
{
bug_81793b
ThreadSanitizer:Race
...
fun:base::MessageLoop::CalculateDelayedRuntime
fun:base::MessageLoop::Post*Task
}
{
bug_93932a
ThreadSanitizer:Race
fun:avcodec_default_release_buffer
fun:ff_mpeg4video_split
}
{
bug_93932b
ThreadSanitizer:Race
...
fun:avcodec_close
fun:media::FFmpegVideoDecoder::ReleaseFFmpegResources
}
{
bug_93932d
ThreadSanitizer:Race
fun:memcpy
fun:media::CopyPlane
}
{
bug_93932e
ThreadSanitizer:Race
...
fun:ff_thread_finish_setup
fun:ptw32_threadStart@4
}
{
bug_93932f
ThreadSanitizer:Race
...
fun:ff_vp3_h_loop_filter_c
...
fun:ff_thread_flush
fun:media::FFmpegVideoDecoder::Flush
}
{
bug_93932g
ThreadSanitizer:Race
...
fun:av_parser_close
...
fun:BaseThreadInitThunk
}
{
bug_93932h
ThreadSanitizer:Race
...
fun:av_parser_close
...
fun:base::internal::RunnableAdapter::Run
}
{
bug_93932i
ThreadSanitizer:Race
fun:ff_simple_idct_add_mmx
...
fun:BaseThreadInitThunk
}
{
bug_144928_a
ThreadSanitizer:Race
fun:google_breakpad::CrashGenerationServer::Handle*
fun:google_breakpad::CrashGenerationServer::OnPipeConnected
fun:RtlSetTimer
fun:RtlSetTimer
fun:TpReleaseTimer
fun:TpReleaseTimer
fun:RtlMultiByteToUnicodeSize
fun:TpCallbackMayRunLong
fun:TpCallbackMayRunLong
fun:BaseThreadInitThunk
}
{
bug_144928_b
ThreadSanitizer:Race
fun:google_breakpad::CrashGenerationServer::~CrashGenerationServer
fun:google_breakpad::CrashGenerationServer::`scalar deleting destructor'
fun:base::DefaultDeleter*
fun:base::internal::scoped_ptr_impl::~scoped_ptr_impl
fun:remoting::BreakpadWinDeathTest::~BreakpadWinDeathTest
fun:remoting::BreakpadWinDeathTest_TestAccessViolation_Test::`scalar deleting destructor'
fun:testing::Test::DeleteSelf_
fun:testing::internal::HandleExceptionsInMethodIfSupported
}
{
bug_146119
ThreadSanitizer:Race
...
fun:GetAdaptersAddresses
...
fun:base::internal::RunnableAdapter::Run
}
{
bug_157076_a
ThreadSanitizer:Race
fun:win32thread_worker
fun:_callthreadstartex
fun:_threadstartex
fun:BaseThreadInitThunk
}
{
bug_157076_b
ThreadSanitizer:Race
fun:memset
fun:_free_dbg_nolock
fun:_free_dbg
fun:_aligned_free_dbg
fun:_aligned_free
}
{
bug_157076_c
ThreadSanitizer:Race
fun:memset
fun:_heap_alloc_dbg_impl
fun:_nh_malloc_dbg_impl
fun:_nh_malloc_dbg
fun:_malloc_dbg
fun:_aligned_offset_malloc_dbg
fun:_aligned_malloc
fun:base::AlignedAlloc
}
{
bug_170334
ThreadSanitizer:Race
...
fun:net::NetworkChangeNotifierWinTest::~NetworkChangeNotifierWinTest
}
{
bug_239350
ThreadSanitizer:Race
...
fun:av_freep
fun:av_buffer_unref
fun:av_frame_unref
...
fun:media::FFmpegVideoDecoder::Decode
}

View File

@ -1,278 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# tsan_analyze.py
''' Given a ThreadSanitizer output file, parses errors and uniques them.'''
import gdb_helper
from collections import defaultdict
import hashlib
import logging
import optparse
import os
import re
import subprocess
import sys
import time
import common
# Global symbol table (ugh)
TheAddressTable = None
class _StackTraceLine(object):
def __init__(self, line, address, binary):
self.raw_line_ = line
self.address = address
self.binary = binary
def __str__(self):
global TheAddressTable
file, line = TheAddressTable.GetFileLine(self.binary, self.address)
if (file is None) or (line is None):
return self.raw_line_
else:
return self.raw_line_.replace(self.binary, '%s:%s' % (file, line))
class TsanAnalyzer(object):
  ''' Given a set of ThreadSanitizer output files, parse all the errors out of
  them, unique them and output the results.'''

  # Matches Valgrind's library-load lines, capturing (binary path, load
  # address), e.g. "--1234-- /lib/libfoo.so (0xdeadbeef)".
  LOAD_LIB_RE = re.compile(r'--[0-9]+-- ([^(:]*) \((0x[0-9a-f]+)\)')

  # Matches one stack frame of a report, capturing (address, binary name).
  TSAN_LINE_RE = re.compile(r'==[0-9]+==\s*[#0-9]+\s*'
                            r'([0-9A-Fa-fx]+):'
                            r'(?:[^ ]* )*'
                            r'([^ :\n]+)'
                            r'')

  THREAD_CREATION_STR = ("INFO: T.* "
      "(has been created by T.* at this point|is program's main thread)")

  SANITY_TEST_SUPPRESSION = ("ThreadSanitizer sanity test "
      "(ToolsSanityTest.DataRace)")
  TSAN_RACE_DESCRIPTION = "Possible data race"
  TSAN_WARNING_DESCRIPTION = ("Unlocking a non-locked lock"
      "|accessing an invalid lock"
      "|which did not acquire this lock")
  RACE_VERIFIER_LINE = "Confirmed a race|unexpected race"
  TSAN_ASSERTION = "Assertion failed: "

  def __init__(self, source_dir, use_gdb=False):
    '''Reads in a set of files.

    Args:
      source_dir: Path to top of source tree for this build.
      use_gdb: If True, resolve frame addresses to file:line via the global
          TheAddressTable; otherwise report lines are kept as-is.
    '''
    self._use_gdb = use_gdb
    self._cur_testcase = None

  def ReadLine(self):
    '''Reads the next line of the current file into self.line_ ('' at EOF).

    When gdb symbolization is enabled, also feeds library-load lines into the
    global TheAddressTable and wraps stack-frame lines in a _StackTraceLine
    (stored in self.stack_trace_line_) for later symbolization.
    '''
    self.line_ = self.cur_fd_.readline()
    self.stack_trace_line_ = None

    if not self._use_gdb:
      return

    global TheAddressTable
    match = TsanAnalyzer.LOAD_LIB_RE.match(self.line_)
    if match:
      binary, ip = match.groups()
      TheAddressTable.AddBinaryAt(binary, ip)
      return

    match = TsanAnalyzer.TSAN_LINE_RE.match(self.line_)
    if match:
      address, binary_name = match.groups()
      stack_trace_line = _StackTraceLine(self.line_, address, binary_name)
      TheAddressTable.Add(stack_trace_line.binary, stack_trace_line.address)
      self.stack_trace_line_ = stack_trace_line

  def ReadSection(self):
    """ Example of a section:
    ==4528== WARNING: Possible data race: {{{
    ==4528==   T20 (L{}):
    ==4528==    #0 MyTest::Foo1
    ==4528==    #1 MyThread::ThreadBody
    ==4528==  Concurrent write happened at this point:
    ==4528==   T19 (L{}):
    ==4528==    #0 MyTest::Foo2
    ==4528==    #1 MyThread::ThreadBody
    ==4528== }}}
    ------- suppression -------
    {
      <Put your suppression name here>
      ThreadSanitizer:Race
      fun:MyTest::Foo1
      fun:MyThread::ThreadBody
    }
    ------- end suppression -------
    """
    result = [self.line_]
    if re.search("{{{", self.line_):
      # Collect every line of the report body up to and including "}}}".
      while not re.search('}}}', self.line_):
        self.ReadLine()
        if self.stack_trace_line_ is None:
          result.append(self.line_)
        else:
          result.append(self.stack_trace_line_)
      self.ReadLine()
      if re.match('-+ suppression -+', self.line_):
        # We need to calculate the suppression hash and prepend a line like
        # "Suppression (error hash=#0123456789ABCDEF#):" so the buildbot can
        # extract the suppression snippet.
        supp = ""
        while not re.match('-+ end suppression -+', self.line_):
          self.ReadLine()
          supp += self.line_
        self.ReadLine()

        if self._cur_testcase:
          result.append("The report came from the `%s` test.\n" %
                        self._cur_testcase)
        # encode() before hashing: hashlib.md5 requires bytes on Python 3,
        # and UTF-8 keeps the digest identical to the old str-based hash.
        supp_hash = int(hashlib.md5(supp.encode('utf-8')).hexdigest()[:16], 16)
        result.append("Suppression (error hash=#%016X#):\n" % supp_hash)
        result.append(" For more info on using suppressions see "
            "http://dev.chromium.org/developers/how-tos/using-valgrind/threadsanitizer#TOC-Suppressing-data-races\n")
        result.append(supp)
    else:
      self.ReadLine()
    return result

  def ReadTillTheEnd(self):
    '''Returns the current line plus everything remaining in the file.'''
    result = [self.line_]
    while self.line_:
      self.ReadLine()
      result.append(self.line_)
    return result

  def ParseReportFile(self, filename):
    '''Parses a report file and returns a list of ThreadSanitizer reports.

    Args:
      filename: report filename.
    Returns:
      list of (list of (str iff self._use_gdb, _StackTraceLine otherwise)).
    '''
    ret = []
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(filename, 'r') as self.cur_fd_:
      while True:
        # Read ThreadSanitizer reports.
        self.ReadLine()
        if not self.line_:
          break

        while True:
          tmp = []

          while re.search(TsanAnalyzer.RACE_VERIFIER_LINE, self.line_):
            tmp.append(self.line_)
            self.ReadLine()

          while re.search(TsanAnalyzer.THREAD_CREATION_STR, self.line_):
            tmp.extend(self.ReadSection())

          if re.search(TsanAnalyzer.TSAN_RACE_DESCRIPTION, self.line_):
            tmp.extend(self.ReadSection())
            ret.append(tmp)  # includes RaceVerifier and thread creation stacks
          elif (re.search(TsanAnalyzer.TSAN_WARNING_DESCRIPTION, self.line_)
                and not common.IsWindows()):  # http://crbug.com/53198
            tmp.extend(self.ReadSection())
            ret.append(tmp)
          else:
            break

        tmp = []
        if re.search(TsanAnalyzer.TSAN_ASSERTION, self.line_):
          tmp.extend(self.ReadTillTheEnd())
          ret.append(tmp)
          break

        match = re.search(r"used_suppression:\s+([0-9]+)\s(.*)", self.line_)
        if match:
          count, supp_name = match.groups()
          count = int(count)
          self.used_suppressions[supp_name] += count
    return ret

  def GetReports(self, files):
    '''Extracts reports from a set of files.

    Reads a set of files and returns a list of all discovered
    ThreadSanitizer race reports. As a side effect, populates
    self.used_suppressions with appropriate info.
    '''
    global TheAddressTable
    if self._use_gdb:
      TheAddressTable = gdb_helper.AddressTable()
    else:
      TheAddressTable = None
    reports = []
    self.used_suppressions = defaultdict(int)
    # Loop variable renamed so the 'file' builtin is not shadowed.
    for file_name in files:
      reports.extend(self.ParseReportFile(file_name))
    if self._use_gdb:
      TheAddressTable.ResolveAll()
    # Stringify each line of each report; _StackTraceLine objects symbolize
    # themselves in __str__. (The old `map(lambda(x): map(str, x), ...)`
    # relied on parenthesized lambda parameters, a syntax error on Python 3.)
    return [''.join(str(line) for line in report) for report in reports]

  def Report(self, files, testcase, check_sanity=False):
    '''Reads in a set of files and prints ThreadSanitizer report.

    Args:
      files: A list of filenames.
      testcase: Name of the test the reports came from (may be None).
      check_sanity: if true, search for SANITY_TEST_SUPPRESSIONS

    Returns:
      0 on success, -1 if reports were found, -3 if the sanity check failed.
    '''
    # We set up _cur_testcase class-wide variable to avoid passing it through
    # about 5 functions.
    self._cur_testcase = testcase
    reports = self.GetReports(files)
    self._cur_testcase = None  # just in case, shouldn't be used anymore
    common.PrintUsedSuppressionsList(self.used_suppressions)

    retcode = 0
    if reports:
      sys.stdout.flush()
      sys.stderr.flush()
      logging.info("FAIL! Found %i report(s)" % len(reports))
      for report in reports:
        logging.info('\n' + report)
      sys.stdout.flush()
      retcode = -1

    # Report tool's insanity even if there were errors.
    if (check_sanity and
        TsanAnalyzer.SANITY_TEST_SUPPRESSION not in self.used_suppressions):
      logging.error("FAIL! Sanity check failed!")
      retcode = -3

    if retcode != 0:
      return retcode
    logging.info("PASS: No reports found")
    return 0
def main():
  '''For testing only. The TsanAnalyzer class should be imported instead.'''
  parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
  parser.add_option("", "--source_dir",
                    help="path to top of source tree for this build"
                    "(used to normalize source paths in baseline)")
  options, args = parser.parse_args()
  if not args:
    parser.error("no filename specified")
  logging.getLogger().setLevel(logging.INFO)
  # Symbolize addresses with gdb when run as a standalone tool.
  return TsanAnalyzer(options.source_dir, use_gdb=True).Report(args, None)

if __name__ == '__main__':
  sys.exit(main())

View File

@ -1,16 +0,0 @@
# The rules in this file are only applied at compile time.
# Because the Chrome buildsystem does not automatically touch the files
# mentioned here, changing this file requires clobbering all TSan v2 bots.
#
# Please think twice before you add or remove these rules.
# Data races should typically go to suppressions.txt.
# See http://crbug.com/102327
fun:*ThreadData*Initialize*
# Known benign races on histograms. See http://crbug.com/62694.
src:base/metrics/histogram_samples.cc
src:base/metrics/sample_vector.cc
# See http://crbug.com/172104
fun:*v8*internal*ThreadEntry*

View File

@ -1,121 +0,0 @@
# False positives in libflashplayer.so and libglib.so. Since we don't
# instrument them, we cannot reason about the synchronization in them.
race:libflashplayer.so
race:libglib*.so
# Intentional race in ToolsSanityTest.DataRace in base_unittests.
race:base/tools_sanity_unittest.cc
# Data race on WatchdogCounter [test-only]
race:base/threading/watchdog_unittest.cc
# Races in libevent, http://crbug.com/23244
race:libevent/event.c
# http://crbug.com/46840
race:history::HistoryBackend::DeleteFTSIndexDatabases
race:history::InMemoryHistoryBackend::Init
# http://crbug.com/84094
race:sqlite3StatusSet
race:pcache1EnforceMaxPage
race:pcache1AllocPage
# http://crbug.com/102327.
# Test-only race, won't fix.
race:tracked_objects::ThreadData::ShutdownSingleThreadedCleanup
# http://crbug.com/115540
race:*GetCurrentThreadIdentifier
# http://crbug.com/120808
race:base/threading/watchdog.cc
# http://crbug.com/157586
race:third_party/libvpx/source/libvpx/vp8/decoder/threading.c
# http://crbug.com/158718
race:third_party/ffmpeg/libavcodec/pthread.c
race:third_party/ffmpeg/libavcodec/vp8.c
race:third_party/ffmpeg/libavutil/mem.c
race:*HashFrameForTesting
race:third_party/ffmpeg/libavcodec/h264pred.c
race:media::ReleaseData
# http://crbug.com/158922
race:third_party/libvpx/source/libvpx/vp8/encoder/*
# See http://crbug.com/181502
race:_M_rep
race:_M_is_leaked
# http://crbug.com/189177
race:thread_manager
race:v8::Locker::Initialize
# http://crbug.com/223352
race:uprv_malloc_46
race:uprv_realloc_46
# http://crbug.com/223955
race:PassRefPtr
# http://crbug.com/224617
race:base::debug::TraceEventTestFixture_TraceSampling_Test::TestBody
# http://crbug.com/244368
race:skia::BeginPlatformPaint
# http://crbug.com/244385
race:unixTempFileDir
# http://crbug.com/244774
race:webrtc::RTPReceiver::ProcessBitrate
race:webrtc::RTPSender::ProcessBitrate
race:webrtc::VideoCodingModuleImpl::Decode
race:webrtc::RTPSender::SendOutgoingData
race:webrtc::VP8EncoderImpl::GetEncodedPartitions
race:webrtc::VP8EncoderImpl::Encode
race:webrtc::ViEEncoder::DeliverFrame
# http://crbug.com/246968
race:webrtc::VideoCodingModuleImpl::RegisterPacketRequestCallback
# http://crbug.com/246970
race:webrtc::EventPosix::StartTimer
# http://crbug.com/246974
race:content::GpuWatchdogThread::CheckArmed
# http://crbug.com/248101
race:sqlite3Config
race:mem0
# http://crbug.com/257396
race:base::debug::TraceEventTestFixture_TraceSamplingScope_Test::TestBody
# http://crbug.com/257543
race:*GetObjectFromEntryAddress
# http://crbug.com/268924
race:base::g_power_monitor
race:base::PowerMonitor::PowerMonitor
race:base::PowerMonitor::AddObserver
# http://crbug.com/268941
race:tracked_objects::ThreadData::tls_index_
# http://crbug.com/268946
race:CommandLine::HasSwitch
# http://crbug.com/269965
race:DesktopMediaPickerModelTest_UpdateThumbnail_Test
# http://crbug.com/270037
race:gLibCleanupFunctions
# http://crbug.com/270675
race:net::RuleBasedHostResolverProc::Resolve
# http://crbug.com/272095
race:base::g_top_manager

View File

@ -1,24 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import urllib2
import suppressions
def main():
  """Upload every local suppression name and print the server's list of
  suppressions it considers unused."""
  all_supps = []
  for supps in suppressions.GetSuppressions().values():
    all_supps.extend(s.description for s in supps)
  response = urllib2.urlopen(
      'http://chromium-build-logs.appspot.com/unused_suppressions',
      '\n'.join(all_supps))
  sys.stdout.write(response.read())
  return 0

if __name__ == "__main__":
  sys.exit(main())

View File

@ -1,124 +0,0 @@
#!/bin/bash

# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This is a small script for manually launching valgrind, along with passing
# it the suppression file, and some helpful arguments (automatically attaching
# the debugger on failures, etc). Run it from your repo root, something like:
#  $ sh ./tools/valgrind/valgrind.sh ./out/Debug/chrome
#
# This is mostly intended for running the chrome browser interactively.
# To run unit tests, you probably want to run chrome_tests.sh instead.
# That's the script used by the valgrind buildbot.

# Directory holding this script; the per-tool suppression files and the TSan
# ignore list are looked up relative to it.
export THISDIR=`dirname $0`
# Configure the environment for running Memcheck.
# $1 = the binary under test; its bitness is used to pick a matching gdb
#      for the --db-command debugger prompt.
# Sets RUN_COMMAND and DEFAULT_TOOL_FLAGS for the launch code below.
setup_memcheck() {
  RUN_COMMAND="valgrind"
  GDB=gdb
  EXE_INFO=$(file $1)
  if [[ $? -eq 0 ]]; then
    # Prefer a gdb that matches the executable if it's available.
    if [[ "$EXE_INFO" == *32-bit* && -x /usr/bin/gdb32 ]]; then
      GDB="/usr/bin/gdb32";
    elif [[ "$EXE_INFO" == *64-bit* && -x /usr/bin/gdb64 ]]; then
      GDB="/usr/bin/gdb64";
    fi
  fi

  # Prompt to attach gdb when there was an error detected.
  DEFAULT_TOOL_FLAGS=("--db-command=$GDB -nw %f %p" "--db-attach=yes" \
                      # Keep the registers in gdb in sync with the code.
                      "--vex-iropt-register-updates=allregs-at-mem-access" \
                      # Overwrite newly allocated or freed objects
                      # with 0x41 to catch improper use.
                      "--malloc-fill=41" "--free-fill=41" \
                      # Increase the size of stacks being tracked.
                      "--num-callers=30")
}
# Configure the environment for running ThreadSanitizer: it is launched
# through its wrapper script and needs the list of ignored code locations.
setup_tsan() {
  IGNORE_FILE="${THISDIR}/tsan/ignores.txt"
  RUN_COMMAND="valgrind-tsan.sh"
  DEFAULT_TOOL_FLAGS=( \
      "--announce-threads" \
      "--pure-happens-before=yes" \
      "--ignore=$IGNORE_FILE" \
  )
}
# Fallback configuration: run the named tool with no extra flags and warn
# the user that the outcome is best-effort only.
setup_unknown() {
  DEFAULT_TOOL_FLAGS=()
  echo "Unknown tool \"$TOOL_NAME\" specified, the result is not guaranteed"
}
set -e

if [ $# -eq 0 ]; then
  echo "usage: <command to run> <arguments ...>"
  exit 1
fi

TOOL_NAME="memcheck"
declare -a DEFAULT_TOOL_FLAGS[0]

# Select a tool different from memcheck with --tool=TOOL as a first argument
TMP_STR=`echo $1 | sed 's/^\-\-tool=//'`
if [ "$TMP_STR" != "$1" ]; then
  TOOL_NAME="$TMP_STR"
  shift
fi

# Reject --tool appearing anywhere later on the command line.
if echo "$@" | grep "\-\-tool" ; then
  echo "--tool=TOOL must be the first argument" >&2
  exit 1
fi

case $TOOL_NAME in
  memcheck*) setup_memcheck "$1";;
  tsan*) setup_tsan;;
  *) setup_unknown;;
esac

# The per-tool suppression file lives in a directory named after the tool.
SUPPRESSIONS="$THISDIR/$TOOL_NAME/suppressions.txt"

CHROME_VALGRIND=`sh $THISDIR/locate_valgrind.sh`
if [ "$CHROME_VALGRIND" = "" ]
then
  # locate_valgrind.sh failed
  exit 1
fi
echo "Using valgrind binaries from ${CHROME_VALGRIND}"

set -x
PATH="${CHROME_VALGRIND}/bin:$PATH"
# We need to set these variables to override default lib paths hard-coded into
# Valgrind binary.
export VALGRIND_LIB="$CHROME_VALGRIND/lib/valgrind"
export VALGRIND_LIB_INNER="$CHROME_VALGRIND/lib/valgrind"

# G_SLICE=always-malloc: make glib use system malloc
# NSS_DISABLE_UNLOAD=1: make nss skip dlclosing dynamically loaded modules,
# which would result in "obj:*" in backtraces.
# NSS_DISABLE_ARENA_FREE_LIST=1: make nss use system malloc
# G_DEBUG=fatal_warnings: make GTK abort on any critical or warning assertions.
# If it crashes on you in the Options menu, you hit bug 19751,
# comment out the G_DEBUG=fatal_warnings line.
#
# GTEST_DEATH_TEST_USE_FORK=1: make gtest death tests valgrind-friendly
#
# When everyone has the latest valgrind, we might want to add
# --show-possibly-lost=no
# to ignore possible but not definite leaks.
G_SLICE=always-malloc \
NSS_DISABLE_UNLOAD=1 \
NSS_DISABLE_ARENA_FREE_LIST=1 \
G_DEBUG=fatal_warnings \
GTEST_DEATH_TEST_USE_FORK=1 \
$RUN_COMMAND \
  --trace-children=yes \
  --leak-check=yes \
  --suppressions="$SUPPRESSIONS" \
  "${DEFAULT_TOOL_FLAGS[@]}" \
  "$@"

File diff suppressed because it is too large Load Diff

View File

@ -1,222 +0,0 @@
#!/bin/bash

# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This script can be used by waterfall sheriffs to fetch the status
# of Valgrind bots on the memory waterfall and test if their local
# suppressions match the reports on the waterfall.

set -e

THISDIR=$(dirname "${0}")
# Scratch directory where fetched reports are stored; recreated by fetch_logs.
LOGS_DIR=$THISDIR/waterfall.tmp
WATERFALL_PAGE="http://build.chromium.org/p/chromium.memory/builders"
WATERFALL_FYI_PAGE="http://build.chromium.org/p/chromium.memory.fyi/builders"
download() {
  # Download a file.
  # $1 = URL to download
  # $2 = Path to the output file
  # Uses curl if available, otherwise wget; aborts the whole script (exit 1)
  # when neither is present or the transfer fails.
  # {{{1
  if [ "$(which curl)" != "" ]
  then
    if ! curl -s -o "$2" "$1"
    then
      echo
      echo "Failed to download '$1'... aborting"
      exit 1
    fi
  elif [ "$(which wget)" != "" ]
  then
    if ! wget "$1" -O "$2" -q
    then
      echo
      echo "Failed to download '$1'... aborting"
      exit 1
    fi
  else
    echo "Need either curl or wget to download stuff... aborting"
    exit 1
  fi
  # }}}
}
fetch_logs() {
  # Fetch Valgrind logs from the waterfall {{{1
  # $1 = base waterfall URL to scrape.
  # Recreates $LOGS_DIR and fills it with one "report_<slave>_<build>" file
  # per failed build, containing the concatenated memory/heapcheck step logs,
  # "FAILED:<test>" markers and the build URL.

  # TODO(timurrrr,maruel): use JSON, see
  # http://build.chromium.org/p/chromium.memory/json/help
  rm -rf "$LOGS_DIR" # Delete old logs
  mkdir "$LOGS_DIR"

  echo "Fetching the list of builders..."
  download $1 "$LOGS_DIR/builders"
  SLAVES=$(grep "<a href=\"builders\/" "$LOGS_DIR/builders" | \
           grep 'td class="box"' | \
           sed "s/.*<a href=\"builders\///" | sed "s/\".*//" | \
           sort | uniq)

  for S in $SLAVES
  do
    SLAVE_URL=$1/$S
    # Builder names are URL-encoded in the page; decode for display only.
    SLAVE_NAME=$(echo $S | sed -e "s/%20/ /g" -e "s/%28/(/g" -e "s/%29/)/g")
    echo -n "Fetching builds by slave '${SLAVE_NAME}'"
    download $SLAVE_URL?numbuilds=${NUMBUILDS} "$LOGS_DIR/slave_${S}"

    # We speed up the 'fetch' step by skipping the builds/tests which succeeded.
    # TODO(timurrrr): OTOH, we won't be able to check
    # if some suppression is not used anymore.
    #
    # The awk script here joins the lines ending with </td> to make it possible
    # to find the failed builds.
    LIST_OF_BUILDS=$(cat "$LOGS_DIR/slave_$S" | \
                     awk 'BEGIN { buf = "" }
                          {
                            if ($0 ~ /<\/td>/) { buf = (buf $0); }
                            else {
                              if (buf) { print buf; buf="" }
                              print $0
                            }
                          }
                          END {if (buf) print buf}' | \
                     grep "success\|failure" | \
                     head -n $NUMBUILDS | \
                     grep "failure" | \
                     grep -v "failed compile" | \
                     sed "s/.*\/builds\///" | sed "s/\".*//")

    for BUILD in $LIST_OF_BUILDS
    do
      # We'll fetch a few tiny URLs now, let's use a temp file.
      TMPFILE=$(mktemp -t memory_waterfall.XXXXXX)
      download $SLAVE_URL/builds/$BUILD "$TMPFILE"

      REPORT_FILE="$LOGS_DIR/report_${S}_${BUILD}"
      rm -f $REPORT_FILE 2>/dev/null || true # make sure it doesn't exist

      # Report log IDs are 16 hex characters; anything else under /logs/ is
      # treated as a failed-test name below.
      REPORT_URLS=$(grep -o "[0-9]\+/steps/\(memory\|heapcheck\).*/logs/[0-9A-F]\{16\}" \
                    "$TMPFILE" \
                    || true) # `true` is to succeed on empty output
      FAILED_TESTS=$(grep -o "[0-9]\+/steps/\(memory\|heapcheck\).*/logs/[A-Za-z0-9_.]\+" \
                     "$TMPFILE" | grep -v "[0-9A-F]\{16\}" \
                     | grep -v "stdio" || true)

      for REPORT in $REPORT_URLS
      do
        download "$SLAVE_URL/builds/$REPORT/text" "$TMPFILE"
        echo "" >> "$TMPFILE" # Add a newline at the end
        cat "$TMPFILE" | tr -d '\r' >> "$REPORT_FILE"
      done

      for FAILURE in $FAILED_TESTS
      do
        echo -n "FAILED:" >> "$REPORT_FILE"
        echo "$FAILURE" | sed -e "s/.*\/logs\///" -e "s/\/.*//" \
          >> "$REPORT_FILE"
      done

      rm "$TMPFILE"
      echo $SLAVE_URL/builds/$BUILD >> "$REPORT_FILE"
    done
    echo " DONE"
  done
  # }}}
}
match_suppressions() {
  # Check the local suppression files against every downloaded report.
  # Extra command-line arguments are forwarded to test_suppressions.py.
  # "$@" (quoted) preserves arguments containing whitespace; the previous
  # unquoted $@ would have word-split them.
  PYTHONPATH=$THISDIR/../python/google \
  python "$THISDIR/test_suppressions.py" "$@" "$LOGS_DIR/report_"*
}
match_gtest_excludes() {
  # Print the test failures found in the downloaded logs for each platform so
  # the sheriff can compare them against the local gtest exclude lists.
  for PLATFORM in "Linux" "Chromium%20Mac" "Chromium%20OS"
  do
    echo
    echo "Test failures on ${PLATFORM}:" | sed "s/%20/ /"
    grep -h -o "^FAILED:.*" -R "$LOGS_DIR"/*${PLATFORM}* | \
      grep -v "FAILS\|FLAKY" | sort | uniq | \
      sed -e "s/^FAILED://" -e "s/^/ /"
    # Don't put any operators between "grep | sed" and "RESULT=$PIPESTATUS"
    RESULT=$PIPESTATUS
    # $PIPESTATUS here is grep's exit status: 1 means "nothing matched",
    # i.e. there were no failures for this platform.
    if [ "$RESULT" == 1 ]
    then
      echo "  None!"
    else
      echo
      echo "  Note: we don't check for failures already excluded locally yet"
      echo "  TODO(timurrrr): don't list tests we've already excluded locally"
    fi
  done
  echo
  echo "Note: we don't print FAILS/FLAKY tests and 1200s-timeout failures"
}
usage() {
  # Print the command-line help for this script to stdout.
  cat <<EOF
usage: $0 fetch|match options

This script can be used by waterfall sheriffs to fetch the status
of Valgrind bots on the memory waterfall and test if their local
suppressions match the reports on the waterfall.

OPTIONS:
   -h      Show this message
   -n N    Fetch N builds from each slave.

COMMANDS:
  fetch    Fetch Valgrind logs from the memory waterfall
  match    Test the local suppression files against the downloaded logs

EOF
}
NUMBUILDS=3

CMD=$1
if [ $# != 0 ]; then
  shift
fi

# Arguments for "match" are handled in match_suppressions
if [ "$CMD" != "match" ]; then
  # NOTE: the optstring must use plain ASCII quotes. The previous revision
  # contained Unicode "smart quotes" here, so getopts treated the quote
  # characters themselves as accepted option letters.
  while getopts "hn:" OPTION
  do
    case $OPTION in
      h)
        usage
        exit
        ;;
      n)
        NUMBUILDS=$OPTARG
        ;;
      ?)
        usage
        exit
        ;;
    esac
  done
  shift $((OPTIND-1))
  if [ $# != 0 ]; then
    usage
    exit 1
  fi
fi

if [ "$CMD" = "fetch" ]; then
  echo "Fetching $NUMBUILDS builds"
  fetch_logs $WATERFALL_PAGE
  fetch_logs $WATERFALL_FYI_PAGE
elif [ "$CMD" = "match" ]; then
  # Quote "$@" so match options containing spaces reach the Python script
  # as single arguments.
  match_suppressions "$@"
  match_gtest_excludes
elif [ "$CMD" = "blame" ]; then
  echo The blame command died of bitrot. If you need it, please reimplement it.
  echo Reimplementation is blocked on http://crbug.com/82688
else
  usage
  exit 1
fi