Clean up tools directory.

Remove all files and directories except:
clang  emacs  git  gyp  heapcheck  protoc_wrapper  valgrind  vim
xdisplaycheck

Change-Id: I6326e4edad4b843e0d0c2ef39c20ac90f13c8579
Kongqun Yang 2014-01-21 12:36:27 -08:00 committed by KongQun Yang
parent 72deea3493
commit 257b48536a
1341 changed files with 0 additions and 395845 deletions
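A minimal sketch of the cleanup this commit performs, assuming it is run from inside the tools/ directory; the keep-list mirrors the one above, and the helper itself is hypothetical (not part of this commit):

#!/usr/bin/env python
# Hypothetical helper illustrating the cleanup described in the commit message.
import os
import shutil

KEEP = set(['clang', 'emacs', 'git', 'gyp', 'heapcheck', 'protoc_wrapper',
            'valgrind', 'vim', 'xdisplaycheck'])

for entry in os.listdir('.'):
    if entry in KEEP:
        continue
    if os.path.isdir(entry):
        shutil.rmtree(entry)  # remove directory trees not on the keep-list
    else:
        os.remove(entry)      # remove stray files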

View File

@ -1,6 +0,0 @@
# checkdeps.py shouldn't check include paths for files in these dirs:
skip_child_includes = [
"clang",
"gyp",
"traceline",
]

View File

@ -1,3 +0,0 @@
*
per-file bisect-builds.py=rsesek@chromium.org

View File

@ -1,48 +0,0 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for bisect trybot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
import imp
def _ExamineBisectConfigFile(input_api, output_api):
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('run-bisect-perf-regression.cfg'):
continue
try:
cfg_file = imp.load_source('config', 'run-bisect-perf-regression.cfg')
for k, v in cfg_file.config.iteritems():
if v:
return f.LocalPath()
except (IOError, AttributeError, TypeError):
return f.LocalPath()
return None
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
results = _ExamineBisectConfigFile(input_api, output_api)
if results:
return [output_api.PresubmitError(
'The bisection config file should only contain a config dict with '
'empty fields. Changes to this file should never be submitted.',
items=[results])]
return []
def CommonChecks(input_api, output_api):
results = []
results.extend(_CheckNoChangesToBisectConfigFile(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
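For reference, a minimal sketch of run-bisect-perf-regression.cfg contents that would pass the check above; the key names are hypothetical, the point is only that every value must be empty so the iteritems() loop never finds a truthy field:

# Hypothetical config contents; any non-empty value would make
# _ExamineBisectConfigFile() return the file path and raise the PresubmitError.
config = {
  'command': '',
  'good_revision': '',
  'bad_revision': '',
  'metric': '',
}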

View File

@ -1,6 +0,0 @@
bulach@chromium.org
digit@chromium.org
michaelbai@chromium.org
pliard@chromium.org
wangxianzhu@chromium.org
yfriedman@chromium.org

View File

@ -1,43 +0,0 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <sys/types.h>
#include <sys/stat.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
int main(int argc, char ** argv) {
int i = fork();
struct stat ft;
time_t ct;
if (i < 0) {
printf("fork error");
return 1;
}
if (i > 0)
return 0;
/* child (daemon) continues */
int j;
for (j = 0; j < getdtablesize(); j++)
close(j);
setsid(); /* obtain a new process group */
while (1) {
sleep(120);
stat("/sdcard/host_heartbeat", &ft);
time(&ct);
if (ct - ft.st_mtime > 120) {
/* File was not touched for some time. */
system("su -c reboot");
}
}
return 0;
}
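The daemon above reboots the device if /sdcard/host_heartbeat is not touched for more than 120 seconds, so the host is expected to refresh that file periodically. A minimal host-side sketch, assuming adb is on the PATH (this helper is not part of the tree):

#!/usr/bin/env python
# Hypothetical host-side keep-alive for the adb_reboot daemon above.
import subprocess
import time

while True:
    # Touching the file updates its mtime, which the daemon checks with stat().
    subprocess.check_call(['adb', 'shell', 'touch', '/sdcard/host_heartbeat'])
    time.sleep(60)  # comfortably inside the daemon's 120-second window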

View File

@ -1,14 +0,0 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'adb_reboot',
'type': 'executable',
'sources': [
'adb_reboot.c',
],
},
],
}

View File

@ -1,27 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
# Intermediate target grouping the android tools needed to run native
# unittests and instrumentation test apks.
{
'target_name': 'android_tools',
'type': 'none',
'dependencies': [
'fake_dns/fake_dns.gyp:fake_dns',
'forwarder2/forwarder.gyp:forwarder2',
'md5sum/md5sum.gyp:md5sum',
'adb_reboot/adb_reboot.gyp:adb_reboot',
],
},
{
'target_name': 'memdump',
'type': 'none',
'dependencies': [
'memdump/memdump.gyp:memdump',
],
}
],
}

View File

@ -1,9 +0,0 @@
#!/system/bin/sh
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
ASAN_OPTIONS=debug=1,verbosity=1,strict_memcmp=0 \
LD_LIBRARY_PATH=/data/local/tmp/asan:$LD_LIBRARY_PATH \
LD_PRELOAD=libclang_rt.asan-arm-android.so \
exec $@

View File

@ -1,107 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/common/adb_connection.h"
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "tools/android/common/net.h"
namespace tools {
namespace {
void CloseSocket(int fd) {
if (fd >= 0) {
int old_errno = errno;
(void) HANDLE_EINTR(close(fd));
errno = old_errno;
}
}
} // namespace
int ConnectAdbHostSocket(const char* forward_to) {
// ADB port forward request format: HHHHtcp:port:address.
// HHHH is the hexadecimal length of the "tcp:port:address" part.
const size_t kBufferMaxLength = 30;
const size_t kLengthOfLength = 4;
const size_t kAddressMaxLength = kBufferMaxLength - kLengthOfLength;
const char kAddressPrefix[] = { 't', 'c', 'p', ':' };
size_t address_length = arraysize(kAddressPrefix) + strlen(forward_to);
if (address_length > kBufferMaxLength - kLengthOfLength) {
LOG(ERROR) << "Forward to address is too long: " << forward_to;
return -1;
}
char request[kBufferMaxLength];
memcpy(request + kLengthOfLength, kAddressPrefix, arraysize(kAddressPrefix));
memcpy(request + kLengthOfLength + arraysize(kAddressPrefix),
forward_to, strlen(forward_to));
char length_buffer[kLengthOfLength + 1];
snprintf(length_buffer, arraysize(length_buffer), "%04X",
static_cast<int>(address_length));
memcpy(request, length_buffer, kLengthOfLength);
int host_socket = socket(AF_INET, SOCK_STREAM, 0);
if (host_socket < 0) {
LOG(ERROR) << "Failed to create adb socket: " << strerror(errno);
return -1;
}
DisableNagle(host_socket);
const int kAdbPort = 5037;
sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_port = htons(kAdbPort);
if (HANDLE_EINTR(connect(host_socket, reinterpret_cast<sockaddr*>(&addr),
sizeof(addr))) < 0) {
LOG(ERROR) << "Failed to connect adb socket: " << strerror(errno);
CloseSocket(host_socket);
return -1;
}
size_t bytes_remaining = address_length + kLengthOfLength;
size_t bytes_sent = 0;
while (bytes_remaining > 0) {
int ret = HANDLE_EINTR(send(host_socket, request + bytes_sent,
bytes_remaining, 0));
if (ret < 0) {
LOG(ERROR) << "Failed to send request: " << strerror(errno);
CloseSocket(host_socket);
return -1;
}
bytes_sent += ret;
bytes_remaining -= ret;
}
const size_t kAdbStatusLength = 4;
char response[kBufferMaxLength];
int response_length = HANDLE_EINTR(recv(host_socket, response,
kBufferMaxLength, 0));
if (response_length < kAdbStatusLength ||
strncmp("OKAY", response, kAdbStatusLength) != 0) {
LOG(ERROR) << "Bad response from ADB: length: " << response_length
<< " data: " << DumpBinary(response, response_length);
CloseSocket(host_socket);
return -1;
}
return host_socket;
}
} // namespace tools
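ConnectAdbHostSocket() speaks the framing shown in the comment above: a 4-hex-digit length prefix followed by "tcp:<port>:<address>", answered by a 4-byte status that must be "OKAY". A minimal sketch of the same handshake, assuming an adb server is listening on the usual port 5037:

# Hypothetical re-implementation of the request framing used above.
import socket

def connect_adb_host_socket(forward_to):
    """forward_to is '<port>:<ip_address>', as in the C++ code above."""
    payload = 'tcp:' + forward_to
    request = '%04X%s' % (len(payload), payload)  # 4-hex-digit length prefix
    sock = socket.create_connection(('127.0.0.1', 5037))
    sock.sendall(request.encode('ascii'))
    status = sock.recv(4)
    if status != b'OKAY':
        sock.close()
        raise IOError('Bad response from ADB: %r' % status)
    return sock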

View File

@ -1,18 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_COMMON_ADB_CONNECTION_H_
#define TOOLS_ANDROID_COMMON_ADB_CONNECTION_H_
namespace tools {
// Creates a socket that can forward to a host socket through ADB.
// The format of forward_to is <port>:<ip_address>.
// Returns the socket handle, or -1 on any error.
int ConnectAdbHostSocket(const char* forward_to);
} // namespace tools
#endif // TOOLS_ANDROID_COMMON_ADB_CONNECTION_H_

View File

@ -1,26 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'android_tools_common',
'type': 'static_library',
'toolsets': ['host', 'target'],
'include_dirs': [
'..',
'../../..',
],
'sources': [
'adb_connection.cc',
'adb_connection.h',
'daemon.cc',
'daemon.h',
'net.cc',
'net.h',
],
},
],
}

View File

@ -1,75 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/common/daemon.h"
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include "base/command_line.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
namespace {
const char kNoSpawnDaemon[] = "D";
int g_exit_status = 0;
void Exit(int unused) {
_exit(g_exit_status);
}
void CloseFileDescriptor(int fd) {
int old_errno = errno;
(void) HANDLE_EINTR(close(fd));
errno = old_errno;
}
} // namespace
namespace tools {
bool HasHelpSwitch(const CommandLine& command_line) {
return command_line.HasSwitch("h") || command_line.HasSwitch("help");
}
bool HasNoSpawnDaemonSwitch(const CommandLine& command_line) {
return command_line.HasSwitch(kNoSpawnDaemon);
}
void ShowHelp(const char* program,
const char* extra_title,
const char* extra_descriptions) {
printf("Usage: %s [-%s] %s\n"
" -%s prevents spawning a daemon process\n%s",
program, kNoSpawnDaemon, extra_title, kNoSpawnDaemon,
extra_descriptions);
}
void SpawnDaemon(int exit_status) {
g_exit_status = exit_status;
signal(SIGUSR1, Exit);
if (fork()) {
// In parent process.
sleep(10); // Wait for the child process to finish setsid().
NOTREACHED();
}
// In child process.
setsid(); // Detach the child process from its parent.
kill(getppid(), SIGUSR1); // Inform the parent process to exit.
// Close the standard input and outputs, otherwise the process may block
// adbd when the shell exits.
// Comment out these lines if you want to see outputs for debugging.
CloseFileDescriptor(0);
CloseFileDescriptor(1);
CloseFileDescriptor(2);
}
} // namespace tools
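SpawnDaemon() above uses a double hand-off: the child detaches with setsid() and sends SIGUSR1 so the parent exits with the requested status. A rough Python equivalent, shown only to clarify the control flow (not a drop-in replacement):

# Hypothetical sketch of the SpawnDaemon() control flow.
import os
import signal
import sys

def spawn_daemon(exit_status):
    # The handler is installed before fork() so the parent cannot miss the
    # child's signal.
    signal.signal(signal.SIGUSR1, lambda sig, frame: sys.exit(exit_status))
    if os.fork():
        # Parent: wait for the child to finish setsid(), then exit via handler.
        signal.pause()
        sys.exit(exit_status)  # fallback, normally unreachable
    # Child: detach from the parent's session and tell it to exit.
    os.setsid()
    os.kill(os.getppid(), signal.SIGUSR1)
    # Close stdio so the daemon does not block adbd when the shell exits.
    for fd in (0, 1, 2):
        os.close(fd)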

View File

@ -1,28 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_COMMON_DAEMON_H_
#define TOOLS_ANDROID_COMMON_DAEMON_H_
class CommandLine;
namespace tools {
bool HasHelpSwitch(const CommandLine& command_line);
bool HasNoSpawnDaemonSwitch(const CommandLine& command_line);
void ShowHelp(const char* program,
const char* extra_title,
const char* extra_descriptions);
// Spawns a daemon process and exits the current process with exit_status.
// Any code executed after this function returns will be executed in the
// spawned daemon process.
void SpawnDaemon(int exit_status);
} // namespace tools
#endif // TOOLS_ANDROID_COMMON_DAEMON_H_

View File

@ -1,40 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/common/net.h"
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "base/strings/stringprintf.h"
namespace tools {
int DisableNagle(int socket) {
int on = 1;
return setsockopt(socket, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
}
int DeferAccept(int socket) {
int on = 1;
return setsockopt(socket, IPPROTO_TCP, TCP_DEFER_ACCEPT, &on, sizeof(on));
}
std::string DumpBinary(const char* buffer, size_t length) {
std::string result = "[";
for (int i = 0; i < length; ++i) {
base::StringAppendF(&result, "%02x,",
static_cast<unsigned char>(buffer[i]));
}
if (length)
result.erase(result.length() - 1);
return result + "]";
}
} // namespace tools

View File

@ -1,25 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_COMMON_NET_H_
#define TOOLS_ANDROID_COMMON_NET_H_
#include <string>
namespace tools {
// DisableNagle can improve TCP transmission performance. Both the Chrome net
// stack and the adb tool use it.
int DisableNagle(int socket);
// Wake up the listener only when data arrives.
int DeferAccept(int socket);
// Dumps a binary buffer into a string in a human-readable format.
std::string DumpBinary(const char* buffer, size_t length);
} // namespace tools
#endif // TOOLS_ANDROID_COMMON_NET_H_

View File

@ -1,108 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Dumps CPU and IO stats to a file at a regular interval.
//
// Output may be post processed by host to get top/iotop style information.
#include <signal.h>
#include <unistd.h>
#include <fstream>
#include <string>
#include <vector>
#include "base/command_line.h"
#include "base/file_util.h"
#include "base/strings/string_split.h"
namespace {
const char kIOStatsPath[] = "/proc/diskstats";
const char kCPUStatsPath[] = "/proc/stat";
class DeviceStatsMonitor {
public:
explicit DeviceStatsMonitor(const std::string& out_path)
: out_path_(out_path),
record_(true) {
CHECK(!out_path_.empty());
samples_.reserve(1024 * 1024);
}
// Records stats continuously at |hz| cycles per second until
// StopAndDumpStats() is called.
//
// Yes, this buffers everything in memory, so it cannot be used for extended
// durations without OOM. But that beats writing during the trace which
// would affect the results.
void Start(int hz) {
const int sample_interval = 1000000 / hz;
const base::FilePath io_stats_path(kIOStatsPath);
const base::FilePath cpu_stats_path(kCPUStatsPath);
std::string out;
while (record_) {
out.clear();
CHECK(file_util::ReadFileToString(io_stats_path, &out));
CHECK(file_util::ReadFileToString(cpu_stats_path, &out));
samples_.push_back(out);
usleep(sample_interval);
}
}
// Stops recording and saves samples to file.
void StopAndDumpStats() {
record_ = false;
usleep(250 * 1000);
std::ofstream out_stream;
out_stream.open(out_path_.value().c_str(), std::ios::out);
for (std::vector<std::string>::const_iterator i = samples_.begin();
i != samples_.end(); ++i) {
out_stream << i->c_str() << std::endl;
}
out_stream.close();
}
private:
const base::FilePath out_path_;
std::vector<std::string> samples_;
bool record_;
DISALLOW_COPY_AND_ASSIGN(DeviceStatsMonitor);
};
DeviceStatsMonitor* g_device_stats_monitor = NULL;
void SigTermHandler(int unused) {
printf("Stopping device stats monitor\n");
g_device_stats_monitor->StopAndDumpStats();
}
} // namespace
int main(int argc, char** argv) {
const int kDefaultHz = 20;
CommandLine command_line(argc, argv);
CommandLine::StringVector args = command_line.GetArgs();
if (command_line.HasSwitch("h") || command_line.HasSwitch("help") ||
args.size() != 1) {
printf("Usage: %s OUTPUT_FILE\n"
" --hz=HZ Number of samples/second. default=%d\n",
argv[0], kDefaultHz);
return 1;
}
int hz = command_line.HasSwitch("hz") ?
atoi(command_line.GetSwitchValueNative("hz").c_str()) :
kDefaultHz;
printf("Starting device stats monitor\n");
g_device_stats_monitor = new DeviceStatsMonitor(args[0]);
signal(SIGTERM, SigTermHandler);
g_device_stats_monitor->Start(hz);
delete g_device_stats_monitor;
return 0;
}
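The monitor above simply concatenates /proc/diskstats and /proc/stat once per sample and writes every sample out when it receives SIGTERM. A minimal Python sketch of the same sampling loop, useful when prototyping the host-side post-processing mentioned in the header comment:

# Hypothetical sketch of the sampling loop in device_stats_monitor.cc.
import time

def sample_stats(out_path, hz=20, duration_sec=10):
    interval = 1.0 / hz
    samples = []
    for _ in range(int(duration_sec * hz)):
        with open('/proc/diskstats') as f:
            sample = f.read()
        with open('/proc/stat') as f:
            sample += f.read()
        samples.append(sample)
        time.sleep(interval)
    with open(out_path, 'w') as out:
        out.write('\n'.join(samples))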

View File

@ -1,41 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'device_stats_monitor',
'type': 'none',
'dependencies': [
'device_stats_monitor_symbols',
],
'actions': [
{
'action_name': 'strip_device_stats_monitor',
'inputs': ['<(PRODUCT_DIR)/device_stats_monitor_symbols'],
'outputs': ['<(PRODUCT_DIR)/device_stats_monitor'],
'action': [
'<(android_strip)',
'--strip-unneeded',
'<@(_inputs)',
'-o',
'<@(_outputs)',
],
},
],
}, {
'target_name': 'device_stats_monitor_symbols',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
],
'include_dirs': [
'../../..',
],
'sources': [
'device_stats_monitor.cc',
],
},
],
}

View File

@ -1,3 +0,0 @@
include_rules = [
"+net",
]

View File

@ -1,238 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include "base/basictypes.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/safe_strerror_posix.h"
#include "net/base/big_endian.h"
#include "net/base/net_util.h"
#include "net/dns/dns_protocol.h"
#include "tools/android/common/daemon.h"
#include "tools/android/common/net.h"
namespace {
// Minimum request size: 1 question containing 1 QNAME, 1 TYPE and 1 CLASS.
const size_t kMinRequestSize = sizeof(net::dns_protocol::Header) + 6;
// The name reference in the answer pointing to the name in the query.
// Its format is: highest two bits set to 1, then the offset of the name
// which just follows the header.
const uint16 kPointerToQueryName =
static_cast<uint16>(0xc000 | sizeof(net::dns_protocol::Header));
const uint32 kTTL = 86400; // One day.
void PError(const char* msg) {
int current_errno = errno;
LOG(ERROR) << "ERROR: " << msg << ": " << safe_strerror(current_errno);
}
void SendTo(int sockfd, const void* buf, size_t len, int flags,
const sockaddr* dest_addr, socklen_t addrlen) {
if (HANDLE_EINTR(sendto(sockfd, buf, len, flags, dest_addr, addrlen)) == -1)
PError("sendto()");
}
void CloseFileDescriptor(int fd) {
int old_errno = errno;
(void) HANDLE_EINTR(close(fd));
errno = old_errno;
}
void SendRefusedResponse(int sock, const sockaddr_in& client_addr, uint16 id) {
net::dns_protocol::Header response;
response.id = htons(id);
response.flags = htons(net::dns_protocol::kFlagResponse |
net::dns_protocol::kFlagAA |
net::dns_protocol::kFlagRD |
net::dns_protocol::kFlagRA |
net::dns_protocol::kRcodeREFUSED);
response.qdcount = 0;
response.ancount = 0;
response.nscount = 0;
response.arcount = 0;
SendTo(sock, &response, sizeof(response), 0,
reinterpret_cast<const sockaddr*>(&client_addr), sizeof(client_addr));
}
void SendResponse(int sock, const sockaddr_in& client_addr, uint16 id,
uint16 qtype, const char* question, size_t question_length) {
net::dns_protocol::Header header;
header.id = htons(id);
header.flags = htons(net::dns_protocol::kFlagResponse |
net::dns_protocol::kFlagAA |
net::dns_protocol::kFlagRD |
net::dns_protocol::kFlagRA |
net::dns_protocol::kRcodeNOERROR);
header.qdcount = htons(1);
header.ancount = htons(1);
header.nscount = 0;
header.arcount = 0;
// Size of RDATA, which is an IPv4 or IPv6 address.
size_t rdata_size = qtype == net::dns_protocol::kTypeA ?
net::kIPv4AddressSize : net::kIPv6AddressSize;
// Size of the whole response which contains the header, the question and
// the answer. 12 is the sum of sizes of the compressed name reference, TYPE,
// CLASS, TTL and RDLENGTH.
size_t response_size = sizeof(header) + question_length + 12 + rdata_size;
if (response_size > net::dns_protocol::kMaxUDPSize) {
LOG(ERROR) << "Response is too large: " << response_size;
SendRefusedResponse(sock, client_addr, id);
return;
}
char response[net::dns_protocol::kMaxUDPSize];
net::BigEndianWriter writer(response, arraysize(response));
writer.WriteBytes(&header, sizeof(header));
// Repeat the question in the response. Some clients (e.g. ping) need this.
writer.WriteBytes(question, question_length);
// Construct the answer.
writer.WriteU16(kPointerToQueryName);
writer.WriteU16(qtype);
writer.WriteU16(net::dns_protocol::kClassIN);
writer.WriteU32(kTTL);
writer.WriteU16(rdata_size);
if (qtype == net::dns_protocol::kTypeA)
writer.WriteU32(INADDR_LOOPBACK);
else
writer.WriteBytes(&in6addr_loopback, sizeof(in6_addr));
DCHECK(writer.ptr() - response == response_size);
SendTo(sock, response, response_size, 0,
reinterpret_cast<const sockaddr*>(&client_addr), sizeof(client_addr));
}
void HandleRequest(int sock, const char* request, size_t size,
const sockaddr_in& client_addr) {
if (size < kMinRequestSize) {
LOG(ERROR) << "Request is too small " << size
<< "\n" << tools::DumpBinary(request, size);
return;
}
net::BigEndianReader reader(request, size);
net::dns_protocol::Header header;
reader.ReadBytes(&header, sizeof(header));
uint16 id = ntohs(header.id);
uint16 flags = ntohs(header.flags);
uint16 qdcount = ntohs(header.qdcount);
uint16 ancount = ntohs(header.ancount);
uint16 nscount = ntohs(header.nscount);
uint16 arcount = ntohs(header.arcount);
const uint16 kAllowedFlags = 0x07ff;
if ((flags & ~kAllowedFlags) ||
qdcount != 1 || ancount || nscount || arcount) {
LOG(ERROR) << "Unsupported request: FLAGS=" << flags
<< " QDCOUNT=" << qdcount
<< " ANCOUNT=" << ancount
<< " NSCOUNT=" << nscount
<< " ARCOUNT=" << arcount
<< "\n" << tools::DumpBinary(request, size);
SendRefusedResponse(sock, client_addr, id);
return;
}
// request[size - 5] should be the end of the QNAME (a zero byte).
// We don't care about the validity of QNAME because we don't parse it.
const char* qname_end = &request[size - 5];
if (*qname_end) {
LOG(ERROR) << "Error parsing QNAME\n" << tools::DumpBinary(request, size);
SendRefusedResponse(sock, client_addr, id);
return;
}
reader.Skip(qname_end - reader.ptr() + 1);
uint16 qtype;
uint16 qclass;
reader.ReadU16(&qtype);
reader.ReadU16(&qclass);
if ((qtype != net::dns_protocol::kTypeA &&
qtype != net::dns_protocol::kTypeAAAA) ||
qclass != net::dns_protocol::kClassIN) {
LOG(ERROR) << "Unsupported query: QTYPE=" << qtype << " QCLASS=" << qclass
<< "\n" << tools::DumpBinary(request, size);
SendRefusedResponse(sock, client_addr, id);
return;
}
SendResponse(sock, client_addr, id, qtype,
request + sizeof(header), size - sizeof(header));
}
} // namespace
int main(int argc, char** argv) {
printf("Fake DNS server\n");
CommandLine command_line(argc, argv);
if (tools::HasHelpSwitch(command_line) || command_line.GetArgs().size()) {
tools::ShowHelp(argv[0], "", "");
return 0;
}
int sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
PError("create socket");
return 1;
}
sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_port = htons(53);
int reuse_addr = 1;
if (HANDLE_EINTR(bind(sock, reinterpret_cast<sockaddr*>(&addr),
sizeof(addr))) < 0) {
PError("server bind");
CloseFileDescriptor(sock);
return 1;
}
if (!tools::HasNoSpawnDaemonSwitch(command_line))
tools::SpawnDaemon(0);
while (true) {
sockaddr_in client_addr;
socklen_t client_addr_len = sizeof(client_addr);
char request[net::dns_protocol::kMaxUDPSize];
int size = HANDLE_EINTR(recvfrom(sock, request, sizeof(request),
MSG_WAITALL,
reinterpret_cast<sockaddr*>(&client_addr),
&client_addr_len));
if (size < 0) {
// Unrecoverable error, can only exit.
LOG(ERROR) << "Failed to receive a request: " << strerror(errno);
CloseFileDescriptor(sock);
return 1;
}
if (size > 0)
HandleRequest(sock, request, size, client_addr);
}
}
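The server above answers every A or AAAA query with the loopback address and mirrors the question back, so a raw UDP query is enough to exercise it. A minimal sketch of such a client, assuming the fake DNS is already running on 127.0.0.1:53 (it binds the loopback interface and needs root for the privileged port):

# Hypothetical client for the fake DNS server above.
import socket
import struct

def query_a(name, server='127.0.0.1', port=53):
    # 12-byte header: ID, flags (RD only), QDCOUNT=1, AN/NS/ARCOUNT=0.
    header = struct.pack('!HHHHHH', 0x1234, 0x0100, 1, 0, 0, 0)
    qname = b''.join(struct.pack('B', len(p)) + p.encode('ascii')
                     for p in name.split('.')) + b'\x00'
    question = qname + struct.pack('!HH', 1, 1)  # QTYPE=A, QCLASS=IN
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(header + question, (server, port))
    response = sock.recv(512)
    # The answer's 4-byte RDATA sits at the very end of the response.
    return socket.inet_ntoa(response[-4:])

print(query_a('example.com'))  # expected: 127.0.0.1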

View File

@ -1,44 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'fake_dns',
'type': 'none',
'dependencies': [
'fake_dns_symbols',
],
'actions': [
{
'action_name': 'strip_fake_dns',
'inputs': ['<(PRODUCT_DIR)/fake_dns_symbols'],
'outputs': ['<(PRODUCT_DIR)/fake_dns'],
'action': [
'<(android_strip)',
'--strip-unneeded',
'<@(_inputs)',
'-o',
'<@(_outputs)',
],
},
],
}, {
'target_name': 'fake_dns_symbols',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
'../../../net/net.gyp:net',
'../common/common.gyp:android_tools_common',
],
'include_dirs': [
'../../..',
],
'sources': [
'fake_dns.cc',
],
},
],
}

View File

@ -1,126 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Lists unused Java strings and other resources."""
import optparse
import re
import subprocess
import sys
def GetApkResources(apk_path):
"""Returns the types and names of resources packaged in an APK.
Args:
apk_path: path to the APK.
Returns:
The resources in the APK as a list of tuples (type, name). Example:
[('drawable', 'arrow'), ('layout', 'month_picker'), ...]
"""
p = subprocess.Popen(
['aapt', 'dump', 'resources', apk_path],
stdout=subprocess.PIPE)
dump_out, _ = p.communicate()
assert p.returncode == 0, 'aapt dump failed'
matches = re.finditer(
r'^\s+spec resource 0x[0-9a-fA-F]+ [\w.]+:(?P<type>\w+)/(?P<name>\w+)',
dump_out, re.MULTILINE)
return [m.group('type', 'name') for m in matches]
def GetUsedResources(source_paths, resource_types):
"""Returns the types and names of resources used in Java or resource files.
Args:
source_paths: a list of files or folders collectively containing all the
Java files, resource files, and the AndroidManifest.xml.
resource_types: a list of resource types to look for. Example:
['string', 'drawable']
Returns:
The resources referenced by the Java and resource files as a list of tuples
(type, name). Example:
[('drawable', 'app_icon'), ('layout', 'month_picker'), ...]
"""
type_regex = '|'.join(map(re.escape, resource_types))
patterns = [r'@(())(%s)/(\w+)' % type_regex,
r'\b((\w+\.)*)R\.(%s)\.(\w+)' % type_regex]
resources = []
for pattern in patterns:
p = subprocess.Popen(
['grep', '-REIhoe', pattern] + source_paths,
stdout=subprocess.PIPE)
grep_out, grep_err = p.communicate()
# Check stderr instead of return code, since return code is 1 when no
# matches are found.
assert not grep_err, 'grep failed'
matches = re.finditer(pattern, grep_out)
for match in matches:
package = match.group(1)
if package == 'android.':
continue
type_ = match.group(3)
name = match.group(4)
resources.append((type_, name))
return resources
def FormatResources(resources):
"""Formats a list of resources for printing.
Args:
resources: a list of resources, given as (type, name) tuples.
"""
return '\n'.join(['%-12s %s' % (t, n) for t, n in sorted(resources)])
def ParseArgs(args):
usage = 'usage: %prog [-v] APK_PATH SOURCE_PATH...'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-v', help='Show verbose output', action='store_true')
options, args = parser.parse_args(args=args)
if len(args) < 2:
parser.error('must provide APK_PATH and SOURCE_PATH arguments')
return options.v, args[0], args[1:]
def main(args=None):
verbose, apk_path, source_paths = ParseArgs(args)
apk_resources = GetApkResources(apk_path)
resource_types = list(set([r[0] for r in apk_resources]))
used_resources = GetUsedResources(source_paths, resource_types)
unused_resources = set(apk_resources) - set(used_resources)
undefined_resources = set(used_resources) - set(apk_resources)
# aapt dump fails silently. Notify the user if things look wrong.
if not apk_resources:
print >> sys.stderr, (
'Warning: No resources found in the APK. Did you provide the correct '
'APK path?')
if not used_resources:
print >> sys.stderr, (
'Warning: No resource references found in Java or resource files. Did you '
'provide the correct source paths?')
if undefined_resources:
print >> sys.stderr, (
'Warning: found %d "undefined" resources that are referenced by Java '
'files or by other resources, but are not in the APK. Run with -v to '
'see them.' % len(undefined_resources))
if verbose:
print '%d undefined resources:' % len(undefined_resources)
print FormatResources(undefined_resources), '\n'
print '%d resources packaged into the APK:' % len(apk_resources)
print FormatResources(apk_resources), '\n'
print '%d used resources:' % len(used_resources)
print FormatResources(used_resources), '\n'
print '%d unused resources:' % len(unused_resources)
print FormatResources(unused_resources)
if __name__ == '__main__':
main()

View File

@ -1,15 +0,0 @@
This is the FindBugs plugin for Chrome on Android.
Currently it detects:
- synchronized methods
- synchronized 'this'
We don't want synchronized methods or synchronized 'this' to be
used; the only exception is synchronized methods defined in the
Android API.
The plugin jar file was prebuilt and checked in. To rebuild the
plugin you need ant; run the command below, and the new jar file
will be placed in the lib directory.
ant install

View File

@ -1,48 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2012 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<project name="findbugs_plugin" basedir=".">
<description>
Build findbugs_plugin for Chromium Android
</description>
<property name="src.dir" location="src" />
<property name="lib.dir" location="../../../third_party/findbugs/lib" />
<property name="bin.dir" location="lib" />
<property name="intermediate.dir" location="intermediate" />
<property name="jar.name" value="chromiumPlugin.jar" />
<path id="classpath.id">
<fileset dir="${lib.dir}">
<include name="**/*.jar" />
</fileset>
</path>
<target name="makedir">
<mkdir dir="${intermediate.dir}" />
<mkdir dir="${bin.dir}" />
</target>
<target name="findbugs_plugin_classes" depends="makedir">
<javac srcdir="${src.dir}" destdir="${intermediate.dir}"
classpathref="classpath.id" includeantruntime="false" />
</target>
<target name="copy_xml_files" depends="makedir">
<copy file="messages.xml" todir="${intermediate.dir}" />
<copy file="findbugs.xml" todir="${intermediate.dir}" />
</target>
<target name="findbugs_plugin_jar" depends="findbugs_plugin_classes, copy_xml_files">
<jar destfile="${bin.dir}/${jar.name}" basedir="${intermediate.dir}">
</jar>
</target>
<target name="install" depends="findbugs_plugin_jar">
<delete dir="${intermediate.dir}" />
</target>
</project>

View File

@ -1,18 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2012 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<FindbugsPlugin xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="findbugsplugin.xsd"
pluginid="SynchronizedThisDetector"
provider="chromium"
website="http://code.google.com/p/chromium/wiki/UseFindBugsForAndroid">
<Detector class="org.chromium.tools.findbugs.plugin.SynchronizedThisDetector" reports="CHROMIUM_SYNCHRONIZED_THIS" />
<BugPattern type="CHROMIUM_SYNCHRONIZED_THIS" abbrev="CST" category="CORRECTNESS"/>
<Detector class="org.chromium.tools.findbugs.plugin.SynchronizedMethodDetector" reports="CHROMIUM_SYNCHRONIZED_METHOD" />
<BugPattern type="CHROMIUM_SYNCHRONIZED_METHOD" abbrev="CSM" category="CORRECTNESS"/>
</FindbugsPlugin>

View File

@ -1,16 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'findbugs_plugin_test',
'type': 'none',
'variables': {
'java_in_dir': 'test/java/',
},
'includes': [ '../../../build/java.gypi' ],
}
]
}

View File

@ -1,56 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2012 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<MessageCollection xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="messagecollection.xsd">
<Plugin>
<ShortDescription>Chromium FindBugs Plugin </ShortDescription>
<Details>Adds style checks enforced in the chromium project.</Details>
</Plugin>
<Detector class="org.chromium.tools.findbugs.plugin.SynchronizedThisDetector">
<Details>
<![CDATA[
Shouldn't use synchronized(this).
]]>
</Details>
</Detector>
<BugPattern type="CHROMIUM_SYNCHRONIZED_THIS">
<ShortDescription>Shouldn't use synchronized(this)</ShortDescription>
<LongDescription>Shouldn't use synchronized(this), please narrow down the synchronization scope.</LongDescription>
<Details>
<![CDATA[
<p>Shouldn't use synchronized(this), please narrow down the synchronization scope.</p>
]]>
</Details>
</BugPattern>
<Detector class="org.chromium.tools.findbugs.plugin.SynchronizedMethodDetector">
<Details>
<![CDATA[
Shouldn't use synchronized method.
]]>
</Details>
</Detector>
<BugPattern type="CHROMIUM_SYNCHRONIZED_METHOD">
<ShortDescription>Shouldn't use synchronized method</ShortDescription>
<LongDescription>Shouldn't use synchronized method, please narrow down the synchronization scope.</LongDescription>
<Details>
<![CDATA[
<p>Shouldn't use synchronized method, please narrow down the synchronization scope.</p>
]]>
</Details>
</BugPattern>
<BugCode abbrev="CHROMIUM">CHROMIUM</BugCode>
</MessageCollection>

View File

@ -1,38 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.tools.findbugs.plugin;
import edu.umd.cs.findbugs.BugInstance;
import edu.umd.cs.findbugs.BugReporter;
import edu.umd.cs.findbugs.bcel.OpcodeStackDetector;
import org.apache.bcel.classfile.Code;
/**
* This class detects synchronized methods.
*/
public class SynchronizedMethodDetector extends OpcodeStackDetector {
private BugReporter mBugReporter;
public SynchronizedMethodDetector(BugReporter bugReporter) {
this.mBugReporter = bugReporter;
}
@Override
public void visit(Code code) {
if (getMethod().isSynchronized()) {
mBugReporter.reportBug(new BugInstance(this, "CHROMIUM_SYNCHRONIZED_METHOD",
NORMAL_PRIORITY)
.addClassAndMethod(this)
.addSourceLine(this));
}
super.visit(code);
}
@Override
public void sawOpcode(int arg0) {
}
}

View File

@ -1,73 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.tools.findbugs.plugin;
import org.apache.bcel.classfile.Code;
import edu.umd.cs.findbugs.BugInstance;
import edu.umd.cs.findbugs.BugReporter;
import edu.umd.cs.findbugs.bcel.OpcodeStackDetector;
/**
* This class detects synchronized(this) blocks.
*
* The bytecode pattern of synchronized(this) is:
* aload_0 # Load the 'this' pointer on top of the stack
* dup # Duplicate the 'this' pointer
* astore_x # Store 'this' for later use; it might be astore.
* monitorenter
*/
public class SynchronizedThisDetector extends OpcodeStackDetector {
private final int PATTERN[] = {ALOAD_0, DUP, 0xff, 0xff, MONITORENTER};
private int mStep = 0;
private BugReporter mBugReporter;
public SynchronizedThisDetector(BugReporter bugReporter) {
mBugReporter = bugReporter;
}
@Override
public void visit(Code code) {
mStep = 0;
super.visit(code);
}
@Override
public void sawOpcode(int seen) {
if (PATTERN[mStep] == seen) {
mStep++;
if (mStep == PATTERN.length) {
mBugReporter.reportBug(new BugInstance(this, "CHROMIUM_SYNCHRONIZED_THIS",
NORMAL_PRIORITY)
.addClassAndMethod(this)
.addSourceLine(this));
mStep = 0;
return;
}
} else if (mStep == 2) {
// This could be astore_x
switch (seen) {
case ASTORE_0:
case ASTORE_1:
case ASTORE_2:
case ASTORE_3:
mStep += 2;
break;
case ASTORE:
mStep++;
break;
default:
mStep = 0;
break;
}
} else if (mStep == 3) {
// Could be any byte following the ASTORE.
mStep++;
} else {
mStep = 0;
}
}
}

View File

@ -1,3 +0,0 @@
M C CSM: Shouldn't use synchronized method, please narrow down the synchronization scope. At SimpleSynchronizedMethod.java
M C CSM: Shouldn't use synchronized method, please narrow down the synchronization scope. At SimpleSynchronizedStaticMethod.java
M C CST: Shouldn't use synchronized(this), please narrow down the synchronization scope. At SimpleSynchronizedThis.java

View File

@ -1,16 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.tools.findbugs.plugin;
/**
* This class has a synchronized method and is used to test
* SynchronizedMethodDetector.
*/
class SimpleSynchronizedMethod {
private int i = 0;
synchronized void synchronizedMethod() {
i++;
}
}

View File

@ -1,15 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.tools.findbugs.plugin;
/**
* This class is used to test SynchronizedMethodDetector
*/
class SimpleSynchronizedStaticMethod {
private static int i = 0;
synchronized static void synchronizedStaticMethod() {
i++;
}
}

View File

@ -1,19 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.tools.findbugs.plugin;
/**
* This class has a synchronized(this) statement and is used to test
* SynchronizedThisDetector.
*/
class SimpleSynchronizedThis {
private int i = 0;
void synchronizedThis() {
synchronized(this) {
i++;
}
}
}

View File

@ -1,44 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script tests the findbugs plugin. It calls
# build/android/pylib/utils/findbugs.py to analyze the classes in the
# org.chromium.tools.findbugs.plugin package, and expects to get the same
# issues as those in expected_result.txt.
#
# Useful command line:
# --rebaseline to regenerate expected_result.txt; please make sure you don't
# remove the expected results of existing tests.
import optparse
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..', '..',
'build', 'android')))
from pylib import constants
from pylib.utils import findbugs
def main(argv):
parser = findbugs.GetCommonParser()
options, _ = parser.parse_args()
if not options.known_bugs:
options.known_bugs = os.path.join(constants.DIR_SOURCE_ROOT, 'tools',
'android', 'findbugs_plugin', 'test',
'expected_result.txt')
if not options.only_analyze:
options.only_analyze = 'org.chromium.tools.findbugs.plugin.*'
return findbugs.Run(options)
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -1,426 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>
#include "base/command_line.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "tools/android/common/adb_connection.h"
#include "tools/android/common/daemon.h"
#include "tools/android/common/net.h"
namespace {
const pthread_t kInvalidThread = static_cast<pthread_t>(-1);
volatile bool g_killed = false;
void CloseSocket(int fd) {
if (fd >= 0) {
int old_errno = errno;
(void) HANDLE_EINTR(close(fd));
errno = old_errno;
}
}
class Buffer {
public:
Buffer()
: bytes_read_(0),
write_offset_(0) {
}
bool CanRead() {
return bytes_read_ == 0;
}
bool CanWrite() {
return write_offset_ < bytes_read_;
}
int Read(int fd) {
int ret = -1;
if (CanRead()) {
ret = HANDLE_EINTR(read(fd, buffer_, kBufferSize));
if (ret > 0)
bytes_read_ = ret;
}
return ret;
}
int Write(int fd) {
int ret = -1;
if (CanWrite()) {
ret = HANDLE_EINTR(write(fd, buffer_ + write_offset_,
bytes_read_ - write_offset_));
if (ret > 0) {
write_offset_ += ret;
if (write_offset_ == bytes_read_) {
write_offset_ = 0;
bytes_read_ = 0;
}
}
}
return ret;
}
private:
// A big buffer to let our file-over-http bridge work more like a real file.
static const int kBufferSize = 1024 * 128;
int bytes_read_;
int write_offset_;
char buffer_[kBufferSize];
DISALLOW_COPY_AND_ASSIGN(Buffer);
};
class Server;
struct ForwarderThreadInfo {
ForwarderThreadInfo(Server* a_server, int a_forwarder_index)
: server(a_server),
forwarder_index(a_forwarder_index) {
}
Server* server;
int forwarder_index;
};
struct ForwarderInfo {
time_t start_time;
int socket1;
time_t socket1_last_byte_time;
size_t socket1_bytes;
int socket2;
time_t socket2_last_byte_time;
size_t socket2_bytes;
};
class Server {
public:
Server()
: thread_(kInvalidThread),
socket_(-1) {
memset(forward_to_, 0, sizeof(forward_to_));
memset(&forwarders_, 0, sizeof(forwarders_));
}
int GetFreeForwarderIndex() {
for (int i = 0; i < kMaxForwarders; i++) {
if (forwarders_[i].start_time == 0)
return i;
}
return -1;
}
void DisposeForwarderInfo(int index) {
forwarders_[index].start_time = 0;
}
ForwarderInfo* GetForwarderInfo(int index) {
return &forwarders_[index];
}
void DumpInformation() {
LOG(INFO) << "Server information: " << forward_to_;
LOG(INFO) << "No.: age up(bytes,idle) down(bytes,idle)";
int count = 0;
time_t now = time(NULL);
for (int i = 0; i < kMaxForwarders; i++) {
const ForwarderInfo& info = forwarders_[i];
if (info.start_time) {
count++;
LOG(INFO) << count << ": " << now - info.start_time << " up("
<< info.socket1_bytes << ","
<< now - info.socket1_last_byte_time << ") down("
<< info.socket2_bytes << ","
<< now - info.socket2_last_byte_time << ")";
}
}
}
void Shutdown() {
if (socket_ >= 0)
shutdown(socket_, SHUT_RDWR);
}
bool InitSocket(const char* arg);
void StartThread() {
pthread_create(&thread_, NULL, ServerThread, this);
}
void JoinThread() {
if (thread_ != kInvalidThread)
pthread_join(thread_, NULL);
}
private:
static void* ServerThread(void* arg);
// There are 3 kinds of threads that will access the array:
// 1. Server thread will get a free ForwarderInfo and initialize it;
// 2. Forwarder threads will dispose the ForwarderInfo when it finishes;
// 3. Main thread will iterate and print the forwarders.
// Using an array is not optimal, but can avoid locks or other complex
// inter-thread communication.
static const int kMaxForwarders = 512;
ForwarderInfo forwarders_[kMaxForwarders];
pthread_t thread_;
int socket_;
char forward_to_[40];
DISALLOW_COPY_AND_ASSIGN(Server);
};
// Forwards all outputs from one socket to another socket.
void* ForwarderThread(void* arg) {
ForwarderThreadInfo* thread_info =
reinterpret_cast<ForwarderThreadInfo*>(arg);
Server* server = thread_info->server;
int index = thread_info->forwarder_index;
delete thread_info;
ForwarderInfo* info = server->GetForwarderInfo(index);
int socket1 = info->socket1;
int socket2 = info->socket2;
int nfds = socket1 > socket2 ? socket1 + 1 : socket2 + 1;
fd_set read_fds;
fd_set write_fds;
Buffer buffer1;
Buffer buffer2;
while (!g_killed) {
FD_ZERO(&read_fds);
if (buffer1.CanRead())
FD_SET(socket1, &read_fds);
if (buffer2.CanRead())
FD_SET(socket2, &read_fds);
FD_ZERO(&write_fds);
if (buffer1.CanWrite())
FD_SET(socket2, &write_fds);
if (buffer2.CanWrite())
FD_SET(socket1, &write_fds);
if (HANDLE_EINTR(select(nfds, &read_fds, &write_fds, NULL, NULL)) <= 0) {
LOG(ERROR) << "Select error: " << strerror(errno);
break;
}
int now = time(NULL);
if (FD_ISSET(socket1, &read_fds)) {
info->socket1_last_byte_time = now;
int bytes = buffer1.Read(socket1);
if (bytes <= 0)
break;
info->socket1_bytes += bytes;
}
if (FD_ISSET(socket2, &read_fds)) {
info->socket2_last_byte_time = now;
int bytes = buffer2.Read(socket2);
if (bytes <= 0)
break;
info->socket2_bytes += bytes;
}
if (FD_ISSET(socket1, &write_fds)) {
if (buffer2.Write(socket1) <= 0)
break;
}
if (FD_ISSET(socket2, &write_fds)) {
if (buffer1.Write(socket2) <= 0)
break;
}
}
CloseSocket(socket1);
CloseSocket(socket2);
server->DisposeForwarderInfo(index);
return NULL;
}
// Listens to a server socket. On incoming request, forward it to the host.
// static
void* Server::ServerThread(void* arg) {
Server* server = reinterpret_cast<Server*>(arg);
while (!g_killed) {
int forwarder_index = server->GetFreeForwarderIndex();
if (forwarder_index < 0) {
LOG(ERROR) << "Too many forwarders";
continue;
}
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
int socket = HANDLE_EINTR(accept(server->socket_,
reinterpret_cast<sockaddr*>(&addr),
&addr_len));
if (socket < 0) {
LOG(ERROR) << "Failed to accept: " << strerror(errno);
break;
}
tools::DisableNagle(socket);
int host_socket = tools::ConnectAdbHostSocket(server->forward_to_);
if (host_socket >= 0) {
// Set NONBLOCK flag because we use select().
fcntl(socket, F_SETFL, fcntl(socket, F_GETFL) | O_NONBLOCK);
fcntl(host_socket, F_SETFL, fcntl(host_socket, F_GETFL) | O_NONBLOCK);
ForwarderInfo* forwarder_info = server->GetForwarderInfo(forwarder_index);
time_t now = time(NULL);
forwarder_info->start_time = now;
forwarder_info->socket1 = socket;
forwarder_info->socket1_last_byte_time = now;
forwarder_info->socket1_bytes = 0;
forwarder_info->socket2 = host_socket;
forwarder_info->socket2_last_byte_time = now;
forwarder_info->socket2_bytes = 0;
pthread_t thread;
pthread_create(&thread, NULL, ForwarderThread,
new ForwarderThreadInfo(server, forwarder_index));
} else {
// Close the unused client socket, which failed to connect to the host.
CloseSocket(socket);
}
}
CloseSocket(server->socket_);
server->socket_ = -1;
return NULL;
}
// Format of arg: <Device port>[:<Forward to port>:<Forward to address>]
bool Server::InitSocket(const char* arg) {
char* endptr;
int local_port = static_cast<int>(strtol(arg, &endptr, 10));
if (local_port < 0)
return false;
if (*endptr != ':') {
snprintf(forward_to_, sizeof(forward_to_), "%d:127.0.0.1", local_port);
} else {
strncpy(forward_to_, endptr + 1, sizeof(forward_to_) - 1);
}
socket_ = socket(AF_INET, SOCK_STREAM, 0);
if (socket_ < 0) {
perror("server socket");
return false;
}
tools::DisableNagle(socket_);
sockaddr_in addr;
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_port = htons(local_port);
int reuse_addr = 1;
setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
&reuse_addr, sizeof(reuse_addr));
tools::DeferAccept(socket_);
if (HANDLE_EINTR(bind(socket_, reinterpret_cast<sockaddr*>(&addr),
sizeof(addr))) < 0 ||
HANDLE_EINTR(listen(socket_, 5)) < 0) {
perror("server bind");
CloseSocket(socket_);
socket_ = -1;
return false;
}
if (local_port == 0) {
socklen_t addrlen = sizeof(addr);
if (getsockname(socket_, reinterpret_cast<sockaddr*>(&addr), &addrlen)
!= 0) {
perror("get listen address");
CloseSocket(socket_);
socket_ = -1;
return false;
}
local_port = ntohs(addr.sin_port);
}
printf("Forwarding device port %d to host %s\n", local_port, forward_to_);
return true;
}
int g_server_count = 0;
Server* g_servers = NULL;
void KillHandler(int unused) {
g_killed = true;
for (int i = 0; i < g_server_count; i++)
g_servers[i].Shutdown();
}
void DumpInformation(int unused) {
for (int i = 0; i < g_server_count; i++)
g_servers[i].DumpInformation();
}
} // namespace
int main(int argc, char** argv) {
printf("Android device to host TCP forwarder\n");
printf("Like 'adb forward' but in the reverse direction\n");
CommandLine command_line(argc, argv);
CommandLine::StringVector server_args = command_line.GetArgs();
if (tools::HasHelpSwitch(command_line) || server_args.empty()) {
tools::ShowHelp(
argv[0],
"<Device port>[:<Forward to port>:<Forward to address>] ...",
" <Forward to port> default is <Device port>\n"
" <Forward to address> default is 127.0.0.1\n"
"If <Device port> is 0, a port will be dynamically allocated.\n");
return 0;
}
g_servers = new Server[server_args.size()];
g_server_count = 0;
int failed_count = 0;
for (size_t i = 0; i < server_args.size(); i++) {
if (!g_servers[g_server_count].InitSocket(server_args[i].c_str())) {
printf("Couldn't start forwarder server for port spec: %s\n",
server_args[i].c_str());
++failed_count;
} else {
++g_server_count;
}
}
if (g_server_count == 0) {
printf("No forwarder servers could be started. Exiting.\n");
delete [] g_servers;
return failed_count;
}
if (!tools::HasNoSpawnDaemonSwitch(command_line))
tools::SpawnDaemon(failed_count);
signal(SIGTERM, KillHandler);
signal(SIGUSR2, DumpInformation);
for (int i = 0; i < g_server_count; i++)
g_servers[i].StartThread();
for (int i = 0; i < g_server_count; i++)
g_servers[i].JoinThread();
g_server_count = 0;
delete [] g_servers;
return 0;
}

View File

@ -1,43 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'forwarder',
'type': 'none',
'dependencies': [
'forwarder_symbols',
],
'actions': [
{
'action_name': 'strip_forwarder',
'inputs': ['<(PRODUCT_DIR)/forwarder_symbols'],
'outputs': ['<(PRODUCT_DIR)/forwarder'],
'action': [
'<(android_strip)',
'--strip-unneeded',
'<@(_inputs)',
'-o',
'<@(_outputs)',
],
},
],
}, {
'target_name': 'forwarder_symbols',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
'../common/common.gyp:android_tools_common',
],
'include_dirs': [
'../../..',
],
'sources': [
'forwarder.cc',
],
},
],
}

View File

@ -1,96 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/command.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "base/logging.h"
#include "base/safe_strerror_posix.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "tools/android/forwarder2/socket.h"
using base::StringPiece;
namespace {
// Command format:
// <port>:<type>
//
// Where:
// <port> is a 5-char zero-padded ASCII decimal integer
// matching the target port for the command (e.g.
// '08080' for port 8080)
// <type> is a 2-char zero-padded ASCII decimal integer
// matching a command::Type value (e.g. '02' for
// ACK).
// The colon (:) is used as a separator for easier reading.
const int kPortStringSize = 5;
const int kCommandTypeStringSize = 2;
// Command string size also includes the ':' separator char.
const int kCommandStringSize = kPortStringSize + kCommandTypeStringSize + 1;
} // namespace
namespace forwarder2 {
bool ReadCommand(Socket* socket,
int* port_out,
command::Type* command_type_out) {
char command_buffer[kCommandStringSize + 1];
// To make logging easier.
command_buffer[kCommandStringSize] = '\0';
int bytes_read = socket->ReadNumBytes(command_buffer, kCommandStringSize);
if (bytes_read != kCommandStringSize) {
if (bytes_read < 0)
LOG(ERROR) << "Read() error: " << safe_strerror(errno);
else if (!bytes_read)
LOG(ERROR) << "Read() error, endpoint was unexpectedly closed.";
else
LOG(ERROR) << "Read() error, not enough data received from the socket.";
return false;
}
StringPiece port_str(command_buffer, kPortStringSize);
if (!StringToInt(port_str, port_out)) {
LOG(ERROR) << "Could not parse the command port string: "
<< port_str;
return false;
}
StringPiece command_type_str(
&command_buffer[kPortStringSize + 1], kCommandTypeStringSize);
int command_type;
if (!StringToInt(command_type_str, &command_type)) {
LOG(ERROR) << "Could not parse the command type string: "
<< command_type_str;
return false;
}
*command_type_out = static_cast<command::Type>(command_type);
return true;
}
bool SendCommand(command::Type command, int port, Socket* socket) {
char buffer[kCommandStringSize + 1];
int len = snprintf(buffer, sizeof(buffer), "%05d:%02d", port, command);
CHECK_EQ(len, kCommandStringSize);
// Write the full command minus the trailing \0 char.
return socket->WriteNumBytes(buffer, len) == len;
}
bool ReceivedCommand(command::Type command, Socket* socket) {
int port;
command::Type received_command;
if (!ReadCommand(socket, &port, &received_command))
return false;
return received_command == command;
}
} // namespace forwarder2
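The wire format above is a fixed 8-character string: a 5-digit zero-padded port, a colon, and a 2-digit zero-padded command type. A minimal sketch of encoding and decoding it (ACK is 2, per the command::Type enum in command.h below):

# Hypothetical encoder/decoder for the command strings handled by
# ReadCommand() and SendCommand() above.
def encode_command(command_type, port):
    return '%05d:%02d' % (port, command_type)

def decode_command(buf):
    port_str, type_str = buf.split(':')
    return int(port_str), int(type_str)

assert encode_command(2, 8080) == '08080:02'
assert decode_command('08080:02') == (8080, 2)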

View File

@ -1,48 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_COMMAND_H_
#define TOOLS_ANDROID_FORWARDER2_COMMAND_H_
#include "base/basictypes.h"
namespace forwarder2 {
class Socket;
namespace command {
enum Type {
ACCEPT_ERROR = 0,
ACCEPT_SUCCESS,
ACK,
ADB_DATA_SOCKET_ERROR,
ADB_DATA_SOCKET_SUCCESS,
BIND_ERROR,
BIND_SUCCESS,
DATA_CONNECTION,
HOST_SERVER_ERROR,
HOST_SERVER_SUCCESS,
KILL_ALL_LISTENERS,
LISTEN,
UNLISTEN,
UNLISTEN_ERROR,
UNLISTEN_SUCCESS,
};
} // namespace command
bool ReadCommand(Socket* socket,
int* port_out,
command::Type* command_type_out);
// Helper function to read the command from the |socket| and return true if the
// |command| is equal to the given command parameter.
bool ReceivedCommand(command::Type command, Socket* socket);
bool SendCommand(command::Type command, int port, Socket* socket);
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_COMMAND_H_

View File

@ -1,28 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/common.h"
#include <errno.h>
#include <unistd.h>
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/safe_strerror_posix.h"
namespace forwarder2 {
void PError(const char* msg) {
LOG(ERROR) << msg << ": " << safe_strerror(errno);
}
void CloseFD(int fd) {
const int errno_copy = errno;
if (HANDLE_EINTR(close(fd)) < 0) {
PError("close");
errno = errno_copy;
}
}
} // namespace forwarder2

View File

@ -1,89 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Common helper functions/classes used both in the host and device forwarder.
#ifndef TOOLS_ANDROID_FORWARDER2_COMMON_H_
#define TOOLS_ANDROID_FORWARDER2_COMMON_H_
#include <stdarg.h>
#include <stdio.h>
#include <errno.h>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
// Preserving errno for Close() is important because the function is very often
// used in cleanup code, after an error occurred, and it is very easy to pass an
// invalid file descriptor to close() in this context, or more rarely, a
// spurious signal might make close() return -1 and set errno to EINTR,
// masking the real reason for the original error. This leads to very unpleasant
// debugging sessions.
#define PRESERVE_ERRNO_HANDLE_EINTR(Func) \
do { \
int local_errno = errno; \
(void) HANDLE_EINTR(Func); \
errno = local_errno; \
} while (false);
// Wrapper around RAW_LOG() which is signal-safe. The only purpose of this macro
// is to avoid documenting uses of RawLog().
#define SIGNAL_SAFE_LOG(Level, Msg) \
RAW_LOG(Level, Msg);
namespace forwarder2 {
// Note that the two following functions are not signal-safe.
// Chromium logging-aware implementation of libc's perror().
void PError(const char* msg);
// Closes the provided file descriptor and logs an error if it failed.
void CloseFD(int fd);
// Helps build a formatted C-string allocated in a fixed-size array. This is
// useful in signal handlers where base::StringPrintf() can't be used safely
// (due to its use of LOG()).
template <int BufferSize>
class FixedSizeStringBuilder {
public:
FixedSizeStringBuilder() {
Reset();
}
const char* buffer() const { return buffer_; }
void Reset() {
buffer_[0] = 0;
write_ptr_ = buffer_;
}
// Returns the number of bytes appended to the underlying buffer or -1 if it
// failed.
int Append(const char* format, ...) PRINTF_FORMAT(/* + 1 for 'this' */ 2, 3) {
if (write_ptr_ >= buffer_ + BufferSize)
return -1;
va_list ap;
va_start(ap, format);
const int bytes_written = vsnprintf(
write_ptr_, BufferSize - (write_ptr_ - buffer_), format, ap);
va_end(ap);
if (bytes_written > 0)
write_ptr_ += bytes_written;
return bytes_written;
}
private:
char* write_ptr_;
char buffer_[BufferSize];
COMPILE_ASSERT(BufferSize >= 1, Size_of_buffer_must_be_at_least_one);
DISALLOW_COPY_AND_ASSIGN(FixedSizeStringBuilder);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_COMMON_H_

View File

@ -1,288 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/daemon.h"
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdlib>
#include <cstring>
#include <string>
#include "base/basictypes.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "base/safe_strerror_posix.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "tools/android/forwarder2/common.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
namespace {
const int kBufferSize = 256;
// Timeout constant used for polling when connecting to the daemon's Unix Domain
// Socket and also when waiting for its death when it is killed.
const int kNumTries = 100;
const int kIdleTimeMSec = 20;
void InitLoggingForDaemon(const std::string& log_file) {
logging::LoggingSettings settings;
settings.logging_dest =
log_file.empty() ?
logging::LOG_TO_SYSTEM_DEBUG_LOG : logging::LOG_TO_FILE;
settings.log_file = log_file.c_str();
settings.lock_log = logging::DONT_LOCK_LOG_FILE;
settings.dcheck_state =
logging::ENABLE_DCHECK_FOR_NON_OFFICIAL_RELEASE_BUILDS;
CHECK(logging::InitLogging(settings));
}
bool RunServerAcceptLoop(const std::string& welcome_message,
Socket* server_socket,
Daemon::ServerDelegate* server_delegate) {
bool failed = false;
for (;;) {
scoped_ptr<Socket> client_socket(new Socket());
if (!server_socket->Accept(client_socket.get())) {
if (server_socket->DidReceiveEvent())
break;
PError("Accept()");
failed = true;
break;
}
if (!client_socket->Write(welcome_message.c_str(),
welcome_message.length() + 1)) {
PError("Write()");
failed = true;
continue;
}
server_delegate->OnClientConnected(client_socket.Pass());
}
return !failed;
}
void SigChildHandler(int signal_number) {
DCHECK_EQ(signal_number, SIGCHLD);
int status;
pid_t child_pid = waitpid(-1 /* any child */, &status, WNOHANG);
if (child_pid < 0) {
PError("waitpid");
return;
}
if (child_pid == 0)
return;
if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
return;
// Avoid using StringAppendF() since it's unsafe in a signal handler due to
// its use of LOG().
FixedSizeStringBuilder<256> string_builder;
string_builder.Append("Daemon (pid=%d) died unexpectedly with ", child_pid);
if (WIFEXITED(status))
string_builder.Append("status %d.", WEXITSTATUS(status));
else if (WIFSIGNALED(status))
string_builder.Append("signal %d.", WTERMSIG(status));
else
string_builder.Append("unknown reason.");
SIGNAL_SAFE_LOG(ERROR, string_builder.buffer());
}
// Note that 0 is written to |lock_owner_pid| in case the file is not locked.
bool GetFileLockOwnerPid(int fd, pid_t* lock_owner_pid) {
struct flock lock_info = {};
lock_info.l_type = F_WRLCK;
lock_info.l_whence = SEEK_CUR;
const int ret = HANDLE_EINTR(fcntl(fd, F_GETLK, &lock_info));
if (ret < 0) {
if (errno == EBADF) {
// Assume that the provided file descriptor corresponding to the PID file
// was valid until the daemon removed this file.
*lock_owner_pid = 0;
return true;
}
PError("fcntl");
return false;
}
if (lock_info.l_type == F_UNLCK) {
*lock_owner_pid = 0;
return true;
}
CHECK_EQ(F_WRLCK /* exclusive lock */, lock_info.l_type);
*lock_owner_pid = lock_info.l_pid;
return true;
}
scoped_ptr<Socket> ConnectToUnixDomainSocket(
const std::string& socket_name,
int tries_count,
int idle_time_msec,
const std::string& expected_welcome_message) {
for (int i = 0; i < tries_count; ++i) {
scoped_ptr<Socket> socket(new Socket());
if (!socket->ConnectUnix(socket_name)) {
if (idle_time_msec)
usleep(idle_time_msec * 1000);
continue;
}
char buf[kBufferSize];
DCHECK(expected_welcome_message.length() + 1 <= sizeof(buf));
memset(buf, 0, sizeof(buf));
if (socket->Read(buf, expected_welcome_message.length() + 1) < 0) {
perror("read");
continue;
}
if (expected_welcome_message != buf) {
LOG(ERROR) << "Unexpected message read from daemon: " << buf;
break;
}
return socket.Pass();
}
return scoped_ptr<Socket>();
}
} // namespace
Daemon::Daemon(const std::string& log_file_path,
const std::string& identifier,
ClientDelegate* client_delegate,
ServerDelegate* server_delegate,
GetExitNotifierFDCallback get_exit_fd_callback)
: log_file_path_(log_file_path),
identifier_(identifier),
client_delegate_(client_delegate),
server_delegate_(server_delegate),
get_exit_fd_callback_(get_exit_fd_callback) {
DCHECK(client_delegate_);
DCHECK(server_delegate_);
DCHECK(get_exit_fd_callback_);
}
Daemon::~Daemon() {}
bool Daemon::SpawnIfNeeded() {
const int kSingleTry = 1;
const int kNoIdleTime = 0;
scoped_ptr<Socket> client_socket = ConnectToUnixDomainSocket(
identifier_, kSingleTry, kNoIdleTime, identifier_);
if (!client_socket) {
switch (fork()) {
case -1:
PError("fork()");
return false;
// Child.
case 0: {
if (setsid() < 0) { // Detach the child process from its parent.
PError("setsid()");
exit(1);
}
InitLoggingForDaemon(log_file_path_);
CloseFD(STDIN_FILENO);
CloseFD(STDOUT_FILENO);
CloseFD(STDERR_FILENO);
const int null_fd = open("/dev/null", O_RDWR);
CHECK_EQ(null_fd, STDIN_FILENO);
CHECK_EQ(dup(null_fd), STDOUT_FILENO);
CHECK_EQ(dup(null_fd), STDERR_FILENO);
Socket command_socket;
if (!command_socket.BindUnix(identifier_)) {
scoped_ptr<Socket> client_socket = ConnectToUnixDomainSocket(
identifier_, kSingleTry, kNoIdleTime, identifier_);
if (client_socket.get()) {
// The daemon was spawned by a concurrent process.
exit(0);
}
PError("bind()");
exit(1);
}
server_delegate_->Init();
command_socket.AddEventFd(get_exit_fd_callback_());
return RunServerAcceptLoop(
identifier_, &command_socket, server_delegate_);
}
default:
break;
}
}
// Parent.
// Install the custom SIGCHLD handler.
sigset_t blocked_signals_set;
if (sigprocmask(0 /* first arg ignored */, NULL, &blocked_signals_set) < 0) {
PError("sigprocmask()");
return false;
}
struct sigaction old_action;
struct sigaction new_action;
memset(&new_action, 0, sizeof(new_action));
new_action.sa_handler = SigChildHandler;
new_action.sa_flags = SA_NOCLDSTOP;
sigemptyset(&new_action.sa_mask);
if (sigaction(SIGCHLD, &new_action, &old_action) < 0) {
PError("sigaction()");
return false;
}
// Connect to the daemon's Unix Domain Socket.
bool failed = false;
if (!client_socket) {
client_socket = ConnectToUnixDomainSocket(
identifier_, kNumTries, kIdleTimeMSec, identifier_);
if (!client_socket) {
LOG(ERROR) << "Could not connect to daemon's Unix Daemon socket";
failed = true;
}
}
if (!failed)
client_delegate_->OnDaemonReady(client_socket.get());
// Restore the previous signal action for SIGCHLD.
if (sigaction(SIGCHLD, &old_action, NULL) < 0) {
PError("sigaction");
failed = true;
}
return !failed;
}
bool Daemon::Kill() {
pid_t daemon_pid = Socket::GetUnixDomainSocketProcessOwner(identifier_);
if (daemon_pid < 0)
return true; // No daemon running.
if (kill(daemon_pid, SIGTERM) < 0) {
if (errno == ESRCH /* invalid PID */)
// The daemon exited for some reason (e.g. killed by a process other than
// us) right before the call to kill() above.
return true;
PError("kill");
return false;
}
for (int i = 0; i < kNumTries; ++i) {
const pid_t previous_pid = daemon_pid;
daemon_pid = Socket::GetUnixDomainSocketProcessOwner(identifier_);
if (daemon_pid < 0)
return true;
// Since we are polling we might not see the 'daemon exited' event if
// another daemon was spawned during our idle period.
if (daemon_pid != previous_pid) {
LOG(WARNING) << "Daemon (pid=" << previous_pid
<< ") was successfully killed but a new daemon (pid="
<< daemon_pid << ") seems to be running now.";
return true;
}
usleep(kIdleTimeMSec * 1000);
}
LOG(ERROR) << "Timed out while killing daemon. "
"It might still be tearing down.";
return false;
}
} // namespace forwarder2

@ -1,75 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_DAEMON_H_
#define TOOLS_ANDROID_FORWARDER2_DAEMON_H_
#include <string>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
namespace forwarder2 {
class Socket;
// Provides a way to spawn a daemon and communicate with it.
class Daemon {
public:
// Callback used by the daemon to shutdown properly. See pipe_notifier.h for
// more details.
typedef int (*GetExitNotifierFDCallback)();
class ClientDelegate {
public:
virtual ~ClientDelegate() {}
// Called after the daemon is ready to receive commands.
virtual void OnDaemonReady(Socket* daemon_socket) = 0;
};
class ServerDelegate {
public:
virtual ~ServerDelegate() {}
// Called after the daemon bound its Unix Domain Socket. This can be used to
// setup signal handlers or perform global initialization.
virtual void Init() = 0;
virtual void OnClientConnected(scoped_ptr<Socket> client_socket) = 0;
};
// |identifier| should be a unique string identifier. It is used to
// bind/connect the underlying Unix Domain Socket.
// Note that this class does not take ownership of |client_delegate| and
// |server_delegate|.
Daemon(const std::string& log_file_path,
const std::string& identifier,
ClientDelegate* client_delegate,
ServerDelegate* server_delegate,
GetExitNotifierFDCallback get_exit_fd_callback);
~Daemon();
// Returns whether the daemon was successfully spawned. Note that this does
// not necessarily mean that the current process was forked in case the daemon
// is already running.
bool SpawnIfNeeded();
// Kills the daemon and blocks until it exited. Returns whether it succeeded.
bool Kill();
private:
const std::string log_file_path_;
const std::string identifier_;
ClientDelegate* const client_delegate_;
ServerDelegate* const server_delegate_;
const GetExitNotifierFDCallback get_exit_fd_callback_;
DISALLOW_COPY_AND_ASSIGN(Daemon);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_DAEMON_H_
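The delegate/daemon wiring is easier to see in condensed form. The sketch below mirrors what the device and host main functions further down do; RunSketch, the identifier string and the notifier name are placeholders, and the delegate implementations are elided.

// Illustrative wiring sketch; the real delegates live in
// device_forwarder_main.cc and host_forwarder_main.cc below.
#include "tools/android/forwarder2/daemon.h"
#include "tools/android/forwarder2/pipe_notifier.h"

namespace {

forwarder2::PipeNotifier* g_example_notifier = NULL;  // Leaky, like the mains.

int GetExitNotifierFD() { return g_example_notifier->receiver_fd(); }

}  // namespace

int RunSketch(forwarder2::Daemon::ClientDelegate* client_delegate,
              forwarder2::Daemon::ServerDelegate* server_delegate,
              bool kill_server) {
  g_example_notifier = new forwarder2::PipeNotifier();
  forwarder2::Daemon daemon("" /* empty path: log to the system debug log */,
                            "example_daemon_identifier", client_delegate,
                            server_delegate, &GetExitNotifierFD);
  if (kill_server)
    return !daemon.Kill();  // Kill() returns true on success.
  // SpawnIfNeeded() forks and daemonizes on the first call, then hands the
  // connected Unix Domain Socket to the client delegate.
  return daemon.SpawnIfNeeded() ? 0 : 1;
}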

@ -1,154 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/device_controller.h"
#include <utility>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/single_thread_task_runner.h"
#include "tools/android/forwarder2/command.h"
#include "tools/android/forwarder2/device_listener.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
// static
scoped_ptr<DeviceController> DeviceController::Create(
const std::string& adb_unix_socket,
int exit_notifier_fd) {
scoped_ptr<DeviceController> device_controller;
scoped_ptr<Socket> host_socket(new Socket());
if (!host_socket->BindUnix(adb_unix_socket)) {
PLOG(ERROR) << "Could not BindAndListen DeviceController socket on port "
<< adb_unix_socket << ": ";
return device_controller.Pass();
}
LOG(INFO) << "Listening on Unix Domain Socket " << adb_unix_socket;
device_controller.reset(
new DeviceController(host_socket.Pass(), exit_notifier_fd));
return device_controller.Pass();
}
DeviceController::~DeviceController() {
DCHECK(construction_task_runner_->RunsTasksOnCurrentThread());
}
void DeviceController::Start() {
AcceptHostCommandSoon();
}
DeviceController::DeviceController(scoped_ptr<Socket> host_socket,
int exit_notifier_fd)
: host_socket_(host_socket.Pass()),
exit_notifier_fd_(exit_notifier_fd),
construction_task_runner_(base::MessageLoopProxy::current()),
weak_ptr_factory_(this) {
host_socket_->AddEventFd(exit_notifier_fd);
}
void DeviceController::AcceptHostCommandSoon() {
base::MessageLoopProxy::current()->PostTask(
FROM_HERE,
base::Bind(&DeviceController::AcceptHostCommandInternal,
base::Unretained(this)));
}
void DeviceController::AcceptHostCommandInternal() {
scoped_ptr<Socket> socket(new Socket);
if (!host_socket_->Accept(socket.get())) {
if (!host_socket_->DidReceiveEvent())
PLOG(ERROR) << "Could not Accept DeviceController socket";
else
LOG(INFO) << "Received exit notification";
return;
}
base::ScopedClosureRunner accept_next_client(
base::Bind(&DeviceController::AcceptHostCommandSoon,
base::Unretained(this)));
// So that |socket| doesn't block on read if it has notifications.
socket->AddEventFd(exit_notifier_fd_);
int port;
command::Type command;
if (!ReadCommand(socket.get(), &port, &command)) {
LOG(ERROR) << "Invalid command received.";
return;
}
const ListenersMap::iterator listener_it = listeners_.find(port);
DeviceListener* const listener = listener_it == listeners_.end()
? static_cast<DeviceListener*>(NULL) : listener_it->second.get();
switch (command) {
case command::LISTEN: {
if (listener != NULL) {
LOG(WARNING) << "Already forwarding port " << port
<< ". Attempting to restart the listener.\n";
// Note that this deletes the listener object.
listeners_.erase(listener_it);
}
scoped_ptr<DeviceListener> new_listener(
DeviceListener::Create(
socket.Pass(), port, base::Bind(&DeviceController::DeleteListener,
weak_ptr_factory_.GetWeakPtr())));
if (!new_listener)
return;
new_listener->Start();
// |port| can be zero to request a dynamically allocated port, so we call
// DeviceListener::listener_port() below to retrieve the port that was
// actually allocated to this new listener.
const int listener_port = new_listener->listener_port();
listeners_.insert(
std::make_pair(listener_port,
linked_ptr<DeviceListener>(new_listener.release())));
LOG(INFO) << "Forwarding device port " << listener_port << " to host.";
break;
}
case command::DATA_CONNECTION:
if (listener == NULL) {
LOG(ERROR) << "Data Connection command received, but "
<< "listener has not been set up yet for port " << port;
// After this point it is assumed that, once we close our Adb Data
// socket, the Adb forwarder command will propagate the closing of
// sockets all the way to the host side.
break;
}
listener->SetAdbDataSocket(socket.Pass());
break;
case command::UNLISTEN:
if (!listener) {
SendCommand(command::UNLISTEN_ERROR, port, socket.get());
break;
}
listeners_.erase(listener_it);
SendCommand(command::UNLISTEN_SUCCESS, port, socket.get());
break;
default:
// TODO(felipeg): add a KillAllListeners command.
LOG(ERROR) << "Invalid command received. Port: " << port
<< " Command: " << command;
}
}
// static
void DeviceController::DeleteListener(
const base::WeakPtr<DeviceController>& device_controller_ptr,
int listener_port) {
DeviceController* const controller = device_controller_ptr.get();
if (!controller)
return;
DCHECK(controller->construction_task_runner_->RunsTasksOnCurrentThread());
const ListenersMap::iterator listener_it = controller->listeners_.find(
listener_port);
if (listener_it == controller->listeners_.end())
return;
const linked_ptr<DeviceListener> listener = listener_it->second;
// Note that the listener is removed from the map before it gets destroyed in
// case its destructor would access the map.
controller->listeners_.erase(listener_it);
}
} // namespace forwarder2

@ -1,66 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_DEVICE_CONTROLLER_H_
#define TOOLS_ANDROID_FORWARDER2_DEVICE_CONTROLLER_H_
#include <string>
#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "tools/android/forwarder2/socket.h"
namespace base {
class SingleThreadTaskRunner;
} // namespace base
namespace forwarder2 {
class DeviceListener;
// There is a single DeviceController per device_forwarder process, and it is in
// charge of managing all active redirections on the device side (one
// DeviceListener each).
class DeviceController {
public:
static scoped_ptr<DeviceController> Create(const std::string& adb_unix_socket,
int exit_notifier_fd);
~DeviceController();
void Start();
private:
typedef base::hash_map<
int /* port */, linked_ptr<DeviceListener> > ListenersMap;
DeviceController(scoped_ptr<Socket> host_socket, int exit_notifier_fd);
void AcceptHostCommandSoon();
void AcceptHostCommandInternal();
// Note that this can end up being called after the DeviceController is
// destroyed which is why a weak pointer is used.
static void DeleteListener(
const base::WeakPtr<DeviceController>& device_controller_ptr,
int listener_port);
const scoped_ptr<Socket> host_socket_;
// Used to notify the controller to exit.
const int exit_notifier_fd_;
// Lets ensure DeviceListener instances are deleted on the thread they were
// created on.
const scoped_refptr<base::SingleThreadTaskRunner> construction_task_runner_;
base::WeakPtrFactory<DeviceController> weak_ptr_factory_;
ListenersMap listeners_;
DISALLOW_COPY_AND_ASSIGN(DeviceController);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_DEVICE_CONTROLLER_H_
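A trimmed lifecycle sketch, matching how the device-side main program in the next file drives the controller; the socket name and function name are placeholders. The important constraint is that the controller is created, started and destroyed on the same (controller) thread.

// Illustrative sketch; the real call site is ServerDelegate::StartController()
// in device_forwarder_main.cc below.
#include "tools/android/forwarder2/device_controller.h"
#include "tools/android/forwarder2/pipe_notifier.h"

scoped_ptr<forwarder2::DeviceController> CreateAndStartController(
    forwarder2::PipeNotifier* exit_notifier) {
  // Create() binds the Unix Domain Socket and returns NULL on failure.
  scoped_ptr<forwarder2::DeviceController> controller =
      forwarder2::DeviceController::Create("example_adb_unix_socket",
                                           exit_notifier->receiver_fd());
  if (controller)
    controller->Start();  // Starts accepting host commands.
  // The caller must keep |controller| alive and delete it on this same thread
  // (see the DCHECK in ~DeviceController()).
  return controller.Pass();
}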

@ -1,169 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <signal.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include "base/at_exit.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/strings/string_piece.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread.h"
#include "tools/android/forwarder2/common.h"
#include "tools/android/forwarder2/daemon.h"
#include "tools/android/forwarder2/device_controller.h"
#include "tools/android/forwarder2/pipe_notifier.h"
namespace forwarder2 {
namespace {
// Leaky global instance, accessed from the signal handler.
forwarder2::PipeNotifier* g_notifier = NULL;
const int kBufSize = 256;
const char kUnixDomainSocketPath[] = "chrome_device_forwarder";
const char kDaemonIdentifier[] = "chrome_device_forwarder_daemon";
void KillHandler(int /* unused */) {
CHECK(g_notifier);
if (!g_notifier->Notify())
exit(1);
}
// Lets the daemon fetch the exit notifier file descriptor.
int GetExitNotifierFD() {
DCHECK(g_notifier);
return g_notifier->receiver_fd();
}
class ServerDelegate : public Daemon::ServerDelegate {
public:
ServerDelegate() : initialized_(false) {}
virtual ~ServerDelegate() {
if (!controller_thread_.get())
return;
// The DeviceController instance, if any, is constructed on the controller
// thread. Make sure that it gets deleted on that same thread. Note that
// DeleteSoon() is not used here since it would imply reading |controller_|
// from the main thread while it's set on the internal thread.
controller_thread_->message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(&ServerDelegate::DeleteControllerOnInternalThread,
base::Unretained(this)));
}
void DeleteControllerOnInternalThread() {
DCHECK(
controller_thread_->message_loop_proxy()->RunsTasksOnCurrentThread());
controller_.reset();
}
// Daemon::ServerDelegate:
virtual void Init() OVERRIDE {
DCHECK(!g_notifier);
g_notifier = new forwarder2::PipeNotifier();
signal(SIGTERM, KillHandler);
signal(SIGINT, KillHandler);
controller_thread_.reset(new base::Thread("controller_thread"));
controller_thread_->Start();
}
virtual void OnClientConnected(scoped_ptr<Socket> client_socket) OVERRIDE {
if (initialized_) {
client_socket->WriteString("OK");
return;
}
controller_thread_->message_loop()->PostTask(
FROM_HERE,
base::Bind(&ServerDelegate::StartController, base::Unretained(this),
GetExitNotifierFD(), base::Passed(&client_socket)));
initialized_ = true;
}
private:
void StartController(int exit_notifier_fd, scoped_ptr<Socket> client_socket) {
DCHECK(!controller_.get());
scoped_ptr<DeviceController> controller(
DeviceController::Create(kUnixDomainSocketPath, exit_notifier_fd));
if (!controller.get()) {
client_socket->WriteString(
base::StringPrintf("ERROR: Could not initialize device controller "
"with ADB socket path: %s",
kUnixDomainSocketPath));
return;
}
controller_.swap(controller);
controller_->Start();
client_socket->WriteString("OK");
client_socket->Close();
}
scoped_ptr<DeviceController> controller_;
scoped_ptr<base::Thread> controller_thread_;
bool initialized_;
};
class ClientDelegate : public Daemon::ClientDelegate {
public:
ClientDelegate() : has_failed_(false) {}
bool has_failed() const { return has_failed_; }
// Daemon::ClientDelegate:
virtual void OnDaemonReady(Socket* daemon_socket) OVERRIDE {
char buf[kBufSize];
const int bytes_read = daemon_socket->Read(
buf, sizeof(buf) - 1 /* leave space for null terminator */);
CHECK_GT(bytes_read, 0);
DCHECK(bytes_read < sizeof(buf));
buf[bytes_read] = 0;
base::StringPiece msg(buf, bytes_read);
if (msg.starts_with("ERROR")) {
LOG(ERROR) << msg;
has_failed_ = true;
return;
}
}
private:
bool has_failed_;
};
int RunDeviceForwarder(int argc, char** argv) {
CommandLine::Init(argc, argv); // Needed by logging.
const bool kill_server = CommandLine::ForCurrentProcess()->HasSwitch(
"kill-server");
if ((kill_server && argc != 2) || (!kill_server && argc != 1)) {
std::cerr << "Usage: device_forwarder [--kill-server]" << std::endl;
return 1;
}
base::AtExitManager at_exit_manager; // Used by base::Thread.
ClientDelegate client_delegate;
ServerDelegate daemon_delegate;
const char kLogFilePath[] = ""; // Log to logcat.
Daemon daemon(kLogFilePath, kDaemonIdentifier, &client_delegate,
&daemon_delegate, &GetExitNotifierFD);
if (kill_server)
return !daemon.Kill();
if (!daemon.SpawnIfNeeded())
return 1;
return client_delegate.has_failed();
}
} // namespace
} // namespace forwarder2
int main(int argc, char** argv) {
return forwarder2::RunDeviceForwarder(argc, argv);
}

@ -1,148 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/device_listener.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/single_thread_task_runner.h"
#include "tools/android/forwarder2/command.h"
#include "tools/android/forwarder2/forwarder.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
// static
scoped_ptr<DeviceListener> DeviceListener::Create(
scoped_ptr<Socket> host_socket,
int listener_port,
const DeleteCallback& delete_callback) {
scoped_ptr<Socket> listener_socket(new Socket());
scoped_ptr<DeviceListener> device_listener;
if (!listener_socket->BindTcp("", listener_port)) {
LOG(ERROR) << "Device could not bind and listen to local port "
<< listener_port;
SendCommand(command::BIND_ERROR, listener_port, host_socket.get());
return device_listener.Pass();
}
// In case the |listener_port_| was zero, GetPort() will return the
// currently (non-zero) allocated port for this socket.
listener_port = listener_socket->GetPort();
SendCommand(command::BIND_SUCCESS, listener_port, host_socket.get());
device_listener.reset(
new DeviceListener(
scoped_ptr<PipeNotifier>(new PipeNotifier()), listener_socket.Pass(),
host_socket.Pass(), listener_port, delete_callback));
return device_listener.Pass();
}
DeviceListener::~DeviceListener() {
DCHECK(deletion_task_runner_->RunsTasksOnCurrentThread());
exit_notifier_->Notify();
}
void DeviceListener::Start() {
thread_.Start();
AcceptNextClientSoon();
}
void DeviceListener::SetAdbDataSocket(scoped_ptr<Socket> adb_data_socket) {
thread_.message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(&DeviceListener::OnAdbDataSocketReceivedOnInternalThread,
base::Unretained(this), base::Passed(&adb_data_socket)));
}
DeviceListener::DeviceListener(scoped_ptr<PipeNotifier> pipe_notifier,
scoped_ptr<Socket> listener_socket,
scoped_ptr<Socket> host_socket,
int port,
const DeleteCallback& delete_callback)
: exit_notifier_(pipe_notifier.Pass()),
listener_socket_(listener_socket.Pass()),
host_socket_(host_socket.Pass()),
listener_port_(port),
delete_callback_(delete_callback),
deletion_task_runner_(base::MessageLoopProxy::current()),
thread_("DeviceListener") {
CHECK(host_socket_.get());
DCHECK(deletion_task_runner_.get());
DCHECK(exit_notifier_.get());
host_socket_->AddEventFd(exit_notifier_->receiver_fd());
listener_socket_->AddEventFd(exit_notifier_->receiver_fd());
}
void DeviceListener::AcceptNextClientSoon() {
thread_.message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(&DeviceListener::AcceptClientOnInternalThread,
base::Unretained(this)));
}
void DeviceListener::AcceptClientOnInternalThread() {
device_data_socket_.reset(new Socket());
if (!listener_socket_->Accept(device_data_socket_.get())) {
if (listener_socket_->DidReceiveEvent()) {
LOG(INFO) << "Received exit notification, stopped accepting clients.";
SelfDelete();
return;
}
LOG(WARNING) << "Could not Accept in ListenerSocket.";
SendCommand(command::ACCEPT_ERROR, listener_port_, host_socket_.get());
SelfDelete();
return;
}
SendCommand(command::ACCEPT_SUCCESS, listener_port_, host_socket_.get());
if (!ReceivedCommand(command::HOST_SERVER_SUCCESS,
host_socket_.get())) {
SendCommand(command::ACK, listener_port_, host_socket_.get());
LOG(ERROR) << "Host could not connect to server.";
device_data_socket_->Close();
if (host_socket_->has_error()) {
LOG(ERROR) << "Adb Control connection lost. "
<< "Listener port: " << listener_port_;
SelfDelete();
return;
}
// It can continue if the host forwarder could not connect to the host
// server but the control connection is still alive (no errors). The device
// acknowledged that (above), and it can re-try later.
AcceptNextClientSoon();
return;
}
}
void DeviceListener::OnAdbDataSocketReceivedOnInternalThread(
scoped_ptr<Socket> adb_data_socket) {
adb_data_socket_.swap(adb_data_socket);
SendCommand(command::ADB_DATA_SOCKET_SUCCESS, listener_port_,
host_socket_.get());
CHECK(adb_data_socket_.get());
StartForwarder(device_data_socket_.Pass(), adb_data_socket_.Pass());
AcceptNextClientSoon();
}
void DeviceListener::SelfDelete() {
if (!deletion_task_runner_->RunsTasksOnCurrentThread()) {
deletion_task_runner_->PostTask(
FROM_HERE,
base::Bind(&DeviceListener::SelfDeleteOnDeletionTaskRunner,
delete_callback_, listener_port_));
return;
}
SelfDeleteOnDeletionTaskRunner(delete_callback_, listener_port_);
}
// static
void DeviceListener::SelfDeleteOnDeletionTaskRunner(
const DeleteCallback& delete_callback,
int listener_port) {
delete_callback.Run(listener_port);
}
} // namespace forwarder2

@ -1,114 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_DEVICE_LISTENER_H_
#define TOOLS_ANDROID_FORWARDER2_DEVICE_LISTENER_H_
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
#include "tools/android/forwarder2/pipe_notifier.h"
#include "tools/android/forwarder2/socket.h"
namespace base {
class SingleThreadTaskRunner;
} // namespace base
namespace forwarder2 {
class Forwarder;
// A DeviceListener instance is used in the device_forwarder program to bind to
// a specific device-side |port| and wait for client connections. When a
// connection happens, it informs the corresponding HostController instance
// running on the host, through |host_socket|. Then the class expects a call to
// its SetAdbDataSocket() method (performed by the device controller) once the
// host opened a new connection to the device. When this happens, a new internal
// Forwarder instance is started.
// Note that instances of this class are owned by the device controller which
// creates and destroys them on the same thread. In case an internal error
// happens on the DeviceListener's internal thread, the DeviceListener
// can also self-delete by executing the user-provided callback on the thread
// the DeviceListener was created on.
// Note that the DeviceListener's destructor joins its internal thread (i.e.
// waits for its completion) which means that the internal thread is guaranteed
// not to be running anymore once the object is deleted.
class DeviceListener {
public:
// Callback that is used for self-deletion as a way to let the device
// controller perform some additional cleanup work (e.g. removing the device
// listener instance from its internal map before deleting it).
typedef base::Callback<void (int /* listener port */)> DeleteCallback;
static scoped_ptr<DeviceListener> Create(
scoped_ptr<Socket> host_socket,
int port,
const DeleteCallback& delete_callback);
~DeviceListener();
void Start();
void SetAdbDataSocket(scoped_ptr<Socket> adb_data_socket);
int listener_port() const { return listener_port_; }
private:
DeviceListener(scoped_ptr<PipeNotifier> pipe_notifier,
scoped_ptr<Socket> listener_socket,
scoped_ptr<Socket> host_socket,
int port,
const DeleteCallback& delete_callback);
// Pushes an AcceptClientOnInternalThread() task to the internal thread's
// message queue in order to wait for a new client soon.
void AcceptNextClientSoon();
void AcceptClientOnInternalThread();
void OnAdbDataSocketReceivedOnInternalThread(
scoped_ptr<Socket> adb_data_socket);
void SelfDelete();
// Note that this can be called after the DeviceListener instance gets deleted
// which is why this method is static.
static void SelfDeleteOnDeletionTaskRunner(
const DeleteCallback& delete_callback,
int listener_port);
// Used for the listener thread to be notified on destruction. We have one
// notifier per Listener thread since each Listener thread may be requested to
// exit for different reasons, independently of each other and of the main
// program, e.g. when the host requests to forward/listen on the same port
// again. Both the |host_socket_| and |listener_socket_|
// must share the same receiver file descriptor from |exit_notifier_| and it
// is set in the constructor.
const scoped_ptr<PipeNotifier> exit_notifier_;
// The local device listener socket for accepting connections from the local
// port (listener_port_).
const scoped_ptr<Socket> listener_socket_;
// The listener socket for sending control commands.
const scoped_ptr<Socket> host_socket_;
scoped_ptr<Socket> device_data_socket_;
// This is the adb connection to transport the actual data, used for creating
// the forwarder. Ownership transferred to the Forwarder.
scoped_ptr<Socket> adb_data_socket_;
const int listener_port_;
const DeleteCallback delete_callback_;
// Task runner used for deletion set at construction time (i.e. the object is
// deleted on the same thread it is created on).
scoped_refptr<base::SingleThreadTaskRunner> deletion_task_runner_;
base::Thread thread_;
DISALLOW_COPY_AND_ASSIGN(DeviceListener);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_DEVICE_LISTENER_H_
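As a quick reference, a reduced sketch of the calling pattern used by DeviceController (shown earlier) when it handles a LISTEN command; StartListener, OnListenerDeleted and the map-based cleanup are placeholders.

// Illustrative sketch; the real logic lives in
// DeviceController::AcceptHostCommandInternal() above.
#include "base/bind.h"
#include "tools/android/forwarder2/device_listener.h"

void OnListenerDeleted(int listener_port) {
  // Placeholder: the owner would erase the listener from its port map here.
}

scoped_ptr<forwarder2::DeviceListener> StartListener(
    scoped_ptr<forwarder2::Socket> host_socket, int port) {
  // Create() binds the device-side port (0 means "pick one") and replies to
  // the host with BIND_SUCCESS or BIND_ERROR through |host_socket|.
  scoped_ptr<forwarder2::DeviceListener> listener =
      forwarder2::DeviceListener::Create(host_socket.Pass(), port,
                                         base::Bind(&OnListenerDeleted));
  if (listener)
    listener->Start();  // Starts accepting clients on an internal thread.
  // The device controller later calls SetAdbDataSocket() once the host has
  // opened the corresponding adb data connection.
  return listener.Pass();
}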

@ -1,171 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/forwarder.h"
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
namespace {
// Helper class to buffer reads and writes from one socket to another.
class BufferedCopier {
public:
// Does NOT own the pointers.
BufferedCopier(Socket* socket_from,
Socket* socket_to)
: socket_from_(socket_from),
socket_to_(socket_to),
bytes_read_(0),
write_offset_(0) {
}
bool AddToReadSet(fd_set* read_fds) {
if (bytes_read_ == 0)
return socket_from_->AddFdToSet(read_fds);
return false;
}
bool AddToWriteSet(fd_set* write_fds) {
if (write_offset_ < bytes_read_)
return socket_to_->AddFdToSet(write_fds);
return false;
}
bool TryRead(const fd_set& read_fds) {
if (!socket_from_->IsFdInSet(read_fds))
return false;
if (bytes_read_ != 0) // Can't read.
return false;
int ret = socket_from_->Read(buffer_, kBufferSize);
if (ret > 0) {
bytes_read_ = ret;
return true;
}
return false;
}
bool TryWrite(const fd_set& write_fds) {
if (!socket_to_->IsFdInSet(write_fds))
return false;
if (write_offset_ >= bytes_read_) // Nothing to write.
return false;
int ret = socket_to_->Write(buffer_ + write_offset_,
bytes_read_ - write_offset_);
if (ret > 0) {
write_offset_ += ret;
if (write_offset_ == bytes_read_) {
write_offset_ = 0;
bytes_read_ = 0;
}
return true;
}
return false;
}
private:
// Not owned.
Socket* socket_from_;
Socket* socket_to_;
// A big buffer to let our file-over-http bridge work more like a real file.
static const int kBufferSize = 1024 * 128;
int bytes_read_;
int write_offset_;
char buffer_[kBufferSize];
DISALLOW_COPY_AND_ASSIGN(BufferedCopier);
};
// Internal class that wraps a helper thread to forward traffic between
// |socket1| and |socket2|. After creating a new instance, call its Start()
// method to launch operations. The thread stops automatically if one of the
// sockets disconnects, but it makes sure that all buffered writes to the
// other, still-alive socket are flushed first. When this happens, the
// instance deletes itself automatically.
// Note that the instance will always be destroyed on the same thread that
// created it.
class Forwarder {
public:
Forwarder(scoped_ptr<Socket> socket1, scoped_ptr<Socket> socket2)
: socket1_(socket1.Pass()),
socket2_(socket2.Pass()),
destructor_runner_(base::MessageLoopProxy::current()),
thread_("ForwarderThread") {
}
void Start() {
thread_.Start();
thread_.message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(&Forwarder::ThreadHandler, base::Unretained(this)));
}
private:
void ThreadHandler() {
const int nfds = Socket::GetHighestFileDescriptor(*socket1_, *socket2_) + 1;
fd_set read_fds;
fd_set write_fds;
// Copy from socket1 to socket2
BufferedCopier buffer1(socket1_.get(), socket2_.get());
// Copy from socket2 to socket1
BufferedCopier buffer2(socket2_.get(), socket1_.get());
bool run = true;
while (run) {
FD_ZERO(&read_fds);
FD_ZERO(&write_fds);
buffer1.AddToReadSet(&read_fds);
buffer2.AddToReadSet(&read_fds);
buffer1.AddToWriteSet(&write_fds);
buffer2.AddToWriteSet(&write_fds);
if (HANDLE_EINTR(select(nfds, &read_fds, &write_fds, NULL, NULL)) <= 0) {
PLOG(ERROR) << "select";
break;
}
// When a socket in the read set closes the connection, select() returns
// with that socket descriptor set as "ready to read". When we call
// TryRead() below, it will return false, but the while loop will continue
// to run until all the write operations are finished, to make sure the
// buffers are completely flushed out.
// Keep running while we have some operation to do.
run = buffer1.TryRead(read_fds);
run = run || buffer2.TryRead(read_fds);
run = run || buffer1.TryWrite(write_fds);
run = run || buffer2.TryWrite(write_fds);
}
// Note that the thread that |destructor_runner_| runs tasks on could be
// temporarily blocked on I/O (e.g. select()) therefore it is safer to close
// the sockets now rather than relying on the destructor.
socket1_.reset();
socket2_.reset();
// Note that base::Thread must be destroyed on the thread it was created on.
destructor_runner_->DeleteSoon(FROM_HERE, this);
}
scoped_ptr<Socket> socket1_;
scoped_ptr<Socket> socket2_;
scoped_refptr<base::SingleThreadTaskRunner> destructor_runner_;
base::Thread thread_;
};
} // namespace
void StartForwarder(scoped_ptr<Socket> socket1, scoped_ptr<Socket> socket2) {
(new Forwarder(socket1.Pass(), socket2.Pass()))->Start();
}
} // namespace forwarder2

@ -1,85 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'forwarder2',
'type': 'none',
'dependencies': [
'device_forwarder',
'host_forwarder#host',
],
# For the component build, ensure dependent shared libraries are stripped
# and put alongside forwarder to simplify pushing to the device.
'variables': {
'output_dir': '<(PRODUCT_DIR)/forwarder_dist/',
'native_binary': '<(PRODUCT_DIR)/device_forwarder',
},
'includes': ['../../../build/android/native_app_dependencies.gypi'],
},
{
'target_name': 'device_forwarder',
'type': 'executable',
'toolsets': ['target'],
'dependencies': [
'../../../base/base.gyp:base',
'../common/common.gyp:android_tools_common',
],
'include_dirs': [
'../../..',
],
'conditions': [
# Warning: A PIE tool cannot run on ICS 4.0.4, so only
# build it as position-independent when ASAN
# is activated. See b/6587214 for details.
[ 'asan==1', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'sources': [
'command.cc',
'common.cc',
'daemon.cc',
'device_controller.cc',
'device_forwarder_main.cc',
'device_listener.cc',
'forwarder.cc',
'pipe_notifier.cc',
'socket.cc',
],
},
{
'target_name': 'host_forwarder',
'type': 'executable',
'toolsets': ['host'],
'dependencies': [
'../../../base/base.gyp:base',
'../common/common.gyp:android_tools_common',
],
'include_dirs': [
'../../..',
],
'sources': [
'command.cc',
'common.cc',
'daemon.cc',
'forwarder.cc',
'host_controller.cc',
'host_forwarder_main.cc',
'pipe_notifier.cc',
'socket.cc',
# TODO(pliard): Remove this. This is needed to avoid undefined
# references at link time.
'../../../base/message_loop/message_pump_glib.cc',
'../../../base/message_loop/message_pump_gtk.cc',
],
},
],
}

@ -1,19 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_FORWARDER_H_
#define TOOLS_ANDROID_FORWARDER2_FORWARDER_H_
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
namespace forwarder2 {
class Socket;
void StartForwarder(scoped_ptr<Socket> socket1, scoped_ptr<Socket> socket2);
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_FORWARDER_H_
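A minimal usage sketch, mirroring the calls made by DeviceListener and HostController: both sockets must already be connected, and the caller hands over ownership. BridgeSockets is a made-up name.

// Illustrative sketch of StartForwarder(); ownership of both sockets moves to
// the internal, self-deleting Forwarder instance.
#include "tools/android/forwarder2/forwarder.h"
#include "tools/android/forwarder2/socket.h"

void BridgeSockets(scoped_ptr<forwarder2::Socket> a,
                   scoped_ptr<forwarder2::Socket> b) {
  // Must run on a thread with a message loop: the Forwarder posts its own
  // deletion back to this thread once one side disconnects.
  forwarder2::StartForwarder(a.Pass(), b.Pass());
  // Returns immediately; the byte shoveling happens on a dedicated thread.
}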

@ -1,186 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/host_controller.h"
#include <string>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "tools/android/forwarder2/command.h"
#include "tools/android/forwarder2/forwarder.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
// static
scoped_ptr<HostController> HostController::Create(
int device_port,
int host_port,
int adb_port,
int exit_notifier_fd,
const DeletionCallback& deletion_callback) {
scoped_ptr<HostController> host_controller;
scoped_ptr<PipeNotifier> delete_controller_notifier(new PipeNotifier());
scoped_ptr<Socket> adb_control_socket(new Socket());
adb_control_socket->AddEventFd(exit_notifier_fd);
adb_control_socket->AddEventFd(delete_controller_notifier->receiver_fd());
if (!adb_control_socket->ConnectTcp(std::string(), adb_port)) {
LOG(ERROR) << "Could not connect HostController socket on port: "
<< adb_port;
return host_controller.Pass();
}
// Send the command to the device to start listening on the "device_forward_port".
bool send_command_success = SendCommand(
command::LISTEN, device_port, adb_control_socket.get());
CHECK(send_command_success);
int device_port_allocated;
command::Type command;
if (!ReadCommand(
adb_control_socket.get(), &device_port_allocated, &command) ||
command != command::BIND_SUCCESS) {
LOG(ERROR) << "Device binding error using port " << device_port;
return host_controller.Pass();
}
host_controller.reset(
new HostController(
device_port_allocated, host_port, adb_port, exit_notifier_fd,
deletion_callback, adb_control_socket.Pass(),
delete_controller_notifier.Pass()));
return host_controller.Pass();
}
HostController::~HostController() {
DCHECK(deletion_task_runner_->RunsTasksOnCurrentThread());
delete_controller_notifier_->Notify();
// Note that the Forwarder instance (that also received a delete notification)
// might still be running on its own thread at this point. This is not a
// problem since it will self-delete once the socket that it is operating on
// is closed.
}
void HostController::Start() {
thread_.Start();
ReadNextCommandSoon();
}
HostController::HostController(
int device_port,
int host_port,
int adb_port,
int exit_notifier_fd,
const DeletionCallback& deletion_callback,
scoped_ptr<Socket> adb_control_socket,
scoped_ptr<PipeNotifier> delete_controller_notifier)
: device_port_(device_port),
host_port_(host_port),
adb_port_(adb_port),
global_exit_notifier_fd_(exit_notifier_fd),
deletion_callback_(deletion_callback),
adb_control_socket_(adb_control_socket.Pass()),
delete_controller_notifier_(delete_controller_notifier.Pass()),
deletion_task_runner_(base::MessageLoopProxy::current()),
thread_("HostControllerThread") {
}
void HostController::ReadNextCommandSoon() {
thread_.message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(&HostController::ReadCommandOnInternalThread,
base::Unretained(this)));
}
void HostController::ReadCommandOnInternalThread() {
if (!ReceivedCommand(command::ACCEPT_SUCCESS, adb_control_socket_.get())) {
SelfDelete();
return;
}
// Try to connect to host server.
scoped_ptr<Socket> host_server_data_socket(CreateSocket());
if (!host_server_data_socket->ConnectTcp(std::string(), host_port_)) {
LOG(ERROR) << "Could not Connect HostServerData socket on port: "
<< host_port_;
SendCommand(
command::HOST_SERVER_ERROR, device_port_, adb_control_socket_.get());
if (ReceivedCommand(command::ACK, adb_control_socket_.get())) {
// It can continue if the host forwarder could not connect to the host
// server but the device acknowledged that, so that the device could
// re-try later.
ReadNextCommandSoon();
return;
}
SelfDelete();
return;
}
SendCommand(
command::HOST_SERVER_SUCCESS, device_port_, adb_control_socket_.get());
StartForwarder(host_server_data_socket.Pass());
ReadNextCommandSoon();
}
void HostController::StartForwarder(
scoped_ptr<Socket> host_server_data_socket) {
scoped_ptr<Socket> adb_data_socket(CreateSocket());
if (!adb_data_socket->ConnectTcp("", adb_port_)) {
LOG(ERROR) << "Could not connect AdbDataSocket on port: " << adb_port_;
SelfDelete();
return;
}
// Open the Adb data connection, and send a command with the
// |device_forward_port| as a way for the device to identify the connection.
SendCommand(command::DATA_CONNECTION, device_port_, adb_data_socket.get());
// Check that the device received the new Adb Data Connection. Note that this
// check is done through the |adb_control_socket_| that is handled in the
// DeviceListener thread just after the call to WaitForAdbDataSocket().
if (!ReceivedCommand(command::ADB_DATA_SOCKET_SUCCESS,
adb_control_socket_.get())) {
LOG(ERROR) << "Device could not handle the new Adb Data Connection.";
SelfDelete();
return;
}
forwarder2::StartForwarder(
host_server_data_socket.Pass(), adb_data_socket.Pass());
}
scoped_ptr<Socket> HostController::CreateSocket() {
scoped_ptr<Socket> socket(new Socket());
socket->AddEventFd(global_exit_notifier_fd_);
socket->AddEventFd(delete_controller_notifier_->receiver_fd());
return socket.Pass();
}
void HostController::SelfDelete() {
scoped_ptr<HostController> self_deleter(this);
deletion_task_runner_->PostTask(
FROM_HERE,
base::Bind(&HostController::SelfDeleteOnDeletionTaskRunner,
deletion_callback_, base::Passed(&self_deleter)));
// Tell the device to delete its corresponding controller instance before we
// self-delete.
Socket socket;
if (!socket.ConnectTcp("", adb_port_)) {
LOG(ERROR) << "Could not connect to device on port " << adb_port_;
return;
}
if (!SendCommand(command::UNLISTEN, device_port_, &socket)) {
LOG(ERROR) << "Could not send unmap command for port " << device_port_;
return;
}
if (!ReceivedCommand(command::UNLISTEN_SUCCESS, &socket)) {
LOG(ERROR) << "Unamp command failed for port " << device_port_;
return;
}
}
// static
void HostController::SelfDeleteOnDeletionTaskRunner(
const DeletionCallback& deletion_callback,
scoped_ptr<HostController> controller) {
deletion_callback.Run(controller.Pass());
}
} // namespace forwarder2

@ -1,99 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_HOST_CONTROLLER_H_
#define TOOLS_ANDROID_FORWARDER2_HOST_CONTROLLER_H_
#include <string>
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
#include "tools/android/forwarder2/pipe_notifier.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
// This class partners with DeviceController and has the same lifetime and
// threading characteristics as DeviceListener. In a nutshell, this class
// operates on its own thread and is destroyed on the thread it was constructed
// on. The class' deletion can happen in two different ways:
// - Its destructor was called by its owner (HostControllersManager).
// - Its internal thread requested self-deletion after an error happened. In
// this case the owner (HostControllersManager) is notified on the
// construction thread through the provided DeletionCallback invoked with the
// HostController instance. When this callback is invoked, it's up to the
// owner to delete the instance.
class HostController {
public:
// Callback used for self-deletion that lets the client perform some cleanup
// work before deleting the HostController instance.
typedef base::Callback<void (scoped_ptr<HostController>)> DeletionCallback;
// If |device_port| is zero then a dynamic port is allocated (and retrievable
// through device_port() below).
static scoped_ptr<HostController> Create(
int device_port,
int host_port,
int adb_port,
int exit_notifier_fd,
const DeletionCallback& deletion_callback);
~HostController();
// Starts the internal controller thread.
void Start();
int adb_port() const { return adb_port_; }
int device_port() const { return device_port_; }
private:
HostController(int device_port,
int host_port,
int adb_port,
int exit_notifier_fd,
const DeletionCallback& deletion_callback,
scoped_ptr<Socket> adb_control_socket,
scoped_ptr<PipeNotifier> delete_controller_notifier);
void ReadNextCommandSoon();
void ReadCommandOnInternalThread();
void StartForwarder(scoped_ptr<Socket> host_server_data_socket);
// Helper method that creates a socket and adds the appropriate event file
// descriptors.
scoped_ptr<Socket> CreateSocket();
void SelfDelete();
static void SelfDeleteOnDeletionTaskRunner(
const DeletionCallback& deletion_callback,
scoped_ptr<HostController> controller);
const int device_port_;
const int host_port_;
const int adb_port_;
// Used to notify the controller when the process is killed.
const int global_exit_notifier_fd_;
// Used to let the client delete the instance in case an error happened.
const DeletionCallback deletion_callback_;
scoped_ptr<Socket> adb_control_socket_;
// Used to cancel the pending blocking IO operations when the host controller
// instance is deleted.
scoped_ptr<PipeNotifier> delete_controller_notifier_;
// Task runner used for deletion set at construction time (i.e. the object is
// deleted on the same thread it is created on).
const scoped_refptr<base::SingleThreadTaskRunner> deletion_task_runner_;
base::Thread thread_;
DISALLOW_COPY_AND_ASSIGN(HostController);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_HOST_CONTROLLER_H_
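A trimmed sketch of the host-side call pattern, modelled on HostControllersManager in host_forwarder_main.cc below; ForwardPort, OnControllerError and the way the returned controller is stored are placeholders.

// Illustrative sketch; the real owner is HostControllersManager below.
#include "base/bind.h"
#include "tools/android/forwarder2/host_controller.h"

void OnControllerError(scoped_ptr<forwarder2::HostController> controller) {
  // Placeholder: the owner would drop |controller| from its map here.
}

scoped_ptr<forwarder2::HostController> ForwardPort(int device_port,
                                                   int host_port,
                                                   int adb_port,
                                                   int exit_notifier_fd) {
  // Create() sends the LISTEN command over the adb control socket and waits
  // for BIND_SUCCESS; it returns NULL if the device could not bind the port.
  scoped_ptr<forwarder2::HostController> controller =
      forwarder2::HostController::Create(device_port, host_port, adb_port,
                                         exit_notifier_fd,
                                         base::Bind(&OnControllerError));
  if (controller)
    controller->Start();  // Spawns the internal controller thread.
  return controller.Pass();
}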

@ -1,414 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <iostream>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "base/at_exit.h"
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/pickle.h"
#include "base/posix/eintr_wrapper.h"
#include "base/safe_strerror_posix.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/task_runner.h"
#include "base/threading/thread.h"
#include "tools/android/forwarder2/common.h"
#include "tools/android/forwarder2/daemon.h"
#include "tools/android/forwarder2/host_controller.h"
#include "tools/android/forwarder2/pipe_notifier.h"
#include "tools/android/forwarder2/socket.h"
namespace forwarder2 {
namespace {
const char kLogFilePath[] = "/tmp/host_forwarder_log";
const char kDaemonIdentifier[] = "chrome_host_forwarder_daemon";
const char kKillServerCommand[] = "kill-server";
const char kForwardCommand[] = "forward";
const int kBufSize = 256;
// Needs to be global to be able to be accessed from the signal handler.
PipeNotifier* g_notifier = NULL;
// Lets the daemon fetch the exit notifier file descriptor.
int GetExitNotifierFD() {
DCHECK(g_notifier);
return g_notifier->receiver_fd();
}
void KillHandler(int signal_number) {
char buf[kBufSize];
if (signal_number != SIGTERM && signal_number != SIGINT) {
snprintf(buf, sizeof(buf), "Ignoring unexpected signal %d.", signal_number);
SIGNAL_SAFE_LOG(WARNING, buf);
return;
}
snprintf(buf, sizeof(buf), "Received signal %d.", signal_number);
SIGNAL_SAFE_LOG(WARNING, buf);
static int s_kill_handler_count = 0;
CHECK(g_notifier);
// If for some reason the forwarder gets stuck waiting forever on a socket,
// sending SIGTERM or SIGINT three times forces it to exit (non-nicely).
// This is useful when debugging.
++s_kill_handler_count;
if (!g_notifier->Notify() || s_kill_handler_count > 2)
exit(1);
}
// Manages HostController instances. There is one HostController instance for
// each connection being forwarded. Note that forwarding can happen with many
// devices (identified with a serial id).
class HostControllersManager {
public:
HostControllersManager()
: weak_ptr_factory_(this),
controllers_(new HostControllerMap()),
has_failed_(false) {
}
~HostControllersManager() {
if (!thread_.get())
return;
// Delete the controllers on the thread they were created on.
thread_->message_loop_proxy()->DeleteSoon(
FROM_HERE, controllers_.release());
}
void HandleRequest(const std::string& device_serial,
int device_port,
int host_port,
scoped_ptr<Socket> client_socket) {
// Lazily initialize so that the CLI process doesn't end up creating this thread.
InitOnce();
thread_->message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(
&HostControllersManager::HandleRequestOnInternalThread,
base::Unretained(this), device_serial, device_port, host_port,
base::Passed(&client_socket)));
}
bool has_failed() const { return has_failed_; }
private:
typedef base::hash_map<
std::string, linked_ptr<HostController> > HostControllerMap;
static std::string MakeHostControllerMapKey(int adb_port, int device_port) {
return base::StringPrintf("%d:%d", adb_port, device_port);
}
void InitOnce() {
if (thread_.get())
return;
at_exit_manager_.reset(new base::AtExitManager());
thread_.reset(new base::Thread("HostControllersManagerThread"));
thread_->Start();
}
// Invoked when a HostController instance reports an error (e.g. due to a
// device connectivity issue). Note that this could be called after the
// controller manager was destroyed which is why a weak pointer is used.
static void DeleteHostController(
const base::WeakPtr<HostControllersManager>& manager_ptr,
scoped_ptr<HostController> host_controller) {
HostController* const controller = host_controller.release();
HostControllersManager* const manager = manager_ptr.get();
if (!manager) {
// Note that |controller| is not leaked in this case since the host
// controllers manager owns the controllers. If the manager was deleted
// then all the controllers (including |controller|) were also deleted.
return;
}
DCHECK(manager->thread_->message_loop_proxy()->RunsTasksOnCurrentThread());
// Note that this will delete |controller| which is owned by the map.
manager->controllers_->erase(
MakeHostControllerMapKey(controller->adb_port(),
controller->device_port()));
}
void HandleRequestOnInternalThread(const std::string& device_serial,
int device_port,
int host_port,
scoped_ptr<Socket> client_socket) {
const int adb_port = GetAdbPortForDevice(device_serial);
if (adb_port < 0) {
SendMessage(
"ERROR: could not get adb port for device. You might need to add "
"'adb' to your PATH or provide the device serial id.",
client_socket.get());
return;
}
if (device_port < 0) {
// Remove the previously created host controller.
const std::string controller_key = MakeHostControllerMapKey(
adb_port, -device_port);
const HostControllerMap::size_type removed_elements = controllers_->erase(
controller_key);
SendMessage(
!removed_elements ? "ERROR: could not unmap port" : "OK",
client_socket.get());
return;
}
if (host_port < 0) {
SendMessage("ERROR: missing host port", client_socket.get());
return;
}
const bool use_dynamic_port_allocation = device_port == 0;
if (!use_dynamic_port_allocation) {
const std::string controller_key = MakeHostControllerMapKey(
adb_port, device_port);
if (controllers_->find(controller_key) != controllers_->end()) {
LOG(INFO) << "Already forwarding device port " << device_port
<< " to host port " << host_port;
SendMessage(base::StringPrintf("%d:%d", device_port, host_port),
client_socket.get());
return;
}
}
// Create a new host controller.
scoped_ptr<HostController> host_controller(
HostController::Create(
device_port, host_port, adb_port, GetExitNotifierFD(),
base::Bind(&HostControllersManager::DeleteHostController,
weak_ptr_factory_.GetWeakPtr())));
if (!host_controller.get()) {
has_failed_ = true;
SendMessage("ERROR: Connection to device failed.", client_socket.get());
return;
}
// Get the current allocated port.
device_port = host_controller->device_port();
LOG(INFO) << "Forwarding device port " << device_port << " to host port "
<< host_port;
const std::string msg = base::StringPrintf("%d:%d", device_port, host_port);
if (!SendMessage(msg, client_socket.get()))
return;
host_controller->Start();
controllers_->insert(
std::make_pair(MakeHostControllerMapKey(adb_port, device_port),
linked_ptr<HostController>(host_controller.release())));
}
int GetAdbPortForDevice(const std::string& device_serial) {
base::hash_map<std::string, int>::const_iterator it =
device_serial_to_adb_port_map_.find(device_serial);
if (it != device_serial_to_adb_port_map_.end())
return it->second;
Socket bind_socket;
CHECK(bind_socket.BindTcp("127.0.0.1", 0));
const int port = bind_socket.GetPort();
bind_socket.Close();
const std::string serial_part = device_serial.empty() ?
std::string() : std::string("-s ") + device_serial;
const std::string command = base::StringPrintf(
"adb %s forward tcp:%d localabstract:chrome_device_forwarder",
device_serial.empty() ? "" : serial_part.c_str(),
port);
LOG(INFO) << command;
const int ret = system(command.c_str());
if (ret < 0 || !WIFEXITED(ret) || WEXITSTATUS(ret) != 0)
return -1;
device_serial_to_adb_port_map_[device_serial] = port;
return port;
}
bool SendMessage(const std::string& msg, Socket* client_socket) {
bool result = client_socket->WriteString(msg);
DCHECK(result);
if (!result)
has_failed_ = true;
return result;
}
base::WeakPtrFactory<HostControllersManager> weak_ptr_factory_;
base::hash_map<std::string, int> device_serial_to_adb_port_map_;
scoped_ptr<HostControllerMap> controllers_;
bool has_failed_;
scoped_ptr<base::AtExitManager> at_exit_manager_; // Needed by base::Thread.
scoped_ptr<base::Thread> thread_;
};
class ServerDelegate : public Daemon::ServerDelegate {
public:
ServerDelegate() : has_failed_(false) {}
bool has_failed() const {
return has_failed_ || controllers_manager_.has_failed();
}
// Daemon::ServerDelegate:
virtual void Init() OVERRIDE {
LOG(INFO) << "Starting host process daemon (pid=" << getpid() << ")";
DCHECK(!g_notifier);
g_notifier = new PipeNotifier();
signal(SIGTERM, KillHandler);
signal(SIGINT, KillHandler);
}
virtual void OnClientConnected(scoped_ptr<Socket> client_socket) OVERRIDE {
char buf[kBufSize];
const int bytes_read = client_socket->Read(buf, sizeof(buf));
if (bytes_read <= 0) {
if (client_socket->DidReceiveEvent())
return;
PError("Read()");
has_failed_ = true;
return;
}
const Pickle command_pickle(buf, bytes_read);
PickleIterator pickle_it(command_pickle);
std::string device_serial;
CHECK(pickle_it.ReadString(&device_serial));
int device_port;
if (!pickle_it.ReadInt(&device_port)) {
client_socket->WriteString("ERROR: missing device port");
return;
}
int host_port;
if (!pickle_it.ReadInt(&host_port))
host_port = -1;
controllers_manager_.HandleRequest(
device_serial, device_port, host_port, client_socket.Pass());
}
private:
bool has_failed_;
HostControllersManager controllers_manager_;
DISALLOW_COPY_AND_ASSIGN(ServerDelegate);
};
class ClientDelegate : public Daemon::ClientDelegate {
public:
ClientDelegate(const Pickle& command_pickle)
: command_pickle_(command_pickle),
has_failed_(false) {
}
bool has_failed() const { return has_failed_; }
// Daemon::ClientDelegate:
virtual void OnDaemonReady(Socket* daemon_socket) OVERRIDE {
// Send the forward command to the daemon.
CHECK_EQ(command_pickle_.size(),
daemon_socket->WriteNumBytes(command_pickle_.data(),
command_pickle_.size()));
char buf[kBufSize];
const int bytes_read = daemon_socket->Read(
buf, sizeof(buf) - 1 /* leave space for null terminator */);
CHECK_GT(bytes_read, 0);
DCHECK(bytes_read < sizeof(buf));
buf[bytes_read] = 0;
base::StringPiece msg(buf, bytes_read);
if (msg.starts_with("ERROR")) {
LOG(ERROR) << msg;
has_failed_ = true;
return;
}
printf("%s\n", buf);
}
private:
const Pickle command_pickle_;
bool has_failed_;
};
void ExitWithUsage() {
std::cerr << "Usage: host_forwarder [options]\n\n"
"Options:\n"
" --serial-id=[0-9A-Z]{16}]\n"
" --map DEVICE_PORT HOST_PORT\n"
" --unmap DEVICE_PORT\n"
" --kill-server\n";
exit(1);
}
int PortToInt(const std::string& s) {
int value;
// Note that 0 is a valid port (used for dynamic port allocation).
if (!base::StringToInt(s, &value) || value < 0 ||
value > std::numeric_limits<uint16>::max()) {
LOG(ERROR) << "Could not convert string " << s << " to port";
ExitWithUsage();
}
return value;
}
int RunHostForwarder(int argc, char** argv) {
CommandLine::Init(argc, argv);
const CommandLine& cmd_line = *CommandLine::ForCurrentProcess();
bool kill_server = false;
Pickle pickle;
pickle.WriteString(
cmd_line.HasSwitch("serial-id") ?
cmd_line.GetSwitchValueASCII("serial-id") : std::string());
const std::vector<std::string> args = cmd_line.GetArgs();
if (cmd_line.HasSwitch("kill-server")) {
kill_server = true;
} else if (cmd_line.HasSwitch("unmap")) {
if (args.size() != 1)
ExitWithUsage();
// Note the minus sign below.
pickle.WriteInt(-PortToInt(args[0]));
} else if (cmd_line.HasSwitch("map")) {
if (args.size() != 2)
ExitWithUsage();
pickle.WriteInt(PortToInt(args[0]));
pickle.WriteInt(PortToInt(args[1]));
} else {
ExitWithUsage();
}
if (kill_server && args.size() > 0)
ExitWithUsage();
ClientDelegate client_delegate(pickle);
ServerDelegate daemon_delegate;
Daemon daemon(
kLogFilePath, kDaemonIdentifier, &client_delegate, &daemon_delegate,
&GetExitNotifierFD);
if (kill_server)
return !daemon.Kill();
if (!daemon.SpawnIfNeeded())
return 1;
return client_delegate.has_failed() || daemon_delegate.has_failed();
}
} // namespace
} // namespace forwarder2
int main(int argc, char** argv) {
return forwarder2::RunHostForwarder(argc, argv);
}
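
For readers skimming the deleted forwarder, a rough Python sketch of the port-mapping request convention implemented by RunHostForwarder above (it mirrors the --map/--unmap handling and the negative-port trick only; it does not reproduce the actual Pickle serialization, and the function name is made up for illustration):

# --map sends the device port followed by the host port; --unmap sends the
# negated device port so the daemon can tell the two apart. Port 0 is valid
# because it requests dynamic allocation.
def encode_request(device_port, host_port=None, unmap=False):
    if not 0 <= device_port <= 65535:
        raise ValueError('bad device port: %d' % device_port)
    if unmap:
        return [-device_port]  # note the minus sign, as in the C++ code
    if host_port is None or not 0 <= host_port <= 65535:
        raise ValueError('bad host port: %r' % host_port)
    return [device_port, host_port]

print(encode_request(8080, 8080))        # [8080, 8080]
print(encode_request(8080, unmap=True))  # [-8080]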

View File

@ -1,43 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/pipe_notifier.h"
#include <fcntl.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/safe_strerror_posix.h"
namespace forwarder2 {
PipeNotifier::PipeNotifier() {
int pipe_fd[2];
int ret = pipe(pipe_fd);
CHECK_EQ(0, ret);
receiver_fd_ = pipe_fd[0];
sender_fd_ = pipe_fd[1];
fcntl(sender_fd_, F_SETFL, O_NONBLOCK);
}
PipeNotifier::~PipeNotifier() {
(void) HANDLE_EINTR(close(receiver_fd_));
(void) HANDLE_EINTR(close(sender_fd_));
}
bool PipeNotifier::Notify() {
CHECK_NE(-1, sender_fd_);
errno = 0;
int ret = HANDLE_EINTR(write(sender_fd_, "1", 1));
if (ret < 0) {
LOG(WARNING) << "Error while notifying pipe. " << safe_strerror(errno);
return false;
}
return true;
}
} // namespace forwarder2

View File

@ -1,35 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_PIPE_NOTIFIER_H_
#define TOOLS_ANDROID_FORWARDER2_PIPE_NOTIFIER_H_
#include "base/basictypes.h"
namespace forwarder2 {
// Helper class used to create a unix pipe that sends notifications to the
// |receiver_fd_| file descriptor when |Notify()| is called. This should be used
// by the main thread to notify other threads that they must exit.
// The |receiver_fd_| can be put into a fd_set and used in a select together
// with a socket waiting to accept or read.
class PipeNotifier {
public:
PipeNotifier();
~PipeNotifier();
bool Notify();
int receiver_fd() const { return receiver_fd_; }
private:
int sender_fd_;
int receiver_fd_;
DISALLOW_COPY_AND_ASSIGN(PipeNotifier);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_PIPE_NOTIFIER_H_
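
A rough Python analogue of the PipeNotifier idea above, not part of the original tools: the read end of a pipe is added to the same select() call as the sockets, so that a write on the send end wakes up any thread blocked on I/O.

import os
import select

receiver_fd, sender_fd = os.pipe()
os.set_blocking(sender_fd, False)  # matches the O_NONBLOCK fcntl above

def notify():
    os.write(sender_fd, b'1')

# Elsewhere, a wait loop would include receiver_fd in its read set:
notify()
readable, _, _ = select.select([receiver_fd], [], [], 1.0)
print('notified' if receiver_fd in readable else 'timed out')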

View File

@ -1,422 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/android/forwarder2/socket.h"
#include <arpa/inet.h>
#include <fcntl.h>
#include <netdb.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/safe_strerror_posix.h"
#include "tools/android/common/net.h"
#include "tools/android/forwarder2/common.h"
namespace {
const int kNoTimeout = -1;
const int kConnectTimeOut = 10; // Seconds.
bool FamilyIsTCP(int family) {
return family == AF_INET || family == AF_INET6;
}
} // namespace
namespace forwarder2 {
bool Socket::BindUnix(const std::string& path) {
errno = 0;
if (!InitUnixSocket(path) || !BindAndListen()) {
Close();
return false;
}
return true;
}
bool Socket::BindTcp(const std::string& host, int port) {
errno = 0;
if (!InitTcpSocket(host, port) || !BindAndListen()) {
Close();
return false;
}
return true;
}
bool Socket::ConnectUnix(const std::string& path) {
errno = 0;
if (!InitUnixSocket(path) || !Connect()) {
Close();
return false;
}
return true;
}
bool Socket::ConnectTcp(const std::string& host, int port) {
errno = 0;
if (!InitTcpSocket(host, port) || !Connect()) {
Close();
return false;
}
return true;
}
Socket::Socket()
: socket_(-1),
port_(0),
socket_error_(false),
family_(AF_INET),
addr_ptr_(reinterpret_cast<sockaddr*>(&addr_.addr4)),
addr_len_(sizeof(sockaddr)) {
memset(&addr_, 0, sizeof(addr_));
}
Socket::~Socket() {
Close();
}
void Socket::Shutdown() {
if (!IsClosed()) {
PRESERVE_ERRNO_HANDLE_EINTR(shutdown(socket_, SHUT_RDWR));
}
}
void Socket::Close() {
if (!IsClosed()) {
CloseFD(socket_);
socket_ = -1;
}
}
bool Socket::InitSocketInternal() {
socket_ = socket(family_, SOCK_STREAM, 0);
if (socket_ < 0)
return false;
tools::DisableNagle(socket_);
int reuse_addr = 1;
setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
&reuse_addr, sizeof(reuse_addr));
return true;
}
bool Socket::InitUnixSocket(const std::string& path) {
static const size_t kPathMax = sizeof(addr_.addr_un.sun_path);
// For abstract sockets we need one extra byte for the leading zero.
if (path.size() + 2 /* '\0' */ > kPathMax) {
LOG(ERROR) << "The provided path is too big to create a unix "
<< "domain socket: " << path;
return false;
}
family_ = PF_UNIX;
addr_.addr_un.sun_family = family_;
// Copied from net/socket/unix_domain_socket_posix.cc
// Convert the path given into abstract socket name. It must start with
// the '\0' character, so we are adding it. |addr_len| must specify the
// length of the structure exactly, as potentially the socket name may
// have '\0' characters embedded (although we don't support this).
// Note that addr_.addr_un.sun_path is already zero initialized.
memcpy(addr_.addr_un.sun_path + 1, path.c_str(), path.size());
addr_len_ = path.size() + offsetof(struct sockaddr_un, sun_path) + 1;
addr_ptr_ = reinterpret_cast<sockaddr*>(&addr_.addr_un);
return InitSocketInternal();
}
bool Socket::InitTcpSocket(const std::string& host, int port) {
port_ = port;
if (host.empty()) {
// Use localhost: INADDR_LOOPBACK
family_ = AF_INET;
addr_.addr4.sin_family = family_;
addr_.addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
} else if (!Resolve(host)) {
return false;
}
CHECK(FamilyIsTCP(family_)) << "Invalid socket family.";
if (family_ == AF_INET) {
addr_.addr4.sin_port = htons(port_);
addr_ptr_ = reinterpret_cast<sockaddr*>(&addr_.addr4);
addr_len_ = sizeof(addr_.addr4);
} else if (family_ == AF_INET6) {
addr_.addr6.sin6_port = htons(port_);
addr_ptr_ = reinterpret_cast<sockaddr*>(&addr_.addr6);
addr_len_ = sizeof(addr_.addr6);
}
return InitSocketInternal();
}
bool Socket::BindAndListen() {
errno = 0;
if (HANDLE_EINTR(bind(socket_, addr_ptr_, addr_len_)) < 0 ||
HANDLE_EINTR(listen(socket_, SOMAXCONN)) < 0) {
SetSocketError();
return false;
}
if (port_ == 0 && FamilyIsTCP(family_)) {
SockAddr addr;
memset(&addr, 0, sizeof(addr));
socklen_t addrlen = 0;
sockaddr* addr_ptr = NULL;
uint16* port_ptr = NULL;
if (family_ == AF_INET) {
addr_ptr = reinterpret_cast<sockaddr*>(&addr.addr4);
port_ptr = &addr.addr4.sin_port;
addrlen = sizeof(addr.addr4);
} else if (family_ == AF_INET6) {
addr_ptr = reinterpret_cast<sockaddr*>(&addr.addr6);
port_ptr = &addr.addr6.sin6_port;
addrlen = sizeof(addr.addr6);
}
errno = 0;
if (getsockname(socket_, addr_ptr, &addrlen) != 0) {
LOG(ERROR) << "getsockname error: " << safe_strerror(errno);;
SetSocketError();
return false;
}
port_ = ntohs(*port_ptr);
}
return true;
}
bool Socket::Accept(Socket* new_socket) {
DCHECK(new_socket != NULL);
if (!WaitForEvent(READ, kNoTimeout)) {
SetSocketError();
return false;
}
errno = 0;
int new_socket_fd = HANDLE_EINTR(accept(socket_, NULL, NULL));
if (new_socket_fd < 0) {
SetSocketError();
return false;
}
tools::DisableNagle(new_socket_fd);
new_socket->socket_ = new_socket_fd;
return true;
}
bool Socket::Connect() {
// Set non-block because we use select for connect.
const int kFlags = fcntl(socket_, F_GETFL);
DCHECK(!(kFlags & O_NONBLOCK));
fcntl(socket_, F_SETFL, kFlags | O_NONBLOCK);
errno = 0;
if (HANDLE_EINTR(connect(socket_, addr_ptr_, addr_len_)) < 0 &&
errno != EINPROGRESS) {
SetSocketError();
PRESERVE_ERRNO_HANDLE_EINTR(fcntl(socket_, F_SETFL, kFlags));
return false;
}
// Wait for connection to complete, or receive a notification.
if (!WaitForEvent(WRITE, kConnectTimeOut)) {
SetSocketError();
PRESERVE_ERRNO_HANDLE_EINTR(fcntl(socket_, F_SETFL, kFlags));
return false;
}
int socket_errno;
socklen_t opt_len = sizeof(socket_errno);
if (getsockopt(socket_, SOL_SOCKET, SO_ERROR, &socket_errno, &opt_len) < 0) {
LOG(ERROR) << "getsockopt(): " << safe_strerror(errno);
SetSocketError();
PRESERVE_ERRNO_HANDLE_EINTR(fcntl(socket_, F_SETFL, kFlags));
return false;
}
if (socket_errno != 0) {
LOG(ERROR) << "Could not connect to host: " << safe_strerror(socket_errno);
SetSocketError();
PRESERVE_ERRNO_HANDLE_EINTR(fcntl(socket_, F_SETFL, kFlags));
return false;
}
fcntl(socket_, F_SETFL, kFlags);
return true;
}
bool Socket::Resolve(const std::string& host) {
struct addrinfo hints;
struct addrinfo* res;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags |= AI_CANONNAME;
int errcode = getaddrinfo(host.c_str(), NULL, &hints, &res);
if (errcode != 0) {
SetSocketError();
freeaddrinfo(res);
return false;
}
family_ = res->ai_family;
switch (res->ai_family) {
case AF_INET:
memcpy(&addr_.addr4,
reinterpret_cast<sockaddr_in*>(res->ai_addr),
sizeof(sockaddr_in));
break;
case AF_INET6:
memcpy(&addr_.addr6,
reinterpret_cast<sockaddr_in6*>(res->ai_addr),
sizeof(sockaddr_in6));
break;
}
freeaddrinfo(res);
return true;
}
int Socket::GetPort() {
if (!FamilyIsTCP(family_)) {
LOG(ERROR) << "Can't call GetPort() on an unix domain socket.";
return 0;
}
return port_;
}
bool Socket::IsFdInSet(const fd_set& fds) const {
if (IsClosed())
return false;
return FD_ISSET(socket_, &fds);
}
bool Socket::AddFdToSet(fd_set* fds) const {
if (IsClosed())
return false;
FD_SET(socket_, fds);
return true;
}
int Socket::ReadNumBytes(void* buffer, size_t num_bytes) {
int bytes_read = 0;
int ret = 1;
while (bytes_read < num_bytes && ret > 0) {
ret = Read(static_cast<char*>(buffer) + bytes_read, num_bytes - bytes_read);
if (ret >= 0)
bytes_read += ret;
}
return bytes_read;
}
void Socket::SetSocketError() {
socket_error_ = true;
  // We never use non-blocking sockets.
DCHECK(errno != EAGAIN && errno != EWOULDBLOCK);
Close();
}
int Socket::Read(void* buffer, size_t buffer_size) {
if (!WaitForEvent(READ, kNoTimeout)) {
SetSocketError();
return 0;
}
int ret = HANDLE_EINTR(read(socket_, buffer, buffer_size));
if (ret < 0)
SetSocketError();
return ret;
}
int Socket::Write(const void* buffer, size_t count) {
int ret = HANDLE_EINTR(send(socket_, buffer, count, MSG_NOSIGNAL));
if (ret < 0)
SetSocketError();
return ret;
}
int Socket::WriteString(const std::string& buffer) {
return WriteNumBytes(buffer.c_str(), buffer.size());
}
void Socket::AddEventFd(int event_fd) {
Event event;
event.fd = event_fd;
event.was_fired = false;
events_.push_back(event);
}
bool Socket::DidReceiveEventOnFd(int fd) const {
for (size_t i = 0; i < events_.size(); ++i)
if (events_[i].fd == fd)
return events_[i].was_fired;
return false;
}
bool Socket::DidReceiveEvent() const {
for (size_t i = 0; i < events_.size(); ++i)
if (events_[i].was_fired)
return true;
return false;
}
int Socket::WriteNumBytes(const void* buffer, size_t num_bytes) {
int bytes_written = 0;
int ret = 1;
while (bytes_written < num_bytes && ret > 0) {
ret = Write(static_cast<const char*>(buffer) + bytes_written,
num_bytes - bytes_written);
if (ret >= 0)
bytes_written += ret;
}
return bytes_written;
}
bool Socket::WaitForEvent(EventType type, int timeout_secs) {
if (events_.empty() || socket_ == -1)
return true;
fd_set read_fds;
fd_set write_fds;
FD_ZERO(&read_fds);
FD_ZERO(&write_fds);
if (type == READ)
FD_SET(socket_, &read_fds);
else
FD_SET(socket_, &write_fds);
for (size_t i = 0; i < events_.size(); ++i)
FD_SET(events_[i].fd, &read_fds);
timeval tv = {};
timeval* tv_ptr = NULL;
if (timeout_secs > 0) {
tv.tv_sec = timeout_secs;
tv.tv_usec = 0;
tv_ptr = &tv;
}
int max_fd = socket_;
for (size_t i = 0; i < events_.size(); ++i)
if (events_[i].fd > max_fd)
max_fd = events_[i].fd;
if (HANDLE_EINTR(
select(max_fd + 1, &read_fds, &write_fds, NULL, tv_ptr)) <= 0) {
return false;
}
bool event_was_fired = false;
for (size_t i = 0; i < events_.size(); ++i) {
if (FD_ISSET(events_[i].fd, &read_fds)) {
events_[i].was_fired = true;
event_was_fired = true;
}
}
return !event_was_fired;
}
// static
int Socket::GetHighestFileDescriptor(const Socket& s1, const Socket& s2) {
return std::max(s1.socket_, s2.socket_);
}
// static
pid_t Socket::GetUnixDomainSocketProcessOwner(const std::string& path) {
Socket socket;
if (!socket.ConnectUnix(path))
return -1;
ucred ucred;
socklen_t len = sizeof(ucred);
if (getsockopt(socket.socket_, SOL_SOCKET, SO_PEERCRED, &ucred, &len) == -1) {
CHECK_NE(ENOPROTOOPT, errno);
return -1;
}
return ucred.pid;
}
} // namespace forwarder2
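
A minimal, Linux-only sketch of the abstract-socket naming used by Socket::InitUnixSocket above: the address starts with a NUL byte, so the socket never appears on the filesystem. The name below is made up for this example.

import socket

NAME = b'\0forwarder_example_socket'

server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(NAME)
server.listen(1)

client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client.connect(NAME)
conn, _ = server.accept()
conn.sendall(b'ping')
print(client.recv(4))  # b'ping'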

View File

@ -1,145 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TOOLS_ANDROID_FORWARDER2_SOCKET_H_
#define TOOLS_ANDROID_FORWARDER2_SOCKET_H_
#include <fcntl.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <string>
#include <vector>
#include "base/basictypes.h"
namespace forwarder2 {
// Wrapper class around the Unix socket API. Can be used to create, bind or
// connect to both Unix domain sockets and TCP sockets.
// TODO(pliard): Split this class into TCPSocket and UnixDomainSocket.
class Socket {
public:
Socket();
~Socket();
bool BindUnix(const std::string& path);
bool BindTcp(const std::string& host, int port);
bool ConnectUnix(const std::string& path);
bool ConnectTcp(const std::string& host, int port);
// Just a wrapper around unix socket shutdown(), see man 2 shutdown.
void Shutdown();
// Just a wrapper around unix socket close(), see man 2 close.
void Close();
bool IsClosed() const { return socket_ < 0; }
bool Accept(Socket* new_socket);
// Returns the port allocated to this socket or zero on error.
int GetPort();
bool IsFdInSet(const fd_set& fds) const;
bool AddFdToSet(fd_set* fds) const;
// Just a wrapper around unix read() function.
  // Reads up to buffer_size, but may read less than buffer_size.
// Returns the number of bytes read.
int Read(void* buffer, size_t buffer_size);
// Same as Read(), just a wrapper around write().
int Write(const void* buffer, size_t count);
  // Calls Read() multiple times until num_bytes bytes have been read into the
  // provided buffer. No bounds checking is performed.
  // Returns the number of bytes read, which can be different from num_bytes in
  // case of error.
int ReadNumBytes(void* buffer, size_t num_bytes);
// Calls Write() multiple times until num_bytes is written. No bounds checking
// is performed. Returns number of bytes written, which can be different from
  // num_bytes in case of error.
int WriteNumBytes(const void* buffer, size_t num_bytes);
// Calls WriteNumBytes for the given std::string. Note that the null
// terminator is not written to the socket.
int WriteString(const std::string& buffer);
bool has_error() const { return socket_error_; }
// |event_fd| must be a valid pipe file descriptor created from the
// PipeNotifier and must live (not be closed) at least as long as this socket
// is alive.
void AddEventFd(int event_fd);
// Returns whether Accept() or Connect() was interrupted because the socket
// received an external event fired through the provided fd.
bool DidReceiveEventOnFd(int fd) const;
bool DidReceiveEvent() const;
static int GetHighestFileDescriptor(const Socket& s1, const Socket& s2);
static pid_t GetUnixDomainSocketProcessOwner(const std::string& path);
private:
enum EventType {
READ,
WRITE
};
union SockAddr {
// IPv4 sockaddr
sockaddr_in addr4;
// IPv6 sockaddr
sockaddr_in6 addr6;
// Unix Domain sockaddr
sockaddr_un addr_un;
};
struct Event {
int fd;
bool was_fired;
};
// If |host| is empty, use localhost.
bool InitTcpSocket(const std::string& host, int port);
bool InitUnixSocket(const std::string& path);
bool BindAndListen();
bool Connect();
bool Resolve(const std::string& host);
bool InitSocketInternal();
void SetSocketError();
// Waits until either the Socket or the |exit_notifier_fd_| has received an
// event.
bool WaitForEvent(EventType type, int timeout_secs);
int socket_;
int port_;
bool socket_error_;
// Family of the socket (PF_INET, PF_INET6 or PF_UNIX).
int family_;
SockAddr addr_;
// Points to one of the members of the above union depending on the family.
sockaddr* addr_ptr_;
// Length of one of the members of the above union depending on the family.
socklen_t addr_len_;
// Used to listen for external events (e.g. process received a SIGTERM) while
// blocking on I/O operations.
std::vector<Event> events_;
DISALLOW_COPY_AND_ASSIGN(Socket);
};
} // namespace forwarder2
#endif // TOOLS_ANDROID_FORWARDER2_SOCKET_H_

View File

@ -1,93 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Md5sum implementation for Android. This version handles files as well as
// directories. Its output is sorted by file path.
#include <fstream>
#include <iostream>
#include <set>
#include <string>
#include "base/file_util.h"
#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/md5.h"
namespace {
const int kBufferSize = 1024;
// Returns whether |path|'s MD5 was successfully written to |digest_string|.
bool MD5Sum(const char* path, std::string* digest_string) {
std::ifstream stream(path);
if (!stream.good()) {
LOG(ERROR) << "Could not open file " << path;
return false;
}
base::MD5Context ctx;
base::MD5Init(&ctx);
char buf[kBufferSize];
while (stream.good()) {
std::streamsize bytes_read = stream.readsome(buf, sizeof(buf));
if (bytes_read == 0)
break;
base::MD5Update(&ctx, base::StringPiece(buf, bytes_read));
}
if (stream.fail()) {
LOG(ERROR) << "Error reading file " << path;
return false;
}
base::MD5Digest digest;
base::MD5Final(&digest, &ctx);
*digest_string = base::MD5DigestToBase16(digest);
return true;
}
// Returns the set of all files contained in |files|. This handles directories
// by walking them recursively. Excludes .svn directories and files under them.
std::set<std::string> MakeFileSet(const char** files) {
const std::string svn_dir_component = FILE_PATH_LITERAL("/.svn/");
std::set<std::string> file_set;
for (const char** file = files; *file; ++file) {
base::FilePath file_path(*file);
if (base::DirectoryExists(file_path)) {
base::FileEnumerator file_enumerator(
file_path, true /* recurse */, base::FileEnumerator::FILES);
for (base::FilePath child, empty;
(child = file_enumerator.Next()) != empty; ) {
// If the path contains /.svn/, ignore it.
if (child.value().find(svn_dir_component) == std::string::npos) {
child = base::MakeAbsoluteFilePath(child);
file_set.insert(child.value());
}
}
} else {
file_set.insert(*file);
}
}
return file_set;
}
} // namespace
int main(int argc, const char* argv[]) {
if (argc < 2) {
LOG(ERROR) << "Usage: md5sum <path/to/file_or_dir>...";
return 1;
}
const std::set<std::string> files = MakeFileSet(argv + 1);
bool failed = false;
std::string digest;
for (std::set<std::string>::const_iterator it = files.begin();
it != files.end(); ++it) {
if (!MD5Sum(it->c_str(), &digest))
failed = true;
base::FilePath file_path(*it);
std::cout << digest << " "
<< base::MakeAbsoluteFilePath(file_path).value() << std::endl;
}
return failed;
}
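
A rough Python sketch of the same behaviour as md5sum.cc above, written only as a reference, not as a drop-in replacement: hash the given files, walk directories recursively while skipping .svn, and print digests sorted by absolute path.

import hashlib
import os
import sys

def md5_of_file(path, chunk_size=1024):
    ctx = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            ctx.update(chunk)
    return ctx.hexdigest()

def collect_files(paths):
    files = set()
    for p in paths:
        if os.path.isdir(p):
            for root, dirs, names in os.walk(p):
                if '.svn' in dirs:
                    dirs.remove('.svn')  # skip .svn and everything under it
                files.update(os.path.abspath(os.path.join(root, n)) for n in names)
        else:
            files.add(os.path.abspath(p))
    return files

if __name__ == '__main__':
    for path in sorted(collect_files(sys.argv[1:] or ['.'])):
        print(md5_of_file(path), path)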

View File

@ -1,77 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'md5sum',
'type': 'none',
'dependencies': [
'md5sum_stripped_device_bin',
'md5sum_bin_host#host',
],
# For the component build, ensure dependent shared libraries are stripped
# and put alongside md5sum to simplify pushing to the device.
'variables': {
'output_dir': '<(PRODUCT_DIR)/md5sum_dist/',
'native_binary': '<(PRODUCT_DIR)/md5sum_bin',
},
'includes': ['../../../build/android/native_app_dependencies.gypi'],
},
{
'target_name': 'md5sum_device_bin',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
],
'include_dirs': [
'../../..',
],
'sources': [
'md5sum.cc',
],
'conditions': [
[ 'order_profiling!=0 and OS=="android"', {
'dependencies': [ '../../../tools/cygprofile/cygprofile.gyp:cygprofile', ],
}],
],
},
{
'target_name': 'md5sum_stripped_device_bin',
'type': 'none',
'dependencies': [
'md5sum_device_bin',
],
'actions': [
{
'action_name': 'strip_md5sum_device_bin',
'inputs': ['<(PRODUCT_DIR)/md5sum_device_bin'],
'outputs': ['<(PRODUCT_DIR)/md5sum_bin'],
'action': [
'<(android_strip)',
'--strip-unneeded',
'<@(_inputs)',
'-o',
'<@(_outputs)',
],
},
],
},
# Same binary but for the host rather than the device.
{
'target_name': 'md5sum_bin_host',
'toolsets': ['host'],
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
],
'include_dirs': [
'../../..',
],
'sources': [
'md5sum.cc',
],
},
],
}

View File

@ -1,555 +0,0 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <fcntl.h>
#include <signal.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "base/base64.h"
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/containers/hash_tables.h"
#include "base/file_util.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
namespace {
class BitSet {
public:
void resize(size_t nbits) {
data_.resize((nbits + 7) / 8);
}
void set(uint32 bit) {
const uint32 byte_idx = bit / 8;
CHECK(byte_idx < data_.size());
data_[byte_idx] |= (1 << (bit & 7));
}
std::string AsB64String() const {
std::string bits(&data_[0], data_.size());
std::string b64_string;
base::Base64Encode(bits, &b64_string);
return b64_string;
}
private:
std::vector<char> data_;
};
// An entry in /proc/<pid>/pagemap.
struct PageMapEntry {
uint64 page_frame_number : 55;
uint unused : 8;
uint present : 1;
};
// Describes a memory page.
struct PageInfo {
int64 page_frame_number; // Physical page id, also known as PFN.
int64 flags;
int32 times_mapped;
};
struct MemoryMap {
std::string name;
std::string flags;
uint start_address;
uint end_address;
uint offset;
int private_count;
int unevictable_private_count;
int other_shared_count;
int unevictable_other_shared_count;
// app_shared_counts[i] contains the number of pages mapped in i+2 processes
// (only among the processes that are being analyzed).
std::vector<int> app_shared_counts;
std::vector<PageInfo> committed_pages;
// committed_pages_bits is a bitset reflecting the present bit for all the
// virtual pages of the mapping.
BitSet committed_pages_bits;
};
struct ProcessMemory {
pid_t pid;
std::vector<MemoryMap> memory_maps;
};
bool PageIsUnevictable(const PageInfo& page_info) {
// These constants are taken from kernel-page-flags.h.
const int KPF_DIRTY = 4; // Note that only file-mapped pages can be DIRTY.
  const int KPF_ANON = 12; // Anonymous pages are dirty by definition.
const int KPF_UNEVICTABLE = 18;
const int KPF_MLOCKED = 33;
return (page_info.flags & ((1ll << KPF_DIRTY) |
(1ll << KPF_ANON) |
(1ll << KPF_UNEVICTABLE) |
(1ll << KPF_MLOCKED))) ?
true : false;
}
// Number of times a physical page is mapped in a process.
typedef base::hash_map<uint64, int> PFNMap;
// Parses lines from /proc/<PID>/maps, e.g.:
// 401e7000-401f5000 r-xp 00000000 103:02 158 /system/bin/linker
bool ParseMemoryMapLine(const std::string& line,
std::vector<std::string>* tokens,
MemoryMap* memory_map) {
tokens->clear();
base::SplitString(line, ' ', tokens);
if (tokens->size() < 2)
return false;
const int addr_len = 8;
const std::string& addr_range = tokens->at(0);
if (addr_range.length() != addr_len + 1 + addr_len)
return false;
uint64 tmp = 0;
if (!base::HexStringToUInt64(
base::StringPiece(
addr_range.begin(), addr_range.begin() + addr_len),
&tmp)) {
return false;
}
memory_map->start_address = static_cast<uint>(tmp);
const int end_addr_start_pos = addr_len + 1;
if (!base::HexStringToUInt64(
base::StringPiece(
addr_range.begin() + end_addr_start_pos,
addr_range.begin() + end_addr_start_pos + addr_len),
&tmp)) {
return false;
}
memory_map->end_address = static_cast<uint>(tmp);
if (tokens->at(1).size() != strlen("rwxp"))
return false;
memory_map->flags.swap(tokens->at(1));
if (!base::HexStringToUInt64(tokens->at(2), &tmp))
return false;
memory_map->offset = static_cast<uint>(tmp);
memory_map->committed_pages_bits.resize(
(memory_map->end_address - memory_map->start_address) / PAGE_SIZE);
const int map_name_index = 5;
if (tokens->size() >= map_name_index + 1) {
for (std::vector<std::string>::const_iterator it =
tokens->begin() + map_name_index; it != tokens->end(); ++it) {
if (!it->empty()) {
if (!memory_map->name.empty())
memory_map->name.append(" ");
memory_map->name.append(*it);
}
}
}
return true;
}
// Reads sizeof(T) bytes from file |fd| at |offset|.
template <typename T>
bool ReadFromFileAtOffset(int fd, off_t offset, T* value) {
if (lseek64(fd, offset * sizeof(*value), SEEK_SET) < 0) {
PLOG(ERROR) << "lseek";
return false;
}
ssize_t bytes = read(fd, value, sizeof(*value));
if (bytes != sizeof(*value) && bytes != 0) {
PLOG(ERROR) << "read";
return false;
}
return true;
}
// Fills |process_maps| in with the process memory maps identified by |pid|.
bool GetProcessMaps(pid_t pid, std::vector<MemoryMap>* process_maps) {
std::ifstream maps_file(base::StringPrintf("/proc/%d/maps", pid).c_str());
if (!maps_file.good()) {
PLOG(ERROR) << "open";
return false;
}
std::string line;
std::vector<std::string> tokens;
while (std::getline(maps_file, line) && !line.empty()) {
MemoryMap memory_map = {};
if (!ParseMemoryMapLine(line, &tokens, &memory_map)) {
LOG(ERROR) << "Could not parse line: " << line;
return false;
}
process_maps->push_back(memory_map);
}
return true;
}
// Fills |committed_pages| in with the set of committed pages contained in the
// provided memory map.
bool GetPagesForMemoryMap(int pagemap_fd,
const MemoryMap& memory_map,
std::vector<PageInfo>* committed_pages,
BitSet* committed_pages_bits) {
for (uint addr = memory_map.start_address, page_index = 0;
addr < memory_map.end_address;
addr += PAGE_SIZE, ++page_index) {
DCHECK_EQ(0, addr % PAGE_SIZE);
PageMapEntry page_map_entry = {};
COMPILE_ASSERT(sizeof(PageMapEntry) == sizeof(uint64), unexpected_size);
const off64_t offset = addr / PAGE_SIZE;
if (!ReadFromFileAtOffset(pagemap_fd, offset, &page_map_entry))
return false;
if (page_map_entry.present) { // Ignore non-committed pages.
if (page_map_entry.page_frame_number == 0)
continue;
PageInfo page_info = {};
page_info.page_frame_number = page_map_entry.page_frame_number;
committed_pages->push_back(page_info);
committed_pages_bits->set(page_index);
}
}
return true;
}
// Fills |committed_pages| with mapping count and flags information gathered by
// looking up /proc/kpagecount and /proc/kpageflags.
bool SetPagesInfo(int pagecount_fd,
int pageflags_fd,
std::vector<PageInfo>* pages) {
for (std::vector<PageInfo>::iterator it = pages->begin();
it != pages->end(); ++it) {
PageInfo* const page_info = &*it;
int64 times_mapped;
if (!ReadFromFileAtOffset(
pagecount_fd, page_info->page_frame_number, &times_mapped)) {
return false;
}
DCHECK(times_mapped <= std::numeric_limits<int32_t>::max());
page_info->times_mapped = static_cast<int32>(times_mapped);
int64 page_flags;
if (!ReadFromFileAtOffset(
pageflags_fd, page_info->page_frame_number, &page_flags)) {
return false;
}
page_info->flags = page_flags;
}
return true;
}
// Fills in the provided vector of Page Frame Number maps. This lets
// ClassifyPages() know how many times each page is mapped in the processes.
void FillPFNMaps(const std::vector<ProcessMemory>& processes_memory,
std::vector<PFNMap>* pfn_maps) {
int current_process_index = 0;
for (std::vector<ProcessMemory>::const_iterator it = processes_memory.begin();
it != processes_memory.end(); ++it, ++current_process_index) {
const std::vector<MemoryMap>& memory_maps = it->memory_maps;
for (std::vector<MemoryMap>::const_iterator it = memory_maps.begin();
it != memory_maps.end(); ++it) {
const std::vector<PageInfo>& pages = it->committed_pages;
for (std::vector<PageInfo>::const_iterator it = pages.begin();
it != pages.end(); ++it) {
const PageInfo& page_info = *it;
PFNMap* const pfn_map = &(*pfn_maps)[current_process_index];
const std::pair<PFNMap::iterator, bool> result = pfn_map->insert(
std::make_pair(page_info.page_frame_number, 0));
++result.first->second;
}
}
}
}
// Sets the private_count/app_shared_counts/other_shared_count fields of the
// provided memory maps for each process.
void ClassifyPages(std::vector<ProcessMemory>* processes_memory) {
std::vector<PFNMap> pfn_maps(processes_memory->size());
FillPFNMaps(*processes_memory, &pfn_maps);
// Hash set keeping track of the physical pages mapped in a single process so
// that they can be counted only once.
std::hash_set<uint64> physical_pages_mapped_in_process;
for (std::vector<ProcessMemory>::iterator it = processes_memory->begin();
it != processes_memory->end(); ++it) {
std::vector<MemoryMap>* const memory_maps = &it->memory_maps;
physical_pages_mapped_in_process.clear();
for (std::vector<MemoryMap>::iterator it = memory_maps->begin();
it != memory_maps->end(); ++it) {
MemoryMap* const memory_map = &*it;
const size_t processes_count = processes_memory->size();
memory_map->app_shared_counts.resize(processes_count - 1, 0);
const std::vector<PageInfo>& pages = memory_map->committed_pages;
for (std::vector<PageInfo>::const_iterator it = pages.begin();
it != pages.end(); ++it) {
const PageInfo& page_info = *it;
if (page_info.times_mapped == 1) {
++memory_map->private_count;
if (PageIsUnevictable(page_info))
++memory_map->unevictable_private_count;
continue;
}
const uint64 page_frame_number = page_info.page_frame_number;
const std::pair<std::hash_set<uint64>::iterator, bool> result =
physical_pages_mapped_in_process.insert(page_frame_number);
const bool did_insert = result.second;
if (!did_insert) {
// This physical page (mapped multiple times in the same process) was
// already counted.
continue;
}
// See if the current physical page is also mapped in the other
// processes that are being analyzed.
int times_mapped = 0;
int mapped_in_processes_count = 0;
for (std::vector<PFNMap>::const_iterator it = pfn_maps.begin();
it != pfn_maps.end(); ++it) {
const PFNMap& pfn_map = *it;
const PFNMap::const_iterator found_it = pfn_map.find(
page_frame_number);
if (found_it == pfn_map.end())
continue;
++mapped_in_processes_count;
times_mapped += found_it->second;
}
if (times_mapped == page_info.times_mapped) {
// The physical page is only mapped in the processes that are being
// analyzed.
if (mapped_in_processes_count > 1) {
// The physical page is mapped in multiple processes.
++memory_map->app_shared_counts[mapped_in_processes_count - 2];
} else {
// The physical page is mapped multiple times in the same process.
++memory_map->private_count;
if (PageIsUnevictable(page_info))
++memory_map->unevictable_private_count;
}
} else {
++memory_map->other_shared_count;
if (PageIsUnevictable(page_info))
++memory_map->unevictable_other_shared_count;
}
}
}
}
}
void AppendAppSharedField(const std::vector<int>& app_shared_counts,
std::string* out) {
out->append("[");
for (std::vector<int>::const_iterator it = app_shared_counts.begin();
it != app_shared_counts.end(); ++it) {
out->append(base::IntToString(*it * PAGE_SIZE));
if (it + 1 != app_shared_counts.end())
out->append(",");
}
out->append("]");
}
void DumpProcessesMemoryMaps(
const std::vector<ProcessMemory>& processes_memory) {
std::string buf;
std::string app_shared_buf;
for (std::vector<ProcessMemory>::const_iterator it = processes_memory.begin();
it != processes_memory.end(); ++it) {
const ProcessMemory& process_memory = *it;
std::cout << "[ PID=" << process_memory.pid << "]" << '\n';
const std::vector<MemoryMap>& memory_maps = process_memory.memory_maps;
for (std::vector<MemoryMap>::const_iterator it = memory_maps.begin();
it != memory_maps.end(); ++it) {
const MemoryMap& memory_map = *it;
app_shared_buf.clear();
AppendAppSharedField(memory_map.app_shared_counts, &app_shared_buf);
base::SStringPrintf(
&buf,
"%x-%x %s private_unevictable=%d private=%d shared_app=%s "
"shared_other_unevictable=%d shared_other=%d %s\n",
memory_map.start_address,
memory_map.end_address, memory_map.flags.c_str(),
memory_map.unevictable_private_count * PAGE_SIZE,
memory_map.private_count * PAGE_SIZE,
app_shared_buf.c_str(),
memory_map.unevictable_other_shared_count * PAGE_SIZE,
memory_map.other_shared_count * PAGE_SIZE,
memory_map.name.c_str());
std::cout << buf;
}
}
}
void DumpProcessesMemoryMapsInShortFormat(
const std::vector<ProcessMemory>& processes_memory) {
const int KB_PER_PAGE = PAGE_SIZE >> 10;
std::vector<int> totals_app_shared(processes_memory.size());
std::string buf;
std::cout << "pid\tprivate\t\tshared_app\tshared_other (KB)\n";
for (std::vector<ProcessMemory>::const_iterator it = processes_memory.begin();
it != processes_memory.end(); ++it) {
const ProcessMemory& process_memory = *it;
std::fill(totals_app_shared.begin(), totals_app_shared.end(), 0);
int total_private = 0, total_other_shared = 0;
const std::vector<MemoryMap>& memory_maps = process_memory.memory_maps;
for (std::vector<MemoryMap>::const_iterator it = memory_maps.begin();
it != memory_maps.end(); ++it) {
const MemoryMap& memory_map = *it;
total_private += memory_map.private_count;
for (size_t i = 0; i < memory_map.app_shared_counts.size(); ++i)
totals_app_shared[i] += memory_map.app_shared_counts[i];
total_other_shared += memory_map.other_shared_count;
}
double total_app_shared = 0;
for (size_t i = 0; i < totals_app_shared.size(); ++i)
total_app_shared += static_cast<double>(totals_app_shared[i]) / (i + 2);
base::SStringPrintf(
&buf, "%d\t%d\t\t%d\t\t%d\n",
process_memory.pid,
total_private * KB_PER_PAGE,
static_cast<int>(total_app_shared) * KB_PER_PAGE,
total_other_shared * KB_PER_PAGE);
std::cout << buf;
}
}
void DumpProcessesMemoryMapsInExtendedFormat(
const std::vector<ProcessMemory>& processes_memory) {
std::string buf;
std::string app_shared_buf;
for (std::vector<ProcessMemory>::const_iterator it = processes_memory.begin();
it != processes_memory.end(); ++it) {
const ProcessMemory& process_memory = *it;
std::cout << "[ PID=" << process_memory.pid << "]" << '\n';
const std::vector<MemoryMap>& memory_maps = process_memory.memory_maps;
for (std::vector<MemoryMap>::const_iterator it = memory_maps.begin();
it != memory_maps.end(); ++it) {
const MemoryMap& memory_map = *it;
app_shared_buf.clear();
AppendAppSharedField(memory_map.app_shared_counts, &app_shared_buf);
base::SStringPrintf(
&buf,
"%x-%x %s %x private_unevictable=%d private=%d shared_app=%s "
"shared_other_unevictable=%d shared_other=%d \"%s\" [%s]\n",
memory_map.start_address,
memory_map.end_address,
memory_map.flags.c_str(),
memory_map.offset,
memory_map.unevictable_private_count * PAGE_SIZE,
memory_map.private_count * PAGE_SIZE,
app_shared_buf.c_str(),
memory_map.unevictable_other_shared_count * PAGE_SIZE,
memory_map.other_shared_count * PAGE_SIZE,
memory_map.name.c_str(),
memory_map.committed_pages_bits.AsB64String().c_str());
std::cout << buf;
}
}
}
bool CollectProcessMemoryInformation(int page_count_fd,
int page_flags_fd,
ProcessMemory* process_memory) {
const pid_t pid = process_memory->pid;
int pagemap_fd = open(
base::StringPrintf("/proc/%d/pagemap", pid).c_str(), O_RDONLY);
if (pagemap_fd < 0) {
PLOG(ERROR) << "open";
return false;
}
file_util::ScopedFD auto_closer(&pagemap_fd);
std::vector<MemoryMap>* const process_maps = &process_memory->memory_maps;
if (!GetProcessMaps(pid, process_maps))
return false;
for (std::vector<MemoryMap>::iterator it = process_maps->begin();
it != process_maps->end(); ++it) {
std::vector<PageInfo>* const committed_pages = &it->committed_pages;
BitSet* const pages_bits = &it->committed_pages_bits;
GetPagesForMemoryMap(pagemap_fd, *it, committed_pages, pages_bits);
SetPagesInfo(page_count_fd, page_flags_fd, committed_pages);
}
return true;
}
void KillAll(const std::vector<pid_t>& pids, int signal_number) {
for (std::vector<pid_t>::const_iterator it = pids.begin(); it != pids.end();
++it) {
kill(*it, signal_number);
}
}
} // namespace
int main(int argc, char** argv) {
if (argc == 1) {
LOG(ERROR) << "Usage: " << argv[0] << " [-a|-x] <PID1>... <PIDN>";
return EXIT_FAILURE;
}
const bool short_output = !strncmp(argv[1], "-a", 2);
const bool extended_output = !strncmp(argv[1], "-x", 2);
if (short_output || extended_output) {
if (argc == 2) {
LOG(ERROR) << "Usage: " << argv[0] << " [-a|-x] <PID1>... <PIDN>";
return EXIT_FAILURE;
}
++argv;
}
std::vector<pid_t> pids;
for (const char* const* ptr = argv + 1; *ptr; ++ptr) {
pid_t pid;
if (!base::StringToInt(*ptr, &pid))
return EXIT_FAILURE;
pids.push_back(pid);
}
std::vector<ProcessMemory> processes_memory(pids.size());
{
int page_count_fd = open("/proc/kpagecount", O_RDONLY);
if (page_count_fd < 0) {
PLOG(ERROR) << "open /proc/kpagecount";
return EXIT_FAILURE;
}
int page_flags_fd = open("/proc/kpageflags", O_RDONLY);
if (page_flags_fd < 0) {
PLOG(ERROR) << "open /proc/kpageflags";
return EXIT_FAILURE;
}
file_util::ScopedFD page_count_fd_closer(&page_count_fd);
file_util::ScopedFD page_flags_fd_closer(&page_flags_fd);
base::ScopedClosureRunner auto_resume_processes(
base::Bind(&KillAll, pids, SIGCONT));
KillAll(pids, SIGSTOP);
for (std::vector<pid_t>::const_iterator it = pids.begin(); it != pids.end();
++it) {
ProcessMemory* const process_memory =
&processes_memory[it - pids.begin()];
process_memory->pid = *it;
if (!CollectProcessMemoryInformation(page_count_fd,
page_flags_fd,
process_memory))
return EXIT_FAILURE;
}
}
ClassifyPages(&processes_memory);
if (short_output)
DumpProcessesMemoryMapsInShortFormat(processes_memory);
else if (extended_output)
DumpProcessesMemoryMapsInExtendedFormat(processes_memory);
else
DumpProcessesMemoryMaps(processes_memory);
return EXIT_SUCCESS;
}
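
A minimal sketch, in Python, of decoding a single /proc/<pid>/pagemap entry, matching the PageMapEntry bitfields above: each entry is a 64-bit word whose low 55 bits are the page frame number and whose top bit (63) is the present flag. The 4096-byte page size and the helper name are assumptions for illustration.

import struct

PAGE_SIZE = 4096

def read_pagemap_entry(pid, vaddr):
    with open('/proc/%d/pagemap' % pid, 'rb') as f:
        f.seek((vaddr // PAGE_SIZE) * 8)      # one 8-byte entry per virtual page
        (entry,) = struct.unpack('<Q', f.read(8))
    present = bool((entry >> 63) & 1)
    pfn = entry & ((1 << 55) - 1)
    return present, pfn

# Example (reading real PFNs needs root on recent kernels):
#   present, pfn = read_pagemap_entry(1, 0x400000)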

View File

@ -1,31 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'memdump',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
],
'conditions': [
# Warning: A PIE tool cannot run on ICS 4.0.4, so only
# build it as position-independent when ASAN
# is activated. See b/6587214 for details.
[ 'asan==1', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'sources': [
'memdump.cc',
],
},
],
}

View File

@ -1,122 +0,0 @@
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
from sets import Set
_ENTRIES = [
('Total', '.* r... .*'),
('Read-only', '.* r--. .*'),
('Read-write', '.* rw.. .*'),
('Executable', '.* ..x. .*'),
('Anonymous total', '.* .... .* .*other=[0-9]+ ($|.*chromium:.*)'),
('Anonymous read-write', '.* rw.. .* .*other=[0-9]+ ($|.*chromium:.*)'),
('Anonymous executable (JIT\'ed code)', '.* ..x. .* shared_other=[0-9]+ $'),
('File total', '.* .... .* /.*'),
('File read-write', '.* rw.. .* /.*'),
('File executable', '.* ..x. .* /.*'),
('chromium mmap', '.* r... .*chromium:.*'),
('chromium TransferBuffer', '.* r... .*chromium:.*CreateTransferBuffer.*'),
('Galaxy Nexus GL driver', '.* r... .*pvrsrvkm.*'),
('Dalvik', '.* rw.. .* /.*dalvik.*'),
('Dalvik heap', '.* rw.. .* /.*dalvik-heap.*'),
('Native heap (jemalloc)', '.* r... .* /.*jemalloc.*'),
('System heap', '.* r... .* \\[heap\\]'),
('Ashmem', '.* rw.. .* /dev/ashmem .*'),
('libchromeview.so total', '.* r... .* /.*libchromeview.so'),
('libchromeview.so read-only', '.* r--. .* /.*libchromeview.so'),
('libchromeview.so read-write', '.* rw-. .* /.*libchromeview.so'),
('libchromeview.so executable', '.* r.x. .* /.*libchromeview.so'),
]
def _CollectMemoryStats(memdump, region_filters):
processes = []
mem_usage_for_regions = None
regexps = {}
for region_filter in region_filters:
regexps[region_filter] = re.compile(region_filter)
for line in memdump:
if 'PID=' in line:
mem_usage_for_regions = {}
processes.append(mem_usage_for_regions)
continue
matched_regions = Set([])
for region_filter in region_filters:
if regexps[region_filter].match(line.rstrip('\r\n')):
matched_regions.add(region_filter)
if not region_filter in mem_usage_for_regions:
mem_usage_for_regions[region_filter] = {
'private_unevictable': 0,
'private': 0,
'shared_app': 0.0,
'shared_other_unevictable': 0,
'shared_other': 0,
}
for matched_region in matched_regions:
mem_usage = mem_usage_for_regions[matched_region]
for key in mem_usage:
for token in line.split(' '):
if (key + '=') in token:
field = token.split('=')[1]
if key != 'shared_app':
mem_usage[key] += int(field)
else: # shared_app=[\d,\d...]
array = eval(field)
for i in xrange(len(array)):
mem_usage[key] += float(array[i]) / (i + 2)
break
return processes
def _ConvertMemoryField(field):
return str(field / (1024.0 * 1024))
def _DumpCSV(processes_stats):
total_map = {}
i = 0
for process in processes_stats:
i += 1
print (',Process ' + str(i) + ',private,private_unevictable,shared_app,' +
'shared_other,shared_other_unevictable,')
for (k, v) in _ENTRIES:
if not v in process:
print ',' + k + ',0,0,0,0,'
continue
if not v in total_map:
total_map[v] = {'resident':0, 'unevictable':0}
total_map[v]['resident'] += (process[v]['private'] +
process[v]['shared_app'])
total_map[v]['unevictable'] += process[v]['private_unevictable']
print (
',' + k + ',' +
_ConvertMemoryField(process[v]['private']) + ',' +
_ConvertMemoryField(process[v]['private_unevictable']) + ',' +
_ConvertMemoryField(process[v]['shared_app']) + ',' +
_ConvertMemoryField(process[v]['shared_other']) + ',' +
_ConvertMemoryField(process[v]['shared_other_unevictable']) + ','
)
print ''
for (k, v) in _ENTRIES:
if not v in total_map:
print ',' + k + ',0,0,'
continue
print (',' + k + ',' + _ConvertMemoryField(total_map[v]['resident']) + ',' +
_ConvertMemoryField(total_map[v]['unevictable']) + ',')
print ''
def main(argv):
_DumpCSV(_CollectMemoryStats(sys.stdin, [value for (key, value) in _ENTRIES]))
if __name__ == '__main__':
main(sys.argv)
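
A tiny illustration of the shared_app accounting used above: memdump emits shared_app as a list whose element i is the number of bytes shared among i+2 of the analyzed processes, so each process is charged 1/(i+2) of it. The values below are made up.

shared_app = [8192, 12288]  # example: bytes shared by 2 and by 3 processes
charged = sum(float(v) / (i + 2) for i, v in enumerate(shared_app))
print(charged)  # 8192/2 + 12288/3 = 8192.0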

View File

@ -1,152 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import os
import sys
import re
from optparse import OptionParser
"""Extracts the list of resident symbols of a library loaded in a process.
This script combines the extended output of memdump for a given process
(obtained through memdump -x PID) and the symbol table of a .so loaded in that
process (obtained through nm -C lib-with-symbols.so), keeping only those
symbols that, at the time of the snapshot, were resident in memory (that is,
the symbols whose start address belongs to a mapped page of the .so that was
resident at the time of the snapshot).
The aim is to perform a "code coverage"-like profiling of a binary, intersecting
run-time information (list of resident pages) and debug symbols.
"""
_PAGE_SIZE = 4096
def _TestBit(word, bit):
assert(bit >= 0 and bit < 8)
return not not ((word >> bit) & 1)
def _HexAddr(addr):
return hex(addr)[2:].zfill(8)
def _GetResidentPagesSet(memdump_contents, lib_name, verbose):
"""Parses the memdump output and extracts the resident page set for lib_name.
Args:
memdump_contents: Array of strings (lines) of a memdump output.
lib_name: A string containing the name of the library.so to be matched.
verbose: Print a verbose header for each mapping matched.
Returns:
A set of resident pages (the key is the page index) for all the
mappings matching .*lib_name.
"""
resident_pages = set()
MAP_RX = re.compile(
r'^([0-9a-f]+)-([0-9a-f]+) ([\w-]+) ([0-9a-f]+) .* "(.*)" \[(.*)\]$')
for line in memdump_contents:
line = line.rstrip('\r\n')
if line.startswith('[ PID'):
continue
r = MAP_RX.match(line)
if not r:
      sys.stderr.write('Skipping unparsable memdump line: %s\n' % line)
continue
map_start = int(r.group(1), 16)
map_end = int(r.group(2), 16)
prot = r.group(3)
offset = int(r.group(4), 16)
assert(offset % _PAGE_SIZE == 0)
lib = r.group(5)
enc_bitmap = r.group(6)
if not lib.endswith(lib_name):
continue
bitmap = base64.b64decode(enc_bitmap)
map_pages_count = (map_end - map_start + 1) / _PAGE_SIZE
bitmap_pages_count = len(bitmap) * 8
if verbose:
print 'Found %s: mapped %d pages in mode %s @ offset %s.' % (
lib, map_pages_count, prot, _HexAddr(offset))
print ' Map range in the process VA: [%s - %s]. Len: %s' % (
_HexAddr(map_start),
_HexAddr(map_end),
_HexAddr(map_pages_count * _PAGE_SIZE))
print ' Corresponding addresses in the binary: [%s - %s]. Len: %s' % (
_HexAddr(offset),
_HexAddr(offset + map_end - map_start),
_HexAddr(map_pages_count * _PAGE_SIZE))
print ' Bitmap: %d pages' % bitmap_pages_count
print ''
assert(bitmap_pages_count >= map_pages_count)
for i in xrange(map_pages_count):
bitmap_idx = i / 8
bitmap_off = i % 8
if (bitmap_idx < len(bitmap) and
_TestBit(ord(bitmap[bitmap_idx]), bitmap_off)):
resident_pages.add(offset / _PAGE_SIZE + i)
return resident_pages
def main(argv):
NM_RX = re.compile(r'^([0-9a-f]+)\s+.*$')
parser = OptionParser()
parser.add_option("-r", "--reverse",
action="store_true", dest="reverse", default=False,
help="Print out non present symbols")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="Print out verbose debug information.")
(options, args) = parser.parse_args()
if len(args) != 3:
print 'Usage: %s [-v] memdump.file nm.file library.so' % (
os.path.basename(argv[0]))
return 1
memdump_file = args[0]
nm_file = args[1]
lib_name = args[2]
if memdump_file == '-':
memdump_contents = sys.stdin.readlines()
else:
memdump_contents = open(memdump_file, 'r').readlines()
resident_pages = _GetResidentPagesSet(memdump_contents,
lib_name,
options.verbose)
# Process the nm symbol table, filtering out the resident symbols.
nm_fh = open(nm_file, 'r')
for line in nm_fh:
line = line.rstrip('\r\n')
# Skip undefined symbols (lines with no address).
if line.startswith(' '):
continue
r = NM_RX.match(line)
if not r:
sys.stderr.write('Skipping %s from %s\n' % (line, nm_file))
continue
sym_addr = int(r.group(1), 16)
sym_page = sym_addr / _PAGE_SIZE
last_sym_matched = (sym_page in resident_pages)
if (sym_page in resident_pages) != options.reverse:
print line
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
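
A small round trip of the committed-pages bitmap format shared by memdump.cc (BitSet::AsB64String) and the parser above: page i is resident when bit (i % 8) of byte (i / 8) in the base64-decoded buffer is set. The helper names are made up for this sketch.

import base64

def encode_bitmap(resident_pages, num_pages):
    buf = bytearray((num_pages + 7) // 8)
    for page in resident_pages:
        buf[page // 8] |= 1 << (page % 8)
    return base64.b64encode(bytes(buf)).decode('ascii')

def decode_bitmap(b64_string, num_pages):
    buf = base64.b64decode(b64_string)
    return set(i for i in range(num_pages) if (buf[i // 8] >> (i % 8)) & 1)

encoded = encode_bitmap({0, 3, 9}, 16)
print(encoded, decode_bitmap(encoded, 16))  # {0, 3, 9} comes back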

View File

@ -1,49 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Remove strings by name from a GRD file."""
import optparse
import re
import sys
def RemoveStrings(grd_path, string_names):
"""Removes strings with the given names from a GRD file. Overwrites the file.
Args:
grd_path: path to the GRD file.
string_names: a list of string names to be removed.
"""
with open(grd_path, 'r') as f:
grd = f.read()
names_pattern = '|'.join(map(re.escape, string_names))
pattern = r'<message [^>]*name="(%s)".*?</message>\s*' % names_pattern
grd = re.sub(pattern, '', grd, flags=re.DOTALL)
with open(grd_path, 'w') as f:
f.write(grd)
def ParseArgs(args):
usage = 'usage: %prog GRD_PATH...'
parser = optparse.OptionParser(
usage=usage, description='Remove strings from GRD files. Reads string '
'names from stdin, and removes strings with those names from the listed '
'GRD files.')
options, args = parser.parse_args(args=args)
if not args:
parser.error('must provide GRD_PATH argument(s)')
return args
def main(args=None):
grd_paths = ParseArgs(args)
strings_to_remove = filter(None, map(str.strip, sys.stdin.readlines()))
for grd_path in grd_paths:
RemoveStrings(grd_path, strings_to_remove)
if __name__ == '__main__':
main()
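
A quick illustration of the regex used by RemoveStrings above, applied to a made-up GRD fragment: the whole <message> element of each named string is dropped, including trailing whitespace.

import re

grd = ('<messages>\n'
       '  <message name="IDS_KEEP_ME" desc="kept">Keep</message>\n'
       '  <message name="IDS_DROP_ME" desc="dropped">Drop</message>\n'
       '</messages>\n')
names_pattern = '|'.join(map(re.escape, ['IDS_DROP_ME']))
pattern = r'<message [^>]*name="(%s)".*?</message>\s*' % names_pattern
print(re.sub(pattern, '', grd, flags=re.DOTALL))  # only IDS_KEEP_ME remains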

View File

@ -1,25 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Flag completion rule for bash.
# To load in your shell, "source path/to/this/file".
chrome_source=$(cd $(dirname $BASH_SOURCE)/.. && pwd)
_chrome_flag() {
local cur targets
cur="${COMP_WORDS[COMP_CWORD]}"
targets=$(cd $chrome_source; \
git ls-files '*switches*' | \
xargs sed -ne 's/^[^/]*"\([^" /]\{1,\}\)".*/--\1/p')
COMPREPLY=($(compgen -W "$targets" -- "$cur"))
return 0
}
complete -F _chrome_flag google-chrome
complete -F _chrome_flag chrome
if [ $(uname) = "Darwin" ]
then
complete -F _chrome_flag Chromium
fi

View File

@ -1,718 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""
# The root URL for storage.
BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'
# The root URL for official builds.
OFFICIAL_BASE_URL = 'http://master.chrome.corp.google.com/official_builds'
# Changelogs URL.
CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
'perf/dashboard/ui/changelog.html?' \
'url=/trunk/src&range=%d%%3A%d'
# Official Changelogs URL.
OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\
'changelog?old_version=%s&new_version=%s'
# DEPS file URL.
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'
# Blink Changelogs URL.
BLINK_CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
'perf/dashboard/ui/changelog_blink.html?' \
'url=/trunk&range=%d%%3A%d'
DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \
'(known good), but no later than %s (first known bad).'
DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \
'(known bad), but no later than %s (first known good).'
###############################################################################
import math
import optparse
import os
import pipes
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile
class PathContext(object):
"""A PathContext is used to carry the information used to construct URLs and
paths when dealing with the storage server and archives."""
def __init__(self, platform, good_revision, bad_revision, is_official):
super(PathContext, self).__init__()
# Store off the input parameters.
self.platform = platform # What's passed in to the '-a/--archive' option.
self.good_revision = good_revision
self.bad_revision = bad_revision
self.is_official = is_official
# The name of the ZIP file in a revision directory on the server.
self.archive_name = None
# Set some internal members:
# _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
# _archive_extract_dir = Uncompressed directory in the archive_name file.
# _binary_name = The name of the executable to run.
if self.platform in ('linux', 'linux64', 'linux-arm'):
self._binary_name = 'chrome'
elif self.platform == 'mac':
self.archive_name = 'chrome-mac.zip'
self._archive_extract_dir = 'chrome-mac'
elif self.platform == 'win':
self.archive_name = 'chrome-win32.zip'
self._archive_extract_dir = 'chrome-win32'
self._binary_name = 'chrome.exe'
else:
raise Exception('Invalid platform: %s' % self.platform)
if is_official:
if self.platform == 'linux':
self._listing_platform_dir = 'lucid32bit/'
self.archive_name = 'chrome-lucid32bit.zip'
self._archive_extract_dir = 'chrome-lucid32bit'
elif self.platform == 'linux64':
self._listing_platform_dir = 'lucid64bit/'
self.archive_name = 'chrome-lucid64bit.zip'
self._archive_extract_dir = 'chrome-lucid64bit'
elif self.platform == 'mac':
self._listing_platform_dir = 'mac/'
self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
elif self.platform == 'win':
self._listing_platform_dir = 'win/'
else:
if self.platform in ('linux', 'linux64', 'linux-arm'):
self.archive_name = 'chrome-linux.zip'
self._archive_extract_dir = 'chrome-linux'
if self.platform == 'linux':
self._listing_platform_dir = 'Linux/'
elif self.platform == 'linux64':
self._listing_platform_dir = 'Linux_x64/'
elif self.platform == 'linux-arm':
self._listing_platform_dir = 'Linux_ARM_Cross-Compile/'
elif self.platform == 'mac':
self._listing_platform_dir = 'Mac/'
self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
elif self.platform == 'win':
self._listing_platform_dir = 'Win/'
def GetListingURL(self, marker=None):
"""Returns the URL for a directory listing, with an optional marker."""
marker_param = ''
if marker:
marker_param = '&marker=' + str(marker)
return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \
marker_param
def GetDownloadURL(self, revision):
"""Gets the download URL for a build archive of a specific revision."""
if self.is_official:
return "%s/%s/%s%s" % (
OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
self.archive_name)
else:
return "%s/%s%s/%s" % (
BASE_URL, self._listing_platform_dir, revision, self.archive_name)
def GetLastChangeURL(self):
"""Returns a URL to the LAST_CHANGE file."""
return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'
def GetLaunchPath(self):
"""Returns a relative path (presumably from the archive extraction location)
that is used to run the executable."""
return os.path.join(self._archive_extract_dir, self._binary_name)
def ParseDirectoryIndex(self):
"""Parses the Google Storage directory listing into a list of revision
numbers."""
def _FetchAndParse(url):
"""Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
next-marker is not None, then the listing is a partial listing and another
fetch should be performed with next-marker being the marker= GET
parameter."""
handle = urllib.urlopen(url)
document = ElementTree.parse(handle)
# All nodes in the tree are namespaced. Get the root's tag name to extract
# the namespace. Etree does namespaces as |{namespace}tag|.
root_tag = document.getroot().tag
end_ns_pos = root_tag.find('}')
if end_ns_pos == -1:
raise Exception("Could not locate end namespace for directory index")
namespace = root_tag[:end_ns_pos + 1]
# Find the prefix (_listing_platform_dir) and whether or not the list is
# truncated.
prefix_len = len(document.find(namespace + 'Prefix').text)
next_marker = None
is_truncated = document.find(namespace + 'IsTruncated')
if is_truncated is not None and is_truncated.text.lower() == 'true':
next_marker = document.find(namespace + 'NextMarker').text
# Get a list of all the revisions.
all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
namespace + 'Prefix')
# The <Prefix> nodes have content of the form of
# |_listing_platform_dir/revision/|. Strip off the platform dir and the
# trailing slash to just have a number.
revisions = []
for prefix in all_prefixes:
revnum = prefix.text[prefix_len:-1]
try:
revnum = int(revnum)
revisions.append(revnum)
except ValueError:
pass
return (revisions, next_marker)
# Fetch the first list of revisions.
(revisions, next_marker) = _FetchAndParse(self.GetListingURL())
# If the result list was truncated, refetch with the next marker. Do this
# until an entire directory listing is done.
while next_marker:
next_url = self.GetListingURL(next_marker)
(new_revisions, next_marker) = _FetchAndParse(next_url)
revisions.extend(new_revisions)
return revisions
def GetRevList(self):
"""Gets the list of revision numbers between self.good_revision and
self.bad_revision."""
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
revlist = map(int, self.ParseDirectoryIndex())
revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)]
revlist.sort()
return revlist
def GetOfficialBuildsList(self):
"""Gets the list of official build numbers between self.good_revision and
self.bad_revision."""
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
handle = urllib.urlopen(OFFICIAL_BASE_URL)
dirindex = handle.read()
handle.close()
build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
final_list = []
i = 0
parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
for build_number in sorted(parsed_build_numbers):
path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \
self._listing_platform_dir + self.archive_name
i = i + 1
try:
connection = urllib.urlopen(path)
connection.close()
if build_number > maxrev:
break
if build_number >= minrev:
final_list.append(str(build_number))
except IOError:
pass
return final_list
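# Illustrative sketch (hypothetical helper, not called anywhere in this
# script): how a PathContext ties the pieces above together. It assumes the
# module-level BASE_URL constant defined earlier in this file; the revision
# number is an arbitrary example.
def _ExamplePathContextUsage():
  context = PathContext('linux64', 0, 0, False)
  # For Chromium snapshot builds these resolve under BASE_URL/Linux_x64/.
  print 'Download URL :', context.GetDownloadURL(223311)
  print 'LAST_CHANGE  :', context.GetLastChangeURL()
  print 'Launch path  :', context.GetLaunchPath()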
def UnzipFilenameToDir(filename, dir):
"""Unzip |filename| to directory |dir|."""
cwd = os.getcwd()
if not os.path.isabs(filename):
filename = os.path.join(cwd, filename)
zf = zipfile.ZipFile(filename)
# Make base.
if not os.path.isdir(dir):
os.mkdir(dir)
os.chdir(dir)
# Extract files.
for info in zf.infolist():
name = info.filename
if name.endswith('/'): # dir
if not os.path.isdir(name):
os.makedirs(name)
else: # file
dir = os.path.dirname(name)
if not os.path.isdir(dir):
os.makedirs(dir)
out = open(name, 'wb')
out.write(zf.read(name))
out.close()
# Set permissions. Permission info in external_attr is shifted 16 bits.
os.chmod(name, info.external_attr >> 16L)
os.chdir(cwd)
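# Side note on the permission handling above: for archives created on a Unix
# host, ZipInfo.external_attr carries the Unix mode bits in its upper 16 bits,
# which is why the code shifts by 16 before calling os.chmod. A rough,
# hypothetical round-trip check ('some.zip' is a placeholder):
#
#   import stat, zipfile
#   info = zipfile.ZipFile('some.zip').infolist()[0]
#   mode = info.external_attr >> 16
#   print oct(stat.S_IMODE(mode))   # e.g. 0755 for an executable entry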
def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
"""Downloads and unzips revision |rev|.
@param context A PathContext instance.
@param rev The Chromium revision number/tag to download.
@param filename The destination for the downloaded file.
@param quit_event A threading.Event which will be set by the master thread to
indicate that the download should be aborted.
@param progress_event A threading.Event which will be set by the master thread
to indicate that the progress of the download should be
displayed.
"""
def ReportHook(blocknum, blocksize, totalsize):
if quit_event and quit_event.isSet():
raise RuntimeError("Aborting download of revision %s" % str(rev))
if progress_event and progress_event.isSet():
size = blocknum * blocksize
if totalsize == -1: # Total size not known.
progress = "Received %d bytes" % size
else:
size = min(totalsize, size)
progress = "Received %d of %d bytes, %.2f%%" % (
size, totalsize, 100.0 * size / totalsize)
# Send a \r to let all progress messages use just one line of output.
sys.stdout.write("\r" + progress)
sys.stdout.flush()
download_url = context.GetDownloadURL(rev)
try:
urllib.urlretrieve(download_url, filename, ReportHook)
if progress_event and progress_event.isSet():
print
except RuntimeError, e:
pass
def RunRevision(context, revision, zipfile, profile, num_runs, command, args):
"""Given a zipped revision, unzip it and run the test."""
print "Trying revision %s..." % str(revision)
# Create a temp directory and unzip the revision into it.
cwd = os.getcwd()
tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
UnzipFilenameToDir(zipfile, tempdir)
os.chdir(tempdir)
# Run the build as many times as specified.
testargs = ['--user-data-dir=%s' % profile] + args
# The sandbox must be run as root on Official Chrome, so bypass it.
if context.is_official and context.platform.startswith('linux'):
testargs.append('--no-sandbox')
runcommand = []
for token in command.split():
if token == "%a":
runcommand.extend(testargs)
else:
runcommand.append( \
token.replace('%p', context.GetLaunchPath()) \
.replace('%s', ' '.join(testargs)))
for i in range(0, num_runs):
subproc = subprocess.Popen(runcommand,
bufsize=-1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = subproc.communicate()
os.chdir(cwd)
try:
shutil.rmtree(tempdir, True)
except Exception, e:
pass
return (subproc.returncode, stdout, stderr)
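# Illustrative example of the command-template expansion above (all values
# hypothetical): with command '%p --enable-logging %a', a launch path of
# 'chrome-linux/chrome' and extra args ['--foo'], the loop builds
#   ['chrome-linux/chrome', '--enable-logging',
#    '--user-data-dir=profile', '--foo']
# i.e. '%a' is spliced in as separate argv entries, while '%p' (and '%s') are
# substituted inside their own token.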
def AskIsGoodBuild(rev, official_builds, status, stdout, stderr):
"""Ask the user whether build |rev| is good or bad."""
# Loop until we get a response that we can parse.
while True:
response = raw_input('Revision %s is [(g)ood/(b)ad/(u)nknown/(q)uit]: ' %
str(rev))
if response and response in ('g', 'b', 'u'):
return response
if response and response == 'q':
raise SystemExit()
class DownloadJob(object):
"""DownloadJob represents a task to download a given Chromium revision."""
def __init__(self, context, name, rev, zipfile):
super(DownloadJob, self).__init__()
# Store off the input parameters.
self.context = context
self.name = name
self.rev = rev
self.zipfile = zipfile
self.quit_event = threading.Event()
self.progress_event = threading.Event()
def Start(self):
"""Starts the download."""
fetchargs = (self.context,
self.rev,
self.zipfile,
self.quit_event,
self.progress_event)
self.thread = threading.Thread(target=FetchRevision,
name=self.name,
args=fetchargs)
self.thread.start()
def Stop(self):
"""Stops the download which must have been started previously."""
self.quit_event.set()
self.thread.join()
os.unlink(self.zipfile)
def WaitFor(self):
"""Prints a message and waits for the download to complete. The download
must have been started previously."""
print "Downloading revision %s..." % str(self.rev)
self.progress_event.set() # Display progress of download.
self.thread.join()
def Bisect(platform,
official_builds,
good_rev=0,
bad_rev=0,
num_runs=1,
command="%p %a",
try_args=(),
profile=None,
evaluate=AskIsGoodBuild):
"""Given known good and known bad revisions, run a binary search on all
archived revisions to determine the last known good revision.
@param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
@param official_builds Specify build type (Chromium or Official build).
@param good_rev Number/tag of the known good revision.
@param bad_rev Number/tag of the known bad revision.
@param num_runs Number of times to run each build before asking good/bad.
@param command Command template to run; %p expands to the executable path and
%a to the extra arguments.
@param try_args A tuple of arguments to pass to the test application.
@param profile The name of the user profile to run with.
@param evaluate A function which returns 'g' if the argument build is good,
'b' if it's bad or 'u' if unknown.
Threading is used to fetch Chromium revisions in the background, speeding up
the user's experience. For example, suppose the bounds of the search are
good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
whether revision 50 is good or bad, the next revision to check will be either
25 or 75. So, while revision 50 is being checked, the script will download
revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
known:
- If rev 50 is good, the download of rev 25 is cancelled, and the next test
is run on rev 75.
- If rev 50 is bad, the download of rev 75 is cancelled, and the next test
is run on rev 25.
"""
if not profile:
profile = 'profile'
context = PathContext(platform, good_rev, bad_rev, official_builds)
cwd = os.getcwd()
print "Downloading list of known revisions..."
_GetDownloadPath = lambda rev: os.path.join(cwd,
'%s-%s' % (str(rev), context.archive_name))
if official_builds:
revlist = context.GetOfficialBuildsList()
else:
revlist = context.GetRevList()
# Get a list of revisions to bisect across.
if len(revlist) < 2: # Don't have enough builds to bisect.
msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
raise RuntimeError(msg)
# Figure out our bookends and first pivot point; fetch the pivot revision.
minrev = 0
maxrev = len(revlist) - 1
pivot = maxrev / 2
rev = revlist[pivot]
zipfile = _GetDownloadPath(rev)
fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
fetch.Start()
fetch.WaitFor()
# Binary search time!
while fetch and fetch.zipfile and maxrev - minrev > 1:
if bad_rev < good_rev:
min_str, max_str = "bad", "good"
else:
min_str, max_str = "good", "bad"
print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str, \
revlist[maxrev], max_str)
# Pre-fetch next two possible pivots
# - down_pivot is the next revision to check if the current revision turns
# out to be bad.
# - up_pivot is the next revision to check if the current revision turns
# out to be good.
down_pivot = int((pivot - minrev) / 2) + minrev
down_fetch = None
if down_pivot != pivot and down_pivot != minrev:
down_rev = revlist[down_pivot]
down_fetch = DownloadJob(context, 'down_fetch', down_rev,
_GetDownloadPath(down_rev))
down_fetch.Start()
up_pivot = int((maxrev - pivot) / 2) + pivot
up_fetch = None
if up_pivot != pivot and up_pivot != maxrev:
up_rev = revlist[up_pivot]
up_fetch = DownloadJob(context, 'up_fetch', up_rev,
_GetDownloadPath(up_rev))
up_fetch.Start()
# Run test on the pivot revision.
status = None
stdout = None
stderr = None
try:
(status, stdout, stderr) = RunRevision(context,
rev,
fetch.zipfile,
profile,
num_runs,
command,
try_args)
except Exception, e:
print >>sys.stderr, e
fetch.Stop()
fetch = None
# Call the evaluate function to see if the current revision is good or bad.
# On that basis, kill one of the background downloads and complete the
# other, as described in the comments above.
try:
answer = evaluate(rev, official_builds, status, stdout, stderr)
if answer == 'g' and good_rev < bad_rev or \
answer == 'b' and bad_rev < good_rev:
minrev = pivot
if down_fetch:
down_fetch.Stop() # Kill the download of the older revision.
if up_fetch:
up_fetch.WaitFor()
pivot = up_pivot
fetch = up_fetch
elif answer == 'b' and good_rev < bad_rev or \
answer == 'g' and bad_rev < good_rev:
maxrev = pivot
if up_fetch:
up_fetch.Stop() # Kill the download of the newer revision.
if down_fetch:
down_fetch.WaitFor()
pivot = down_pivot
fetch = down_fetch
elif answer == 'u':
# Nuke the revision from the revlist and choose a new pivot.
revlist.pop(pivot)
maxrev -= 1 # Assumes maxrev >= pivot.
if maxrev - minrev > 1:
# Alternate between using down_pivot or up_pivot for the new pivot
# point, without affecting the range. Do this instead of setting the
# pivot to the midpoint of the new range because adjacent revisions
# are likely affected by the same issue that caused the (u)nknown
# response.
if up_fetch and down_fetch:
fetch = [up_fetch, down_fetch][len(revlist) % 2]
elif up_fetch:
fetch = up_fetch
else:
fetch = down_fetch
fetch.WaitFor()
if fetch == up_fetch:
pivot = up_pivot - 1 # Subtracts 1 because revlist was resized.
else:
pivot = down_pivot
zipfile = fetch.zipfile
if down_fetch and fetch != down_fetch:
down_fetch.Stop()
if up_fetch and fetch != up_fetch:
up_fetch.Stop()
else:
assert False, "Unexpected return value from evaluate(): " + answer
except SystemExit:
print "Cleaning up..."
for f in [_GetDownloadPath(revlist[down_pivot]),
_GetDownloadPath(revlist[up_pivot])]:
try:
os.unlink(f)
except OSError:
pass
sys.exit(0)
rev = revlist[pivot]
return (revlist[minrev], revlist[maxrev])
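# Worked example of the pivot bookkeeping above (indices into revlist; the
# numbers are hypothetical): with 101 archived revisions, minrev=0 and
# maxrev=100, the first pivot is 50; down_pivot = (50 - 0) / 2 + 0 = 25 and
# up_pivot = (100 - 50) / 2 + 50 = 75 are prefetched in the background. With
# good_rev < bad_rev, a 'good' answer moves minrev to 50 and continues at
# pivot 75, while a 'bad' answer moves maxrev to 50 and continues at pivot 25.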
def GetBlinkRevisionForChromiumRevision(rev):
"""Returns the blink revision that was in chromium's DEPS file at
chromium revision |rev|."""
# . doesn't match newlines without re.DOTALL, so this is safe.
blink_re = re.compile(r'webkit_revision.:\D*(\d+)')
url = urllib.urlopen(DEPS_FILE % rev)
m = blink_re.search(url.read())
url.close()
if m:
return int(m.group(1))
else:
raise Exception('Could not get blink revision for cr rev %d' % rev)
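# Quick, self-contained sanity check for the regex above (the DEPS snippet is
# made up but mirrors the real format):
#
#   >>> import re
#   >>> blink_re = re.compile(r'webkit_revision.:\D*(\d+)')
#   >>> blink_re.search('"webkit_revision": "161115",').group(1)
#   '161115'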
def GetChromiumRevision(url):
"""Returns the chromium revision read from given URL."""
try:
# Location of the latest build revision number
return int(urllib.urlopen(url).read())
except Exception, e:
print('Could not determine latest revision. This could be bad...')
return 999999999
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Perform binary search on the snapshot builds to find a minimal\n'
'range of revisions where a behavior change happened. The\n'
'behaviors are described as "good" and "bad".\n'
'It is NOT assumed that the behavior of the later revision is\n'
'the bad one.\n'
'\n'
'Revision numbers should use\n'
' Official versions (e.g. 1.0.1000.0) for official builds. (-o)\n'
' SVN revisions (e.g. 123456) for chromium builds, from trunk.\n'
' Use base_trunk_revision from http://omahaproxy.appspot.com/\n'
' for earlier revs.\n'
' Chrome\'s about: build number and omahaproxy branch_revision\n'
' are incorrect; they are from branches.\n'
'\n'
'Tip: add "-- --no-first-run" to bypass the first run prompts.')
parser = optparse.OptionParser(usage=usage)
# Strangely, the default help output doesn't include the choice list.
choices = ['mac', 'win', 'linux', 'linux64', 'linux-arm']
# linux-chromiumos lacks a continuous archive http://crbug.com/78158
parser.add_option('-a', '--archive',
choices = choices,
help = 'The buildbot archive to bisect [%s].' %
'|'.join(choices))
parser.add_option('-o', action="store_true", dest='official_builds',
help = 'Bisect across official ' +
'Chrome builds (internal only) instead of ' +
'Chromium archives.')
parser.add_option('-b', '--bad', type = 'str',
help = 'A bad revision to start bisection. ' +
'May be earlier or later than the good revision. ' +
'Default is HEAD.')
parser.add_option('-g', '--good', type = 'str',
help = 'A good revision to start bisection. ' +
'May be earlier or later than the bad revision. ' +
'Default is 0.')
parser.add_option('-p', '--profile', '--user-data-dir', type = 'str',
help = 'Profile to use; this will not reset every run. ' +
'Defaults to a clean profile.', default = 'profile')
parser.add_option('-t', '--times', type = 'int',
help = 'Number of times to run each build before asking ' +
'if it\'s good or bad. Temporary profiles are reused.',
default = 1)
parser.add_option('-c', '--command', type = 'str',
help = 'Command to execute. %p and %a refer to Chrome ' +
'executable and specified extra arguments respectively. ' +
'Use %s to specify all extra arguments as one string. ' +
'Defaults to "%p %a". Note that any extra paths ' +
'specified should be absolute.',
default = '%p %a');
(opts, args) = parser.parse_args()
if opts.archive is None:
print 'Error: missing required parameter: --archive'
print
parser.print_help()
return 1
# Create the context. Initialize 0 for the revisions as they are set below.
context = PathContext(opts.archive, 0, 0, opts.official_builds)
# Pick a starting point, try to get HEAD for this.
if opts.bad:
bad_rev = opts.bad
else:
bad_rev = '999.0.0.0'
if not opts.official_builds:
bad_rev = GetChromiumRevision(context.GetLastChangeURL())
# Find out when we were good.
if opts.good:
good_rev = opts.good
else:
good_rev = '0.0.0.0' if opts.official_builds else 0
if opts.official_builds:
good_rev = LooseVersion(good_rev)
bad_rev = LooseVersion(bad_rev)
else:
good_rev = int(good_rev)
bad_rev = int(bad_rev)
if opts.times < 1:
print('Number of times to run (%d) must be greater than or equal to 1.' %
opts.times)
parser.print_help()
return 1
(min_chromium_rev, max_chromium_rev) = Bisect(
opts.archive, opts.official_builds, good_rev, bad_rev, opts.times,
opts.command, args, opts.profile)
# Get corresponding blink revisions.
try:
min_blink_rev = GetBlinkRevisionForChromiumRevision(min_chromium_rev)
max_blink_rev = GetBlinkRevisionForChromiumRevision(max_chromium_rev)
except Exception, e:
# Silently ignore the failure.
min_blink_rev, max_blink_rev = 0, 0
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev), str(max_chromium_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev), str(max_chromium_rev))
if min_blink_rev != max_blink_rev:
print 'BLINK CHANGELOG URL:'
print ' ' + BLINK_CHANGELOG_URL % (max_blink_rev, min_blink_rev)
print 'CHANGELOG URL:'
if opts.official_builds:
print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
else:
print ' ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
if __name__ == '__main__':
sys.exit(main())

View File

@ -1,52 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple script which asks user to manually check result of bisection.
Typically used by the run-bisect-manual-test.py script.
"""
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
from telemetry.core import browser_finder
from telemetry.core import browser_options
def _StartManualTest(options):
"""Start browser then ask the user whether build is good or bad."""
browser_to_create = browser_finder.FindBrowser(options)
print 'Starting browser: %s.' % options.browser_type
with browser_to_create.Create() as browser:
# Loop until we get a response that we can parse.
while True:
sys.stderr.write('Revision is [(g)ood/(b)ad]: ')
response = raw_input()
if response and response in ('g', 'b'):
if response == 'g':
print "RESULT manual_test: manual_test= 1"
else:
print "RESULT manual_test: manual_test= 0"
break
browser.Close()
def main():
usage = ('%prog [options]\n'
'Starts browser with an optional url and asks user whether '
'revision is good or bad.\n')
options = browser_options.BrowserOptions()
parser = options.CreateParser(usage)
options, args = parser.parse_args()
_StartManualTest(options)
if __name__ == '__main__':
sys.exit(main())

File diff suppressed because it is too large

View File

@ -1,53 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
bisect_builds = __import__('bisect-builds')
class BisectTest(unittest.TestCase):
patched = []
max_rev = 10000
def monkey_patch(self, obj, name, new):
self.patched.append((obj, name, getattr(obj, name)))
setattr(obj, name, new)
def clear_patching(self):
for obj, name, old in self.patched:
setattr(obj, name, old)
self.patched = []
def setUp(self):
self.monkey_patch(bisect_builds.DownloadJob, 'Start', lambda *args: None)
self.monkey_patch(bisect_builds.DownloadJob, 'Stop', lambda *args: None)
self.monkey_patch(bisect_builds.DownloadJob, 'WaitFor', lambda *args: None)
self.monkey_patch(bisect_builds, 'RunRevision', lambda *args: (0, "", ""))
self.monkey_patch(bisect_builds.PathContext, 'ParseDirectoryIndex',
lambda *args: range(self.max_rev))
def tearDown(self):
self.clear_patching()
def bisect(self, good_rev, bad_rev, evaluate):
return bisect_builds.Bisect(good_rev=good_rev,
bad_rev=bad_rev,
evaluate=evaluate,
num_runs=1,
official_builds=False,
platform='linux',
profile=None,
try_args=())
def testBisectConsistentAnswer(self):
self.assertEqual(self.bisect(1000, 100, lambda *args: 'g'), (100, 101))
self.assertEqual(self.bisect(100, 1000, lambda *args: 'b'), (100, 101))
self.assertEqual(self.bisect(2000, 200, lambda *args: 'b'), (1999, 2000))
self.assertEqual(self.bisect(200, 2000, lambda *args: 'g'), (1999, 2000))
if __name__ == '__main__':
unittest.main()

View File

@ -1,402 +0,0 @@
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Set of operations/utilities related to checking out the depot, and
outputting annotations on the buildbot waterfall. These are intended to be
used by the bisection scripts."""
import errno
import os
import shutil
import subprocess
import sys
GCLIENT_SPEC_DATA = [
{ "name" : "src",
"url" : "https://chromium.googlesource.com/chromium/src.git",
"deps_file" : ".DEPS.git",
"managed" : True,
"custom_deps" : {
"src/data/page_cycler": "https://chrome-internal.googlesource.com/"
"chrome/data/page_cycler/.git",
"src/data/dom_perf": "https://chrome-internal.googlesource.com/"
"chrome/data/dom_perf/.git",
"src/data/mach_ports": "https://chrome-internal.googlesource.com/"
"chrome/data/mach_ports/.git",
"src/tools/perf/data": "https://chrome-internal.googlesource.com/"
"chrome/tools/perf/data/.git",
"src/third_party/adobe/flash/binaries/ppapi/linux":
"https://chrome-internal.googlesource.com/"
"chrome/deps/adobe/flash/binaries/ppapi/linux/.git",
"src/third_party/adobe/flash/binaries/ppapi/linux_x64":
"https://chrome-internal.googlesource.com/"
"chrome/deps/adobe/flash/binaries/ppapi/linux_x64/.git",
"src/third_party/adobe/flash/binaries/ppapi/mac":
"https://chrome-internal.googlesource.com/"
"chrome/deps/adobe/flash/binaries/ppapi/mac/.git",
"src/third_party/adobe/flash/binaries/ppapi/mac_64":
"https://chrome-internal.googlesource.com/"
"chrome/deps/adobe/flash/binaries/ppapi/mac_64/.git",
"src/third_party/adobe/flash/binaries/ppapi/win":
"https://chrome-internal.googlesource.com/"
"chrome/deps/adobe/flash/binaries/ppapi/win/.git",
"src/third_party/adobe/flash/binaries/ppapi/win_x64":
"https://chrome-internal.googlesource.com/"
"chrome/deps/adobe/flash/binaries/ppapi/win_x64/.git",
},
"safesync_url": "",
},
]
GCLIENT_SPEC_ANDROID = "\ntarget_os = ['android']"
GCLIENT_CUSTOM_DEPS_V8 = {"src/v8_bleeding_edge": "git://github.com/v8/v8.git"}
FILE_DEPS_GIT = '.DEPS.git'
REPO_PARAMS = [
'https://chrome-internal.googlesource.com/chromeos/manifest-internal/',
'--repo-url',
'https://git.chromium.org/external/repo.git'
]
REPO_SYNC_COMMAND = 'git checkout -f $(git rev-list --max-count=1 '\
'--before=%d remotes/m/master)'
ORIGINAL_ENV = {}
def OutputAnnotationStepStart(name):
"""Outputs appropriate annotation to signal the start of a step to
a trybot.
Args:
name: The name of the step.
"""
print
print '@@@SEED_STEP %s@@@' % name
print '@@@STEP_CURSOR %s@@@' % name
print '@@@STEP_STARTED@@@'
print
sys.stdout.flush()
def OutputAnnotationStepClosed():
"""Outputs appropriate annotation to signal the closing of a step to
a trybot."""
print
print '@@@STEP_CLOSED@@@'
print
sys.stdout.flush()
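# Hypothetical usage: wrapping a unit of work in the buildbot annotations
# emitted by the two helpers above (this is the pattern SetupGitDepot follows
# later in this file).
#
#   OutputAnnotationStepStart('Gathering Revisions')
#   ...do the work...
#   OutputAnnotationStepClosed()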
def CreateAndChangeToSourceDirectory(working_directory):
"""Creates a directory 'bisect' as a subdirectory of 'working_directory'. If
the function is successful, the current working directory will change to that
of the new 'bisect' directory.
Returns:
True if the directory was successfully created (or already existed).
"""
cwd = os.getcwd()
os.chdir(working_directory)
try:
os.mkdir('bisect')
except OSError, e:
if e.errno != errno.EEXIST:
return False
os.chdir('bisect')
return True
def SubprocessCall(cmd, cwd=None):
"""Runs a subprocess with specified parameters.
Args:
cmd: The command to run, as a list of strings.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
if os.name == 'nt':
# "HOME" isn't normally defined on windows, but is needed
# for git to find the user's .netrc file.
if not os.getenv('HOME'):
os.environ['HOME'] = os.environ['USERPROFILE']
shell = os.name == 'nt'
return subprocess.call(cmd, shell=shell, cwd=cwd)
def RunGClient(params, cwd=None):
"""Runs gclient with the specified parameters.
Args:
params: A list of parameters to pass to gclient.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
cmd = ['gclient'] + params
return SubprocessCall(cmd, cwd=cwd)
def RunRepo(params):
"""Runs cros repo command with specified parameters.
Args:
params: A list of parameters to pass to repo.
Returns:
The return code of the call.
"""
cmd = ['repo'] + params
return SubprocessCall(cmd)
def RunRepoSyncAtTimestamp(timestamp):
"""Syncs all git depots to the timestamp specified using repo forall.
Args:
timestamp: Unix timestamp to sync to.
Returns:
The return code of the call.
"""
repo_sync = REPO_SYNC_COMMAND % timestamp
cmd = ['forall', '-c', repo_sync]
return RunRepo(cmd)
def RunGClientAndCreateConfig(opts, custom_deps=None, cwd=None):
"""Runs gclient and creates a config containing both src and src-internal.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
spec = GCLIENT_SPEC_DATA
if custom_deps:
for k, v in custom_deps.iteritems():
spec[0]['custom_deps'][k] = v
# Cannot have newlines in string on windows
spec = 'solutions =' + str(spec)
spec = ''.join([l for l in spec.splitlines()])
if opts.target_platform == 'android':
spec += GCLIENT_SPEC_ANDROID
return_code = RunGClient(
['config', '--spec=%s' % spec, '--git-deps'], cwd=cwd)
return return_code
def IsDepsFileBlink():
"""Reads .DEPS.git and returns whether or not we're using blink.
Returns:
True if blink, false if webkit.
"""
locals = {'Var': lambda _: locals["vars"][_],
'From': lambda *args: None}
execfile(FILE_DEPS_GIT, {}, locals)
return 'blink.git' in locals['vars']['webkit_url']
def RemoveThirdPartyWebkitDirectory():
"""Removes third_party/WebKit.
Returns:
True on success.
"""
try:
path_to_dir = os.path.join(os.getcwd(), 'third_party', 'WebKit')
if os.path.exists(path_to_dir):
shutil.rmtree(path_to_dir)
except OSError, e:
if e.errno != errno.ENOENT:
return False
return True
def RunGClientAndSync(cwd=None):
"""Runs gclient and does a normal sync.
Args:
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
params = ['sync', '--verbose', '--nohooks', '--reset', '--force']
return RunGClient(params, cwd=cwd)
def SetupGitDepot(opts):
"""Sets up the depot for the bisection. The depot will be located in a
subdirectory called 'bisect'.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
True if gclient successfully created the config file and did a sync, False
otherwise.
"""
name = 'Setting up Bisection Depot'
if opts.output_buildbot_annotations:
OutputAnnotationStepStart(name)
passed = False
if not RunGClientAndCreateConfig(opts):
passed_deps_check = True
if os.path.isfile(os.path.join('src', FILE_DEPS_GIT)):
cwd = os.getcwd()
os.chdir('src')
if not IsDepsFileBlink():
passed_deps_check = RemoveThirdPartyWebkitDirectory()
else:
passed_deps_check = True
os.chdir(cwd)
if passed_deps_check:
RunGClient(['revert'])
if not RunGClientAndSync():
passed = True
if opts.output_buildbot_annotations:
print
OutputAnnotationStepClosed()
return passed
def SetupCrosRepo():
"""Sets up cros repo for bisecting chromeos.
Returns:
Returns 0 on success.
"""
cwd = os.getcwd()
try:
os.mkdir('cros')
except OSError, e:
if e.errno != errno.EEXIST:
return False
os.chdir('cros')
cmd = ['init', '-u'] + REPO_PARAMS
passed = False
if not RunRepo(cmd):
if not RunRepo(['sync']):
passed = True
os.chdir(cwd)
return passed
def CopyAndSaveOriginalEnvironmentVars():
"""Makes a copy of the current environment variables."""
# TODO: Waiting on crbug.com/255689, will remove this after.
vars_to_remove = []
for k, v in os.environ.iteritems():
if 'ANDROID' in k:
vars_to_remove.append(k)
vars_to_remove.append('CHROME_SRC')
vars_to_remove.append('CHROMIUM_GYP_FILE')
vars_to_remove.append('GYP_CROSSCOMPILE')
vars_to_remove.append('GYP_DEFINES')
vars_to_remove.append('GYP_GENERATORS')
vars_to_remove.append('GYP_GENERATOR_FLAGS')
vars_to_remove.append('OBJCOPY')
for k in vars_to_remove:
if os.environ.has_key(k):
del os.environ[k]
global ORIGINAL_ENV
ORIGINAL_ENV = os.environ.copy()
def SetupAndroidBuildEnvironment(opts):
"""Sets up the android build environment.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
True if successful.
"""
# Revert the environment variables back to default before setting them up
# with envsetup.sh.
env_vars = os.environ.copy()
for k, _ in env_vars.iteritems():
del os.environ[k]
for k, v in ORIGINAL_ENV.iteritems():
os.environ[k] = v
path_to_file = os.path.join('build', 'android', 'envsetup.sh')
proc = subprocess.Popen(['bash', '-c', 'source %s && env' % path_to_file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd='src')
(out, _) = proc.communicate()
for line in out.splitlines():
(k, _, v) = line.partition('=')
os.environ[k] = v
return not proc.returncode
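# The "bash -c 'source <script> && env'" pattern above is a generic way to
# import variables exported by a shell script into os.environ. A minimal
# standalone sketch of the same idea (the script path is hypothetical, and it
# shares the same simple line-by-line parsing assumptions as the code above):
def _ImportShellEnvironment(script_path):
  proc = subprocess.Popen(['bash', '-c', 'source %s && env' % script_path],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  (out, _) = proc.communicate()
  for line in out.splitlines():
    (key, _, value) = line.partition('=')
    os.environ[key] = value
  return not proc.returncode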
def SetupPlatformBuildEnvironment(opts):
"""Performs any platform specific setup.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
True if successful.
"""
if opts.target_platform == 'android':
CopyAndSaveOriginalEnvironmentVars()
return SetupAndroidBuildEnvironment(opts)
elif opts.target_platform == 'cros':
return SetupCrosRepo()
return True
def CreateBisectDirectoryAndSetupDepot(opts):
"""Sets up a subdirectory 'bisect' and then retrieves a copy of the depot
there using gclient.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
Returns 0 on success, otherwise 1.
"""
if not CreateAndChangeToSourceDirectory(opts.working_directory):
print 'Error: Could not create bisect directory.'
print
return 1
if not SetupGitDepot(opts):
print 'Error: Failed to grab source.'
print
return 1
return 0

View File

@ -1,109 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all EXE and DLL files in the provided directory were built
correctly.
In essence it runs a subset of BinScope tests, ensuring that binaries have
/NXCOMPAT, /DYNAMICBASE and /SAFESEH.
"""
import os
import optparse
import sys
# Find /third_party/pefile based on current directory and script path.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'pefile'))
import pefile
PE_FILE_EXTENSIONS = ['.exe', '.dll']
DYNAMICBASE_FLAG = 0x0040
NXCOMPAT_FLAG = 0x0100
NO_SEH_FLAG = 0x0400
MACHINE_TYPE_AMD64 = 0x8664
# Please do not add your file here without confirming that it indeed doesn't
# require /NXCOMPAT and /DYNAMICBASE. Contact cpu@chromium.org or your local
# Windows guru for advice.
EXCLUDED_FILES = ['chrome_frame_mini_installer.exe',
'mini_installer.exe',
'wow_helper.exe',
'xinput1_3.dll' # Microsoft DirectX redistributable.
]
def IsPEFile(path):
return (os.path.isfile(path) and
os.path.splitext(path)[1].lower() in PE_FILE_EXTENSIONS and
os.path.basename(path) not in EXCLUDED_FILES)
def main(options, args):
directory = args[0]
pe_total = 0
pe_passed = 0
for file in os.listdir(directory):
path = os.path.abspath(os.path.join(directory, file))
if not IsPEFile(path):
continue
pe = pefile.PE(path, fast_load=True)
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG']])
pe_total = pe_total + 1
success = True
# Check for /DYNAMICBASE.
if pe.OPTIONAL_HEADER.DllCharacteristics & DYNAMICBASE_FLAG:
if options.verbose:
print "Checking %s for /DYNAMICBASE... PASS" % path
else:
success = False
print "Checking %s for /DYNAMICBASE... FAIL" % path
# Check for /NXCOMPAT.
if pe.OPTIONAL_HEADER.DllCharacteristics & NXCOMPAT_FLAG:
if options.verbose:
print "Checking %s for /NXCOMPAT... PASS" % path
else:
success = False
print "Checking %s for /NXCOMPAT... FAIL" % path
# Check for /SAFESEH. Binaries should meet one of the following
# criteria:
# 1) Have no SEH table as indicated by the DLL characteristics
# 2) Have a LOAD_CONFIG section containing a valid SEH table
# 3) Be a 64-bit binary, in which case /SAFESEH isn't required
#
# Refer to the following MSDN article for more information:
# http://msdn.microsoft.com/en-us/library/9a89h429.aspx
if (pe.OPTIONAL_HEADER.DllCharacteristics & NO_SEH_FLAG or
(hasattr(pe, "DIRECTORY_ENTRY_LOAD_CONFIG") and
pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerCount > 0 and
pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.SEHandlerTable != 0) or
pe.FILE_HEADER.Machine == MACHINE_TYPE_AMD64):
if options.verbose:
print "Checking %s for /SAFESEH... PASS" % path
else:
success = False
print "Checking %s for /SAFESEH... FAIL" % path
# Update tally.
if success:
pe_passed = pe_passed + 1
print "Result: %d files found, %d files passed" % (pe_total, pe_passed)
if pe_passed != pe_total:
sys.exit(1)
if __name__ == '__main__':
usage = "Usage: %prog [options] DIRECTORY"
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option("-v", "--verbose", action="store_true",
default=False, help="Print debug logging")
options, args = option_parser.parse_args()
if not args:
option_parser.print_help()
sys.exit(0)
main(options, args)

View File

@ -1,3 +0,0 @@
skip_child_includes = [
"testdata",
]

View File

@ -1,25 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for checkdeps tool.
"""
def CheckChange(input_api, output_api):
results = []
results.extend(input_api.canned_checks.RunUnitTests(
input_api, output_api,
[input_api.os_path.join(input_api.PresubmitLocalPath(),
'checkdeps_test.py')]))
return results
# Mandatory entrypoint.
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
# Mandatory entrypoint.
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)

View File

@ -1,526 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that files include headers from allowed directories.
Checks DEPS files in the source tree for rules, and applies those rules to
"#include" commands in source files. Any source file including something not
permitted by the DEPS files will fail.
The format of the deps file:
First you have the normal module-level deps. These are the ones used by
gclient. An example would be:
deps = {
"base":"http://foo.bar/trunk/base"
}
DEPS files not in the top-level of a module won't need this. Then you
have any additional include rules. You can add (using "+") or subtract
(using "-") from the previously specified rules (including
module-level deps). You can also specify a path that is allowed for
now but that we intend to remove, using "!"; this is treated the same
as "+" when check_deps is run by our bots, but a presubmit step will
show a warning if you add a new include of a file that is only allowed
by "!".
Note that for .java files, there is currently no difference between
"+" and "!", even in the presubmit step.
include_rules = [
# Code should be able to use base (it's specified in the module-level
# deps above), but nothing in "base/evil" because it's evil.
"-base/evil",
# But this one subdirectory of evil is OK.
"+base/evil/not",
# And it can include files from this other directory even though there is
# no deps rule for it.
"+tools/crime_fighter",
# This dependency is allowed for now but work is ongoing to remove it,
# so you shouldn't add further dependencies on it.
"!base/evil/ok_for_now.h",
]
If you have certain include rules that should only be applied for some
files within this directory and subdirectories, you can write a
section named specific_include_rules that is a hash map of regular
expressions to the list of rules that should apply to files matching
them. Note that such rules will always be applied before the rules
from 'include_rules' have been applied, but the order in which rules
associated with different regular expressions is applied is arbitrary.
specific_include_rules = {
".*_(unit|browser|api)test\.cc": [
"+libraries/testsupport",
],
}
DEPS files may be placed anywhere in the tree. Each one applies to all
subdirectories, where there may be more DEPS files that provide additions or
subtractions for their own sub-trees.
There is an implicit rule for the current directory (where the DEPS file lives)
and all of its subdirectories. This prevents you from having to explicitly
allow the current directory everywhere. This implicit rule is applied first,
so you can modify or remove it using the normal include rules.
The rules are processed in order. This means you can explicitly allow a higher
directory and then take away permissions from sub-parts, or the reverse.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and use
only lowercase.
"""
import os
import optparse
import re
import subprocess
import sys
import copy
import cpp_checker
import java_checker
import results
from rules import Rule, Rules
# Variable name used in the DEPS file to add or subtract include files from
# the module-level deps.
INCLUDE_RULES_VAR_NAME = 'include_rules'
# Variable name used in the DEPS file to add or subtract include files
# from module-level deps specific to files whose basename (last
# component of path) matches a given regular expression.
SPECIFIC_INCLUDE_RULES_VAR_NAME = 'specific_include_rules'
# Optionally present in the DEPS file to list subdirectories which should not
# be checked. This allows us to skip third party code, for example.
SKIP_SUBDIRS_VAR_NAME = 'skip_child_includes'
def NormalizePath(path):
"""Returns a path normalized to how we write DEPS rules and compare paths.
"""
return path.lower().replace('\\', '/')
def _IsTestFile(filename):
"""Does a rudimentary check to try to skip test files; this could be
improved but is good enough for now.
"""
return re.match('(test|mock|dummy)_.*|.*_[a-z]*test\.(cc|mm|java)', filename)
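# A few hypothetical filenames to illustrate the heuristic above:
#   _IsTestFile('foo_unittest.cc')  -> matches (skipped)
#   _IsTestFile('mock_widget.cc')   -> matches (skipped)
#   _IsTestFile('foo.cc')           -> no match (checked)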
class DepsChecker(object):
"""Parses include_rules from DEPS files and can verify files in the
source tree against them.
"""
def __init__(self,
base_directory=None,
verbose=False,
being_tested=False,
ignore_temp_rules=False,
skip_tests=False):
"""Creates a new DepsChecker.
Args:
base_directory: OS-compatible path to root of checkout, e.g. C:\chr\src.
verbose: Set to true for debug output.
being_tested: Set to true to ignore the DEPS file at tools/checkdeps/DEPS.
"""
self.base_directory = base_directory
self.verbose = verbose
self._under_test = being_tested
self._ignore_temp_rules = ignore_temp_rules
self._skip_tests = skip_tests
if not base_directory:
self.base_directory = os.path.abspath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..'))
self.results_formatter = results.NormalResultsFormatter(verbose)
self.git_source_directories = set()
self._AddGitSourceDirectories()
# Map of normalized directory paths to rules to use for those
# directories, or None for directories that should be skipped.
self.directory_rules = {}
self._ApplyDirectoryRulesAndSkipSubdirs(Rules(), self.base_directory)
def Report(self):
"""Prints a report of results, and returns an exit code for the process."""
if self.results_formatter.GetResults():
self.results_formatter.PrintResults()
return 1
print '\nSUCCESS\n'
return 0
def _ApplyRules(self, existing_rules, includes, specific_includes, cur_dir):
"""Applies the given include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
includes: The list of rules from the "include_rules" section of DEPS.
specific_includes: E.g. {'.*_unittest\.cc': ['+foo', '-blat']} rules
from the "specific_include_rules" section of DEPS.
cur_dir: The current directory, normalized path. We will create an
implicit rule that allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.deepcopy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
if cur_dir.startswith(
NormalizePath(os.path.normpath(self.base_directory))):
relative_dir = cur_dir[len(self.base_directory) + 1:]
source = relative_dir
if len(source) == 0:
source = 'top level' # Make the help string a little more meaningful.
rules.AddRule('+' + relative_dir, 'Default rule for ' + source)
else:
raise Exception('Internal error: base directory is not at the beginning' +
' for\n %s and base dir\n %s' %
(cur_dir, self.base_directory))
def ApplyOneRule(rule_str, dependee_regexp=None):
"""Deduces a sensible description for the rule being added, and
adds the rule with its description to |rules|.
If we are ignoring temporary rules, this function does nothing
for rules beginning with the Rule.TEMP_ALLOW character.
"""
if self._ignore_temp_rules and rule_str.startswith(Rule.TEMP_ALLOW):
return
rule_block_name = 'include_rules'
if dependee_regexp:
rule_block_name = 'specific_include_rules'
if not relative_dir:
rule_description = 'the top level %s' % rule_block_name
else:
rule_description = relative_dir + "'s %s" % rule_block_name
rules.AddRule(rule_str, rule_description, dependee_regexp)
# Apply the additional explicit rules.
for (_, rule_str) in enumerate(includes):
ApplyOneRule(rule_str)
# Finally, apply the specific rules.
for regexp, specific_rules in specific_includes.iteritems():
for rule_str in specific_rules:
ApplyOneRule(rule_str, regexp)
return rules
def _ApplyDirectoryRules(self, existing_rules, dir_name):
"""Combines rules from the existing rules and the new directory.
Any directory can contain a DEPS file. Toplevel DEPS files can contain
module dependencies which are used by gclient. We use these, along with
additional include rules and implicit rules for the given directory, to
come up with a combined set of rules to apply for the directory.
Args:
existing_rules: The rules for the parent directory. We'll add-on to these.
dir_name: The directory name that the deps file may live in (if
it exists). This will also be used to generate the
implicit rules. This is a non-normalized path.
Returns: A tuple containing: (1) the combined set of rules to apply to the
sub-tree, and (2) a list of all subdirectories that should NOT be
checked, as specified in the DEPS file (if any).
"""
norm_dir_name = NormalizePath(dir_name)
# Check for a .svn directory in this directory, or check whether this
# directory is contained in the git source directories. This tells us whether
# it is a source directory that should be checked.
if not (os.path.exists(os.path.join(dir_name, ".svn")) or
(norm_dir_name in self.git_source_directories)):
return (None, [])
# Check the DEPS file in this directory.
if self.verbose:
print 'Applying rules from', dir_name
def FromImpl(_unused, _unused2):
pass # NOP function so "From" doesn't fail.
def FileImpl(_unused):
pass # NOP function so "File" doesn't fail.
class _VarImpl:
def __init__(self, local_scope):
self._local_scope = local_scope
def Lookup(self, var_name):
"""Implements the Var syntax."""
if var_name in self._local_scope.get('vars', {}):
return self._local_scope['vars'][var_name]
raise Exception('Var is not defined: %s' % var_name)
local_scope = {}
global_scope = {
'File': FileImpl,
'From': FromImpl,
'Var': _VarImpl(local_scope).Lookup,
}
deps_file = os.path.join(dir_name, 'DEPS')
# The second conditional here is to disregard the
# tools/checkdeps/DEPS file while running tests. This DEPS file
# has a skip_child_includes for 'testdata' which is necessary for
# running production tests, since there are intentional DEPS
# violations under the testdata directory. On the other hand when
# running tests, we absolutely need to verify the contents of that
# directory to trigger those intended violations and see that they
# are handled correctly.
if os.path.isfile(deps_file) and (
not self._under_test or not os.path.split(dir_name)[1] == 'checkdeps'):
execfile(deps_file, global_scope, local_scope)
elif self.verbose:
print ' No deps file found in', dir_name
# Even if a DEPS file does not exist we still invoke ApplyRules
# to apply the implicit "allow" rule for the current directory
include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, [])
specific_include_rules = local_scope.get(SPECIFIC_INCLUDE_RULES_VAR_NAME,
{})
skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, [])
return (self._ApplyRules(existing_rules, include_rules,
specific_include_rules, norm_dir_name),
skip_subdirs)
def _ApplyDirectoryRulesAndSkipSubdirs(self, parent_rules, dir_path):
"""Given |parent_rules| and a subdirectory |dir_path| from the
directory that owns the |parent_rules|, add |dir_path|'s rules to
|self.directory_rules|, and add None entries for any of its
subdirectories that should be skipped.
"""
directory_rules, excluded_subdirs = self._ApplyDirectoryRules(parent_rules,
dir_path)
self.directory_rules[NormalizePath(dir_path)] = directory_rules
for subdir in excluded_subdirs:
self.directory_rules[NormalizePath(
os.path.normpath(os.path.join(dir_path, subdir)))] = None
def GetDirectoryRules(self, dir_path):
"""Returns a Rules object to use for the given directory, or None
if the given directory should be skipped. This takes care of
first building rules for parent directories (up to
self.base_directory) if needed.
Args:
dir_path: A real (non-normalized) path to the directory you want
rules for.
"""
norm_dir_path = NormalizePath(dir_path)
if not norm_dir_path.startswith(
NormalizePath(os.path.normpath(self.base_directory))):
dir_path = os.path.join(self.base_directory, dir_path)
norm_dir_path = NormalizePath(dir_path)
parent_dir = os.path.dirname(dir_path)
parent_rules = None
if not norm_dir_path in self.directory_rules:
parent_rules = self.GetDirectoryRules(parent_dir)
# We need to check for an entry for our dir_path again, in case we
# are at a path e.g. A/B/C where A/B/DEPS specifies the C
# subdirectory to be skipped; in this case, the invocation to
# GetDirectoryRules(parent_dir) has already filled in an entry for
# A/B/C.
if not norm_dir_path in self.directory_rules:
if not parent_rules:
# If the parent directory should be skipped, then the current
# directory should also be skipped.
self.directory_rules[norm_dir_path] = None
else:
self._ApplyDirectoryRulesAndSkipSubdirs(parent_rules, dir_path)
return self.directory_rules[norm_dir_path]
def CheckDirectory(self, start_dir):
"""Checks all relevant source files in the specified directory and
its subdirectories for compliance with DEPS rules throughout the
tree (starting at |self.base_directory|). |start_dir| must be a
subdirectory of |self.base_directory|.
On completion, self.results_formatter has the results of
processing, and calling Report() will print a report of results.
"""
java = java_checker.JavaChecker(self.base_directory, self.verbose)
cpp = cpp_checker.CppChecker(self.verbose)
checkers = dict(
(extension, checker)
for checker in [java, cpp] for extension in checker.EXTENSIONS)
self._CheckDirectoryImpl(checkers, start_dir)
def _CheckDirectoryImpl(self, checkers, dir_name):
rules = self.GetDirectoryRules(dir_name)
if rules == None:
return
# Collect a list of all files and directories to check.
files_to_check = []
dirs_to_check = []
contents = os.listdir(dir_name)
for cur in contents:
full_name = os.path.join(dir_name, cur)
if os.path.isdir(full_name):
dirs_to_check.append(full_name)
elif os.path.splitext(full_name)[1] in checkers:
if not self._skip_tests or not _IsTestFile(cur):
files_to_check.append(full_name)
# First check all files in this directory.
for cur in files_to_check:
checker = checkers[os.path.splitext(cur)[1]]
file_status = checker.CheckFile(rules, cur)
if file_status.HasViolations():
self.results_formatter.AddError(file_status)
# Next recurse into the subdirectories.
for cur in dirs_to_check:
self._CheckDirectoryImpl(checkers, cur)
def CheckAddedCppIncludes(self, added_includes):
"""This is used from PRESUBMIT.py to check new #include statements added in
the change being presubmit checked.
Args:
added_includes: ((file_path, (include_line, include_line, ...), ...)
Return:
A list of tuples, (bad_file_path, rule_type, rule_description)
where rule_type is one of Rule.DISALLOW or Rule.TEMP_ALLOW and
rule_description is human-readable. Empty if no problems.
"""
cpp = cpp_checker.CppChecker(self.verbose)
problems = []
for file_path, include_lines in added_includes:
if not cpp.IsCppFile(file_path):
continue
rules_for_file = self.GetDirectoryRules(os.path.dirname(file_path))
if rules_for_file:
for line in include_lines:
is_include, violation = cpp.CheckLine(
rules_for_file, line, file_path, True)
if violation:
rule_type = violation.violated_rule.allow
if rule_type != Rule.ALLOW:
violation_text = results.NormalResultsFormatter.FormatViolation(
violation, self.verbose)
problems.append((file_path, rule_type, violation_text))
return problems
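# Hypothetical sketch of how a presubmit check might drive the method above
# (the file path and include line are made up):
#
#   checker = DepsChecker(verbose=False)
#   problems = checker.CheckAddedCppIncludes(
#       [('chrome/browser/foo.cc', ['#include "content/public/bar.h"'])])
#   for (path, rule_type, description) in problems:
#     print path, rule_type
#     print description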
def _AddGitSourceDirectories(self):
"""Adds any directories containing sources managed by git to
self.git_source_directories.
"""
if not os.path.exists(os.path.join(self.base_directory, '.git')):
return
popen_out = os.popen('cd %s && git ls-files --full-name .' %
subprocess.list2cmdline([self.base_directory]))
for line in popen_out.readlines():
dir_name = os.path.join(self.base_directory, os.path.dirname(line))
# Add the directory as well as all the parent directories. Use
# forward slashes and lower case to normalize paths.
while dir_name != self.base_directory:
self.git_source_directories.add(NormalizePath(dir_name))
dir_name = os.path.dirname(dir_name)
self.git_source_directories.add(NormalizePath(self.base_directory))
def PrintUsage():
print """Usage: python checkdeps.py [--root <root>] [tocheck]
--root ROOT Specifies the repository root. This defaults to "../../.."
relative to the script file. This will be correct given the
normal location of the script in "<root>/tools/checkdeps".
--(others) There are a few lesser-used options; run with --help to show them.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checkdeps.py
python checkdeps.py --root c:\\source chrome"""
def main():
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--root',
default='', dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option(
'', '--ignore-temp-rules',
action='store_true', dest='ignore_temp_rules', default=False,
help='Ignore !-prefixed (temporary) rules.')
option_parser.add_option(
'', '--generate-temp-rules',
action='store_true', dest='generate_temp_rules', default=False,
help='Print rules to temporarily allow files that fail '
'dependency checking.')
option_parser.add_option(
'', '--count-violations',
action='store_true', dest='count_violations', default=False,
help='Count #includes in violation of intended rules.')
option_parser.add_option(
'', '--skip-tests',
action='store_true', dest='skip_tests', default=False,
help='Skip checking test files (best effort).')
option_parser.add_option(
'-v', '--verbose',
action='store_true', default=False,
help='Print debug logging')
options, args = option_parser.parse_args()
deps_checker = DepsChecker(options.base_directory,
verbose=options.verbose,
ignore_temp_rules=options.ignore_temp_rules,
skip_tests=options.skip_tests)
# Figure out which directory we have to check.
start_dir = deps_checker.base_directory
if len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(
os.path.join(deps_checker.base_directory, args[0]))
elif len(args) >= 2 or (options.generate_temp_rules and
options.count_violations):
# More than one argument, or incompatible flags, we don't handle this.
PrintUsage()
return 1
print 'Using base directory:', deps_checker.base_directory
print 'Checking:', start_dir
if options.generate_temp_rules:
deps_checker.results_formatter = results.TemporaryRulesFormatter()
elif options.count_violations:
deps_checker.results_formatter = results.CountViolationsFormatter()
deps_checker.CheckDirectory(start_dir)
return deps_checker.Report()
if '__main__' == __name__:
sys.exit(main())

View File

@ -1,177 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for checkdeps.
"""
import os
import unittest
import checkdeps
import results
class CheckDepsTest(unittest.TestCase):
def setUp(self):
self.deps_checker = checkdeps.DepsChecker(being_tested=True)
def ImplTestRegularCheckDepsRun(self, ignore_temp_rules, skip_tests):
self.deps_checker._ignore_temp_rules = ignore_temp_rules
self.deps_checker._skip_tests = skip_tests
self.deps_checker.CheckDirectory(
os.path.join(self.deps_checker.base_directory,
'tools/checkdeps/testdata'))
problems = self.deps_checker.results_formatter.GetResults()
if skip_tests:
self.failUnlessEqual(3, len(problems))
else:
self.failUnlessEqual(4, len(problems))
def VerifySubstringsInProblems(key_path, substrings_in_sequence):
"""Finds the problem in |problems| that contains |key_path|,
then verifies that each of |substrings_in_sequence| occurs in
that problem, in the order they appear in
|substrings_in_sequence|.
"""
found = False
key_path = os.path.normpath(key_path)
for problem in problems:
index = problem.find(key_path)
if index != -1:
for substring in substrings_in_sequence:
index = problem.find(substring, index + 1)
self.failUnless(index != -1, '%s in %s' % (substring, problem))
found = True
break
if not found:
self.fail('Found no problem for file %s' % key_path)
if ignore_temp_rules:
VerifySubstringsInProblems('testdata/allowed/test.h',
['-tools/checkdeps/testdata/disallowed',
'temporarily_allowed.h',
'-third_party/explicitly_disallowed',
'Because of no rule applying'])
else:
VerifySubstringsInProblems('testdata/allowed/test.h',
['-tools/checkdeps/testdata/disallowed',
'-third_party/explicitly_disallowed',
'Because of no rule applying'])
VerifySubstringsInProblems('testdata/disallowed/test.h',
['-third_party/explicitly_disallowed',
'Because of no rule applying',
'Because of no rule applying'])
VerifySubstringsInProblems('disallowed/allowed/test.h',
['-third_party/explicitly_disallowed',
'Because of no rule applying',
'Because of no rule applying'])
if not skip_tests:
VerifySubstringsInProblems('allowed/not_a_test.cc',
['-tools/checkdeps/testdata/disallowed'])
def testRegularCheckDepsRun(self):
self.ImplTestRegularCheckDepsRun(False, False)
def testRegularCheckDepsRunIgnoringTempRules(self):
self.ImplTestRegularCheckDepsRun(True, False)
def testRegularCheckDepsRunSkipTests(self):
self.ImplTestRegularCheckDepsRun(False, True)
def testRegularCheckDepsRunIgnoringTempRulesSkipTests(self):
self.ImplTestRegularCheckDepsRun(True, True)
def CountViolations(self, ignore_temp_rules):
self.deps_checker._ignore_temp_rules = ignore_temp_rules
self.deps_checker.results_formatter = results.CountViolationsFormatter()
self.deps_checker.CheckDirectory(
os.path.join(self.deps_checker.base_directory,
'tools/checkdeps/testdata'))
return self.deps_checker.results_formatter.GetResults()
def testCountViolations(self):
self.failUnlessEqual('10', self.CountViolations(False))
def testCountViolationsIgnoringTempRules(self):
self.failUnlessEqual('11', self.CountViolations(True))
def testTempRulesGenerator(self):
self.deps_checker.results_formatter = results.TemporaryRulesFormatter()
self.deps_checker.CheckDirectory(
os.path.join(self.deps_checker.base_directory,
'tools/checkdeps/testdata/allowed'))
temp_rules = self.deps_checker.results_formatter.GetResults()
expected = [u' "!third_party/explicitly_disallowed/bad.h",',
u' "!third_party/no_rule/bad.h",',
u' "!tools/checkdeps/testdata/disallowed/bad.h",',
u' "!tools/checkdeps/testdata/disallowed/teststuff/bad.h",']
self.failUnlessEqual(expected, temp_rules)
def testCheckAddedIncludesAllGood(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['tools/checkdeps/testdata/allowed/test.cc',
['#include "tools/checkdeps/testdata/allowed/good.h"',
'#include "tools/checkdeps/testdata/disallowed/allowed/good.h"']
]])
self.failIf(problems)
def testCheckAddedIncludesManyGarbageLines(self):
garbage_lines = ["My name is Sam%d\n" % num for num in range(50)]
problems = self.deps_checker.CheckAddedCppIncludes(
[['tools/checkdeps/testdata/allowed/test.cc', garbage_lines]])
self.failIf(problems)
def testCheckAddedIncludesNoRule(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['tools/checkdeps/testdata/allowed/test.cc',
['#include "no_rule_for_this/nogood.h"']
]])
self.failUnless(problems)
def testCheckAddedIncludesSkippedDirectory(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['tools/checkdeps/testdata/disallowed/allowed/skipped/test.cc',
['#include "whatever/whocares.h"']
]])
self.failIf(problems)
def testCheckAddedIncludesTempAllowed(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['tools/checkdeps/testdata/allowed/test.cc',
['#include "tools/checkdeps/testdata/disallowed/temporarily_allowed.h"']
]])
self.failUnless(problems)
def testCopyIsDeep(self):
# Regression test for a bug where we were making shallow copies of
# Rules objects and therefore all Rules objects shared the same
# dictionary for specific rules.
#
# The first pair should bring in a rule from testdata/allowed/DEPS
# into that global dictionary that allows the
# temp_allowed_for_tests.h file to be included in files ending
# with _unittest.cc, and the second pair should completely fail
# once the bug is fixed, but succeed (with a temporary allowance)
# if the bug is in place.
problems = self.deps_checker.CheckAddedCppIncludes(
[['tools/checkdeps/testdata/allowed/test.cc',
['#include "tools/checkdeps/testdata/disallowed/temporarily_allowed.h"']
],
['tools/checkdeps/testdata/disallowed/foo_unittest.cc',
['#include "tools/checkdeps/testdata/bongo/temp_allowed_for_tests.h"']
]])
# With the bug in place, there would be two problems reported, and
# the second would be for foo_unittest.cc.
self.failUnless(len(problems) == 1)
self.failUnless(problems[0][0].endswith('/test.cc'))
if __name__ == '__main__':
unittest.main()

View File

@ -1,113 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks C++ and Objective-C files for illegal includes."""
import codecs
import os
import re
import results
from rules import Rule, MessageRule
class CppChecker(object):
EXTENSIONS = [
'.h',
'.cc',
'.cpp',
'.m',
'.mm',
]
# The maximum number of non-include lines we can see before giving up.
_MAX_UNINTERESTING_LINES = 50
  # The maximum line length; this is to be efficient in the case of very long
# lines (which can't be #includes).
_MAX_LINE_LENGTH = 128
# This regular expression will be used to extract filenames from include
# statements.
_EXTRACT_INCLUDE_PATH = re.compile(
'[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')
def __init__(self, verbose):
self._verbose = verbose
def CheckLine(self, rules, line, dependee_path, fail_on_temp_allow=False):
"""Checks the given line with the given rule set.
Returns a tuple (is_include, dependency_violation) where
is_include is True only if the line is an #include or #import
statement, and dependency_violation is an instance of
results.DependencyViolation if the line violates a rule, or None
if it does not.
"""
found_item = self._EXTRACT_INCLUDE_PATH.match(line)
if not found_item:
return False, None # Not a match
include_path = found_item.group(1)
if '\\' in include_path:
return True, results.DependencyViolation(
include_path,
MessageRule('Include paths may not include backslashes.'),
rules)
if '/' not in include_path:
# Don't fail when no directory is specified. We may want to be more
# strict about this in the future.
if self._verbose:
        print ' WARNING: include specifies no directory: ' + include_path
return True, None
rule = rules.RuleApplyingTo(include_path, dependee_path)
if (rule.allow == Rule.DISALLOW or
(fail_on_temp_allow and rule.allow == Rule.TEMP_ALLOW)):
return True, results.DependencyViolation(include_path, rule, rules)
return True, None
def CheckFile(self, rules, filepath):
if self._verbose:
print 'Checking: ' + filepath
dependee_status = results.DependeeStatus(filepath)
ret_val = '' # We'll collect the error messages in here
last_include = 0
with codecs.open(filepath, encoding='utf-8') as f:
in_if0 = 0
for line_num, line in enumerate(f):
if line_num - last_include > self._MAX_UNINTERESTING_LINES:
break
line = line.strip()
# Check to see if we're at / inside a #if 0 block
if line.startswith('#if 0'):
in_if0 += 1
continue
if in_if0 > 0:
if line.startswith('#if'):
in_if0 += 1
elif line.startswith('#endif'):
in_if0 -= 1
continue
is_include, violation = self.CheckLine(rules, line, filepath)
if is_include:
last_include = line_num
if violation:
dependee_status.AddViolation(violation)
return dependee_status
@staticmethod
def IsCppFile(file_path):
"""Returns True iff the given path ends in one of the extensions
handled by this checker.
"""
return os.path.splitext(file_path)[1] in CppChecker.EXTENSIONS

View File
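A side note on the include-extraction step in the cpp_checker.py file removed above: the _EXTRACT_INCLUDE_PATH pattern only matches quoted #include/#import directives, so angle-bracket system includes are never examined. The following standalone sketch is illustrative only (it is not part of the deleted file; it merely copies the pattern shown above):

import re

# Copy of the pattern from CppChecker._EXTRACT_INCLUDE_PATH, for illustration.
_EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')

for line in ['#include "tools/checkdeps/testdata/allowed/good.h"',
             '  #import "foo/bar.h"',
             '#include <vector>']:
  match = _EXTRACT_INCLUDE_PATH.match(line)
  # Prints the captured path for the quoted forms and None for the bracket form.
  print line, '->', (match.group(1) if match else None)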

@ -1,107 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks Java files for illegal imports."""
import codecs
import os
import re
import results
from rules import Rule
class JavaChecker(object):
"""Import checker for Java files.
The CheckFile method uses real filesystem paths, but Java imports work in
terms of package names. To deal with this, we have an extra "prescan" pass
that reads all the .java files and builds a mapping of class name -> filepath.
In CheckFile, we convert each import statement into a real filepath, and check
that against the rules in the DEPS files.
Note that in Java you can always use classes in the same directory without an
explicit import statement, so these imports can't be blocked with DEPS files.
But that shouldn't be a problem, because same-package imports are pretty much
always correct by definition. (If we find a case where this is *not* correct,
it probably means the package is too big and needs to be split up.)
Properties:
_classmap: dict of fully-qualified Java class name -> filepath
"""
EXTENSIONS = ['.java']
def __init__(self, base_directory, verbose):
self._base_directory = base_directory
self._verbose = verbose
self._classmap = {}
self._PrescanFiles()
def _PrescanFiles(self):
for root, dirs, files in os.walk(self._base_directory):
# Skip unwanted subdirectories. TODO(husky): it would be better to do
# this via the skip_child_includes flag in DEPS files. Maybe hoist this
# prescan logic into checkdeps.py itself?
      for d in dirs[:]:  # iterate over a copy; entries are removed from dirs below
# Skip hidden directories.
if d.startswith('.'):
dirs.remove(d)
# Skip the "out" directory, as dealing with generated files is awkward.
# We don't want paths like "out/Release/lib.java" in our DEPS files.
# TODO(husky): We need some way of determining the "real" path to
# a generated file -- i.e., where it would be in source control if
# it weren't generated.
if d == 'out':
dirs.remove(d)
# Skip third-party directories.
if d == 'third_party':
dirs.remove(d)
for f in files:
if f.endswith('.java'):
self._PrescanFile(os.path.join(root, f))
def _PrescanFile(self, filepath):
if self._verbose:
print 'Prescanning: ' + filepath
with codecs.open(filepath, encoding='utf-8') as f:
short_class_name, _ = os.path.splitext(os.path.basename(filepath))
for line in f:
for package in re.findall('^package ([\w\.]+);', line):
full_class_name = package + '.' + short_class_name
if full_class_name in self._classmap:
print 'WARNING: multiple definitions of %s:' % full_class_name
print ' ' + filepath
print ' ' + self._classmap[full_class_name]
print
else:
self._classmap[full_class_name] = filepath
return
print 'WARNING: no package definition found in %s' % filepath
def CheckFile(self, rules, filepath):
if self._verbose:
print 'Checking: ' + filepath
dependee_status = results.DependeeStatus(filepath)
with codecs.open(filepath, encoding='utf-8') as f:
for line in f:
for clazz in re.findall('^import\s+(?:static\s+)?([\w\.]+)\s*;', line):
if clazz not in self._classmap:
# Importing a class from outside the Chromium tree. That's fine --
# it's probably a Java or Android system class.
continue
include_path = os.path.relpath(
self._classmap[clazz], self._base_directory)
# Convert Windows paths to Unix style, as used in DEPS files.
include_path = include_path.replace(os.path.sep, '/')
rule = rules.RuleApplyingTo(include_path, filepath)
if rule.allow == Rule.DISALLOW:
dependee_status.AddViolation(
results.DependencyViolation(include_path, rule, rules))
if '{' in line:
# This is code, so we're finished reading imports for this file.
break
return dependee_status

View File
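For reference, the "prescan" described in the java_checker.py docstring above keys its _classmap on the package name plus the class name derived from the filename. A tiny illustrative sketch of that mapping step (assumptions: a hypothetical file named Foo.java containing the package line below; this is not code from the deleted file):

import re

line = 'package org.chromium.example;'
short_class_name = 'Foo'  # would come from os.path.splitext(os.path.basename(filepath))
for package in re.findall('^package ([\w\.]+);', line):
  # The fully-qualified name used as the _classmap key.
  print package + '.' + short_class_name  # prints: org.chromium.example.Foo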

@ -1,140 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Results object and results formatters for checkdeps tool."""
class DependencyViolation(object):
"""A single dependency violation."""
def __init__(self, include_path, violated_rule, rules):
# The include or import path that is in violation of a rule.
self.include_path = include_path
# The violated rule.
self.violated_rule = violated_rule
# The set of rules containing self.violated_rule.
self.rules = rules
class DependeeStatus(object):
"""Results object for a dependee file."""
def __init__(self, dependee_path):
# Path of the file whose nonconforming dependencies are listed in
# self.violations.
self.dependee_path = dependee_path
# List of DependencyViolation objects that apply to the dependee
# file. May be empty.
self.violations = []
def AddViolation(self, violation):
"""Adds a violation."""
self.violations.append(violation)
def HasViolations(self):
"""Returns True if this dependee is violating one or more rules."""
return not not self.violations
class ResultsFormatter(object):
"""Base class for results formatters."""
def AddError(self, dependee_status):
"""Add a formatted result to |self.results| for |dependee_status|,
which is guaranteed to return True for
|dependee_status.HasViolations|.
"""
raise NotImplementedError()
def GetResults(self):
"""Returns the results. May be overridden e.g. to process the
results that have been accumulated.
"""
raise NotImplementedError()
def PrintResults(self):
"""Prints the results to stdout."""
raise NotImplementedError()
class NormalResultsFormatter(ResultsFormatter):
"""A results formatting object that produces the classical,
detailed, human-readable output of the checkdeps tool.
"""
def __init__(self, verbose):
self.results = []
self.verbose = verbose
def AddError(self, dependee_status):
lines = []
lines.append('\nERROR in %s' % dependee_status.dependee_path)
for violation in dependee_status.violations:
lines.append(self.FormatViolation(violation, self.verbose))
self.results.append('\n'.join(lines))
@staticmethod
def FormatViolation(violation, verbose=False):
lines = []
if verbose:
lines.append(' For %s' % violation.rules)
lines.append(
' Illegal include: "%s"\n Because of %s' %
(violation.include_path, str(violation.violated_rule)))
return '\n'.join(lines)
def GetResults(self):
return self.results
def PrintResults(self):
for result in self.results:
print result
if self.results:
print '\nFAILED\n'
class TemporaryRulesFormatter(ResultsFormatter):
"""A results formatter that produces a single line per nonconforming
include. The combined output is suitable for directly pasting into a
DEPS file as a list of temporary-allow rules.
"""
def __init__(self):
self.violations = set()
def AddError(self, dependee_status):
for violation in dependee_status.violations:
self.violations.add(violation.include_path)
def GetResults(self):
return [' "!%s",' % path for path in sorted(self.violations)]
def PrintResults(self):
for result in self.GetResults():
print result
class CountViolationsFormatter(ResultsFormatter):
"""A results formatter that produces a number, the count of #include
statements that are in violation of the dependency rules.
Note that you normally want to instantiate DepsChecker with
ignore_temp_rules=True when you use this formatter.
"""
def __init__(self):
self.count = 0
def AddError(self, dependee_status):
self.count += len(dependee_status.violations)
def GetResults(self):
return '%d' % self.count
def PrintResults(self):
print self.count

View File

@ -1,151 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base classes to represent dependency rules, used by checkdeps.py"""
import os
import re
class Rule(object):
"""Specifies a single rule for an include, which can be one of
ALLOW, DISALLOW and TEMP_ALLOW.
"""
# These are the prefixes used to indicate each type of rule. These
# are also used as values for self.allow to indicate which type of
# rule this is.
ALLOW = '+'
DISALLOW = '-'
TEMP_ALLOW = '!'
def __init__(self, allow, directory, source):
self.allow = allow
self._dir = directory
self._source = source
def __str__(self):
return '"%s%s" from %s.' % (self.allow, self._dir, self._source)
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + '/')
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + '/')
class MessageRule(Rule):
"""A rule that has a simple message as the reason for failing,
unrelated to directory or source.
"""
def __init__(self, reason):
super(MessageRule, self).__init__(Rule.DISALLOW, '', '')
self._reason = reason
def __str__(self):
return self._reason
def ParseRuleString(rule_string, source):
"""Returns a tuple of a character indicating what type of rule this
is, and a string holding the path the rule applies to.
"""
if not rule_string:
raise Exception('The rule string "%s" is empty\nin %s' %
(rule_string, source))
if not rule_string[0] in [Rule.ALLOW, Rule.DISALLOW, Rule.TEMP_ALLOW]:
raise Exception(
'The rule string "%s" does not begin with a "+", "-" or "!".' %
rule_string)
return (rule_string[0], rule_string[1:])
class Rules(object):
"""Sets of rules for files in a directory.
By default, rules are added to the set of rules applicable to all
dependee files in the directory. Rules may also be added that apply
only to dependee files whose filename (last component of their path)
matches a given regular expression; hence there is one additional
set of rules per unique regular expression.
"""
def __init__(self):
"""Initializes the current rules with an empty rule list for all
files.
"""
# We keep the general rules out of the specific rules dictionary,
# as we need to always process them last.
self._general_rules = []
# Keys are regular expression strings, values are arrays of rules
# that apply to dependee files whose basename matches the regular
# expression. These are applied before the general rules, but
# their internal order is arbitrary.
self._specific_rules = {}
def __str__(self):
result = ['Rules = {\n (apply to all files): [\n%s\n ],' % '\n'.join(
' %s' % x for x in self._general_rules)]
for regexp, rules in self._specific_rules.iteritems():
result.append(' (limited to files matching %s): [\n%s\n ]' % (
regexp, '\n'.join(' %s' % x for x in rules)))
result.append(' }')
return '\n'.join(result)
def AddRule(self, rule_string, source, dependee_regexp=None):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
dependee_regexp: The rule will only be applied to dependee files
whose filename (last component of their path)
matches the expression. None to match all
dependee files.
"""
(rule_type, rule_dir) = ParseRuleString(rule_string, source)
if not dependee_regexp:
rules_to_update = self._general_rules
else:
if dependee_regexp in self._specific_rules:
rules_to_update = self._specific_rules[dependee_regexp]
else:
rules_to_update = []
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
rules_to_update = [x for x in rules_to_update
if not x.ParentOrMatch(rule_dir)]
rules_to_update.insert(0, Rule(rule_type, rule_dir, source))
if not dependee_regexp:
self._general_rules = rules_to_update
else:
self._specific_rules[dependee_regexp] = rules_to_update
def RuleApplyingTo(self, include_path, dependee_path):
"""Returns the rule that applies to |include_path| for a dependee
file located at |dependee_path|.
"""
dependee_filename = os.path.basename(dependee_path)
for regexp, specific_rules in self._specific_rules.iteritems():
if re.match(regexp, dependee_filename):
for rule in specific_rules:
if rule.ChildOrMatch(include_path):
return rule
for rule in self._general_rules:
if rule.ChildOrMatch(include_path):
return rule
return MessageRule('no rule applying.')

View File
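To show how the rules.py module removed above resolves a rule, here is a hedged usage sketch (assuming the module is importable as rules, e.g. when run from tools/checkdeps; the rule strings and paths are made up). Filename-specific rules are consulted before the general ones, and among general rules the most recently added rule covering the path wins.

import rules

r = rules.Rules()
r.AddRule('+allowed', 'example DEPS')
r.AddRule('-disallowed', 'example DEPS')
r.AddRule('+disallowed/teststuff', 'example DEPS',
          dependee_regexp=r'.*_unittest\.cc')

print r.RuleApplyingTo('allowed/good.h', 'foo/bar.cc')     # "+allowed" from example DEPS.
print r.RuleApplyingTo('disallowed/bad.h', 'foo/bar.cc')   # "-disallowed" from example DEPS.
# The regexp rule applies only to dependees whose basename matches .*_unittest\.cc.
print r.RuleApplyingTo('disallowed/teststuff/x.h', 'foo/bar_unittest.cc')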

@ -1,8 +0,0 @@
include_rules = [
"-tools/checkdeps/testdata/disallowed",
"+tools/checkdeps/testdata/allowed",
"-third_party/explicitly_disallowed",
]
skip_child_includes = [
"checkdeps_test",
]

View File

@ -1,12 +0,0 @@
include_rules = [
"+tools/checkdeps/testdata/disallowed/allowed",
"!tools/checkdeps/testdata/disallowed/temporarily_allowed.h",
"+third_party/allowed_may_use",
]
specific_include_rules = {
".*_unittest\.cc": [
"+tools/checkdeps/testdata/disallowed/teststuff",
"!tools/checkdeps/testdata/bongo/temp_allowed_for_tests.h",
]
}

View File

@ -1,5 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/checkdeps/testdata/disallowed/teststuff/good.h"

View File

@ -1,5 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/checkdeps/testdata/disallowed/teststuff/bad.h"

View File

@ -1,11 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/checkdeps/testdata/allowed/good.h"
#include "tools/checkdeps/testdata/disallowed/bad.h"
#include "tools/checkdeps/testdata/disallowed/allowed/good.h"
#include "tools/checkdeps/testdata/disallowed/temporarily_allowed.h"
#include "third_party/explicitly_disallowed/bad.h"
#include "third_party/allowed_may_use/good.h"
#include "third_party/no_rule/bad.h"

View File

@ -1,5 +0,0 @@
include_rules = [
"-disallowed",
"+allowed",
"-third_party/explicitly_disallowed",
]

View File

@ -1,11 +0,0 @@
include_rules = [
"+disallowed/allowed",
"!disallowed/temporarily_allowed.h",
"+third_party/allowed_may_use",
]
specific_include_rules = {
".*_unittest\.cc": [
"+disallowed/teststuff",
]
}

View File

@ -1,5 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "disallowed/teststuff/good.h"

View File

@ -1,5 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "disallowed/teststuff/bad.h"

View File

@ -1,11 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "allowed/good.h"
#include "disallowed/bad.h"
#include "disallowed/allowed/good.h"
#include "disallowed/temporarily_allowed.h"
#include "third_party/explicitly_disallowed/bad.h"
#include "third_party/allowed_may_use/good.h"
#include "third_party/no_rule/bad.h"

View File

@ -1,3 +0,0 @@
skip_child_includes = [
"skipped",
]

View File

@ -1,5 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "whatever/whocares/ok.h"

View File

@ -1,11 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "allowed/good.h"
// Always allowed to include self and parents.
#include "disallowed/good.h"
#include "disallowed/allowed/good.h"
#include "third_party/explicitly_disallowed/bad.h"
#include "third_party/allowed_may_use/bad.h"
#include "third_party/no_rule/bad.h"

View File

@ -1,12 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "allowed/good.h"
// Always allowed to include self.
#include "disallowed/good.h"
#include "disallowed/allowed/good.h"
#include "third_party/explicitly_disallowed/bad.h"
// Only allowed for code under allowed/.
#include "third_party/allowed_may_use/bad.h"
#include "third_party/no_rule/bad.h"

View File

@ -1,3 +0,0 @@
skip_child_includes = [
"skipped",
]

View File

@ -1,5 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "whatever/whocares/ok.h"

View File

@ -1,11 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/checkdeps/testdata/allowed/good.h"
// Always allowed to include self and parents.
#include "tools/checkdeps/testdata/disallowed/good.h"
#include "tools/checkdeps/testdata/disallowed/allowed/good.h"
#include "third_party/explicitly_disallowed/bad.h"
#include "third_party/allowed_may_use/bad.h"
#include "third_party/no_rule/bad.h"

View File

@ -1,10 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Not allowed for code under disallowed/ but temporarily allowed
// specifically for test code under allowed/. This regression tests a
// bug where we were taking shallow copies of rules when generating
// rules for subdirectories, so all rule objects were getting the same
// dictionary for specific rules.
#include "tools/checkdeps/testdata/disallowed/temp_allowed_for_tests.h"

View File

@ -1,12 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "tools/checkdeps/testdata/allowed/good.h"
// Always allowed to include self.
#include "tools/checkdeps/testdata/disallowed/good.h"
#include "tools/checkdeps/testdata/disallowed/allowed/good.h"
#include "third_party/explicitly_disallowed/bad.h"
// Only allowed for code under allowed/.
#include "third_party/allowed_may_use/bad.h"
#include "third_party/no_rule/bad.h"

View File

@ -1,3 +0,0 @@
set noparent
phajdan.jr@chromium.org
thestig@chromium.org

View File

@ -1,548 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'University of Illinois/NCSA Open Source License (BSD like)',
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/hash.cc': [ # http://crbug.com/98100
'UNKNOWN',
],
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
'chrome/test/data/gpu/vt': [
'UNKNOWN',
],
'chrome/test/data/layout_tests/LayoutTests': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'data/mozilla_js_tests': [
'UNKNOWN',
],
'data/page_cycler': [
'UNKNOWN',
'GPL (v2 or later)',
],
'data/tab_switching': [
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
],
'net/tools/spdyshark': [
'GPL (v2 or later)',
'UNKNOWN',
],
'third_party/WebKit': [
'UNKNOWN',
],
'third_party/WebKit/Websites/webkit.org/blog/wp-content/plugins/'
'akismet/akismet.php': [
'GPL (v2 or later)'
],
'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [
'GPL',
'GPL (v2 or later)',
'GPL (unversioned/unknown version)',
],
'third_party/active_doc': [ # http://crbug.com/98113
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
'third_party/bsdiff/mbsdiff.cc': [
'UNKNOWN',
],
'third_party/bzip2': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# Not used. http://crbug.com/156020
# Using third_party/cros_dbus_cplusplus/cros_dbus_cplusplus.gyp instead.
'third_party/cros_dbus_cplusplus/source/autogen.sh': [
'UNKNOWN',
],
# Included in the source tree but not built. http://crbug.com/156020
'third_party/cros_dbus_cplusplus/source/examples': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/findbugs/doc': [ # http://crbug.com/157206
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/gles2_book': [ # http://crbug.com/98130
'UNKNOWN',
],
'third_party/gles2_conform/GTF_ES': [ # http://crbug.com/98131
'UNKNOWN',
],
'third_party/harfbuzz': [ # http://crbug.com/98133
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/hyphen/hyphen.tex': [ # http://crbug.com/157375
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jemalloc': [ # http://crbug.com/98302
'UNKNOWN',
],
'third_party/JSON': [
'Perl', # Build-only.
# License missing upstream on 3 minor files.
'UNKNOWN', # https://rt.cpan.org/Public/Bug/Display.html?id=85915
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjingle/source_internal/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg': [ # http://crbug.com/98313
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libusb/src/libusb/version.h': [
'UNKNOWN',
],
'third_party/libusb/src/autogen.sh': [
'UNKNOWN',
],
'third_party/libusb/src/config.h': [
'UNKNOWN',
],
'third_party/libusb/src/msvc/config.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libvpx/source/libvpx/examples/includes': [
'GPL (v2 or later)',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/npapi/npspy/extern/java': [
'GPL (unversioned/unknown version)',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# http://crbug.com/222831
# https://bitbucket.org/eliben/pyelftools/issue/12
'third_party/pyelftools': [
'UNKNOWN',
],
'third_party/pylib': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/grit/grit/node/custom/__init__.py': [
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/histograms': [
'UNKNOWN',
],
'tools/memory_watcher': [
'UNKNOWN',
],
'tools/playback_benchmark': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/site_compare': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'webkit/data/ico_decoder': [
'UNKNOWN',
],
}
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
success = True
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/') or filename.startswith('sconsbuild/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
found_path_specific = False
for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES:
if (filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]):
found_path_specific = True
break
if found_path_specific:
continue
print "'%s' has non-whitelisted license '%s'" % (filename, license)
success = False
if success:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
return 1
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
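As an aside on the per-line filtering in checklicenses.py above (the sketch below is not part of the deleted file and the sample line is made up): each licensecheck.pl output line is split on the first colon, and the '*No copyright*' marker is stripped before the whitelist lookup.

line = 'third_party/foo/bar.cc: *No copyright* UNKNOWN'
filename, license = line.split(':', 1)
license = license.replace('*No copyright*', '').strip()
# Leaves 'UNKNOWN', which passes only if a PATH_SPECIFIC_WHITELISTED_LICENSES
# entry covers the file.
print filename, '->', license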

Some files were not shown because too many files have changed in this diff.