/* Copyright (c) 2015, Google Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"math/rand"
	"os"
	"os/exec"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"boringssl.googlesource.com/boringssl/util/testresult"
)

// TODO(davidben): Link tests with the malloc shim and port -malloc-test to this runner.

var (
	useValgrind     = flag.Bool("valgrind", false, "If true, run code under valgrind")
	useCallgrind    = flag.Bool("callgrind", false, "If true, run code under valgrind to generate callgrind traces.")
	useGDB          = flag.Bool("gdb", false, "If true, run BoringSSL code under gdb")
	useSDE          = flag.Bool("sde", false, "If true, run BoringSSL code under Intel's SDE for each supported chip")
	sdePath         = flag.String("sde-path", "sde", "The path to find the sde binary.")
	buildDir        = flag.String("build-dir", "build", "The build directory to run the tests from.")
	numWorkers      = flag.Int("num-workers", runtime.NumCPU(), "Runs the given number of workers when testing.")
	jsonOutput      = flag.String("json-output", "", "The file to output JSON results to.")
	mallocTest      = flag.Int64("malloc-test", -1, "If non-negative, run each test with each malloc in turn failing from the given number onwards.")
	mallocTestDebug = flag.Bool("malloc-test-debug", false, "If true, ask each test to abort rather than fail a malloc. This can be used with a specific value for --malloc-test to identify the failing malloc that is causing problems.")
	simulateARMCPUs = flag.Bool("simulate-arm-cpus", simulateARMCPUsDefault(), "If true, runs tests simulating different ARM CPUs.")
)
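
// simulateARMCPUsDefault reports whether ARM CPU simulation is enabled by
// default, namely when running on Linux arm or arm64.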
func simulateARMCPUsDefault() bool {
	return runtime.GOOS == "linux" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64")
}

type test struct {
	args             []string
	shard, numShards int

	// cpu, if not empty, contains a code to simulate. For SDE, run `sde64
	// -help` to get a list of these codes. For ARM, see gtest_main.cc for
	// the supported values.
	cpu string
}

type result struct {
	Test   test
	Passed bool
	Error  error
}

// sdeCPUs contains a list of CPU codes that we run all tests under when
// *useSDE is true.
var sdeCPUs = []string{
	"p4p", // Pentium4 Prescott
	"mrm", // Merom
	"pnr", // Penryn
	"nhm", // Nehalem
	"wsm", // Westmere
	"snb", // Sandy Bridge
	"ivb", // Ivy Bridge
	"hsw", // Haswell
	"bdw", // Broadwell
	"skx", // Skylake Server
	"skl", // Skylake Client
	"cnl", // Cannonlake
	"knl", // Knights Landing
	"slt", // Saltwell
	"slm", // Silvermont
	"glm", // Goldmont
	"knm", // Knights Mill
}

var armCPUs = []string{
	"none",   // No support for any ARM extensions.
	"neon",   // Support for NEON.
	"crypto", // Support for NEON and crypto extensions.
}
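
// valgrindOf returns a command that runs the given binary and arguments under
// valgrind, optionally attaching a debugger (xterm running gdb) on error.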
func valgrindOf(dbAttach bool, path string, args ...string) *exec.Cmd {
	valgrindArgs := []string{"--error-exitcode=99", "--track-origins=yes", "--leak-check=full", "--quiet"}
	if dbAttach {
		valgrindArgs = append(valgrindArgs, "--db-attach=yes", "--db-command=xterm -e gdb -nw %f %p")
	}
	valgrindArgs = append(valgrindArgs, path)
	valgrindArgs = append(valgrindArgs, args...)

	return exec.Command("valgrind", valgrindArgs...)
}
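
// callgrindOf returns a command that runs the given binary and arguments under
// valgrind's callgrind tool, writing traces into <build-dir>/callgrind/.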
func callgrindOf(path string, args ...string) *exec.Cmd {
	valgrindArgs := []string{"-q", "--tool=callgrind", "--dump-instr=yes", "--collect-jumps=yes", "--callgrind-out-file=" + *buildDir + "/callgrind/callgrind.out.%p"}
	valgrindArgs = append(valgrindArgs, path)
	valgrindArgs = append(valgrindArgs, args...)

	return exec.Command("valgrind", valgrindArgs...)
}
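
// gdbOf returns a command that runs the given binary and arguments under gdb
// in a new xterm window.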
func gdbOf(path string, args ...string) *exec.Cmd {
	xtermArgs := []string{"-e", "gdb", "--args"}
	xtermArgs = append(xtermArgs, path)
	xtermArgs = append(xtermArgs, args...)

	return exec.Command("xterm", xtermArgs...)
}
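
// sdeOf returns a command that runs the given binary and arguments under
// Intel's Software Development Emulator, simulating the given CPU code.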
func sdeOf(cpu, path string, args ...string) *exec.Cmd {
	sdeArgs := []string{"-" + cpu}
	// The kernel's vdso code for gettimeofday sometimes uses the RDTSCP
	// instruction. Although SDE has a -chip_check_vsyscall flag that
	// excludes such code by default, it does not seem to work. Instead,
	// pass the -chip_check_exe_only flag which retains test coverage when
	// statically linked and excludes the vdso.
	if cpu == "p4p" || cpu == "pnr" || cpu == "mrm" || cpu == "slt" {
		sdeArgs = append(sdeArgs, "-chip_check_exe_only")
	}
	sdeArgs = append(sdeArgs, "--", path)
	sdeArgs = append(sdeArgs, args...)
	return exec.Command(*sdePath, sdeArgs...)
}

var (
	errMoreMallocs = errors.New("child process did not exhaust all allocation calls")
	errTestSkipped = errors.New("test was skipped")
)
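
// runTestOnce runs a single test invocation. If mallocNumToFail is
// non-negative, the child is configured via the MALLOC_* environment variables
// so that the allocation with that index fails. It reports whether the test
// printed a PASS line (or a googletest pass summary), and maps exit codes 88
// and 89 to errMoreMallocs and errTestSkipped respectively.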
func runTestOnce(test test, mallocNumToFail int64) (passed bool, err error) {
	prog := path.Join(*buildDir, test.args[0])
	args := test.args[1:]
	if *simulateARMCPUs && test.cpu != "" {
		args = append([]string{"--cpu=" + test.cpu}, args...)
	}

	var cmd *exec.Cmd
	if *useValgrind {
		cmd = valgrindOf(false, prog, args...)
	} else if *useCallgrind {
		cmd = callgrindOf(prog, args...)
	} else if *useGDB {
		cmd = gdbOf(prog, args...)
	} else if *useSDE {
		cmd = sdeOf(test.cpu, prog, args...)
	} else {
		cmd = exec.Command(prog, args...)
	}

	var outBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &outBuf

	if mallocNumToFail >= 0 {
		cmd.Env = os.Environ()
		cmd.Env = append(cmd.Env, "MALLOC_NUMBER_TO_FAIL="+strconv.FormatInt(mallocNumToFail, 10))
		if *mallocTestDebug {
			cmd.Env = append(cmd.Env, "MALLOC_ABORT_ON_FAIL=1")
		}
		cmd.Env = append(cmd.Env, "_MALLOC_CHECK=1")
	}

	if err := cmd.Start(); err != nil {
		return false, err
	}
	if err := cmd.Wait(); err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			switch exitError.Sys().(syscall.WaitStatus).ExitStatus() {
			case 88:
				return false, errMoreMallocs
			case 89:
				fmt.Print(string(outBuf.Bytes()))
				return false, errTestSkipped
			}
		}
		fmt.Print(string(outBuf.Bytes()))
		return false, err
	}

	// Account for Windows line-endings.
	stdout := bytes.Replace(outBuf.Bytes(), []byte("\r\n"), []byte("\n"), -1)

	if bytes.HasSuffix(stdout, []byte("PASS\n")) &&
		(len(stdout) == 5 || stdout[len(stdout)-6] == '\n') {
		return true, nil
	}

	// Also accept a googletest-style pass line. This is left here in
	// transition until the tests are all converted and this script made
	// unnecessary.
	if bytes.Contains(stdout, []byte("\n[  PASSED  ]")) {
		return true, nil
	}

	fmt.Print(string(outBuf.Bytes()))
	return false, nil
}
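
// runTest runs a single test. When -malloc-test is enabled, it reruns the test
// with successive allocation failures, starting from *mallocTest, until the
// child no longer reports errMoreMallocs.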
func runTest(test test) (bool, error) {
	if *mallocTest < 0 {
		return runTestOnce(test, -1)
	}

	for mallocNumToFail := int64(*mallocTest); ; mallocNumToFail++ {
		if passed, err := runTestOnce(test, mallocNumToFail); err != errMoreMallocs {
			if err != nil {
				err = fmt.Errorf("at malloc %d: %s", mallocNumToFail, err)
			}
			return passed, err
		}
	}
}

// setWorkingDirectory walks up directories as needed until the current working
// directory is the top of a BoringSSL checkout.
func setWorkingDirectory() {
	for i := 0; i < 64; i++ {
		if _, err := os.Stat("BUILDING.md"); err == nil {
			return
		}
		os.Chdir("..")
	}
	panic("Couldn't find BUILDING.md in a parent directory!")
}
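
// parseTestConfig reads a JSON file containing a list of argument lists, one
// per test, and returns the corresponding test values.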
func parseTestConfig(filename string) ([]test, error) {
	in, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer in.Close()

	decoder := json.NewDecoder(in)
	var testArgs [][]string
	if err := decoder.Decode(&testArgs); err != nil {
		return nil, err
	}

	var result []test
	for _, args := range testArgs {
		result = append(result, test{args: args})
	}
	return result, nil
}
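
// worker consumes tests from the tests channel, runs each one, and reports the
// outcome on the results channel until the tests channel is closed.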
func worker(tests <-chan test, results chan<- result, done *sync.WaitGroup) {
	defer done.Done()
	for test := range tests {
		passed, err := runTest(test)
		results <- result{test, passed, err}
	}
}
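
// shortName identifies a test by its binary name plus shard and CPU
// annotations; longName uses the full argument list plus the CPU annotation.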
func (t test) shortName() string {
	return t.args[0] + t.shardMsg() + t.cpuMsg()
}

func (t test) longName() string {
	return strings.Join(t.args, " ") + t.cpuMsg()
}

func (t test) shardMsg() string {
	if t.numShards == 0 {
		return ""
	}
	return fmt.Sprintf(" [shard %d/%d]", t.shard+1, t.numShards)
}

func (t test) cpuMsg() string {
	if len(t.cpu) == 0 {
		return ""
	}
	return fmt.Sprintf(" (for CPU %q)", t.cpu)
}
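
// getGTestShards splits a GTest-based test binary into shards of at most
// testsPerShard test cases by listing its tests with --gtest_list_tests and
// generating --gtest_filter arguments, so the shards can run on separate
// workers.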
func (t test) getGTestShards() ([]test, error) {
	if *numWorkers == 1 || len(t.args) != 1 {
		return []test{t}, nil
	}

	// Only shard the three GTest-based tests.
	if t.args[0] != "crypto/crypto_test" && t.args[0] != "ssl/ssl_test" && t.args[0] != "decrepit/decrepit_test" {
		return []test{t}, nil
	}

	prog := path.Join(*buildDir, t.args[0])
	cmd := exec.Command(prog, "--gtest_list_tests")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	if err := cmd.Wait(); err != nil {
		return nil, err
	}

	var group string
	var tests []string
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()

		// Remove the parameter comment and trailing space.
		if idx := strings.Index(line, "#"); idx >= 0 {
			line = line[:idx]
		}
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			continue
		}

		if line[len(line)-1] == '.' {
			group = line
			continue
		}

		if len(group) == 0 {
			return nil, fmt.Errorf("found test case %q without group", line)
		}
		tests = append(tests, group+line)
	}

	const testsPerShard = 20
	if len(tests) <= testsPerShard {
		return []test{t}, nil
	}

	// Slow tests which process large test vector files tend to be grouped
	// together, so shuffle the order.
	shuffled := make([]string, len(tests))
	perm := rand.Perm(len(tests))
	for i, j := range perm {
		shuffled[i] = tests[j]
	}

	var shards []test
	for i := 0; i < len(shuffled); i += testsPerShard {
		n := len(shuffled) - i
		if n > testsPerShard {
			n = testsPerShard
		}
		shard := t
		shard.args = []string{shard.args[0], "--gtest_filter=" + strings.Join(shuffled[i:i+n], ":")}
		shard.shard = len(shards)
		shards = append(shards, shard)
	}

	for i := range shards {
		shards[i].numShards = len(shards)
	}

	return shards, nil
}

func main() {
	flag.Parse()
	setWorkingDirectory()

	testCases, err := parseTestConfig("util/all_tests.json")
	if err != nil {
		fmt.Printf("Failed to parse input: %s\n", err)
		os.Exit(1)
	}

	var wg sync.WaitGroup
	tests := make(chan test, *numWorkers)
	results := make(chan result, *numWorkers)

	for i := 0; i < *numWorkers; i++ {
		wg.Add(1)
		go worker(tests, results, &wg)
	}

	go func() {
		for _, test := range testCases {
			if *useSDE {
				// SDE generates plenty of tasks and gets slower
				// with additional sharding.
				for _, cpu := range sdeCPUs {
					testForCPU := test
					testForCPU.cpu = cpu
					tests <- testForCPU
				}
			} else if *simulateARMCPUs {
				// This mode is run instead of the default path,
				// so also include the native flow.
				tests <- test
				for _, cpu := range armCPUs {
					testForCPU := test
					testForCPU.cpu = cpu
					tests <- testForCPU
				}
			} else {
				shards, err := test.getGTestShards()
				if err != nil {
					fmt.Printf("Error listing tests: %s\n", err)
					os.Exit(1)
				}
				for _, shard := range shards {
					tests <- shard
				}
			}
		}
		close(tests)

		wg.Wait()
		close(results)
	}()

	testOutput := testresult.NewResults()
	var failed, skipped []test
	for testResult := range results {
		test := testResult.Test
		args := test.args

		if testResult.Error == errTestSkipped {
			fmt.Printf("%s\n", test.longName())
			fmt.Printf("%s was skipped\n", args[0])
			skipped = append(skipped, test)
			testOutput.AddSkip(test.longName())
		} else if testResult.Error != nil {
			fmt.Printf("%s\n", test.longName())
			fmt.Printf("%s failed to complete: %s\n", args[0], testResult.Error)
			failed = append(failed, test)
			testOutput.AddResult(test.longName(), "CRASH")
		} else if !testResult.Passed {
			fmt.Printf("%s\n", test.longName())
			fmt.Printf("%s failed to print PASS on the last line.\n", args[0])
			failed = append(failed, test)
			testOutput.AddResult(test.longName(), "FAIL")
		} else {
			fmt.Printf("%s\n", test.shortName())
			testOutput.AddResult(test.longName(), "PASS")
		}
	}

	if *jsonOutput != "" {
		if err := testOutput.WriteToFile(*jsonOutput); err != nil {
			fmt.Fprintf(os.Stderr, "Error: %s\n", err)
		}
	}

	if len(skipped) > 0 {
		fmt.Printf("\n%d of %d tests were skipped:\n", len(skipped), len(testCases))
		for _, test := range skipped {
			fmt.Printf("\t%s%s\n", strings.Join(test.args, " "), test.cpuMsg())
		}
	}

	if len(failed) > 0 {
		fmt.Printf("\n%d of %d tests failed:\n", len(failed), len(testCases))
		for _, test := range failed {
			fmt.Printf("\t%s%s\n", strings.Join(test.args, " "), test.cpuMsg())
		}
		os.Exit(1)
	}

	fmt.Printf("\nAll tests passed!\n")
}