Run pythonified tests in the containerized alternative architecture tests (#38)
commit dd15a07940
.travis.yml
@@ -51,7 +51,8 @@ matrix:
 script:
 - docker run --rm --privileged multiarch/qemu-user-static:register --reset
 - docker run --rm -v `pwd`:`pwd` -w `pwd` "pqclean/debian-unstable-ppc" /bin/bash -c "uname -a &&
-make ${MAKETARGET}"
+make ${MAKETARGET} &&
+cd test && python3 -m nose --rednose --verbose"
 - name: "Run tests on qemu-arm32 (GCC)"
 os: linux
 services: docker
@@ -60,7 +61,8 @@ matrix:
 script:
 - docker run --rm --privileged multiarch/qemu-user-static:register --reset
 - docker run --rm -v `pwd`:`pwd` -w `pwd` "pqclean/debian-buster-arm" /bin/bash -c "uname -a &&
-make ${MAKETARGET}"
+make ${MAKETARGET} &&
+cd test && python3 -m nose --rednose --verbose"
 - name: "Run tests on qemu-aarch64 (GCC)"
 os: linux
 services: docker
@@ -69,7 +71,8 @@ matrix:
 script:
 - docker run --rm --privileged multiarch/qemu-user-static:register --reset
 - docker run --rm -v `pwd`:`pwd` -w `pwd` "pqclean/debian-buster-aarch64" /bin/bash -c "uname -a &&
-make ${MAKETARGET}"
+make ${MAKETARGET} &&
+cd test && python3 -m nose --rednose --verbose"
 - name: "MacOS + Clang"
 os: osx
 osx_image: xcode10.1
test/helpers.py
@@ -1,13 +1,19 @@
 import subprocess
 
-def run_subprocess(command, working_dir, expected_returncode = 0):
-"""Helper function to run a shell command and report success/failure depending on the exit status of the shell command."""
-# Note we need to capture stdout/stderr from the subprocess, then print it, which nose/unittest will then capture and buffer appropriately
+
+def run_subprocess(command, working_dir, expected_returncode=0):
+"""
+Helper function to run a shell command and report success/failure
+depending on the exit status of the shell command.
+"""
+# Note we need to capture stdout/stderr from the subprocess,
+# then print it, which nose/unittest will then capture and
+# buffer appropriately
 result = subprocess.run(
 command,
-stdout = subprocess.PIPE,
-stderr = subprocess.STDOUT,
-cwd = working_dir
+stdout=subprocess.PIPE,
+stderr=subprocess.STDOUT,
+cwd=working_dir
 )
 print(result.stdout.decode('utf-8'))
 assert(result.returncode == expected_returncode)
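Note (illustration, not part of the commit): run_subprocess above is the hook the pythonified tests use to shell out to make. A minimal usage sketch, assuming it is called from a script sitting next to helpers.py and that the target directory contains a Makefile; the path below is a placeholder:

    # Illustrative sketch only; '../crypto_kem/example/clean' is a placeholder path.
    import helpers

    # Runs `make clean` in the given working directory, prints the captured
    # stdout/stderr, and asserts that the exit status is 0 (the default).
    helpers.run_subprocess(['make', 'clean'], '../crypto_kem/example/clean')

    # A non-zero exit status can also be declared as the expected outcome:
    helpers.run_subprocess(['false'], '.', expected_returncode=1)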
test/pqclean.py
@@ -1,6 +1,7 @@
 import os
 import yaml
 
+
 class Scheme:
 def __init__(self):
 self.type = None
@@ -58,6 +59,7 @@ class Scheme:
 print("Can't open {}: {}".format(metafile, e))
 return None
 
+
 class Implementation:
 
 def __init__(self, scheme, name):
@@ -83,22 +85,24 @@ class Implementation:
 implementations.append(Implementation(scheme, d))
 return implementations
 
+
 class KEM(Scheme):
 
 def __init__(self, name: str):
 self.type = 'kem'
-self.name = name;
+self.name = name
 self.implementations = Implementation.all_implementations(self)
 
 @staticmethod
 def all_kems() -> list:
 return Scheme.all_schemes_of_type('kem')
 
+
 class Signature(Scheme):
 
 def __init__(self, name: str):
 self.type = 'sign'
-self.name = name;
+self.name = name
 self.implementations = Implementation.all_implementations(self)
 
 @staticmethod
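Note (illustration, not part of the commit): the classes above form the discovery layer the test files build on. A rough sketch of how they are consumed, assuming it runs from the test/ directory so the relative scheme paths resolve:

    # Illustrative sketch only.
    import pqclean

    # KEM.all_kems() comes from the hunk above; each scheme object carries
    # its type, name and the Implementation objects found on disk.
    for kem in pqclean.KEM.all_kems():
        for impl in kem.implementations:
            print(kem.type, kem.name, impl.name)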
test/test_compile_lib.py
@@ -1,18 +1,21 @@
 """
-Checks that the archive library can be successfully built for every scheme/implementation.
+Checks that the archive library can be successfully built for every
+scheme/implementation.
 """
 
 import os
 import pqclean
 import helpers
 
+
 def test_compile_lib():
 for scheme in pqclean.Scheme.all_schemes():
 for implementation in scheme.implementations:
 yield check_compile_lib, scheme.name, implementation.name
 
+
 def check_compile_lib(scheme_name, implementation_name):
-implementation = pqclean.Implementation.by_name(scheme_name, implementation_name)
+implementation = pqclean.Implementation.by_name(
+scheme_name, implementation_name)
 helpers.run_subprocess(
 ['make', 'clean'],
 implementation.path()
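Note (illustration, not part of the commit): the `yield check_compile_lib, ...` line uses nose's test-generator protocol, which is what the `python3 -m nose` runs added to .travis.yml expand into one test case per scheme/implementation pair. A minimal self-contained example of the same pattern, with made-up values:

    # Illustrative sketch only: nose calls test_addition(), collects each yielded
    # (callable, *args) tuple, and runs check_addition(a, b, expected) as a
    # separately reported test case.
    def check_addition(a, b, expected):
        assert a + b == expected


    def test_addition():
        for a, b, expected in [(1, 1, 2), (2, 3, 5)]:
            yield check_addition, a, b, expected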
test/test_functest.py
@@ -1,20 +1,27 @@
 """
-Checks that the functional test program (functest) can be successfully built and executed for every scheme/implementation.
+Checks that the functional test program (functest) can be successfully built
+and executed for every scheme/implementation.
 """
 
 import os
 import pqclean
 import helpers
 
+
 def test_functest():
 for scheme in pqclean.Scheme.all_schemes():
 for implementation in scheme.implementations:
 yield check_functest, scheme.name, implementation.name
 
+
 def check_functest(scheme_name, implementation_name):
-implementation = pqclean.Implementation.by_name(scheme_name, implementation_name)
+implementation = pqclean.Implementation.by_name(
+scheme_name, implementation_name)
 helpers.run_subprocess(
-['make', 'TYPE=' + implementation.scheme.type, 'SCHEME=' + scheme_name, 'IMPLEMENTATION=' + implementation_name],
+['make',
+'TYPE={}'.format(implementation.scheme.type),
+'SCHEME={}'.format(scheme_name),
+'IMPLEMENTATION={}'.format(implementation_name)],
 os.path.join('..', 'test')
 )
 helpers.run_subprocess(
test/test_license.py
@@ -1,17 +1,21 @@
 """
-Checks that a LICENSE or LICENSE.txt file is present for every implementation of the specified scheme.
+Checks that a LICENSE or LICENSE.txt file is present for every
+implementation of the specified scheme.
 """
 
 import os
 import pqclean
 
+
 def test_license():
 for scheme in pqclean.Scheme.all_schemes():
 for implementation in scheme.implementations:
 yield check_license, scheme.name, implementation.name
 
+
 def check_license(scheme_name, implementation_name):
-implementation = pqclean.Implementation.by_name(scheme_name, implementation_name)
+implementation = pqclean.Implementation.by_name(
+scheme_name, implementation_name)
 p1 = os.path.join(implementation.path(), 'LICENSE')
 p2 = os.path.join(implementation.path(), 'LICENSE.txt')
 assert(os.path.isfile(p1) or os.path.isfile(p2))
test/test_metadata.py
@@ -4,15 +4,14 @@ Verify the metadata specified in the META.yml files.
 
 import copy
 import itertools
-import os
 import pqclean
-import yaml
-import unittest
 
+
 def test_metadata():
 for scheme in pqclean.Scheme.all_schemes():
 yield check_metadata, scheme.name
 
+
 def check_metadata(scheme_name):
 scheme = pqclean.Scheme.by_name(scheme_name)
 metadata = scheme.metadata()
@@ -22,17 +21,22 @@ def check_metadata(scheme_name):
 if scheme.type == 'kem':
 specification = itertools.chain(specification, KEM_FIELDS.items())
 elif scheme.type == 'sign':
-specification = itertools.chain(specification, SIGNATURE_FIELDS.items())
+specification = itertools.chain(specification,
+SIGNATURE_FIELDS.items())
 else:
 assert(False)
 
 check_spec(copy.deepcopy(metadata), specification)
 
-implementation_names_in_yaml = set(i['name'] for i in metadata['implementations'])
+implementation_names_in_yaml = set(
+i['name'] for i in metadata['implementations'])
 implementations_on_disk = set(i.name for i in scheme.implementations)
 if implementation_names_in_yaml != implementations_on_disk:
-raise AssertionError("Implementations in YAML file {} and implementations on disk {} do not match"
-.format(implementation_names_in_yaml, implementations_on_disk))
+raise AssertionError("Implementations in YAML file {} and "
+"implementations on disk {} do not match"
+.format(implementation_names_in_yaml,
+implementations_on_disk))
 
+
 EXPECTED_FIELDS = {
 'name': {'type': str},
@@ -62,6 +66,7 @@ SIGNATURE_FIELDS = {
 'length-signature': {'type': int, 'min': 1},
 }
 
+
 def check_spec(metadata, spec):
 for field, props in spec:
 if field not in metadata:
@@ -75,7 +80,8 @@ def check_spec(metadata, spec):
 
 # Done checking all specified fields, check if we have extras
 for field, value in metadata.items():
-raise AssertionError("Unexpected item '{}' with value '{}'".format(field, value))
+raise AssertionError(
+"Unexpected item '{}' with value '{}'".format(field, value))
 
 
 def check_element(field, element, props):
@@ -96,19 +102,20 @@ def check_element(field, element, props):
 if 'min' in props:
 if element < props['min']:
 raise ValueError("Value of field '{}' is lower than minimum "
-"value {}".format(field, props['min']))
+"value {}".format(field, props['min']))
 if 'max' in props:
 if element > props['max']:
 raise ValueError("Value of field '{}' is larger than maximum"
-" value {}".format(field, metafile, props['max']))
+" value {}"
+.format(field, props['max']))
 
 if type_ == str:
 if 'length' in props:
 actual_len = len(element)
 if actual_len != props['length']:
 raise ValueError("Value of field '{}' should be length {}"
-" but was length {}"
-.format(field, props['length'], actual_len))
+" but was length {}"
+.format(field, props['length'], actual_len))
 
 if type_ == list: # recursively check the elements
 for el in element:
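Note (illustration, not part of the commit): check_spec/check_element walk a field specification such as EXPECTED_FIELDS and compare it against the dictionary parsed from META.yml. A simplified, self-contained sketch of that idea, not the repository's exact implementation; the field names and values below are examples:

    # Illustrative sketch only: spec-driven validation in the spirit of
    # check_spec()/check_element() above.
    SPEC = {
        'name': {'type': str},
        'length-public-key': {'type': int, 'min': 1},
    }


    def validate(metadata, spec):
        for field, props in spec.items():
            if field not in metadata:
                raise AssertionError("Field '{}' is missing".format(field))
            value = metadata[field]
            if not isinstance(value, props['type']):
                raise ValueError("Field '{}' has the wrong type".format(field))
            if 'min' in props and value < props['min']:
                raise ValueError("Field '{}' is below {}".format(field, props['min']))
        for field in set(metadata) - set(spec):
            raise AssertionError("Unexpected field '{}'".format(field))


    validate({'name': 'example-kem', 'length-public-key': 800}, SPEC)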
test/test_symbol_namespace.py
@@ -1,21 +1,25 @@
 """
-Checks that the all exported symbols are properly namespaced, i.e., all start with "PQCLEAN_SCHEMENAME_".
+Checks that the all exported symbols are properly namespaced, i.e., all
+start with "PQCLEAN_SCHEMENAME_".
 """
 
-import os
 import pqclean
 import helpers
 import sys
 import unittest
 
+
 def test_symbol_namespace():
-if sys.platform not in ['linux', 'darwin']: raise unittest.SkipTest()
+if sys.platform not in ['linux', 'darwin']:
+raise unittest.SkipTest()
 for scheme in pqclean.Scheme.all_schemes():
 for implementation in scheme.implementations:
 yield check_symbol_namespace, scheme.name, implementation.name
 
+
 def check_symbol_namespace(scheme_name, implementation_name):
-implementation = pqclean.Implementation.by_name(scheme_name, implementation_name)
+implementation = pqclean.Implementation.by_name(
+scheme_name, implementation_name)
 helpers.run_subprocess(
 ['make'],
 implementation.path()
@@ -36,7 +40,8 @@ def check_symbol_namespace(scheme_name, implementation_name):
 for symbolstr in symbols:
 *_, symtype, symbol = symbolstr.split()
 if symtype in 'TR':
-if not symbol.startswith(namespace) and not symbol.startswith('_' + namespace):
+if (not symbol.startswith(namespace) and
+not symbol.startswith('_' + namespace)):
 non_namespaced.append(symbol)
 
 if non_namespaced:
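Note (illustration, not part of the commit): the namespace test builds the implementation, lists the exported symbols, and flags any defined text/read-only symbol that lacks the PQCLEAN_SCHEMENAME_ prefix (allowing a leading underscore on macOS). A rough self-contained sketch of the same idea; it assumes a POSIX nm on the PATH, whereas the real test drives the build and symbol dump through helpers.run_subprocess, and the library name and namespace below are placeholders:

    # Illustrative sketch only.
    import subprocess


    def non_namespaced_symbols(lib_path, namespace):
        out = subprocess.run(['nm', '-g', lib_path],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.DEVNULL).stdout.decode('utf-8')
        offenders = []
        for line in out.splitlines():
            parts = line.split()
            if len(parts) < 3:
                continue  # skip undefined symbols, which have no address column
            *_, symtype, symbol = parts
            # 'T'/'R' mark defined text/read-only data symbols, as in the hunk above.
            if symtype in 'TR' and not symbol.lstrip('_').startswith(namespace):
                offenders.append(symbol)
        return offenders


    # Hypothetical usage:
    # print(non_namespaced_symbols('libexample_clean.a', 'PQCLEAN_EXAMPLE_CLEAN_'))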