Diffstat (limited to 'tests/utils.py')
 tests/utils.py | 363 ++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 264 insertions(+), 99 deletions(-)
diff --git a/tests/utils.py b/tests/utils.py
index 46d93646..401b4e33 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -6,6 +6,8 @@ from __future__ import absolute_import, division, print_function
import binascii
import collections
+import json
+import os
import re
from contextlib import contextmanager
@@ -24,42 +26,12 @@ KeyedHashVector = collections.namedtuple(
)
-def select_backends(names, backend_list):
- if names is None:
- return backend_list
- split_names = [x.strip() for x in names.split(',')]
- selected_backends = []
- for backend in backend_list:
- if backend.name in split_names:
- selected_backends.append(backend)
-
- if len(selected_backends) > 0:
- return selected_backends
- else:
- raise ValueError(
- "No backend selected. Tried to select: {0}".format(split_names)
- )
-
-
-def skip_if_empty(backend_list, required_interfaces):
- if not backend_list:
- pytest.skip(
- "No backends provided supply the interface: {0}".format(
- ", ".join(iface.__name__ for iface in required_interfaces)
- )
- )
-
-
-def check_backend_support(item):
- supported = item.keywords.get("supported")
- if supported and "backend" in item.funcargs:
- if not supported.kwargs["only_if"](item.funcargs["backend"]):
- pytest.skip("{0} ({1})".format(
- supported.kwargs["skip_message"], item.funcargs["backend"]
+def check_backend_support(backend, item):
+ for mark in item.node.iter_markers("supported"):
+ if not mark.kwargs["only_if"](backend):
+ pytest.skip("{} ({})".format(
+ mark.kwargs["skip_message"], backend
))
- elif supported:
- raise ValueError("This mark is only available on methods that take a "
- "backend")
@contextmanager
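
For background, pytest 3.6 replaced the merged item.keywords marker lookup
with Node.iter_markers(), which yields every applied marker, closest first.
A minimal sketch of a test the rewritten helper would skip (the only_if
lambda is a hypothetical condition; the call-site wiring is assumed):

import pytest

@pytest.mark.supported(
    only_if=lambda backend: False,  # hypothetical: backend lacks the feature
    skip_message="Required feature not supported",
)
def test_requires_feature(backend):
    pass  # check_backend_support(backend, item) calls pytest.skip() here
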
@@ -161,7 +133,7 @@ def load_hash_vectors(vector_data):
# string as hex 00, which is of course not actually an empty
# string. So we parse the provided length and catch this edge case.
msg = line.split(" = ")[1].encode("ascii") if length > 0 else b""
- elif line.startswith("MD"):
+ elif line.startswith("MD") or line.startswith("Output"):
md = line.split(" = ")[1]
# after MD is found the Msg+MD (+ potential key) tuple is complete
if key is not None:
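
To illustrate the new "Output" branch, a sketch with made-up hex; the block
otherwise follows the Len/Msg keywords the loader already handles, and an
"MD = ..." line would parse identically:

sample = [
    "Len = 16",
    "Msg = 9f2f",
    "Output = 0f1e2d3c",  # hypothetical digest value
]
load_hash_vectors(sample)
# one (message, digest) entry: (b'9f2f', '0f1e2d3c')
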
@@ -275,6 +247,9 @@ def load_pkcs1_vectors(vector_data):
attr = None
if private_key_vector is None or public_key_vector is None:
+ # Random garbage to defeat CPython's peephole optimizer so that
+ # coverage records correctly: https://bugs.python.org/issue2506
+ 1 + 1
continue
if line.startswith("# Private key"):
@@ -377,46 +352,28 @@ def load_fips_dsa_key_pair_vectors(vector_data):
Loads data out of the FIPS DSA KeyPair vector files.
"""
vectors = []
- # When reading_key_data is set to True it tells the loader to continue
- # constructing dictionaries. We set reading_key_data to False during the
- # blocks of the vectors of N=224 because we don't support it.
- reading_key_data = True
for line in vector_data:
line = line.strip()
- if not line or line.startswith("#"):
- continue
- elif line.startswith("[mod = L=1024"):
- continue
- elif line.startswith("[mod = L=2048, N=224"):
- reading_key_data = False
- continue
- elif line.startswith("[mod = L=2048, N=256"):
- reading_key_data = True
- continue
- elif line.startswith("[mod = L=3072"):
- continue
-
- if not reading_key_data:
+ if not line or line.startswith("#") or line.startswith("[mod"):
continue
- elif reading_key_data:
- if line.startswith("P"):
- vectors.append({'p': int(line.split("=")[1], 16)})
- elif line.startswith("Q"):
- vectors[-1]['q'] = int(line.split("=")[1], 16)
- elif line.startswith("G"):
- vectors[-1]['g'] = int(line.split("=")[1], 16)
- elif line.startswith("X") and 'x' not in vectors[-1]:
- vectors[-1]['x'] = int(line.split("=")[1], 16)
- elif line.startswith("X") and 'x' in vectors[-1]:
- vectors.append({'p': vectors[-1]['p'],
- 'q': vectors[-1]['q'],
- 'g': vectors[-1]['g'],
- 'x': int(line.split("=")[1], 16)
- })
- elif line.startswith("Y"):
- vectors[-1]['y'] = int(line.split("=")[1], 16)
+ if line.startswith("P"):
+ vectors.append({'p': int(line.split("=")[1], 16)})
+ elif line.startswith("Q"):
+ vectors[-1]['q'] = int(line.split("=")[1], 16)
+ elif line.startswith("G"):
+ vectors[-1]['g'] = int(line.split("=")[1], 16)
+ elif line.startswith("X") and 'x' not in vectors[-1]:
+ vectors[-1]['x'] = int(line.split("=")[1], 16)
+ elif line.startswith("X") and 'x' in vectors[-1]:
+ vectors.append({'p': vectors[-1]['p'],
+ 'q': vectors[-1]['q'],
+ 'g': vectors[-1]['g'],
+ 'x': int(line.split("=")[1], 16)
+ })
+ elif line.startswith("Y"):
+ vectors[-1]['y'] = int(line.split("=")[1], 16)
return vectors
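
A sketch of how the simplified loader groups lines (values abbreviated;
real vectors carry full-size hex integers):

lines = [
    "[mod = L=2048, N=256]",
    "P = 8df2a494",
    "Q = c773218c",
    "G = 626d0278",
    "X = 2070b322",
    "Y = 19131871",
]
load_fips_dsa_key_pair_vectors(lines)
# one dict with keys 'p', 'q', 'g', 'x', 'y', each parsed as base-16 int
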
@@ -429,10 +386,6 @@ def load_fips_dsa_sig_vectors(vector_data):
sha_regex = re.compile(
r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
)
- # When reading_key_data is set to True it tells the loader to continue
- # constructing dictionaries. We set reading_key_data to False during the
- # blocks of the vectors of N=224 because we don't support it.
- reading_key_data = True
for line in vector_data:
line = line.strip()
@@ -442,16 +395,9 @@ def load_fips_dsa_sig_vectors(vector_data):
sha_match = sha_regex.match(line)
if sha_match:
- digest_algorithm = "SHA-{0}".format(sha_match.group("sha"))
+ digest_algorithm = "SHA-{}".format(sha_match.group("sha"))
- if line.startswith("[mod = L=2048, N=224"):
- reading_key_data = False
- continue
- elif line.startswith("[mod = L=2048, N=256"):
- reading_key_data = True
- continue
-
- if not reading_key_data or line.startswith("[mod"):
+ if line.startswith("[mod"):
continue
name, value = [c.strip() for c in line.split("=")]
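
For reference, the sha_regex above matches section headers such as the
following (a quick hypothetical check):

m = sha_regex.match("[mod = L=2048, N=256, SHA-256]")
m.group("sha")  # -> '256'
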
@@ -488,7 +434,7 @@ def load_fips_dsa_sig_vectors(vector_data):
return vectors
-# http://tools.ietf.org/html/rfc4492#appendix-A
+# https://tools.ietf.org/html/rfc4492#appendix-A
_ECDSA_CURVE_NAMES = {
"P-192": "secp192r1",
"P-224": "secp224r1",
@@ -541,8 +487,8 @@ def load_fips_ecdsa_key_pair_vectors(vector_data):
elif line.startswith("Qy = "):
key_data["y"] = int(line.split("=")[1], 16)
- if key_data is not None:
- vectors.append(key_data)
+ assert key_data is not None
+ vectors.append(key_data)
return vectors
@@ -561,13 +507,10 @@ def load_fips_ecdsa_signing_vectors(vector_data):
for line in vector_data:
line = line.strip()
- if not line or line.startswith("#"):
- continue
-
curve_match = curve_rx.match(line)
if curve_match:
curve_name = _ECDSA_CURVE_NAMES[curve_match.group("curve")]
- digest_name = "SHA-{0}".format(curve_match.group("sha"))
+ digest_name = "SHA-{}".format(curve_match.group("sha"))
elif line.startswith("Msg = "):
if data is not None:
@@ -595,8 +538,8 @@ def load_fips_ecdsa_signing_vectors(vector_data):
elif line.startswith("Result = "):
data["fail"] = line.split("=")[1].strip()[0] == "F"
- if data is not None:
- vectors.append(data)
+ assert data is not None
+ vectors.append(data)
return vectors
@@ -675,7 +618,7 @@ def load_kasvs_ecdh_vectors(vector_data):
result_rx = re.compile(r"([FP]) \(([0-9]+) -")
tags = []
- sets = dict()
+ sets = {}
vectors = []
# find info in header
@@ -713,8 +656,8 @@ def load_kasvs_ecdh_vectors(vector_data):
# Data
data = {
- "CAVS": dict(),
- "IUT": dict(),
+ "CAVS": {},
+ "IUT": {},
}
tag = None
for line in vector_data:
@@ -726,7 +669,7 @@ def load_kasvs_ecdh_vectors(vector_data):
if line.startswith("["):
tag = line.split()[0][1:]
elif line.startswith("COUNT = "):
- data["COUNT"] = int(line.split("=")[1], 16)
+ data["COUNT"] = int(line.split("=")[1])
elif line.startswith("dsCAVS = "):
data["CAVS"]["d"] = int(line.split("=")[1], 16)
elif line.startswith("QsCAVSx = "):
@@ -760,8 +703,230 @@ def load_kasvs_ecdh_vectors(vector_data):
vectors.append(data)
data = {
- "CAVS": dict(),
- "IUT": dict(),
+ "CAVS": {},
+ "IUT": {},
}
return vectors
+
+
+def load_x963_vectors(vector_data):
+ """
+ Loads data out of the X9.63 vector data
+ """
+
+ vectors = []
+
+ # Sets Metadata
+ hashname = None
+ vector = {}
+ for line in vector_data:
+ line = line.strip()
+
+ if line.startswith("[SHA"):
+ hashname = line[1:-1]
+ shared_secret_len = 0
+ shared_info_len = 0
+ key_data_len = 0
+ elif line.startswith("[shared secret length"):
+ shared_secret_len = int(line[1:-1].split("=")[1].strip())
+ elif line.startswith("[SharedInfo length"):
+ shared_info_len = int(line[1:-1].split("=")[1].strip())
+ elif line.startswith("[key data length"):
+ key_data_len = int(line[1:-1].split("=")[1].strip())
+ elif line.startswith("COUNT"):
+ count = int(line.split("=")[1].strip())
+ vector["hash"] = hashname
+ vector["count"] = count
+ vector["shared_secret_length"] = shared_secret_len
+ vector["sharedinfo_length"] = shared_info_len
+ vector["key_data_length"] = key_data_len
+ elif line.startswith("Z"):
+ vector["Z"] = line.split("=")[1].strip()
+ assert ((shared_secret_len + 7) // 8) * 2 == len(vector["Z"])
+ elif line.startswith("SharedInfo"):
+ if shared_info_len != 0:
+ vector["sharedinfo"] = line.split("=")[1].strip()
+ silen = len(vector["sharedinfo"])
+ assert ((shared_info_len + 7) // 8) * 2 == silen
+ elif line.startswith("key_data"):
+ vector["key_data"] = line.split("=")[1].strip()
+ assert ((key_data_len + 7) // 8) * 2 == len(vector["key_data"])
+ vectors.append(vector)
+ vector = {}
+
+ return vectors
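
A sketch of the X9.63 block format the loader expects (values made up and
shortened; the 16-bit lengths pair with 4-hex-char strings so the length
asserts hold):

lines = [
    "[SHA-1]",
    "[shared secret length = 16]",
    "[SharedInfo length = 0]",
    "[key data length = 16]",
    "",
    "COUNT = 0",
    "Z = 00f4",
    "key_data = 8dbb",
]
load_x963_vectors(lines)
# -> [{'hash': 'SHA-1', 'count': 0, 'shared_secret_length': 16,
#      'sharedinfo_length': 0, 'key_data_length': 16,
#      'Z': '00f4', 'key_data': '8dbb'}]
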
+
+
+def load_nist_kbkdf_vectors(vector_data):
+ """
+ Load NIST SP 800-108 KDF Vectors
+ """
+ vectors = []
+ test_data = None
+ tag = {}
+
+ for line in vector_data:
+ line = line.strip()
+
+ if not line or line.startswith("#"):
+ continue
+
+        if line.startswith("[") and line.endswith("]"):
+            tag_data = line[1:-1]
+            name, value = [c.strip() for c in tag_data.split("=")]
+            if value.endswith('_BITS'):
+                value = int(value.split('_')[0])
+                tag.update({name.lower(): value})
+                continue
+
+            tag.update({name.lower(): value.lower()})
+ elif line.startswith("COUNT="):
+ test_data = {}
+ test_data.update(tag)
+ vectors.append(test_data)
+ elif line.startswith("L"):
+ name, value = [c.strip() for c in line.split("=")]
+ test_data[name.lower()] = int(value)
+ else:
+ name, value = [c.strip() for c in line.split("=")]
+ test_data[name.lower()] = value.encode("ascii")
+
+ return vectors
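
A sketch with a made-up vector; the bracketed tags and COUNT/L/KI/KO lines
follow the NIST KBKDF .rsp layout parsed above:

lines = [
    "[PRF=HMAC_SHA256]",
    "[CTRLOCATION=BEFORE_FIXED]",
    "[RLEN=8_BITS]",
    "COUNT=0",
    "L = 128",
    "KI = 00aa",
    "KO = 11bb",
]
load_nist_kbkdf_vectors(lines)
# -> [{'prf': 'hmac_sha256', 'ctrlocation': 'before_fixed', 'rlen': 8,
#      'l': 128, 'ki': b'00aa', 'ko': b'11bb'}]
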
+
+
+def load_ed25519_vectors(vector_data):
+ data = []
+ for line in vector_data:
+ secret_key, public_key, message, signature, _ = line.split(':')
+ # In the vectors the first element is secret key + public key
+ secret_key = secret_key[0:64]
+ # In the vectors the signature section is signature + message
+ signature = signature[0:128]
+ data.append({
+ "secret_key": secret_key,
+ "public_key": public_key,
+ "message": message,
+ "signature": signature
+ })
+ return data
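
The colon-separated lines pack secret+public key into field one and
signature+message into field four, as in the original ed25519 sign.input
vectors. A sketch with placeholder hex:

line = "{0}{1}:{1}:{2}:{3}{2}:".format("a" * 64, "b" * 64, "deadbeef", "c" * 128)
load_ed25519_vectors([line])
# -> [{'secret_key': 'a' * 64, 'public_key': 'b' * 64,
#      'message': 'deadbeef', 'signature': 'c' * 128}]
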
+
+
+def load_nist_ccm_vectors(vector_data):
+ test_data = None
+ section_data = None
+ global_data = {}
+ new_section = False
+ data = []
+
+ for line in vector_data:
+ line = line.strip()
+
+ # Blank lines and comments should be ignored
+ if not line or line.startswith("#"):
+ continue
+
+ # Some of the CCM vectors have global values for this. They are always
+ # at the top before the first section header (see: VADT, VNT, VPT)
+ if line.startswith(("Alen", "Plen", "Nlen", "Tlen")):
+ name, value = [c.strip() for c in line.split("=")]
+ global_data[name.lower()] = int(value)
+ continue
+
+ # section headers contain length data we might care about
+ if line.startswith("["):
+ new_section = True
+ section_data = {}
+ section = line[1:-1]
+ items = [c.strip() for c in section.split(",")]
+ for item in items:
+ name, value = [c.strip() for c in item.split("=")]
+ section_data[name.lower()] = int(value)
+ continue
+
+ name, value = [c.strip() for c in line.split("=")]
+
+ if name.lower() in ("key", "nonce") and new_section:
+ section_data[name.lower()] = value.encode("ascii")
+ continue
+
+ new_section = False
+
+ # Payload is sometimes special because these vectors are absurd. Each
+ # example may or may not have a payload. If it does not then the
+ # previous example's payload should be used. We accomplish this by
+ # writing it into the section_data. Because we update each example
+ # with the section data it will be overwritten if a new payload value
+ # is present. NIST should be ashamed of their vector creation.
+ if name.lower() == "payload":
+ section_data[name.lower()] = value.encode("ascii")
+
+ # Result is a special token telling us if the test should pass/fail.
+ # This is only present in the DVPT CCM tests
+ if name.lower() == "result":
+ if value.lower() == "pass":
+ test_data["fail"] = False
+ else:
+ test_data["fail"] = True
+ continue
+
+ # COUNT is a special token that indicates a new block of data
+ if name.lower() == "count":
+ test_data = {}
+ test_data.update(global_data)
+ test_data.update(section_data)
+ data.append(test_data)
+ continue
+ # For all other tokens we simply want the name, value stored in
+ # the dictionary
+ else:
+ test_data[name.lower()] = value.encode("ascii")
+
+ return data
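
A sketch of the flow the comments above describe, with a truncated
VADT-style block (hypothetical values):

lines = [
    "Plen = 4",
    "[Alen = 2]",
    "Key = c0c1c2c3c4c5c6c7c8c9cacbcccdcecf",
    "Nonce = 00000003020100a0a1a2a3a4a5",
    "Count = 0",
    "Adata = 0001",
    "Payload = 20212223",
    "CT = 7162015b4dac255d",
]
load_nist_ccm_vectors(lines)
# -> [{'plen': 4, 'alen': 2, 'key': b'c0c1...', 'nonce': b'0000...',
#      'adata': b'0001', 'payload': b'20212223', 'ct': b'7162015b4dac255d'}]
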
+
+
+class WycheproofTest(object):
+ def __init__(self, testfiledata, testgroup, testcase):
+ self.testfiledata = testfiledata
+ self.testgroup = testgroup
+ self.testcase = testcase
+
+ def __repr__(self):
+ return "<WycheproofTest({!r}, {!r}, {!r}, tcId={})>".format(
+ self.testfiledata,
+ self.testgroup,
+ self.testcase,
+ self.testcase["tcId"],
+ )
+
+ @property
+ def valid(self):
+ return self.testcase["result"] == "valid"
+
+ @property
+ def acceptable(self):
+ return self.testcase["result"] == "acceptable"
+
+ @property
+ def invalid(self):
+ return self.testcase["result"] == "invalid"
+
+ def has_flag(self, flag):
+ return flag in self.testcase["flags"]
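
A quick sketch of the accessors; the dict shapes mirror Wycheproof's JSON:

t = WycheproofTest(
    {"algorithm": "AES-GCM"},
    {"keySize": 128},
    {"tcId": 1, "result": "acceptable", "flags": ["ZeroLengthIv"]},
)
t.valid                     # -> False
t.acceptable                # -> True
t.has_flag("ZeroLengthIv")  # -> True
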
+
+
+def skip_if_wycheproof_none(wycheproof):
+ # This is factored into its own function so we can easily test both
+ # branches
+ if wycheproof is None:
+ pytest.skip("--wycheproof-root not provided")
+
+
+def load_wycheproof_tests(wycheproof, test_file):
+ path = os.path.join(wycheproof, "testvectors", test_file)
+ with open(path) as f:
+ data = json.load(f)
+ for group in data.pop("testGroups"):
+ cases = group.pop("tests")
+ for c in cases:
+ yield WycheproofTest(data, group, c)
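
Typical use, assuming a local checkout of google/wycheproof (the JSON file
name here is assumed to be one of the upstream testvectors files):

wycheproof = "/path/to/wycheproof"  # hypothetical checkout location
for test in load_wycheproof_tests(wycheproof, "x25519_test.json"):
    print(test.testcase["tcId"], test.valid or test.acceptable)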