# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

import binascii

import pytest

import six

from cryptography.exceptions import (
    AlreadyFinalized, InvalidKey, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand

from ...utils import raises_unsupported_algorithm


@pytest.mark.requires_backend_interface(interface=HMACBackend)
class TestHKDF(object):
    def test_length_limit(self, backend):
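        # RFC 5869 caps HKDF output at 255 * HashLen octets; requesting one
        # byte more than that must be rejected with ValueError.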
        big_length = 255 * hashes.SHA256().digest_size + 1

        with pytest.raises(ValueError):
            HKDF(
                hashes.SHA256(),
                big_length,
                salt=None,
                info=None,
                backend=backend
            )

    def test_already_finalized(self, backend):
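        # Each HKDF instance is single-use: once derive() or verify() has
        # run, any further call on the same instance raises AlreadyFinalized.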
        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.derive(b"\x01" * 16)

        with pytest.raises(AlreadyFinalized):
            hkdf.derive(b"\x02" * 16)

        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

        with pytest.raises(AlreadyFinalized):
            hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.derive(b"\x01" * 16)

        with pytest.raises(AlreadyFinalized):
            hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

    def test_verify(self, backend):
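        # The expected value is the 16-byte HKDF-SHA256 output for the key
        # b"\x01" * 16 with no salt or info, reused throughout this module.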
        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

    def test_verify_invalid(self, backend):
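        # verify() recomputes the output and compares it in constant time;
        # any mismatch is reported as InvalidKey.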
        hkdf = HKDF(
            hashes.SHA256(),
            16,
            salt=None,
            info=None,
            backend=backend
        )

        with pytest.raises(InvalidKey):
            hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")

    def test_unicode_typeerror(self, backend):
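        # Key material, salt and info must be bytes; text (unicode) values
        # are rejected with TypeError at every entry point.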
        with pytest.raises(TypeError):
            HKDF(
                hashes.SHA256(),
                16,
                salt=six.u("foo"),
                info=None,
                backend=backend
            )

        with pytest.raises(TypeError):
            HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=six.u("foo"),
                backend=backend
            )

        with pytest.raises(TypeError):
            hkdf = HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=None,
                backend=backend
            )

            hkdf.derive(six.u("foo"))

        with pytest.raises(TypeError):
            hkdf = HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=None,
                backend=backend
            )

            hkdf.verify(six.u("foo"), b"bar")

        with pytest.raises(TypeError):
            hkdf = HKDF(
                hashes.SHA256(),
                16,
                salt=None,
                info=None,
                backend=backend
            )

            hkdf.verify(b"foo", six.u("bar"))


@pytest.mark.requires_backend_interface(interface=HMACBackend)
class TestHKDFExpand(object):
    def test_derive(self, backend):
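        # PRK, info and OKM are the SHA-256 vectors from RFC 5869,
        # Appendix A, Test Case 1 (L = 42), applied to the expand step only.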
        prk = binascii.unhexlify(
            b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
        )

        okm = (b"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c"
               b"5bf34007208d5b887185865")

        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        assert binascii.hexlify(hkdf.derive(prk)) == okm

    def test_verify(self, backend):
        prk = binascii.unhexlify(
            b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
        )

        okm = (b"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c"
               b"5bf34007208d5b887185865")

        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        assert hkdf.verify(prk, binascii.unhexlify(okm)) is None

    def test_invalid_verify(self, backend):
        prk = binascii.unhexlify(
            b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
        )

        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        with pytest.raises(InvalidKey):
            hkdf.verify(prk, b"wrong key")

    def test_already_finalized(self, backend):
        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        hkdf.derive(b"first")

        with pytest.raises(AlreadyFinalized):
            hkdf.derive(b"second")

    def test_unicode_error(self, backend):
        info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
        hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)

        with pytest.raises(TypeError):
            hkdf.derive(six.u("first"))


def test_invalid_backend():
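    # An object that does not implement HMACBackend must be rejected with
    # UnsupportedAlgorithm carrying the BACKEND_MISSING_INTERFACE reason.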
    pretend_backend = object()

    with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
        HKDF(hashes.SHA256(), 16, None, None, pretend_backend)

    with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
        HKDFExpand(hashes.SHA256(), 16, None, pretend_backend)