path: root/test/crypto/source/testref/ref_sha.c
blob: 8e9822dedc85d1751d4f3b2b71e117f5ae28a1c0
/*
    ChibiOS - Copyright (C) 2006..2017 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "hal.h"
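
/*
 * Reference SHA digests for the crypto test suite.
 *
 * Naming convention (inferred from the array names and digest values, so
 * treat it as a reading aid rather than a specification): the _EMPTY array
 * holds the digest of the empty message, the _3 arrays match the standard
 * FIPS 180 "abc" test vectors, the _56 arrays match the standard 56-byte
 * "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" test vectors,
 * and the _64/_128 arrays correspond to the 64-byte and 128-byte messages
 * hashed elsewhere in the test suite.
 */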

const uint8_t refSHA_SHA1_EMPTY[]={
0xDA,0x39,0xA3,0xEE,0x5E,0x6B,0x4B,0x0D,0x32,0x55,
0xBF,0xEF,0x95,0x60,0x18,0x90,0xAF,0xD8,0x07,0x09,
};
const uint8_t refSHA_SHA1_3[]={
0xA9,0x99,0x3E,0x36,0x47,0x06,0x81,0x6A,0xBA,0x3E,
0x25,0x71,0x78,0x50,0xC2,0x6C,0x9C,0xD0,0xD8,0x9D,

};
const uint8_t refSHA_SHA1_56[]={
0x84,0x98,0x3E,0x44,0x1C,0x3B,0xD2,0x6E,0xBA,0xAE,
0x4A,0xA1,0xF9,0x51,0x29,0xE5,0xE5,0x46,0x70,0xF1,

};
const uint8_t refSHA_SHA1_64[]={
0x00,0x98,0xBA,0x82,0x4B,0x5C,0x16,0x42,0x7B,0xD7,
0xA1,0x12,0x2A,0x5A,0x44,0x2A,0x25,0xEC,0x64,0x4D,

};
const uint8_t refSHA_SHA1_128[]={
0xAD,0x5B,0x3F,0xDB,0xCB,0x52,0x67,0x78,0xC2,0x83,
0x9D,0x2F,0x15,0x1E,0xA7,0x53,0x99,0x5E,0x26,0xA0,

};
const uint8_t refSHA_SHA256_3[]={
0xBA,0x78,0x16,0xBF,0x8F,0x01,0xCF,0xEA,0x41,0x41,
0x40,0xDE,0x5D,0xAE,0x22,0x23,0xB0,0x03,0x61,0xA3,
0x96,0x17,0x7A,0x9C,0xB4,0x10,0xFF,0x61,0xF2,0x00,
0x15,0xAD,
};
const uint8_t refSHA_SHA256_56[]={
0x24,0x8D,0x6A,0x61,0xD2,0x06,0x38,0xB8,0xE5,0xC0,
0x26,0x93,0x0C,0x3E,0x60,0x39,0xA3,0x3C,0xE4,0x59,
0x64,0xFF,0x21,0x67,0xF6,0xEC,0xED,0xD4,0x19,0xDB,
0x06,0xC1,
};
const uint8_t refSHA_SHA256_64[]={
0xFF,0xE0,0x54,0xFE,0x7A,0xE0,0xCB,0x6D,0xC6,0x5C,
0x3A,0xF9,0xB6,0x1D,0x52,0x09,0xF4,0x39,0x85,0x1D,
0xB4,0x3D,0x0B,0xA5,0x99,0x73,0x37,0xDF,0x15,0x46,
0x68,0xEB,
};
const uint8_t refSHA_SHA256_128[]={
0x68,0x36,0xCF,0x13,0xBA,0xC4,0x00,0xE9,0x10,0x50,
0x71,0xCD,0x6A,0xF4,0x70,0x84,0xDF,0xAC,0xAD,0x4E,
0x5E,0x30,0x2C,0x94,0xBF,0xED,0x24,0xE0,0x13,0xAF,
0xB7,0x3E,
};
const uint8_t refSHA_SHA512_3[]={
0xDD,0xAF,0x35,0xA1,0x93,0x61,0x7A,0xBA,0xCC,0x41,
0x73,0x49,0xAE,0x20,0x41,0x31,0x12,0xE6,0xFA,0x4E,
0x89,0xA9,0x7E,0xA2,0x0A,0x9E,0xEE,0xE6,0x4B,0x55,
0xD3,0x9A,0x21,0x92,0x99,0x2A,0x27,0x4F,0xC1,0xA8,
0x36,0xBA,0x3C,0x23,0xA3,0xFE,0xEB,0xBD,0x45,0x4D,
0x44,0x23,0x64,0x3C,0xE8,0x0E,0x2A,0x9A,0xC9,0x4F,
0xA5,0x4C,0xA4,0x9F,
};
const uint8_t refSHA_SHA512_56[]={
0x20,0x4A,0x8F,0xC6,0xDD,0xA8,0x2F,0x0A,0x0C,0xED,
0x7B,0xEB,0x8E,0x08,0xA4,0x16,0x57,0xC1,0x6E,0xF4,
0x68,0xB2,0x28,0xA8,0x27,0x9B,0xE3,0x31,0xA7,0x03,
0xC3,0x35,0x96,0xFD,0x15,0xC1,0x3B,0x1B,0x07,0xF9,
0xAA,0x1D,0x3B,0xEA,0x57,0x78,0x9C,0xA0,0x31,0xAD,
0x85,0xC7,0xA7,0x1D,0xD7,0x03,0x54,0xEC,0x63,0x12,
0x38,0xCA,0x34,0x45,
};
const uint8_t refSHA_SHA512_64[]={
0x01,0xD3,0x5C,0x10,0xC6,0xC3,0x8C,0x2D,0xCF,0x48,
0xF7,0xEE,0xBB,0x32,0x35,0xFB,0x5A,0xD7,0x4A,0x65,
0xEC,0x4C,0xD0,0x16,0xE2,0x35,0x4C,0x63,0x7A,0x8F,
0xB4,0x9B,0x69,0x5E,0xF3,0xC1,0xD6,0xF7,0xAE,0x4C,
0xD7,0x4D,0x78,0xCC,0x9C,0x9B,0xCA,0xC9,0xD4,0xF2,
0x3A,0x73,0x01,0x99,0x98,0xA7,0xF7,0x30,0x38,0xA5,
0xC9,0xB2,0xDB,0xDE,
};
const uint8_t refSHA_SHA512_128[]={
0xB7,0x3D,0x19,0x29,0xAA,0x61,0x59,0x34,0xE6,0x1A,
0x87,0x15,0x96,0xB3,0xF3,0xB3,0x33,0x59,0xF4,0x2B,
0x81,0x75,0x60,0x2E,0x89,0xF7,0xE0,0x6E,0x5F,0x65,
0x8A,0x24,0x36,0x67,0x80,0x7E,0xD3,0x00,0x31,0x4B,
0x95,0xCA,0xCD,0xD5,0x79,0xF3,0xE3,0x3A,0xBD,0xFB,
0xE3,0x51,0x90,0x95,0x19,0xA8,0x46,0xD4,0x65,0xC5,
0x95,0x82,0xF3,0x21,
};
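
/*
 * Illustrative sketch only: how a test case would typically consume one of
 * the reference digests above: hash the corresponding message, then memcmp()
 * the result against the reference array. compute_sha256() below is a
 * hypothetical placeholder for whatever SHA-256 implementation the test
 * exercises (it is not a ChibiOS HAL function), and the block is kept inside
 * #if 0 so this reference-data file compiles unchanged.
 */
#if 0
#include <string.h>
#include <stdbool.h>

/* Hypothetical one-shot SHA-256: writes the 32-byte digest of msg[0..len). */
extern void compute_sha256(const uint8_t *msg, size_t len, uint8_t digest[32]);

static bool check_sha256_abc(void) {
  static const uint8_t msg[3] = {'a', 'b', 'c'};
  uint8_t digest[32];

  compute_sha256(msg, sizeof msg, digest);          /* hash the 3-byte message "abc"   */
  return memcmp(digest, refSHA_SHA256_3,
                sizeof digest) == 0;                /* must equal the reference digest */
}
#endif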