/*
 * io.c: Handling I/O and interrupts.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if defined (__i386__)
static void set_reg_value (int size, int index, int seg,
                           struct cpu_user_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0: regs->eax &= 0xFFFFFF00; regs->eax |= (value & 0xFF); break;
        case 1: regs->ecx &= 0xFFFFFF00; regs->ecx |= (value & 0xFF); break;
        case 2: regs->edx &= 0xFFFFFF00; regs->edx |= (value & 0xFF); break;
        case 3: regs->ebx &= 0xFFFFFF00; regs->ebx |= (value & 0xFF); break;
        case 4: regs->eax &= 0xFFFF00FF; regs->eax |= ((value & 0xFF) << 8); break;
        case 5: regs->ecx &= 0xFFFF00FF; regs->ecx |= ((value & 0xFF) << 8); break;
        case 6: regs->edx &= 0xFFFF00FF; regs->edx |= ((value & 0xFF) << 8); break;
        case 7: regs->ebx &= 0xFFFF00FF; regs->ebx |= ((value & 0xFF) << 8); break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case WORD:
        switch (index) {
        case 0: regs->eax &= 0xFFFF0000; regs->eax |= (value & 0xFFFF); break;
        case 1: regs->ecx &= 0xFFFF0000; regs->ecx |= (value & 0xFFFF); break;
        case 2: regs->edx &= 0xFFFF0000; regs->edx |= (value & 0xFFFF); break;
        case 3: regs->ebx &= 0xFFFF0000; regs->ebx |= (value & 0xFFFF); break;
        case 4: regs->esp &= 0xFFFF0000; regs->esp |= (value & 0xFFFF); break;
        case 5: regs->ebp &= 0xFFFF0000; regs->ebp |= (value & 0xFFFF); break;
        case 6: regs->esi &= 0xFFFF0000; regs->esi |= (value & 0xFFFF); break;
        case 7: regs->edi &= 0xFFFF0000; regs->edi |= (value & 0xFFFF); break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    case LONG:
        switch (index) {
        case 0: regs->eax = value; break;
        case 1: regs->ecx = value; break;
        case 2: regs->edx = value; break;
        case 3: regs->ebx = value; break;
        case 4: regs->esp = value; break;
        case 5: regs->ebp = value; break;
        case 6: regs->esi = value; break;
        case 7: regs->edi = value; break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        break;
    default:
        printk("Error: size:%x, index:%x are invalid!\n", size, index);
        domain_crash_synchronous();
        break;
    }
}
#else
static inline void __set_reg_value(unsigned long *reg, int size, long value)
{
    switch (size) {
    case BYTE_64:
        *reg &= ~0xFF;
        *reg |= (value & 0xFF);
        break;
    case WORD:
        *reg &= ~0xFFFF;
        *reg |= (value & 0xFFFF);
        break;
    case LONG:
        *reg &= ~0xFFFFFFFF;
        *reg |= (value & 0xFFFFFFFF);
        break;
    case QUAD:
        *reg = value;
        break;
    default:
        printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
        domain_crash_synchronous();
    }
}
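
/*
 * The 'index' argument follows the usual x86 ModRM register numbering.
 * For BYTE-sized accesses (with no REX prefix) indices 0-3 name AL/CL/DL/BL
 * and indices 4-7 name the legacy high-byte registers AH/CH/DH/BH, which is
 * why the BYTE case below patches bits 8-15 of rAX/rCX/rDX/rBX directly
 * instead of going through __set_reg_value().  For example,
 * set_reg_value(BYTE, 4, 0, regs, 0x12) stores 0x12 into the guest's AH.
 */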
static void set_reg_value (int size, int index, int seg,
                           struct cpu_user_regs *regs, long value)
{
    if (size == BYTE) {
        switch (index) {
        case 0: regs->rax &= ~0xFF; regs->rax |= (value & 0xFF); break;
        case 1: regs->rcx &= ~0xFF; regs->rcx |= (value & 0xFF); break;
        case 2: regs->rdx &= ~0xFF; regs->rdx |= (value & 0xFF); break;
        case 3: regs->rbx &= ~0xFF; regs->rbx |= (value & 0xFF); break;
        case 4: regs->rax &= 0xFFFFFFFFFFFF00FF; regs->rax |= ((value & 0xFF) << 8); break;
        case 5: regs->rcx &= 0xFFFFFFFFFFFF00FF; regs->rcx |= ((value & 0xFF) << 8); break;
        case 6: regs->rdx &= 0xFFFFFFFFFFFF00FF; regs->rdx |= ((value & 0xFF) << 8); break;
        case 7: regs->rbx &= 0xFFFFFFFFFFFF00FF; regs->rbx |= ((value & 0xFF) << 8); break;
        default:
            printk("Error: size:%x, index:%x are invalid!\n", size, index);
            domain_crash_synchronous();
            break;
        }
        return;
    }

    switch (index) {
    case 0: __set_reg_value(&regs->rax, size, value); break;
    case 1: __set_reg_value(&regs->rcx, size, value); break;
    case 2: __set_reg_value(&regs->rdx, size, value); break;
    case 3: __set_reg_value(&regs->rbx, size, value); break;
    case 4: __set_reg_value(&regs->rsp, size, value); break;
    case 5: __set_reg_value(&regs->rbp, size, value); break;
    case 6: __set_reg_value(&regs->rsi, size, value); break;
    case 7: __set_reg_value(&regs->rdi, size, value); break;
    case 8: __set_reg_value(&regs->r8, size, value); break;
    case 9: __set_reg_value(&regs->r9, size, value); break;
    case 10: __set_reg_value(&regs->r10, size, value); break;
    case 11: __set_reg_value(&regs->r11, size, value); break;
    case 12: __set_reg_value(&regs->r12, size, value); break;
    case 13: __set_reg_value(&regs->r13, size, value); break;
    case 14: __set_reg_value(&regs->r14, size, value); break;
    case 15: __set_reg_value(&regs->r15, size, value); break;
    default:
        printk("Error: Invalid index\n");
        domain_crash_synchronous();
    }
    return;
}
#endif

extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs);

static inline void set_eflags_CF(int size, unsigned long v1,
                                 unsigned long v2, struct cpu_user_regs *regs)
{
    unsigned long mask = (1 << (8 * size)) - 1;

    if ((v1 & mask) > (v2 & mask))
        regs->eflags |= X86_EFLAGS_CF;
    else
        regs->eflags &= ~X86_EFLAGS_CF;
}

static inline void set_eflags_OF(int size, unsigned long v1,
                                 unsigned long v2, unsigned long v3,
                                 struct cpu_user_regs *regs)
{
    if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1)))
        regs->eflags |= X86_EFLAGS_OF;
}

static inline void set_eflags_AF(int size, unsigned long v1,
                                 unsigned long v2, unsigned long v3,
                                 struct cpu_user_regs *regs)
{
    if ((v1 ^ v2 ^ v3) & 0x10)
        regs->eflags |= X86_EFLAGS_AF;
}

static inline void set_eflags_ZF(int size, unsigned long v1,
                                 struct cpu_user_regs *regs)
{
    unsigned long mask = (1 << (8 * size)) - 1;

    if ((v1 & mask) == 0)
        regs->eflags |= X86_EFLAGS_ZF;
}

static inline void set_eflags_SF(int size, unsigned long v1,
                                 struct cpu_user_regs *regs)
{
    if (v1 & (1 << ((8 * size) - 1)))
        regs->eflags |= X86_EFLAGS_SF;
}
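
/*
 * In brief, the helpers above follow the hardware definitions for a
 * subtraction "result = dst - src", which is how INSTR_CMP uses them below:
 * CF is set when the size-truncated src exceeds dst, i.e. an unsigned
 * borrow occurred; OF uses the identity (dst ^ src) & (dst ^ result) &
 * sign-bit; AF looks at bit 4 of dst ^ src ^ result.  Worked 8-bit example:
 * 0x80 - 0x01 = 0x7F, and (0x80 ^ 0x01) & (0x80 ^ 0x7F) & 0x80 is non-zero,
 * so OF is set, matching the signed overflow of -128 - 1.
 */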
static char parity_table[256] = {
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
    1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};

static inline void set_eflags_PF(int size, unsigned long v1,
                                 struct cpu_user_regs *regs)
{
    if (parity_table[v1 & 0xFF])
        regs->eflags |= X86_EFLAGS_PF;
}

static void hvm_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                           struct hvm_io_op *pio_opp)
{
    unsigned long old_eax;
    int sign = p->df ? -1 : 1;

    if ( p->pdata_valid || (pio_opp->flags & OVERLAP) )
    {
        if ( pio_opp->flags & REPZ )
            regs->ecx -= p->count;

        if ( p->dir == IOREQ_READ )
        {
            regs->edi += sign * p->count * p->size;
            if ( pio_opp->flags & OVERLAP )
            {
                unsigned long addr = regs->edi;
                if (hvm_realmode(current))
                    addr += regs->es << 4;
                if (sign > 0)
                    addr -= p->size;
                hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
            }
        }
        else /* p->dir == IOREQ_WRITE */
        {
            ASSERT(p->dir == IOREQ_WRITE);
            regs->esi += sign * p->count * p->size;
        }
    }
    else if ( p->dir == IOREQ_READ )
    {
        old_eax = regs->eax;
        switch ( p->size )
        {
        case 1:
            regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
            break;
        case 2:
            regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
            break;
        case 4:
            regs->eax = (p->u.data & 0xffffffff);
            break;
        default:
            printk("Error: %s unknown port size\n", __FUNCTION__);
            domain_crash_synchronous();
        }
    }
}
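
/*
 * Note on the PIO completion path above: the device model has already
 * carried out the port accesses, so this code only replays the
 * architectural side effects on guest registers.  For string forms
 * (INS/OUTS, optionally with REP) that means adjusting EDI (for reads) or
 * ESI (for writes) by p->count * p->size in the direction given by
 * EFLAGS.DF, and decrementing the ECX repeat count by p->count; when
 * OVERLAP is set the data read was buffered in p->u.data and is copied
 * back into guest memory with hvm_copy().  A plain IN just merges the
 * returned value into the low 1, 2 or 4 bytes of EAX.
 */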
static void hvm_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
                            struct hvm_io_op *mmio_opp)
{
    int sign = p->df ? -1 : 1;
    int size = -1, index = -1;
    unsigned long value = 0, diff = 0;
    unsigned long src, dst;

    src = mmio_opp->operand[0];
    dst = mmio_opp->operand[1];
    size = operand_size(src);

    switch (mmio_opp->instr) {
    case INSTR_MOV:
        if (dst & REGISTER) {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVZX:
        if (dst & REGISTER) {
            switch (size) {
            case BYTE: p->u.data &= 0xFFULL; break;
            case WORD: p->u.data &= 0xFFFFULL; break;
            case LONG: p->u.data &= 0xFFFFFFFFULL; break;
            default:
                printk("Impossible source operand size of movzx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVSX:
        if (dst & REGISTER) {
            switch (size) {
            case BYTE:
                p->u.data &= 0xFFULL;
                if ( p->u.data & 0x80ULL )
                    p->u.data |= 0xFFFFFFFFFFFFFF00ULL;
                break;
            case WORD:
                p->u.data &= 0xFFFFULL;
                if ( p->u.data & 0x8000ULL )
                    p->u.data |= 0xFFFFFFFFFFFF0000ULL;
                break;
            case LONG:
                p->u.data &= 0xFFFFFFFFULL;
                if ( p->u.data & 0x80000000ULL )
                    p->u.data |= 0xFFFFFFFF00000000ULL;
                break;
            default:
                printk("Impossible source operand size of movsx instr: %d\n", size);
                domain_crash_synchronous();
            }
            index = operand_index(dst);
            set_reg_value(operand_size(dst), index, 0, regs, p->u.data);
        }
        break;

    case INSTR_MOVS:
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        regs->edi += sign * p->count * p->size;

        if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
            unsigned long addr = regs->edi;

            if (sign > 0)
                addr -= p->size;
            hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT);
        }

        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_STOS:
        sign = p->df ? -1 : 1;
        regs->edi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_LODS:
        sign = p->df ? -1 : 1;
        regs->esi += sign * p->count * p->size;
        if (mmio_opp->flags & REPZ)
            regs->ecx -= p->count;
        break;

    case INSTR_AND:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data & value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data & value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_OR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data | value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data | value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_XOR:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data ^ value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data ^ value;
            set_reg_value(size, index, 0, regs, diff);
        }

        /*
         * The OF and CF flags are cleared; the SF, ZF, and PF
         * flags are set according to the result. The state of
         * the AF flag is undefined.
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;
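
    /*
     * A note on the two-operand ALU cases (AND/OR/XOR above, CMP/TEST
     * below): when the source decodes as REGISTER or IMMEDIATE, the MMIO
     * location is the destination, the memory update itself is left to the
     * device model via the ioreq, and only the flags are recomputed here
     * from the returned data.  When the source is MEMORY, the destination
     * is a register, so AND/OR/XOR also write the result back through
     * set_reg_value(); CMP and TEST never write a result, only flags.
     */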
    case INSTR_CMP:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
            diff = (unsigned long) p->u.data - value;
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
            diff = (unsigned long) p->u.data - value;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
            diff = value - (unsigned long) p->u.data;
        }

        /*
         * The CF, OF, SF, ZF, AF, and PF flags are set according
         * to the result
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_CF(size, value, (unsigned long) p->u.data, regs);
        set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_TEST:
        if (src & REGISTER) {
            index = operand_index(src);
            value = get_reg_value(size, index, 0, regs);
        } else if (src & IMMEDIATE) {
            value = mmio_opp->immediate;
        } else if (src & MEMORY) {
            index = operand_index(dst);
            value = get_reg_value(size, index, 0, regs);
        }
        diff = (unsigned long) p->u.data & value;

        /*
         * Sets the SF, ZF, and PF status flags. CF and OF are set to 0
         */
        regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|
                          X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF);
        set_eflags_ZF(size, diff, regs);
        set_eflags_SF(size, diff, regs);
        set_eflags_PF(size, diff, regs);
        break;

    case INSTR_BT:
        index = operand_index(src);
        value = get_reg_value(size, index, 0, regs);

        if (p->u.data & (1 << (value & ((1 << 5) - 1))))
            regs->eflags |= X86_EFLAGS_CF;
        else
            regs->eflags &= ~X86_EFLAGS_CF;
        break;

    case INSTR_XCHG:
        if (src & REGISTER) {
            index = operand_index(src);
            set_reg_value(size, index, 0, regs, p->u.data);
        } else {
            index = operand_index(dst);
            set_reg_value(size, index, 0, regs, p->u.data);
        }
        break;
    }
}

void hvm_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs;
    struct hvm_io_op *io_opp;

    io_opp = &v->arch.hvm_vcpu.io_op;
    regs   = &io_opp->io_context;

    vio = get_vio(v->domain, v->vcpu_id);
    if ( vio == 0 )
    {
        printf("bad shared page: %lx\n", (unsigned long)vio);
        domain_crash_synchronous();
    }

    p = &vio->vp_ioreq;

    /* clear IO wait HVM flag */
    if ( test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
    {
        if ( p->state == STATE_IORESP_READY )
        {
            p->state = STATE_INVALID;
            clear_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags);

            if ( p->type == IOREQ_TYPE_PIO )
                hvm_pio_assist(regs, p, io_opp);
            else
                hvm_mmio_assist(regs, p, io_opp);

            /* Copy register changes back into current guest state. */
            hvm_load_cpu_guest_regs(v, regs);
            memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
        }
        /* else an interrupt send event raced us */
    }
}
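
/*
 * The wait/block helpers below work on the three-level Xen event-channel
 * pending state: a per-port bit in shared_info->evtchn_pending[], the
 * per-vcpu selector evtchn_pending_sel (one bit per word of that array),
 * and the per-vcpu master flag evtchn_upcall_pending.  Each loop clears its
 * view of those flags, re-checks for the I/O completion event, and blocks
 * via SCHEDOP_block until the device model notifies the port returned by
 * iopacket_port().
 */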
/*
 * On exit from hvm_wait_io, we're guaranteed not to be waiting on
 * I/O response from the device model.
 */
void hvm_wait_io(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag, selector flag, event flag each in turn. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        smp_mb__after_clear_bit();
        if ( test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]) )
            hvm_io_assist(v);

        /* Need to wait for I/O responses? */
        if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
            break;

        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /*
     * Re-set the selector and master flags in case any other notifications
     * are pending.
     */
    if ( d->shared_info->evtchn_pending[port/BITS_PER_LONG] )
        set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);

    if ( v->vcpu_info->evtchn_pending_sel )
        v->vcpu_info->evtchn_upcall_pending = 1;
}

void hvm_safe_block(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for ( ; ; )
    {
        /* Clear master flag & selector flag so we will wake from block. */
        v->vcpu_info->evtchn_upcall_pending = 0;
        clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
        smp_mb__after_clear_bit();

        /* Event pending already? */
        if ( test_bit(port, &d->shared_info->evtchn_pending[0]) )
            break;

        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /* Reflect pending event in selector and master flags. */
    set_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
    v->vcpu_info->evtchn_upcall_pending = 1;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */