/*
Copyright 2011 Jun Wako <wakojun@gmail.com>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <stdint.h>
#include "keycode.h"
#include "host.h"
#include "timer.h"
#include "print.h"
#include "debug.h"
#include "mousekey.h"

inline int8_t times_inv_sqrt2(int8_t x) {
    // 181/256 is pretty close to 1/sqrt(2)
    // 0.70703125                 0.707106781
    // 1 too small for x=99 and x=198
    // This ends up being a multiply, then discarding the lower 8 bits
    return (x * 181) >> 8;
}
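
/* Worked example (illustrative): times_inv_sqrt2(100) = (100 * 181) >> 8 = 18100 >> 8 = 70,
 * close to the exact 100 / sqrt(2) ~= 70.7.  The slight downward bias is harmless here
 * because the cursor-movement callers below bump a result of 0 back up to 1. */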

static report_mouse_t mouse_report = {0};
static void           mousekey_debug(void);
static uint8_t        mousekey_accel  = 0;
static uint8_t        mousekey_repeat = 0;
static uint16_t       last_timer      = 0;

#ifndef MK_3_SPEED

/*
 * Mouse keys acceleration algorithm
 *  http://en.wikipedia.org/wiki/Mouse_keys
 *
 *  speed = delta * max_speed * (repeat / time_to_max)**((1000+curve)/1000)
 */
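/*
 * Worked example with illustrative values (the real defaults come from mousekey.h and may
 * differ): with MOUSEKEY_MOVE_DELTA = 5, mk_max_speed = 10 and mk_time_to_max = 20, the
 * linear ramp in move_unit() yields (5 * 10 * 8) / 20 = 20 at the 8th repeat and reaches
 * the steady value 5 * 10 = 50 once mousekey_repeat hits mk_time_to_max (always clamped
 * to MOUSEKEY_MOVE_MAX).  The (1000+curve)/1000 exponent above is not implemented; see
 * the commented-out mk_curve below.
 */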
/* milliseconds between the initial key press and first repeated motion event (0-2550) */
uint8_t mk_delay = MOUSEKEY_DELAY / 10;
/* milliseconds between repeated motion events (0-255) */
uint8_t mk_interval = MOUSEKEY_INTERVAL;
/* steady speed (in action_delta units) applied each event (0-255) */
uint8_t mk_max_speed = MOUSEKEY_MAX_SPEED;
/* number of events (count) accelerating to steady speed (0-255) */
uint8_t mk_time_to_max = MOUSEKEY_TIME_TO_MAX;
/* ramp used to reach maximum pointer speed (NOT SUPPORTED) */
// int8_t mk_curve = 0;
/* wheel params */
uint8_t mk_wheel_max_speed   = MOUSEKEY_WHEEL_MAX_SPEED;
uint8_t mk_wheel_time_to_max = MOUSEKEY_WHEEL_TIME_TO_MAX;

static uint8_t move_unit(void) {
    uint16_t unit;
    if (mousekey_accel & (1 << 0)) {
        unit = (MOUSEKEY_MOVE_DELTA * mk_max_speed) / 4;
    } else if (mousekey_accel & (1 << 1)) {
        unit = (MOUSEKEY_MOVE_DELTA * mk_max_speed) / 2;
    } else if (mousekey_accel & (1 << 2)) {
        unit = (MOUSEKEY_MOVE_DELTA * mk_max_speed);
    } else if (mousekey_repeat == 0) {
        unit = MOUSEKEY_MOVE_DELTA;
    } else if (mousekey_repeat >= mk_time_to_max) {
        unit = MOUSEKEY_MOVE_DELTA * mk_max_speed;
    } else {
        unit = (MOUSEKEY_MOVE_DELTA * mk_max_speed * mousekey_repeat) / mk_time_to_max;
    }
    return (unit > MOUSEKEY_MOVE_MAX ? MOUSEKEY_MOVE_MAX : (unit == 0 ? 1 : unit));
}

static uint8_t wheel_unit(void) {
    uint16_t unit;
    if (mousekey_accel & (1 << 0)) {
        unit = (MOUSEKEY_WHEEL_DELTA * mk_wheel_max_speed) / 4;
    } else if (mousekey_accel & (1 << 1)) {
        unit = (MOUSEKEY_WHEEL_DELTA * mk_wheel_max_speed) / 2;
    } else if (mousekey_accel & (1 << 2)) {
        unit = (MOUSEKEY_WHEEL_DELTA * mk_wheel_max_speed);
    } else if (mousekey_repeat == 0) {
        unit = MOUSEKEY_WHEEL_DELTA;
    } else if (mousekey_repeat >= mk_wheel_time_to_max) {
        unit = MOUSEKEY_WHEEL_DELTA * mk_wheel_max_speed;
    } else {
        unit = (MOUSEKEY_WHEEL_DELTA * mk_wheel_max_speed * mousekey_repeat) / mk_wheel_time_to_max;
    }
    return (unit > MOUSEKEY_WHEEL_MAX ? MOUSEKEY_WHEEL_MAX : (unit == 0 ? 1 : unit));
}

void mousekey_task(void) {
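    /*
     * Periodic driver, called from the firmware's main loop.  The first repeat waits
     * mk_delay * 10 ms after the last report was sent; subsequent repeats go out every
     * mk_interval ms, while mousekey_repeat counts events so move_unit()/wheel_unit()
     * can ramp the speed up.
     */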
    if (timer_elapsed(last_timer) < (mousekey_repeat ? mk_interval : mk_delay * 10)) {
        return;
    }
    if (mouse_report.x == 0 && mouse_report.y == 0 && mouse_report.v == 0 && mouse_report.h == 0) {
        return;
    }
    if (mousekey_repeat != UINT8_MAX) mousekey_repeat++;
    if (mouse_report.x > 0) mouse_report.x = move_unit();
    if (mouse_report.x < 0) mouse_report.x = move_unit() * -1;
    if (mouse_report.y > 0) mouse_report.y = move_unit();
    if (mouse_report.y < 0) mouse_report.y = move_unit() * -1;
    /* diagonal move [1/sqrt(2)] */
    if (mouse_report.x && mouse_report.y) {
        mouse_report.x = times_inv_sqrt2(mouse_report.x);
        if (mouse_report.x == 0) {
            mouse_report.x = 1;
        }
        mouse_report.y = times_inv_sqrt2(mouse_report.y);
        if (mouse_report.y == 0) {
            mouse_report.y = 1;
        }
    }
    if (mouse_report.v > 0) mouse_report.v = wheel_unit();
    if (mouse_report.v < 0) mouse_report.v = wheel_unit() * -1;
    if (mouse_report.h > 0) mouse_report.h = wheel_unit();
    if (mouse_report.h < 0) mouse_report.h = wheel_unit() * -1;
    mousekey_send();
}

void mousekey_on(uint8_t code) {
    if (code == KC_MS_UP)
        mouse_report.y = move_unit() * -1;
    else if (code == KC_MS_DOWN)
        mouse_report.y = move_unit();
    else if (code == KC_MS_LEFT)
        mouse_report.x = move_unit() * -1;
    else if (code == KC_MS_RIGHT)
        mouse_report.x = move_unit();
    else if (code == KC_MS_WH_UP)
        mouse_report.v = wheel_unit();
    else if (code == KC_MS_WH_DOWN)
        mouse_report.v = wheel_unit() * -1;
    else if (code == KC_MS_WH_LEFT)
        mouse_report.h = wheel_unit() * -1;
    else if (code == KC_MS_WH_RIGHT)
        mouse_report.h = wheel_unit();
    else if (code == KC_MS_BTN1)
        mouse_report.buttons |= MOUSE_BTN1;
    else if (code == KC_MS_BTN2)
        mouse_report.buttons |= MOUSE_BTN2;
    else if (code == KC_MS_BTN3)
        mouse_report.buttons |= MOUSE_BTN3;
    else if (code == KC_MS_BTN4)
        mouse_report.buttons |= MOUSE_BTN4;
    else if (code == KC_MS_BTN5)
        mouse_report.buttons |= MOUSE_BTN5;
    else if (code == KC_MS_ACCEL0)
        mousekey_accel |= (1 << 0);
    else if (code == KC_MS_ACCEL1)
        mousekey_accel |= (1 << 1);
    else if (code == KC_MS_ACCEL2)
        mousekey_accel |= (1 << 2);
}

void mousekey_off(uint8_t code) {
    if (code == KC_MS_UP && mouse_report.y < 0)
        mouse_report.y = 0;
    else if (code == KC_MS_DOWN && mouse_report.y > 0)
        mouse_report.y = 0;
    else if (code == KC_MS_LEFT && mouse_report.x < 0)
        mouse_report.x = 0;
    else if (code == KC_MS_RIGHT && mouse_report.x > 0)
        mouse_report.x = 0;
    else if (code == KC_MS_WH_UP && mouse_report.v > 0)
        mouse_report.v = 0;
    else if (code == KC_MS_WH_DOWN && mouse_report.v < 0)
        mouse_report.v = 0;
    else if (code == KC_MS_WH_LEFT && mouse_report.h < 0)
        mouse_report.h = 0;
    else if (code == KC_MS_WH_RIGHT && mouse_report.h > 0)
        mouse_report.h = 0;
    else if (code == KC_MS_BTN1)
        mouse_report.buttons &= ~MOUSE_BTN1;
    else if (code == KC_MS_BTN2)
        mouse_report.buttons &= ~MOUSE_BTN2;
    else if (code == KC_MS_BTN3)
        mouse_report.buttons &= ~MOUSE_BTN3;
    else if (code == KC_MS_BTN4)
        mouse_report.buttons &= ~MOUSE_BTN4;
    else if (code == KC_MS_BTN5)
        mouse_report.buttons &= ~MOUSE_BTN5;
    else if (code == KC_MS_ACCEL0)
        mousekey_accel &= ~(1 << 0);
    else if (code == KC_MS_ACCEL1)
        mousekey_accel &= ~(1 << 1);
    else if (code == KC_MS_ACCEL2)
        mousekey_accel &= ~(1 << 2);
    if (mouse_report.x == 0 && mouse_report.y == 0 && mouse_report.v == 0 && mouse_report.h == 0) mousekey_repeat = 0;
}

#else /* #ifndef MK_3_SPEED */
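
/*
 * Constant-speed mode: instead of accelerating, cursor and wheel move by a fixed
 * per-report offset chosen from one of four settings (unmodified, ACCEL0, ACCEL1,
 * ACCEL2).  The tables below hold the offset and repeat interval for each setting;
 * cursor and wheel are timed independently.
 */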

enum { mkspd_unmod, mkspd_0, mkspd_1, mkspd_2, mkspd_COUNT };
#    ifndef MK_MOMENTARY_ACCEL
static uint8_t  mk_speed                 = mkspd_1;
#    else
static uint8_t mk_speed      = mkspd_unmod;
static uint8_t mkspd_DEFAULT = mkspd_unmod;
#    endif
static uint16_t last_timer_c             = 0;
static uint16_t last_timer_w             = 0;
uint16_t        c_offsets[mkspd_COUNT]   = {MK_C_OFFSET_UNMOD, MK_C_OFFSET_0, MK_C_OFFSET_1, MK_C_OFFSET_2};
uint16_t        c_intervals[mkspd_COUNT] = {MK_C_INTERVAL_UNMOD, MK_C_INTERVAL_0, MK_C_INTERVAL_1, MK_C_INTERVAL_2};
uint16_t        w_offsets[mkspd_COUNT]   = {MK_W_OFFSET_UNMOD, MK_W_OFFSET_0, MK_W_OFFSET_1, MK_W_OFFSET_2};
uint16_t        w_intervals[mkspd_COUNT] = {MK_W_INTERVAL_UNMOD, MK_W_INTERVAL_0, MK_W_INTERVAL_1, MK_W_INTERVAL_2};

void mousekey_task(void) {
    // report cursor and scroll movement independently
    report_mouse_t const tmpmr = mouse_report;
    if ((mouse_report.x || mouse_report.y) && timer_elapsed(last_timer_c) > c_intervals[mk_speed]) {
        mouse_report.h = 0;
        mouse_report.v = 0;
        mousekey_send();
        last_timer_c = last_timer;
        mouse_report = tmpmr;
    }
    if ((mouse_report.h || mouse_report.v) && timer_elapsed(last_timer_w) > w_intervals[mk_speed]) {
        mouse_report.x = 0;
        mouse_report.y = 0;
        mousekey_send();
        last_timer_w = last_timer;
        mouse_report = tmpmr;
    }
}

void adjust_speed(void) {
    uint16_t const c_offset = c_offsets[mk_speed];
    uint16_t const w_offset = w_offsets[mk_speed];
    if (mouse_report.x > 0) mouse_report.x = c_offset;
    if (mouse_report.x < 0) mouse_report.x = c_offset * -1;
    if (mouse_report.y > 0) mouse_report.y = c_offset;
    if (mouse_report.y < 0) mouse_report.y = c_offset * -1;
    if (mouse_report.h > 0) mouse_report.h = w_offset;
    if (mouse_report.h < 0) mouse_report.h = w_offset * -1;
    if (mouse_report.v > 0) mouse_report.v = w_offset;
    if (mouse_report.v < 0) mouse_report.v = w_offset * -1;
    // adjust for diagonals
    if (mouse_report.x && mouse_report.y) {
        mouse_report.x = times_inv_sqrt2(mouse_report.x);
        if (mouse_report.x == 0) {
            mouse_report.x = 1;
        }
        mouse_report.y = times_inv_sqrt2(mouse_report.y);
        if (mouse_report.y == 0) {
            mouse_report.y = 1;
        }
    }
    if (mouse_report.h && mouse_report.v) {
        mouse_report.h = times_inv_sqrt2(mouse_report.h);
        mouse_report.v = times_inv_sqrt2(mouse_report.v);
    }
}

void mousekey_on(uint8_t code) {
    uint16_t const c_offset  = c_offsets[mk_speed];
    uint16_t const w_offset  = w_offsets[mk_speed];
    uint8_t const  old_speed = mk_speed;
    if (code == KC_MS_UP)
        mouse_report.y = c_offset * -1;
    else if (code == KC_MS_DOWN)
        mouse_report.y = c_offset;
    else if (code == KC_MS_LEFT)
        mouse_report.x = c_offset * -1;
    else if (code == KC_MS_RIGHT)
        mouse_report.x = c_offset;
    else if (code == KC_MS_WH_UP)
        mouse_report.v = w_offset;
    else if (code == KC_MS_WH_DOWN)
        mouse_report.v = w_offset * -1;
    else if (code == KC_MS_WH_LEFT)
        mouse_report.h = w_offset * -1;
    else if (code == KC_MS_WH_RIGHT)
        mouse_report.h = w_offset;
    else if (code == KC_MS_BTN1)
        mouse_report.buttons |= MOUSE_BTN1;
    else if (code == KC_MS_BTN2)
        mouse_report.buttons |= MOUSE_BTN2;
    else if (code == KC_MS_BTN3)
        mouse_report.buttons |= MOUSE_BTN3;
    else if (code == KC_MS_BTN4)
        mouse_report.buttons |= MOUSE_BTN4;
    else if (code == KC_MS_BTN5)
        mouse_report.buttons |= MOUSE_BTN5;
    else if (code == KC_MS_ACCEL0)
        mk_speed = mkspd_0;
    else if (code == KC_MS_ACCEL1)
        mk_speed = mkspd_1;
    else if (code == KC_MS_ACCEL2)
        mk_speed = mkspd_2;
    if (mk_speed != old_speed) adjust_speed();
}

void mousekey_off(uint8_t code) {
#    ifdef MK_MOMENTARY_ACCEL
    uint8_t const old_speed = mk_speed;
#    endif
    if (code == KC_MS_UP && mouse_report.y < 0)
        mouse_report.y = 0;
    else if (code == KC_MS_DOWN && mouse_report.y > 0)
        mouse_report.y = 0;
    else if (code == KC_MS_LEFT && mouse_report.x < 0)
        mouse_report.x = 0;
    else if (code == KC_MS_RIGHT && mouse_report.x > 0)
        mouse_report.x = 0;
    else if (code == KC_MS_WH_UP && mouse_report.v > 0)
        mouse_report.v = 0;
    else if (code == KC_MS_WH_DOWN && mouse_report.v < 0)
        mouse_report.v = 0;
    else if (code == KC_MS_WH_LEFT && mouse_report.h < 0)
        mouse_report.h = 0;
    else if (code == KC_MS_WH_RIGHT && mouse_report.h > 0)
        mouse_report.h = 0;
    else if (code == KC_MS_BTN1)
        mouse_report.buttons &= ~MOUSE_BTN1;
    else if (code == KC_MS_BTN2)
        mouse_report.buttons &= ~MOUSE_BTN2;
    else if (code == KC_MS_BTN3)
        mouse_report.buttons &= ~MOUSE_BTN3;
    else if (code == KC_MS_BTN4)
        mouse_report.buttons &= ~MOUSE_BTN4;
    else if (code == KC_MS_BTN5)
        mouse_report.buttons &= ~MOUSE_BTN5;
#    ifdef MK_MOMENTARY_ACCEL
    else if (code == KC_MS_ACCEL0)
        mk_speed = mkspd_DEFAULT;
    else if (code == KC_MS_ACCEL1)
        mk_speed = mkspd_DEFAULT;
    else if (code == KC_MS_ACCEL2)
        mk_speed = mkspd_DEFAULT;
    if (mk_speed != old_speed) adjust_speed();
#    endif
}

#endif /* #ifndef MK_3_SPEED */

void mousekey_send(void) {
    mousekey_debug();
    host_mouse_send(&mouse_report);
    last_timer = timer_read();
}

void mousekey_clear(void) {
    mouse_report    = (report_mouse_t){};
    mousekey_repeat = 0;
    mousekey_accel  = 0;
}
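
/*
 * Typical call sequence (sketch; the calls normally come from the TMK/QMK action
 * handling and are shown here only as an illustration):
 *
 *   mousekey_on(KC_MS_RIGHT);   // key pressed: start moving right
 *   mousekey_send();            // push the initial report
 *   ...                         // main loop keeps calling mousekey_task() for repeats
 *   mousekey_off(KC_MS_RIGHT);  // key released: stop the X axis
 *   mousekey_send();
 *   mousekey_clear();           // reset everything, e.g. when keyboard state is cleared
 */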

static void mousekey_debug(void) {
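    /* Emits one line per report when mouse debugging is enabled, e.g.
     *   mousekey [btn|x y v h](rep/acl): [01|8 0 0 0](3/0)
     * buttons in hex, then the x/y/v/h deltas, then the repeat counter and accel bits
     * (values shown are illustrative). */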
    if (!debug_mouse) return;
    print("mousekey [btn|x y v h](rep/acl): [");
    phex(mouse_report.buttons);
    print("|");
    print_decs(mouse_report.x);
    print(" ");
    print_decs(mouse_report.y);
    print(" ");
    print_decs(mouse_report.v);
    print(" ");
    print_decs(mouse_report.h);
    print("](");
    print_dec(mousekey_repeat);
    print("/");
    print_dec(mousekey_accel);
    print(")\n");
}