path: root/quantum/process_keycode/process_tap_dance.c
blob: 83378069122ec2f700c0b3aecadd80e4b766cd51 (plain)
/* Copyright 2016 Jack Humbert
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "quantum.h"
#include "action_tapping.h"

#ifndef NO_ACTION_ONESHOT
uint8_t get_oneshot_mods(void);
#endif

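/* The tap dance keycode seen on the most recent key press, and the highest
 * tap dance index handled so far (-1 until the first tap dance key is
 * processed). */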
static uint16_t last_td;
static int8_t highest_td = -1;

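/* Callbacks for tap dance pairs (two plain keycodes, as set up by
 * ACTION_TAP_DANCE_DOUBLE): on the second tap, register kc2 immediately and
 * mark the dance finished. */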
void qk_tap_dance_pair_on_each_tap (qk_tap_dance_state_t *state, void *user_data) {
  qk_tap_dance_pair_t *pair = (qk_tap_dance_pair_t *)user_data;

  if (state->count == 2) {
    register_code16 (pair->kc2);
    state->finished = true;
  }
}

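/* When the dance finishes, register kc1 after a single tap or kc2 after a
 * double tap. */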
void qk_tap_dance_pair_finished (qk_tap_dance_state_t *state, void *user_data) {
  qk_tap_dance_pair_t *pair = (qk_tap_dance_pair_t *)user_data;

  if (state->count == 1) {
    register_code16 (pair->kc1);
  } else if (state->count == 2) {
    register_code16 (pair->kc2);
  }
}

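/* When the dance is reset, release whichever keycode was registered. */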
void qk_tap_dance_pair_reset (qk_tap_dance_state_t *state, void *user_data) {
  qk_tap_dance_pair_t *pair = (qk_tap_dance_pair_t *)user_data;

  if (state->count == 1) {
    unregister_code16 (pair->kc1);
  } else if (state->count == 2) {
    unregister_code16 (pair->kc2);
  }
}

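/* Callbacks for dual-role tap dances (a keycode plus a layer, as set up by
 * ACTION_TAP_DANCE_DUAL_ROLE): a single tap sends the keycode, a double tap
 * switches to the configured layer via layer_move(). */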
void qk_tap_dance_dual_role_on_each_tap (qk_tap_dance_state_t *state, void *user_data) {
  qk_tap_dance_dual_role_t *pair = (qk_tap_dance_dual_role_t *)user_data;

  if (state->count == 2) {
    layer_move (pair->layer);
    state->finished = true;
  }
}

void qk_tap_dance_dual_role_finished (qk_tap_dance_state_t *state, void *user_data) {
  qk_tap_dance_dual_role_t *pair = (qk_tap_dance_dual_role_t *)user_data;

  if (state->count == 1) {
    register_code16 (pair->kc);
  } else if (state->count == 2) {
    layer_move (pair->layer);
  }
}

void qk_tap_dance_dual_role_reset (qk_tap_dance_state_t *state, void *user_data) {
  qk_tap_dance_dual_role_t *pair = (qk_tap_dance_dual_role_t *)user_data;

  if (state->count == 1) {
    unregister_code16 (pair->kc);
  }
}

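/* Internal dispatch helper: run a user callback only if one is set. */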
static inline void _process_tap_dance_action_fn (qk_tap_dance_state_t *state,
                                                 void *user_data,
                                                 qk_tap_dance_user_fn_t fn)
{
  if (fn) {
    fn(state, user_data);
  }
}

static inline void process_tap_dance_action_on_each_tap (qk_tap_dance_action_t *action)
{
  _process_tap_dance_action_fn (&action->state, action->user_data, action->fn.on_each_tap);
}

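/* Finish a dance at most once: re-apply the one-shot and weak mods captured
 * when the key was pressed, then run the user's on_dance_finished callback. */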
static inline void process_tap_dance_action_on_dance_finished (qk_tap_dance_action_t *action)
{
  if (action->state.finished)
    return;
  action->state.finished = true;
  add_mods(action->state.oneshot_mods);
  add_weak_mods(action->state.weak_mods);
  send_keyboard_report();
  _process_tap_dance_action_fn (&action->state, action->user_data, action->fn.on_dance_finished);
}

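/* Run the user's on_reset callback, then drop the mods that were re-applied
 * when the dance finished. */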
static inline void process_tap_dance_action_on_reset (qk_tap_dance_action_t *action)
{
  _process_tap_dance_action_fn (&action->state, action->user_data, action->fn.on_reset);
  del_mods(action->state.oneshot_mods);
  del_weak_mods(action->state.weak_mods);
  send_keyboard_report();
}

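/* Runs before normal keycode handling for each key event. On a key press,
 * any in-progress tap dance that this key does not continue is marked
 * interrupted, finished immediately, and reset. */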
void preprocess_tap_dance(uint16_t keycode, keyrecord_t *record) {
  qk_tap_dance_action_t *action;

  if (!record->event.pressed)
    return;

  if (highest_td == -1)
    return;

  for (int i = 0; i <= highest_td; i++) {
    action = &tap_dance_actions[i];
    if (action->state.count) {
      if (keycode == action->state.keycode && keycode == last_td)
        continue;
      action->state.interrupted = true;
      process_tap_dance_action_on_dance_finished (action);
      reset_tap_dance (&action->state);
    }
  }
}

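/* Main handler for TD() keycodes. On press: record the keycode, bump the tap
 * count, restart the timer, snapshot the active modifiers (real, weak and
 * one-shot) and run the on_each_tap callback. On release: reset the dance if
 * it has already finished. */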
bool process_tap_dance(uint16_t keycode, keyrecord_t *record) {
  uint16_t idx = keycode - QK_TAP_DANCE;
  qk_tap_dance_action_t *action;

  switch(keycode) {
  case QK_TAP_DANCE ... QK_TAP_DANCE_MAX:
    if ((int16_t)idx > highest_td)
      highest_td = idx;
    action = &tap_dance_actions[idx];

    action->state.pressed = record->event.pressed;
    if (record->event.pressed) {
      action->state.keycode = keycode;
      action->state.count++;
      action->state.timer = timer_read();
#ifndef NO_ACTION_ONESHOT
      action->state.oneshot_mods = get_oneshot_mods();
#else
      action->state.oneshot_mods = 0;
#endif
      action->state.weak_mods = get_mods();
      action->state.weak_mods |= get_weak_mods();
      process_tap_dance_action_on_each_tap (action);

      last_td = keycode;
    } else {
      if (action->state.count && action->state.finished) {
        reset_tap_dance (&action->state);
      }
    }

    break;
  }

  return true;
}

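/* Called from the matrix scan loop: finish and reset any dance whose tapping
 * term (per-action custom_tapping_term, or the global TAPPING_TERM) has
 * elapsed since the last tap. */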
void matrix_scan_tap_dance(void) {
  if (highest_td == -1)
    return;
  uint16_t tap_user_defined;

  for (uint8_t i = 0; i <= highest_td; i++) {
    qk_tap_dance_action_t *action = &tap_dance_actions[i];
    if (action->custom_tapping_term > 0) {
      tap_user_defined = action->custom_tapping_term;
    } else {
      tap_user_defined = TAPPING_TERM;
    }
    if (action->state.count && timer_elapsed (action->state.timer) > tap_user_defined) {
      process_tap_dance_action_on_dance_finished (action);
      reset_tap_dance (&action->state);
    }
  }
}

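/* Reset a dance once its key has been released: run the reset path and clear
 * the per-dance state. */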
void reset_tap_dance (qk_tap_dance_state_t *state) {
  qk_tap_dance_action_t *action;

  if (state->pressed)
    return;

  action = &tap_dance_actions[state->keycode - QK_TAP_DANCE];

  process_tap_dance_action_on_reset (action);

  state->count = 0;
  state->interrupted = false;
  state->finished = false;
  last_td = 0;
}
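
/*
 * Keymap-side usage, for reference (a minimal sketch; the enum constant and
 * keycodes below are illustrative and not defined in this file). A keymap
 * declares tap_dance_actions[] and places TD() keycodes in its layout:
 *
 *   enum { TD_ESC_CAPS = 0 };
 *
 *   qk_tap_dance_action_t tap_dance_actions[] = {
 *     // Tap once for Escape, tap twice for Caps Lock.
 *     [TD_ESC_CAPS] = ACTION_TAP_DANCE_DOUBLE(KC_ESC, KC_CAPS),
 *   };
 *
 *   // ...then use TD(TD_ESC_CAPS) in place of a normal keycode in the keymap.
 */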