path: root/tmk_core/protocol/serial.h
/*
Copyright 2012 Jun WAKO <wakojun@gmail.com>

This software is licensed with a Modified BSD License.
All of this is supposed to be Free Software, Open Source, DFSG-free,
GPL-compatible, and OK to use in both free and proprietary applications.
Additions and corrections to this file are welcome.


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in
  the documentation and/or other materials provided with the
  distribution.

* Neither the name of the copyright holders nor the names of
  contributors may be used to endorse or promote products derived
  from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef SERIAL_H
#define SERIAL_H

#include <stdint.h>	/* uint8_t, int16_t */

/* host role */
void serial_init(void);
uint8_t serial_recv(void);      /* received byte; returns 0 when no data, so a real 0x00 is ambiguous */
int16_t serial_recv2(void);     /* received byte, or -1 when no data is available */
void serial_send(uint8_t data);

#endif /* SERIAL_H */
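
/*
 * A minimal usage sketch of the host-role API above (illustrative, not
 * part of this header). It assumes the usual TMK convention that
 * serial_recv2() returns the received byte, or -1 while nothing is
 * pending; serial_echo_loop() itself is a hypothetical example.
 */
#include <stdint.h>
#include "serial.h"

static void serial_echo_loop(void)
{
	serial_init();                        /* must run once before send/recv */
	for (;;) {
		int16_t c = serial_recv2();   /* -1 => no byte pending */
		if (c >= 0)
			serial_send((uint8_t)c); /* echo the byte back */
	}
}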
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
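
/*
 * Background note (added, not in the original header): Xen requires a
 * guest's page tables to be mapped read-only once they are pinned
 * (registered with and validated by the hypervisor). Hence every
 * allocator below makes the fresh page read-only before pinning it,
 * and every free path unpins before making it writable again.
 */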

void make_page_readonly(void *va);
void make_page_writable(void *va);
void make_pages_readonly(void *va, unsigned int nr);
void make_pages_writable(void *va, unsigned int nr);

/* The user pgd lives in the page immediately after the kernel pgd (two
   contiguous pages are allocated in pgd_alloc() below), so the matching
   user entry is exactly PTRS_PER_PGD entries further on. */
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	flush_page_update_queue();
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
	flush_page_update_queue();
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
	flush_page_update_queue();
}

/*
 * We need to use batch mode here, but pgd_populate() won't be
 * called frequently anyway.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
	set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
	flush_page_update_queue();
}

static inline pmd_t *get_pmd(void)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return NULL;
	make_page_readonly(pmd);
	xen_pmd_pin(__pa(pmd));
	flush_page_update_queue();
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	xen_pmd_unpin(__pa(pmd));
	make_page_writable(pmd);
	flush_page_update_queue();
	free_page((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pmd)
		return NULL;
	make_page_readonly(pmd);
	xen_pmd_pin(__pa(pmd));
	flush_page_update_queue();
	return pmd;
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud = (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pud)
		return NULL;
	make_page_readonly(pud);
	xen_pud_pin(__pa(pud));
	flush_page_update_queue();
	return pud;
}

static inline void pud_free(pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	xen_pud_unpin(__pa(pud));
	make_page_writable(pud);
	flush_page_update_queue();
	free_page((unsigned long)pud);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/*
	 * We allocate two contiguous pages: one for the kernel pgd,
	 * one for the user pgd.
	 */
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);

	if (!pgd)
		return NULL;
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of these because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
	make_pages_readonly(pgd, 2);

	xen_pgd_pin(__pa(pgd));             /* kernel */
	xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */
	/*
	 * Set level3_user_pgt for the vsyscall area.
	 */
	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
	flush_page_update_queue();
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	xen_pgd_unpin(__pa(pgd));
	xen_pgd_unpin(__pa(__user_pgd(pgd)));
	make_pages_writable(pgd, 2);
	flush_page_update_queue();
	free_pages((unsigned long)pgd, 1);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pte)
		return NULL;
	make_page_readonly(pte);
	xen_pte_pin(__pa(pte));
	flush_page_update_queue();
	return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pte)
		return NULL;
	make_page_readonly(pte);
	xen_pte_pin(__pa(pte));
	flush_page_update_queue();
	return virt_to_page(pte);
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page; see the sketch below. */

static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	xen_pte_unpin(__pa(pte));
	make_page_writable(pte);
	flush_page_update_queue();
	free_page((unsigned long)pte);
}

extern void pte_free(struct page *pte);
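
/*
 * A minimal sketch of the refcount idea from the comment above, assuming
 * struct page's existing reference count can be borrowed via get_page()
 * and put_page_testzero(). The helper names are hypothetical; nothing
 * here is part of the real interface.
 */
static inline void pte_page_get(struct page *pte)
{
	get_page(pte);			/* another user of this pte page */
}

static inline void pte_page_put(struct page *pte)
{
	if (put_page_testzero(pte))	/* last reference dropped */
		pte_free(pte);		/* unpin and hand the page back */
}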

//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 

#define __pte_free_tlb(tlb,x)   pte_free((x))
#define __pmd_free_tlb(tlb,x)   pmd_free((x))
#define __pud_free_tlb(tlb,x)   pud_free((x))

#endif /* _X86_64_PGALLOC_H */
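
/*
 * Hypothetical lifecycle sketch (example code, not part of this header):
 * how the helpers above chain together when wiring one address into a
 * fresh page-table tree. Error unwinding is elided, so partial failures
 * leak; a real caller would unpin and free what it already allocated.
 */
static inline pgd_t *example_mm_pagetable_init(struct mm_struct *mm,
					       unsigned long addr)
{
	pgd_t *pgd = pgd_alloc(mm);		/* pinned, kernel part copied */
	pud_t *pud = pud_alloc_one(mm, addr);	/* zeroed, read-only, pinned */
	pmd_t *pmd = pmd_alloc_one(mm, addr);	/* zeroed, read-only, pinned */

	if (!pgd || !pud || !pmd)
		return NULL;
	pgd_populate(mm, pgd + pgd_index(addr), pud);	/* also mirrors into the user pgd */
	pud_populate(mm, pud + pud_index(addr), pmd);
	return pgd;
}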