author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>    2005-03-10 14:53:08 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>    2005-03-10 14:53:08 +0000
commit     6c23763e5e9561ec315bb7de1a4426f9abd05291 (patch)
tree       8b0501eeafb6f97acba3224da21dd140c1c33003
parent     15f6b230e351fa64b380ca1986724812c5b70a0f (diff)
download   xen-6c23763e5e9561ec315bb7de1a4426f9abd05291.tar.gz
           xen-6c23763e5e9561ec315bb7de1a4426f9abd05291.tar.bz2
           xen-6c23763e5e9561ec315bb7de1a4426f9abd05291.zip
bitkeeper revision 1.1159.258.37 (42305f54a6Tlb_891y3QBjoIVcCt3Q)
Fix balloon driver in highmem builds -- must forcibly flush the kmap
cache before relinquishing pages. This requires changes to generic
files highmem.[ch].
Signed-off-by: Keir Fraser <keir.fraser@cl.cam.ac.uk>
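For orientation, the ordering that the balloon.c hunk below establishes when returning pages to Xen is roughly the following. This is a condensed sketch, not a drop-in replacement: it uses the helpers visible in the diff, omits locking and the CONFIG_XEN_SCRUB_PAGES conditionals, and balloon_next_page() is a hypothetical stand-in for the page-selection code not shown in the hunk.

    /* Condensed sketch of balloon_process() after this patch (kernel/Xen
     * context assumed; balloon_next_page() is a hypothetical placeholder). */
    for (i = 0; i < debt; i++) {
        struct page *page = balloon_next_page();      /* hypothetical: pick a page to give up */
        unsigned long pfn = page - mem_map;

        mfn_list[i] = phys_to_machine_mapping[pfn];   /* remember the machine frame */

        if (!PageHighMem(page)) {
            /* Lowmem: scrub via the direct mapping and queue a PTE clear. */
            void *v = phys_to_virt(pfn << PAGE_SHIFT);
            scrub_pages(v, 1);
            queue_l1_entry_update(get_ptep((unsigned long)v), 0);
        } else {
            /* Highmem: scrub via kmap(); the persistent kmap cache may still
             * hold a mapping of this page after kunmap(). */
            void *v = kmap(page);
            scrub_pages(v, 1);
            kunmap(page);
        }
    }

    kmap_flush_unused();    /* new: forcibly drop cached kmap entries for these pages */
    xen_tlb_flush();        /* push queued PTE updates through and flush the TLB */

    /* Only now, with no mappings left, invalidate P2M entries and reclaim. */
    for (i = 0; i < debt; i++) {
        unsigned long pfn = mfn_to_pfn(mfn_list[i]);
        phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
        balloon_append(pfn_to_page(pfn));
    }

    if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, mfn_list, debt, 0) != debt)
        BUG();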
-rw-r--r--   .rootkeys                                                |   3
-rw-r--r--   linux-2.4.29-xen-sparse/include/linux/highmem.h          | 137
-rw-r--r--   linux-2.4.29-xen-sparse/mm/highmem.c                     |   7
-rw-r--r--   linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c    |  19
-rw-r--r--   linux-2.6.10-xen-sparse/include/linux/highmem.h          |  95
-rw-r--r--   linux-2.6.10-xen-sparse/mm/highmem.c                     | 614
6 files changed, 870 insertions, 5 deletions
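The generic change the commit message refers to is a small new primitive in mm/highmem.c, added identically to the 2.4 and 2.6 sparse trees (the full hunks follow): it takes kmap_lock and retires every persistent-kmap slot whose pkmap_count is exactly 1, i.e. slots that have no users but have not been unmapped and TLB-flushed since their last use.

    void kmap_flush_unused(void)
    {
            /* Unmap every pkmap slot with no remaining users (pkmap_count == 1)
             * and flush the kernel TLB range, so no stale cached mapping of a
             * ballooned highmem page can survive. */
            spin_lock(&kmap_lock);
            flush_all_zero_pkmaps();
            spin_unlock(&kmap_lock);
    }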
@@ -111,6 +111,7 @@ 3f1056a9L_kqHcFheV00KbKBzv9j5w linux-2.4.29-xen-sparse/include/asm-xen/vga.h 40659defgWA92arexpMGn8X3QMDj3w linux-2.4.29-xen-sparse/include/asm-xen/xor.h 3f056927gMHl7mWB89rb73JahbhQIA linux-2.4.29-xen-sparse/include/linux/blk.h +42305f54mFScQCttpj57EIm60BnxIg linux-2.4.29-xen-sparse/include/linux/highmem.h 419e0488SBzS3mdUhwgsES5a5e3abA linux-2.4.29-xen-sparse/include/linux/irq.h 4124f66fPHG6yvB_vXmesjvzrJ3yMg linux-2.4.29-xen-sparse/include/linux/mm.h 401c0590D_kwJDU59X8NyvqSv_Cl2A linux-2.4.29-xen-sparse/include/linux/sched.h @@ -247,10 +248,12 @@ 4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.10-xen-sparse/include/asm-xen/queues.h 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.10-xen-sparse/include/asm-xen/xen_proc.h 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.10-xen-sparse/include/linux/gfp.h +42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.10-xen-sparse/include/linux/highmem.h 419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.10-xen-sparse/include/linux/irq.h 4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.10-xen-sparse/include/linux/skbuff.h 419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.10-xen-sparse/kernel/irq/manage.c 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.10-xen-sparse/mkbuildtree +42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.10-xen-sparse/mm/highmem.c 412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.10-xen-sparse/mm/memory.c 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.10-xen-sparse/mm/page_alloc.c 41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.10-xen-sparse/net/core/skbuff.c diff --git a/linux-2.4.29-xen-sparse/include/linux/highmem.h b/linux-2.4.29-xen-sparse/include/linux/highmem.h new file mode 100644 index 0000000000..7dab94f5eb --- /dev/null +++ b/linux-2.4.29-xen-sparse/include/linux/highmem.h @@ -0,0 +1,137 @@ +#ifndef _LINUX_HIGHMEM_H +#define _LINUX_HIGHMEM_H + +#include <linux/config.h> +#include <asm/pgalloc.h> + +#ifdef CONFIG_HIGHMEM + +extern struct page *highmem_start_page; + +#include <asm/highmem.h> + +/* declarations for linux/mm/highmem.c */ +unsigned int nr_free_highpages(void); +void kmap_flush_unused(void); + +extern struct buffer_head *create_bounce(int rw, struct buffer_head * bh_orig); + +static inline char *bh_kmap(struct buffer_head *bh) +{ + return kmap(bh->b_page) + bh_offset(bh); +} + +static inline void bh_kunmap(struct buffer_head *bh) +{ + kunmap(bh->b_page); +} + +/* + * remember to add offset! and never ever reenable interrupts between a + * bh_kmap_irq and bh_kunmap_irq!! 
+ */ +static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags) +{ + unsigned long addr; + + __save_flags(*flags); + + /* + * could be low + */ + if (!PageHighMem(bh->b_page)) + return bh->b_data; + + /* + * it's a highmem page + */ + __cli(); + addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ); + + if (addr & ~PAGE_MASK) + BUG(); + + return (char *) addr + bh_offset(bh); +} + +static inline void bh_kunmap_irq(char *buffer, unsigned long *flags) +{ + unsigned long ptr = (unsigned long) buffer & PAGE_MASK; + + kunmap_atomic((void *) ptr, KM_BH_IRQ); + __restore_flags(*flags); +} + +#else /* CONFIG_HIGHMEM */ + +static inline unsigned int nr_free_highpages(void) { return 0; } +static inline void kmap_flush_unused(void) { } + +static inline void *kmap(struct page *page) { return page_address(page); } + +#define kunmap(page) do { } while (0) + +#define kmap_atomic(page,idx) kmap(page) +#define kunmap_atomic(page,idx) kunmap(page) + +#define bh_kmap(bh) ((bh)->b_data) +#define bh_kunmap(bh) do { } while (0) +#define kmap_nonblock(page) kmap(page) +#define bh_kmap_irq(bh, flags) ((bh)->b_data) +#define bh_kunmap_irq(bh, flags) do { *(flags) = 0; } while (0) + +#endif /* CONFIG_HIGHMEM */ + +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *addr = kmap_atomic(page, KM_USER0); + clear_user_page(addr, vaddr); + kunmap_atomic(addr, KM_USER0); +} + +static inline void clear_highpage(struct page *page) +{ + clear_page(kmap(page)); + kunmap(page); +} + +/* + * Same but also flushes aliased cache contents to RAM. + */ +static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) +{ + char *kaddr; + + if (offset + size > PAGE_SIZE) + out_of_line_bug(); + kaddr = kmap(page); + memset(kaddr + offset, 0, size); + flush_dcache_page(page); + flush_page_to_ram(page); + kunmap(page); +} + +static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from, KM_USER0); + vto = kmap_atomic(to, KM_USER1); + copy_user_page(vto, vfrom, vaddr); + kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vto, KM_USER1); +} + +static inline void copy_highpage(struct page *to, struct page *from) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from, KM_USER0); + vto = kmap_atomic(to, KM_USER1); + copy_page(vto, vfrom); + kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vto, KM_USER1); +} + +#endif /* _LINUX_HIGHMEM_H */ diff --git a/linux-2.4.29-xen-sparse/mm/highmem.c b/linux-2.4.29-xen-sparse/mm/highmem.c index 6ff48a0cac..341e6e29a9 100644 --- a/linux-2.4.29-xen-sparse/mm/highmem.c +++ b/linux-2.4.29-xen-sparse/mm/highmem.c @@ -130,6 +130,13 @@ start: return vaddr; } +void kmap_flush_unused(void) +{ + spin_lock(&kmap_lock); + flush_all_zero_pkmaps(); + spin_unlock(&kmap_lock); +} + void fastcall *kmap_high(struct page *page, int nonblocking) { unsigned long vaddr; diff --git a/linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c b/linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c index ad951fa984..9e70f584e0 100644 --- a/linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c +++ b/linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c @@ -92,6 +92,7 @@ static struct timer_list balloon_timer; #define UNLIST_PAGE(p) ( list_del(&p->list) ) #define pte_offset_kernel pte_offset #define subsys_initcall(_fn) __initcall(_fn) +#define pfn_to_page(_pfn) (mem_map + (_pfn)) 
#endif #define IPRINTK(fmt, args...) \ @@ -245,11 +246,10 @@ static void balloon_process(void *unused) pfn = page - mem_map; mfn_list[i] = phys_to_machine_mapping[pfn]; - phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY; if ( !PageHighMem(page) ) { - v = phys_to_virt((page - mem_map) << PAGE_SHIFT); + v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); queue_l1_entry_update(get_ptep((unsigned long)v), 0); } @@ -260,14 +260,23 @@ static void balloon_process(void *unused) scrub_pages(v, 1); kunmap(page); } -#endif - - balloon_append(page); +#endif } + /* Ensure that ballooned highmem pages don't have cached mappings. */ + kmap_flush_unused(); + /* Flush updates through and flush the TLB. */ xen_tlb_flush(); + /* No more mappings: invalidate pages in P2M and add to balloon. */ + for ( i = 0; i < debt; i++ ) + { + pfn = mfn_to_pfn(mfn_list[i]); + phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY; + balloon_append(pfn_to_page(pfn)); + } + if ( HYPERVISOR_dom_mem_op( MEMOP_decrease_reservation, mfn_list, debt, 0) != debt ) BUG(); diff --git a/linux-2.6.10-xen-sparse/include/linux/highmem.h b/linux-2.6.10-xen-sparse/include/linux/highmem.h new file mode 100644 index 0000000000..bbe2ce094e --- /dev/null +++ b/linux-2.6.10-xen-sparse/include/linux/highmem.h @@ -0,0 +1,95 @@ +#ifndef _LINUX_HIGHMEM_H +#define _LINUX_HIGHMEM_H + +#include <linux/config.h> +#include <linux/fs.h> +#include <linux/mm.h> + +#include <asm/cacheflush.h> + +#ifdef CONFIG_HIGHMEM + +extern struct page *highmem_start_page; + +#include <asm/highmem.h> + +/* declarations for linux/mm/highmem.c */ +unsigned int nr_free_highpages(void); +void kmap_flush_unused(void); + +#else /* CONFIG_HIGHMEM */ + +static inline unsigned int nr_free_highpages(void) { return 0; } +static inline void kmap_flush_unused(void) { } + +static inline void *kmap(struct page *page) +{ + might_sleep(); + return page_address(page); +} + +#define kunmap(page) do { (void) (page); } while (0) + +#define kmap_atomic(page, idx) page_address(page) +#define kunmap_atomic(addr, idx) do { } while (0) +#define kmap_atomic_to_page(ptr) virt_to_page(ptr) + +#endif /* CONFIG_HIGHMEM */ + +/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ +static inline void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *addr = kmap_atomic(page, KM_USER0); + clear_user_page(addr, vaddr, page); + kunmap_atomic(addr, KM_USER0); + /* Make sure this page is cleared on other CPU's too before using it */ + smp_wmb(); +} + +static inline void clear_highpage(struct page *page) +{ + void *kaddr = kmap_atomic(page, KM_USER0); + clear_page(kaddr); + kunmap_atomic(kaddr, KM_USER0); +} + +/* + * Same but also flushes aliased cache contents to RAM. 
+ */ +static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) +{ + void *kaddr; + + BUG_ON(offset + size > PAGE_SIZE); + + kaddr = kmap_atomic(page, KM_USER0); + memset((char *)kaddr + offset, 0, size); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); +} + +static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from, KM_USER0); + vto = kmap_atomic(to, KM_USER1); + copy_user_page(vto, vfrom, vaddr, to); + kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vto, KM_USER1); + /* Make sure this page is cleared on other CPU's too before using it */ + smp_wmb(); +} + +static inline void copy_highpage(struct page *to, struct page *from) +{ + char *vfrom, *vto; + + vfrom = kmap_atomic(from, KM_USER0); + vto = kmap_atomic(to, KM_USER1); + copy_page(vto, vfrom); + kunmap_atomic(vfrom, KM_USER0); + kunmap_atomic(vto, KM_USER1); +} + +#endif /* _LINUX_HIGHMEM_H */ diff --git a/linux-2.6.10-xen-sparse/mm/highmem.c b/linux-2.6.10-xen-sparse/mm/highmem.c new file mode 100644 index 0000000000..96e7798983 --- /dev/null +++ b/linux-2.6.10-xen-sparse/mm/highmem.c @@ -0,0 +1,614 @@ +/* + * High memory handling common code and variables. + * + * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de + * Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de + * + * + * Redesigned the x86 32-bit VM architecture to deal with + * 64-bit physical space. With current x86 CPUs this + * means up to 64 Gigabytes physical RAM. + * + * Rewrote high memory support to move the page cache into + * high memory. Implemented permanent (schedulable) kmaps + * based on Linus' idea. + * + * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> + */ + +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/swap.h> +#include <linux/bio.h> +#include <linux/pagemap.h> +#include <linux/mempool.h> +#include <linux/blkdev.h> +#include <linux/init.h> +#include <linux/hash.h> +#include <linux/highmem.h> +#include <asm/tlbflush.h> + +static mempool_t *page_pool, *isa_page_pool; + +static void *page_pool_alloc(int gfp_mask, void *data) +{ + int gfp = gfp_mask | (int) (long) data; + + return alloc_page(gfp); +} + +static void page_pool_free(void *page, void *data) +{ + __free_page(page); +} + +/* + * Virtual_count is not a pure "count". + * 0 means that it is not mapped, and has not been mapped + * since a TLB flush - it is usable. + * 1 means that there are no users, but it has been mapped + * since the last TLB flush - so we can't use it. + * n means that there are (n-1) current users of it. + */ +#ifdef CONFIG_HIGHMEM +static int pkmap_count[LAST_PKMAP]; +static unsigned int last_pkmap_nr; +static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; + +pte_t * pkmap_page_table; + +static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); + +static void flush_all_zero_pkmaps(void) +{ + int i; + + flush_cache_kmaps(); + + for (i = 0; i < LAST_PKMAP; i++) { + struct page *page; + + /* + * zero means we don't have anything to do, + * >1 means that it is still in use. Only + * a count of 1 means that it is free but + * needs to be unmapped + */ + if (pkmap_count[i] != 1) + continue; + pkmap_count[i] = 0; + + /* sanity check */ + if (pte_none(pkmap_page_table[i])) + BUG(); + + /* + * Don't need an atomic fetch-and-clear op here; + * no-one has the page mapped, and cannot get at + * its virtual address (and hence PTE) without first + * getting the kmap_lock (which is held here). 
+ * So no dangers, even with speculative execution. + */ + page = pte_page(pkmap_page_table[i]); + pte_clear(&pkmap_page_table[i]); + + set_page_address(page, NULL); + } + flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); +} + +static inline unsigned long map_new_virtual(struct page *page) +{ + unsigned long vaddr; + int count; + +start: + count = LAST_PKMAP; + /* Find an empty entry */ + for (;;) { + last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK; + if (!last_pkmap_nr) { + flush_all_zero_pkmaps(); + count = LAST_PKMAP; + } + if (!pkmap_count[last_pkmap_nr]) + break; /* Found a usable entry */ + if (--count) + continue; + + /* + * Sleep for somebody else to unmap their entries + */ + { + DECLARE_WAITQUEUE(wait, current); + + __set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&pkmap_map_wait, &wait); + spin_unlock(&kmap_lock); + schedule(); + remove_wait_queue(&pkmap_map_wait, &wait); + spin_lock(&kmap_lock); + + /* Somebody else might have mapped it while we slept */ + if (page_address(page)) + return (unsigned long)page_address(page); + + /* Re-start */ + goto start; + } + } + vaddr = PKMAP_ADDR(last_pkmap_nr); + set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); + + pkmap_count[last_pkmap_nr] = 1; + set_page_address(page, (void *)vaddr); + + return vaddr; +} + +void kmap_flush_unused(void) +{ + spin_lock(&kmap_lock); + flush_all_zero_pkmaps(); + spin_unlock(&kmap_lock); +} + +EXPORT_SYMBOL(kmap_flush_unused); + +void fastcall *kmap_high(struct page *page) +{ + unsigned long vaddr; + + /* + * For highmem pages, we can't trust "virtual" until + * after we have the lock. + * + * We cannot call this from interrupts, as it may block + */ + spin_lock(&kmap_lock); + vaddr = (unsigned long)page_address(page); + if (!vaddr) + vaddr = map_new_virtual(page); + pkmap_count[PKMAP_NR(vaddr)]++; + if (pkmap_count[PKMAP_NR(vaddr)] < 2) + BUG(); + spin_unlock(&kmap_lock); + return (void*) vaddr; +} + +EXPORT_SYMBOL(kmap_high); + +void fastcall kunmap_high(struct page *page) +{ + unsigned long vaddr; + unsigned long nr; + int need_wakeup; + + spin_lock(&kmap_lock); + vaddr = (unsigned long)page_address(page); + if (!vaddr) + BUG(); + nr = PKMAP_NR(vaddr); + + /* + * A count must never go down to zero + * without a TLB flush! + */ + need_wakeup = 0; + switch (--pkmap_count[nr]) { + case 0: + BUG(); + case 1: + /* + * Avoid an unnecessary wake_up() function call. + * The common case is pkmap_count[] == 1, but + * no waiters. + * The tasks queued in the wait-queue are guarded + * by both the lock in the wait-queue-head and by + * the kmap_lock. As the kmap_lock is held here, + * no need for the wait-queue-head's lock. Simply + * test if the queue is empty. 
+ */ + need_wakeup = waitqueue_active(&pkmap_map_wait); + } + spin_unlock(&kmap_lock); + + /* do wake-up, if needed, race-free outside of the spin lock */ + if (need_wakeup) + wake_up(&pkmap_map_wait); +} + +EXPORT_SYMBOL(kunmap_high); + +#define POOL_SIZE 64 + +static __init int init_emergency_pool(void) +{ + struct sysinfo i; + si_meminfo(&i); + si_swapinfo(&i); + + if (!i.totalhigh) + return 0; + + page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); + if (!page_pool) + BUG(); + printk("highmem bounce pool size: %d pages\n", POOL_SIZE); + + return 0; +} + +__initcall(init_emergency_pool); + +/* + * highmem version, map in to vec + */ +static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) +{ + unsigned long flags; + unsigned char *vto; + + local_irq_save(flags); + vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto, KM_BOUNCE_READ); + local_irq_restore(flags); +} + +#else /* CONFIG_HIGHMEM */ + +#define bounce_copy_vec(to, vfrom) \ + memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) + +#endif + +#define ISA_POOL_SIZE 16 + +/* + * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA + * as the max address, so check if the pool has already been created. + */ +int init_emergency_isa_pool(void) +{ + if (isa_page_pool) + return 0; + + isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA); + if (!isa_page_pool) + BUG(); + + printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); + return 0; +} + +/* + * Simple bounce buffer support for highmem pages. Depending on the + * queue gfp mask set, *to may or may not be a highmem page. kmap it + * always, it will do the Right Thing + */ +static void copy_to_high_bio_irq(struct bio *to, struct bio *from) +{ + unsigned char *vfrom; + struct bio_vec *tovec, *fromvec; + int i; + + __bio_for_each_segment(tovec, to, i, 0) { + fromvec = from->bi_io_vec + i; + + /* + * not bounced + */ + if (tovec->bv_page == fromvec->bv_page) + continue; + + /* + * fromvec->bv_offset and fromvec->bv_len might have been + * modified by the block layer, so use the original copy, + * bounce_copy_vec already uses tovec->bv_len + */ + vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; + + flush_dcache_page(tovec->bv_page); + bounce_copy_vec(tovec, vfrom); + } +} + +static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) +{ + struct bio *bio_orig = bio->bi_private; + struct bio_vec *bvec, *org_vec; + int i; + + if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) + set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); + + /* + * free up bounce indirect pages used + */ + __bio_for_each_segment(bvec, bio, i, 0) { + org_vec = bio_orig->bi_io_vec + i; + if (bvec->bv_page == org_vec->bv_page) + continue; + + mempool_free(bvec->bv_page, pool); + } + + bio_endio(bio_orig, bio_orig->bi_size, err); + bio_put(bio); +} + +static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) +{ + if (bio->bi_size) + return 1; + + bounce_end_io(bio, page_pool, err); + return 0; +} + +static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err) +{ + if (bio->bi_size) + return 1; + + bounce_end_io(bio, isa_page_pool, err); + return 0; +} + +static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) +{ + struct bio *bio_orig = bio->bi_private; + + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + copy_to_high_bio_irq(bio_orig, bio); + + 
bounce_end_io(bio, pool, err); +} + +static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err) +{ + if (bio->bi_size) + return 1; + + __bounce_end_io_read(bio, page_pool, err); + return 0; +} + +static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err) +{ + if (bio->bi_size) + return 1; + + __bounce_end_io_read(bio, isa_page_pool, err); + return 0; +} + +static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, + mempool_t *pool) +{ + struct page *page; + struct bio *bio = NULL; + int i, rw = bio_data_dir(*bio_orig); + struct bio_vec *to, *from; + + bio_for_each_segment(from, *bio_orig, i) { + page = from->bv_page; + + /* + * is destination page below bounce pfn? + */ + if (page_to_pfn(page) < q->bounce_pfn) + continue; + + /* + * irk, bounce it + */ + if (!bio) + bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt); + + to = bio->bi_io_vec + i; + + to->bv_page = mempool_alloc(pool, q->bounce_gfp); + to->bv_len = from->bv_len; + to->bv_offset = from->bv_offset; + + if (rw == WRITE) { + char *vto, *vfrom; + + flush_dcache_page(from->bv_page); + vto = page_address(to->bv_page) + to->bv_offset; + vfrom = kmap(from->bv_page) + from->bv_offset; + memcpy(vto, vfrom, to->bv_len); + kunmap(from->bv_page); + } + } + + /* + * no pages bounced + */ + if (!bio) + return; + + /* + * at least one page was bounced, fill in possible non-highmem + * pages + */ + bio_for_each_segment(from, *bio_orig, i) { + to = bio_iovec_idx(bio, i); + if (!to->bv_page) { + to->bv_page = from->bv_page; + to->bv_len = from->bv_len; + to->bv_offset = from->bv_offset; + } + } + + bio->bi_bdev = (*bio_orig)->bi_bdev; + bio->bi_flags |= (1 << BIO_BOUNCED); + bio->bi_sector = (*bio_orig)->bi_sector; + bio->bi_rw = (*bio_orig)->bi_rw; + + bio->bi_vcnt = (*bio_orig)->bi_vcnt; + bio->bi_idx = (*bio_orig)->bi_idx; + bio->bi_size = (*bio_orig)->bi_size; + + if (pool == page_pool) { + bio->bi_end_io = bounce_end_io_write; + if (rw == READ) + bio->bi_end_io = bounce_end_io_read; + } else { + bio->bi_end_io = bounce_end_io_write_isa; + if (rw == READ) + bio->bi_end_io = bounce_end_io_read_isa; + } + + bio->bi_private = *bio_orig; + *bio_orig = bio; +} + +void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig) +{ + mempool_t *pool; + + /* + * for non-isa bounce case, just check if the bounce pfn is equal + * to or bigger than the highest pfn in the system -- in that case, + * don't waste time iterating over bio segments + */ + if (!(q->bounce_gfp & GFP_DMA)) { + if (q->bounce_pfn >= blk_max_pfn) + return; + pool = page_pool; + } else { + BUG_ON(!isa_page_pool); + pool = isa_page_pool; + } + + /* + * slow path + */ + __blk_queue_bounce(q, bio_orig, pool); +} + +EXPORT_SYMBOL(blk_queue_bounce); + +#if defined(HASHED_PAGE_VIRTUAL) + +#define PA_HASH_ORDER 7 + +/* + * Describes one page->virtual association + */ +struct page_address_map { + struct page *page; + void *virtual; + struct list_head list; +}; + +/* + * page_address_map freelist, allocated from page_address_maps. 
+ */ +static struct list_head page_address_pool; /* freelist */ +static spinlock_t pool_lock; /* protects page_address_pool */ + +/* + * Hash table bucket + */ +static struct page_address_slot { + struct list_head lh; /* List of page_address_maps */ + spinlock_t lock; /* Protect this bucket's list */ +} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; + +static struct page_address_slot *page_slot(struct page *page) +{ + return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; +} + +void *page_address(struct page *page) +{ + unsigned long flags; + void *ret; + struct page_address_slot *pas; + + if (!PageHighMem(page)) + return lowmem_page_address(page); + + pas = page_slot(page); + ret = NULL; + spin_lock_irqsave(&pas->lock, flags); + if (!list_empty(&pas->lh)) { + struct page_address_map *pam; + + list_for_each_entry(pam, &pas->lh, list) { + if (pam->page == page) { + ret = pam->virtual; + goto done; + } + } + } +done: + spin_unlock_irqrestore(&pas->lock, flags); + return ret; +} + +EXPORT_SYMBOL(page_address); + +void set_page_address(struct page *page, void *virtual) +{ + unsigned long flags; + struct page_address_slot *pas; + struct page_address_map *pam; + + BUG_ON(!PageHighMem(page)); + + pas = page_slot(page); + if (virtual) { /* Add */ + BUG_ON(list_empty(&page_address_pool)); + + spin_lock_irqsave(&pool_lock, flags); + pam = list_entry(page_address_pool.next, + struct page_address_map, list); + list_del(&pam->list); + spin_unlock_irqrestore(&pool_lock, flags); + + pam->page = page; + pam->virtual = virtual; + + spin_lock_irqsave(&pas->lock, flags); + list_add_tail(&pam->list, &pas->lh); + spin_unlock_irqrestore(&pas->lock, flags); + } else { /* Remove */ + spin_lock_irqsave(&pas->lock, flags); + list_for_each_entry(pam, &pas->lh, list) { + if (pam->page == page) { + list_del(&pam->list); + spin_unlock_irqrestore(&pas->lock, flags); + spin_lock_irqsave(&pool_lock, flags); + list_add_tail(&pam->list, &page_address_pool); + spin_unlock_irqrestore(&pool_lock, flags); + goto done; + } + } + spin_unlock_irqrestore(&pas->lock, flags); + } +done: + return; +} + +static struct page_address_map page_address_maps[LAST_PKMAP]; + +void __init page_address_init(void) +{ + int i; + + INIT_LIST_HEAD(&page_address_pool); + for (i = 0; i < ARRAY_SIZE(page_address_maps); i++) + list_add(&page_address_maps[i].list, &page_address_pool); + for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { + INIT_LIST_HEAD(&page_address_htable[i].lh); + spin_lock_init(&page_address_htable[i].lock); + } + spin_lock_init(&pool_lock); +} + +#endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */ |
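Why kunmap() alone is not enough for the balloon driver: kunmap_high() only decrements pkmap_count back to 1, so the kernel virtual mapping (and any TLB entry for it) survives until flush_all_zero_pkmaps() runs. A minimal sketch of the sequence, assuming the kernel context of the files above:

    /* pkmap_count semantics (from the mm/highmem.c added above):
     *   0 - slot unused and flushed from the TLB since last use; reusable
     *   1 - no users, but mapped since the last flush; mapping still live
     *   n - (n-1) current users of the kmap
     */
    void *v = kmap(page);   /* slot's pkmap_count becomes >= 2               */
    scrub_pages(v, 1);
    kunmap(page);           /* back to 1: PTE and possible TLB entry remain  */
    kmap_flush_unused();    /* clears the PTE and flushes the kernel TLB range */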