path: root/target/linux/generic/patches-4.0/404-mtd-add-more-helper-functions.patch
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -444,14 +444,12 @@ static struct mtd_part *allocate_partiti
 	if (slave->offset == MTDPART_OFS_APPEND)
 		slave->offset = cur_offset;
 	if (slave->offset == MTDPART_OFS_NXTBLK) {
-		slave->offset = cur_offset;
-		if (mtd_mod_by_eb(cur_offset, master) != 0) {
-			/* Round up to next erasesize */
-			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+		/* Round up to next erasesize */
+		slave->offset = mtd_roundup_to_eb(cur_offset, master);
+		if (slave->offset != cur_offset)
 			printk(KERN_NOTICE "Moving partition %d: "
 			       "0x%012llx -> 0x%012llx\n", partno,
 			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
-		}
 	}
 	if (slave->offset == MTDPART_OFS_RETAIN) {
 		slave->offset = cur_offset;
@@ -671,6 +669,17 @@ run_parsers_by_type(struct mtd_part *sla
 	return nr_parts;
 }
 
+static inline unsigned long
+mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
+{
+	unsigned long mask = mtd->erasesize - 1;
+
+	len += offset & mask;
+	len = (len + mask) & ~mask;
+	len -= offset & mask;
+	return len;
+}
+
 #ifdef CONFIG_MTD_SPLIT_FIRMWARE_NAME
 #define SPLIT_FIRMWARE_NAME	CONFIG_MTD_SPLIT_FIRMWARE_NAME
 #else
@@ -912,6 +921,24 @@ int mtd_is_partition(const struct mtd_in
 }
 EXPORT_SYMBOL_GPL(mtd_is_partition);
 
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd)
+{
+	if (!mtd_is_partition(mtd))
+		return (struct mtd_info *)mtd;
+
+	return PART(mtd)->master;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_master);
+
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd)
+{
+	if (!mtd_is_partition(mtd))
+		return 0;
+
+	return PART(mtd)->offset;
+}
+EXPORT_SYMBOL_GPL(mtdpart_get_offset);
+
 /* Returns the size of the entire flash chip */
 uint64_t mtd_get_device_size(const struct mtd_info *mtd)
 {
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -90,6 +90,8 @@ int mtd_is_partition(const struct mtd_in
 int mtd_add_partition(struct mtd_info *master, const char *name,
 		      long long offset, long long length);
 int mtd_del_partition(struct mtd_info *master, int partno);
+struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
+uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
 uint64_t mtd_get_device_size(const struct mtd_info *mtd);
 extern void __weak arch_split_mtd_part(struct mtd_info *master,
 				       const char *name, int offset, int size);
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -334,6 +334,24 @@ static inline uint32_t mtd_mod_by_eb(uin
 	return do_div(sz, mtd->erasesize);
 }
 
+static inline uint64_t mtd_roundup_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd_mod_by_eb(sz, mtd) == 0)
+		return sz;
+
+	/* Round up to next erase block */
+	return (mtd_div_by_eb(sz, mtd) + 1) * mtd->erasesize;
+}
+
+static inline uint64_t mtd_rounddown_to_eb(uint64_t sz, struct mtd_info *mtd)
+{
+	if (mtd_mod_by_eb(sz, mtd) == 0)
+		return sz;
+
+	/* Round down to the start of the current erase block */
+	return (mtd_div_by_eb(sz, mtd)) * mtd->erasesize;
+}
+
 static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
 {
 	if (mtd->writesize_shift)
' href='#n433'>433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#undef DEBUG
#undef DEBUG_FAIL

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/init.h>
#include <public/xen.h>
#include <asm/current.h>
#include <asm/papr.h>
#include <asm/hcalls.h>

static void not_yet(struct cpu_user_regs *regs)
{
    printk("not implemented yet: 0x%lx\n", regs->gprs[3]);
    for (;;);
}

#ifdef USE_PTE_INSERT
static inline void pte_insert(union pte volatile *pte,
        ulong vsid, ulong rpn, ulong lrpn)
{
    /*
     * It's required that external locking be done to provide
     * exclusion between the choices of insertion points.  Any valid
     * choice of pte requires that the pte be invalid upon entry to
     * this function.
     */

    ASSERT( (pte->bits.v == 0) );

    /* Set shadow word. */
    (void)lrpn;

    /* Set the second word first so the valid bit is the last thing set */
    pte->words.rpn = rpn;

    /* Guarantee the second word is visible before the valid bit */
    __asm__ __volatile__("eieio" : : : "memory");

    /* Now set the first word including the valid bit */
    pte->words.vsid = vsid;
    /* Architecturally this instruction will cause a heavier operation
     * if this one is not supported.  Note: on some machines like Cell
     * this could be a nop */
    __asm__ __volatile__("ptesync" : : : "memory");
}
#endif

static void pte_tlbie(union pte volatile *pte, ulong ptex)
{
    ulong va;
    ulong vsid;
    ulong group;
    ulong pi;
    ulong pi_high;

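    /* Reconstruct enough of the effective address for tlbie: the AVPN
     * holds the VSID and the high bits of the page index; inverting the
     * hash against the PTEG index recovers the low page-index bits. */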
    vsid = pte->bits.avpn >> 5;
    group = ptex >> 3;
    if (pte->bits.h) {
        group = ~group;
    }
    pi = (vsid ^ group) & 0x7ff;
    pi_high = (pte->bits.avpn & 0x1f) << 11;
    pi |= pi_high;
    va = (pi << 12) | (vsid << 28);
    va &= ~(0xffffULL << 48);

#ifndef FLUSH_THE_WHOLE_THING
    if (pte->bits.l) {
        va |= (pte->bits.rpn & 1);
        asm volatile("ptesync ;tlbie %0,1" : : "r"(va) : "memory");
    } else {
        asm volatile("ptesync; tlbie %0,0" : : "r"(va) : "memory");
    }
    asm volatile("eieio; tlbsync; ptesync" : : : "memory");
#else
    {
        unsigned i;
        ulong rb;

        for (i = 0; i < 256; i++) {
            rb = i;
            rb <<= 12;
            asm volatile("ptesync; tlbie %0,0; eieio; tlbsync; ptesync; isync"
                    : "=r" (rb): : "memory");
            asm volatile("ptesync; tlbie %0,1; eieio; tlbsync; ptesync; isync"
                    : "=r" (rb): : "memory");
        }
    }
#endif

}

static void h_enter(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];

    union pte pte;
    union pte volatile *ppte;
    struct domain_htab *htab;
    int lp_bits = 0;
    int pgshift = PAGE_SHIFT;
    ulong idx;
    int limit = 0;                /* how many PTEs to examine in the PTEG */
    ulong lpn;
    ulong rpn;
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    int mtype;

    htab = &d->arch.htab;
    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }

    /* use local HPTE to avoid manual shifting & masking */
    pte.words.vsid = regs->gprs[6];
    pte.words.rpn = regs->gprs[7];

    if ( pte.bits.l ) {        /* large page? */
        /* figure out the page size for the selected large page */
        ulong lp_rpn = pte.bits.rpn;
        uint lp_size = 0;

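        /* PAPR encodes the selected page size as a run of low-order 1
         * bits in the RPN field; count them to find the size index. */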
        while ( lp_rpn & 0x1 ) {
            lp_rpn >>= 1;
            lp_bits = ((lp_bits << 1) | 0x1);
            lp_size++;
        }

        if ( lp_size >= d->arch.large_page_sizes ) {
            printk("%s: attempt to use unsupported lp_size %d\n",
                   __func__, lp_size);
            regs->gprs[3] = H_Parameter;
            return;
        }

        /* get correct pgshift value */
        pgshift = d->arch.large_page_shift[lp_size];
    }

    /* Get the correct logical RPN in terms of 4K pages; we need to mask
     * off the LP bits and unused ARPN bits if this is a large page. */

    lpn = ~0ULL << (pgshift - 12);
    lpn = pte.bits.rpn & lpn;

    rpn = pfn2mfn(d, lpn, &mtype);

    if (mtype == PFN_TYPE_IO) {
        /* only a privileged dom can access outside IO space */
        if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
            regs->gprs[3] =  H_Privilege;
            printk("%s: unprivileged access to logical page: 0x%lx\n",
                   __func__, lpn);
            return;
        }

        if ( !((pte.bits.w == 0)
             && (pte.bits.i == 1)
             && (pte.bits.g == 1)) ) {
#ifdef DEBUG_FAIL
            printk("%s: expecting an IO WIMG "
                   "w=%x i=%d m=%d, g=%d\n word 0x%lx\n", __func__,
                   pte.bits.w, pte.bits.i, pte.bits.m, pte.bits.g,
                   pte.words.rpn);
#endif
            regs->gprs[3] =  H_Parameter;
            return;
        }
    }
    /* fixup the RPN field of our local PTE copy */
    pte.bits.rpn = rpn | lp_bits;

    /* clear reserved bits in high word */
    pte.bits.lock = 0x0;
    pte.bits.res = 0x0;

    /* clear reserved bits in low word */
    pte.bits.pp0 = 0x0;
    pte.bits.ts = 0x0;
    pte.bits.res2 = 0x0;

    if ( !(flags & H_EXACT) ) {
        /* PTEG (not specific PTE); clear 3 lowest bits */
        ptex &= ~0x7UL;
        limit = 7;
    }

    /* Data manipulations should be done prior to the PTE insertion. */
    if ( flags & H_ZERO_PAGE ) {
        memset((void *)(rpn << PAGE_SHIFT), 0, 1UL << pgshift);
    }

    if ( flags & H_ICACHE_INVALIDATE ) {
        ulong k;
        ulong addr = rpn << PAGE_SHIFT;

        for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
            dcbst(addr + k);
            sync();
            icbi(addr + k);
            sync();
            isync();
        }
    }

    if ( flags & H_ICACHE_SYNCHRONIZE ) {
        ulong k;
        ulong addr = rpn << PAGE_SHIFT;
        for (k = 0; k < (1UL << pgshift); k += L1_CACHE_BYTES) {
            icbi(addr + k);
            sync();
            isync();
        }
    }

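    /* Scan the candidate slots (just one for H_EXACT, the whole 8-entry
     * PTEG otherwise) and claim the first invalid, unlocked one.  The
     * RPN doubleword is stored first; eieio orders it before the VSID
     * doubleword so the valid bit becomes visible last. */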
    for (idx = ptex; idx <= ptex + limit; idx++) {
        ppte = &htab->map[idx];

        if ( ppte->bits.v == 0 && ppte->bits.lock == 0) {
            /* got it */

            asm volatile(
                "std %1, 8(%0); eieio; std %2, 0(%0); ptesync"
                : 
                : "b" (ppte), "r" (pte.words.rpn), "r" (pte.words.vsid)
                : "memory");

            regs->gprs[3] = H_Success;
            regs->gprs[4] = idx;

            return;
        }
    }

#ifdef DEBUG
    /* If the PTEG is full then no additional values are returned. */
    printk("%s: PTEG FULL\n", __func__);
#endif

    regs->gprs[3] = H_PTEG_Full;
}

static void h_protect(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    ulong avpn = regs->gprs[6];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *ppte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx avpn: 0x%lx\n", __func__,
           flags, ptex, avpn);
#endif
    if ( ptex >= (1UL << htab->log_num_ptes) ) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    ppte = &htab->map[ptex];

    lpte.words.vsid = ppte->words.vsid;
    lpte.words.rpn = ppte->words.rpn;

    /* the AVPN parameter is laid out as in the PTE word, so shift it
     * down to line up with the PTE's AVPN field */
    if ( (flags & H_AVPN) && lpte.bits.avpn != avpn >> 7 ) {
#ifdef DEBUG_FAIL
        printk("%s: %p: AVPN check failed: 0x%lx, 0x%lx\n", __func__,
                ppte, lpte.words.vsid, lpte.words.rpn);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }

    if (lpte.bits.v == 0) {
        /* PAPR does not specify what to do here; since we invalidate
         * entries by zeroing the whole high dword, the AVPN check
         * should catch this first */

#ifdef DEBUG_FAIL
        printk("%s: pte invalid\n", __func__);
#endif
        regs->gprs[3] =  H_Not_Found;
        return;
    }

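    /* The PTE must be invalidated and its TLB entry flushed before the
     * protection bits change; it is rewritten with V set again below. */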
    lpte.bits.v = 0;
    
    /* ppte->words.vsid = lpte.words.vsid; */
    asm volatile(
        "eieio; std %1, 0(%0); ptesync"
        : 
        : "b" (ppte), "r" (0)
        : "memory");

    pte_tlbie(&lpte, ptex);

    /* We never touch pp0, and PP bits in flags are in the right
     * order */
    lpte.bits.pp1 = flags & (H_PP1 | H_PP2);
    lpte.bits.n = (flags & H_N) ? 1 : 0;

    lpte.bits.v = 1;
    lpte.bits.r = 0;

    asm volatile(
        "std  %1, 8(%0); eieio; std %2, 0(%0); ptesync"
        : 
        : "b" (ppte), "r" (lpte.words.rpn), "r" (lpte.words.vsid)
        : "memory");

    regs->gprs[3] = H_Success;
}

static void h_clear_ref(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx\n", __func__,
           flags, ptex);
#endif

    if (flags != 0) {
        printk("WARNING: %s: "
                "flags are undefined and should be 0: 0x%lx\n",
                __func__, flags);
    }

    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    lpte.words.rpn = pte->words.rpn;

    regs->gprs[4] = lpte.words.rpn;

    if (lpte.bits.r != 0) {
        lpte.bits.r = 0;

        asm volatile("std  %1, 8(%0); eieio; ptesync"
                : 
                : "b" (pte), "r" (lpte.words.rpn) : "memory");

        pte_tlbie(&lpte, ptex);
    }
    regs->gprs[3] = H_Success;
}

static void h_clear_mod(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx\n", __func__,
           flags, ptex);
#endif
    if (flags != 0) {
        printk("WARNING: %s: "
                "flags are undefined and should be 0: 0x%lx\n",
                __func__, flags);
    }
    
    if (ptex >= (1UL << htab->log_num_ptes)) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    lpte.words.vsid = pte->words.vsid;
    lpte.words.rpn = pte->words.rpn;

    regs->gprs[3] = H_Success;
    regs->gprs[4] = lpte.words.rpn;

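    /* If the change bit is set, the PTE must be invalidated and its TLB
     * entry flushed before C can safely be cleared and rewritten. */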
    if (lpte.bits.c != 0) {
        /* invalidate */
        asm volatile(
                "eieio; std %1, 0(%0); ptesync"
                : 
                : "b" (pte), "r" (0)
                : "memory");

        pte_tlbie(&lpte, ptex);

        lpte.bits.c = 0;
        asm volatile(
                "std  %1, 8(%0); eieio; std %2, 0(%0); ptesync"
                : 
                : "b" (pte), "r" (lpte.words.rpn), "r" (lpte.words.vsid)
                : "memory");
    }
}

static void h_remove(struct cpu_user_regs *regs)
{
    ulong flags = regs->gprs[4];
    ulong ptex = regs->gprs[5];
    ulong avpn = regs->gprs[6];
    struct vcpu *v = get_current();
    struct domain *d = v->domain;
    struct domain_htab *htab = &d->arch.htab;
    union pte volatile *pte;
    union pte lpte;

#ifdef DEBUG
    printk("%s: flags: 0x%lx ptex: 0x%lx avpn: 0x%lx\n", __func__,
           flags, ptex, avpn);
#endif
    if ( ptex >= (1UL << htab->log_num_ptes) ) {
        regs->gprs[3] = H_Parameter;
        printk("%s: bad ptex: 0x%lx\n", __func__, ptex);
        return;
    }
    pte = &htab->map[ptex];
    lpte.words.vsid = pte->words.vsid;
    lpte.words.rpn = pte->words.rpn;

    if ((flags & H_AVPN) && lpte.bits.avpn != (avpn >> 7)) {
#ifdef DEBUG_FAIL
        printk("%s: avpn doesn not match\n", __func__);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }

    if ((flags & H_ANDCOND) && ((avpn & pte->words.vsid) != 0)) {
#ifdef DEBUG_FAIL
        printk("%s: andcond does not match\n", __func__);
#endif
        regs->gprs[3] = H_Not_Found;
        return;
    }

    regs->gprs[3] = H_Success;
    /* return old PTE in regs 4 and 5 */
    regs->gprs[4] = lpte.words.vsid;
    regs->gprs[5] = lpte.words.rpn;

    /* XXX - I'm very skeptical of doing ANYTHING if not bits.v */
    /* XXX - I think the spec should be questioned in this case (MFM) */
    if (pte->bits.v == 0) {
        printk("%s: removing invalid entry\n", __func__);
    }
    asm volatile("eieio; std %1, 0(%0); ptesync"
            :
            : "b" (pte), "r" (0)
            : "memory");

    pte_tlbie(&lpte, ptex);
}

__init_papr_hcall(H_ENTER, h_enter);
__init_papr_hcall(H_READ, not_yet);
__init_papr_hcall(H_REMOVE, h_remove);
__init_papr_hcall(H_CLEAR_MOD, h_clear_mod);
__init_papr_hcall(H_CLEAR_REF, h_clear_ref);
__init_papr_hcall(H_PROTECT, h_protect);