diff --git a/target/linux/layerscape/patches-5.4/814-qe-0005-QE-remove-PPCisms-for-QE.patch b/target/linux/layerscape/patches-5.4/814-qe-0005-QE-remove-PPCisms-for-QE.patch
new file mode 100644
index 0000000000..496cd3cacf
--- /dev/null
+++ b/target/linux/layerscape/patches-5.4/814-qe-0005-QE-remove-PPCisms-for-QE.patch
@@ -0,0 +1,547 @@
+From 3fb2f44e30cc3a151a0fa8160d8bf70062722ed7 Mon Sep 17 00:00:00 2001
+From: Zhao Qiang <qiang.zhao@nxp.com>
+Date: Thu, 27 Apr 2017 09:47:29 +0800
+Subject: [PATCH] QE: remove PPCisms for QE
+
+QE was supported on PowerPC and was dependent on PPC.
+Now it is supported on other platforms as well, so remove the PPCisms.
+
+Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+---
+ drivers/soc/fsl/qe/Kconfig | 2 +-
+ drivers/soc/fsl/qe/qe.c | 70 +++++++++++++++++++++++++---------------
+ drivers/soc/fsl/qe/qe_io.c | 42 +++++++++++-------------
+ drivers/soc/fsl/qe/qe_tdm.c | 8 ++---
+ drivers/soc/fsl/qe/ucc.c | 10 +++---
+ drivers/soc/fsl/qe/ucc_fast.c | 74 ++++++++++++++++++++++---------------------
+ drivers/tty/serial/ucc_uart.c | 1 +
+ include/soc/fsl/qe/qe.h | 1 -
+ 8 files changed, 112 insertions(+), 96 deletions(-)
+
+--- a/drivers/soc/fsl/qe/Kconfig
++++ b/drivers/soc/fsl/qe/Kconfig
+@@ -5,7 +5,7 @@
+
+ config QUICC_ENGINE
+ bool "QUICC Engine (QE) framework support"
+- depends on FSL_SOC && PPC32
++ depends on OF && HAS_IOMEM
+ select GENERIC_ALLOCATOR
+ select CRC32
+ help
+--- a/drivers/soc/fsl/qe/qe.c
++++ b/drivers/soc/fsl/qe/qe.c
+@@ -30,8 +30,6 @@
+ #include <asm/pgtable.h>
+ #include <soc/fsl/qe/immap_qe.h>
+ #include <soc/fsl/qe/qe.h>
+-#include <asm/prom.h>
+-#include <asm/rheap.h>
+
+ static void qe_snums_init(void);
+ static int qe_sdma_init(void);
+@@ -104,15 +102,27 @@ void qe_reset(void)
+ panic("sdma init failed!");
+ }
+
++/* Issue a command to the QE; returns 0 on success or -EIO on error.
++ *
++ * @cmd: the command code, should be QE_INIT_TX_RX, QE_STOP_TX and so on
++ * @device: which sub-block will run the command: QE_CR_SUBBLOCK_UCCFAST1 - 8,
++ * QE_CR_SUBBLOCK_UCCSLOW1 - 8, QE_CR_SUBBLOCK_MCC1 - 3,
++ * QE_CR_SUBBLOCK_IDMA1 - 4 and so on.
++ * @mcn_protocol: specifies the mode for the command for non-MCC, should be
++ * QE_CR_PROTOCOL_HDLC_TRANSPARENT, QE_CR_PROTOCOL_QMC, QE_CR_PROTOCOL_UART
++ * and so on.
++ * @cmd_input: command-related data.
++ */
+ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
+ {
+ unsigned long flags;
+ u8 mcn_shift = 0, dev_shift = 0;
+- u32 ret;
++ int ret;
++ int i;
+
+ spin_lock_irqsave(&qe_lock, flags);
+ if (cmd == QE_RESET) {
+- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
++ iowrite32be((cmd | QE_CR_FLG), &qe_immr->cp.cecr);
+ } else {
+ if (cmd == QE_ASSIGN_PAGE) {
+ /* Here device is the SNUM, not sub-block */
+@@ -129,20 +139,26 @@ int qe_issue_cmd(u32 cmd, u32 device, u8
+ mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
+ }
+
+- out_be32(&qe_immr->cp.cecdr, cmd_input);
+- out_be32(&qe_immr->cp.cecr,
+- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
+- mcn_protocol << mcn_shift));
++ iowrite32be(cmd_input, &qe_immr->cp.cecdr);
++ iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) |
++ (u32)mcn_protocol << mcn_shift), &qe_immr->cp.cecr);
+ }
+
+ /* wait for the QE_CR_FLG to clear */
+- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+- 100, 0);
++ ret = -EIO;
++ for (i = 0; i < 100; i++) {
++ if ((ioread32be(&qe_immr->cp.cecr) & QE_CR_FLG) == 0) {
++ ret = 0;
++ break;
++ }
++ udelay(1);
++ }
++
+ /* On timeout (e.g. failure), the expression will be false (ret == 0),
+ otherwise it will be true (ret == 1). */
+ spin_unlock_irqrestore(&qe_lock, flags);
+
+- return ret == 1;
++ return ret;
+ }
+ EXPORT_SYMBOL(qe_issue_cmd);
+
+@@ -167,6 +183,8 @@ unsigned int qe_get_brg_clk(void)
+ int size;
+ const u32 *prop;
+ unsigned int mod;
++ u32 val;
++ int ret;
+
+ if (brg_clk)
+ return brg_clk;
+@@ -175,9 +193,9 @@ unsigned int qe_get_brg_clk(void)
+ if (!qe)
+ return brg_clk;
+
+- prop = of_get_property(qe, "brg-frequency", &size);
+- if (prop && size == sizeof(*prop))
+- brg_clk = *prop;
++ ret = of_property_read_u32(qe, "brg-frequency", &val);
++ if (!ret)
++ brg_clk = val;
+
+ of_node_put(qe);
+
+@@ -223,14 +241,16 @@ int qe_setbrg(enum qe_clock brg, unsigne
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
++#ifdef CONFIG_PPC
+ if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
++#endif
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
++ iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
+
+ return 0;
+ }
+@@ -377,9 +397,9 @@ static int qe_sdma_init(void)
+ return -ENOMEM;
+ }
+
+- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
+- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+- (0x1 << QE_SDMR_CEN_SHIFT)));
++ iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK, &sdma->sdebcr);
++ iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
++ &sdma->sdmr);
+
+ return 0;
+ }
+@@ -417,14 +437,14 @@ static void qe_upload_microcode(const vo
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
++ iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE |
++ QE_IRAM_IADD_BADDR, &qe_immr->iram.iadd);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
++ iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
+
+ /* Set I-RAM Ready Register */
+- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
++ iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
+ }
+
+ /*
+@@ -509,7 +529,7 @@ int qe_upload_firmware(const struct qe_f
+ * If the microcode calls for it, split the I-RAM.
+ */
+ if (!firmware->split)
+- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
++ qe_setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+@@ -543,11 +563,11 @@ int qe_upload_firmware(const struct qe_f
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
++ iowrite32be(trap, &qe_immr->rsp[i].tibcr[j]);
+ }
+
+ /* Enable traps */
+- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
++ iowrite32be(be32_to_cpu(ucode->eccr), &qe_immr->rsp[i].eccr);
+ }
+
+ qe_firmware_uploaded = 1;
+--- a/drivers/soc/fsl/qe/qe_io.c
++++ b/drivers/soc/fsl/qe/qe_io.c
+@@ -18,8 +18,6 @@
+
+ #include <asm/io.h>
+ #include <soc/fsl/qe/qe.h>
+-#include <asm/prom.h>
+-#include <sysdev/fsl_soc.h>
+
+ #undef DEBUG
+
+@@ -57,16 +55,16 @@ void __par_io_config_pin(struct qe_pio_r
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
+
+ /* Set open drain, if required */
+- tmp_val = in_be32(&par_io->cpodr);
++ tmp_val = ioread32be(&par_io->cpodr);
+ if (open_drain)
+- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
++ iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
+ else
+- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
++ iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
+
+ /* define direction */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+- in_be32(&par_io->cpdir2) :
+- in_be32(&par_io->cpdir1);
++ ioread32be(&par_io->cpdir2) :
++ ioread32be(&par_io->cpdir1);
+
+ /* get all bits mask for 2 bit per port */
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+@@ -78,34 +76,30 @@ void __par_io_config_pin(struct qe_pio_r
+
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+- out_be32(&par_io->cpdir2,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
+ } else {
+- out_be32(&par_io->cpdir1,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
+ }
+ /* define pin assignment */
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+- in_be32(&par_io->cppar2) :
+- in_be32(&par_io->cppar1);
++ ioread32be(&par_io->cppar2) :
++ ioread32be(&par_io->cppar1);
+
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
+ /* clear and set 2 bits mask */
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+- out_be32(&par_io->cppar2,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
+ } else {
+- out_be32(&par_io->cppar1,
+- ~pin_mask2bits & tmp_val);
++ iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
+ tmp_val &= ~pin_mask2bits;
+- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
++ iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
+ }
+ }
+ EXPORT_SYMBOL(__par_io_config_pin);
+@@ -133,12 +127,12 @@ int par_io_data_set(u8 port, u8 pin, u8
+ /* calculate pin location */
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
+
+- tmp_val = in_be32(&par_io[port].cpdata);
++ tmp_val = ioread32be(&par_io[port].cpdata);
+
+ if (val == 0) /* clear */
+- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
++ iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
+ else /* set */
+- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
++ iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
+
+ return 0;
+ }
+--- a/drivers/soc/fsl/qe/qe_tdm.c
++++ b/drivers/soc/fsl/qe/qe_tdm.c
+@@ -169,10 +169,10 @@ void ucc_tdm_init(struct ucc_tdm *utdm,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+- setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+- SIR_LAST);
+- setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+- SIR_LAST);
++ qe_setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
++ SIR_LAST);
++ qe_setbits16(&siram[(siram_entry_id * 32) + 0x200 +
++ (utdm->num_of_ts - 1)], SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+--- a/drivers/soc/fsl/qe/ucc.c
++++ b/drivers/soc/fsl/qe/ucc.c
+@@ -35,7 +35,7 @@ int ucc_set_qe_mux_mii_mng(unsigned int
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+- clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
++ qe_clrsetbits32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+@@ -80,7 +80,7 @@ int ucc_set_type(unsigned int ucc_num, e
+ return -EINVAL;
+ }
+
+- clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
++ qe_clrsetbits8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+@@ -109,9 +109,9 @@ int ucc_mux_set_grant_tsa_bkpt(unsigned
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+- setbits32(cmxucr, mask << shift);
++ qe_setbits32(cmxucr, mask << shift);
+ else
+- clrbits32(cmxucr, mask << shift);
++ qe_clrbits32(cmxucr, mask << shift);
+
+ return 0;
+ }
+@@ -207,7 +207,7 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+- clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
++ qe_clrsetbits32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+--- a/drivers/soc/fsl/qe/ucc_fast.c
++++ b/drivers/soc/fsl/qe/ucc_fast.c
+@@ -29,41 +29,41 @@ void ucc_fast_dump_regs(struct ucc_fast_
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
++ &uccf->uf_regs->gumr, ioread32be(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
++ &uccf->uf_regs->upsmr, ioread32be(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
++ &uccf->uf_regs->utodr, ioread16be(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
++ &uccf->uf_regs->udsr, ioread16be(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
++ &uccf->uf_regs->ucce, ioread32be(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
++ &uccf->uf_regs->uccm, ioread32be(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
++ &uccf->uf_regs->uccs, ioread8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
++ &uccf->uf_regs->urfb, ioread32be(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
++ &uccf->uf_regs->urfs, ioread16be(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
++ &uccf->uf_regs->urfet, ioread16be(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
++ &uccf->uf_regs->urfset, ioread16be(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
++ &uccf->uf_regs->utfb, ioread32be(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
++ &uccf->uf_regs->utfs, ioread16be(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
++ &uccf->uf_regs->utfet, ioread16be(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
++ &uccf->uf_regs->utftt, ioread16be(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
++ &uccf->uf_regs->utpt, ioread16be(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
++ &uccf->uf_regs->urtry, ioread32be(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
++ &uccf->uf_regs->guemr, ioread8(&uccf->uf_regs->guemr));
+ }
+ EXPORT_SYMBOL(ucc_fast_dump_regs);
+
+@@ -85,7 +85,7 @@ EXPORT_SYMBOL(ucc_fast_get_qe_cr_subbloc
+
+ void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
+ {
+- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
++ iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
+ }
+ EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
+
+@@ -97,7 +97,7 @@ void ucc_fast_enable(struct ucc_fast_pri
+ uf_regs = uccf->uf_regs;
+
+ /* Enable reception and/or transmission on this UCC. */
+- gumr = in_be32(&uf_regs->gumr);
++ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr |= UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 1;
+@@ -106,7 +106,7 @@ void ucc_fast_enable(struct ucc_fast_pri
+ gumr |= UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 1;
+ }
+- out_be32(&uf_regs->gumr, gumr);
++ iowrite32be(gumr, &uf_regs->gumr);
+ }
+ EXPORT_SYMBOL(ucc_fast_enable);
+
+@@ -118,7 +118,7 @@ void ucc_fast_disable(struct ucc_fast_pr
+ uf_regs = uccf->uf_regs;
+
+ /* Disable reception and/or transmission on this UCC. */
+- gumr = in_be32(&uf_regs->gumr);
++ gumr = ioread32be(&uf_regs->gumr);
+ if (mode & COMM_DIR_TX) {
+ gumr &= ~UCC_FAST_GUMR_ENT;
+ uccf->enabled_tx = 0;
+@@ -127,7 +127,7 @@ void ucc_fast_disable(struct ucc_fast_pr
+ gumr &= ~UCC_FAST_GUMR_ENR;
+ uccf->enabled_rx = 0;
+ }
+- out_be32(&uf_regs->gumr, gumr);
++ iowrite32be(gumr, &uf_regs->gumr);
+ }
+ EXPORT_SYMBOL(ucc_fast_disable);
+
+@@ -259,12 +259,13 @@ int ucc_fast_init(struct ucc_fast_info *
+ gumr |= uf_info->tenc;
+ gumr |= uf_info->tcrc;
+ gumr |= uf_info->mode;
+- out_be32(&uf_regs->gumr, gumr);
++ iowrite32be(gumr, &uf_regs->gumr);
+
+ /* Allocate memory for Tx Virtual Fifo */
+ uccf->ucc_fast_tx_virtual_fifo_base_offset =
+ qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
++ if (IS_ERR_VALUE((unsigned long)uccf->
++ ucc_fast_tx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
+@@ -277,7 +278,8 @@ int ucc_fast_init(struct ucc_fast_info *
+ qe_muram_alloc(uf_info->urfs +
+ UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
+ UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
+- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
++ if (IS_ERR_VALUE((unsigned long)uccf->
++ ucc_fast_rx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
+@@ -286,15 +288,15 @@ int ucc_fast_init(struct ucc_fast_info *
+ }
+
+ /* Set Virtual Fifo registers */
+- out_be16(&uf_regs->urfs, uf_info->urfs);
+- out_be16(&uf_regs->urfet, uf_info->urfet);
+- out_be16(&uf_regs->urfset, uf_info->urfset);
+- out_be16(&uf_regs->utfs, uf_info->utfs);
+- out_be16(&uf_regs->utfet, uf_info->utfet);
+- out_be16(&uf_regs->utftt, uf_info->utftt);
++ iowrite16be(uf_info->urfs, &uf_regs->urfs);
++ iowrite16be(uf_info->urfet, &uf_regs->urfet);
++ iowrite16be(uf_info->urfset, &uf_regs->urfset);
++ iowrite16be(uf_info->utfs, &uf_regs->utfs);
++ iowrite16be(uf_info->utfet, &uf_regs->utfet);
++ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
+- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
++ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
++ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
+
+ /* Mux clocking */
+ /* Grant Support */
+@@ -362,14 +364,14 @@ int ucc_fast_init(struct ucc_fast_info *
+ }
+
+ /* Set interrupt mask register at UCC level. */
+- out_be32(&uf_regs->uccm, uf_info->uccm_mask);
++ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
+- out_be32(&uf_regs->ucce, 0xffffffff);
++ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ *uccf_ret = uccf;
+ return 0;
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -32,6 +32,7 @@
+ #include <soc/fsl/qe/ucc_slow.h>
+
+ #include <linux/firmware.h>
++#include <asm/cpm.h>
+ #include <asm/reg.h>
+
+ /*
+--- a/include/soc/fsl/qe/qe.h
++++ b/include/soc/fsl/qe/qe.h
+@@ -17,7 +17,6 @@
+ #include <linux/spinlock.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+-#include <asm/cpm.h>
+ #include <soc/fsl/qe/immap_qe.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
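
The bulk of the patch above is a mechanical substitution: the PowerPC-only accessors (in_be32()/out_be32(), in_be16()/out_be16(), the setbits/clrsetbits helpers and spin_event_timeout()) are replaced by the generic big-endian MMIO helpers (ioread32be()/iowrite32be() and friends) plus an explicit bounded polling loop, so the QE code can build on ARM-based Layerscape SoCs where the PPC helpers do not exist. The sketch below only illustrates that accessor/polling pattern and is not part of the patch; example_issue_and_wait(), the cecr argument and the flag value are hypothetical stand-ins, while iowrite32be(), ioread32be() and udelay() are the real kernel interfaces used in the hunks above.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/*
 * PPC-only style being removed:
 *	out_be32(&regs->cecr, cmd | QE_CR_FLG);
 *	ret = spin_event_timeout((in_be32(&regs->cecr) & QE_CR_FLG) == 0,
 *				 100, 0);
 *
 * Portable style used by the patch: generic big-endian MMIO accessors
 * and a bounded udelay() poll returning 0 on success or -EIO on timeout.
 */
static int example_issue_and_wait(u32 __iomem *cecr, u32 cmd, u32 flag)
{
	int i;

	iowrite32be(cmd | flag, cecr);		/* start the command */

	for (i = 0; i < 100; i++) {
		if ((ioread32be(cecr) & flag) == 0)
			return 0;		/* flag cleared: command accepted */
		udelay(1);			/* poll for up to ~100 us */
	}

	return -EIO;				/* timed out */
}

In qe_issue_cmd() this loop replaces spin_event_timeout(), which is declared only by arch/powerpc, and the return convention changes from 1-on-success to 0-on-success / -EIO-on-timeout, as the new "ret = -EIO" / "ret = 0" assignments in the hunk show.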