author     root <root@artemis.panaceas.org>    2015-12-25 04:40:36 +0000
committer  root <root@artemis.panaceas.org>    2015-12-25 04:40:36 +0000
commit     849369d6c66d3054688672f97d31fceb8e8230fb (patch)
tree       6135abc790ca67dedbe07c39806591e70eda81ce /arch/powerpc/sysdev
initial_commit
Diffstat (limited to 'arch/powerpc/sysdev')
100 files changed, 27131 insertions, 0 deletions
diff --git a/arch/powerpc/sysdev/6xx-suspend.S b/arch/powerpc/sysdev/6xx-suspend.S new file mode 100644 index 00000000..21cda085 --- /dev/null +++ b/arch/powerpc/sysdev/6xx-suspend.S @@ -0,0 +1,52 @@ +/* + * Enter and leave sleep state on chips with 6xx-style HID0 + * power management bits, which don't leave sleep state via reset. + * + * Author: Scott Wood <scottwood@freescale.com> + * + * Copyright (c) 2006-2007 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <asm/ppc_asm.h> +#include <asm/reg.h> +#include <asm/thread_info.h> +#include <asm/asm-offsets.h> + +_GLOBAL(mpc6xx_enter_standby) + mflr r4 + + mfspr r5, SPRN_HID0 + rlwinm r5, r5, 0, ~(HID0_DOZE | HID0_NAP) + oris r5, r5, HID0_SLEEP@h + mtspr SPRN_HID0, r5 + isync + + lis r5, ret_from_standby@h + ori r5, r5, ret_from_standby@l + mtlr r5 + + rlwinm r5, r1, 0, 0, 31-THREAD_SHIFT + lwz r6, TI_LOCAL_FLAGS(r5) + ori r6, r6, _TLF_SLEEPING + stw r6, TI_LOCAL_FLAGS(r5) + + mfmsr r5 + ori r5, r5, MSR_EE + oris r5, r5, MSR_POW@h + sync + mtmsr r5 + isync + +1: b 1b + +ret_from_standby: + mfspr r5, SPRN_HID0 + rlwinm r5, r5, 0, ~HID0_SLEEP + mtspr SPRN_HID0, r5 + + mtlr r4 + blr diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig new file mode 100644 index 00000000..7b4df37a --- /dev/null +++ b/arch/powerpc/sysdev/Kconfig @@ -0,0 +1,31 @@ +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/kconfig-language.txt. +# + +config PPC4xx_PCI_EXPRESS + bool + depends on PCI && 4xx + default n + +config PPC4xx_MSI + bool + depends on PCI_MSI + depends on PCI && 4xx + default n + +config PPC_MSI_BITMAP + bool + depends on PCI_MSI + default y if MPIC + default y if FSL_PCI + default y if PPC4xx_MSI + +source "arch/powerpc/sysdev/xics/Kconfig" + +config PPC_SCOM + bool + +config SCOM_DEBUGFS + bool "Expose SCOM controllers via debugfs" + depends on PPC_SCOM + default n diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile new file mode 100644 index 00000000..0efa990e --- /dev/null +++ b/arch/powerpc/sysdev/Makefile @@ -0,0 +1,66 @@ +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +ccflags-$(CONFIG_PPC64) := -mno-minimal-toc + +mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o +obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) +fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o +obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o + +obj-$(CONFIG_PPC_MPC106) += grackle.o +obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o +obj-$(CONFIG_PPC_PMI) += pmi.o +obj-$(CONFIG_U3_DART) += dart_iommu.o +obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o +obj-$(CONFIG_FSL_SOC) += fsl_soc.o +obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y) +obj-$(CONFIG_FSL_PMC) += fsl_pmc.o +obj-$(CONFIG_FSL_LBC) += fsl_lbc.o +obj-$(CONFIG_FSL_GTM) += fsl_gtm.o +obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o +obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o +obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o +obj-$(CONFIG_FSL_RIO) += fsl_rio.o +obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o +obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ +obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ +mv64x60-$(CONFIG_PCI) += mv64x60_pci.o +obj-$(CONFIG_MV64X60) += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \ + mv64x60_udbg.o +obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o +obj-$(CONFIG_AXON_RAM) += axonram.o + 
+obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o +obj-$(CONFIG_PPC_I8259) += i8259.o +obj-$(CONFIG_IPIC) += ipic.o +obj-$(CONFIG_4xx) += uic.o +obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o +obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o +obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o +obj-$(CONFIG_OF_RTC) += of_rtc.o +ifeq ($(CONFIG_PCI),y) +obj-$(CONFIG_4xx) += ppc4xx_pci.o +endif +obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o +obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o +obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o + +obj-$(CONFIG_CPM) += cpm_common.o +obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o +obj-$(CONFIG_QUICC_ENGINE) += cpm_common.o +obj-$(CONFIG_PPC_DCR) += dcr.o +obj-$(CONFIG_8xx) += mpc8xx_pic.o cpm1.o +obj-$(CONFIG_UCODE_PATCH) += micropatch.o + +obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o +obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o + +ifeq ($(CONFIG_SUSPEND),y) +obj-$(CONFIG_6xx) += 6xx-suspend.o +endif + +obj-$(CONFIG_PPC_SCOM) += scom.o + +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-$(CONFIG_PPC_XICS) += xics/ diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c new file mode 100644 index 00000000..bd0d5406 --- /dev/null +++ b/arch/powerpc/sysdev/axonram.c @@ -0,0 +1,370 @@ +/* + * (C) Copyright IBM Deutschland Entwicklung GmbH 2006 + * + * Author: Maxim Shchetynin <maxim@de.ibm.com> + * + * Axon DDR2 device driver. + * It registers one block device per Axon's DDR2 memory bank found on a system. + * Block devices are called axonram?, their major and minor numbers are + * available in /proc/devices, /proc/partitions or in /sys/block/axonram?/dev. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/genhd.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> + +#include <asm/page.h> +#include <asm/prom.h> + +#define AXON_RAM_MODULE_NAME "axonram" +#define AXON_RAM_DEVICE_NAME "axonram" +#define AXON_RAM_MINORS_PER_DISK 16 +#define AXON_RAM_BLOCK_SHIFT PAGE_SHIFT +#define AXON_RAM_BLOCK_SIZE 1 << AXON_RAM_BLOCK_SHIFT +#define AXON_RAM_SECTOR_SHIFT 9 +#define AXON_RAM_SECTOR_SIZE 1 << AXON_RAM_SECTOR_SHIFT +#define AXON_RAM_IRQ_FLAGS IRQF_SHARED | IRQF_TRIGGER_RISING + +static int azfs_major, azfs_minor; + +struct axon_ram_bank { + struct platform_device *device; + struct gendisk *disk; + unsigned int irq_id; + unsigned long ph_addr; + unsigned long io_addr; + unsigned long size; + unsigned long ecc_counter; +}; + +static ssize_t +axon_ram_sysfs_ecc(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct platform_device *device = to_platform_device(dev); + struct axon_ram_bank *bank = device->dev.platform_data; + + BUG_ON(!bank); + + return sprintf(buf, "%ld\n", bank->ecc_counter); +} + +static DEVICE_ATTR(ecc, S_IRUGO, axon_ram_sysfs_ecc, NULL); + +/** + * axon_ram_irq_handler - interrupt handler for Axon RAM ECC + * @irq: interrupt ID + * @dev: pointer to of_device + */ +static irqreturn_t +axon_ram_irq_handler(int irq, void *dev) +{ + struct platform_device *device = dev; + struct axon_ram_bank *bank = device->dev.platform_data; + + BUG_ON(!bank); + + dev_err(&device->dev, "Correctable memory error occurred\n"); + bank->ecc_counter++; + return IRQ_HANDLED; +} + +/** + * axon_ram_make_request - make_request() method for block device + * @queue, @bio: see blk_queue_make_request() + */ +static int +axon_ram_make_request(struct request_queue *queue, struct bio *bio) +{ + struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; + unsigned long phys_mem, phys_end; + void *user_mem; + struct bio_vec *vec; + unsigned int transfered; + unsigned short idx; + int rc = 0; + + phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); + phys_end = bank->io_addr + bank->size; + transfered = 0; + bio_for_each_segment(vec, bio, idx) { + if (unlikely(phys_mem + vec->bv_len > phys_end)) { + bio_io_error(bio); + rc = -ERANGE; + break; + } + + user_mem = page_address(vec->bv_page) + vec->bv_offset; + if (bio_data_dir(bio) == READ) + memcpy(user_mem, (void *) phys_mem, vec->bv_len); + else + memcpy((void *) phys_mem, user_mem, vec->bv_len); + + phys_mem += vec->bv_len; + transfered += vec->bv_len; + } + bio_endio(bio, 0); + + return rc; +} + +/** + * axon_ram_direct_access - direct_access() method for block device + * @device, @sector, @data: see block_device_operations method + */ +static int +axon_ram_direct_access(struct block_device *device, sector_t sector, + void **kaddr, unsigned long *pfn) +{ + struct axon_ram_bank *bank = device->bd_disk->private_data; + loff_t offset; + + offset = sector; + if (device->bd_part != NULL) + offset += device->bd_part->start_sect; + offset <<= AXON_RAM_SECTOR_SHIFT; + if (offset >= bank->size) { + 
dev_err(&bank->device->dev, "Access outside of address space\n"); + return -ERANGE; + } + + *kaddr = (void *)(bank->ph_addr + offset); + *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; + + return 0; +} + +static const struct block_device_operations axon_ram_devops = { + .owner = THIS_MODULE, + .direct_access = axon_ram_direct_access +}; + +/** + * axon_ram_probe - probe() method for platform driver + * @device: see platform_driver method + */ +static int axon_ram_probe(struct platform_device *device) +{ + static int axon_ram_bank_id = -1; + struct axon_ram_bank *bank; + struct resource resource; + int rc = 0; + + axon_ram_bank_id++; + + dev_info(&device->dev, "Found memory controller on %s\n", + device->dev.of_node->full_name); + + bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL); + if (bank == NULL) { + dev_err(&device->dev, "Out of memory\n"); + rc = -ENOMEM; + goto failed; + } + + device->dev.platform_data = bank; + + bank->device = device; + + if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) { + dev_err(&device->dev, "Cannot access device tree\n"); + rc = -EFAULT; + goto failed; + } + + bank->size = resource.end - resource.start + 1; + + if (bank->size == 0) { + dev_err(&device->dev, "No DDR2 memory found for %s%d\n", + AXON_RAM_DEVICE_NAME, axon_ram_bank_id); + rc = -ENODEV; + goto failed; + } + + dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n", + AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); + + bank->ph_addr = resource.start; + bank->io_addr = (unsigned long) ioremap_prot( + bank->ph_addr, bank->size, _PAGE_NO_CACHE); + if (bank->io_addr == 0) { + dev_err(&device->dev, "ioremap() failed\n"); + rc = -EFAULT; + goto failed; + } + + bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK); + if (bank->disk == NULL) { + dev_err(&device->dev, "Cannot register disk\n"); + rc = -EFAULT; + goto failed; + } + + bank->disk->major = azfs_major; + bank->disk->first_minor = azfs_minor; + bank->disk->fops = &axon_ram_devops; + bank->disk->private_data = bank; + bank->disk->driverfs_dev = &device->dev; + + sprintf(bank->disk->disk_name, "%s%d", + AXON_RAM_DEVICE_NAME, axon_ram_bank_id); + + bank->disk->queue = blk_alloc_queue(GFP_KERNEL); + if (bank->disk->queue == NULL) { + dev_err(&device->dev, "Cannot register disk queue\n"); + rc = -EFAULT; + goto failed; + } + + set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); + blk_queue_make_request(bank->disk->queue, axon_ram_make_request); + blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); + add_disk(bank->disk); + + bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0); + if (bank->irq_id == NO_IRQ) { + dev_err(&device->dev, "Cannot access ECC interrupt ID\n"); + rc = -EFAULT; + goto failed; + } + + rc = request_irq(bank->irq_id, axon_ram_irq_handler, + AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device); + if (rc != 0) { + dev_err(&device->dev, "Cannot register ECC interrupt handler\n"); + bank->irq_id = NO_IRQ; + rc = -EFAULT; + goto failed; + } + + rc = device_create_file(&device->dev, &dev_attr_ecc); + if (rc != 0) { + dev_err(&device->dev, "Cannot create sysfs file\n"); + rc = -EFAULT; + goto failed; + } + + azfs_minor += bank->disk->minors; + + return 0; + +failed: + if (bank != NULL) { + if (bank->irq_id != NO_IRQ) + free_irq(bank->irq_id, device); + if (bank->disk != NULL) { + if (bank->disk->major > 0) + unregister_blkdev(bank->disk->major, + bank->disk->disk_name); + del_gendisk(bank->disk); + } + device->dev.platform_data = NULL; + if (bank->io_addr != 
0) + iounmap((void __iomem *) bank->io_addr); + kfree(bank); + } + + return rc; +} + +/** + * axon_ram_remove - remove() method for platform driver + * @device: see of_platform_driver method + */ +static int +axon_ram_remove(struct platform_device *device) +{ + struct axon_ram_bank *bank = device->dev.platform_data; + + BUG_ON(!bank || !bank->disk); + + device_remove_file(&device->dev, &dev_attr_ecc); + free_irq(bank->irq_id, device); + del_gendisk(bank->disk); + iounmap((void __iomem *) bank->io_addr); + kfree(bank); + + return 0; +} + +static struct of_device_id axon_ram_device_id[] = { + { + .type = "dma-memory" + }, + {} +}; + +static struct platform_driver axon_ram_driver = { + .probe = axon_ram_probe, + .remove = axon_ram_remove, + .driver = { + .name = AXON_RAM_MODULE_NAME, + .owner = THIS_MODULE, + .of_match_table = axon_ram_device_id, + }, +}; + +/** + * axon_ram_init + */ +static int __init +axon_ram_init(void) +{ + azfs_major = register_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); + if (azfs_major < 0) { + printk(KERN_ERR "%s cannot become block device major number\n", + AXON_RAM_MODULE_NAME); + return -EFAULT; + } + azfs_minor = 0; + + return platform_driver_register(&axon_ram_driver); +} + +/** + * axon_ram_exit + */ +static void __exit +axon_ram_exit(void) +{ + platform_driver_unregister(&axon_ram_driver); + unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); +} + +module_init(axon_ram_init); +module_exit(axon_ram_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Maxim Shchetynin <maxim@de.ibm.com>"); +MODULE_DESCRIPTION("Axon DDR2 RAM device driver for IBM Cell BE"); diff --git a/arch/powerpc/sysdev/bestcomm/Kconfig b/arch/powerpc/sysdev/bestcomm/Kconfig new file mode 100644 index 00000000..29e42708 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/Kconfig @@ -0,0 +1,36 @@ +# +# Kconfig options for Bestcomm +# + +config PPC_BESTCOMM + tristate "Bestcomm DMA engine support" + depends on PPC_MPC52xx + default n + select PPC_LIB_RHEAP + help + BestComm is the name of the communication coprocessor found + on the Freescale MPC5200 family of processor. Its usage is + optional for some drivers (like ATA), but required for + others (like FEC). + + If you want to use drivers that require DMA operations, + answer Y or M. Otherwise say N. + +config PPC_BESTCOMM_ATA + tristate + depends on PPC_BESTCOMM + help + This option enables the support for the ATA task. + +config PPC_BESTCOMM_FEC + tristate + depends on PPC_BESTCOMM + help + This option enables the support for the FEC tasks. + +config PPC_BESTCOMM_GEN_BD + tristate + depends on PPC_BESTCOMM + help + This option enables the support for the GenBD tasks. 
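A note on axonram.c earlier in this commit: axon_ram_irq_handler() only counts correctable ECC events, and axon_ram_sysfs_ecc() publishes the running count through the read-only "ecc" attribute created in probe(). A minimal user-space poller, sketched under the assumption that the attribute surfaces at /sys/block/axonram0/device/ecc (the exact sysfs path depends on how driverfs_dev links the nodes and is not spelled out in the commit):

/* Sketch only: read the ECC counter exposed via dev_attr_ecc.
 * The sysfs path below is an assumption, not taken from the commit. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/axonram0/device/ecc", "r");
	unsigned long ecc;

	if (!f) {
		perror("open ecc attribute");
		return 1;
	}
	if (fscanf(f, "%lu", &ecc) == 1)
		printf("correctable ECC errors so far: %lu\n", ecc);
	fclose(f);
	return 0;
}

One quirk worth flagging: axon_ram_direct_access() fills *pfn from virt_to_phys(kaddr), i.e. from the address of the on-stack pointer rather than from the mapped address stored in *kaddr; later mainline kernels changed this to virt_to_phys(*kaddr).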
+ diff --git a/arch/powerpc/sysdev/bestcomm/Makefile b/arch/powerpc/sysdev/bestcomm/Makefile new file mode 100644 index 00000000..aed2df2a --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for BestComm & co +# + +bestcomm-core-objs := bestcomm.o sram.o +bestcomm-ata-objs := ata.o bcom_ata_task.o +bestcomm-fec-objs := fec.o bcom_fec_rx_task.o bcom_fec_tx_task.o +bestcomm-gen-bd-objs := gen_bd.o bcom_gen_bd_rx_task.o bcom_gen_bd_tx_task.o + +obj-$(CONFIG_PPC_BESTCOMM) += bestcomm-core.o +obj-$(CONFIG_PPC_BESTCOMM_ATA) += bestcomm-ata.o +obj-$(CONFIG_PPC_BESTCOMM_FEC) += bestcomm-fec.o +obj-$(CONFIG_PPC_BESTCOMM_GEN_BD) += bestcomm-gen-bd.o + diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c new file mode 100644 index 00000000..901c9f91 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/ata.c @@ -0,0 +1,157 @@ +/* + * Bestcomm ATA task driver + * + * + * Patterned after bestcomm/fec.c by Dale Farnsworth <dfarnsworth@mvista.com> + * 2003-2004 (c) MontaVista, Software, Inc. + * + * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2006 Freescale - John Rigby + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <asm/io.h> + +#include "bestcomm.h" +#include "bestcomm_priv.h" +#include "ata.h" + + +/* ======================================================================== */ +/* Task image/var/inc */ +/* ======================================================================== */ + +/* ata task image */ +extern u32 bcom_ata_task[]; + +/* ata task vars that need to be set before enabling the task */ +struct bcom_ata_var { + u32 enable; /* (u16*) address of task's control register */ + u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ + u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ + u32 bd_start; /* (struct bcom_bd*) current bd */ + u32 buffer_size; /* size of receive buffer */ +}; + +/* ata task incs that need to be set before enabling the task */ +struct bcom_ata_inc { + u16 pad0; + s16 incr_bytes; + u16 pad1; + s16 incr_dst; + u16 pad2; + s16 incr_src; +}; + + +/* ======================================================================== */ +/* Task support code */ +/* ======================================================================== */ + +struct bcom_task * +bcom_ata_init(int queue_len, int maxbufsize) +{ + struct bcom_task *tsk; + struct bcom_ata_var *var; + struct bcom_ata_inc *inc; + + /* Prefetch breaks ATA DMA. 
Turn it off for ATA DMA */ + bcom_disable_prefetch(); + + tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0); + if (!tsk) + return NULL; + + tsk->flags = BCOM_FLAGS_NONE; + + bcom_ata_reset_bd(tsk); + + var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); + inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); + + if (bcom_load_image(tsk->tasknum, bcom_ata_task)) { + bcom_task_free(tsk); + return NULL; + } + + var->enable = bcom_eng->regs_base + + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); + var->bd_base = tsk->bd_pa; + var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); + var->bd_start = tsk->bd_pa; + var->buffer_size = maxbufsize; + + /* Configure some stuff */ + bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA); + bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); + + out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_RX], BCOM_IPR_ATA_RX); + out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ATA_TX], BCOM_IPR_ATA_TX); + + out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ + + return tsk; +} +EXPORT_SYMBOL_GPL(bcom_ata_init); + +void bcom_ata_rx_prepare(struct bcom_task *tsk) +{ + struct bcom_ata_inc *inc; + + inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); + + inc->incr_bytes = -(s16)sizeof(u32); + inc->incr_src = 0; + inc->incr_dst = sizeof(u32); + + bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX); +} +EXPORT_SYMBOL_GPL(bcom_ata_rx_prepare); + +void bcom_ata_tx_prepare(struct bcom_task *tsk) +{ + struct bcom_ata_inc *inc; + + inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum); + + inc->incr_bytes = -(s16)sizeof(u32); + inc->incr_src = sizeof(u32); + inc->incr_dst = 0; + + bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX); +} +EXPORT_SYMBOL_GPL(bcom_ata_tx_prepare); + +void bcom_ata_reset_bd(struct bcom_task *tsk) +{ + struct bcom_ata_var *var; + + /* Reset all BD */ + memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + + tsk->index = 0; + tsk->outdex = 0; + + var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum); + var->bd_start = var->bd_base; +} +EXPORT_SYMBOL_GPL(bcom_ata_reset_bd); + +void bcom_ata_release(struct bcom_task *tsk) +{ + /* Nothing special for the ATA tasks */ + bcom_task_free(tsk); +} +EXPORT_SYMBOL_GPL(bcom_ata_release); + + +MODULE_DESCRIPTION("BestComm ATA task driver"); +MODULE_AUTHOR("John Rigby"); +MODULE_LICENSE("GPL v2"); + diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h new file mode 100644 index 00000000..0b237181 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/ata.h @@ -0,0 +1,30 @@ +/* + * Header for Bestcomm ATA task driver + * + * + * Copyright (C) 2006 Freescale - John Rigby + * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#ifndef __BESTCOMM_ATA_H__ +#define __BESTCOMM_ATA_H__ + + +struct bcom_ata_bd { + u32 status; + u32 src_pa; + u32 dst_pa; +}; + +extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize); +extern void bcom_ata_rx_prepare(struct bcom_task *tsk); +extern void bcom_ata_tx_prepare(struct bcom_task *tsk); +extern void bcom_ata_reset_bd(struct bcom_task *tsk); +extern void bcom_ata_release(struct bcom_task *tsk); + +#endif /* __BESTCOMM_ATA_H__ */ + diff --git a/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c b/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c new file mode 100644 index 00000000..cc6049a4 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bcom_ata_task.c @@ -0,0 +1,67 @@ +/* + * Bestcomm ATA task microcode + * + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Created based on bestcom/code_dma/image_rtos1/dma_image.hex + */ + +#include <asm/types.h> + +/* + * The header consists of the following fields: + * u32 magic; + * u8 desc_size; + * u8 var_size; + * u8 inc_size; + * u8 first_var; + * u8 reserved[8]; + * + * The size fields contain the number of 32-bit words. + */ + +u32 bcom_ata_task[] = { + /* header */ + 0x4243544b, + 0x0e060709, + 0x00000000, + 0x00000000, + + /* Task descriptors */ + 0x8198009b, /* LCD: idx0 = var3; idx0 <= var2; idx0 += inc3 */ + 0x13e00c08, /* DRD1A: var3 = var1; FN=0 MORE init=31 WS=0 RS=0 */ + 0xb8000264, /* LCD: idx1 = *idx0, idx2 = var0; idx1 < var9; idx1 += inc4, idx2 += inc4 */ + 0x10000f00, /* DRD1A: var3 = idx0; FN=0 MORE init=0 WS=0 RS=0 */ + 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ + 0x0c8cfc8a, /* DRD2B1: *idx2 = EU3(); EU3(*idx2,var10) */ + 0xd8988240, /* LCDEXT: idx1 = idx1; idx1 > var9; idx1 += inc0 */ + 0xf845e011, /* LCDEXT: idx2 = *(idx0 + var00000015); ; idx2 += inc2 */ + 0xb845e00a, /* LCD: idx3 = *(idx0 + var00000019); ; idx3 += inc1 */ + 0x0bfecf90, /* DRD1A: *idx3 = *idx2; FN=0 TFD init=31 WS=3 RS=3 */ + 0x9898802d, /* LCD: idx1 = idx1; idx1 once var0; idx1 += inc5 */ + 0x64000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 INT EXT init=0 WS=0 RS=0 */ + 0x0c0cf849, /* DRD2B1: *idx0 = EU3(); EU3(idx1,var9) */ + 0x000001f8, /* NOP */ + + /* VAR[9]-VAR[14] */ + 0x40000000, + 0x7fff7fff, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + + /* INC[0]-INC[6] */ + 0x40000000, + 0xe0000000, + 0xe0000000, + 0xa000000c, + 0x20000000, + 0x00000000, + 0x00000000, +}; + diff --git a/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c new file mode 100644 index 00000000..a1ad6a02 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c @@ -0,0 +1,78 @@ +/* + * Bestcomm FEC RX task microcode + * + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex + * on Tue Mar 22 11:19:38 2005 GMT + */ + +#include <asm/types.h> + +/* + * The header consists of the following fields: + * u32 magic; + * u8 desc_size; + * u8 var_size; + * u8 inc_size; + * u8 first_var; + * u8 reserved[8]; + * + * The size fields contain the number of 32-bit words. 
+ */ + +u32 bcom_fec_rx_task[] = { + /* header */ + 0x4243544b, + 0x18060709, + 0x00000000, + 0x00000000, + + /* Task descriptors */ + 0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ + 0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */ + 0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */ + 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ + 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ + 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ + 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ + 0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */ + 0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */ + 0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */ + 0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */ + 0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */ + 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ + 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ + 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ + 0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */ + 0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */ + 0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */ + 0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */ + 0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */ + 0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */ + 0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */ + 0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */ + 0x000001f8, /* NOP */ + + /* VAR[9]-VAR[14] */ + 0x40000000, + 0x7fff7fff, + 0x00000000, + 0x00000003, + 0x40000008, + 0x43ffffff, + + /* INC[0]-INC[6] */ + 0x40000000, + 0xe0000000, + 0xe0000000, + 0xa0000008, + 0x20000000, + 0x00000000, + 0x4000ffff, +}; + diff --git a/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c new file mode 100644 index 00000000..b1c495c3 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bcom_fec_tx_task.c @@ -0,0 +1,91 @@ +/* + * Bestcomm FEC TX task microcode + * + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex + * on Tue Mar 22 11:19:29 2005 GMT + */ + +#include <asm/types.h> + +/* + * The header consists of the following fields: + * u32 magic; + * u8 desc_size; + * u8 var_size; + * u8 inc_size; + * u8 first_var; + * u8 reserved[8]; + * + * The size fields contain the number of 32-bit words. 
+ */ + +u32 bcom_fec_tx_task[] = { + /* header */ + 0x4243544b, + 0x2407070d, + 0x00000000, + 0x00000000, + + /* Task descriptors */ + 0x8018001b, /* LCD: idx0 = var0; idx0 <= var0; idx0 += inc3 */ + 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ + 0x01ccfc0d, /* DRD2B1: var7 = EU3(); EU3(*idx0,var13) */ + 0x8082a123, /* LCD: idx0 = var1, idx1 = var5; idx1 <= var4; idx0 += inc4, idx1 += inc3 */ + 0x10801418, /* DRD1A: var5 = var3; FN=0 MORE init=4 WS=0 RS=0 */ + 0xf88103a4, /* LCDEXT: idx2 = *idx1, idx3 = var2; idx2 < var14; idx2 += inc4, idx3 += inc4 */ + 0x801a6024, /* LCD: idx4 = var0; ; idx4 += inc4 */ + 0x10001708, /* DRD1A: var5 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ + 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ + 0x0cccfccf, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var15) */ + 0x991a002c, /* LCD: idx2 = idx2, idx3 = idx4; idx2 once var0; idx2 += inc5, idx3 += inc4 */ + 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ + 0x024cfc4d, /* DRD2B1: var9 = EU3(); EU3(*idx1,var13) */ + 0x60000003, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=3 EXT init=0 WS=0 RS=0 */ + 0x0cccf247, /* DRD2B1: *idx3 = EU3(); EU3(var9,var7) */ + 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ + 0xb8c80029, /* LCD: idx3 = *(idx1 + var0000001a); idx3 once var0; idx3 += inc5 */ + 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ + 0x088cf8d1, /* DRD2B1: idx2 = EU3(); EU3(idx3,var17) */ + 0x00002f10, /* DRD1A: var11 = idx2; FN=0 init=0 WS=0 RS=0 */ + 0x99198432, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var16; idx2 += inc6, idx3 += inc2 */ + 0x008ac398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=1 RS=1 */ + 0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */ + 0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */ + 0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */ + 0x048cfc53, /* DRD2B1: var18 = EU3(); EU3(*idx1,var19) */ + 0x60000008, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=8 EXT init=0 WS=0 RS=0 */ + 0x088cf48b, /* DRD2B1: idx2 = EU3(); EU3(var18,var11) */ + 0x99198481, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var18; idx2 += inc0, idx3 += inc1 */ + 0x009ec398, /* DRD1A: *idx0 = *idx3; FN=0 init=4 WS=3 RS=3 */ + 0x991983b2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var14; idx2 += inc6, idx3 += inc2 */ + 0x088ac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD init=4 WS=1 RS=1 */ + 0x9919002d, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc5 */ + 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ + 0x0c4cf88e, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var14) */ + 0x000001f8, /* NOP */ + + /* VAR[13]-VAR[19] */ + 0x0c000000, + 0x40000000, + 0x7fff7fff, + 0x00000000, + 0x00000003, + 0x40000004, + 0x43ffffff, + + /* INC[0]-INC[6] */ + 0x40000000, + 0xe0000000, + 0xe0000000, + 0xa0000008, + 0x20000000, + 0x00000000, + 0x4000ffff, +}; + diff --git a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c new file mode 100644 index 00000000..efee022b --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_rx_task.c @@ -0,0 +1,63 @@ +/* + * Bestcomm GenBD RX task microcode + * + * Copyright (C) 2006 AppSpec Computer Technologies Corp. + * Jeff Gibbons <jeff.gibbons@appspec.com> + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex + * on Tue Mar 4 10:14:12 2006 GMT + * + */ + +#include <asm/types.h> + +/* + * The header consists of the following fields: + * u32 magic; + * u8 desc_size; + * u8 var_size; + * u8 inc_size; + * u8 first_var; + * u8 reserved[8]; + * + * The size fields contain the number of 32-bit words. + */ + +u32 bcom_gen_bd_rx_task[] = { + /* header */ + 0x4243544b, + 0x0d020409, + 0x00000000, + 0x00000000, + + /* Task descriptors */ + 0x808220da, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc3, idx1 += inc2 */ + 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */ + 0xb880025b, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc3, idx3 += inc3 */ + 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ + 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ + 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ + 0xd9190240, /* LCDEXT: idx2 = idx2; idx2 > var9; idx2 += inc0 */ + 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */ + 0x07fecf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=31 WS=3 RS=3 */ + 0x99190024, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc4 */ + 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ + 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */ + 0x000001f8, /* NOP */ + + /* VAR[9]-VAR[10] */ + 0x40000000, + 0x7fff7fff, + + /* INC[0]-INC[3] */ + 0x40000000, + 0xe0000000, + 0xa0000008, + 0x20000000, +}; + diff --git a/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c new file mode 100644 index 00000000..c605aa42 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bcom_gen_bd_tx_task.c @@ -0,0 +1,69 @@ +/* + * Bestcomm GenBD TX task microcode + * + * Copyright (C) 2006 AppSpec Computer Technologies Corp. + * Jeff Gibbons <jeff.gibbons@appspec.com> + * Copyright (c) 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex + * on Tue Mar 4 10:14:12 2006 GMT + * + */ + +#include <asm/types.h> + +/* + * The header consists of the following fields: + * u32 magic; + * u8 desc_size; + * u8 var_size; + * u8 inc_size; + * u8 first_var; + * u8 reserved[8]; + * + * The size fields contain the number of 32-bit words. 
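A concrete decode of that header, using bcom_gen_bd_rx_task[] above: its second word is 0x0d020409, so desc_size=13 (the thirteen descriptor words ending in the NOP), var_size=2 (VAR[9]-VAR[10]), inc_size=4 (INC[0]-INC[3]) and first_var=9, which is why bcom_load_image() copies the variable block to var + hdr->first_var. A stand-alone sketch of the unpacking (field order per the big-endian MPC5200 layout):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sizes = 0x0d020409;	/* word 1 of bcom_gen_bd_rx_task[] */

	printf("desc=%u var=%u inc=%u first_var=%u\n",
	       (unsigned)(sizes >> 24),		/* 13 descriptor words */
	       (unsigned)((sizes >> 16) & 0xff),	/* 2 variables */
	       (unsigned)((sizes >> 8) & 0xff),	/* 4 increments */
	       (unsigned)(sizes & 0xff));	/* variables start at VAR[9] */
	return 0;
}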
+ */ + +u32 bcom_gen_bd_tx_task[] = { + /* header */ + 0x4243544b, + 0x0f040609, + 0x00000000, + 0x00000000, + + /* Task descriptors */ + 0x800220e3, /* LCD: idx0 = var0, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */ + 0x13e01010, /* DRD1A: var4 = var2; FN=0 MORE init=31 WS=0 RS=0 */ + 0xb8808264, /* LCD: idx2 = *idx1, idx3 = var1; idx2 < var9; idx2 += inc4, idx3 += inc4 */ + 0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */ + 0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */ + 0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */ + 0xd9190300, /* LCDEXT: idx2 = idx2; idx2 > var12; idx2 += inc0 */ + 0xb8c5e009, /* LCD: idx3 = *(idx1 + var00000015); ; idx3 += inc1 */ + 0x03fec398, /* DRD1A: *idx0 = *idx3; FN=0 init=31 WS=3 RS=3 */ + 0x9919826a, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc5, idx3 += inc2 */ + 0x0feac398, /* DRD1A: *idx0 = *idx3; FN=0 TFD INT init=31 WS=1 RS=1 */ + 0x99190036, /* LCD: idx2 = idx2; idx2 once var0; idx2 += inc6 */ + 0x60000005, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=5 EXT init=0 WS=0 RS=0 */ + 0x0c4cf889, /* DRD2B1: *idx1 = EU3(); EU3(idx2,var9) */ + 0x000001f8, /* NOP */ + + /* VAR[9]-VAR[12] */ + 0x40000000, + 0x7fff7fff, + 0x00000000, + 0x40000004, + + /* INC[0]-INC[5] */ + 0x40000000, + 0xe0000000, + 0xe0000000, + 0xa0000008, + 0x20000000, + 0x4000ffff, +}; + diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c new file mode 100644 index 00000000..b3fbb271 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c @@ -0,0 +1,532 @@ +/* + * Driver for MPC52xx processor BestComm peripheral controller + * + * + * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2005 Varma Electronics Oy, + * ( by Andrey Volkov <avolkov@varma-el.com> ) + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/mpc52xx.h> + +#include "sram.h" +#include "bestcomm_priv.h" +#include "bestcomm.h" + +#define DRIVER_NAME "bestcomm-core" + +/* MPC5200 device tree match tables */ +static struct of_device_id mpc52xx_sram_ids[] __devinitdata = { + { .compatible = "fsl,mpc5200-sram", }, + { .compatible = "mpc5200-sram", }, + {} +}; + + +struct bcom_engine *bcom_eng = NULL; +EXPORT_SYMBOL_GPL(bcom_eng); /* needed for inline functions */ + +/* ======================================================================== */ +/* Public and private API */ +/* ======================================================================== */ + +/* Private API */ + +struct bcom_task * +bcom_task_alloc(int bd_count, int bd_size, int priv_size) +{ + int i, tasknum = -1; + struct bcom_task *tsk; + + /* Don't try to do anything if bestcomm init failed */ + if (!bcom_eng) + return NULL; + + /* Get and reserve a task num */ + spin_lock(&bcom_eng->lock); + + for (i=0; i<BCOM_MAX_TASKS; i++) + if (!bcom_eng->tdt[i].stop) { /* we use stop as a marker */ + bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */ + tasknum = i; + break; + } + + spin_unlock(&bcom_eng->lock); + + if (tasknum < 0) + return NULL; + + /* Allocate our structure */ + tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL); + if (!tsk) + goto error; + + tsk->tasknum = tasknum; + if (priv_size) + tsk->priv = (void*)tsk + sizeof(struct bcom_task); + + /* Get IRQ of that task */ + tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum); + if (tsk->irq == NO_IRQ) + goto error; + + /* Init the BDs, if needed */ + if (bd_count) { + tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL); + if (!tsk->cookie) + goto error; + + tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa); + if (!tsk->bd) + goto error; + memset(tsk->bd, 0x00, bd_count * bd_size); + + tsk->num_bd = bd_count; + tsk->bd_size = bd_size; + } + + return tsk; + +error: + if (tsk) { + if (tsk->irq != NO_IRQ) + irq_dispose_mapping(tsk->irq); + bcom_sram_free(tsk->bd); + kfree(tsk->cookie); + kfree(tsk); + } + + bcom_eng->tdt[tasknum].stop = 0; + + return NULL; +} +EXPORT_SYMBOL_GPL(bcom_task_alloc); + +void +bcom_task_free(struct bcom_task *tsk) +{ + /* Stop the task */ + bcom_disable_task(tsk->tasknum); + + /* Clear TDT */ + bcom_eng->tdt[tsk->tasknum].start = 0; + bcom_eng->tdt[tsk->tasknum].stop = 0; + + /* Free everything */ + irq_dispose_mapping(tsk->irq); + bcom_sram_free(tsk->bd); + kfree(tsk->cookie); + kfree(tsk); +} +EXPORT_SYMBOL_GPL(bcom_task_free); + +int +bcom_load_image(int task, u32 *task_image) +{ + struct bcom_task_header *hdr = (struct bcom_task_header *)task_image; + struct bcom_tdt *tdt; + u32 *desc, *var, *inc; + u32 *desc_src, *var_src, *inc_src; + + /* Safety checks */ + if (hdr->magic != BCOM_TASK_MAGIC) { + printk(KERN_ERR DRIVER_NAME + ": Trying to load invalid microcode\n"); + return -EINVAL; + } + + if ((task < 0) || (task >= BCOM_MAX_TASKS)) { + printk(KERN_ERR DRIVER_NAME + ": Trying to load invalid task %d\n", task); + return -EINVAL; + } + + /* Initial load or reload */ + tdt = &bcom_eng->tdt[task]; + + if (tdt->start) { + desc = bcom_task_desc(task); + if (hdr->desc_size != bcom_task_num_descs(task)) { + printk(KERN_ERR DRIVER_NAME + ": Trying to reload wrong task image " + "(%d size %d/%d)!\n", + task, + 
hdr->desc_size, + bcom_task_num_descs(task)); + return -EINVAL; + } + } else { + phys_addr_t start_pa; + + desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa); + if (!desc) + return -ENOMEM; + + tdt->start = start_pa; + tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32)); + } + + var = bcom_task_var(task); + inc = bcom_task_inc(task); + + /* Clear & copy */ + memset(var, 0x00, BCOM_VAR_SIZE); + memset(inc, 0x00, BCOM_INC_SIZE); + + desc_src = (u32 *)(hdr + 1); + var_src = desc_src + hdr->desc_size; + inc_src = var_src + hdr->var_size; + + memcpy(desc, desc_src, hdr->desc_size * sizeof(u32)); + memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32)); + memcpy(inc, inc_src, hdr->inc_size * sizeof(u32)); + + return 0; +} +EXPORT_SYMBOL_GPL(bcom_load_image); + +void +bcom_set_initiator(int task, int initiator) +{ + int i; + int num_descs; + u32 *desc; + int next_drd_has_initiator; + + bcom_set_tcr_initiator(task, initiator); + + /* Just setting tcr is apparently not enough due to some problem */ + /* with it. So we just go thru all the microcode and replace in */ + /* the DRD directly */ + + desc = bcom_task_desc(task); + next_drd_has_initiator = 1; + num_descs = bcom_task_num_descs(task); + + for (i=0; i<num_descs; i++, desc++) { + if (!bcom_desc_is_drd(*desc)) + continue; + if (next_drd_has_initiator) + if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS) + bcom_set_desc_initiator(desc, initiator); + next_drd_has_initiator = !bcom_drd_is_extended(*desc); + } +} +EXPORT_SYMBOL_GPL(bcom_set_initiator); + + +/* Public API */ + +void +bcom_enable(struct bcom_task *tsk) +{ + bcom_enable_task(tsk->tasknum); +} +EXPORT_SYMBOL_GPL(bcom_enable); + +void +bcom_disable(struct bcom_task *tsk) +{ + bcom_disable_task(tsk->tasknum); +} +EXPORT_SYMBOL_GPL(bcom_disable); + + +/* ======================================================================== */ +/* Engine init/cleanup */ +/* ======================================================================== */ + +/* Function Descriptor table */ +/* this will need to be updated if Freescale changes their task code FDT */ +static u32 fdt_ops[] = { + 0xa0045670, /* FDT[48] - load_acc() */ + 0x80045670, /* FDT[49] - unload_acc() */ + 0x21800000, /* FDT[50] - and() */ + 0x21e00000, /* FDT[51] - or() */ + 0x21500000, /* FDT[52] - xor() */ + 0x21400000, /* FDT[53] - andn() */ + 0x21500000, /* FDT[54] - not() */ + 0x20400000, /* FDT[55] - add() */ + 0x20500000, /* FDT[56] - sub() */ + 0x20800000, /* FDT[57] - lsh() */ + 0x20a00000, /* FDT[58] - rsh() */ + 0xc0170000, /* FDT[59] - crc8() */ + 0xc0145670, /* FDT[60] - crc16() */ + 0xc0345670, /* FDT[61] - crc32() */ + 0xa0076540, /* FDT[62] - endian32() */ + 0xa0000760, /* FDT[63] - endian16() */ +}; + + +static int __devinit +bcom_engine_init(void) +{ + int task; + phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa; + unsigned int tdt_size, ctx_size, var_size, fdt_size; + + /* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */ + tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt); + ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE; + var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE); + fdt_size = BCOM_FDT_SIZE; + + bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa); + bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa); + bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa); + bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa); + + if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) { + 
printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n"); + + bcom_sram_free(bcom_eng->tdt); + bcom_sram_free(bcom_eng->ctx); + bcom_sram_free(bcom_eng->var); + bcom_sram_free(bcom_eng->fdt); + + return -ENOMEM; + } + + memset(bcom_eng->tdt, 0x00, tdt_size); + memset(bcom_eng->ctx, 0x00, ctx_size); + memset(bcom_eng->var, 0x00, var_size); + memset(bcom_eng->fdt, 0x00, fdt_size); + + /* Copy the FDT for the EU#3 */ + memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops)); + + /* Initialize Task base structure */ + for (task=0; task<BCOM_MAX_TASKS; task++) + { + out_be16(&bcom_eng->regs->tcr[task], 0); + out_8(&bcom_eng->regs->ipr[task], 0); + + bcom_eng->tdt[task].context = ctx_pa; + bcom_eng->tdt[task].var = var_pa; + bcom_eng->tdt[task].fdt = fdt_pa; + + var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE; + ctx_pa += BCOM_CTX_SIZE; + } + + out_be32(&bcom_eng->regs->taskBar, tdt_pa); + + /* Init 'always' initiator */ + out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS); + + /* Disable COMM Bus Prefetch on the original 5200; it's broken */ + if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) + bcom_disable_prefetch(); + + /* Init lock */ + spin_lock_init(&bcom_eng->lock); + + return 0; +} + +static void +bcom_engine_cleanup(void) +{ + int task; + + /* Stop all tasks */ + for (task=0; task<BCOM_MAX_TASKS; task++) + { + out_be16(&bcom_eng->regs->tcr[task], 0); + out_8(&bcom_eng->regs->ipr[task], 0); + } + + out_be32(&bcom_eng->regs->taskBar, 0ul); + + /* Release the SRAM zones */ + bcom_sram_free(bcom_eng->tdt); + bcom_sram_free(bcom_eng->ctx); + bcom_sram_free(bcom_eng->var); + bcom_sram_free(bcom_eng->fdt); +} + + +/* ======================================================================== */ +/* OF platform driver */ +/* ======================================================================== */ + +static int __devinit mpc52xx_bcom_probe(struct platform_device *op) +{ + struct device_node *ofn_sram; + struct resource res_bcom; + + int rv; + + /* Inform user we're ok so far */ + printk(KERN_INFO "DMA: MPC52xx BestComm driver\n"); + + /* Get the bestcomm node */ + of_node_get(op->dev.of_node); + + /* Prepare SRAM */ + ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids); + if (!ofn_sram) { + printk(KERN_ERR DRIVER_NAME ": " + "No SRAM found in device tree\n"); + rv = -ENODEV; + goto error_ofput; + } + rv = bcom_sram_init(ofn_sram, DRIVER_NAME); + of_node_put(ofn_sram); + + if (rv) { + printk(KERN_ERR DRIVER_NAME ": " + "Error in SRAM init\n"); + goto error_ofput; + } + + /* Get a clean struct */ + bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL); + if (!bcom_eng) { + printk(KERN_ERR DRIVER_NAME ": " + "Can't allocate state structure\n"); + rv = -ENOMEM; + goto error_sramclean; + } + + /* Save the node */ + bcom_eng->ofnode = op->dev.of_node; + + /* Get, reserve & map io */ + if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) { + printk(KERN_ERR DRIVER_NAME ": " + "Can't get resource\n"); + rv = -EINVAL; + goto error_sramclean; + } + + if (!request_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma), + DRIVER_NAME)) { + printk(KERN_ERR DRIVER_NAME ": " + "Can't request registers region\n"); + rv = -EBUSY; + goto error_sramclean; + } + + bcom_eng->regs_base = res_bcom.start; + bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma)); + if (!bcom_eng->regs) { + printk(KERN_ERR DRIVER_NAME ": " + "Can't map registers\n"); + rv = -ENOMEM; + goto error_release; + } + + /* Now, do the real init */ + rv = bcom_engine_init(); + if (rv) + goto 
error_unmap; + + /* Done ! */ + printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n", + (long)bcom_eng->regs_base); + + return 0; + + /* Error path */ +error_unmap: + iounmap(bcom_eng->regs); +error_release: + release_mem_region(res_bcom.start, sizeof(struct mpc52xx_sdma)); +error_sramclean: + kfree(bcom_eng); + bcom_sram_cleanup(); +error_ofput: + of_node_put(op->dev.of_node); + + printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n"); + + return rv; +} + + +static int mpc52xx_bcom_remove(struct platform_device *op) +{ + /* Clean up the engine */ + bcom_engine_cleanup(); + + /* Cleanup SRAM */ + bcom_sram_cleanup(); + + /* Release regs */ + iounmap(bcom_eng->regs); + release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma)); + + /* Release the node */ + of_node_put(bcom_eng->ofnode); + + /* Release memory */ + kfree(bcom_eng); + bcom_eng = NULL; + + return 0; +} + +static struct of_device_id mpc52xx_bcom_of_match[] = { + { .compatible = "fsl,mpc5200-bestcomm", }, + { .compatible = "mpc5200-bestcomm", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); + + +static struct platform_driver mpc52xx_bcom_of_platform_driver = { + .probe = mpc52xx_bcom_probe, + .remove = mpc52xx_bcom_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .of_match_table = mpc52xx_bcom_of_match, + }, +}; + + +/* ======================================================================== */ +/* Module */ +/* ======================================================================== */ + +static int __init +mpc52xx_bcom_init(void) +{ + return platform_driver_register(&mpc52xx_bcom_of_platform_driver); +} + +static void __exit +mpc52xx_bcom_exit(void) +{ + platform_driver_unregister(&mpc52xx_bcom_of_platform_driver); +} + +/* If we're not a module, we must make sure everything is setup before */ +/* anyone tries to use us ... that's why we use subsys_initcall instead */ +/* of module_init. */ +subsys_initcall(mpc52xx_bcom_init); +module_exit(mpc52xx_bcom_exit); + +MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA"); +MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>"); +MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>"); +MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>"); +MODULE_LICENSE("GPL v2"); + diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h new file mode 100644 index 00000000..a0e2e6b1 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h @@ -0,0 +1,213 @@ +/* + * Public header for the MPC52xx processor BestComm driver + * + * + * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2005 Varma Electronics Oy, + * ( by Andrey Volkov <avolkov@varma-el.com> ) + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __BESTCOMM_H__ +#define __BESTCOMM_H__ + +/** + * struct bcom_bd - Structure describing a generic BestComm buffer descriptor + * @status: The current status of this buffer. Exact meaning depends on the + * task type + * @data: An array of u32 extra data. Size of array is task dependent. + * + * Note: Don't dereference a bcom_bd pointer as an array. The size of the + * bcom_bd is variable. Use bcom_get_bd() instead. 
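To make that warning concrete: sizeof(struct bcom_bd) covers only the status word (data[] is a zero-length array), while a real descriptor such as struct bcom_ata_bd from ata.h earlier is three words (status, src_pa, dst_pa). Indexing tsk->bd[i] would therefore scale by the wrong stride; bcom_get_bd() below scales by the task's actual bd_size instead.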
+ */ +struct bcom_bd { + u32 status; + u32 data[0]; /* variable payload size */ +}; + +/* ======================================================================== */ +/* Generic task management */ +/* ======================================================================== */ + +/** + * struct bcom_task - Structure describing a loaded BestComm task + * + * This structure is never built by the driver it self. It's built and + * filled the intermediate layer of the BestComm API, the task dependent + * support code. + * + * Most likely you don't need to poke around inside this structure. The + * fields are exposed in the header just for the sake of inline functions + */ +struct bcom_task { + unsigned int tasknum; + unsigned int flags; + int irq; + + struct bcom_bd *bd; + phys_addr_t bd_pa; + void **cookie; + unsigned short index; + unsigned short outdex; + unsigned int num_bd; + unsigned int bd_size; + + void* priv; +}; + +#define BCOM_FLAGS_NONE 0x00000000ul +#define BCOM_FLAGS_ENABLE_TASK (1ul << 0) + +/** + * bcom_enable - Enable a BestComm task + * @tsk: The BestComm task structure + * + * This function makes sure the given task is enabled and can be run + * by the BestComm engine as needed + */ +extern void bcom_enable(struct bcom_task *tsk); + +/** + * bcom_disable - Disable a BestComm task + * @tsk: The BestComm task structure + * + * This function disable a given task, making sure it's not executed + * by the BestComm engine. + */ +extern void bcom_disable(struct bcom_task *tsk); + + +/** + * bcom_get_task_irq - Returns the irq number of a BestComm task + * @tsk: The BestComm task structure + */ +static inline int +bcom_get_task_irq(struct bcom_task *tsk) { + return tsk->irq; +} + +/* ======================================================================== */ +/* BD based tasks helpers */ +/* ======================================================================== */ + +#define BCOM_BD_READY 0x40000000ul + +/** _bcom_next_index - Get next input index. + * @tsk: pointer to task structure + * + * Support function; Device drivers should not call this + */ +static inline int +_bcom_next_index(struct bcom_task *tsk) +{ + return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1; +} + +/** _bcom_next_outdex - Get next output index. + * @tsk: pointer to task structure + * + * Support function; Device drivers should not call this + */ +static inline int +_bcom_next_outdex(struct bcom_task *tsk) +{ + return ((tsk->outdex + 1) == tsk->num_bd) ? 
0 : tsk->outdex + 1; +} + +/** + * bcom_queue_empty - Checks if a BestComm task BD queue is empty + * @tsk: The BestComm task structure + */ +static inline int +bcom_queue_empty(struct bcom_task *tsk) +{ + return tsk->index == tsk->outdex; +} + +/** + * bcom_queue_full - Checks if a BestComm task BD queue is full + * @tsk: The BestComm task structure + */ +static inline int +bcom_queue_full(struct bcom_task *tsk) +{ + return tsk->outdex == _bcom_next_index(tsk); +} + +/** + * bcom_get_bd - Get a BD from the queue + * @tsk: The BestComm task structure + * index: Index of the BD to fetch + */ +static inline struct bcom_bd +*bcom_get_bd(struct bcom_task *tsk, unsigned int index) +{ + /* A cast to (void*) so the address can be incremented by the + * real size instead of by sizeof(struct bcom_bd) */ + return ((void *)tsk->bd) + (index * tsk->bd_size); +} + +/** + * bcom_buffer_done - Checks if a BestComm + * @tsk: The BestComm task structure + */ +static inline int +bcom_buffer_done(struct bcom_task *tsk) +{ + struct bcom_bd *bd; + if (bcom_queue_empty(tsk)) + return 0; + + bd = bcom_get_bd(tsk, tsk->outdex); + return !(bd->status & BCOM_BD_READY); +} + +/** + * bcom_prepare_next_buffer - clear status of next available buffer. + * @tsk: The BestComm task structure + * + * Returns pointer to next buffer descriptor + */ +static inline struct bcom_bd * +bcom_prepare_next_buffer(struct bcom_task *tsk) +{ + struct bcom_bd *bd; + + bd = bcom_get_bd(tsk, tsk->index); + bd->status = 0; /* cleanup last status */ + return bd; +} + +static inline void +bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie) +{ + struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index); + + tsk->cookie[tsk->index] = cookie; + mb(); /* ensure the bd is really up-to-date */ + bd->status |= BCOM_BD_READY; + tsk->index = _bcom_next_index(tsk); + if (tsk->flags & BCOM_FLAGS_ENABLE_TASK) + bcom_enable(tsk); +} + +static inline void * +bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd) +{ + void *cookie = tsk->cookie[tsk->outdex]; + struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex); + + if (p_status) + *p_status = bd->status; + if (p_bd) + *p_bd = bd; + tsk->outdex = _bcom_next_outdex(tsk); + return cookie; +} + +#endif /* __BESTCOMM_H__ */ diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h new file mode 100644 index 00000000..3b52f3ff --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h @@ -0,0 +1,350 @@ +/* + * Private header for the MPC52xx processor BestComm driver + * + * By private, we mean that driver should not use it directly. It's meant + * to be used by the BestComm engine driver itself and by the intermediate + * layer between the core and the drivers. + * + * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2005 Varma Electronics Oy, + * ( by Andrey Volkov <avolkov@varma-el.com> ) + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
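Taken together, the bestcomm.h inline helpers above form a single-producer/single-consumer ring: index is the submit cursor, outdex the reclaim cursor, and BCOM_BD_READY hands ownership of a descriptor to the engine. A sketch of the pattern a client driver follows (the function names, cookie type and bd field layout here are illustrative assumptions, not part of the commit):

#include <linux/skbuff.h>
#include "bestcomm.h"

/* Submit side: back off when the ring is full, else fill the next bd. */
static int my_tx_submit(struct bcom_task *tsk, struct sk_buff *skb,
			dma_addr_t pa)
{
	struct bcom_bd *bd;

	if (bcom_queue_full(tsk))
		return -EBUSY;

	bd = bcom_prepare_next_buffer(tsk);	/* clears bd->status */
	bd->status |= skb->len;			/* task-specific layout (assumed) */
	bd->data[0] = pa;			/* buffer bus address (assumed) */
	bcom_submit_next_buffer(tsk, skb);	/* sets BCOM_BD_READY, bumps index */
	return 0;
}

/* Reclaim side, typically run from the task's interrupt handler. */
static void my_tx_reclaim(struct bcom_task *tsk)
{
	while (bcom_buffer_done(tsk)) {
		u32 status;
		struct sk_buff *skb = bcom_retrieve_buffer(tsk, &status, NULL);

		dev_kfree_skb_irq(skb);
	}
}

Note that bcom_submit_next_buffer() issues an mb() before setting BCOM_BD_READY, so the engine never sees a half-written descriptor.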
+ */ + +#ifndef __BESTCOMM_PRIV_H__ +#define __BESTCOMM_PRIV_H__ + +#include <linux/spinlock.h> +#include <linux/of.h> +#include <asm/io.h> +#include <asm/mpc52xx.h> + +#include "sram.h" + + +/* ======================================================================== */ +/* Engine related stuff */ +/* ======================================================================== */ + +/* Zones sizes and needed alignments */ +#define BCOM_MAX_TASKS 16 +#define BCOM_MAX_VAR 24 +#define BCOM_MAX_INC 8 +#define BCOM_MAX_FDT 64 +#define BCOM_MAX_CTX 20 +#define BCOM_CTX_SIZE (BCOM_MAX_CTX * sizeof(u32)) +#define BCOM_CTX_ALIGN 0x100 +#define BCOM_VAR_SIZE (BCOM_MAX_VAR * sizeof(u32)) +#define BCOM_INC_SIZE (BCOM_MAX_INC * sizeof(u32)) +#define BCOM_VAR_ALIGN 0x80 +#define BCOM_FDT_SIZE (BCOM_MAX_FDT * sizeof(u32)) +#define BCOM_FDT_ALIGN 0x100 + +/** + * struct bcom_tdt - Task Descriptor Table Entry + * + */ +struct bcom_tdt { + u32 start; + u32 stop; + u32 var; + u32 fdt; + u32 exec_status; /* used internally by BestComm engine */ + u32 mvtp; /* used internally by BestComm engine */ + u32 context; + u32 litbase; +}; + +/** + * struct bcom_engine + * + * This holds all info needed globaly to handle the engine + */ +struct bcom_engine { + struct device_node *ofnode; + struct mpc52xx_sdma __iomem *regs; + phys_addr_t regs_base; + + struct bcom_tdt *tdt; + u32 *ctx; + u32 *var; + u32 *fdt; + + spinlock_t lock; +}; + +extern struct bcom_engine *bcom_eng; + + +/* ======================================================================== */ +/* Tasks related stuff */ +/* ======================================================================== */ + +/* Tasks image header */ +#define BCOM_TASK_MAGIC 0x4243544B /* 'BCTK' */ + +struct bcom_task_header { + u32 magic; + u8 desc_size; /* the size fields */ + u8 var_size; /* are given in number */ + u8 inc_size; /* of 32-bits words */ + u8 first_var; + u8 reserved[8]; +}; + +/* Descriptors structure & co */ +#define BCOM_DESC_NOP 0x000001f8 +#define BCOM_LCD_MASK 0x80000000 +#define BCOM_DRD_EXTENDED 0x40000000 +#define BCOM_DRD_INITIATOR_SHIFT 21 + +/* Tasks pragma */ +#define BCOM_PRAGMA_BIT_RSV 7 /* reserved pragma bit */ +#define BCOM_PRAGMA_BIT_PRECISE_INC 6 /* increment 0=when possible, */ + /* 1=iter end */ +#define BCOM_PRAGMA_BIT_RST_ERROR_NO 5 /* don't reset errors on */ + /* task enable */ +#define BCOM_PRAGMA_BIT_PACK 4 /* pack data enable */ +#define BCOM_PRAGMA_BIT_INTEGER 3 /* data alignment */ + /* 0=frac(msb), 1=int(lsb) */ +#define BCOM_PRAGMA_BIT_SPECREAD 2 /* XLB speculative read */ +#define BCOM_PRAGMA_BIT_CW 1 /* write line buffer enable */ +#define BCOM_PRAGMA_BIT_RL 0 /* read line buffer enable */ + + /* Looks like XLB speculative read generates XLB errors when a buffer + * is at the end of the physical memory. i.e. when accessing the + * lasts words, the engine tries to prefetch the next but there is no + * next ... 
+ */ +#define BCOM_STD_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ + (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ + (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ + (0 << BCOM_PRAGMA_BIT_PACK) | \ + (0 << BCOM_PRAGMA_BIT_INTEGER) | \ + (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ + (1 << BCOM_PRAGMA_BIT_CW) | \ + (1 << BCOM_PRAGMA_BIT_RL)) + +#define BCOM_PCI_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \ + (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \ + (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \ + (0 << BCOM_PRAGMA_BIT_PACK) | \ + (1 << BCOM_PRAGMA_BIT_INTEGER) | \ + (0 << BCOM_PRAGMA_BIT_SPECREAD) | \ + (1 << BCOM_PRAGMA_BIT_CW) | \ + (1 << BCOM_PRAGMA_BIT_RL)) + +#define BCOM_ATA_PRAGMA BCOM_STD_PRAGMA +#define BCOM_CRC16_DP_0_PRAGMA BCOM_STD_PRAGMA +#define BCOM_CRC16_DP_1_PRAGMA BCOM_STD_PRAGMA +#define BCOM_FEC_RX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_FEC_TX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_0_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_1_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_2_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_3_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_BD_0_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_DP_BD_1_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_RX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_TX_BD_PRAGMA BCOM_STD_PRAGMA +#define BCOM_GEN_LPC_PRAGMA BCOM_STD_PRAGMA +#define BCOM_PCI_RX_PRAGMA BCOM_PCI_PRAGMA +#define BCOM_PCI_TX_PRAGMA BCOM_PCI_PRAGMA + +/* Initiators number */ +#define BCOM_INITIATOR_ALWAYS 0 +#define BCOM_INITIATOR_SCTMR_0 1 +#define BCOM_INITIATOR_SCTMR_1 2 +#define BCOM_INITIATOR_FEC_RX 3 +#define BCOM_INITIATOR_FEC_TX 4 +#define BCOM_INITIATOR_ATA_RX 5 +#define BCOM_INITIATOR_ATA_TX 6 +#define BCOM_INITIATOR_SCPCI_RX 7 +#define BCOM_INITIATOR_SCPCI_TX 8 +#define BCOM_INITIATOR_PSC3_RX 9 +#define BCOM_INITIATOR_PSC3_TX 10 +#define BCOM_INITIATOR_PSC2_RX 11 +#define BCOM_INITIATOR_PSC2_TX 12 +#define BCOM_INITIATOR_PSC1_RX 13 +#define BCOM_INITIATOR_PSC1_TX 14 +#define BCOM_INITIATOR_SCTMR_2 15 +#define BCOM_INITIATOR_SCLPC 16 +#define BCOM_INITIATOR_PSC5_RX 17 +#define BCOM_INITIATOR_PSC5_TX 18 +#define BCOM_INITIATOR_PSC4_RX 19 +#define BCOM_INITIATOR_PSC4_TX 20 +#define BCOM_INITIATOR_I2C2_RX 21 +#define BCOM_INITIATOR_I2C2_TX 22 +#define BCOM_INITIATOR_I2C1_RX 23 +#define BCOM_INITIATOR_I2C1_TX 24 +#define BCOM_INITIATOR_PSC6_RX 25 +#define BCOM_INITIATOR_PSC6_TX 26 +#define BCOM_INITIATOR_IRDA_RX 25 +#define BCOM_INITIATOR_IRDA_TX 26 +#define BCOM_INITIATOR_SCTMR_3 27 +#define BCOM_INITIATOR_SCTMR_4 28 +#define BCOM_INITIATOR_SCTMR_5 29 +#define BCOM_INITIATOR_SCTMR_6 30 +#define BCOM_INITIATOR_SCTMR_7 31 + +/* Initiators priorities */ +#define BCOM_IPR_ALWAYS 7 +#define BCOM_IPR_SCTMR_0 2 +#define BCOM_IPR_SCTMR_1 2 +#define BCOM_IPR_FEC_RX 6 +#define BCOM_IPR_FEC_TX 5 +#define BCOM_IPR_ATA_RX 7 +#define BCOM_IPR_ATA_TX 7 +#define BCOM_IPR_SCPCI_RX 2 +#define BCOM_IPR_SCPCI_TX 2 +#define BCOM_IPR_PSC3_RX 2 +#define BCOM_IPR_PSC3_TX 2 +#define BCOM_IPR_PSC2_RX 2 +#define BCOM_IPR_PSC2_TX 2 +#define BCOM_IPR_PSC1_RX 2 +#define BCOM_IPR_PSC1_TX 2 +#define BCOM_IPR_SCTMR_2 2 +#define BCOM_IPR_SCLPC 2 +#define BCOM_IPR_PSC5_RX 2 +#define BCOM_IPR_PSC5_TX 2 +#define BCOM_IPR_PSC4_RX 2 +#define BCOM_IPR_PSC4_TX 2 +#define BCOM_IPR_I2C2_RX 2 +#define BCOM_IPR_I2C2_TX 2 +#define BCOM_IPR_I2C1_RX 2 +#define BCOM_IPR_I2C1_TX 2 +#define BCOM_IPR_PSC6_RX 2 +#define BCOM_IPR_PSC6_TX 2 +#define BCOM_IPR_IRDA_RX 2 +#define BCOM_IPR_IRDA_TX 2 +#define BCOM_IPR_SCTMR_3 2 +#define BCOM_IPR_SCTMR_4 2 +#define BCOM_IPR_SCTMR_5 2 +#define BCOM_IPR_SCTMR_6 2 +#define BCOM_IPR_SCTMR_7 2 + + 
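+/* Illustration (an editorial sketch, not part of the original file): a task
+ * driver normally programs one BCOM_INITIATOR_* number together with its
+ * matching BCOM_IPR_* priority when resetting a task, mirroring what the
+ * FEC and GenBD task drivers in this commit do:
+ *
+ *	out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX);
+ *	bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_FEC_RX);
+ *
+ * where tsk is a task obtained from bcom_task_alloc().
+ */
+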
+/* ======================================================================== */ +/* API */ +/* ======================================================================== */ + +extern struct bcom_task *bcom_task_alloc(int bd_count, int bd_size, int priv_size); +extern void bcom_task_free(struct bcom_task *tsk); +extern int bcom_load_image(int task, u32 *task_image); +extern void bcom_set_initiator(int task, int initiator); + + +#define TASK_ENABLE 0x8000 + +/** + * bcom_disable_prefetch - Hook to disable bus prefetching + * + * ATA DMA and the original MPC5200 need this due to silicon bugs. At the + * moment disabling prefetch is a one-way street. There is no mechanism + * in place to turn prefetch back on after it has been disabled. There is + * no reason it couldn't be done, it would just be more complex to implement. + */ +static inline void bcom_disable_prefetch(void) +{ + u16 regval; + + regval = in_be16(&bcom_eng->regs->PtdCntrl); + out_be16(&bcom_eng->regs->PtdCntrl, regval | 1); +}; + +static inline void +bcom_enable_task(int task) +{ + u16 reg; + reg = in_be16(&bcom_eng->regs->tcr[task]); + out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE); +} + +static inline void +bcom_disable_task(int task) +{ + u16 reg = in_be16(&bcom_eng->regs->tcr[task]); + out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE); +} + + +static inline u32 * +bcom_task_desc(int task) +{ + return bcom_sram_pa2va(bcom_eng->tdt[task].start); +} + +static inline int +bcom_task_num_descs(int task) +{ + return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1; +} + +static inline u32 * +bcom_task_var(int task) +{ + return bcom_sram_pa2va(bcom_eng->tdt[task].var); +} + +static inline u32 * +bcom_task_inc(int task) +{ + return &bcom_task_var(task)[BCOM_MAX_VAR]; +} + + +static inline int +bcom_drd_is_extended(u32 desc) +{ + return (desc) & BCOM_DRD_EXTENDED; +} + +static inline int +bcom_desc_is_drd(u32 desc) +{ + return !(desc & BCOM_LCD_MASK) && desc != BCOM_DESC_NOP; +} + +static inline int +bcom_desc_initiator(u32 desc) +{ + return (desc >> BCOM_DRD_INITIATOR_SHIFT) & 0x1f; +} + +static inline void +bcom_set_desc_initiator(u32 *desc, int initiator) +{ + *desc = (*desc & ~(0x1f << BCOM_DRD_INITIATOR_SHIFT)) | + ((initiator & 0x1f) << BCOM_DRD_INITIATOR_SHIFT); +} + + +static inline void +bcom_set_task_pragma(int task, int pragma) +{ + u32 *fdt = &bcom_eng->tdt[task].fdt; + *fdt = (*fdt & ~0xff) | pragma; +} + +static inline void +bcom_set_task_auto_start(int task, int next_task) +{ + u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; + out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task); +} + +static inline void +bcom_set_tcr_initiator(int task, int initiator) +{ + u16 __iomem *tcr = &bcom_eng->regs->tcr[task]; + out_be16(tcr, (in_be16(tcr) & ~0x1f00) | ((initiator & 0x1f) << 8)); +} + + +#endif /* __BESTCOMM_PRIV_H__ */ + diff --git a/arch/powerpc/sysdev/bestcomm/fec.c b/arch/powerpc/sysdev/bestcomm/fec.c new file mode 100644 index 00000000..957a988d --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/fec.c @@ -0,0 +1,270 @@ +/* + * Bestcomm FEC tasks driver + * + * + * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <asm/io.h> + +#include "bestcomm.h" +#include "bestcomm_priv.h" +#include "fec.h" + + +/* ======================================================================== */ +/* Task image/var/inc */ +/* ======================================================================== */ + +/* fec tasks images */ +extern u32 bcom_fec_rx_task[]; +extern u32 bcom_fec_tx_task[]; + +/* rx task vars that need to be set before enabling the task */ +struct bcom_fec_rx_var { + u32 enable; /* (u16*) address of task's control register */ + u32 fifo; /* (u32*) address of fec's fifo */ + u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ + u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ + u32 bd_start; /* (struct bcom_bd*) current bd */ + u32 buffer_size; /* size of receive buffer */ +}; + +/* rx task incs that need to be set before enabling the task */ +struct bcom_fec_rx_inc { + u16 pad0; + s16 incr_bytes; + u16 pad1; + s16 incr_dst; + u16 pad2; + s16 incr_dst_ma; +}; + +/* tx task vars that need to be set before enabling the task */ +struct bcom_fec_tx_var { + u32 DRD; /* (u32*) address of self-modified DRD */ + u32 fifo; /* (u32*) address of fec's fifo */ + u32 enable; /* (u16*) address of task's control register */ + u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ + u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ + u32 bd_start; /* (struct bcom_bd*) current bd */ + u32 buffer_size; /* set by uCode for each packet */ +}; + +/* tx task incs that need to be set before enabling the task */ +struct bcom_fec_tx_inc { + u16 pad0; + s16 incr_bytes; + u16 pad1; + s16 incr_src; + u16 pad2; + s16 incr_src_ma; +}; + +/* private structure in the task */ +struct bcom_fec_priv { + phys_addr_t fifo; + int maxbufsize; +}; + + +/* ======================================================================== */ +/* Task support code */ +/* ======================================================================== */ + +struct bcom_task * +bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize) +{ + struct bcom_task *tsk; + struct bcom_fec_priv *priv; + + tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), + sizeof(struct bcom_fec_priv)); + if (!tsk) + return NULL; + + tsk->flags = BCOM_FLAGS_NONE; + + priv = tsk->priv; + priv->fifo = fifo; + priv->maxbufsize = maxbufsize; + + if (bcom_fec_rx_reset(tsk)) { + bcom_task_free(tsk); + return NULL; + } + + return tsk; +} +EXPORT_SYMBOL_GPL(bcom_fec_rx_init); + +int +bcom_fec_rx_reset(struct bcom_task *tsk) +{ + struct bcom_fec_priv *priv = tsk->priv; + struct bcom_fec_rx_var *var; + struct bcom_fec_rx_inc *inc; + + /* Shutdown the task */ + bcom_disable_task(tsk->tasknum); + + /* Reset the microcode */ + var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum); + inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum); + + if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task)) + return -1; + + var->enable = bcom_eng->regs_base + + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); + var->fifo = (u32) priv->fifo; + var->bd_base = tsk->bd_pa; + var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); + var->bd_start = tsk->bd_pa; + var->buffer_size = priv->maxbufsize; + + inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */ + inc->incr_dst = sizeof(u32); /* task image, but we stick */ + inc->incr_dst_ma= sizeof(u8); /* to the official ones */ + + /* Reset the BDs */ + tsk->index = 0; + tsk->outdex = 0; + + 
memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + + /* Configure some stuff */ + bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA); + bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); + + out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_RX], BCOM_IPR_FEC_RX); + + out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ + + return 0; +} +EXPORT_SYMBOL_GPL(bcom_fec_rx_reset); + +void +bcom_fec_rx_release(struct bcom_task *tsk) +{ + /* Nothing special for the FEC tasks */ + bcom_task_free(tsk); +} +EXPORT_SYMBOL_GPL(bcom_fec_rx_release); + + + + /* Return 2nd to last DRD */ + /* This is an ugly hack, but at least it's only done + once at initialization */ +static u32 *self_modified_drd(int tasknum) +{ + u32 *desc; + int num_descs; + int drd_count; + int i; + + num_descs = bcom_task_num_descs(tasknum); + desc = bcom_task_desc(tasknum) + num_descs - 1; + drd_count = 0; + for (i=0; i<num_descs; i++, desc--) + if (bcom_desc_is_drd(*desc) && ++drd_count == 3) + break; + return desc; +} + +struct bcom_task * +bcom_fec_tx_init(int queue_len, phys_addr_t fifo) +{ + struct bcom_task *tsk; + struct bcom_fec_priv *priv; + + tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd), + sizeof(struct bcom_fec_priv)); + if (!tsk) + return NULL; + + tsk->flags = BCOM_FLAGS_ENABLE_TASK; + + priv = tsk->priv; + priv->fifo = fifo; + + if (bcom_fec_tx_reset(tsk)) { + bcom_task_free(tsk); + return NULL; + } + + return tsk; +} +EXPORT_SYMBOL_GPL(bcom_fec_tx_init); + +int +bcom_fec_tx_reset(struct bcom_task *tsk) +{ + struct bcom_fec_priv *priv = tsk->priv; + struct bcom_fec_tx_var *var; + struct bcom_fec_tx_inc *inc; + + /* Shutdown the task */ + bcom_disable_task(tsk->tasknum); + + /* Reset the microcode */ + var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum); + inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum); + + if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task)) + return -1; + + var->enable = bcom_eng->regs_base + + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); + var->fifo = (u32) priv->fifo; + var->DRD = bcom_sram_va2pa(self_modified_drd(tsk->tasknum)); + var->bd_base = tsk->bd_pa; + var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); + var->bd_start = tsk->bd_pa; + + inc->incr_bytes = -(s16)sizeof(u32); /* These should be in the */ + inc->incr_src = sizeof(u32); /* task image, but we stick */ + inc->incr_src_ma= sizeof(u8); /* to the official ones */ + + /* Reset the BDs */ + tsk->index = 0; + tsk->outdex = 0; + + memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + + /* Configure some stuff */ + bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA); + bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); + + out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_FEC_TX], BCOM_IPR_FEC_TX); + + out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ + + return 0; +} +EXPORT_SYMBOL_GPL(bcom_fec_tx_reset); + +void +bcom_fec_tx_release(struct bcom_task *tsk) +{ + /* Nothing special for the FEC tasks */ + bcom_task_free(tsk); +} +EXPORT_SYMBOL_GPL(bcom_fec_tx_release); + + +MODULE_DESCRIPTION("BestComm FEC tasks driver"); +MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>"); +MODULE_LICENSE("GPL v2"); + diff --git a/arch/powerpc/sysdev/bestcomm/fec.h b/arch/powerpc/sysdev/bestcomm/fec.h new file mode 100644 index 00000000..ee565d94 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/fec.h @@ -0,0 +1,61 @@ +/* + * Header for Bestcomm FEC tasks driver + * + * + * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 
2003-2004 MontaVista, Software, Inc. + * ( by Dale Farnsworth <dfarnsworth@mvista.com> ) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __BESTCOMM_FEC_H__ +#define __BESTCOMM_FEC_H__ + + +struct bcom_fec_bd { + u32 status; + u32 skb_pa; +}; + +#define BCOM_FEC_TX_BD_TFD 0x08000000ul /* transmit frame done */ +#define BCOM_FEC_TX_BD_TC 0x04000000ul /* transmit CRC */ +#define BCOM_FEC_TX_BD_ABC 0x02000000ul /* append bad CRC */ + +#define BCOM_FEC_RX_BD_L 0x08000000ul /* buffer is last in frame */ +#define BCOM_FEC_RX_BD_BC 0x00800000ul /* DA is broadcast */ +#define BCOM_FEC_RX_BD_MC 0x00400000ul /* DA is multicast and not broadcast */ +#define BCOM_FEC_RX_BD_LG 0x00200000ul /* Rx frame length violation */ +#define BCOM_FEC_RX_BD_NO 0x00100000ul /* Rx non-octet aligned frame */ +#define BCOM_FEC_RX_BD_CR 0x00040000ul /* Rx CRC error */ +#define BCOM_FEC_RX_BD_OV 0x00020000ul /* overrun */ +#define BCOM_FEC_RX_BD_TR 0x00010000ul /* Rx frame truncated */ +#define BCOM_FEC_RX_BD_LEN_MASK 0x000007fful /* mask for length of received frame */ +#define BCOM_FEC_RX_BD_ERRORS (BCOM_FEC_RX_BD_LG | BCOM_FEC_RX_BD_NO | \ + BCOM_FEC_RX_BD_CR | BCOM_FEC_RX_BD_OV | BCOM_FEC_RX_BD_TR) + + +extern struct bcom_task * +bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize); + +extern int +bcom_fec_rx_reset(struct bcom_task *tsk); + +extern void +bcom_fec_rx_release(struct bcom_task *tsk); + + +extern struct bcom_task * +bcom_fec_tx_init(int queue_len, phys_addr_t fifo); + +extern int +bcom_fec_tx_reset(struct bcom_task *tsk); + +extern void +bcom_fec_tx_release(struct bcom_task *tsk); + + +#endif /* __BESTCOMM_FEC_H__ */ + diff --git a/arch/powerpc/sysdev/bestcomm/gen_bd.c b/arch/powerpc/sysdev/bestcomm/gen_bd.c new file mode 100644 index 00000000..e0a53e31 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/gen_bd.c @@ -0,0 +1,354 @@ +/* + * Driver for MPC52xx processor BestComm General Buffer Descriptor + * + * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> + * Copyright (C) 2006 AppSpec Computer Technologies Corp. + * Jeff Gibbons <jeff.gibbons@appspec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/types.h> +#include <asm/errno.h> +#include <asm/io.h> + +#include <asm/mpc52xx.h> +#include <asm/mpc52xx_psc.h> + +#include "bestcomm.h" +#include "bestcomm_priv.h" +#include "gen_bd.h" + + +/* ======================================================================== */ +/* Task image/var/inc */ +/* ======================================================================== */ + +/* gen_bd tasks images */ +extern u32 bcom_gen_bd_rx_task[]; +extern u32 bcom_gen_bd_tx_task[]; + +/* rx task vars that need to be set before enabling the task */ +struct bcom_gen_bd_rx_var { + u32 enable; /* (u16*) address of task's control register */ + u32 fifo; /* (u32*) address of gen_bd's fifo */ + u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ + u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ + u32 bd_start; /* (struct bcom_bd*) current bd */ + u32 buffer_size; /* size of receive buffer */ +}; + +/* rx task incs that need to be set before enabling the task */ +struct bcom_gen_bd_rx_inc { + u16 pad0; + s16 incr_bytes; + u16 pad1; + s16 incr_dst; +}; + +/* tx task vars that need to be set before enabling the task */ +struct bcom_gen_bd_tx_var { + u32 fifo; /* (u32*) address of gen_bd's fifo */ + u32 enable; /* (u16*) address of task's control register */ + u32 bd_base; /* (struct bcom_bd*) beginning of ring buffer */ + u32 bd_last; /* (struct bcom_bd*) end of ring buffer */ + u32 bd_start; /* (struct bcom_bd*) current bd */ + u32 buffer_size; /* set by uCode for each packet */ +}; + +/* tx task incs that need to be set before enabling the task */ +struct bcom_gen_bd_tx_inc { + u16 pad0; + s16 incr_bytes; + u16 pad1; + s16 incr_src; + u16 pad2; + s16 incr_src_ma; +}; + +/* private structure */ +struct bcom_gen_bd_priv { + phys_addr_t fifo; + int initiator; + int ipr; + int maxbufsize; +}; + + +/* ======================================================================== */ +/* Task support code */ +/* ======================================================================== */ + +struct bcom_task * +bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, + int initiator, int ipr, int maxbufsize) +{ + struct bcom_task *tsk; + struct bcom_gen_bd_priv *priv; + + tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), + sizeof(struct bcom_gen_bd_priv)); + if (!tsk) + return NULL; + + tsk->flags = BCOM_FLAGS_NONE; + + priv = tsk->priv; + priv->fifo = fifo; + priv->initiator = initiator; + priv->ipr = ipr; + priv->maxbufsize = maxbufsize; + + if (bcom_gen_bd_rx_reset(tsk)) { + bcom_task_free(tsk); + return NULL; + } + + return tsk; +} +EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_init); + +int +bcom_gen_bd_rx_reset(struct bcom_task *tsk) +{ + struct bcom_gen_bd_priv *priv = tsk->priv; + struct bcom_gen_bd_rx_var *var; + struct bcom_gen_bd_rx_inc *inc; + + /* Shutdown the task */ + bcom_disable_task(tsk->tasknum); + + /* Reset the microcode */ + var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum); + inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum); + + if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task)) + return -1; + + var->enable = bcom_eng->regs_base + + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); + var->fifo = (u32) priv->fifo; + var->bd_base = tsk->bd_pa; + var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); + var->bd_start = tsk->bd_pa; + var->buffer_size = priv->maxbufsize; + + inc->incr_bytes = -(s16)sizeof(u32); + inc->incr_dst = sizeof(u32); + + /* 
Reset the BDs */ + tsk->index = 0; + tsk->outdex = 0; + + memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + + /* Configure some stuff */ + bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA); + bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); + + out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr); + bcom_set_initiator(tsk->tasknum, priv->initiator); + + out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ + + return 0; +} +EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_reset); + +void +bcom_gen_bd_rx_release(struct bcom_task *tsk) +{ + /* Nothing special for the GenBD tasks */ + bcom_task_free(tsk); +} +EXPORT_SYMBOL_GPL(bcom_gen_bd_rx_release); + + +extern struct bcom_task * +bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, + int initiator, int ipr) +{ + struct bcom_task *tsk; + struct bcom_gen_bd_priv *priv; + + tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd), + sizeof(struct bcom_gen_bd_priv)); + if (!tsk) + return NULL; + + tsk->flags = BCOM_FLAGS_NONE; + + priv = tsk->priv; + priv->fifo = fifo; + priv->initiator = initiator; + priv->ipr = ipr; + + if (bcom_gen_bd_tx_reset(tsk)) { + bcom_task_free(tsk); + return NULL; + } + + return tsk; +} +EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_init); + +int +bcom_gen_bd_tx_reset(struct bcom_task *tsk) +{ + struct bcom_gen_bd_priv *priv = tsk->priv; + struct bcom_gen_bd_tx_var *var; + struct bcom_gen_bd_tx_inc *inc; + + /* Shutdown the task */ + bcom_disable_task(tsk->tasknum); + + /* Reset the microcode */ + var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum); + inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum); + + if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task)) + return -1; + + var->enable = bcom_eng->regs_base + + offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]); + var->fifo = (u32) priv->fifo; + var->bd_base = tsk->bd_pa; + var->bd_last = tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size); + var->bd_start = tsk->bd_pa; + + inc->incr_bytes = -(s16)sizeof(u32); + inc->incr_src = sizeof(u32); + inc->incr_src_ma = sizeof(u8); + + /* Reset the BDs */ + tsk->index = 0; + tsk->outdex = 0; + + memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size); + + /* Configure some stuff */ + bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA); + bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum); + + out_8(&bcom_eng->regs->ipr[priv->initiator], priv->ipr); + bcom_set_initiator(tsk->tasknum, priv->initiator); + + out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */ + + return 0; +} +EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_reset); + +void +bcom_gen_bd_tx_release(struct bcom_task *tsk) +{ + /* Nothing special for the GenBD tasks */ + bcom_task_free(tsk); +} +EXPORT_SYMBOL_GPL(bcom_gen_bd_tx_release); + +/* --------------------------------------------------------------------- + * PSC support code + */ + +/** + * bcom_psc_parameters - Bestcomm initialization value table for PSC devices + * + * This structure is only used internally. It is a lookup table for PSC + * specific parameters to bestcomm tasks. 
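+ * Entries are indexed by zero-based PSC number: index 0 holds the PSC1
+ * initiator/priority pair, index 5 the PSC6 pair.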
+ */
+static struct bcom_psc_params {
+	int rx_initiator;
+	int rx_ipr;
+	int tx_initiator;
+	int tx_ipr;
+} bcom_psc_params[] = {
+	[0] = {
+		.rx_initiator = BCOM_INITIATOR_PSC1_RX,
+		.rx_ipr = BCOM_IPR_PSC1_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC1_TX,
+		.tx_ipr = BCOM_IPR_PSC1_TX,
+	},
+	[1] = {
+		.rx_initiator = BCOM_INITIATOR_PSC2_RX,
+		.rx_ipr = BCOM_IPR_PSC2_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC2_TX,
+		.tx_ipr = BCOM_IPR_PSC2_TX,
+	},
+	[2] = {
+		.rx_initiator = BCOM_INITIATOR_PSC3_RX,
+		.rx_ipr = BCOM_IPR_PSC3_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC3_TX,
+		.tx_ipr = BCOM_IPR_PSC3_TX,
+	},
+	[3] = {
+		.rx_initiator = BCOM_INITIATOR_PSC4_RX,
+		.rx_ipr = BCOM_IPR_PSC4_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC4_TX,
+		.tx_ipr = BCOM_IPR_PSC4_TX,
+	},
+	[4] = {
+		.rx_initiator = BCOM_INITIATOR_PSC5_RX,
+		.rx_ipr = BCOM_IPR_PSC5_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC5_TX,
+		.tx_ipr = BCOM_IPR_PSC5_TX,
+	},
+	[5] = {
+		.rx_initiator = BCOM_INITIATOR_PSC6_RX,
+		.rx_ipr = BCOM_IPR_PSC6_RX,
+		.tx_initiator = BCOM_INITIATOR_PSC6_TX,
+		.tx_ipr = BCOM_IPR_PSC6_TX,
+	},
+};
+
+/**
+ * bcom_psc_gen_bd_rx_init - Allocate a receive bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ * @maxbufsize: Maximum receive data size in bytes.
+ *
+ * Allocate a bestcomm task structure for receiving data from a PSC.
+ */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+					   phys_addr_t fifo, int maxbufsize)
+{
+	if (psc_num >= MPC52xx_PSC_MAXNUM)
+		return NULL;
+
+	return bcom_gen_bd_rx_init(queue_len, fifo,
+				   bcom_psc_params[psc_num].rx_initiator,
+				   bcom_psc_params[psc_num].rx_ipr,
+				   maxbufsize);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_rx_init);
+
+/**
+ * bcom_psc_gen_bd_tx_init - Allocate a transmit bcom_task for a PSC port
+ * @psc_num: Number of the PSC to allocate a task for
+ * @queue_len: number of buffer descriptors to allocate for the task
+ * @fifo: physical address of FIFO register
+ *
+ * Allocate a bestcomm task structure for transmitting data to a PSC.
+ */
+struct bcom_task *
+bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, phys_addr_t fifo)
+{
+	/* Bounds-check psc_num the same way the rx variant above does */
+	if (psc_num >= MPC52xx_PSC_MAXNUM)
+		return NULL;
+
+	return bcom_gen_bd_tx_init(queue_len, fifo,
+				   bcom_psc_params[psc_num].tx_initiator,
+				   bcom_psc_params[psc_num].tx_ipr);
+}
+EXPORT_SYMBOL_GPL(bcom_psc_gen_bd_tx_init);
+
+
+MODULE_DESCRIPTION("BestComm General Buffer Descriptor tasks driver");
+MODULE_AUTHOR("Jeff Gibbons <jeff.gibbons@appspec.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/arch/powerpc/sysdev/bestcomm/gen_bd.h b/arch/powerpc/sysdev/bestcomm/gen_bd.h
new file mode 100644
index 00000000..de47260e
--- /dev/null
+++ b/arch/powerpc/sysdev/bestcomm/gen_bd.h
@@ -0,0 +1,53 @@
+/*
+ * Header for Bestcomm General Buffer Descriptor tasks driver
+ *
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ *                    Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ * + * + */ + +#ifndef __BESTCOMM_GEN_BD_H__ +#define __BESTCOMM_GEN_BD_H__ + +struct bcom_gen_bd { + u32 status; + u32 buf_pa; +}; + + +extern struct bcom_task * +bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo, + int initiator, int ipr, int maxbufsize); + +extern int +bcom_gen_bd_rx_reset(struct bcom_task *tsk); + +extern void +bcom_gen_bd_rx_release(struct bcom_task *tsk); + + +extern struct bcom_task * +bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo, + int initiator, int ipr); + +extern int +bcom_gen_bd_tx_reset(struct bcom_task *tsk); + +extern void +bcom_gen_bd_tx_release(struct bcom_task *tsk); + + +/* PSC support utility wrappers */ +struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len, + phys_addr_t fifo, int maxbufsize); +struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len, + phys_addr_t fifo); +#endif /* __BESTCOMM_GEN_BD_H__ */ + diff --git a/arch/powerpc/sysdev/bestcomm/sram.c b/arch/powerpc/sysdev/bestcomm/sram.c new file mode 100644 index 00000000..1225012a --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/sram.c @@ -0,0 +1,178 @@ +/* + * Simple memory allocator for on-board SRAM + * + * + * Maintainer : Sylvain Munaut <tnt@246tNt.com> + * + * Copyright (C) 2005 Sylvain Munaut <tnt@246tNt.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/ioport.h> +#include <linux/of.h> + +#include <asm/io.h> +#include <asm/mmu.h> + +#include "sram.h" + + +/* Struct keeping our 'state' */ +struct bcom_sram *bcom_sram = NULL; +EXPORT_SYMBOL_GPL(bcom_sram); /* needed for inline functions */ + + +/* ======================================================================== */ +/* Public API */ +/* ======================================================================== */ +/* DO NOT USE in interrupts, if needed in irq handler, we should use the + _irqsave version of the spin_locks */ + +int bcom_sram_init(struct device_node *sram_node, char *owner) +{ + int rv; + const u32 *regaddr_p; + u64 regaddr64, size64; + unsigned int psize; + + /* Create our state struct */ + if (bcom_sram) { + printk(KERN_ERR "%s: bcom_sram_init: " + "Already initialized !\n", owner); + return -EBUSY; + } + + bcom_sram = kmalloc(sizeof(struct bcom_sram), GFP_KERNEL); + if (!bcom_sram) { + printk(KERN_ERR "%s: bcom_sram_init: " + "Couldn't allocate internal state !\n", owner); + return -ENOMEM; + } + + /* Get address and size of the sram */ + regaddr_p = of_get_address(sram_node, 0, &size64, NULL); + if (!regaddr_p) { + printk(KERN_ERR "%s: bcom_sram_init: " + "Invalid device node !\n", owner); + rv = -EINVAL; + goto error_free; + } + + regaddr64 = of_translate_address(sram_node, regaddr_p); + + bcom_sram->base_phys = (phys_addr_t) regaddr64; + bcom_sram->size = (unsigned int) size64; + + /* Request region */ + if (!request_mem_region(bcom_sram->base_phys, bcom_sram->size, owner)) { + printk(KERN_ERR "%s: bcom_sram_init: " + "Couldn't request region !\n", owner); + rv = -EBUSY; + goto error_free; + } + + /* Map SRAM */ + /* sram is not really __iomem */ + bcom_sram->base_virt = (void*) ioremap(bcom_sram->base_phys, bcom_sram->size); + + if (!bcom_sram->base_virt) { + printk(KERN_ERR "%s: bcom_sram_init: " + "Map error SRAM zone 0x%08lx 
(0x%0x)!\n", + owner, (long)bcom_sram->base_phys, bcom_sram->size ); + rv = -ENOMEM; + goto error_release; + } + + /* Create an rheap (defaults to 32 bits word alignment) */ + bcom_sram->rh = rh_create(4); + + /* Attach the free zones */ +#if 0 + /* Currently disabled ... for future use only */ + reg_addr_p = of_get_property(sram_node, "available", &psize); +#else + regaddr_p = NULL; + psize = 0; +#endif + + if (!regaddr_p || !psize) { + /* Attach the whole zone */ + rh_attach_region(bcom_sram->rh, 0, bcom_sram->size); + } else { + /* Attach each zone independently */ + while (psize >= 2 * sizeof(u32)) { + phys_addr_t zbase = of_translate_address(sram_node, regaddr_p); + rh_attach_region(bcom_sram->rh, zbase - bcom_sram->base_phys, regaddr_p[1]); + regaddr_p += 2; + psize -= 2 * sizeof(u32); + } + } + + /* Init our spinlock */ + spin_lock_init(&bcom_sram->lock); + + return 0; + +error_release: + release_mem_region(bcom_sram->base_phys, bcom_sram->size); +error_free: + kfree(bcom_sram); + bcom_sram = NULL; + + return rv; +} +EXPORT_SYMBOL_GPL(bcom_sram_init); + +void bcom_sram_cleanup(void) +{ + /* Free resources */ + if (bcom_sram) { + rh_destroy(bcom_sram->rh); + iounmap((void __iomem *)bcom_sram->base_virt); + release_mem_region(bcom_sram->base_phys, bcom_sram->size); + kfree(bcom_sram); + bcom_sram = NULL; + } +} +EXPORT_SYMBOL_GPL(bcom_sram_cleanup); + +void* bcom_sram_alloc(int size, int align, phys_addr_t *phys) +{ + unsigned long offset; + + spin_lock(&bcom_sram->lock); + offset = rh_alloc_align(bcom_sram->rh, size, align, NULL); + spin_unlock(&bcom_sram->lock); + + if (IS_ERR_VALUE(offset)) + return NULL; + + *phys = bcom_sram->base_phys + offset; + return bcom_sram->base_virt + offset; +} +EXPORT_SYMBOL_GPL(bcom_sram_alloc); + +void bcom_sram_free(void *ptr) +{ + unsigned long offset; + + if (!ptr) + return; + + offset = ptr - bcom_sram->base_virt; + + spin_lock(&bcom_sram->lock); + rh_free(bcom_sram->rh, offset); + spin_unlock(&bcom_sram->lock); +} +EXPORT_SYMBOL_GPL(bcom_sram_free); + diff --git a/arch/powerpc/sysdev/bestcomm/sram.h b/arch/powerpc/sysdev/bestcomm/sram.h new file mode 100644 index 00000000..b6d66896 --- /dev/null +++ b/arch/powerpc/sysdev/bestcomm/sram.h @@ -0,0 +1,54 @@ +/* + * Handling of a sram zone for bestcomm + * + * + * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __BESTCOMM_SRAM_H__ +#define __BESTCOMM_SRAM_H__ + +#include <asm/rheap.h> +#include <asm/mmu.h> +#include <linux/spinlock.h> + + +/* Structure used internally */ + /* The internals are here for the inline functions + * sake, certainly not for the user to mess with ! 
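+ * (Specifically, bcom_sram_va2pa() and bcom_sram_pa2va() below translate
+ * addresses by reading base_phys/base_virt straight out of this struct.)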
+ */
+struct bcom_sram {
+	phys_addr_t	 base_phys;
+	void		*base_virt;
+	unsigned int	 size;
+	rh_info_t	*rh;
+	spinlock_t	 lock;
+};
+
+extern struct bcom_sram *bcom_sram;
+
+
+/* Public API */
+extern int  bcom_sram_init(struct device_node *sram_node, char *owner);
+extern void bcom_sram_cleanup(void);
+
+extern void* bcom_sram_alloc(int size, int align, phys_addr_t *phys);
+extern void bcom_sram_free(void *ptr);
+
+static inline phys_addr_t bcom_sram_va2pa(void *va) {
+	return bcom_sram->base_phys +
+		(unsigned long)(va - bcom_sram->base_virt);
+}
+
+static inline void *bcom_sram_pa2va(phys_addr_t pa) {
+	return bcom_sram->base_virt +
+		(unsigned long)(pa - bcom_sram->base_phys);
+}
+
+
+#endif  /* __BESTCOMM_SRAM_H__ */
+
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
new file mode 100644
index 00000000..350787c8
--- /dev/null
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -0,0 +1,789 @@
+/*
+ * General Purpose functions for the global management of the
+ * Communication Processor Module.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * In addition to the individual control of the communication
+ * channels, there are a few functions that globally affect the
+ * communication processor.
+ *
+ * Buffer descriptors must be allocated from the dual ported memory
+ * space. The allocator for that is here. When the communication
+ * process is reset, we reclaim the memory available. There is
+ * currently no deallocator for this memory.
+ * The amount of space available is platform dependent. On the
+ * MBX, the EPPC software loads additional microcode into the
+ * communication processor, and uses some of the DP ram for this
+ * purpose. Currently, the first 512 bytes and the last 256 bytes of
+ * memory are used. Right now I am conservative and only use the
+ * memory that can never be used for microcode. If there are
+ * applications that require more DP ram, we can expand the boundaries
+ * but then we have to be careful of any downloaded microcode.
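+ * The dual-ported RAM allocator referred to above is set up by the
+ * cpm_muram_init() call in cpm_reset() below.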
+ */ +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/8xx_immap.h> +#include <asm/cpm1.h> +#include <asm/io.h> +#include <asm/tlbflush.h> +#include <asm/rheap.h> +#include <asm/prom.h> +#include <asm/cpm.h> + +#include <asm/fs_pd.h> + +#ifdef CONFIG_8xx_GPIO +#include <linux/of_gpio.h> +#endif + +#define CPM_MAP_SIZE (0x4000) + +cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */ +immap_t __iomem *mpc8xx_immr; +static cpic8xx_t __iomem *cpic_reg; + +static struct irq_host *cpm_pic_host; + +static void cpm_mask_irq(struct irq_data *d) +{ + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); + + clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); +} + +static void cpm_unmask_irq(struct irq_data *d) +{ + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); + + setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); +} + +static void cpm_end_irq(struct irq_data *d) +{ + unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); + + out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); +} + +static struct irq_chip cpm_pic = { + .name = "CPM PIC", + .irq_mask = cpm_mask_irq, + .irq_unmask = cpm_unmask_irq, + .irq_eoi = cpm_end_irq, +}; + +int cpm_get_irq(void) +{ + int cpm_vec; + + /* Get the vector by setting the ACK bit and then reading + * the register. + */ + out_be16(&cpic_reg->cpic_civr, 1); + cpm_vec = in_be16(&cpic_reg->cpic_civr); + cpm_vec >>= 11; + + return irq_linear_revmap(cpm_pic_host, cpm_vec); +} + +static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); + + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); + return 0; +} + +/* The CPM can generate the error interrupt when there is a race condition + * between generating and masking interrupts. All we have to do is ACK it + * and return. This is a no-op function so we don't need any special + * tests in the interrupt handler. + */ +static irqreturn_t cpm_error_interrupt(int irq, void *dev) +{ + return IRQ_HANDLED; +} + +static struct irqaction cpm_error_irqaction = { + .handler = cpm_error_interrupt, + .name = "error", +}; + +static struct irq_host_ops cpm_pic_host_ops = { + .map = cpm_pic_host_map, +}; + +unsigned int cpm_pic_init(void) +{ + struct device_node *np = NULL; + struct resource res; + unsigned int sirq = NO_IRQ, hwirq, eirq; + int ret; + + pr_debug("cpm_pic_init\n"); + + np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic"); + if (np == NULL) + np = of_find_compatible_node(NULL, "cpm-pic", "CPM"); + if (np == NULL) { + printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n"); + return sirq; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) + goto end; + + cpic_reg = ioremap(res.start, res.end - res.start + 1); + if (cpic_reg == NULL) + goto end; + + sirq = irq_of_parse_and_map(np, 0); + if (sirq == NO_IRQ) + goto end; + + /* Initialize the CPM interrupt controller. 
*/ + hwirq = (unsigned int)virq_to_hw(sirq); + out_be32(&cpic_reg->cpic_cicr, + (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | + ((hwirq/2) << 13) | CICR_HP_MASK); + + out_be32(&cpic_reg->cpic_cimr, 0); + + cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, + 64, &cpm_pic_host_ops, 64); + if (cpm_pic_host == NULL) { + printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); + sirq = NO_IRQ; + goto end; + } + + /* Install our own error handler. */ + np = of_find_compatible_node(NULL, NULL, "fsl,cpm1"); + if (np == NULL) + np = of_find_node_by_type(NULL, "cpm"); + if (np == NULL) { + printk(KERN_ERR "CPM PIC init: can not find cpm node\n"); + goto end; + } + + eirq = irq_of_parse_and_map(np, 0); + if (eirq == NO_IRQ) + goto end; + + if (setup_irq(eirq, &cpm_error_irqaction)) + printk(KERN_ERR "Could not allocate CPM error IRQ!"); + + setbits32(&cpic_reg->cpic_cicr, CICR_IEN); + +end: + of_node_put(np); + return sirq; +} + +void __init cpm_reset(void) +{ + sysconf8xx_t __iomem *siu_conf; + + mpc8xx_immr = ioremap(get_immrbase(), 0x4000); + if (!mpc8xx_immr) { + printk(KERN_CRIT "Could not map IMMR\n"); + return; + } + + cpmp = &mpc8xx_immr->im_cpm; + +#ifndef CONFIG_PPC_EARLY_DEBUG_CPM + /* Perform a reset. + */ + out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG); + + /* Wait for it. + */ + while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG); +#endif + +#ifdef CONFIG_UCODE_PATCH + cpm_load_patch(cpmp); +#endif + + /* Set SDMA Bus Request priority 5. + * On 860T, this also enables FEC priority 6. I am not sure + * this is what we really want for some applications, but the + * manual recommends it. + * Bit 25, FAM can also be set to use FEC aggressive mode (860T). + */ + siu_conf = immr_map(im_siu_conf); + out_be32(&siu_conf->sc_sdcr, 1); + immr_unmap(siu_conf); + + cpm_muram_init(); +} + +static DEFINE_SPINLOCK(cmd_lock); + +#define MAX_CR_CMD_LOOPS 10000 + +int cpm_command(u32 command, u8 opcode) +{ + int i, ret; + unsigned long flags; + + if (command & 0xffffff0f) + return -EINVAL; + + spin_lock_irqsave(&cmd_lock, flags); + + ret = 0; + out_be16(&cpmp->cp_cpcr, command | CPM_CR_FLG | (opcode << 8)); + for (i = 0; i < MAX_CR_CMD_LOOPS; i++) + if ((in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0) + goto out; + + printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__); + ret = -EIO; +out: + spin_unlock_irqrestore(&cmd_lock, flags); + return ret; +} +EXPORT_SYMBOL(cpm_command); + +/* Set a baud rate generator. This needs lots of work. There are + * four BRGs, any of which can be wired to any channel. + * The internal baud rate clock is the system clock divided by 16. + * This assumes the baudrate is 16x oversampled by the uart. + */ +#define BRG_INT_CLK (get_brgfreq()) +#define BRG_UART_CLK (BRG_INT_CLK/16) +#define BRG_UART_CLK_DIV16 (BRG_UART_CLK/16) + +void +cpm_setbrg(uint brg, uint rate) +{ + u32 __iomem *bp; + + /* This is good enough to get SMCs running..... + */ + bp = &cpmp->cp_brgc1; + bp += brg; + /* The BRG has a 12-bit counter. For really slow baud rates (or + * really fast processors), we may have to further divide by 16. 
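+	 * Worked example (the actual BRG_INT_CLK comes from get_brgfreq() and
+	 * is platform dependent): with BRG_INT_CLK = 66 MHz, BRG_UART_CLK is
+	 * 4125000 Hz, so 9600 baud needs a divider of 4125000/9600 - 1 = 428,
+	 * which fits in 12 bits, while 300 baud would need 13749 and must use
+	 * the DIV16 path, where 257812/300 - 1 = 858 fits again.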
+ */ + if (((BRG_UART_CLK / rate) - 1) < 4096) + out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN); + else + out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) | + CPM_BRG_EN | CPM_BRG_DIV16); +} + +struct cpm_ioport16 { + __be16 dir, par, odr_sor, dat, intr; + __be16 res[3]; +}; + +struct cpm_ioport32b { + __be32 dir, par, odr, dat; +}; + +struct cpm_ioport32e { + __be32 dir, par, sor, odr, dat; +}; + +static void cpm1_set_pin32(int port, int pin, int flags) +{ + struct cpm_ioport32e __iomem *iop; + pin = 1 << (31 - pin); + + if (port == CPM_PORTB) + iop = (struct cpm_ioport32e __iomem *) + &mpc8xx_immr->im_cpm.cp_pbdir; + else + iop = (struct cpm_ioport32e __iomem *) + &mpc8xx_immr->im_cpm.cp_pedir; + + if (flags & CPM_PIN_OUTPUT) + setbits32(&iop->dir, pin); + else + clrbits32(&iop->dir, pin); + + if (!(flags & CPM_PIN_GPIO)) + setbits32(&iop->par, pin); + else + clrbits32(&iop->par, pin); + + if (port == CPM_PORTB) { + if (flags & CPM_PIN_OPENDRAIN) + setbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin); + else + clrbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin); + } + + if (port == CPM_PORTE) { + if (flags & CPM_PIN_SECONDARY) + setbits32(&iop->sor, pin); + else + clrbits32(&iop->sor, pin); + + if (flags & CPM_PIN_OPENDRAIN) + setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin); + else + clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin); + } +} + +static void cpm1_set_pin16(int port, int pin, int flags) +{ + struct cpm_ioport16 __iomem *iop = + (struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport; + + pin = 1 << (15 - pin); + + if (port != 0) + iop += port - 1; + + if (flags & CPM_PIN_OUTPUT) + setbits16(&iop->dir, pin); + else + clrbits16(&iop->dir, pin); + + if (!(flags & CPM_PIN_GPIO)) + setbits16(&iop->par, pin); + else + clrbits16(&iop->par, pin); + + if (port == CPM_PORTA) { + if (flags & CPM_PIN_OPENDRAIN) + setbits16(&iop->odr_sor, pin); + else + clrbits16(&iop->odr_sor, pin); + } + if (port == CPM_PORTC) { + if (flags & CPM_PIN_SECONDARY) + setbits16(&iop->odr_sor, pin); + else + clrbits16(&iop->odr_sor, pin); + } +} + +void cpm1_set_pin(enum cpm_port port, int pin, int flags) +{ + if (port == CPM_PORTB || port == CPM_PORTE) + cpm1_set_pin32(port, pin, flags); + else + cpm1_set_pin16(port, pin, flags); +} + +int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode) +{ + int shift; + int i, bits = 0; + u32 __iomem *reg; + u32 mask = 7; + + u8 clk_map[][3] = { + {CPM_CLK_SCC1, CPM_BRG1, 0}, + {CPM_CLK_SCC1, CPM_BRG2, 1}, + {CPM_CLK_SCC1, CPM_BRG3, 2}, + {CPM_CLK_SCC1, CPM_BRG4, 3}, + {CPM_CLK_SCC1, CPM_CLK1, 4}, + {CPM_CLK_SCC1, CPM_CLK2, 5}, + {CPM_CLK_SCC1, CPM_CLK3, 6}, + {CPM_CLK_SCC1, CPM_CLK4, 7}, + + {CPM_CLK_SCC2, CPM_BRG1, 0}, + {CPM_CLK_SCC2, CPM_BRG2, 1}, + {CPM_CLK_SCC2, CPM_BRG3, 2}, + {CPM_CLK_SCC2, CPM_BRG4, 3}, + {CPM_CLK_SCC2, CPM_CLK1, 4}, + {CPM_CLK_SCC2, CPM_CLK2, 5}, + {CPM_CLK_SCC2, CPM_CLK3, 6}, + {CPM_CLK_SCC2, CPM_CLK4, 7}, + + {CPM_CLK_SCC3, CPM_BRG1, 0}, + {CPM_CLK_SCC3, CPM_BRG2, 1}, + {CPM_CLK_SCC3, CPM_BRG3, 2}, + {CPM_CLK_SCC3, CPM_BRG4, 3}, + {CPM_CLK_SCC3, CPM_CLK5, 4}, + {CPM_CLK_SCC3, CPM_CLK6, 5}, + {CPM_CLK_SCC3, CPM_CLK7, 6}, + {CPM_CLK_SCC3, CPM_CLK8, 7}, + + {CPM_CLK_SCC4, CPM_BRG1, 0}, + {CPM_CLK_SCC4, CPM_BRG2, 1}, + {CPM_CLK_SCC4, CPM_BRG3, 2}, + {CPM_CLK_SCC4, CPM_BRG4, 3}, + {CPM_CLK_SCC4, CPM_CLK5, 4}, + {CPM_CLK_SCC4, CPM_CLK6, 5}, + {CPM_CLK_SCC4, CPM_CLK7, 6}, + {CPM_CLK_SCC4, CPM_CLK8, 7}, + + {CPM_CLK_SMC1, CPM_BRG1, 0}, + {CPM_CLK_SMC1, CPM_BRG2, 1}, + {CPM_CLK_SMC1, CPM_BRG3, 2}, + {CPM_CLK_SMC1, CPM_BRG4, 3}, + 
{CPM_CLK_SMC1, CPM_CLK1, 4}, + {CPM_CLK_SMC1, CPM_CLK2, 5}, + {CPM_CLK_SMC1, CPM_CLK3, 6}, + {CPM_CLK_SMC1, CPM_CLK4, 7}, + + {CPM_CLK_SMC2, CPM_BRG1, 0}, + {CPM_CLK_SMC2, CPM_BRG2, 1}, + {CPM_CLK_SMC2, CPM_BRG3, 2}, + {CPM_CLK_SMC2, CPM_BRG4, 3}, + {CPM_CLK_SMC2, CPM_CLK5, 4}, + {CPM_CLK_SMC2, CPM_CLK6, 5}, + {CPM_CLK_SMC2, CPM_CLK7, 6}, + {CPM_CLK_SMC2, CPM_CLK8, 7}, + }; + + switch (target) { + case CPM_CLK_SCC1: + reg = &mpc8xx_immr->im_cpm.cp_sicr; + shift = 0; + break; + + case CPM_CLK_SCC2: + reg = &mpc8xx_immr->im_cpm.cp_sicr; + shift = 8; + break; + + case CPM_CLK_SCC3: + reg = &mpc8xx_immr->im_cpm.cp_sicr; + shift = 16; + break; + + case CPM_CLK_SCC4: + reg = &mpc8xx_immr->im_cpm.cp_sicr; + shift = 24; + break; + + case CPM_CLK_SMC1: + reg = &mpc8xx_immr->im_cpm.cp_simode; + shift = 12; + break; + + case CPM_CLK_SMC2: + reg = &mpc8xx_immr->im_cpm.cp_simode; + shift = 28; + break; + + default: + printk(KERN_ERR "cpm1_clock_setup: invalid clock target\n"); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(clk_map); i++) { + if (clk_map[i][0] == target && clk_map[i][1] == clock) { + bits = clk_map[i][2]; + break; + } + } + + if (i == ARRAY_SIZE(clk_map)) { + printk(KERN_ERR "cpm1_clock_setup: invalid clock combination\n"); + return -EINVAL; + } + + bits <<= shift; + mask <<= shift; + + if (reg == &mpc8xx_immr->im_cpm.cp_sicr) { + if (mode == CPM_CLK_RTX) { + bits |= bits << 3; + mask |= mask << 3; + } else if (mode == CPM_CLK_RX) { + bits <<= 3; + mask <<= 3; + } + } + + out_be32(reg, (in_be32(reg) & ~mask) | bits); + + return 0; +} + +/* + * GPIO LIB API implementation + */ +#ifdef CONFIG_8xx_GPIO + +struct cpm1_gpio16_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; + + /* shadowed data register to clear/set bits safely */ + u16 cpdata; +}; + +static inline struct cpm1_gpio16_chip * +to_cpm1_gpio16_chip(struct of_mm_gpio_chip *mm_gc) +{ + return container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc); +} + +static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc) +{ + struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc); + struct cpm_ioport16 __iomem *iop = mm_gc->regs; + + cpm1_gc->cpdata = in_be16(&iop->dat); +} + +static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm_ioport16 __iomem *iop = mm_gc->regs; + u16 pin_mask; + + pin_mask = 1 << (15 - gpio); + + return !!(in_be16(&iop->dat) & pin_mask); +} + +static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, + int value) +{ + struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc); + struct cpm_ioport16 __iomem *iop = mm_gc->regs; + + if (value) + cpm1_gc->cpdata |= pin_mask; + else + cpm1_gc->cpdata &= ~pin_mask; + + out_be16(&iop->dat, cpm1_gc->cpdata); +} + +static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc); + unsigned long flags; + u16 pin_mask = 1 << (15 - gpio); + + spin_lock_irqsave(&cpm1_gc->lock, flags); + + __cpm1_gpio16_set(mm_gc, pin_mask, value); + + spin_unlock_irqrestore(&cpm1_gc->lock, flags); +} + +static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc); + struct cpm_ioport16 __iomem *iop = mm_gc->regs; + unsigned long flags; + u16 pin_mask = 1 << (15 - gpio); + + 
spin_lock_irqsave(&cpm1_gc->lock, flags); + + setbits16(&iop->dir, pin_mask); + __cpm1_gpio16_set(mm_gc, pin_mask, val); + + spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; +} + +static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio16_chip *cpm1_gc = to_cpm1_gpio16_chip(mm_gc); + struct cpm_ioport16 __iomem *iop = mm_gc->regs; + unsigned long flags; + u16 pin_mask = 1 << (15 - gpio); + + spin_lock_irqsave(&cpm1_gc->lock, flags); + + clrbits16(&iop->dir, pin_mask); + + spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; +} + +int cpm1_gpiochip_add16(struct device_node *np) +{ + struct cpm1_gpio16_chip *cpm1_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + if (!cpm1_gc) + return -ENOMEM; + + spin_lock_init(&cpm1_gc->lock); + + mm_gc = &cpm1_gc->mm_gc; + gc = &mm_gc->gc; + + mm_gc->save_regs = cpm1_gpio16_save_regs; + gc->ngpio = 16; + gc->direction_input = cpm1_gpio16_dir_in; + gc->direction_output = cpm1_gpio16_dir_out; + gc->get = cpm1_gpio16_get; + gc->set = cpm1_gpio16_set; + + return of_mm_gpiochip_add(np, mm_gc); +} + +struct cpm1_gpio32_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; + + /* shadowed data register to clear/set bits safely */ + u32 cpdata; +}; + +static inline struct cpm1_gpio32_chip * +to_cpm1_gpio32_chip(struct of_mm_gpio_chip *mm_gc) +{ + return container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc); +} + +static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) +{ + struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc); + struct cpm_ioport32b __iomem *iop = mm_gc->regs; + + cpm1_gc->cpdata = in_be32(&iop->dat); +} + +static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm_ioport32b __iomem *iop = mm_gc->regs; + u32 pin_mask; + + pin_mask = 1 << (31 - gpio); + + return !!(in_be32(&iop->dat) & pin_mask); +} + +static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, + int value) +{ + struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc); + struct cpm_ioport32b __iomem *iop = mm_gc->regs; + + if (value) + cpm1_gc->cpdata |= pin_mask; + else + cpm1_gc->cpdata &= ~pin_mask; + + out_be32(&iop->dat, cpm1_gc->cpdata); +} + +static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc); + unsigned long flags; + u32 pin_mask = 1 << (31 - gpio); + + spin_lock_irqsave(&cpm1_gc->lock, flags); + + __cpm1_gpio32_set(mm_gc, pin_mask, value); + + spin_unlock_irqrestore(&cpm1_gc->lock, flags); +} + +static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc); + struct cpm_ioport32b __iomem *iop = mm_gc->regs; + unsigned long flags; + u32 pin_mask = 1 << (31 - gpio); + + spin_lock_irqsave(&cpm1_gc->lock, flags); + + setbits32(&iop->dir, pin_mask); + __cpm1_gpio32_set(mm_gc, pin_mask, val); + + spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; +} + +static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm1_gpio32_chip *cpm1_gc = to_cpm1_gpio32_chip(mm_gc); + struct cpm_ioport32b __iomem 
*iop = mm_gc->regs; + unsigned long flags; + u32 pin_mask = 1 << (31 - gpio); + + spin_lock_irqsave(&cpm1_gc->lock, flags); + + clrbits32(&iop->dir, pin_mask); + + spin_unlock_irqrestore(&cpm1_gc->lock, flags); + + return 0; +} + +int cpm1_gpiochip_add32(struct device_node *np) +{ + struct cpm1_gpio32_chip *cpm1_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + if (!cpm1_gc) + return -ENOMEM; + + spin_lock_init(&cpm1_gc->lock); + + mm_gc = &cpm1_gc->mm_gc; + gc = &mm_gc->gc; + + mm_gc->save_regs = cpm1_gpio32_save_regs; + gc->ngpio = 32; + gc->direction_input = cpm1_gpio32_dir_in; + gc->direction_output = cpm1_gpio32_dir_out; + gc->get = cpm1_gpio32_get; + gc->set = cpm1_gpio32_set; + + return of_mm_gpiochip_add(np, mm_gc); +} + +static int cpm_init_par_io(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-a") + cpm1_gpiochip_add16(np); + + for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-b") + cpm1_gpiochip_add32(np); + + for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-c") + cpm1_gpiochip_add16(np); + + for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-d") + cpm1_gpiochip_add16(np); + + /* Port E uses CPM2 layout */ + for_each_compatible_node(np, NULL, "fsl,cpm1-pario-bank-e") + cpm2_gpiochip_add32(np); + return 0; +} +arch_initcall(cpm_init_par_io); + +#endif /* CONFIG_8xx_GPIO */ diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c new file mode 100644 index 00000000..8dc1e24f --- /dev/null +++ b/arch/powerpc/sysdev/cpm2.c @@ -0,0 +1,371 @@ +/* + * General Purpose functions for the global management of the + * 8260 Communication Processor Module. + * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com> + * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) + * 2.3.99 Updates + * + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +/* + * + * In addition to the individual control of the communication + * channels, there are a few functions that globally affect the + * communication processor. + * + * Buffer descriptors must be allocated from the dual ported memory + * space. The allocator for that is here. When the communication + * process is reset, we reclaim the memory available. There is + * currently no deallocator for this memory. + */ +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/mpc8260.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/cpm2.h> +#include <asm/rheap.h> +#include <asm/fs_pd.h> + +#include <sysdev/fsl_soc.h> + +cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */ + +/* We allocate this here because it is used almost exclusively for + * the communication processor devices. 
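+ * Those drivers reach it through the EXPORT_SYMBOL(cpm2_immr) just below.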
+ */
+cpm2_map_t __iomem *cpm2_immr;
+EXPORT_SYMBOL(cpm2_immr);
+
+#define CPM_MAP_SIZE	(0x40000)	/* 256k - the PQ3 reserves this amount
+					   of space for CPM as it is larger
+					   than on PQ2 */
+
+void __init cpm2_reset(void)
+{
+#ifdef CONFIG_PPC_85xx
+	cpm2_immr = ioremap(get_immrbase() + 0x80000, CPM_MAP_SIZE);
+#else
+	cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
+#endif
+
+	/* Reclaim the DP memory for our use.
+	 */
+	cpm_muram_init();
+
+	/* Tell everyone where the comm processor resides.
+	 */
+	cpmp = &cpm2_immr->im_cpm;
+
+#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
+	/* Reset the CPM.
+	 */
+	cpm_command(CPM_CR_RST, 0);
+#endif
+}
+
+static DEFINE_SPINLOCK(cmd_lock);
+
+#define MAX_CR_CMD_LOOPS	10000
+
+int cpm_command(u32 command, u8 opcode)
+{
+	int i, ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd_lock, flags);
+
+	ret = 0;
+	out_be32(&cpmp->cp_cpcr, command | opcode | CPM_CR_FLG);
+	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
+		if ((in_be32(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
+			goto out;
+
+	printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__);
+	ret = -EIO;
+out:
+	spin_unlock_irqrestore(&cmd_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(cpm_command);
+
+/* Set a baud rate generator. This needs lots of work. There are
+ * eight BRGs, which can be connected to the CPM channels or output
+ * as clocks. The BRGs are in two different blocks of internal
+ * memory mapped space.
+ * The baud rate clock is the system clock divided by something.
+ * It was set up long ago during the initial boot phase and is
+ * given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src)
+{
+	u32 __iomem *bp;
+	u32 val;
+
+	/* This is good enough to get SMCs running.....
+	 */
+	if (brg < 4) {
+		bp = cpm2_map_size(im_brgc1, 16);
+	} else {
+		bp = cpm2_map_size(im_brgc5, 16);
+		brg -= 4;
+	}
+	bp += brg;
+	/* Round the clock divider to the nearest integer.
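+	 * Computing with clk * 2 and then clearing bit 0 leaves the
+	 * divider-minus-one field already shifted left by one, rounded to
+	 * nearest: e.g. for an assumed clk of 16384000 and rate of 9600,
+	 * (2*16384000/9600 - 1) & ~1 = 3412, i.e. a divider of 1707, the
+	 * nearest integer to 16384000/9600 = 1706.67.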
*/ + val = (((clk * 2 / rate) - 1) & ~1) | CPM_BRG_EN | src; + if (div16) + val |= CPM_BRG_DIV16; + + out_be32(bp, val); + cpm2_unmap(bp); +} +EXPORT_SYMBOL(__cpm2_setbrg); + +int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) +{ + int ret = 0; + int shift; + int i, bits = 0; + cpmux_t __iomem *im_cpmux; + u32 __iomem *reg; + u32 mask = 7; + + u8 clk_map[][3] = { + {CPM_CLK_FCC1, CPM_BRG5, 0}, + {CPM_CLK_FCC1, CPM_BRG6, 1}, + {CPM_CLK_FCC1, CPM_BRG7, 2}, + {CPM_CLK_FCC1, CPM_BRG8, 3}, + {CPM_CLK_FCC1, CPM_CLK9, 4}, + {CPM_CLK_FCC1, CPM_CLK10, 5}, + {CPM_CLK_FCC1, CPM_CLK11, 6}, + {CPM_CLK_FCC1, CPM_CLK12, 7}, + {CPM_CLK_FCC2, CPM_BRG5, 0}, + {CPM_CLK_FCC2, CPM_BRG6, 1}, + {CPM_CLK_FCC2, CPM_BRG7, 2}, + {CPM_CLK_FCC2, CPM_BRG8, 3}, + {CPM_CLK_FCC2, CPM_CLK13, 4}, + {CPM_CLK_FCC2, CPM_CLK14, 5}, + {CPM_CLK_FCC2, CPM_CLK15, 6}, + {CPM_CLK_FCC2, CPM_CLK16, 7}, + {CPM_CLK_FCC3, CPM_BRG5, 0}, + {CPM_CLK_FCC3, CPM_BRG6, 1}, + {CPM_CLK_FCC3, CPM_BRG7, 2}, + {CPM_CLK_FCC3, CPM_BRG8, 3}, + {CPM_CLK_FCC3, CPM_CLK13, 4}, + {CPM_CLK_FCC3, CPM_CLK14, 5}, + {CPM_CLK_FCC3, CPM_CLK15, 6}, + {CPM_CLK_FCC3, CPM_CLK16, 7}, + {CPM_CLK_SCC1, CPM_BRG1, 0}, + {CPM_CLK_SCC1, CPM_BRG2, 1}, + {CPM_CLK_SCC1, CPM_BRG3, 2}, + {CPM_CLK_SCC1, CPM_BRG4, 3}, + {CPM_CLK_SCC1, CPM_CLK11, 4}, + {CPM_CLK_SCC1, CPM_CLK12, 5}, + {CPM_CLK_SCC1, CPM_CLK3, 6}, + {CPM_CLK_SCC1, CPM_CLK4, 7}, + {CPM_CLK_SCC2, CPM_BRG1, 0}, + {CPM_CLK_SCC2, CPM_BRG2, 1}, + {CPM_CLK_SCC2, CPM_BRG3, 2}, + {CPM_CLK_SCC2, CPM_BRG4, 3}, + {CPM_CLK_SCC2, CPM_CLK11, 4}, + {CPM_CLK_SCC2, CPM_CLK12, 5}, + {CPM_CLK_SCC2, CPM_CLK3, 6}, + {CPM_CLK_SCC2, CPM_CLK4, 7}, + {CPM_CLK_SCC3, CPM_BRG1, 0}, + {CPM_CLK_SCC3, CPM_BRG2, 1}, + {CPM_CLK_SCC3, CPM_BRG3, 2}, + {CPM_CLK_SCC3, CPM_BRG4, 3}, + {CPM_CLK_SCC3, CPM_CLK5, 4}, + {CPM_CLK_SCC3, CPM_CLK6, 5}, + {CPM_CLK_SCC3, CPM_CLK7, 6}, + {CPM_CLK_SCC3, CPM_CLK8, 7}, + {CPM_CLK_SCC4, CPM_BRG1, 0}, + {CPM_CLK_SCC4, CPM_BRG2, 1}, + {CPM_CLK_SCC4, CPM_BRG3, 2}, + {CPM_CLK_SCC4, CPM_BRG4, 3}, + {CPM_CLK_SCC4, CPM_CLK5, 4}, + {CPM_CLK_SCC4, CPM_CLK6, 5}, + {CPM_CLK_SCC4, CPM_CLK7, 6}, + {CPM_CLK_SCC4, CPM_CLK8, 7}, + }; + + im_cpmux = cpm2_map(im_cpmux); + + switch (target) { + case CPM_CLK_SCC1: + reg = &im_cpmux->cmx_scr; + shift = 24; + break; + case CPM_CLK_SCC2: + reg = &im_cpmux->cmx_scr; + shift = 16; + break; + case CPM_CLK_SCC3: + reg = &im_cpmux->cmx_scr; + shift = 8; + break; + case CPM_CLK_SCC4: + reg = &im_cpmux->cmx_scr; + shift = 0; + break; + case CPM_CLK_FCC1: + reg = &im_cpmux->cmx_fcr; + shift = 24; + break; + case CPM_CLK_FCC2: + reg = &im_cpmux->cmx_fcr; + shift = 16; + break; + case CPM_CLK_FCC3: + reg = &im_cpmux->cmx_fcr; + shift = 8; + break; + default: + printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n"); + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(clk_map); i++) { + if (clk_map[i][0] == target && clk_map[i][1] == clock) { + bits = clk_map[i][2]; + break; + } + } + if (i == ARRAY_SIZE(clk_map)) + ret = -EINVAL; + + bits <<= shift; + mask <<= shift; + + if (mode == CPM_CLK_RTX) { + bits |= bits << 3; + mask |= mask << 3; + } else if (mode == CPM_CLK_RX) { + bits <<= 3; + mask <<= 3; + } + + out_be32(reg, (in_be32(reg) & ~mask) | bits); + + cpm2_unmap(im_cpmux); + return ret; +} + +int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) +{ + int ret = 0; + int shift; + int i, bits = 0; + cpmux_t __iomem *im_cpmux; + u8 __iomem *reg; + u8 mask = 3; + + u8 clk_map[][3] = { + {CPM_CLK_SMC1, CPM_BRG1, 0}, + {CPM_CLK_SMC1, CPM_BRG7, 1}, + {CPM_CLK_SMC1, CPM_CLK7, 
2},
+		{CPM_CLK_SMC1, CPM_CLK9, 3},
+		{CPM_CLK_SMC2, CPM_BRG2, 0},
+		{CPM_CLK_SMC2, CPM_BRG8, 1},
+		{CPM_CLK_SMC2, CPM_CLK4, 2},
+		{CPM_CLK_SMC2, CPM_CLK15, 3},
+	};
+
+	im_cpmux = cpm2_map(im_cpmux);
+
+	switch (target) {
+	case CPM_CLK_SMC1:
+		reg = &im_cpmux->cmx_smr;
+		mask = 3;
+		shift = 4;
+		break;
+	case CPM_CLK_SMC2:
+		reg = &im_cpmux->cmx_smr;
+		mask = 3;
+		shift = 0;
+		break;
+	default:
+		printk(KERN_ERR "cpm2_smc_clk_setup: invalid clock target\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
+		if (clk_map[i][0] == target && clk_map[i][1] == clock) {
+			bits = clk_map[i][2];
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(clk_map))
+		ret = -EINVAL;
+
+	bits <<= shift;
+	mask <<= shift;
+
+	out_8(reg, (in_8(reg) & ~mask) | bits);
+
+	cpm2_unmap(im_cpmux);
+	return ret;
+}
+
+struct cpm2_ioports {
+	u32 dir, par, sor, odr, dat;
+	u32 res[3];
+};
+
+void cpm2_set_pin(int port, int pin, int flags)
+{
+	struct cpm2_ioports __iomem *iop =
+		(struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
+
+	pin = 1 << (31 - pin);
+
+	if (flags & CPM_PIN_OUTPUT)
+		setbits32(&iop[port].dir, pin);
+	else
+		clrbits32(&iop[port].dir, pin);
+
+	if (!(flags & CPM_PIN_GPIO))
+		setbits32(&iop[port].par, pin);
+	else
+		clrbits32(&iop[port].par, pin);
+
+	if (flags & CPM_PIN_SECONDARY)
+		setbits32(&iop[port].sor, pin);
+	else
+		clrbits32(&iop[port].sor, pin);
+
+	if (flags & CPM_PIN_OPENDRAIN)
+		setbits32(&iop[port].odr, pin);
+	else
+		clrbits32(&iop[port].odr, pin);
+}
+
+static int cpm_init_par_io(void)
+{
+	struct device_node *np;
+
+	for_each_compatible_node(np, NULL, "fsl,cpm2-pario-bank")
+		cpm2_gpiochip_add32(np);
+	return 0;
+}
+arch_initcall(cpm_init_par_io);
+
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
new file mode 100644
index 00000000..bcab50e2
--- /dev/null
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -0,0 +1,284 @@
+/*
+ * Platform information definitions.
+ *
+ * Copied from arch/ppc/syslib/cpm2_pic.c with minor subsequent updates
+ * to make it work in arch/powerpc/. Original (c) belongs to Dan Malek.
+ *
+ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * 1999-2001 (c) Dan Malek <dan@embeddedalley.com>
+ * 2006 (c) MontaVista Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/* The CPM2 internal interrupt controller.  It is usually
+ * the only interrupt controller.
+ * There are two 32-bit registers (high/low) for up to 64
+ * possible interrupts.
+ *
+ * Now, the fun starts.....Interrupt Numbers DO NOT MAP
+ * in a simple arithmetic fashion to mask or pending registers.
+ * That is, interrupt 4 does not map to bit position 4.
+ * We create two tables, indexed by vector number, to indicate
+ * which register to use and which bit in the register to use.
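+ * For example, vector 4 has irq_to_siureg[4] = 1 and
+ * irq_to_siubit[4] = 12, so it is masked via bit (1 << 12) of the
+ * second ("low") mask register.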
+ */ + +#include <linux/stddef.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/irq.h> + +#include <asm/immap_cpm2.h> +#include <asm/mpc8260.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/fs_pd.h> + +#include "cpm2_pic.h" + +/* External IRQS */ +#define CPM2_IRQ_EXT1 19 +#define CPM2_IRQ_EXT7 25 + +/* Port C IRQS */ +#define CPM2_IRQ_PORTC15 48 +#define CPM2_IRQ_PORTC0 63 + +static intctl_cpm2_t __iomem *cpm2_intctl; + +static struct irq_host *cpm2_pic_host; +#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) +static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; + +static const u_char irq_to_siureg[] = { + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + +/* bit numbers do not match the docs, these are precomputed so the bit for + * a given irq is (1 << irq_to_siubit[irq]) */ +static const u_char irq_to_siubit[] = { + 0, 15, 14, 13, 12, 11, 10, 9, + 8, 7, 6, 5, 4, 3, 2, 1, + 2, 1, 0, 14, 13, 12, 11, 10, + 9, 8, 7, 6, 5, 4, 3, 0, + 31, 30, 29, 28, 27, 26, 25, 24, + 23, 22, 21, 20, 19, 18, 17, 16, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static void cpm2_mask_irq(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = irqd_to_hwirq(d); + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + ppc_cached_irq_mask[word] &= ~(1 << bit); + out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); +} + +static void cpm2_unmask_irq(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = irqd_to_hwirq(d); + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + ppc_cached_irq_mask[word] |= 1 << bit; + out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); +} + +static void cpm2_ack(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = irqd_to_hwirq(d); + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit); +} + +static void cpm2_end_irq(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = irqd_to_hwirq(d); + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + ppc_cached_irq_mask[word] |= 1 << bit; + out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); + + /* + * Work around large numbers of spurious IRQs on PowerPC 82xx + * systems. + */ + mb(); +} + +static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) +{ + unsigned int src = irqd_to_hwirq(d); + unsigned int vold, vnew, edibit; + + /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or + * IRQ_TYPE_EDGE_BOTH (default). 
All others are IRQ_TYPE_EDGE_FALLING + * or IRQ_TYPE_LEVEL_LOW (default) + */ + if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) { + if (flow_type == IRQ_TYPE_NONE) + flow_type = IRQ_TYPE_EDGE_BOTH; + + if (flow_type != IRQ_TYPE_EDGE_BOTH && + flow_type != IRQ_TYPE_EDGE_FALLING) + goto err_sense; + } else { + if (flow_type == IRQ_TYPE_NONE) + flow_type = IRQ_TYPE_LEVEL_LOW; + + if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) + goto err_sense; + } + + irqd_set_trigger_type(d, flow_type); + if (flow_type & IRQ_TYPE_LEVEL_LOW) + __irq_set_handler_locked(d->irq, handle_level_irq); + else + __irq_set_handler_locked(d->irq, handle_edge_irq); + + /* internal IRQ senses are LEVEL_LOW + * EXT IRQ and Port C IRQ senses are programmable + */ + if (src >= CPM2_IRQ_EXT1 && src <= CPM2_IRQ_EXT7) + edibit = (14 - (src - CPM2_IRQ_EXT1)); + else + if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) + edibit = (31 - (CPM2_IRQ_PORTC0 - src)); + else + return (flow_type & IRQ_TYPE_LEVEL_LOW) ? + IRQ_SET_MASK_OK_NOCOPY : -EINVAL; + + vold = in_be32(&cpm2_intctl->ic_siexr); + + if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) + vnew = vold | (1 << edibit); + else + vnew = vold & ~(1 << edibit); + + if (vold != vnew) + out_be32(&cpm2_intctl->ic_siexr, vnew); + return IRQ_SET_MASK_OK_NOCOPY; + +err_sense: + pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); + return -EINVAL; +} + +static struct irq_chip cpm2_pic = { + .name = "CPM2 SIU", + .irq_mask = cpm2_mask_irq, + .irq_unmask = cpm2_unmask_irq, + .irq_ack = cpm2_ack, + .irq_eoi = cpm2_end_irq, + .irq_set_type = cpm2_set_irq_type, + .flags = IRQCHIP_EOI_IF_HANDLED, +}; + +unsigned int cpm2_get_irq(void) +{ + int irq; + unsigned long bits; + + /* For CPM2, read the SIVEC register and shift the bits down + * to get the irq number. */ + bits = in_be32(&cpm2_intctl->ic_sivec); + irq = bits >> 26; + + if (irq == 0) + return(-1); + return irq_linear_revmap(cpm2_pic_host, irq); +} + +static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); + + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq); + return 0; +} + +static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) +{ + *out_hwirq = intspec[0]; + if (intsize > 1) + *out_flags = intspec[1]; + else + *out_flags = IRQ_TYPE_NONE; + return 0; +} + +static struct irq_host_ops cpm2_pic_host_ops = { + .map = cpm2_pic_host_map, + .xlate = cpm2_pic_host_xlate, +}; + +void cpm2_pic_init(struct device_node *node) +{ + int i; + + cpm2_intctl = cpm2_map(im_intctl); + + /* Clear the CPM IRQ controller, in case it has any bits set + * from the bootloader + */ + + /* Mask out everything */ + + out_be32(&cpm2_intctl->ic_simrh, 0x00000000); + out_be32(&cpm2_intctl->ic_simrl, 0x00000000); + + wmb(); + + /* Ack everything */ + out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff); + out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff); + wmb(); + + /* Dummy read of the vector */ + i = in_be32(&cpm2_intctl->ic_sivec); + rmb(); + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
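+	 * (0x05309770 appears to be the power-on reset value of the SCPRR
+	 * registers, i.e. the documented default priority ordering.)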
+ */ + out_be16(&cpm2_intctl->ic_sicr, 0); + out_be32(&cpm2_intctl->ic_scprrh, 0x05309770); + out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); + + /* create a legacy host */ + cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, + 64, &cpm2_pic_host_ops, 64); + if (cpm2_pic_host == NULL) { + printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); + return; + } +} diff --git a/arch/powerpc/sysdev/cpm2_pic.h b/arch/powerpc/sysdev/cpm2_pic.h new file mode 100644 index 00000000..2c5f70c2 --- /dev/null +++ b/arch/powerpc/sysdev/cpm2_pic.h @@ -0,0 +1,8 @@ +#ifndef _PPC_KERNEL_CPM2_H +#define _PPC_KERNEL_CPM2_H + +extern unsigned int cpm2_get_irq(void); + +extern void cpm2_pic_init(struct device_node *); + +#endif /* _PPC_KERNEL_CPM2_H */ diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c new file mode 100644 index 00000000..2b69aa03 --- /dev/null +++ b/arch/powerpc/sysdev/cpm_common.c @@ -0,0 +1,348 @@ +/* + * Common CPM code + * + * Author: Scott Wood <scottwood@freescale.com> + * + * Copyright 2007 Freescale Semiconductor, Inc. + * + * Some parts derived from commproc.c/cpm2_common.c, which is: + * Copyright (c) 1997 Dan error_act (dmalek@jlc.net) + * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com> + * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/slab.h> + +#include <asm/udbg.h> +#include <asm/io.h> +#include <asm/system.h> +#include <asm/rheap.h> +#include <asm/cpm.h> + +#include <mm/mmu_decl.h> + +#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) +#include <linux/of_gpio.h> +#endif + +#ifdef CONFIG_PPC_EARLY_DEBUG_CPM +static u32 __iomem *cpm_udbg_txdesc = + (u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR; + +static void udbg_putc_cpm(char c) +{ + u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]); + + if (c == '\n') + udbg_putc_cpm('\r'); + + while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000) + ; + + out_8(txbuf, c); + out_be32(&cpm_udbg_txdesc[0], 0xa0000001); +} + +void __init udbg_init_cpm(void) +{ + if (cpm_udbg_txdesc) { +#ifdef CONFIG_CPM2 + setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG); +#endif + udbg_putc = udbg_putc_cpm; + } +} +#endif + +static spinlock_t cpm_muram_lock; +static rh_block_t cpm_boot_muram_rh_block[16]; +static rh_info_t cpm_muram_info; +static u8 __iomem *muram_vbase; +static phys_addr_t muram_pbase; + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 + +int cpm_muram_init(void) +{ + struct device_node *np; + struct resource r; + u32 zero[OF_MAX_ADDR_CELLS] = {}; + resource_size_t max = 0; + int i = 0; + int ret = 0; + + if (muram_pbase) + return 0; + + spin_lock_init(&cpm_muram_lock); + /* initialize the info header */ + rh_init(&cpm_muram_info, 1, + sizeof(cpm_boot_muram_rh_block) / + sizeof(cpm_boot_muram_rh_block[0]), + cpm_boot_muram_rh_block); + + np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data"); + if (!np) { + /* try legacy bindings */ + np = of_find_node_by_name(NULL, "data-only"); + if (!np) { + printk(KERN_ERR "Cannot find CPM muram data node"); + ret = -ENODEV; + goto out; + } + } + + muram_pbase = 
of_translate_address(np, zero); + if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { + printk(KERN_ERR "Cannot translate zero through CPM muram node"); + ret = -ENODEV; + goto out; + } + + while (of_address_to_resource(np, i++, &r) == 0) { + if (r.end > max) + max = r.end; + + rh_attach_region(&cpm_muram_info, r.start - muram_pbase, + r.end - r.start + 1); + } + + muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1); + if (!muram_vbase) { + printk(KERN_ERR "Cannot map CPM muram"); + ret = -ENOMEM; + } + +out: + of_node_put(np); + return ret; +} + +/** + * cpm_muram_alloc - allocate the requested size worth of multi-user ram + * @size: number of bytes to allocate + * @align: requested alignment, in bytes + * + * This function returns an offset into the muram area. + * Use cpm_dpram_addr() to get the virtual address of the area. + * Use cpm_muram_free() to free the allocation. + */ +unsigned long cpm_muram_alloc(unsigned long size, unsigned long align) +{ + unsigned long start; + unsigned long flags; + + spin_lock_irqsave(&cpm_muram_lock, flags); + cpm_muram_info.alignment = align; + start = rh_alloc(&cpm_muram_info, size, "commproc"); + spin_unlock_irqrestore(&cpm_muram_lock, flags); + + return start; +} +EXPORT_SYMBOL(cpm_muram_alloc); + +/** + * cpm_muram_free - free a chunk of multi-user ram + * @offset: The beginning of the chunk as returned by cpm_muram_alloc(). + */ +int cpm_muram_free(unsigned long offset) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&cpm_muram_lock, flags); + ret = rh_free(&cpm_muram_info, offset); + spin_unlock_irqrestore(&cpm_muram_lock, flags); + + return ret; +} +EXPORT_SYMBOL(cpm_muram_free); + +/** + * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram + * @offset: the offset into the muram area to reserve + * @size: the number of bytes to reserve + * + * This function returns "start" on success, -ENOMEM on failure. + * Use cpm_dpram_addr() to get the virtual address of the area. + * Use cpm_muram_free() to free the allocation. 
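+ * Note that, unlike cpm_muram_alloc(), this path forces the rheap
+ * alignment to 1, since the caller supplies an exact offset.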
+ */ +unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size) +{ + unsigned long start; + unsigned long flags; + + spin_lock_irqsave(&cpm_muram_lock, flags); + cpm_muram_info.alignment = 1; + start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc"); + spin_unlock_irqrestore(&cpm_muram_lock, flags); + + return start; +} +EXPORT_SYMBOL(cpm_muram_alloc_fixed); + +/** + * cpm_muram_addr - turn a muram offset into a virtual address + * @offset: muram offset to convert + */ +void __iomem *cpm_muram_addr(unsigned long offset) +{ + return muram_vbase + offset; +} +EXPORT_SYMBOL(cpm_muram_addr); + +unsigned long cpm_muram_offset(void __iomem *addr) +{ + return addr - (void __iomem *)muram_vbase; +} +EXPORT_SYMBOL(cpm_muram_offset); + +/** + * cpm_muram_dma - turn a muram virtual address into a DMA address + * @offset: virtual address from cpm_muram_addr() to convert + */ +dma_addr_t cpm_muram_dma(void __iomem *addr) +{ + return muram_pbase + ((u8 __iomem *)addr - muram_vbase); +} +EXPORT_SYMBOL(cpm_muram_dma); + +#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) + +struct cpm2_ioports { + u32 dir, par, sor, odr, dat; + u32 res[3]; +}; + +struct cpm2_gpio32_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; + + /* shadowed data register to clear/set bits safely */ + u32 cpdata; +}; + +static inline struct cpm2_gpio32_chip * +to_cpm2_gpio32_chip(struct of_mm_gpio_chip *mm_gc) +{ + return container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc); +} + +static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) +{ + struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc); + struct cpm2_ioports __iomem *iop = mm_gc->regs; + + cpm2_gc->cpdata = in_be32(&iop->dat); +} + +static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm2_ioports __iomem *iop = mm_gc->regs; + u32 pin_mask; + + pin_mask = 1 << (31 - gpio); + + return !!(in_be32(&iop->dat) & pin_mask); +} + +static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, + int value) +{ + struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc); + struct cpm2_ioports __iomem *iop = mm_gc->regs; + + if (value) + cpm2_gc->cpdata |= pin_mask; + else + cpm2_gc->cpdata &= ~pin_mask; + + out_be32(&iop->dat, cpm2_gc->cpdata); +} + +static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc); + unsigned long flags; + u32 pin_mask = 1 << (31 - gpio); + + spin_lock_irqsave(&cpm2_gc->lock, flags); + + __cpm2_gpio32_set(mm_gc, pin_mask, value); + + spin_unlock_irqrestore(&cpm2_gc->lock, flags); +} + +static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc); + struct cpm2_ioports __iomem *iop = mm_gc->regs; + unsigned long flags; + u32 pin_mask = 1 << (31 - gpio); + + spin_lock_irqsave(&cpm2_gc->lock, flags); + + setbits32(&iop->dir, pin_mask); + __cpm2_gpio32_set(mm_gc, pin_mask, val); + + spin_unlock_irqrestore(&cpm2_gc->lock, flags); + + return 0; +} + +static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc); + struct cpm2_ioports __iomem *iop = mm_gc->regs; + unsigned long flags; + u32 
pin_mask = 1 << (31 - gpio); + + spin_lock_irqsave(&cpm2_gc->lock, flags); + + clrbits32(&iop->dir, pin_mask); + + spin_unlock_irqrestore(&cpm2_gc->lock, flags); + + return 0; +} + +int cpm2_gpiochip_add32(struct device_node *np) +{ + struct cpm2_gpio32_chip *cpm2_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL); + if (!cpm2_gc) + return -ENOMEM; + + spin_lock_init(&cpm2_gc->lock); + + mm_gc = &cpm2_gc->mm_gc; + gc = &mm_gc->gc; + + mm_gc->save_regs = cpm2_gpio32_save_regs; + gc->ngpio = 32; + gc->direction_input = cpm2_gpio32_dir_in; + gc->direction_output = cpm2_gpio32_dir_out; + gc->get = cpm2_gpio32_get; + gc->set = cpm2_gpio32_set; + + return of_mm_gpiochip_add(np, mm_gc); +} +#endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */ diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h new file mode 100644 index 00000000..ff202edb --- /dev/null +++ b/arch/powerpc/sysdev/dart.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _POWERPC_SYSDEV_DART_H +#define _POWERPC_SYSDEV_DART_H + + +/* Offset from base to control register */ +#define DART_CNTL 0 + +/* Offset from base to exception register */ +#define DART_EXCP_U3 0x10 +/* Offset from base to TLB tag registers */ +#define DART_TAGS_U3 0x1000 + +/* U4 registers */ +#define DART_BASE_U4 0x10 +#define DART_SIZE_U4 0x20 +#define DART_EXCP_U4 0x30 +#define DART_TAGS_U4 0x1000 + +/* Control Register fields */ + +/* U3 registers */ +#define DART_CNTL_U3_BASE_MASK 0xfffff +#define DART_CNTL_U3_BASE_SHIFT 12 +#define DART_CNTL_U3_FLUSHTLB 0x400 +#define DART_CNTL_U3_ENABLE 0x200 +#define DART_CNTL_U3_SIZE_MASK 0x1ff +#define DART_CNTL_U3_SIZE_SHIFT 0 + +/* U4 registers */ +#define DART_BASE_U4_BASE_MASK 0xffffff +#define DART_BASE_U4_BASE_SHIFT 0 +#define DART_CNTL_U4_ENABLE 0x80000000 +#define DART_CNTL_U4_IONE 0x40000000 +#define DART_CNTL_U4_FLUSHTLB 0x20000000 +#define DART_CNTL_U4_IDLE 0x10000000 +#define DART_CNTL_U4_PAR_EN 0x08000000 +#define DART_CNTL_U4_IONE_MASK 0x07ffffff +#define DART_SIZE_U4_SIZE_MASK 0x1fff +#define DART_SIZE_U4_SIZE_SHIFT 0 + +#define DART_REG(r) (dart + ((r) >> 2)) +#define DART_IN(r) (in_be32(DART_REG(r))) +#define DART_OUT(r,v) (out_be32(DART_REG(r), (v))) + + +/* size of table in pages */ + + +/* DART table fields */ + +#define DARTMAP_VALID 0x80000000 +#define DARTMAP_RPNMASK 0x00ffffff + + +#define DART_PAGE_SHIFT 12 +#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT) + + +#endif /* _POWERPC_SYSDEV_DART_H */ diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c new file mode 100644 index 00000000..8e9e06a7 --- /dev/null +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -0,0 +1,469 @@ +/* + * arch/powerpc/sysdev/dart_iommu.c + * + * Copyright (C) 2004 Olof 
Johansson <olof@lixom.net>, IBM Corporation + * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>, + * IBM Corporation + * + * Based on pSeries_iommu.c: + * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation + * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation + * + * Dynamic DMA mapping support, Apple U3, U4 & IBM CPC925 "DART" iommu. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/suspend.h> +#include <linux/memblock.h> +#include <linux/gfp.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/iommu.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> +#include <asm/abs_addr.h> +#include <asm/cacheflush.h> +#include <asm/ppc-pci.h> + +#include "dart.h" + +/* Physical base address and size of the DART table */ +unsigned long dart_tablebase; /* exported to htab_initialize */ +static unsigned long dart_tablesize; + +/* Virtual base address of the DART table */ +static u32 *dart_vbase; +#ifdef CONFIG_PM +static u32 *dart_copy; +#endif + +/* Mapped base address for the dart */ +static unsigned int __iomem *dart; + +/* Dummy val that entries are set to when unused */ +static unsigned int dart_emptyval; + +static struct iommu_table iommu_table_dart; +static int iommu_table_dart_inited; +static int dart_dirty; +static int dart_is_u4; + +#define DART_U4_BYPASS_BASE 0x8000000000ull + +#define DBG(...) + +static inline void dart_tlb_invalidate_all(void) +{ + unsigned long l = 0; + unsigned int reg, inv_bit; + unsigned long limit; + + DBG("dart: flush\n"); + + /* To invalidate the DART, set the DARTCNTL_FLUSHTLB bit in the + * control register and wait for it to clear. + * + * Gotcha: Sometimes, the DART won't detect that the bit gets + * set. If so, clear it and set it again. + */ + + limit = 0; + + inv_bit = dart_is_u4 ? DART_CNTL_U4_FLUSHTLB : DART_CNTL_U3_FLUSHTLB; +retry: + l = 0; + reg = DART_IN(DART_CNTL); + reg |= inv_bit; + DART_OUT(DART_CNTL, reg); + + while ((DART_IN(DART_CNTL) & inv_bit) && l < (1L << limit)) + l++; + if (l == (1L << limit)) { + if (limit < 4) { + limit++; + reg = DART_IN(DART_CNTL); + reg &= ~inv_bit; + DART_OUT(DART_CNTL, reg); + goto retry; + } else + panic("DART: TLB did not flush after waiting a long " + "time. 
Buggy U3 ?"); + } +} + +static inline void dart_tlb_invalidate_one(unsigned long bus_rpn) +{ + unsigned int reg; + unsigned int l, limit; + + reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE | + (bus_rpn & DART_CNTL_U4_IONE_MASK); + DART_OUT(DART_CNTL, reg); + + limit = 0; +wait_more: + l = 0; + while ((DART_IN(DART_CNTL) & DART_CNTL_U4_IONE) && l < (1L << limit)) { + rmb(); + l++; + } + + if (l == (1L << limit)) { + if (limit < 4) { + limit++; + goto wait_more; + } else + panic("DART: TLB did not flush after waiting a long " + "time. Buggy U4 ?"); + } +} + +static void dart_flush(struct iommu_table *tbl) +{ + mb(); + if (dart_dirty) { + dart_tlb_invalidate_all(); + dart_dirty = 0; + } +} + +static int dart_build(struct iommu_table *tbl, long index, + long npages, unsigned long uaddr, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + unsigned int *dp; + unsigned int rpn; + long l; + + DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); + + dp = ((unsigned int*)tbl->it_base) + index; + + /* On U3, all memory is contiguous, so we can move this + * out of the loop. + */ + l = npages; + while (l--) { + rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT; + + *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); + + uaddr += DART_PAGE_SIZE; + } + + /* make sure all updates have reached memory */ + mb(); + in_be32((unsigned __iomem *)dp); + mb(); + + if (dart_is_u4) { + rpn = index; + while (npages--) + dart_tlb_invalidate_one(rpn++); + } else { + dart_dirty = 1; + } + return 0; +} + + +static void dart_free(struct iommu_table *tbl, long index, long npages) +{ + unsigned int *dp; + + /* We don't worry about flushing the TLB cache. The only drawback of + * not doing it is that we won't catch buggy device drivers doing + * bad DMAs, but then no 32-bit architecture ever does either. + */ + + DBG("dart: free at: %lx, %lx\n", index, npages); + + dp = ((unsigned int *)tbl->it_base) + index; + + while (npages--) + *(dp++) = dart_emptyval; +} + + +static int __init dart_init(struct device_node *dart_node) +{ + unsigned int i; + unsigned long tmp, base, size; + struct resource r; + + if (dart_tablebase == 0 || dart_tablesize == 0) { + printk(KERN_INFO "DART: table not allocated, using " + "direct DMA\n"); + return -ENODEV; + } + + if (of_address_to_resource(dart_node, 0, &r)) + panic("DART: can't get register base ! "); + + /* Make sure nothing from the DART range remains in the CPU cache + * from a previous mapping that existed before the kernel took + * over + */ + flush_dcache_phys_range(dart_tablebase, + dart_tablebase + dart_tablesize); + + /* Allocate a spare page to map all invalid DART pages. We need to do + * that to work around what looks like a problem with the HT bridge + * prefetching into invalid pages and corrupting data + */ + tmp = memblock_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); + dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & + DARTMAP_RPNMASK); + + /* Map in DART registers */ + dart = ioremap(r.start, r.end - r.start + 1); + if (dart == NULL) + panic("DART: Cannot map registers!"); + + /* Map in DART table */ + dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); + + /* Fill initial table */ + for (i = 0; i < dart_tablesize/4; i++) + dart_vbase[i] = dart_emptyval; + + /* Initialize DART with table base and enable it. 
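+	 * U4 has separate BASE/SIZE registers, while U3 packs base and
+	 * size into the single DART_CNTL word, hence the two branches
+	 * below.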
*/
+	base = dart_tablebase >> DART_PAGE_SHIFT;
+	size = dart_tablesize >> DART_PAGE_SHIFT;
+	if (dart_is_u4) {
+		size &= DART_SIZE_U4_SIZE_MASK;
+		DART_OUT(DART_BASE_U4, base);
+		DART_OUT(DART_SIZE_U4, size);
+		DART_OUT(DART_CNTL, DART_CNTL_U4_ENABLE);
+	} else {
+		size &= DART_CNTL_U3_SIZE_MASK;
+		DART_OUT(DART_CNTL,
+			 DART_CNTL_U3_ENABLE |
+			 (base << DART_CNTL_U3_BASE_SHIFT) |
+			 (size << DART_CNTL_U3_SIZE_SHIFT));
+	}
+
+	/* Invalidate DART to get rid of possible stale TLBs */
+	dart_tlb_invalidate_all();
+
+	printk(KERN_INFO "DART IOMMU initialized for %s type chipset\n",
+	       dart_is_u4 ? "U4" : "U3");
+
+	return 0;
+}
+
+static void iommu_table_dart_setup(void)
+{
+	iommu_table_dart.it_busno = 0;
+	iommu_table_dart.it_offset = 0;
+	/* it_size is in number of entries */
+	iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
+
+	/* Initialize the common IOMMU code */
+	iommu_table_dart.it_base = (unsigned long)dart_vbase;
+	iommu_table_dart.it_index = 0;
+	iommu_table_dart.it_blocksize = 1;
+	iommu_init_table(&iommu_table_dart, -1);
+
+	/* Reserve the last page of the DART to avoid possible prefetch
+	 * past the DART mapped area
+	 */
+	set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
+}
+
+static void dma_dev_setup_dart(struct device *dev)
+{
+	/* We only have one iommu table on the mac for now, which makes
+	 * things simple. Setup all PCI devices to point to this table
+	 */
+	if (get_dma_ops(dev) == &dma_direct_ops)
+		set_dma_offset(dev, DART_U4_BYPASS_BASE);
+	else
+		set_iommu_table_base(dev, &iommu_table_dart);
+}
+
+static void pci_dma_dev_setup_dart(struct pci_dev *dev)
+{
+	dma_dev_setup_dart(&dev->dev);
+}
+
+static void pci_dma_bus_setup_dart(struct pci_bus *bus)
+{
+	if (!iommu_table_dart_inited) {
+		iommu_table_dart_inited = 1;
+		iommu_table_dart_setup();
+	}
+}
+
+static bool dart_device_on_pcie(struct device *dev)
+{
+	struct device_node *np = of_node_get(dev->of_node);
+
+	while(np) {
+		if (of_device_is_compatible(np, "U4-pcie") ||
+		    of_device_is_compatible(np, "u4-pcie")) {
+			of_node_put(np);
+			return true;
+		}
+		np = of_get_next_parent(np);
+	}
+	return false;
+}
+
+static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	/* U4 supports a DART bypass, we use it for 64-bit capable
+	 * devices to improve performance.  However, that only works
+	 * for devices connected to U4's own PCIe interface, not bridged
+	 * through HyperTransport.  We need the device to support at
+	 * least 40 bits of addresses.
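+	 * (The bypass itself is applied in dma_dev_setup_dart() by using
+	 * DART_U4_BYPASS_BASE as a constant DMA offset.)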
+ */ + if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) { + dev_info(dev, "Using 64-bit DMA iommu bypass\n"); + set_dma_ops(dev, &dma_direct_ops); + } else { + dev_info(dev, "Using 32-bit DMA via iommu\n"); + set_dma_ops(dev, &dma_iommu_ops); + } + dma_dev_setup_dart(dev); + + *dev->dma_mask = dma_mask; + return 0; +} + +void __init iommu_init_early_dart(void) +{ + struct device_node *dn; + + /* Find the DART in the device-tree */ + dn = of_find_compatible_node(NULL, "dart", "u3-dart"); + if (dn == NULL) { + dn = of_find_compatible_node(NULL, "dart", "u4-dart"); + if (dn == NULL) + return; /* use default direct_dma_ops */ + dart_is_u4 = 1; + } + + /* Initialize the DART HW */ + if (dart_init(dn) != 0) + goto bail; + + /* Setup low level TCE operations for the core IOMMU code */ + ppc_md.tce_build = dart_build; + ppc_md.tce_free = dart_free; + ppc_md.tce_flush = dart_flush; + + /* Setup bypass if supported */ + if (dart_is_u4) + ppc_md.dma_set_mask = dart_dma_set_mask; + + ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart; + ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart; + + /* Setup pci_dma ops */ + set_pci_dma_ops(&dma_iommu_ops); + return; + + bail: + /* If init failed, use direct iommu and null setup functions */ + ppc_md.pci_dma_dev_setup = NULL; + ppc_md.pci_dma_bus_setup = NULL; + + /* Setup pci_dma ops */ + set_pci_dma_ops(&dma_direct_ops); +} + +#ifdef CONFIG_PM +static void iommu_dart_save(void) +{ + memcpy(dart_copy, dart_vbase, 2*1024*1024); +} + +static void iommu_dart_restore(void) +{ + memcpy(dart_vbase, dart_copy, 2*1024*1024); + dart_tlb_invalidate_all(); +} + +static int __init iommu_init_late_dart(void) +{ + unsigned long tbasepfn; + struct page *p; + + /* if no dart table exists then we won't need to save it + * and the area has also not been reserved */ + if (!dart_tablebase) + return 0; + + tbasepfn = __pa(dart_tablebase) >> PAGE_SHIFT; + register_nosave_region_late(tbasepfn, + tbasepfn + ((1<<24) >> PAGE_SHIFT)); + + /* For suspend we need to copy the dart contents because + * it is not part of the regular mapping (see above) and + * thus not saved automatically. The memory for this copy + * must be allocated early because we need 2 MB. */ + p = alloc_pages(GFP_KERNEL, 21 - PAGE_SHIFT); + BUG_ON(!p); + dart_copy = page_address(p); + + ppc_md.iommu_save = iommu_dart_save; + ppc_md.iommu_restore = iommu_dart_restore; + + return 0; +} + +late_initcall(iommu_init_late_dart); +#endif + +void __init alloc_dart_table(void) +{ + /* Only reserve DART space if machine has more than 1GB of RAM + * or if requested with iommu=on on cmdline. + * + * 1GB of RAM is picked as limit because some default devices + * (i.e. Airport Extreme) have 30 bit address range limits. + */ + + if (iommu_is_off) + return; + + if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull) + return; + + /* 512 pages (2MB) is max DART tablesize. */ + dart_tablesize = 1UL << 21; + /* 16MB (1 << 24) alignment. 
We allocate a full 16MB chunk since we
+	 * will blow up an entire large page anyway in the kernel mapping
+	 */
+	dart_tablebase = (unsigned long)
+		abs_to_virt(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+
+	printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
+}
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
new file mode 100644
index 00000000..d3098ef1
--- /dev/null
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -0,0 +1,45 @@
+/*
+ * "Indirect" DCR access
+ *
+ * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+
+#define DCR_ACCESS_PROLOG(table) \
+	cmpli	cr0,r3,1024;	 \
+	rlwinm	r3,r3,4,18,27;	 \
+	lis	r5,table@h;	 \
+	ori	r5,r5,table@l;	 \
+	add	r3,r3,r5;	 \
+	bge-	1f;		 \
+	mtctr	r3;		 \
+	bctr;			 \
+1:	trap;			 \
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \
+	blr
+
+_GLOBAL(__mfdcr)
+	DCR_ACCESS_PROLOG(__mfdcr_table)
+
+_GLOBAL(__mtdcr)
+	DCR_ACCESS_PROLOG(__mtdcr_table)
+
+__mfdcr_table:
+	mfdcr	r3,0; blr
+__mtdcr_table:
+	mtdcr	0,r4; blr
+
+dcr	= 1
+	.rept	1023
+	mfdcr	r3,dcr; blr
+	mtdcr	dcr,r4; blr
+	dcr	= dcr + 1
+	.endr
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
new file mode 100644
index 00000000..bb44aa9f
--- /dev/null
+++ b/arch/powerpc/sysdev/dcr.c
@@ -0,0 +1,233 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ * the GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#undef DEBUG + +#include <linux/kernel.h> +#include <asm/prom.h> +#include <asm/dcr.h> + +#ifdef CONFIG_PPC_DCR_MMIO +static struct device_node *find_dcr_parent(struct device_node *node) +{ + struct device_node *par, *tmp; + const u32 *p; + + for (par = of_node_get(node); par;) { + if (of_get_property(par, "dcr-controller", NULL)) + break; + p = of_get_property(par, "dcr-parent", NULL); + tmp = par; + if (p == NULL) + par = of_get_parent(par); + else + par = of_find_node_by_phandle(*p); + of_node_put(tmp); + } + return par; +} +#endif + +#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) + +bool dcr_map_ok_generic(dcr_host_t host) +{ + if (host.type == DCR_HOST_NATIVE) + return dcr_map_ok_native(host.host.native); + else if (host.type == DCR_HOST_MMIO) + return dcr_map_ok_mmio(host.host.mmio); + else + return 0; +} +EXPORT_SYMBOL_GPL(dcr_map_ok_generic); + +dcr_host_t dcr_map_generic(struct device_node *dev, + unsigned int dcr_n, + unsigned int dcr_c) +{ + dcr_host_t host; + struct device_node *dp; + const char *prop; + + host.type = DCR_HOST_INVALID; + + dp = find_dcr_parent(dev); + if (dp == NULL) + return host; + + prop = of_get_property(dp, "dcr-access-method", NULL); + + pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop); + + if (!strcmp(prop, "native")) { + host.type = DCR_HOST_NATIVE; + host.host.native = dcr_map_native(dev, dcr_n, dcr_c); + } else if (!strcmp(prop, "mmio")) { + host.type = DCR_HOST_MMIO; + host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c); + } + + of_node_put(dp); + return host; +} +EXPORT_SYMBOL_GPL(dcr_map_generic); + +void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c) +{ + if (host.type == DCR_HOST_NATIVE) + dcr_unmap_native(host.host.native, dcr_c); + else if (host.type == DCR_HOST_MMIO) + dcr_unmap_mmio(host.host.mmio, dcr_c); + else /* host.type == DCR_HOST_INVALID */ + WARN_ON(true); +} +EXPORT_SYMBOL_GPL(dcr_unmap_generic); + +u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n) +{ + if (host.type == DCR_HOST_NATIVE) + return dcr_read_native(host.host.native, dcr_n); + else if (host.type == DCR_HOST_MMIO) + return dcr_read_mmio(host.host.mmio, dcr_n); + else /* host.type == DCR_HOST_INVALID */ + WARN_ON(true); + return 0; +} +EXPORT_SYMBOL_GPL(dcr_read_generic); + +void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value) +{ + if (host.type == DCR_HOST_NATIVE) + dcr_write_native(host.host.native, dcr_n, value); + else if (host.type == DCR_HOST_MMIO) + dcr_write_mmio(host.host.mmio, dcr_n, value); + else /* host.type == DCR_HOST_INVALID */ + WARN_ON(true); +} +EXPORT_SYMBOL_GPL(dcr_write_generic); + +#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ + +unsigned int dcr_resource_start(const struct device_node *np, + unsigned int index) +{ + unsigned int ds; + const u32 *dr = of_get_property(np, "dcr-reg", &ds); + + if (dr == NULL || ds & 1 || index >= (ds / 8)) + return 0; + + return dr[index * 2]; +} +EXPORT_SYMBOL_GPL(dcr_resource_start); + +unsigned int dcr_resource_len(const struct device_node *np, unsigned int index) +{ + unsigned int ds; + const u32 *dr = of_get_property(np, "dcr-reg", &ds); + + if (dr == NULL || ds & 1 || index >= (ds / 8)) + return 0; + + return dr[index * 2 + 1]; +} +EXPORT_SYMBOL_GPL(dcr_resource_len); + +#ifdef CONFIG_PPC_DCR_MMIO + +u64 
of_translate_dcr_address(struct device_node *dev, + unsigned int dcr_n, + unsigned int *out_stride) +{ + struct device_node *dp; + const u32 *p; + unsigned int stride; + u64 ret = OF_BAD_ADDR; + + dp = find_dcr_parent(dev); + if (dp == NULL) + return OF_BAD_ADDR; + + /* Stride is not properly defined yet, default to 0x10 for Axon */ + p = of_get_property(dp, "dcr-mmio-stride", NULL); + stride = (p == NULL) ? 0x10 : *p; + + /* XXX FIXME: Which property name is to use of the 2 following ? */ + p = of_get_property(dp, "dcr-mmio-range", NULL); + if (p == NULL) + p = of_get_property(dp, "dcr-mmio-space", NULL); + if (p == NULL) + goto done; + + /* Maybe could do some better range checking here */ + ret = of_translate_address(dp, p); + if (ret != OF_BAD_ADDR) + ret += (u64)(stride) * (u64)dcr_n; + if (out_stride) + *out_stride = stride; + + done: + of_node_put(dp); + return ret; +} + +dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, + unsigned int dcr_n, + unsigned int dcr_c) +{ + dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n }; + u64 addr; + + pr_debug("dcr_map(%s, 0x%x, 0x%x)\n", + dev->full_name, dcr_n, dcr_c); + + addr = of_translate_dcr_address(dev, dcr_n, &ret.stride); + pr_debug("translates to addr: 0x%llx, stride: 0x%x\n", + (unsigned long long) addr, ret.stride); + if (addr == OF_BAD_ADDR) + return ret; + pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride); + ret.token = ioremap(addr, dcr_c * ret.stride); + if (ret.token == NULL) + return ret; + pr_debug("mapped at 0x%p -> base is 0x%p\n", + ret.token, ret.token - dcr_n * ret.stride); + ret.token -= dcr_n * ret.stride; + return ret; +} +EXPORT_SYMBOL_GPL(dcr_map_mmio); + +void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c) +{ + dcr_host_mmio_t h = host; + + if (h.token == NULL) + return; + h.token += host.base * h.stride; + iounmap(h.token); + h.token = NULL; +} +EXPORT_SYMBOL_GPL(dcr_unmap_mmio); + +#endif /* defined(CONFIG_PPC_DCR_MMIO) */ + +#ifdef CONFIG_PPC_DCR_NATIVE +DEFINE_SPINLOCK(dcr_ind_lock); +#endif /* defined(CONFIG_PPC_DCR_NATIVE) */ + diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h new file mode 100644 index 00000000..60c9c0bd --- /dev/null +++ b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h @@ -0,0 +1,101 @@ +/* + * Copyright 2009-2010 Freescale Semiconductor, Inc + * + * QorIQ based Cache Controller Memory Mapped Registers + * + * Author: Vivek Mahajan <vivek.mahajan@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __FSL_85XX_CACHE_CTLR_H__ +#define __FSL_85XX_CACHE_CTLR_H__ + +#define L2CR_L2FI 0x40000000 /* L2 flash invalidate */ +#define L2CR_L2IO 0x00200000 /* L2 instruction only */ +#define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */ +#define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */ +#define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */ +#define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */ +#define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */ +#define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */ +#define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */ +#define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */ + +#define L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */ + +#define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */ +#define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */ + +enum cache_sram_lock_ways { + LOCK_WAYS_ZERO, + LOCK_WAYS_EIGHTH, + LOCK_WAYS_TWO_EIGHTH, + LOCK_WAYS_HALF = 4, + LOCK_WAYS_FULL = 8, +}; + +struct mpc85xx_l2ctlr { + u32 ctl; /* 0x000 - L2 control */ + u8 res1[0xC]; + u32 ewar0; /* 0x010 - External write address 0 */ + u32 ewarea0; /* 0x014 - External write address extended 0 */ + u32 ewcr0; /* 0x018 - External write ctrl */ + u8 res2[4]; + u32 ewar1; /* 0x020 - External write address 1 */ + u32 ewarea1; /* 0x024 - External write address extended 1 */ + u32 ewcr1; /* 0x028 - External write ctrl 1 */ + u8 res3[4]; + u32 ewar2; /* 0x030 - External write address 2 */ + u32 ewarea2; /* 0x034 - External write address extended 2 */ + u32 ewcr2; /* 0x038 - External write ctrl 2 */ + u8 res4[4]; + u32 ewar3; /* 0x040 - External write address 3 */ + u32 ewarea3; /* 0x044 - External write address extended 3 */ + u32 ewcr3; /* 0x048 - External write ctrl 3 */ + u8 res5[0xB4]; + u32 srbar0; /* 0x100 - SRAM base address 0 */ + u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */ + u32 srbar1; /* 0x108 - SRAM base address 1 */ + u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */ + u8 res6[0xCF0]; + u32 errinjhi; /* 0xE00 - Error injection mask high */ + u32 errinjlo; /* 0xE04 - Error injection mask low */ + u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */ + u8 res7[0x14]; + u32 captdatahi; /* 0xE20 - Error data high capture */ + u32 captdatalo; /* 0xE24 - Error data low capture */ + u32 captecc; /* 0xE28 - Error syndrome */ + u8 res8[0x14]; + u32 errdet; /* 0xE40 - Error detect */ + u32 errdis; /* 0xE44 - Error disable */ + u32 errinten; /* 0xE48 - Error interrupt enable */ + u32 errattr; /* 0xE4c - Error attribute capture */ + u32 erradrrl; /* 0xE50 - Error address capture low */ + u32 erradrrh; /* 0xE54 - Error address capture high */ + u32 errctl; /* 0xE58 - Error control */ + u8 res9[0x1A4]; +}; + +struct sram_parameters { + unsigned int sram_size; + uint64_t sram_offset; +}; + +extern int instantiate_cache_sram(struct platform_device *dev, + struct sram_parameters sram_params); +extern void remove_cache_sram(struct platform_device *dev); + +#endif /* __FSL_85XX_CACHE_CTLR_H__ */ diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c new file mode 100644 index 00000000..11641589 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c @@ -0,0 +1,159 @@ +/* + * Copyright 2009-2010 Freescale Semiconductor, Inc. 
+ * + * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM + * + * Author: Vivek Mahajan <vivek.mahajan@freescale.com> + * + * This file is derived from the original work done + * by Sylvain Munaut for the Bestcomm SRAM allocator. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/of_platform.h> +#include <asm/pgtable.h> +#include <asm/fsl_85xx_cache_sram.h> + +#include "fsl_85xx_cache_ctlr.h" + +struct mpc85xx_cache_sram *cache_sram; + +void *mpc85xx_cache_sram_alloc(unsigned int size, + phys_addr_t *phys, unsigned int align) +{ + unsigned long offset; + unsigned long flags; + + if (unlikely(cache_sram == NULL)) + return NULL; + + if (!size || (size > cache_sram->size) || (align > cache_sram->size)) { + pr_err("%s(): size(=%x) or align(=%x) zero or too big\n", + __func__, size, align); + return NULL; + } + + if ((align & (align - 1)) || align <= 1) { + pr_err("%s(): align(=%x) must be power of two and >1\n", + __func__, align); + return NULL; + } + + spin_lock_irqsave(&cache_sram->lock, flags); + offset = rh_alloc_align(cache_sram->rh, size, align, NULL); + spin_unlock_irqrestore(&cache_sram->lock, flags); + + if (IS_ERR_VALUE(offset)) + return NULL; + + *phys = cache_sram->base_phys + offset; + + return (unsigned char *)cache_sram->base_virt + offset; +} +EXPORT_SYMBOL(mpc85xx_cache_sram_alloc); + +void mpc85xx_cache_sram_free(void *ptr) +{ + unsigned long flags; + BUG_ON(!ptr); + + spin_lock_irqsave(&cache_sram->lock, flags); + rh_free(cache_sram->rh, ptr - cache_sram->base_virt); + spin_unlock_irqrestore(&cache_sram->lock, flags); +} +EXPORT_SYMBOL(mpc85xx_cache_sram_free); + +int __init instantiate_cache_sram(struct platform_device *dev, + struct sram_parameters sram_params) +{ + int ret = 0; + + if (cache_sram) { + dev_err(&dev->dev, "Already initialized cache-sram\n"); + return -EBUSY; + } + + cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL); + if (!cache_sram) { + dev_err(&dev->dev, "Out of memory for cache_sram structure\n"); + return -ENOMEM; + } + + cache_sram->base_phys = sram_params.sram_offset; + cache_sram->size = sram_params.sram_size; + + if (!request_mem_region(cache_sram->base_phys, cache_sram->size, + "fsl_85xx_cache_sram")) { + dev_err(&dev->dev, "%s: request memory failed\n", + dev->dev.of_node->full_name); + ret = -ENXIO; + goto out_free; + } + + cache_sram->base_virt = ioremap_prot(cache_sram->base_phys, + cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); + if (!cache_sram->base_virt) { + dev_err(&dev->dev, "%s: ioremap_prot failed\n", + dev->dev.of_node->full_name); + ret = -ENOMEM; + goto out_release; + } + + cache_sram->rh = rh_create(sizeof(unsigned int)); + if (IS_ERR(cache_sram->rh)) { + dev_err(&dev->dev, "%s: Unable to create remote heap\n", + dev->dev.of_node->full_name); 
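+		/* rh_create() reports failure via ERR_PTR(), not NULL */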
+ ret = PTR_ERR(cache_sram->rh); + goto out_unmap; + } + + rh_attach_region(cache_sram->rh, 0, cache_sram->size); + spin_lock_init(&cache_sram->lock); + + dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n", + (unsigned long long)cache_sram->base_phys, cache_sram->size); + + return 0; + +out_unmap: + iounmap(cache_sram->base_virt); + +out_release: + release_mem_region(cache_sram->base_phys, cache_sram->size); + +out_free: + kfree(cache_sram); + return ret; +} + +void remove_cache_sram(struct platform_device *dev) +{ + BUG_ON(!cache_sram); + + rh_detach_region(cache_sram->rh, 0, cache_sram->size); + rh_destroy(cache_sram->rh); + + iounmap(cache_sram->base_virt); + release_mem_region(cache_sram->base_phys, cache_sram->size); + + kfree(cache_sram); + cache_sram = NULL; + + dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n"); +} diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c new file mode 100644 index 00000000..5f88797d --- /dev/null +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c @@ -0,0 +1,230 @@ +/* + * Copyright 2009-2010 Freescale Semiconductor, Inc. + * + * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation + * + * Author: Vivek Mahajan <vivek.mahajan@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/kernel.h> +#include <linux/of_platform.h> +#include <asm/io.h> + +#include "fsl_85xx_cache_ctlr.h" + +static char *sram_size; +static char *sram_offset; +struct mpc85xx_l2ctlr __iomem *l2ctlr; + +static long get_cache_sram_size(void) +{ + unsigned long val; + + if (!sram_size || (strict_strtoul(sram_size, 0, &val) < 0)) + return -EINVAL; + + return val; +} + +static long get_cache_sram_offset(void) +{ + unsigned long val; + + if (!sram_offset || (strict_strtoul(sram_offset, 0, &val) < 0)) + return -EINVAL; + + return val; +} + +static int __init get_size_from_cmdline(char *str) +{ + if (!str) + return 0; + + sram_size = str; + return 1; +} + +static int __init get_offset_from_cmdline(char *str) +{ + if (!str) + return 0; + + sram_offset = str; + return 1; +} + +__setup("cache-sram-size=", get_size_from_cmdline); +__setup("cache-sram-offset=", get_offset_from_cmdline); + +static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev) +{ + long rval; + unsigned int rem; + unsigned char ways; + const unsigned int *prop; + unsigned int l2cache_size; + struct sram_parameters sram_params; + + if (!dev->dev.of_node) { + dev_err(&dev->dev, "Device's OF-node is NULL\n"); + return -EINVAL; + } + + prop = of_get_property(dev->dev.of_node, "cache-size", NULL); + if (!prop) { + dev_err(&dev->dev, "Missing L2 cache-size\n"); + return -EINVAL; + } + l2cache_size = *prop; + + sram_params.sram_size = get_cache_sram_size(); + if ((int)sram_params.sram_size <= 0) { + dev_err(&dev->dev, + "Entire L2 as cache, Aborting Cache-SRAM stuff\n"); + return -EINVAL; + } + + sram_params.sram_offset = get_cache_sram_offset(); + if ((int64_t)sram_params.sram_offset <= 0) { + dev_err(&dev->dev, + "Entire L2 as cache, provide a valid sram offset\n"); + return -EINVAL; + } + + + rem = l2cache_size % sram_params.sram_size; + ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size; + if (rem || (ways & (ways - 1))) { + dev_err(&dev->dev, "Illegal cache-sram-size in command line\n"); + return -EINVAL; + } + + l2ctlr = of_iomap(dev->dev.of_node, 0); + if (!l2ctlr) { + dev_err(&dev->dev, "Can't map L2 controller\n"); + return -EINVAL; + } + + /* + * Write bits[0-17] to srbar0 + */ + out_be32(&l2ctlr->srbar0, + sram_params.sram_offset & L2SRAM_BAR_MSK_LO18); + + /* + * Write bits[18-21] to srbare0 + */ +#ifdef CONFIG_PHYS_64BIT + out_be32(&l2ctlr->srbarea0, + (sram_params.sram_offset >> 32) & L2SRAM_BARE_MSK_HI4); +#endif + + clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI); + + switch (ways) { + case LOCK_WAYS_EIGHTH: + setbits32(&l2ctlr->ctl, + L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH); + break; + + case LOCK_WAYS_TWO_EIGHTH: + setbits32(&l2ctlr->ctl, + L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART); + break; + + case LOCK_WAYS_HALF: + setbits32(&l2ctlr->ctl, + L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF); + break; + + case LOCK_WAYS_FULL: + default: + setbits32(&l2ctlr->ctl, + L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL); + break; + } + eieio(); + + rval = instantiate_cache_sram(dev, sram_params); + if (rval < 0) { + dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n"); + iounmap(l2ctlr); + return -EINVAL; + } + + return 0; +} + +static int __devexit mpc85xx_l2ctlr_of_remove(struct platform_device *dev) +{ + BUG_ON(!l2ctlr); + + iounmap(l2ctlr); + remove_cache_sram(dev); + dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n"); + + return 0; +} + +static struct of_device_id mpc85xx_l2ctlr_of_match[] = { + { + .compatible = "fsl,p2020-l2-cache-controller", + }, + { + .compatible = 
"fsl,p2010-l2-cache-controller", + }, + { + .compatible = "fsl,p1020-l2-cache-controller", + }, + { + .compatible = "fsl,p1011-l2-cache-controller", + }, + { + .compatible = "fsl,p1013-l2-cache-controller", + }, + { + .compatible = "fsl,p1022-l2-cache-controller", + }, + {}, +}; + +static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { + .driver = { + .name = "fsl-l2ctlr", + .owner = THIS_MODULE, + .of_match_table = mpc85xx_l2ctlr_of_match, + }, + .probe = mpc85xx_l2ctlr_of_probe, + .remove = __devexit_p(mpc85xx_l2ctlr_of_remove), +}; + +static __init int mpc85xx_l2ctlr_of_init(void) +{ + return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver); +} + +static void __exit mpc85xx_l2ctlr_of_exit(void) +{ + platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver); +} + +subsys_initcall(mpc85xx_l2ctlr_of_init); +module_exit(mpc85xx_l2ctlr_of_exit); + +MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c new file mode 100644 index 00000000..7dd28853 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -0,0 +1,436 @@ +/* + * Freescale General-purpose Timers Module + * + * Copyright (c) Freescale Semicondutor, Inc. 2006. + * Shlomi Gridish <gridish@freescale.com> + * Jerry Huang <Chang-Ming.Huang@freescale.com> + * Copyright (c) MontaVista Software, Inc. 2008. + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <asm/fsl_gtm.h> + +#define GTCFR_STP(x) ((x) & 1 ? 1 << 5 : 1 << 1) +#define GTCFR_RST(x) ((x) & 1 ? 
1 << 4 : 1 << 0) + +#define GTMDR_ICLK_MASK (3 << 1) +#define GTMDR_ICLK_ICAS (0 << 1) +#define GTMDR_ICLK_ICLK (1 << 1) +#define GTMDR_ICLK_SLGO (2 << 1) +#define GTMDR_FRR (1 << 3) +#define GTMDR_ORI (1 << 4) +#define GTMDR_SPS(x) ((x) << 8) + +struct gtm_timers_regs { + u8 gtcfr1; /* Timer 1, Timer 2 global config register */ + u8 res0[0x3]; + u8 gtcfr2; /* Timer 3, timer 4 global config register */ + u8 res1[0xB]; + __be16 gtmdr1; /* Timer 1 mode register */ + __be16 gtmdr2; /* Timer 2 mode register */ + __be16 gtrfr1; /* Timer 1 reference register */ + __be16 gtrfr2; /* Timer 2 reference register */ + __be16 gtcpr1; /* Timer 1 capture register */ + __be16 gtcpr2; /* Timer 2 capture register */ + __be16 gtcnr1; /* Timer 1 counter */ + __be16 gtcnr2; /* Timer 2 counter */ + __be16 gtmdr3; /* Timer 3 mode register */ + __be16 gtmdr4; /* Timer 4 mode register */ + __be16 gtrfr3; /* Timer 3 reference register */ + __be16 gtrfr4; /* Timer 4 reference register */ + __be16 gtcpr3; /* Timer 3 capture register */ + __be16 gtcpr4; /* Timer 4 capture register */ + __be16 gtcnr3; /* Timer 3 counter */ + __be16 gtcnr4; /* Timer 4 counter */ + __be16 gtevr1; /* Timer 1 event register */ + __be16 gtevr2; /* Timer 2 event register */ + __be16 gtevr3; /* Timer 3 event register */ + __be16 gtevr4; /* Timer 4 event register */ + __be16 gtpsr1; /* Timer 1 prescale register */ + __be16 gtpsr2; /* Timer 2 prescale register */ + __be16 gtpsr3; /* Timer 3 prescale register */ + __be16 gtpsr4; /* Timer 4 prescale register */ + u8 res2[0x40]; +} __attribute__ ((packed)); + +struct gtm { + unsigned int clock; + struct gtm_timers_regs __iomem *regs; + struct gtm_timer timers[4]; + spinlock_t lock; + struct list_head list_node; +}; + +static LIST_HEAD(gtms); + +/** + * gtm_get_timer - request GTM timer to use it with the rest of GTM API + * Context: non-IRQ + * + * This function reserves GTM timer for later use. It returns gtm_timer + * structure to use with the rest of GTM API, you should use timer->irq + * to manage timer interrupt. + */ +struct gtm_timer *gtm_get_timer16(void) +{ + struct gtm *gtm = NULL; + int i; + + list_for_each_entry(gtm, >ms, list_node) { + spin_lock_irq(>m->lock); + + for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) { + if (!gtm->timers[i].requested) { + gtm->timers[i].requested = true; + spin_unlock_irq(>m->lock); + return >m->timers[i]; + } + } + + spin_unlock_irq(>m->lock); + } + + if (gtm) + return ERR_PTR(-EBUSY); + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL(gtm_get_timer16); + +/** + * gtm_get_specific_timer - request specific GTM timer + * @gtm: specific GTM, pass here GTM's device_node->data + * @timer: specific timer number, Timer1 is 0. + * Context: non-IRQ + * + * This function reserves GTM timer for later use. It returns gtm_timer + * structure to use with the rest of GTM API, you should use timer->irq + * to manage timer interrupt. + */ +struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm, + unsigned int timer) +{ + struct gtm_timer *ret = ERR_PTR(-EBUSY); + + if (timer > 3) + return ERR_PTR(-EINVAL); + + spin_lock_irq(>m->lock); + + if (gtm->timers[timer].requested) + goto out; + + ret = >m->timers[timer]; + ret->requested = true; + +out: + spin_unlock_irq(>m->lock); + return ret; +} +EXPORT_SYMBOL(gtm_get_specific_timer16); + +/** + * gtm_put_timer16 - release 16 bits GTM timer + * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer + * Context: any + * + * This function releases GTM timer so others may request it. 
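+ *
+ * A minimal usage sketch (illustrative only; error handling is trimmed,
+ * and the interrupt handler my_timer_isr is a hypothetical caller-supplied
+ * function, not part of this API):
+ *
+ *	struct gtm_timer *tmr = gtm_get_timer16();
+ *
+ *	if (IS_ERR(tmr))
+ *		return PTR_ERR(tmr);
+ *	request_irq(tmr->irq, my_timer_isr, 0, "my-gtm", tmr);
+ *	gtm_set_timer16(tmr, 10000, false);
+ *	...
+ *	free_irq(tmr->irq, tmr);
+ *	gtm_put_timer16(tmr);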
+ */ +void gtm_put_timer16(struct gtm_timer *tmr) +{ + gtm_stop_timer16(tmr); + + spin_lock_irq(&tmr->gtm->lock); + tmr->requested = false; + spin_unlock_irq(&tmr->gtm->lock); +} +EXPORT_SYMBOL(gtm_put_timer16); + +/* + * This is back-end for the exported functions, it's used to reset single + * timer in reference mode. + */ +static int gtm_set_ref_timer16(struct gtm_timer *tmr, int frequency, + int reference_value, bool free_run) +{ + struct gtm *gtm = tmr->gtm; + int num = tmr - >m->timers[0]; + unsigned int prescaler; + u8 iclk = GTMDR_ICLK_ICLK; + u8 psr; + u8 sps; + unsigned long flags; + int max_prescaler = 256 * 256 * 16; + + /* CPM2 doesn't have primary prescaler */ + if (!tmr->gtpsr) + max_prescaler /= 256; + + prescaler = gtm->clock / frequency; + /* + * We have two 8 bit prescalers -- primary and secondary (psr, sps), + * plus "slow go" mode (clk / 16). So, total prescale value is + * 16 * (psr + 1) * (sps + 1). Though, for CPM2 GTMs we losing psr. + */ + if (prescaler > max_prescaler) + return -EINVAL; + + if (prescaler > max_prescaler / 16) { + iclk = GTMDR_ICLK_SLGO; + prescaler /= 16; + } + + if (prescaler <= 256) { + psr = 0; + sps = prescaler - 1; + } else { + psr = 256 - 1; + sps = prescaler / 256 - 1; + } + + spin_lock_irqsave(>m->lock, flags); + + /* + * Properly reset timers: stop, reset, set up prescalers, reference + * value and clear event register. + */ + clrsetbits_8(tmr->gtcfr, ~(GTCFR_STP(num) | GTCFR_RST(num)), + GTCFR_STP(num) | GTCFR_RST(num)); + + setbits8(tmr->gtcfr, GTCFR_STP(num)); + + if (tmr->gtpsr) + out_be16(tmr->gtpsr, psr); + clrsetbits_be16(tmr->gtmdr, 0xFFFF, iclk | GTMDR_SPS(sps) | + GTMDR_ORI | (free_run ? GTMDR_FRR : 0)); + out_be16(tmr->gtcnr, 0); + out_be16(tmr->gtrfr, reference_value); + out_be16(tmr->gtevr, 0xFFFF); + + /* Let it be. */ + clrbits8(tmr->gtcfr, GTCFR_STP(num)); + + spin_unlock_irqrestore(>m->lock, flags); + + return 0; +} + +/** + * gtm_set_timer16 - (re)set 16 bit timer with arbitrary precision + * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer + * @usec: timer interval in microseconds + * @reload: if set, the timer will reset upon expiry rather than + * continue running free. + * Context: any + * + * This function (re)sets the GTM timer so that it counts up to the requested + * interval value, and fires the interrupt when the value is reached. This + * function will reduce the precision of the timer as needed in order for the + * requested timeout to fit in a 16-bit register. + */ +int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, bool reload) +{ + /* quite obvious, frequency which is enough for µSec precision */ + int freq = 1000000; + unsigned int bit; + + bit = fls_long(usec); + if (bit > 15) { + freq >>= bit - 15; + usec >>= bit - 15; + } + + if (!freq) + return -EINVAL; + + return gtm_set_ref_timer16(tmr, freq, usec, reload); +} +EXPORT_SYMBOL(gtm_set_timer16); + +/** + * gtm_set_exact_utimer16 - (re)set 16 bits timer + * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer + * @usec: timer interval in microseconds + * @reload: if set, the timer will reset upon expiry rather than + * continue running free. + * Context: any + * + * This function (re)sets GTM timer so that it counts up to the requested + * interval value, and fires the interrupt when the value is reached. If reload + * flag was set, timer will also reset itself upon reference value, otherwise + * it continues to increment. 
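+ *
+ * As a worked example of the prescaler arithmetic in gtm_set_ref_timer16()
+ * (the numbers are illustrative, not tied to any particular board): with
+ * gtm->clock at 166 MHz and the fixed 1 MHz reference frequency the
+ * required prescale is 166, which fits in the secondary prescaler alone,
+ * so psr = 0 and sps = 165.  A 333 MHz clock needs 333, which exceeds
+ * 256, so psr = 255 and sps = 333/256 - 1 = 0; the truncating division
+ * trades a little accuracy for range.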
+ * + * The _exact_ bit in the function name states that this function will not + * crop precision of the "usec" argument, thus usec is limited to 16 bits + * (single timer width). + */ +int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec, bool reload) +{ + /* quite obvious, frequency which is enough for µSec precision */ + const int freq = 1000000; + + /* + * We can lower the frequency (and probably power consumption) by + * dividing both frequency and usec by 2 until there is no remainder. + * But we won't bother with this unless savings are measured, so just + * run the timer as is. + */ + + return gtm_set_ref_timer16(tmr, freq, usec, reload); +} +EXPORT_SYMBOL(gtm_set_exact_timer16); + +/** + * gtm_stop_timer16 - stop single timer + * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer + * Context: any + * + * This function simply stops the GTM timer. + */ +void gtm_stop_timer16(struct gtm_timer *tmr) +{ + struct gtm *gtm = tmr->gtm; + int num = tmr - >m->timers[0]; + unsigned long flags; + + spin_lock_irqsave(>m->lock, flags); + + setbits8(tmr->gtcfr, GTCFR_STP(num)); + out_be16(tmr->gtevr, 0xFFFF); + + spin_unlock_irqrestore(>m->lock, flags); +} +EXPORT_SYMBOL(gtm_stop_timer16); + +/** + * gtm_ack_timer16 - acknowledge timer event (free-run timers only) + * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer + * @events: events mask to ack + * Context: any + * + * Thus function used to acknowledge timer interrupt event, use it inside the + * interrupt handler. + */ +void gtm_ack_timer16(struct gtm_timer *tmr, u16 events) +{ + out_be16(tmr->gtevr, events); +} +EXPORT_SYMBOL(gtm_ack_timer16); + +static void __init gtm_set_shortcuts(struct device_node *np, + struct gtm_timer *timers, + struct gtm_timers_regs __iomem *regs) +{ + /* + * Yeah, I don't like this either, but timers' registers a bit messed, + * so we have to provide shortcuts to write timer independent code. + * Alternative option is to create gt*() accessors, but that will be + * even uglier and cryptic. 
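+ *
+ * Note that timers 1 and 2 share gtcfr1 while timers 3 and 4 share
+ * gtcfr2, which is why the GTCFR_STP()/GTCFR_RST() macros select a bit
+ * pair within the shared register from the low bit of the timer index.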
+ */ + timers[0].gtcfr = ®s->gtcfr1; + timers[0].gtmdr = ®s->gtmdr1; + timers[0].gtcnr = ®s->gtcnr1; + timers[0].gtrfr = ®s->gtrfr1; + timers[0].gtevr = ®s->gtevr1; + + timers[1].gtcfr = ®s->gtcfr1; + timers[1].gtmdr = ®s->gtmdr2; + timers[1].gtcnr = ®s->gtcnr2; + timers[1].gtrfr = ®s->gtrfr2; + timers[1].gtevr = ®s->gtevr2; + + timers[2].gtcfr = ®s->gtcfr2; + timers[2].gtmdr = ®s->gtmdr3; + timers[2].gtcnr = ®s->gtcnr3; + timers[2].gtrfr = ®s->gtrfr3; + timers[2].gtevr = ®s->gtevr3; + + timers[3].gtcfr = ®s->gtcfr2; + timers[3].gtmdr = ®s->gtmdr4; + timers[3].gtcnr = ®s->gtcnr4; + timers[3].gtrfr = ®s->gtrfr4; + timers[3].gtevr = ®s->gtevr4; + + /* CPM2 doesn't have primary prescaler */ + if (!of_device_is_compatible(np, "fsl,cpm2-gtm")) { + timers[0].gtpsr = ®s->gtpsr1; + timers[1].gtpsr = ®s->gtpsr2; + timers[2].gtpsr = ®s->gtpsr3; + timers[3].gtpsr = ®s->gtpsr4; + } +} + +static int __init fsl_gtm_init(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "fsl,gtm") { + int i; + struct gtm *gtm; + const u32 *clock; + int size; + + gtm = kzalloc(sizeof(*gtm), GFP_KERNEL); + if (!gtm) { + pr_err("%s: unable to allocate memory\n", + np->full_name); + continue; + } + + spin_lock_init(>m->lock); + + clock = of_get_property(np, "clock-frequency", &size); + if (!clock || size != sizeof(*clock)) { + pr_err("%s: no clock-frequency\n", np->full_name); + goto err; + } + gtm->clock = *clock; + + for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) { + int ret; + struct resource irq; + + ret = of_irq_to_resource(np, i, &irq); + if (ret == NO_IRQ) { + pr_err("%s: not enough interrupts specified\n", + np->full_name); + goto err; + } + gtm->timers[i].irq = irq.start; + gtm->timers[i].gtm = gtm; + } + + gtm->regs = of_iomap(np, 0); + if (!gtm->regs) { + pr_err("%s: unable to iomap registers\n", + np->full_name); + goto err; + } + + gtm_set_shortcuts(np, gtm->timers, gtm->regs); + list_add(>m->list_node, >ms); + + /* We don't want to lose the node and its ->data */ + np->data = gtm; + of_node_get(np); + + continue; +err: + kfree(gtm); + } + return 0; +} +arch_initcall(fsl_gtm_init); diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c new file mode 100644 index 00000000..d917573c --- /dev/null +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -0,0 +1,353 @@ +/* + * Freescale LBC and UPM routines. + * + * Copyright © 2007-2008 MontaVista Software, Inc. + * Copyright © 2010 Freescale Semiconductor + * + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * Author: Jack Lan <Jack.Lan@freescale.com> + * Author: Roy Zang <tie-fei.zang@freescale.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/mod_devicetable.h> +#include <asm/prom.h> +#include <asm/fsl_lbc.h> + +static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); +struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; +EXPORT_SYMBOL(fsl_lbc_ctrl_dev); + +/** + * fsl_lbc_addr - convert the base address + * @addr_base: base address of the memory bank + * + * This function converts a base address of lbc into the right format for the + * BR register. If the SOC has eLBC then it returns 32bit physical address + * else it convers a 34bit local bus physical address to correct format of + * 32bit address for BR register (Example: MPC8641). + */ +u32 fsl_lbc_addr(phys_addr_t addr_base) +{ + struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node; + u32 addr = addr_base & 0xffff8000; + + if (of_device_is_compatible(np, "fsl,elbc")) + return addr; + + return addr | ((addr_base & 0x300000000ull) >> 19); +} +EXPORT_SYMBOL(fsl_lbc_addr); + +/** + * fsl_lbc_find - find Localbus bank + * @addr_base: base address of the memory bank + * + * This function walks LBC banks comparing "Base address" field of the BR + * registers with the supplied addr_base argument. When bases match this + * function returns bank number (starting with 0), otherwise it returns + * appropriate errno value. + */ +int fsl_lbc_find(phys_addr_t addr_base) +{ + int i; + struct fsl_lbc_regs __iomem *lbc; + + if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) + return -ENODEV; + + lbc = fsl_lbc_ctrl_dev->regs; + for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) { + __be32 br = in_be32(&lbc->bank[i].br); + __be32 or = in_be32(&lbc->bank[i].or); + + if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base)) + return i; + } + + return -ENOENT; +} +EXPORT_SYMBOL(fsl_lbc_find); + +/** + * fsl_upm_find - find pre-programmed UPM via base address + * @addr_base: base address of the memory bank controlled by the UPM + * @upm: pointer to the allocated fsl_upm structure + * + * This function fills fsl_upm structure so you can use it with the rest of + * UPM API. On success this function returns 0, otherwise it returns + * appropriate errno value. 
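+ *
+ * A sketch of the typical pairing with fsl_upm_run_pattern(); io_base
+ * here stands for the caller's own ioremap() of the chip select and is
+ * an assumption of the example, not something this API provides:
+ *
+ *	struct fsl_upm upm;
+ *
+ *	if (!fsl_upm_find(addr_base, &upm))
+ *		fsl_upm_run_pattern(&upm, io_base, mar);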
+ */ +int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm) +{ + int bank; + __be32 br; + struct fsl_lbc_regs __iomem *lbc; + + bank = fsl_lbc_find(addr_base); + if (bank < 0) + return bank; + + if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) + return -ENODEV; + + lbc = fsl_lbc_ctrl_dev->regs; + br = in_be32(&lbc->bank[bank].br); + + switch (br & BR_MSEL) { + case BR_MS_UPMA: + upm->mxmr = &lbc->mamr; + break; + case BR_MS_UPMB: + upm->mxmr = &lbc->mbmr; + break; + case BR_MS_UPMC: + upm->mxmr = &lbc->mcmr; + break; + default: + return -EINVAL; + } + + switch (br & BR_PS) { + case BR_PS_8: + upm->width = 8; + break; + case BR_PS_16: + upm->width = 16; + break; + case BR_PS_32: + upm->width = 32; + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(fsl_upm_find); + +/** + * fsl_upm_run_pattern - actually run an UPM pattern + * @upm: pointer to the fsl_upm structure obtained via fsl_upm_find + * @io_base: remapped pointer to where memory access should happen + * @mar: MAR register content during pattern execution + * + * This function triggers dummy write to the memory specified by the io_base, + * thus UPM pattern actually executed. Note that mar usage depends on the + * pre-programmed AMX bits in the UPM RAM. + */ +int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar) +{ + int ret = 0; + unsigned long flags; + + if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) + return -ENODEV; + + spin_lock_irqsave(&fsl_lbc_lock, flags); + + out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar); + + switch (upm->width) { + case 8: + out_8(io_base, 0x0); + break; + case 16: + out_be16(io_base, 0x0); + break; + case 32: + out_be32(io_base, 0x0); + break; + default: + ret = -EINVAL; + break; + } + + spin_unlock_irqrestore(&fsl_lbc_lock, flags); + + return ret; +} +EXPORT_SYMBOL(fsl_upm_run_pattern); + +static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, + struct device_node *node) +{ + struct fsl_lbc_regs __iomem *lbc = ctrl->regs; + + /* clear event registers */ + setbits32(&lbc->ltesr, LTESR_CLEAR); + out_be32(&lbc->lteatr, 0); + out_be32(&lbc->ltear, 0); + out_be32(&lbc->lteccr, LTECCR_CLEAR); + out_be32(&lbc->ltedr, LTEDR_ENABLE); + + /* Set the monitor timeout value to the maximum for erratum A001 */ + if (of_device_is_compatible(node, "fsl,elbc")) + clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS); + + return 0; +} + +/* + * NOTE: This interrupt is used to report localbus events of various kinds, + * such as transaction errors on the chipselects. 
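+ * The handler below decodes LTESR: bus-monitor time-outs, write-protect,
+ * atomic read/write and chip-select errors are only logged, while FCM
+ * command time-outs, parity/ECC errors and command completions also wake
+ * any sleeper on ctrl->irq_wait.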
+ */ + +static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data) +{ + struct fsl_lbc_ctrl *ctrl = data; + struct fsl_lbc_regs __iomem *lbc = ctrl->regs; + u32 status; + + status = in_be32(&lbc->ltesr); + if (!status) + return IRQ_NONE; + + out_be32(&lbc->ltesr, LTESR_CLEAR); + out_be32(&lbc->lteatr, 0); + out_be32(&lbc->ltear, 0); + ctrl->irq_status = status; + + if (status & LTESR_BM) + dev_err(ctrl->dev, "Local bus monitor time-out: " + "LTESR 0x%08X\n", status); + if (status & LTESR_WP) + dev_err(ctrl->dev, "Write protect error: " + "LTESR 0x%08X\n", status); + if (status & LTESR_ATMW) + dev_err(ctrl->dev, "Atomic write error: " + "LTESR 0x%08X\n", status); + if (status & LTESR_ATMR) + dev_err(ctrl->dev, "Atomic read error: " + "LTESR 0x%08X\n", status); + if (status & LTESR_CS) + dev_err(ctrl->dev, "Chip select error: " + "LTESR 0x%08X\n", status); + if (status & LTESR_UPM) + ; + if (status & LTESR_FCT) { + dev_err(ctrl->dev, "FCM command time-out: " + "LTESR 0x%08X\n", status); + smp_wmb(); + wake_up(&ctrl->irq_wait); + } + if (status & LTESR_PAR) { + dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: " + "LTESR 0x%08X\n", status); + smp_wmb(); + wake_up(&ctrl->irq_wait); + } + if (status & LTESR_CC) { + smp_wmb(); + wake_up(&ctrl->irq_wait); + } + if (status & ~LTESR_MASK) + dev_err(ctrl->dev, "Unknown error: " + "LTESR 0x%08X\n", status); + return IRQ_HANDLED; +} + +/* + * fsl_lbc_ctrl_probe + * + * called by device layer when it finds a device matching + * one our driver can handled. This code allocates all of + * the resources needed for the controller only. The + * resources for the NAND banks themselves are allocated + * in the chip probe function. +*/ + +static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) +{ + int ret; + + if (!dev->dev.of_node) { + dev_err(&dev->dev, "Device OF-Node is NULL"); + return -EFAULT; + } + + fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL); + if (!fsl_lbc_ctrl_dev) + return -ENOMEM; + + dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev); + + spin_lock_init(&fsl_lbc_ctrl_dev->lock); + init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait); + + fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); + if (!fsl_lbc_ctrl_dev->regs) { + dev_err(&dev->dev, "failed to get memory region\n"); + ret = -ENODEV; + goto err; + } + + fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); + if (fsl_lbc_ctrl_dev->irq == NO_IRQ) { + dev_err(&dev->dev, "failed to get irq resource\n"); + ret = -ENODEV; + goto err; + } + + fsl_lbc_ctrl_dev->dev = &dev->dev; + + ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node); + if (ret < 0) + goto err; + + ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0, + "fsl-lbc", fsl_lbc_ctrl_dev); + if (ret != 0) { + dev_err(&dev->dev, "failed to install irq (%d)\n", + fsl_lbc_ctrl_dev->irq); + ret = fsl_lbc_ctrl_dev->irq; + goto err; + } + + /* Enable interrupts for any detected events */ + out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE); + + return 0; + +err: + iounmap(fsl_lbc_ctrl_dev->regs); + kfree(fsl_lbc_ctrl_dev); + return ret; +} + +static const struct of_device_id fsl_lbc_match[] = { + { .compatible = "fsl,elbc", }, + { .compatible = "fsl,pq3-localbus", }, + { .compatible = "fsl,pq2-localbus", }, + { .compatible = "fsl,pq2pro-localbus", }, + {}, +}; + +static struct platform_driver fsl_lbc_ctrl_driver = { + .driver = { + .name = "fsl-lbc", + .of_match_table = fsl_lbc_match, + }, + .probe = fsl_lbc_ctrl_probe, +}; + +static int __init fsl_lbc_init(void) +{ + return 
platform_driver_register(&fsl_lbc_ctrl_driver); +} +module_init(fsl_lbc_init); diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c new file mode 100644 index 00000000..92e78333 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -0,0 +1,457 @@ +/* + * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. + * + * Author: Tony Li <tony.li@freescale.com> + * Jason Jin <Jason.jin@freescale.com> + * + * The hwirq alloc and free code reuse from sysdev/mpic_msi.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + */ +#include <linux/irq.h> +#include <linux/bootmem.h> +#include <linux/msi.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/of_platform.h> +#include <sysdev/fsl_soc.h> +#include <asm/prom.h> +#include <asm/hw_irq.h> +#include <asm/ppc-pci.h> +#include <asm/mpic.h> +#include "fsl_msi.h" +#include "fsl_pci.h" + +LIST_HEAD(msi_head); + +struct fsl_msi_feature { + u32 fsl_pic_ip; + u32 msiir_offset; +}; + +struct fsl_msi_cascade_data { + struct fsl_msi *msi_data; + int index; +}; + +static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg) +{ + return in_be32(base + (reg >> 2)); +} + +/* + * We do not need this actually. The MSIR register has been read once + * in the cascade interrupt. So, this MSI interrupt has been acked +*/ +static void fsl_msi_end_irq(struct irq_data *d) +{ +} + +static struct irq_chip fsl_msi_chip = { + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, + .irq_ack = fsl_msi_end_irq, + .name = "FSL-MSI", +}; + +static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct fsl_msi *msi_data = h->host_data; + struct irq_chip *chip = &fsl_msi_chip; + + irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING); + + irq_set_chip_data(virq, msi_data); + irq_set_chip_and_handler(virq, chip, handle_edge_irq); + + return 0; +} + +static struct irq_host_ops fsl_msi_host_ops = { + .map = fsl_msi_host_map, +}; + +static int fsl_msi_init_allocator(struct fsl_msi *msi_data) +{ + int rc; + + rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, + msi_data->irqhost->of_node); + if (rc) + return rc; + + rc = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap); + if (rc < 0) { + msi_bitmap_free(&msi_data->bitmap); + return rc; + } + + return 0; +} + +static int fsl_msi_check_device(struct pci_dev *pdev, int nvec, int type) +{ + if (type == PCI_CAP_ID_MSIX) + pr_debug("fslmsi: MSI-X untested, trying anyway.\n"); + + return 0; +} + +static void fsl_teardown_msi_irqs(struct pci_dev *pdev) +{ + struct msi_desc *entry; + struct fsl_msi *msi_data; + + list_for_each_entry(entry, &pdev->msi_list, list) { + if (entry->irq == NO_IRQ) + continue; + msi_data = irq_get_chip_data(entry->irq); + irq_set_msi_desc(entry->irq, NULL); + msi_bitmap_free_hwirqs(&msi_data->bitmap, + virq_to_hw(entry->irq), 1); + irq_dispose_mapping(entry->irq); + } + + return; +} + +static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, + struct msi_msg *msg, + struct fsl_msi *fsl_msi_data) +{ + struct fsl_msi *msi_data = fsl_msi_data; + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + u64 base = fsl_pci_immrbar_base(hose); + + msg->address_lo = msi_data->msi_addr_lo + lower_32_bits(base); + msg->address_hi = msi_data->msi_addr_hi + upper_32_bits(base); + + msg->data = hwirq; + + pr_debug("%s: allocated srs: %d, ibs: %d\n", + __func__, hwirq / 
IRQS_PER_MSI_REG, hwirq % IRQS_PER_MSI_REG); +} + +static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int rc, hwirq = -ENOMEM; + unsigned int virq; + struct msi_desc *entry; + struct msi_msg msg; + struct fsl_msi *msi_data; + + list_for_each_entry(entry, &pdev->msi_list, list) { + list_for_each_entry(msi_data, &msi_head, list) { + hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); + if (hwirq >= 0) + break; + } + + if (hwirq < 0) { + rc = hwirq; + pr_debug("%s: fail allocating msi interrupt\n", + __func__); + goto out_free; + } + + virq = irq_create_mapping(msi_data->irqhost, hwirq); + + if (virq == NO_IRQ) { + pr_debug("%s: fail mapping hwirq 0x%x\n", + __func__, hwirq); + msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); + rc = -ENOSPC; + goto out_free; + } + /* chip_data is msi_data via host->hostdata in host->map() */ + irq_set_msi_desc(virq, entry); + + fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); + write_msi_msg(virq, &msg); + } + return 0; + +out_free: + /* free by the caller of this function */ + return rc; +} + +static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_data *idata = irq_desc_get_irq_data(desc); + unsigned int cascade_irq; + struct fsl_msi *msi_data; + int msir_index = -1; + u32 msir_value = 0; + u32 intr_index; + u32 have_shift = 0; + struct fsl_msi_cascade_data *cascade_data; + + cascade_data = irq_get_handler_data(irq); + msi_data = cascade_data->msi_data; + + raw_spin_lock(&desc->lock); + if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { + if (chip->irq_mask_ack) + chip->irq_mask_ack(idata); + else { + chip->irq_mask(idata); + chip->irq_ack(idata); + } + } + + if (unlikely(irqd_irq_inprogress(idata))) + goto unlock; + + msir_index = cascade_data->index; + + if (msir_index >= NR_MSI_REG) + cascade_irq = NO_IRQ; + + irqd_set_chained_irq_inprogress(idata); + switch (msi_data->feature & FSL_PIC_IP_MASK) { + case FSL_PIC_IP_MPIC: + msir_value = fsl_msi_read(msi_data->msi_regs, + msir_index * 0x10); + break; + case FSL_PIC_IP_IPIC: + msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); + break; + } + + while (msir_value) { + intr_index = ffs(msir_value) - 1; + + cascade_irq = irq_linear_revmap(msi_data->irqhost, + msir_index * IRQS_PER_MSI_REG + + intr_index + have_shift); + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); + have_shift += intr_index + 1; + msir_value = msir_value >> (intr_index + 1); + } + irqd_clr_chained_irq_inprogress(idata); + + switch (msi_data->feature & FSL_PIC_IP_MASK) { + case FSL_PIC_IP_MPIC: + chip->irq_eoi(idata); + break; + case FSL_PIC_IP_IPIC: + if (!irqd_irq_disabled(idata) && chip->irq_unmask) + chip->irq_unmask(idata); + break; + } +unlock: + raw_spin_unlock(&desc->lock); +} + +static int fsl_of_msi_remove(struct platform_device *ofdev) +{ + struct fsl_msi *msi = platform_get_drvdata(ofdev); + int virq, i; + struct fsl_msi_cascade_data *cascade_data; + + if (msi->list.prev != NULL) + list_del(&msi->list); + for (i = 0; i < NR_MSI_REG; i++) { + virq = msi->msi_virqs[i]; + if (virq != NO_IRQ) { + cascade_data = irq_get_handler_data(virq); + kfree(cascade_data); + irq_dispose_mapping(virq); + } + } + if (msi->bitmap.bitmap) + msi_bitmap_free(&msi->bitmap); + iounmap(msi->msi_regs); + kfree(msi); + + return 0; +} + +static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, + struct platform_device *dev, + int offset, int irq_index) +{ + struct fsl_msi_cascade_data *cascade_data = 
NULL; + int virt_msir; + + virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index); + if (virt_msir == NO_IRQ) { + dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n", + __func__, irq_index); + return 0; + } + + cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL); + if (!cascade_data) { + dev_err(&dev->dev, "No memory for MSI cascade data\n"); + return -ENOMEM; + } + + msi->msi_virqs[irq_index] = virt_msir; + cascade_data->index = offset + irq_index; + cascade_data->msi_data = msi; + irq_set_handler_data(virt_msir, cascade_data); + irq_set_chained_handler(virt_msir, fsl_msi_cascade); + + return 0; +} + +static const struct of_device_id fsl_of_msi_ids[]; +static int __devinit fsl_of_msi_probe(struct platform_device *dev) +{ + const struct of_device_id *match; + struct fsl_msi *msi; + struct resource res; + int err, i, j, irq_index, count; + int rc; + const u32 *p; + struct fsl_msi_feature *features; + int len; + u32 offset; + static const u32 all_avail[] = { 0, NR_MSI_IRQS }; + + match = of_match_device(fsl_of_msi_ids, &dev->dev); + if (!match) + return -EINVAL; + features = match->data; + + printk(KERN_DEBUG "Setting up Freescale MSI support\n"); + + msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL); + if (!msi) { + dev_err(&dev->dev, "No memory for MSI structure\n"); + return -ENOMEM; + } + platform_set_drvdata(dev, msi); + + msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, + NR_MSI_IRQS, &fsl_msi_host_ops, 0); + + if (msi->irqhost == NULL) { + dev_err(&dev->dev, "No memory for MSI irqhost\n"); + err = -ENOMEM; + goto error_out; + } + + /* Get the MSI reg base */ + err = of_address_to_resource(dev->dev.of_node, 0, &res); + if (err) { + dev_err(&dev->dev, "%s resource error!\n", + dev->dev.of_node->full_name); + goto error_out; + } + + msi->msi_regs = ioremap(res.start, res.end - res.start + 1); + if (!msi->msi_regs) { + dev_err(&dev->dev, "ioremap problem failed\n"); + goto error_out; + } + + msi->feature = features->fsl_pic_ip; + + msi->irqhost->host_data = msi; + + msi->msi_addr_hi = 0x0; + msi->msi_addr_lo = features->msiir_offset + (res.start & 0xfffff); + + rc = fsl_msi_init_allocator(msi); + if (rc) { + dev_err(&dev->dev, "Error allocating MSI bitmap\n"); + goto error_out; + } + + p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); + if (p && len % (2 * sizeof(u32)) != 0) { + dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n", + __func__); + err = -EINVAL; + goto error_out; + } + + if (!p) + p = all_avail; + + for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) { + if (p[i * 2] % IRQS_PER_MSI_REG || + p[i * 2 + 1] % IRQS_PER_MSI_REG) { + printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n", + __func__, dev->dev.of_node->full_name, + p[i * 2 + 1], p[i * 2]); + err = -EINVAL; + goto error_out; + } + + offset = p[i * 2] / IRQS_PER_MSI_REG; + count = p[i * 2 + 1] / IRQS_PER_MSI_REG; + + for (j = 0; j < count; j++, irq_index++) { + err = fsl_msi_setup_hwirq(msi, dev, offset, irq_index); + if (err) + goto error_out; + } + } + + list_add_tail(&msi->list, &msi_head); + + /* The multiple setting ppc_md.setup_msi_irqs will not harm things */ + if (!ppc_md.setup_msi_irqs) { + ppc_md.setup_msi_irqs = fsl_setup_msi_irqs; + ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs; + ppc_md.msi_check_device = fsl_msi_check_device; + } else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) { + dev_err(&dev->dev, "Different MSI driver already installed!\n"); + err = -ENODEV; + goto 
error_out; + } + return 0; +error_out: + fsl_of_msi_remove(dev); + return err; +} + +static const struct fsl_msi_feature mpic_msi_feature = { + .fsl_pic_ip = FSL_PIC_IP_MPIC, + .msiir_offset = 0x140, +}; + +static const struct fsl_msi_feature ipic_msi_feature = { + .fsl_pic_ip = FSL_PIC_IP_IPIC, + .msiir_offset = 0x38, +}; + +static const struct of_device_id fsl_of_msi_ids[] = { + { + .compatible = "fsl,mpic-msi", + .data = (void *)&mpic_msi_feature, + }, + { + .compatible = "fsl,ipic-msi", + .data = (void *)&ipic_msi_feature, + }, + {} +}; + +static struct platform_driver fsl_of_msi_driver = { + .driver = { + .name = "fsl-msi", + .owner = THIS_MODULE, + .of_match_table = fsl_of_msi_ids, + }, + .probe = fsl_of_msi_probe, + .remove = fsl_of_msi_remove, +}; + +static __init int fsl_of_msi_init(void) +{ + return platform_driver_register(&fsl_of_msi_driver); +} + +subsys_initcall(fsl_of_msi_init); diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h new file mode 100644 index 00000000..624580c2 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_msi.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2007-2008 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: Tony Li <tony.li@freescale.com> + * Jason Jin <Jason.jin@freescale.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + */ +#ifndef _POWERPC_SYSDEV_FSL_MSI_H +#define _POWERPC_SYSDEV_FSL_MSI_H + +#include <asm/msi_bitmap.h> + +#define NR_MSI_REG 8 +#define IRQS_PER_MSI_REG 32 +#define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG) + +#define FSL_PIC_IP_MASK 0x0000000F +#define FSL_PIC_IP_MPIC 0x00000001 +#define FSL_PIC_IP_IPIC 0x00000002 + +struct fsl_msi { + struct irq_host *irqhost; + + unsigned long cascade_irq; + + u32 msi_addr_lo; + u32 msi_addr_hi; + void __iomem *msi_regs; + u32 feature; + int msi_virqs[NR_MSI_REG]; + + struct msi_bitmap bitmap; + + struct list_head list; /* support multiple MSI banks */ +}; + +#endif /* _POWERPC_SYSDEV_FSL_MSI_H */ + diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c new file mode 100644 index 00000000..68ca9290 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -0,0 +1,753 @@ +/* + * MPC83xx/85xx/86xx PCI/PCIE support routing. + * + * Copyright 2007-2011 Freescale Semiconductor, Inc. + * Copyright 2008-2009 MontaVista Software, Inc. + * + * Initial author: Xianghua Xiao <x.xiao@freescale.com> + * Recode: ZHANG WEI <wei.zhang@freescale.com> + * Rewrite the routing for Frescale PCI and PCI Express + * Roy Zang <tie-fei.zang@freescale.com> + * MPC83xx PCI-Express support: + * Tony Li <tony.li@freescale.com> + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/log2.h> +#include <linux/slab.h> + +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> +#include <sysdev/fsl_soc.h> +#include <sysdev/fsl_pci.h> + +static int fsl_pcie_bus_fixup, is_mpc83xx_pci; + +static void __init quirk_fsl_pcie_header(struct pci_dev *dev) +{ + /* if we aren't a PCIe don't bother */ + if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) + return; + + dev->class = PCI_CLASS_BRIDGE_PCI << 8; + fsl_pcie_bus_fixup = 1; + return; +} + +static int __init fsl_pcie_check_link(struct pci_controller *hose) +{ + u32 val; + + early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val); + if (val < PCIE_LTSSM_L0) + return 1; + return 0; +} + +#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) +static int __init setup_one_atmu(struct ccsr_pci __iomem *pci, + unsigned int index, const struct resource *res, + resource_size_t offset) +{ + resource_size_t pci_addr = res->start - offset; + resource_size_t phys_addr = res->start; + resource_size_t size = res->end - res->start + 1; + u32 flags = 0x80044000; /* enable & mem R/W */ + unsigned int i; + + pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n", + (u64)res->start, (u64)size); + + if (res->flags & IORESOURCE_PREFETCH) + flags |= 0x10000000; /* enable relaxed ordering */ + + for (i = 0; size > 0; i++) { + unsigned int bits = min(__ilog2(size), + __ffs(pci_addr | phys_addr)); + + if (index + i >= 5) + return -1; + + out_be32(&pci->pow[index + i].potar, pci_addr >> 12); + out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44); + out_be32(&pci->pow[index + i].powbar, phys_addr >> 12); + out_be32(&pci->pow[index + i].powar, flags | (bits - 1)); + + pci_addr += (resource_size_t)1U << bits; + phys_addr += (resource_size_t)1U << bits; + size -= (resource_size_t)1U << bits; + } + + return i; +} + +/* atmu setup for fsl pci/pcie controller */ +static void __init setup_pci_atmu(struct pci_controller *hose, + struct resource *rsrc) +{ + struct ccsr_pci __iomem *pci; + int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4; + u64 mem, sz, paddr_hi = 0; + u64 paddr_lo = ULLONG_MAX; + u32 pcicsrbar = 0, pcicsrbar_sz; + u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL | + PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP; + char *name = hose->dn->full_name; + + pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", + (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1); + + if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) { + win_idx = 2; + start_idx = 0; + end_idx = 3; + } + + pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1); + if (!pci) { + dev_err(hose->parent, "Unable to map ATMU registers\n"); + return; + } + + /* Disable all windows (except powar0 since it's ignored) */ + for(i = 1; i < 5; i++) + out_be32(&pci->pow[i].powar, 0); + for (i = start_idx; i < end_idx; i++) + out_be32(&pci->piw[i].piwar, 0); + + /* Setup outbound MEM window */ + for(i = 0, j = 1; i < 3; i++) { + if (!(hose->mem_resources[i].flags & IORESOURCE_MEM)) + continue; + + paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start); + paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end); + + n = setup_one_atmu(pci, j, &hose->mem_resources[i], + hose->pci_mem_offset); + + if (n < 0 || j >= 5) { + pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i); + 
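/* Mark the window unusable so the PCI core will not
+			 * assign device BARs from a range no ATMU translates. */
+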
hose->mem_resources[i].flags |= IORESOURCE_DISABLED; + } else + j += n; + } + + /* Setup outbound IO window */ + if (hose->io_resource.flags & IORESOURCE_IO) { + if (j >= 5) { + pr_err("Ran out of outbound PCI ATMUs for IO resource\n"); + } else { + pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, " + "phy base 0x%016llx.\n", + (u64)hose->io_resource.start, + (u64)hose->io_resource.end - (u64)hose->io_resource.start + 1, + (u64)hose->io_base_phys); + out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12)); + out_be32(&pci->pow[j].potear, 0); + out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12)); + /* Enable, IO R/W */ + out_be32(&pci->pow[j].powar, 0x80088000 + | (__ilog2(hose->io_resource.end + - hose->io_resource.start + 1) - 1)); + } + } + + /* convert to pci address space */ + paddr_hi -= hose->pci_mem_offset; + paddr_lo -= hose->pci_mem_offset; + + if (paddr_hi == paddr_lo) { + pr_err("%s: No outbound window space\n", name); + return ; + } + + if (paddr_lo == 0) { + pr_err("%s: No space for inbound window\n", name); + return ; + } + + /* setup PCSRBAR/PEXCSRBAR */ + early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff); + early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz); + pcicsrbar_sz = ~pcicsrbar_sz + 1; + + if (paddr_hi < (0x100000000ull - pcicsrbar_sz) || + (paddr_lo > 0x100000000ull)) + pcicsrbar = 0x100000000ull - pcicsrbar_sz; + else + pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz; + early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar); + + paddr_lo = min(paddr_lo, (u64)pcicsrbar); + + pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar); + + /* Setup inbound mem window */ + mem = memblock_end_of_DRAM(); + sz = min(mem, paddr_lo); + mem_log = __ilog2_u64(sz); + + /* PCIe can overmap inbound & outbound since RX & TX are separated */ + if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { + /* Size window to exact size if power-of-two or one size up */ + if ((1ull << mem_log) != mem) { + if ((1ull << mem_log) > mem) + pr_info("%s: Setting PCI inbound window " + "greater than memory size\n", name); + mem_log++; + } + + piwar |= ((mem_log - 1) & PIWAR_SZ_MASK); + + /* Setup inbound memory window */ + out_be32(&pci->piw[win_idx].pitar, 0x00000000); + out_be32(&pci->piw[win_idx].piwbar, 0x00000000); + out_be32(&pci->piw[win_idx].piwar, piwar); + win_idx--; + + hose->dma_window_base_cur = 0x00000000; + hose->dma_window_size = (resource_size_t)sz; + } else { + u64 paddr = 0; + + /* Setup inbound memory window */ + out_be32(&pci->piw[win_idx].pitar, paddr >> 12); + out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); + out_be32(&pci->piw[win_idx].piwar, (piwar | (mem_log - 1))); + win_idx--; + + paddr += 1ull << mem_log; + sz -= 1ull << mem_log; + + if (sz) { + mem_log = __ilog2_u64(sz); + piwar |= (mem_log - 1); + + out_be32(&pci->piw[win_idx].pitar, paddr >> 12); + out_be32(&pci->piw[win_idx].piwbar, paddr >> 12); + out_be32(&pci->piw[win_idx].piwar, piwar); + win_idx--; + + paddr += 1ull << mem_log; + } + + hose->dma_window_base_cur = 0x00000000; + hose->dma_window_size = (resource_size_t)paddr; + } + + if (hose->dma_window_size < mem) { +#ifndef CONFIG_SWIOTLB + pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to " + "map - enable CONFIG_SWIOTLB to avoid dma errors.\n", + name); +#endif + /* adjusting outbound windows could reclaim space in mem map */ + if (paddr_hi < 0xffffffffull) + pr_warning("%s: WARNING: Outbound window cfg leaves " + "gaps in memory map. 
Adjusting the memory map " + "could reduce unnecessary bounce buffering.\n", + name); + + pr_info("%s: DMA window size is 0x%llx\n", name, + (u64)hose->dma_window_size); + } + + iounmap(pci); +} + +static void __init setup_pci_cmd(struct pci_controller *hose) +{ + u16 cmd; + int cap_x; + + early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY + | PCI_COMMAND_IO; + early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd); + + cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX); + if (cap_x) { + int pci_x_cmd = cap_x + PCI_X_CMD; + cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ + | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E; + early_write_config_word(hose, 0, 0, pci_x_cmd, cmd); + } else { + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); + } +} + +void fsl_pcibios_fixup_bus(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + int i; + + if ((bus->parent == hose->bus) && + ((fsl_pcie_bus_fixup && + early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) || + (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK))) + { + for (i = 0; i < 4; ++i) { + struct resource *res = bus->resource[i]; + struct resource *par = bus->parent->resource[i]; + if (res) { + res->start = 0; + res->end = 0; + res->flags = 0; + } + if (res && par) { + res->start = par->start; + res->end = par->end; + res->flags = par->flags; + } + } + } +} + +int __init fsl_add_bridge(struct device_node *dev, int is_primary) +{ + int len; + struct pci_controller *hose; + struct resource rsrc; + const int *bus_range; + + if (!of_device_is_available(dev)) { + pr_warning("%s: disabled\n", dev->full_name); + return -ENODEV; + } + + pr_debug("Adding PCI host bridge %s\n", dev->full_name); + + /* Fetch host bridge registers address */ + if (of_address_to_resource(dev, 0, &rsrc)) { + printk(KERN_WARNING "Can't get pci register base!"); + return -ENOMEM; + } + + /* Get bus range if any */ + bus_range = of_get_property(dev, "bus-range", &len); + if (bus_range == NULL || len < 2 * sizeof(int)) + printk(KERN_WARNING "Can't get bus-range for %s, assume" + " bus 0\n", dev->full_name); + + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); + hose = pcibios_alloc_controller(dev); + if (!hose) + return -ENOMEM; + + hose->first_busno = bus_range ? bus_range[0] : 0x0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4, + PPC_INDIRECT_TYPE_BIG_ENDIAN); + setup_pci_cmd(hose); + + /* check PCI express link status */ + if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { + hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG | + PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS; + if (fsl_pcie_check_link(hose)) + hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; + } + + printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. 
" + "Firmware bus number: %d->%d\n", + (unsigned long long)rsrc.start, hose->first_busno, + hose->last_busno); + + pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", + hose, hose->cfg_addr, hose->cfg_data); + + /* Interpret the "ranges" property */ + /* This also maps the I/O region and sets isa_io/mem_base */ + pci_process_bridge_OF_ranges(hose, dev, is_primary); + + /* Setup PEX window registers */ + setup_pci_atmu(hose, &rsrc); + + return 0; +} + +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8548, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8543, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8547E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8545, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8569, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8568, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8567, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8533, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8544, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8572, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); 
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020, quirk_fsl_pcie_header); +#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ + +#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8308, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8315, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8377, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378E, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8378, quirk_fsl_pcie_header); + +struct mpc83xx_pcie_priv { + void __iomem *cfg_type0; + void __iomem *cfg_type1; + u32 dev_base; +}; + +struct pex_inbound_window { + u32 ar; + u32 tar; + u32 barl; + u32 barh; +}; + +/* + * With the convention of u-boot, the PCIE outbound window 0 serves + * as configuration transactions outbound. + */ +#define PEX_OUTWIN0_BAR 0xCA4 +#define PEX_OUTWIN0_TAL 0xCA8 +#define PEX_OUTWIN0_TAH 0xCAC +#define PEX_RC_INWIN_BASE 0xE60 +#define PEX_RCIWARn_EN 0x1 + +static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + + if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) + return PCIBIOS_DEVICE_NOT_FOUND; + /* + * Workaround for the HW bug: for Type 0 configure transactions the + * PCI-E controller does not check the device number bits and just + * assumes that the device number bits are 0. 
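+ *
+ * The check below therefore rejects any devfn whose device-number bits
+ * (devfn & 0xf8) are non-zero, leaving only the eight functions of
+ * device 0 visible on such buses.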
+ */ + if (bus->number == hose->first_busno || + bus->primary == hose->first_busno) { + if (devfn & 0xf8) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (ppc_md.pci_exclude_device) { + if (ppc_md.pci_exclude_device(hose, bus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + return PCIBIOS_SUCCESSFUL; +} + +static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus, + unsigned int devfn, int offset) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + struct mpc83xx_pcie_priv *pcie = hose->dn->data; + u32 dev_base = bus->number << 24 | devfn << 16; + int ret; + + ret = mpc83xx_pcie_exclude_device(bus, devfn); + if (ret) + return NULL; + + offset &= 0xfff; + + /* Type 0 */ + if (bus->number == hose->first_busno) + return pcie->cfg_type0 + offset; + + if (pcie->dev_base == dev_base) + goto mapped; + + out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base); + + pcie->dev_base = dev_base; +mapped: + return pcie->cfg_type1 + offset; +} + +static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val) +{ + void __iomem *cfg_addr; + + cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); + if (!cfg_addr) + return PCIBIOS_DEVICE_NOT_FOUND; + + switch (len) { + case 1: + *val = in_8(cfg_addr); + break; + case 2: + *val = in_le16(cfg_addr); + break; + default: + *val = in_le32(cfg_addr); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + void __iomem *cfg_addr; + + cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); + if (!cfg_addr) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ + if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) + val &= 0xffffff00; + + switch (len) { + case 1: + out_8(cfg_addr, val); + break; + case 2: + out_le16(cfg_addr, val); + break; + default: + out_le32(cfg_addr, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops mpc83xx_pcie_ops = { + .read = mpc83xx_pcie_read_config, + .write = mpc83xx_pcie_write_config, +}; + +static int __init mpc83xx_pcie_setup(struct pci_controller *hose, + struct resource *reg) +{ + struct mpc83xx_pcie_priv *pcie; + u32 cfg_bar; + int ret = -ENOMEM; + + pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return ret; + + pcie->cfg_type0 = ioremap(reg->start, resource_size(reg)); + if (!pcie->cfg_type0) + goto err0; + + cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR); + if (!cfg_bar) { + /* PCI-E isn't configured. 
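Outbound window 0 (the u-boot config window noted above) reads back a zero BAR, so the bootloader never set this port up.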
*/ + ret = -ENODEV; + goto err1; + } + + pcie->cfg_type1 = ioremap(cfg_bar, 0x1000); + if (!pcie->cfg_type1) + goto err1; + + WARN_ON(hose->dn->data); + hose->dn->data = pcie; + hose->ops = &mpc83xx_pcie_ops; + + out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0); + out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0); + + if (fsl_pcie_check_link(hose)) + hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; + + return 0; +err1: + iounmap(pcie->cfg_type0); +err0: + kfree(pcie); + return ret; + +} + +int __init mpc83xx_add_bridge(struct device_node *dev) +{ + int ret; + int len; + struct pci_controller *hose; + struct resource rsrc_reg; + struct resource rsrc_cfg; + const int *bus_range; + int primary; + + is_mpc83xx_pci = 1; + + if (!of_device_is_available(dev)) { + pr_warning("%s: disabled by the firmware.\n", + dev->full_name); + return -ENODEV; + } + pr_debug("Adding PCI host bridge %s\n", dev->full_name); + + /* Fetch host bridge registers address */ + if (of_address_to_resource(dev, 0, &rsrc_reg)) { + printk(KERN_WARNING "Can't get pci register base!\n"); + return -ENOMEM; + } + + memset(&rsrc_cfg, 0, sizeof(rsrc_cfg)); + + if (of_address_to_resource(dev, 1, &rsrc_cfg)) { + printk(KERN_WARNING + "No pci config register base in dev tree, " + "using default\n"); + /* + * MPC83xx supports up to two host controllers + * one at 0x8500 has config space registers at 0x8300 + * one at 0x8600 has config space registers at 0x8380 + */ + if ((rsrc_reg.start & 0xfffff) == 0x8500) + rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300; + else if ((rsrc_reg.start & 0xfffff) == 0x8600) + rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380; + } + /* + * Controller at offset 0x8500 is primary + */ + if ((rsrc_reg.start & 0xfffff) == 0x8500) + primary = 1; + else + primary = 0; + + /* Get bus range if any */ + bus_range = of_get_property(dev, "bus-range", &len); + if (bus_range == NULL || len < 2 * sizeof(int)) { + printk(KERN_WARNING "Can't get bus-range for %s, assume" + " bus 0\n", dev->full_name); + } + + ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); + hose = pcibios_alloc_controller(dev); + if (!hose) + return -ENOMEM; + + hose->first_busno = bus_range ? bus_range[0] : 0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) { + ret = mpc83xx_pcie_setup(hose, &rsrc_reg); + if (ret) + goto err0; + } else { + setup_indirect_pci(hose, rsrc_cfg.start, + rsrc_cfg.start + 4, 0); + } + + printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. 
" + "Firmware bus number: %d->%d\n", + (unsigned long long)rsrc_reg.start, hose->first_busno, + hose->last_busno); + + pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", + hose, hose->cfg_addr, hose->cfg_data); + + /* Interpret the "ranges" property */ + /* This also maps the I/O region and sets isa_io/mem_base */ + pci_process_bridge_OF_ranges(hose, dev, primary); + + return 0; +err0: + pcibios_free_controller(hose); + return ret; +} +#endif /* CONFIG_PPC_83xx */ + +u64 fsl_pci_immrbar_base(struct pci_controller *hose) +{ +#ifdef CONFIG_PPC_83xx + if (is_mpc83xx_pci) { + struct mpc83xx_pcie_priv *pcie = hose->dn->data; + struct pex_inbound_window *in; + int i; + + /* Walk the Root Complex Inbound windows to match IMMR base */ + in = pcie->cfg_type0 + PEX_RC_INWIN_BASE; + for (i = 0; i < 4; i++) { + /* not enabled, skip */ + if (!in_le32(&in[i].ar) & PEX_RCIWARn_EN) + continue; + + if (get_immrbase() == in_le32(&in[i].tar)) + return (u64)in_le32(&in[i].barh) << 32 | + in_le32(&in[i].barl); + } + + printk(KERN_WARNING "could not find PCI BAR matching IMMR\n"); + } +#endif + +#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) + if (!is_mpc83xx_pci) { + u32 base; + + pci_bus_read_config_dword(hose->bus, + PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); + return base; + } +#endif + + return 0; +} diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h new file mode 100644 index 00000000..a39ed5cc --- /dev/null +++ b/arch/powerpc/sysdev/fsl_pci.h @@ -0,0 +1,97 @@ +/* + * MPC85xx/86xx PCI Express structure define + * + * Copyright 2007,2011 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifdef __KERNEL__ +#ifndef __POWERPC_FSL_PCI_H +#define __POWERPC_FSL_PCI_H + +#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */ +#define PCIE_LTSSM_L0 0x16 /* L0 state */ +#define PIWAR_EN 0x80000000 /* Enable */ +#define PIWAR_PF 0x20000000 /* prefetch */ +#define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */ +#define PIWAR_READ_SNOOP 0x00050000 +#define PIWAR_WRITE_SNOOP 0x00005000 +#define PIWAR_SZ_MASK 0x0000003f + +/* PCI/PCI Express outbound window reg */ +struct pci_outbound_window_regs { + __be32 potar; /* 0x.0 - Outbound translation address register */ + __be32 potear; /* 0x.4 - Outbound translation extended address register */ + __be32 powbar; /* 0x.8 - Outbound window base address register */ + u8 res1[4]; + __be32 powar; /* 0x.10 - Outbound window attributes register */ + u8 res2[12]; +}; + +/* PCI/PCI Express inbound window reg */ +struct pci_inbound_window_regs { + __be32 pitar; /* 0x.0 - Inbound translation address register */ + u8 res1[4]; + __be32 piwbar; /* 0x.8 - Inbound window base address register */ + __be32 piwbear; /* 0x.c - Inbound window base extended address register */ + __be32 piwar; /* 0x.10 - Inbound window attributes register */ + u8 res2[12]; +}; + +/* PCI/PCI Express IO block registers for 85xx/86xx */ +struct ccsr_pci { + __be32 config_addr; /* 0x.000 - PCI/PCIE Configuration Address Register */ + __be32 config_data; /* 0x.004 - PCI/PCIE Configuration Data Register */ + __be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */ + __be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */ + __be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */ + __be32 pex_config; /* 0x.014 - PCIE CONFIG Register */ + __be32 pex_int_status; /* 0x.018 - PCIE interrupt status */ + u8 res2[4]; + __be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */ + __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */ + __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */ + __be32 pex_pmcr; /* 0x.02c - PCIE power management command register */ + u8 res3[3024]; + +/* PCI/PCI Express outbound window 0-4 + * Window 0 is the default window and is the only window enabled upon reset. + * The default outbound register set is used when a transaction misses + * in all of the other outbound windows. + */ + struct pci_outbound_window_regs pow[5]; + u8 res14[96]; + struct pci_inbound_window_regs pmit; /* 0xd00 - 0xd9c Inbound MSI */ + u8 res6[96]; +/* PCI/PCI Express inbound window 3-0 + * inbound window 1 supports only a 32-bit base address and does not + * define an inbound window base extended address register. 
+ */
+ struct pci_inbound_window_regs piw[4];
+
+ __be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */
+ u8 res21[4];
+ __be32 pex_err_en; /* 0x.e08 - PCI/PCIE error interrupt enable register */
+ u8 res22[4];
+ __be32 pex_err_disr; /* 0x.e10 - PCI/PCIE error disable register */
+ u8 res23[12];
+ __be32 pex_err_cap_stat; /* 0x.e20 - PCI/PCIE error capture status register */
+ u8 res24[4];
+ __be32 pex_err_cap_r0; /* 0x.e28 - PCIE error capture register 0 */
+ __be32 pex_err_cap_r1; /* 0x.e2c - PCIE error capture register 1 */
+ __be32 pex_err_cap_r2; /* 0x.e30 - PCIE error capture register 2 */
+ __be32 pex_err_cap_r3; /* 0x.e34 - PCIE error capture register 3 */
+};
+
+extern int fsl_add_bridge(struct device_node *dev, int is_primary);
+extern void fsl_pcibios_fixup_bus(struct pci_bus *bus);
+extern int mpc83xx_add_bridge(struct device_node *dev);
+u64 fsl_pci_immrbar_base(struct pci_controller *hose);
+
+#endif /* __POWERPC_FSL_PCI_H */
+#endif /* __KERNEL__ */
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
new file mode 100644
index 00000000..f122e896
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -0,0 +1,91 @@
+/*
+ * Suspend/resume support
+ *
+ * Copyright 2009 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+
+struct pmc_regs {
+ __be32 devdisr;
+ __be32 devdisr2;
+ __be32 :32;
+ __be32 :32;
+ __be32 pmcsr;
+#define PMCSR_SLP (1 << 17)
+};
+
+static struct device *pmc_dev;
+static struct pmc_regs __iomem *pmc_regs;
+
+static int pmc_suspend_enter(suspend_state_t state)
+{
+ int ret;
+
+ setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
+ /* At this point, the CPU is asleep. */
+
+ /* Upon resume, wait for SLP bit to be clear. */
+ ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
+ 10000, 10) ?
0 : -ETIMEDOUT; + if (ret) + dev_err(pmc_dev, "tired waiting for SLP bit to clear\n"); + return ret; +} + +static int pmc_suspend_valid(suspend_state_t state) +{ + if (state != PM_SUSPEND_STANDBY) + return 0; + return 1; +} + +static const struct platform_suspend_ops pmc_suspend_ops = { + .valid = pmc_suspend_valid, + .enter = pmc_suspend_enter, +}; + +static int pmc_probe(struct platform_device *ofdev) +{ + pmc_regs = of_iomap(ofdev->dev.of_node, 0); + if (!pmc_regs) + return -ENOMEM; + + pmc_dev = &ofdev->dev; + suspend_set_ops(&pmc_suspend_ops); + return 0; +} + +static const struct of_device_id pmc_ids[] = { + { .compatible = "fsl,mpc8548-pmc", }, + { .compatible = "fsl,mpc8641d-pmc", }, + { }, +}; + +static struct platform_driver pmc_driver = { + .driver = { + .name = "fsl-pmc", + .owner = THIS_MODULE, + .of_match_table = pmc_ids, + }, + .probe = pmc_probe, +}; + +static int __init pmc_init(void) +{ + return platform_driver_register(&pmc_driver); +} +device_initcall(pmc_init); diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c new file mode 100644 index 00000000..cdd765bb --- /dev/null +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -0,0 +1,1652 @@ +/* + * Freescale MPC85xx/MPC86xx RapidIO support + * + * Copyright 2009 Sysgo AG + * Thomas Moll <thomas.moll@sysgo.com> + * - fixed maintenance access routines, check for aligned access + * + * Copyright 2009 Integrated Device Technology, Inc. + * Alex Bounine <alexandre.bounine@idt.com> + * - Added Port-Write message handling + * - Added Machine Check exception handling + * + * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. + * Zhang Wei <wei.zhang@freescale.com> + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter <mporter@kernel.crashing.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/rio.h> +#include <linux/rio_drv.h> +#include <linux/of_platform.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/kfifo.h> + +#include <asm/io.h> +#include <asm/machdep.h> +#include <asm/uaccess.h> + +#undef DEBUG_PW /* Port-Write debugging */ + +/* RapidIO definition irq, which read from OF-tree */ +#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) +#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) +#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) +#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) + +#define IPWSR_CLEAR 0x98 +#define OMSR_CLEAR 0x1cb3 +#define IMSR_CLEAR 0x491 +#define IDSR_CLEAR 0x91 +#define ODSR_CLEAR 0x1c00 +#define LTLEECSR_ENABLE_ALL 0xFFC000FC +#define ESCSR_CLEAR 0x07120204 +#define IECSR_CLEAR 0x80000000 + +#define RIO_PORT1_EDCSR 0x0640 +#define RIO_PORT2_EDCSR 0x0680 +#define RIO_PORT1_IECSR 0x10130 +#define RIO_PORT2_IECSR 0x101B0 +#define RIO_IM0SR 0x13064 +#define RIO_IM1SR 0x13164 +#define RIO_OM0SR 0x13004 +#define RIO_OM1SR 0x13104 + +#define RIO_ATMU_REGS_OFFSET 0x10c00 +#define RIO_P_MSG_REGS_OFFSET 0x11000 +#define RIO_S_MSG_REGS_OFFSET 0x13000 +#define RIO_GCCSR 0x13c +#define RIO_ESCSR 0x158 +#define RIO_PORT2_ESCSR 0x178 +#define RIO_CCSR 0x15c +#define RIO_LTLEDCSR 0x0608 +#define RIO_LTLEDCSR_IER 0x80000000 +#define RIO_LTLEDCSR_PRT 0x01000000 +#define RIO_LTLEECSR 0x060c +#define RIO_EPWISR 0x10010 +#define RIO_ISR_AACR 0x10120 +#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ +#define RIO_MAINT_WIN_SIZE 0x400000 +#define RIO_DBELL_WIN_SIZE 0x1000 + +#define RIO_MSG_OMR_MUI 0x00000002 +#define RIO_MSG_OSR_TE 0x00000080 +#define RIO_MSG_OSR_QOI 0x00000020 +#define RIO_MSG_OSR_QFI 0x00000010 +#define RIO_MSG_OSR_MUB 0x00000004 +#define RIO_MSG_OSR_EOMI 0x00000002 +#define RIO_MSG_OSR_QEI 0x00000001 + +#define RIO_MSG_IMR_MI 0x00000002 +#define RIO_MSG_ISR_TE 0x00000080 +#define RIO_MSG_ISR_QFI 0x00000010 +#define RIO_MSG_ISR_DIQI 0x00000001 + +#define RIO_IPWMR_SEN 0x00100000 +#define RIO_IPWMR_QFIE 0x00000100 +#define RIO_IPWMR_EIE 0x00000020 +#define RIO_IPWMR_CQ 0x00000002 +#define RIO_IPWMR_PWE 0x00000001 + +#define RIO_IPWSR_QF 0x00100000 +#define RIO_IPWSR_TE 0x00000080 +#define RIO_IPWSR_QFI 0x00000010 +#define RIO_IPWSR_PWD 0x00000008 +#define RIO_IPWSR_PWB 0x00000004 + +/* EPWISR Error match value */ +#define RIO_EPWISR_PINT1 0x80000000 +#define RIO_EPWISR_PINT2 0x40000000 +#define RIO_EPWISR_MU 0x00000002 +#define RIO_EPWISR_PW 0x00000001 + +#define RIO_MSG_DESC_SIZE 32 +#define RIO_MSG_BUFFER_SIZE 4096 +#define RIO_MIN_TX_RING_SIZE 2 +#define RIO_MAX_TX_RING_SIZE 2048 +#define RIO_MIN_RX_RING_SIZE 2 +#define RIO_MAX_RX_RING_SIZE 2048 + +#define DOORBELL_DMR_DI 0x00000002 +#define DOORBELL_DSR_TE 0x00000080 +#define DOORBELL_DSR_QFI 0x00000010 +#define DOORBELL_DSR_DIQI 0x00000001 +#define DOORBELL_TID_OFFSET 0x02 +#define DOORBELL_SID_OFFSET 0x04 +#define DOORBELL_INFO_OFFSET 0x06 + +#define DOORBELL_MESSAGE_SIZE 0x08 +#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) +#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) +#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) + +struct rio_atmu_regs { + u32 rowtar; + u32 rowtear; + u32 rowbar; + u32 pad2; + u32 rowar; + u32 pad3[3]; +}; + +struct rio_msg_regs { + u32 omr; /* 0xD_3000 - Outbound message 0 mode 
register */ + u32 osr; /* 0xD_3004 - Outbound message 0 status register */ + u32 pad1; + u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue + dequeue pointer address register */ + u32 pad2; + u32 osar; /* 0xD_3014 - Outbound message 0 source address + register */ + u32 odpr; /* 0xD_3018 - Outbound message 0 destination port + register */ + u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes + Register*/ + u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count + register */ + u32 pad3; + u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue + enqueue pointer address register */ + u32 pad4[13]; + u32 imr; /* 0xD_3060 - Inbound message 0 mode register */ + u32 isr; /* 0xD_3064 - Inbound message 0 status register */ + u32 pad5; + u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue + pointer address register*/ + u32 pad6; + u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue + pointer address register */ + u32 pad7[226]; + u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */ + u32 odsr; /* 0xD_3404 - Outbound doorbell status register */ + u32 res0[4]; + u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port + register */ + u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes + register */ + u32 res1[3]; + u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold + configuration register */ + u32 res2[12]; + u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */ + u32 dsr; /* 0xD_3464 - Inbound doorbell status register */ + u32 pad8; + u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer + address register */ + u32 pad9; + u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer + address register */ + u32 pad10[26]; + u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */ + u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */ + u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address + register */ + u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address + register */ +}; + +struct rio_tx_desc { + u32 res1; + u32 saddr; + u32 dport; + u32 dattr; + u32 res2; + u32 res3; + u32 dwcnt; + u32 res4; +}; + +struct rio_dbell_ring { + void *virt; + dma_addr_t phys; +}; + +struct rio_msg_tx_ring { + void *virt; + dma_addr_t phys; + void *virt_buffer[RIO_MAX_TX_RING_SIZE]; + dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; + int tx_slot; + int size; + void *dev_id; +}; + +struct rio_msg_rx_ring { + void *virt; + dma_addr_t phys; + void *virt_buffer[RIO_MAX_RX_RING_SIZE]; + int rx_slot; + int size; + void *dev_id; +}; + +struct rio_port_write_msg { + void *virt; + dma_addr_t phys; + u32 msg_count; + u32 err_count; + u32 discard_count; +}; + +struct rio_priv { + struct device *dev; + void __iomem *regs_win; + struct rio_atmu_regs __iomem *atmu_regs; + struct rio_atmu_regs __iomem *maint_atmu_regs; + struct rio_atmu_regs __iomem *dbell_atmu_regs; + void __iomem *dbell_win; + void __iomem *maint_win; + struct rio_msg_regs __iomem *msg_regs; + struct rio_dbell_ring dbell_ring; + struct rio_msg_tx_ring msg_tx_ring; + struct rio_msg_rx_ring msg_rx_ring; + struct rio_port_write_msg port_write_msg; + int bellirq; + int txirq; + int rxirq; + int pwirq; + struct work_struct pw_work; + struct kfifo pw_fifo; + spinlock_t pw_fifo_lock; +}; + +#define __fsl_read_rio_config(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %1,0(%2)\n" \ + " eieio\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: li %1,-1\n" \ + " li %0,%3\n" \ + " b 2b\n" \ + ".section 
__ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".text" \ + : "=r" (err), "=r" (x) \ + : "b" (addr), "i" (-EFAULT), "0" (err)) + +static void __iomem *rio_regs_win; + +#ifdef CONFIG_E500 +int fsl_rio_mcheck_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *entry; + unsigned long reason; + + if (!rio_regs_win) + return 0; + + reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); + if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { + /* Check if we are prepared to handle this fault */ + entry = search_exception_tables(regs->nip); + if (entry) { + pr_debug("RIO: %s - MC Exception handled\n", + __func__); + out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), + 0); + regs->msr |= MSR_RI; + regs->nip = entry->fixup; + return 1; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); +#endif + +/** + * fsl_rio_doorbell_send - Send a MPC85xx doorbell message + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @destid: Destination ID of target device + * @data: 16-bit info field of RapidIO doorbell message + * + * Sends a MPC85xx doorbell message. Returns %0 on success or + * %-EINVAL on failure. + */ +static int fsl_rio_doorbell_send(struct rio_mport *mport, + int index, u16 destid, u16 data) +{ + struct rio_priv *priv = mport->priv; + pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", + index, destid, data); + switch (mport->phy_type) { + case RIO_PHY_PARALLEL: + out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22); + out_be16(priv->dbell_win, data); + break; + case RIO_PHY_SERIAL: + /* In the serial version silicons, such as MPC8548, MPC8641, + * below operations is must be. + */ + out_be32(&priv->msg_regs->odmr, 0x00000000); + out_be32(&priv->msg_regs->odretcr, 0x00000004); + out_be32(&priv->msg_regs->oddpr, destid << 16); + out_be32(&priv->msg_regs->oddatr, data); + out_be32(&priv->msg_regs->odmr, 0x00000001); + break; + } + + return 0; +} + +/** + * fsl_local_config_read - Generate a MPC85xx local config space read + * @mport: RapidIO master port info + * @index: ID of RapdiIO interface + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Value to be read into + * + * Generates a MPC85xx local configuration space read. Returns %0 on + * success or %-EINVAL on failure. + */ +static int fsl_local_config_read(struct rio_mport *mport, + int index, u32 offset, int len, u32 *data) +{ + struct rio_priv *priv = mport->priv; + pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index, + offset); + *data = in_be32(priv->regs_win + offset); + + return 0; +} + +/** + * fsl_local_config_write - Generate a MPC85xx local config space write + * @mport: RapidIO master port info + * @index: ID of RapdiIO interface + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @data: Value to be written + * + * Generates a MPC85xx local configuration space write. Returns %0 on + * success or %-EINVAL on failure. 
+ */ +static int fsl_local_config_write(struct rio_mport *mport, + int index, u32 offset, int len, u32 data) +{ + struct rio_priv *priv = mport->priv; + pr_debug + ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", + index, offset, data); + out_be32(priv->regs_win + offset, data); + + return 0; +} + +/** + * fsl_rio_config_read - Generate a MPC85xx read maintenance transaction + * @mport: RapidIO master port info + * @index: ID of RapdiIO interface + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @val: Location to be read into + * + * Generates a MPC85xx read maintenance transaction. Returns %0 on + * success or %-EINVAL on failure. + */ +static int +fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 *val) +{ + struct rio_priv *priv = mport->priv; + u8 *data; + u32 rval, err = 0; + + pr_debug + ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", + index, destid, hopcount, offset, len); + + /* 16MB maintenance window possible */ + /* allow only aligned access to maintenance registers */ + if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) + return -EINVAL; + + out_be32(&priv->maint_atmu_regs->rowtar, + (destid << 22) | (hopcount << 12) | (offset >> 12)); + out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); + + data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); + switch (len) { + case 1: + __fsl_read_rio_config(rval, data, err, "lbz"); + break; + case 2: + __fsl_read_rio_config(rval, data, err, "lhz"); + break; + case 4: + __fsl_read_rio_config(rval, data, err, "lwz"); + break; + default: + return -EINVAL; + } + + if (err) { + pr_debug("RIO: cfg_read error %d for %x:%x:%x\n", + err, destid, hopcount, offset); + } + + *val = rval; + + return err; +} + +/** + * fsl_rio_config_write - Generate a MPC85xx write maintenance transaction + * @mport: RapidIO master port info + * @index: ID of RapdiIO interface + * @destid: Destination ID of transaction + * @hopcount: Number of hops to target device + * @offset: Offset into configuration space + * @len: Length (in bytes) of the maintenance transaction + * @val: Value to be written + * + * Generates an MPC85xx write maintenance transaction. Returns %0 on + * success or %-EINVAL on failure. 
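+ *
+ * The offset must be naturally aligned to @len, and the transaction must
+ * fall within the 16MB maintenance window; otherwise %-EINVAL is returned.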
+ */ +static int +fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 val) +{ + struct rio_priv *priv = mport->priv; + u8 *data; + pr_debug + ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", + index, destid, hopcount, offset, len, val); + + /* 16MB maintenance windows possible */ + /* allow only aligned access to maintenance registers */ + if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) + return -EINVAL; + + out_be32(&priv->maint_atmu_regs->rowtar, + (destid << 22) | (hopcount << 12) | (offset >> 12)); + out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); + + data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); + switch (len) { + case 1: + out_8((u8 *) data, val); + break; + case 2: + out_be16((u16 *) data, val); + break; + case 4: + out_be32((u32 *) data, val); + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * fsl_add_outb_message - Add message to the MPC85xx outbound message queue + * @mport: Master port with outbound message queue + * @rdev: Target of outbound message + * @mbox: Outbound mailbox + * @buffer: Message to add to outbound queue + * @len: Length of message + * + * Adds the @buffer message to the MPC85xx outbound message queue. Returns + * %0 on success or %-EINVAL on failure. + */ +static int +fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, + void *buffer, size_t len) +{ + struct rio_priv *priv = mport->priv; + u32 omr; + struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt + + priv->msg_tx_ring.tx_slot; + int ret = 0; + + pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ + "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); + + if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { + ret = -EINVAL; + goto out; + } + + /* Copy and clear rest of buffer */ + memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer, + len); + if (len < (RIO_MAX_MSG_SIZE - 4)) + memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot] + + len, 0, RIO_MAX_MSG_SIZE - len); + + switch (mport->phy_type) { + case RIO_PHY_PARALLEL: + /* Set mbox field for message */ + desc->dport = mbox & 0x3; + + /* Enable EOMI interrupt, set priority, and set destid */ + desc->dattr = 0x28000000 | (rdev->destid << 2); + break; + case RIO_PHY_SERIAL: + /* Set mbox field for message, and set destid */ + desc->dport = (rdev->destid << 16) | (mbox & 0x3); + + /* Enable EOMI interrupt and priority */ + desc->dattr = 0x28000000; + break; + } + + /* Set transfer size aligned to next power of 2 (in double words) */ + desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); + + /* Set snooping and source buffer address */ + desc->saddr = 0x00000004 + | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot]; + + /* Increment enqueue pointer */ + omr = in_be32(&priv->msg_regs->omr); + out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI); + + /* Go to next descriptor */ + if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size) + priv->msg_tx_ring.tx_slot = 0; + + out: + return ret; +} + +/** + * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles outbound message interrupts. Executes a register outbound + * mailbox event handler and acks the interrupt occurrence. 
+ */ +static irqreturn_t +fsl_rio_tx_handler(int irq, void *dev_instance) +{ + int osr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + + osr = in_be32(&priv->msg_regs->osr); + + if (osr & RIO_MSG_OSR_TE) { + pr_info("RIO: outbound message transmission error\n"); + out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE); + goto out; + } + + if (osr & RIO_MSG_OSR_QOI) { + pr_info("RIO: outbound message queue overflow\n"); + out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI); + goto out; + } + + if (osr & RIO_MSG_OSR_EOMI) { + u32 dqp = in_be32(&priv->msg_regs->odqdpar); + int slot = (dqp - priv->msg_tx_ring.phys) >> 5; + port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1, + slot); + + /* Ack the end-of-message interrupt */ + out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI); + } + + out: + return IRQ_HANDLED; +} + +/** + * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox + * @mport: Master port implementing the outbound message unit + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the outbound mailbox ring + * + * Initializes buffer ring, request the outbound message interrupt, + * and enables the outbound message unit. Returns %0 on success and + * %-EINVAL or %-ENOMEM on failure. + */ +static int +fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) +{ + int i, j, rc = 0; + struct rio_priv *priv = mport->priv; + + if ((entries < RIO_MIN_TX_RING_SIZE) || + (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { + rc = -EINVAL; + goto out; + } + + /* Initialize shadow copy ring */ + priv->msg_tx_ring.dev_id = dev_id; + priv->msg_tx_ring.size = entries; + + for (i = 0; i < priv->msg_tx_ring.size; i++) { + priv->msg_tx_ring.virt_buffer[i] = + dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, + &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL); + if (!priv->msg_tx_ring.virt_buffer[i]) { + rc = -ENOMEM; + for (j = 0; j < priv->msg_tx_ring.size; j++) + if (priv->msg_tx_ring.virt_buffer[j]) + dma_free_coherent(priv->dev, + RIO_MSG_BUFFER_SIZE, + priv->msg_tx_ring. + virt_buffer[j], + priv->msg_tx_ring. 
+ phys_buffer[j]); + goto out; + } + } + + /* Initialize outbound message descriptor ring */ + priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + &priv->msg_tx_ring.phys, GFP_KERNEL); + if (!priv->msg_tx_ring.virt) { + rc = -ENOMEM; + goto out_dma; + } + memset(priv->msg_tx_ring.virt, 0, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE); + priv->msg_tx_ring.tx_slot = 0; + + /* Point dequeue/enqueue pointers at first entry in ring */ + out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys); + out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys); + + /* Configure for snooping */ + out_be32(&priv->msg_regs->osar, 0x00000004); + + /* Clear interrupt status */ + out_be32(&priv->msg_regs->osr, 0x000000b3); + + /* Hook up outbound message handler */ + rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, + "msg_tx", (void *)mport); + if (rc < 0) + goto out_irq; + + /* + * Configure outbound message unit + * Snooping + * Interrupts (all enabled, except QEIE) + * Chaining mode + * Disable + */ + out_be32(&priv->msg_regs->omr, 0x00100220); + + /* Set number of entries */ + out_be32(&priv->msg_regs->omr, + in_be32(&priv->msg_regs->omr) | + ((get_bitmask_order(entries) - 2) << 12)); + + /* Now enable the unit */ + out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1); + + out: + return rc; + + out_irq: + dma_free_coherent(priv->dev, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); + + out_dma: + for (i = 0; i < priv->msg_tx_ring.size; i++) + dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, + priv->msg_tx_ring.virt_buffer[i], + priv->msg_tx_ring.phys_buffer[i]); + + return rc; +} + +/** + * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox + * @mport: Master port implementing the outbound message unit + * @mbox: Mailbox to close + * + * Disables the outbound message unit, free all buffers, and + * frees the outbound message interrupt. + */ +static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) +{ + struct rio_priv *priv = mport->priv; + /* Disable inbound message unit */ + out_be32(&priv->msg_regs->omr, 0); + + /* Free ring */ + dma_free_coherent(priv->dev, + priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, + priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); + + /* Free interrupt */ + free_irq(IRQ_RIO_TX(mport), (void *)mport); +} + +/** + * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles inbound message interrupts. Executes a registered inbound + * mailbox event handler and acks the interrupt occurrence. + */ +static irqreturn_t +fsl_rio_rx_handler(int irq, void *dev_instance) +{ + int isr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + + isr = in_be32(&priv->msg_regs->isr); + + if (isr & RIO_MSG_ISR_TE) { + pr_info("RIO: inbound message reception error\n"); + out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE); + goto out; + } + + /* XXX Need to check/dispatch until queue empty */ + if (isr & RIO_MSG_ISR_DIQI) { + /* + * We implement *only* mailbox 0, but can receive messages + * for any mailbox/letter to that mailbox destination. So, + * make the callback with an unknown/invalid mailbox number + * argument. 
+ */
+ port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);
+
+ /* Ack the queueing interrupt */
+ out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
+ }
+
+ out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the inbound message interrupt,
+ * and enables the inbound message unit. Returns %0 on success
+ * and %-EINVAL or %-ENOMEM on failure.
+ */
+static int
+fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+ int i, rc = 0;
+ struct rio_priv *priv = mport->priv;
+
+ if ((entries < RIO_MIN_RX_RING_SIZE) ||
+ (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize client buffer ring */
+ priv->msg_rx_ring.dev_id = dev_id;
+ priv->msg_rx_ring.size = entries;
+ priv->msg_rx_ring.rx_slot = 0;
+ for (i = 0; i < priv->msg_rx_ring.size; i++)
+ priv->msg_rx_ring.virt_buffer[i] = NULL;
+
+ /* Initialize inbound message ring */
+ priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
+ priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ &priv->msg_rx_ring.phys, GFP_KERNEL);
+ if (!priv->msg_rx_ring.virt) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
+ out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);
+
+ /* Clear interrupt status */
+ out_be32(&priv->msg_regs->isr, 0x00000091);
+
+ /* Hook up inbound message handler */
+ rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
+ "msg_rx", (void *)mport);
+ if (rc < 0) {
+ /* Free the inbound descriptor ring allocated above */
+ dma_free_coherent(priv->dev,
+ priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
+ goto out;
+ }
+
+ /*
+ * Configure inbound message unit:
+ * Snooping
+ * 4KB max message size
+ * Unmask all interrupt sources
+ * Disable
+ */
+ out_be32(&priv->msg_regs->imr, 0x001b0060);
+
+ /* Set number of queue entries */
+ setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
+
+ /* Now enable the unit */
+ setbits32(&priv->msg_regs->imr, 0x1);
+
+ out:
+ return rc;
+}
+
+/**
+ * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the inbound message unit, frees all buffers, and
+ * frees the inbound message interrupt.
+ */
+static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+ struct rio_priv *priv = mport->priv;
+ /* Disable inbound message unit */
+ out_be32(&priv->msg_regs->imr, 0);
+
+ /* Free ring */
+ dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
+
+ /* Free interrupt */
+ free_irq(IRQ_RIO_RX(mport), (void *)mport);
+}
+
+/**
+ * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ *
+ * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
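+ *
+ * The current ring slot must be empty; if a buffer is already installed
+ * there, the call fails with %-EINVAL.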
+ */ +static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) +{ + int rc = 0; + struct rio_priv *priv = mport->priv; + + pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", + priv->msg_rx_ring.rx_slot); + + if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { + printk(KERN_ERR + "RIO: error adding inbound buffer %d, buffer exists\n", + priv->msg_rx_ring.rx_slot); + rc = -EINVAL; + goto out; + } + + priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf; + if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size) + priv->msg_rx_ring.rx_slot = 0; + + out: + return rc; +} + +/** + * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit + * @mport: Master port implementing the inbound message unit + * @mbox: Inbound mailbox number + * + * Gets the next available inbound message from the inbound message queue. + * A pointer to the message is returned on success or NULL on failure. + */ +static void *fsl_get_inb_message(struct rio_mport *mport, int mbox) +{ + struct rio_priv *priv = mport->priv; + u32 phys_buf, virt_buf; + void *buf = NULL; + int buf_idx; + + phys_buf = in_be32(&priv->msg_regs->ifqdpar); + + /* If no more messages, then bail out */ + if (phys_buf == in_be32(&priv->msg_regs->ifqepar)) + goto out2; + + virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf + - priv->msg_rx_ring.phys); + buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; + buf = priv->msg_rx_ring.virt_buffer[buf_idx]; + + if (!buf) { + printk(KERN_ERR + "RIO: inbound message copy failed, no buffers\n"); + goto out1; + } + + /* Copy max message size, caller is expected to allocate that big */ + memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); + + /* Clear the available buffer */ + priv->msg_rx_ring.virt_buffer[buf_idx] = NULL; + + out1: + setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI); + + out2: + return buf; +} + +/** + * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles doorbell interrupts. Parses a list of registered + * doorbell event handlers and executes a matching event handler. 
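+ *
+ * The source ID, target ID, and info fields are decoded from the queued
+ * message via DBELL_SID(), DBELL_TID(), and DBELL_INF() before dispatch.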
+ */ +static irqreturn_t +fsl_rio_dbell_handler(int irq, void *dev_instance) +{ + int dsr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + + dsr = in_be32(&priv->msg_regs->dsr); + + if (dsr & DOORBELL_DSR_TE) { + pr_info("RIO: doorbell reception error\n"); + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE); + goto out; + } + + if (dsr & DOORBELL_DSR_QFI) { + pr_info("RIO: doorbell queue full\n"); + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); + } + + /* XXX Need to check/dispatch until queue empty */ + if (dsr & DOORBELL_DSR_DIQI) { + u32 dmsg = + (u32) priv->dbell_ring.virt + + (in_be32(&priv->msg_regs->dqdpar) & 0xfff); + struct rio_dbell *dbell; + int found = 0; + + pr_debug + ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n", + DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); + + list_for_each_entry(dbell, &port->dbells, node) { + if ((dbell->res->start <= DBELL_INF(dmsg)) && + (dbell->res->end >= DBELL_INF(dmsg))) { + found = 1; + break; + } + } + if (found) { + dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg), + DBELL_INF(dmsg)); + } else { + pr_debug + ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n", + DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); + } + setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI); + out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI); + } + + out: + return IRQ_HANDLED; +} + +/** + * fsl_rio_doorbell_init - MPC85xx doorbell interface init + * @mport: Master port implementing the inbound doorbell unit + * + * Initializes doorbell unit hardware and inbound DMA buffer + * ring. Called from fsl_rio_setup(). Returns %0 on success + * or %-ENOMEM on failure. + */ +static int fsl_rio_doorbell_init(struct rio_mport *mport) +{ + struct rio_priv *priv = mport->priv; + int rc = 0; + + /* Map outbound doorbell window immediately after maintenance window */ + priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, + RIO_DBELL_WIN_SIZE); + if (!priv->dbell_win) { + printk(KERN_ERR + "RIO: unable to map outbound doorbell window\n"); + rc = -ENOMEM; + goto out; + } + + /* Initialize inbound doorbells */ + priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * + DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); + if (!priv->dbell_ring.virt) { + printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n"); + rc = -ENOMEM; + iounmap(priv->dbell_win); + goto out; + } + + /* Point dequeue/enqueue pointers at first entry in ring */ + out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); + out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); + + /* Clear interrupt status */ + out_be32(&priv->msg_regs->dsr, 0x00000091); + + /* Hook up doorbell handler */ + rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, + "dbell_rx", (void *)mport); + if (rc < 0) { + iounmap(priv->dbell_win); + dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, + priv->dbell_ring.virt, priv->dbell_ring.phys); + printk(KERN_ERR + "MPC85xx RIO: unable to request inbound doorbell irq"); + goto out; + } + + /* Configure doorbells for snooping, 512 entries, and enable */ + out_be32(&priv->msg_regs->dmr, 0x00108161); + + out: + return rc; +} + +static void port_error_handler(struct rio_mport *port, int offset) +{ + /*XXX: Error recovery is not implemented, we just clear errors */ + out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); + + if (offset == 0) { + out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); + out_be32((u32 *)(rio_regs_win + 
RIO_PORT1_IECSR), IECSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); + } else { + out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); + out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); + } +} + +static void msg_unit_error_handler(struct rio_mport *port) +{ + struct rio_priv *priv = port->priv; + + /*XXX: Error recovery is not implemented, we just clear errors */ + out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); + + out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); + out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); + + out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); + out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); + + out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); +} + +/** + * fsl_rio_port_write_handler - MPC85xx port write interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles port write interrupts. Parses a list of registered + * port write event handlers and executes a matching event handler. + */ +static irqreturn_t +fsl_rio_port_write_handler(int irq, void *dev_instance) +{ + u32 ipwmr, ipwsr; + struct rio_mport *port = (struct rio_mport *)dev_instance; + struct rio_priv *priv = port->priv; + u32 epwisr, tmp; + + epwisr = in_be32(priv->regs_win + RIO_EPWISR); + if (!(epwisr & RIO_EPWISR_PW)) + goto pw_done; + + ipwmr = in_be32(&priv->msg_regs->pwmr); + ipwsr = in_be32(&priv->msg_regs->pwsr); + +#ifdef DEBUG_PW + pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); + if (ipwsr & RIO_IPWSR_QF) + pr_debug(" QF"); + if (ipwsr & RIO_IPWSR_TE) + pr_debug(" TE"); + if (ipwsr & RIO_IPWSR_QFI) + pr_debug(" QFI"); + if (ipwsr & RIO_IPWSR_PWD) + pr_debug(" PWD"); + if (ipwsr & RIO_IPWSR_PWB) + pr_debug(" PWB"); + pr_debug(" )\n"); +#endif + /* Schedule deferred processing if PW was received */ + if (ipwsr & RIO_IPWSR_QFI) { + /* Save PW message (if there is room in FIFO), + * otherwise discard it. + */ + if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { + priv->port_write_msg.msg_count++; + kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, + RIO_PW_MSG_SIZE); + } else { + priv->port_write_msg.discard_count++; + pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", + priv->port_write_msg.discard_count); + } + /* Clear interrupt and issue Clear Queue command. This allows + * another port-write to be received. 
+ */ + out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI); + out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); + + schedule_work(&priv->pw_work); + } + + if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { + priv->port_write_msg.err_count++; + pr_debug("RIO: Port-Write Transaction Err (%d)\n", + priv->port_write_msg.err_count); + /* Clear Transaction Error: port-write controller should be + * disabled when clearing this error + */ + out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); + out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE); + out_be32(&priv->msg_regs->pwmr, ipwmr); + } + + if (ipwsr & RIO_IPWSR_PWD) { + priv->port_write_msg.discard_count++; + pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", + priv->port_write_msg.discard_count); + out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD); + } + +pw_done: + if (epwisr & RIO_EPWISR_PINT1) { + tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); + pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); + port_error_handler(port, 0); + } + + if (epwisr & RIO_EPWISR_PINT2) { + tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); + pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); + port_error_handler(port, 1); + } + + if (epwisr & RIO_EPWISR_MU) { + tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); + pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); + msg_unit_error_handler(port); + } + + return IRQ_HANDLED; +} + +static void fsl_pw_dpc(struct work_struct *work) +{ + struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); + unsigned long flags; + u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; + + /* + * Process port-write messages + */ + spin_lock_irqsave(&priv->pw_fifo_lock, flags); + while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, + RIO_PW_MSG_SIZE)) { + /* Process one message */ + spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); +#ifdef DEBUG_PW + { + u32 i; + pr_debug("%s : Port-Write Message:", __func__); + for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { + if ((i%4) == 0) + pr_debug("\n0x%02x: 0x%08x", i*4, + msg_buffer[i]); + else + pr_debug(" 0x%08x", msg_buffer[i]); + } + pr_debug("\n"); + } +#endif + /* Pass the port-write message to RIO core for processing */ + rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); + spin_lock_irqsave(&priv->pw_fifo_lock, flags); + } + spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); +} + +/** + * fsl_rio_pw_enable - enable/disable port-write interface init + * @mport: Master port implementing the port write unit + * @enable: 1=enable; 0=disable port-write message handling + */ +static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) +{ + struct rio_priv *priv = mport->priv; + u32 rval; + + rval = in_be32(&priv->msg_regs->pwmr); + + if (enable) + rval |= RIO_IPWMR_PWE; + else + rval &= ~RIO_IPWMR_PWE; + + out_be32(&priv->msg_regs->pwmr, rval); + + return 0; +} + +/** + * fsl_rio_port_write_init - MPC85xx port write interface init + * @mport: Master port implementing the port write unit + * + * Initializes port write unit hardware and DMA buffer + * ring. Called from fsl_rio_setup(). Returns %0 on success + * or %-ENOMEM on failure. 
+ */
+static int fsl_rio_port_write_init(struct rio_mport *mport)
+{
+ struct rio_priv *priv = mport->priv;
+ int rc = 0;
+
+ /* Following configurations require a disabled port write controller */
+ out_be32(&priv->msg_regs->pwmr,
+ in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
+
+ /* Initialize port write */
+ priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
+ RIO_PW_MSG_SIZE,
+ &priv->port_write_msg.phys, GFP_KERNEL);
+ if (!priv->port_write_msg.virt) {
+ pr_err("RIO: unable to allocate port write queue\n");
+ return -ENOMEM;
+ }
+
+ priv->port_write_msg.err_count = 0;
+ priv->port_write_msg.discard_count = 0;
+
+ /* Point dequeue/enqueue pointers at first entry */
+ out_be32(&priv->msg_regs->epwqbar, 0);
+ out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
+
+ pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
+ in_be32(&priv->msg_regs->epwqbar),
+ in_be32(&priv->msg_regs->pwqbar));
+
+ /* Clear interrupt status IPWSR */
+ out_be32(&priv->msg_regs->pwsr,
+ (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+ /* Configure the port write controller: enable snooping and all
+ * error reporting, and issue a clear queue command */
+ out_be32(&priv->msg_regs->pwmr,
+ RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
+
+ /* Hook up port-write handler */
+ rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
+ IRQF_SHARED, "port-write", (void *)mport);
+ if (rc < 0) {
+ pr_err("MPC85xx RIO: unable to request port-write irq");
+ goto err_out;
+ }
+ /* Enable Error Interrupt */
+ out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
+
+ INIT_WORK(&priv->pw_work, fsl_pw_dpc);
+ spin_lock_init(&priv->pw_fifo_lock);
+ if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
+ pr_err("FIFO allocation failed\n");
+ rc = -ENOMEM;
+ goto err_out_irq;
+ }
+
+ pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
+ in_be32(&priv->msg_regs->pwmr),
+ in_be32(&priv->msg_regs->pwsr));
+
+ return rc;
+
+err_out_irq:
+ free_irq(IRQ_RIO_PW(mport), (void *)mport);
+err_out:
+ dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
+ priv->port_write_msg.virt,
+ priv->port_write_msg.phys);
+ return rc;
+}
+
+static inline void fsl_rio_info(struct device *dev, u32 ccsr)
+{
+ const char *str;
+ if (ccsr & 1) {
+ /* Serial phy */
+ switch (ccsr >> 30) {
+ case 0:
+ str = "1";
+ break;
+ case 1:
+ str = "4";
+ break;
+ default:
+ str = "Unknown";
+ break;
+ }
+ dev_info(dev, "Hardware port width: %s\n", str);
+
+ switch ((ccsr >> 27) & 7) {
+ case 0:
+ str = "Single-lane 0";
+ break;
+ case 1:
+ str = "Single-lane 2";
+ break;
+ case 2:
+ str = "Four-lane";
+ break;
+ default:
+ str = "Unknown";
+ break;
+ }
+ dev_info(dev, "Training connection status: %s\n", str);
+ } else {
+ /* Parallel phy */
+ if (!(ccsr & 0x80000000))
+ dev_info(dev, "Output port operating in 8-bit mode\n");
+ if (!(ccsr & 0x08000000))
+ dev_info(dev, "Input port operating in 8-bit mode\n");
+ }
+}
+
+/**
+ * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface
+ * @dev: platform_device pointer
+ *
+ * Initializes MPC85xx RapidIO hardware interface, configures
+ * master port with system-specific info, and registers the
+ * master port with the RapidIO subsystem.
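+ *
+ * Probes the PHY type from the CCSR register, attempts one link restart
+ * if the port reports not-ready, and programs the maintenance and
+ * doorbell ATMU windows before hooking up the doorbell and port-write units.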
+ */ +int fsl_rio_setup(struct platform_device *dev) +{ + struct rio_ops *ops; + struct rio_mport *port; + struct rio_priv *priv; + int rc = 0; + const u32 *dt_range, *cell; + struct resource regs; + int rlen; + u32 ccsr; + u64 law_start, law_size; + int paw, aw, sw; + + if (!dev->dev.of_node) { + dev_err(&dev->dev, "Device OF-Node is NULL"); + return -EFAULT; + } + + rc = of_address_to_resource(dev->dev.of_node, 0, ®s); + if (rc) { + dev_err(&dev->dev, "Can't get %s property 'reg'\n", + dev->dev.of_node->full_name); + return -EFAULT; + } + dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); + dev_info(&dev->dev, "Regs: %pR\n", ®s); + + dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); + if (!dt_range) { + dev_err(&dev->dev, "Can't get %s property 'ranges'\n", + dev->dev.of_node->full_name); + return -EFAULT; + } + + /* Get node address wide */ + cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); + if (cell) + aw = *cell; + else + aw = of_n_addr_cells(dev->dev.of_node); + /* Get node size wide */ + cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); + if (cell) + sw = *cell; + else + sw = of_n_size_cells(dev->dev.of_node); + /* Get parent address wide wide */ + paw = of_n_addr_cells(dev->dev.of_node); + + law_start = of_read_number(dt_range + aw, paw); + law_size = of_read_number(dt_range + aw + paw, sw); + + dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", + law_start, law_size); + + ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); + if (!ops) { + rc = -ENOMEM; + goto err_ops; + } + ops->lcread = fsl_local_config_read; + ops->lcwrite = fsl_local_config_write; + ops->cread = fsl_rio_config_read; + ops->cwrite = fsl_rio_config_write; + ops->dsend = fsl_rio_doorbell_send; + ops->pwenable = fsl_rio_pw_enable; + ops->open_outb_mbox = fsl_open_outb_mbox; + ops->open_inb_mbox = fsl_open_inb_mbox; + ops->close_outb_mbox = fsl_close_outb_mbox; + ops->close_inb_mbox = fsl_close_inb_mbox; + ops->add_outb_message = fsl_add_outb_message; + ops->add_inb_buffer = fsl_add_inb_buffer; + ops->get_inb_message = fsl_get_inb_message; + + port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); + if (!port) { + rc = -ENOMEM; + goto err_port; + } + port->index = 0; + + priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); + if (!priv) { + printk(KERN_ERR "Can't alloc memory for 'priv'\n"); + rc = -ENOMEM; + goto err_priv; + } + + INIT_LIST_HEAD(&port->dbells); + port->iores.start = law_start; + port->iores.end = law_start + law_size - 1; + port->iores.flags = IORESOURCE_MEM; + port->iores.name = "rio_io_win"; + + if (request_resource(&iomem_resource, &port->iores) < 0) { + dev_err(&dev->dev, "RIO: Error requesting master port region" + " 0x%016llx-0x%016llx\n", + (u64)port->iores.start, (u64)port->iores.end); + rc = -ENOMEM; + goto err_res; + } + + priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); + priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); + priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); + priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); + dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", + priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); + + rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); + rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); + rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); + strcpy(port->name, "RIO0 mport"); + + priv->dev = &dev->dev; + + port->ops = ops; + port->priv = priv; + port->phys_efptr = 0x100; 
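+
+ /* The register window mapped below is also cached in the file-static
+ * rio_regs_win for use by the machine check exception handler. */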
+ + priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); + rio_regs_win = priv->regs_win; + + /* Probe the master port phy type */ + ccsr = in_be32(priv->regs_win + RIO_CCSR); + port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; + dev_info(&dev->dev, "RapidIO PHY type: %s\n", + (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : + ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : + "unknown")); + /* Checking the port training status */ + if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { + dev_err(&dev->dev, "Port is not ready. " + "Try to restart connection...\n"); + switch (port->phy_type) { + case RIO_PHY_SERIAL: + /* Disable ports */ + out_be32(priv->regs_win + RIO_CCSR, 0); + /* Set 1x lane */ + setbits32(priv->regs_win + RIO_CCSR, 0x02000000); + /* Enable ports */ + setbits32(priv->regs_win + RIO_CCSR, 0x00600000); + break; + case RIO_PHY_PARALLEL: + /* Disable ports */ + out_be32(priv->regs_win + RIO_CCSR, 0x22000000); + /* Enable ports */ + out_be32(priv->regs_win + RIO_CCSR, 0x44000000); + break; + } + msleep(100); + if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { + dev_err(&dev->dev, "Port restart failed.\n"); + rc = -ENOLINK; + goto err; + } + dev_info(&dev->dev, "Port restart success!\n"); + } + fsl_rio_info(&dev->dev, ccsr); + + port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) + & RIO_PEF_CTLS) >> 4; + dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", + port->sys_size ? 65536 : 256); + + if (rio_register_mport(port)) + goto err; + + if (port->host_deviceid >= 0) + out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | + RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); + else + out_be32(priv->regs_win + RIO_GCCSR, 0x00000000); + + priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win + + RIO_ATMU_REGS_OFFSET); + priv->maint_atmu_regs = priv->atmu_regs + 1; + priv->dbell_atmu_regs = priv->atmu_regs + 2; + priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + + ((port->phy_type == RIO_PHY_SERIAL) ? + RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); + + /* Set to receive any dist ID for serial RapidIO controller. */ + if (port->phy_type == RIO_PHY_SERIAL) + out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); + + /* Configure maintenance transaction window */ + out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); + out_be32(&priv->maint_atmu_regs->rowar, + 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); + + priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); + + /* Configure outbound doorbell window */ + out_be32(&priv->dbell_atmu_regs->rowbar, + (law_start + RIO_MAINT_WIN_SIZE) >> 12); + out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ + fsl_rio_doorbell_init(port); + fsl_rio_port_write_init(port); + + return 0; +err: + iounmap(priv->regs_win); +err_res: + kfree(priv); +err_priv: + kfree(port); +err_port: + kfree(ops); +err_ops: + return rc; +} + +/* The probe function for RapidIO peer-to-peer network. 
+ */ +static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) +{ + printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", + dev->dev.of_node->full_name); + + return fsl_rio_setup(dev); +}; + +static const struct of_device_id fsl_of_rio_rpn_ids[] = { + { + .compatible = "fsl,rapidio-delta", + }, + {}, +}; + +static struct platform_driver fsl_of_rio_rpn_driver = { + .driver = { + .name = "fsl-of-rio", + .owner = THIS_MODULE, + .of_match_table = fsl_of_rio_rpn_ids, + }, + .probe = fsl_of_rio_rpn_probe, +}; + +static __init int fsl_of_rio_rpn_init(void) +{ + return platform_driver_register(&fsl_of_rio_rpn_driver); +} + +subsys_initcall(fsl_of_rio_rpn_init); diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c new file mode 100644 index 00000000..19e5015e --- /dev/null +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -0,0 +1,254 @@ +/* + * FSL SoC setup code + * + * Maintained by Kumar Gala (see MAINTAINERS for contact information) + * + * 2006 (c) MontaVista Software, Inc. + * Vitaly Bordug <vbordug@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/major.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/spi/spi.h> +#include <linux/fsl_devices.h> +#include <linux/fs_enet_pd.h> +#include <linux/fs_uart_pd.h> + +#include <asm/system.h> +#include <asm/atomic.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/time.h> +#include <asm/prom.h> +#include <asm/machdep.h> +#include <sysdev/fsl_soc.h> +#include <mm/mmu_decl.h> +#include <asm/cpm2.h> + +extern void init_fcc_ioports(struct fs_platform_info*); +extern void init_fec_ioports(struct fs_platform_info*); +extern void init_smc_ioports(struct fs_uart_platform_info*); +static phys_addr_t immrbase = -1; + +phys_addr_t get_immrbase(void) +{ + struct device_node *soc; + + if (immrbase != -1) + return immrbase; + + soc = of_find_node_by_type(NULL, "soc"); + if (soc) { + int size; + u32 naddr; + const u32 *prop = of_get_property(soc, "#address-cells", &size); + + if (prop && size == 4) + naddr = *prop; + else + naddr = 2; + + prop = of_get_property(soc, "ranges", &size); + if (prop) + immrbase = of_translate_address(soc, prop + naddr); + + of_node_put(soc); + } + + return immrbase; +} + +EXPORT_SYMBOL(get_immrbase); + +static u32 sysfreq = -1; + +u32 fsl_get_sys_freq(void) +{ + struct device_node *soc; + const u32 *prop; + int size; + + if (sysfreq != -1) + return sysfreq; + + soc = of_find_node_by_type(NULL, "soc"); + if (!soc) + return -1; + + prop = of_get_property(soc, "clock-frequency", &size); + if (!prop || size != sizeof(*prop) || *prop == 0) + prop = of_get_property(soc, "bus-frequency", &size); + + if (prop && size == sizeof(*prop)) + sysfreq = *prop; + + of_node_put(soc); + return sysfreq; +} +EXPORT_SYMBOL(fsl_get_sys_freq); + +#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) + +static u32 brgfreq = -1; + +u32 get_brgfreq(void) +{ + struct device_node *node; + const unsigned int *prop; + int size; + + if 
(brgfreq != -1)
+		return brgfreq;
+
+	node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
+	if (node) {
+		prop = of_get_property(node, "clock-frequency", &size);
+		if (prop && size == 4)
+			brgfreq = *prop;
+
+		of_node_put(node);
+		return brgfreq;
+	}
+
+	/* Legacy device binding -- will go away when no users are left. */
+	node = of_find_node_by_type(NULL, "cpm");
+	if (!node)
+		node = of_find_compatible_node(NULL, NULL, "fsl,qe");
+	if (!node)
+		node = of_find_node_by_type(NULL, "qe");
+
+	if (node) {
+		prop = of_get_property(node, "brg-frequency", &size);
+		if (prop && size == 4)
+			brgfreq = *prop;
+
+		if (brgfreq == -1 || brgfreq == 0) {
+			prop = of_get_property(node, "bus-frequency", &size);
+			if (prop && size == 4)
+				brgfreq = *prop / 2;
+		}
+		of_node_put(node);
+	}
+
+	return brgfreq;
+}
+
+EXPORT_SYMBOL(get_brgfreq);
+
+static u32 fs_baudrate = -1;
+
+u32 get_baudrate(void)
+{
+	struct device_node *node;
+
+	if (fs_baudrate != -1)
+		return fs_baudrate;
+
+	node = of_find_node_by_type(NULL, "serial");
+	if (node) {
+		int size;
+		const unsigned int *prop = of_get_property(node,
+				"current-speed", &size);
+
+		if (prop)
+			fs_baudrate = *prop;
+		of_node_put(node);
+	}
+
+	return fs_baudrate;
+}
+
+EXPORT_SYMBOL(get_baudrate);
+#endif /* CONFIG_CPM2 || CONFIG_QUICC_ENGINE || CONFIG_8xx */
+
+#ifdef CONFIG_FIXED_PHY
+static int __init of_add_fixed_phys(void)
+{
+	int ret;
+	struct device_node *np;
+	u32 *fixed_link;
+	struct fixed_phy_status status = {};
+
+	for_each_node_by_name(np, "ethernet") {
+		fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
+		if (!fixed_link)
+			continue;
+
+		status.link = 1;
+		status.duplex = fixed_link[1];
+		status.speed = fixed_link[2];
+		status.pause = fixed_link[3];
+		status.asym_pause = fixed_link[4];
+
+		ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
+		if (ret) {
+			of_node_put(np);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+arch_initcall(of_add_fixed_phys);
+#endif /* CONFIG_FIXED_PHY */
+
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+static __be32 __iomem *rstcr;
+
+static int __init setup_rstcr(void)
+{
+	struct device_node *np;
+
+	for_each_node_by_name(np, "global-utilities") {
+		if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
+			void __iomem *base = of_iomap(np, 0);
+
+			/* check the mapping before applying the RSTCR
+			 * offset; testing the pointer after the addition
+			 * can never catch a failed of_iomap() */
+			if (!base)
+				printk(KERN_ERR "Error: reset control "
+						"register not mapped!\n");
+			else
+				rstcr = base + 0xb0;
+			break;
+		}
+	}
+
+	if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
+		printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
+
+	if (np)
+		of_node_put(np);
+
+	return 0;
+}
+
+arch_initcall(setup_rstcr);
+
+void fsl_rstcr_restart(char *cmd)
+{
+	local_irq_disable();
+	if (rstcr)
+		/* set reset control register */
+		out_be32(rstcr, 0x2);	/* HRESET_REQ */
+
+	while (1) ;
+}
+#endif
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+struct platform_diu_data_ops diu_ops;
+EXPORT_SYMBOL(diu_ops);
+#endif
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
new file mode 100644
index 00000000..53609489
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -0,0 +1,40 @@
+#ifndef __PPC_FSL_SOC_H
+#define __PPC_FSL_SOC_H
+#ifdef __KERNEL__
+
+#include <asm/mmu.h>
+
+struct spi_device;
+
+extern phys_addr_t get_immrbase(void);
+#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
+extern u32 get_brgfreq(void);
+extern u32 get_baudrate(void);
+#else
+static inline u32 get_brgfreq(void) { return -1; }
+static inline u32 get_baudrate(void) { return -1; }
+#endif
+extern u32 fsl_get_sys_freq(void);
+
+struct spi_board_info;
+struct device_node;
+
+extern void fsl_rstcr_restart(char *cmd);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+struct platform_diu_data_ops {
+	unsigned int (*get_pixel_format) (unsigned int bits_per_pixel,
+		int monitor_port);
+	void (*set_gamma_table) (int monitor_port, char *gamma_table_base);
+	void (*set_monitor_port) (int monitor_port);
+	void (*set_pixel_clock) (unsigned int pixclock);
+	ssize_t (*show_monitor_port) (int monitor_port, char *buf);
+	int (*set_sysfs_monitor_port) (int val);
+	void (*release_bootmem) (void);
+};
+
+extern struct platform_diu_data_ops diu_ops;
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __PPC_FSL_SOC_H */
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
new file mode 100644
index 00000000..cf27df6e
--- /dev/null
+++ b/arch/powerpc/sysdev/grackle.c
@@ -0,0 +1,66 @@
+/*
+ * Functions for setting up and using a MPC106 northbridge
+ * Extracted from arch/powerpc/platforms/powermac/pci.c.
+ *
+ * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/grackle.h>
+
+#define GRACKLE_CFA(b, d, o)	(0x80 | ((b) << 8) | ((d) << 16) \
+				 | (((o) & ~3) << 24))
+
+#define GRACKLE_PICR1_STG		0x00000040
+#define GRACKLE_PICR1_LOOPSNOOP		0x00000010
+
+/* N.B. this is called before bridges is initialized, so we can't
+   use grackle_pcibios_{read,write}_config_dword. */
+static inline void grackle_set_stg(struct pci_controller *bp, int enable)
+{
+	unsigned int val;
+
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	val = in_le32(bp->cfg_data);
+	val = enable ? (val | GRACKLE_PICR1_STG) :
+		(val & ~GRACKLE_PICR1_STG);
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	out_le32(bp->cfg_data, val);
+	(void)in_le32(bp->cfg_data);
+}
+
+static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
+{
+	unsigned int val;
+
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	val = in_le32(bp->cfg_data);
+	val = enable ? (val | GRACKLE_PICR1_LOOPSNOOP) :
+		(val & ~GRACKLE_PICR1_LOOPSNOOP);
+	out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
+	out_le32(bp->cfg_data, val);
+	(void)in_le32(bp->cfg_data);
+}
+
+void __init setup_grackle(struct pci_controller *hose)
+{
+	setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
+	if (of_machine_is_compatible("PowerMac1,1"))
+		ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
+	if (of_machine_is_compatible("AAPL,PowerBook1998"))
+		grackle_set_loop_snoop(hose, 1);
+#if 0	/* Disabled for now, HW problems ??? */
+	grackle_set_stg(hose, 1);
+#endif
+}
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
new file mode 100644
index 00000000..d18bb27e
--- /dev/null
+++ b/arch/powerpc/sysdev/i8259.c
@@ -0,0 +1,286 @@
+/*
+ * i8259 interrupt controller driver.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
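+ *
+ * The code below assumes the classic PC cascade layout: the master
+ * 8259 at I/O ports 0x20/0x21, the slave at 0xA0/0xA1, and the
+ * slave's INT output wired to the master's IRQ2 pin, giving 16 ISA
+ * sources in total (hwirqs 0-7 on the master, 8-15 on the slave).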
+ */ +#undef DEBUG + +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <asm/io.h> +#include <asm/i8259.h> +#include <asm/prom.h> + +static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ + +static unsigned char cached_8259[2] = { 0xff, 0xff }; +#define cached_A1 (cached_8259[0]) +#define cached_21 (cached_8259[1]) + +static DEFINE_RAW_SPINLOCK(i8259_lock); + +static struct irq_host *i8259_host; + +/* + * Acknowledge the IRQ using either the PCI host bridge's interrupt + * acknowledge feature or poll. How i8259_init() is called determines + * which is called. It should be noted that polling is broken on some + * IBM and Motorola PReP boxes so we must use the int-ack feature on them. + */ +unsigned int i8259_irq(void) +{ + int irq; + int lock = 0; + + /* Either int-ack or poll for the IRQ */ + if (pci_intack) + irq = readb(pci_intack); + else { + raw_spin_lock(&i8259_lock); + lock = 1; + + /* Perform an interrupt acknowledge cycle on controller 1. */ + outb(0x0C, 0x20); /* prepare for poll */ + irq = inb(0x20) & 7; + if (irq == 2 ) { + /* + * Interrupt is cascaded so perform interrupt + * acknowledge on controller 2. + */ + outb(0x0C, 0xA0); /* prepare for poll */ + irq = (inb(0xA0) & 7) + 8; + } + } + + if (irq == 7) { + /* + * This may be a spurious interrupt. + * + * Read the interrupt status register (ISR). If the most + * significant bit is not set then there is no valid + * interrupt. + */ + if (!pci_intack) + outb(0x0B, 0x20); /* ISR register */ + if(~inb(0x20) & 0x80) + irq = NO_IRQ; + } else if (irq == 0xff) + irq = NO_IRQ; + + if (lock) + raw_spin_unlock(&i8259_lock); + return irq; +} + +static void i8259_mask_and_ack_irq(struct irq_data *d) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&i8259_lock, flags); + if (d->irq > 7) { + cached_A1 |= 1 << (d->irq-8); + inb(0xA1); /* DUMMY */ + outb(cached_A1, 0xA1); + outb(0x20, 0xA0); /* Non-specific EOI */ + outb(0x20, 0x20); /* Non-specific EOI to cascade */ + } else { + cached_21 |= 1 << d->irq; + inb(0x21); /* DUMMY */ + outb(cached_21, 0x21); + outb(0x20, 0x20); /* Non-specific EOI */ + } + raw_spin_unlock_irqrestore(&i8259_lock, flags); +} + +static void i8259_set_irq_mask(int irq_nr) +{ + outb(cached_A1,0xA1); + outb(cached_21,0x21); +} + +static void i8259_mask_irq(struct irq_data *d) +{ + unsigned long flags; + + pr_debug("i8259_mask_irq(%d)\n", d->irq); + + raw_spin_lock_irqsave(&i8259_lock, flags); + if (d->irq < 8) + cached_21 |= 1 << d->irq; + else + cached_A1 |= 1 << (d->irq-8); + i8259_set_irq_mask(d->irq); + raw_spin_unlock_irqrestore(&i8259_lock, flags); +} + +static void i8259_unmask_irq(struct irq_data *d) +{ + unsigned long flags; + + pr_debug("i8259_unmask_irq(%d)\n", d->irq); + + raw_spin_lock_irqsave(&i8259_lock, flags); + if (d->irq < 8) + cached_21 &= ~(1 << d->irq); + else + cached_A1 &= ~(1 << (d->irq-8)); + i8259_set_irq_mask(d->irq); + raw_spin_unlock_irqrestore(&i8259_lock, flags); +} + +static struct irq_chip i8259_pic = { + .name = "i8259", + .irq_mask = i8259_mask_irq, + .irq_disable = i8259_mask_irq, + .irq_unmask = i8259_unmask_irq, + .irq_mask_ack = i8259_mask_and_ack_irq, +}; + +static struct resource pic1_iores = { + .name = "8259 (master)", + .start = 0x20, + .end = 0x21, + .flags = IORESOURCE_BUSY, +}; + +static struct resource pic2_iores = { + .name = "8259 (slave)", + .start = 0xa0, + .end = 0xa1, + .flags = IORESOURCE_BUSY, +}; + +static struct resource pic_edgectrl_iores = { + 
.name = "8259 edge control", + .start = 0x4d0, + .end = 0x4d1, + .flags = IORESOURCE_BUSY, +}; + +static int i8259_host_match(struct irq_host *h, struct device_node *node) +{ + return h->of_node == NULL || h->of_node == node; +} + +static int i8259_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); + + /* We block the internal cascade */ + if (hw == 2) + irq_set_status_flags(virq, IRQ_NOREQUEST); + + /* We use the level handler only for now, we might want to + * be more cautious here but that works for now + */ + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq); + return 0; +} + +static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) +{ + static unsigned char map_isa_senses[4] = { + IRQ_TYPE_LEVEL_LOW, + IRQ_TYPE_LEVEL_HIGH, + IRQ_TYPE_EDGE_FALLING, + IRQ_TYPE_EDGE_RISING, + }; + + *out_hwirq = intspec[0]; + if (intsize > 1 && intspec[1] < 4) + *out_flags = map_isa_senses[intspec[1]]; + else + *out_flags = IRQ_TYPE_NONE; + + return 0; +} + +static struct irq_host_ops i8259_host_ops = { + .match = i8259_host_match, + .map = i8259_host_map, + .xlate = i8259_host_xlate, +}; + +struct irq_host *i8259_get_host(void) +{ + return i8259_host; +} + +/** + * i8259_init - Initialize the legacy controller + * @node: device node of the legacy PIC (can be NULL, but then, it will match + * all interrupts, so beware) + * @intack_addr: PCI interrupt acknowledge (real) address which will return + * the active irq from the 8259 + */ +void i8259_init(struct device_node *node, unsigned long intack_addr) +{ + unsigned long flags; + + /* initialize the controller */ + raw_spin_lock_irqsave(&i8259_lock, flags); + + /* Mask all first */ + outb(0xff, 0xA1); + outb(0xff, 0x21); + + /* init master interrupt controller */ + outb(0x11, 0x20); /* Start init sequence */ + outb(0x00, 0x21); /* Vector base */ + outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */ + outb(0x01, 0x21); /* Select 8086 mode */ + + /* init slave interrupt controller */ + outb(0x11, 0xA0); /* Start init sequence */ + outb(0x08, 0xA1); /* Vector base */ + outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */ + outb(0x01, 0xA1); /* Select 8086 mode */ + + /* That thing is slow */ + udelay(100); + + /* always read ISR */ + outb(0x0B, 0x20); + outb(0x0B, 0xA0); + + /* Unmask the internal cascade */ + cached_21 &= ~(1 << 2); + + /* Set interrupt masks */ + outb(cached_A1, 0xA1); + outb(cached_21, 0x21); + + raw_spin_unlock_irqrestore(&i8259_lock, flags); + + /* create a legacy host */ + i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, + 0, &i8259_host_ops, 0); + if (i8259_host == NULL) { + printk(KERN_ERR "i8259: failed to allocate irq host !\n"); + return; + } + + /* reserve our resources */ + /* XXX should we continue doing that ? it seems to cause problems + * with further requesting of PCI IO resources for that range... + * need to look into it. 
+ */ + request_resource(&ioport_resource, &pic1_iores); + request_resource(&ioport_resource, &pic2_iores); + request_resource(&ioport_resource, &pic_edgectrl_iores); + + if (intack_addr != 0) + pci_intack = ioremap(intack_addr, 1); + + printk(KERN_INFO "i8259 legacy interrupt controller initialized\n"); +} diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c new file mode 100644 index 00000000..82fdad88 --- /dev/null +++ b/arch/powerpc/sysdev/indirect_pci.c @@ -0,0 +1,172 @@ +/* + * Support for indirect PCI bridges. + * + * Copyright (C) 1998 Gabriel Paubert. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/init.h> + +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> + +static int +indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + volatile void __iomem *cfg_data; + u8 cfg_type = 0; + u32 bus_no, reg; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) { + if (bus->number != hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(hose, bus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE) + if (bus->number != hose->first_busno) + cfg_type = 1; + + bus_no = (bus->number == hose->first_busno) ? + hose->self_busno : bus->number; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG) + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); + else + reg = offset & 0xfc; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN) + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + else + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. + */ + cfg_data = hose->cfg_data + (offset & 3); + switch (len) { + case 1: + *val = in_8(cfg_data); + break; + case 2: + *val = in_le16(cfg_data); + break; + default: + *val = in_le32(cfg_data); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int +indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + volatile void __iomem *cfg_data; + u8 cfg_type = 0; + u32 bus_no, reg; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) { + if (bus->number != hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(hose, bus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE) + if (bus->number != hose->first_busno) + cfg_type = 1; + + bus_no = (bus->number == hose->first_busno) ? 
+ hose->self_busno : bus->number; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG) + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); + else + reg = offset & 0xfc; + + if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN) + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + else + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + + /* suppress setting of PCI_PRIMARY_BUS */ + if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) + if ((offset == PCI_PRIMARY_BUS) && + (bus->number == hose->first_busno)) + val &= 0xffffff00; + + /* Workaround for PCI_28 Errata in 440EPx/GRx */ + if ((hose->indirect_type & PPC_INDIRECT_TYPE_BROKEN_MRM) && + offset == PCI_CACHE_LINE_SIZE) { + val = 0; + } + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. + */ + cfg_data = hose->cfg_data + (offset & 3); + switch (len) { + case 1: + out_8(cfg_data, val); + break; + case 2: + out_le16(cfg_data, val); + break; + default: + out_le32(cfg_data, val); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops indirect_pci_ops = +{ + .read = indirect_read_config, + .write = indirect_write_config, +}; + +void __init +setup_indirect_pci(struct pci_controller* hose, + resource_size_t cfg_addr, + resource_size_t cfg_data, u32 flags) +{ + resource_size_t base = cfg_addr & PAGE_MASK; + void __iomem *mbase; + + mbase = ioremap(base, PAGE_SIZE); + hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK); + if ((cfg_data & PAGE_MASK) != base) + mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE); + hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK); + hose->ops = &indirect_pci_ops; + hose->indirect_type = flags; +} diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c new file mode 100644 index 00000000..7367d173 --- /dev/null +++ b/arch/powerpc/sysdev/ipic.c @@ -0,0 +1,972 @@ +/* + * arch/powerpc/sysdev/ipic.c + * + * IPIC routines implementations. + * + * Copyright 2005 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
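+ *
+ * The ipic_info[] table below is indexed by hardware IRQ number; each
+ * entry gives the mask, priority and force register offsets for that
+ * source plus its bit position, which the helpers turn into a mask
+ * with 1 << (31 - bit).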
+ */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/reboot.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/syscore_ops.h> +#include <linux/device.h> +#include <linux/bootmem.h> +#include <linux/spinlock.h> +#include <linux/fsl_devices.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/ipic.h> + +#include "ipic.h" + +static struct ipic * primary_ipic; +static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip; +static DEFINE_RAW_SPINLOCK(ipic_lock); + +static struct ipic_info ipic_info[] = { + [1] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 16, + .prio_mask = 0, + }, + [2] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 17, + .prio_mask = 1, + }, + [3] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 18, + .prio_mask = 2, + }, + [4] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 19, + .prio_mask = 3, + }, + [5] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 20, + .prio_mask = 4, + }, + [6] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 21, + .prio_mask = 5, + }, + [7] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 22, + .prio_mask = 6, + }, + [8] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_C, + .force = IPIC_SIFCR_H, + .bit = 23, + .prio_mask = 7, + }, + [9] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 24, + .prio_mask = 0, + }, + [10] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 25, + .prio_mask = 1, + }, + [11] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 26, + .prio_mask = 2, + }, + [12] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 27, + .prio_mask = 3, + }, + [13] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 28, + .prio_mask = 4, + }, + [14] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 29, + .prio_mask = 5, + }, + [15] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 30, + .prio_mask = 6, + }, + [16] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_D, + .force = IPIC_SIFCR_H, + .bit = 31, + .prio_mask = 7, + }, + [17] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_A, + .force = IPIC_SEFCR, + .bit = 1, + .prio_mask = 5, + }, + [18] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_A, + .force = IPIC_SEFCR, + .bit = 2, + .prio_mask = 6, + }, + [19] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_A, + .force = IPIC_SEFCR, + .bit = 3, + .prio_mask = 7, + }, + [20] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_B, + .force = IPIC_SEFCR, + .bit = 4, + .prio_mask = 4, + }, + [21] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_B, + .force = IPIC_SEFCR, + .bit = 5, + .prio_mask = 5, + }, + [22] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_B, + .force = IPIC_SEFCR, + .bit = 6, + .prio_mask = 6, + }, + [23] = { + .ack = IPIC_SEPNR, + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_B, + .force = IPIC_SEFCR, + .bit = 7, + .prio_mask = 7, + }, + [32] = { + .mask = 
IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 0, + .prio_mask = 0, + }, + [33] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 1, + .prio_mask = 1, + }, + [34] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 2, + .prio_mask = 2, + }, + [35] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 3, + .prio_mask = 3, + }, + [36] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 4, + .prio_mask = 4, + }, + [37] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 5, + .prio_mask = 5, + }, + [38] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 6, + .prio_mask = 6, + }, + [39] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_A, + .force = IPIC_SIFCR_H, + .bit = 7, + .prio_mask = 7, + }, + [40] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 8, + .prio_mask = 0, + }, + [41] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 9, + .prio_mask = 1, + }, + [42] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 10, + .prio_mask = 2, + }, + [43] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 11, + .prio_mask = 3, + }, + [44] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 12, + .prio_mask = 4, + }, + [45] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 13, + .prio_mask = 5, + }, + [46] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 14, + .prio_mask = 6, + }, + [47] = { + .mask = IPIC_SIMSR_H, + .prio = IPIC_SIPRR_B, + .force = IPIC_SIFCR_H, + .bit = 15, + .prio_mask = 7, + }, + [48] = { + .mask = IPIC_SEMSR, + .prio = IPIC_SMPRR_A, + .force = IPIC_SEFCR, + .bit = 0, + .prio_mask = 4, + }, + [64] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_A, + .force = IPIC_SIFCR_L, + .bit = 0, + .prio_mask = 0, + }, + [65] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_A, + .force = IPIC_SIFCR_L, + .bit = 1, + .prio_mask = 1, + }, + [66] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_A, + .force = IPIC_SIFCR_L, + .bit = 2, + .prio_mask = 2, + }, + [67] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_A, + .force = IPIC_SIFCR_L, + .bit = 3, + .prio_mask = 3, + }, + [68] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_B, + .force = IPIC_SIFCR_L, + .bit = 4, + .prio_mask = 0, + }, + [69] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_B, + .force = IPIC_SIFCR_L, + .bit = 5, + .prio_mask = 1, + }, + [70] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_B, + .force = IPIC_SIFCR_L, + .bit = 6, + .prio_mask = 2, + }, + [71] = { + .mask = IPIC_SIMSR_L, + .prio = IPIC_SMPRR_B, + .force = IPIC_SIFCR_L, + .bit = 7, + .prio_mask = 3, + }, + [72] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 8, + }, + [73] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 9, + }, + [74] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 10, + }, + [75] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 11, + }, + [76] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 12, + }, + [77] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 13, + }, + [78] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + 
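/* .prio == 0, as in the surrounding entries, marks a source
+		 * with no priority-register slot; ipic_set_priority()
+		 * rejects such sources with -EINVAL. */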
.force = IPIC_SIFCR_L, + .bit = 14, + }, + [79] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 15, + }, + [80] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 16, + }, + [81] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 17, + }, + [82] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 18, + }, + [83] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 19, + }, + [84] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 20, + }, + [85] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 21, + }, + [86] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 22, + }, + [87] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 23, + }, + [88] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 24, + }, + [89] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 25, + }, + [90] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 26, + }, + [91] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 27, + }, + [94] = { + .mask = IPIC_SIMSR_L, + .prio = 0, + .force = IPIC_SIFCR_L, + .bit = 30, + }, +}; + +static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg) +{ + return in_be32(base + (reg >> 2)); +} + +static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value) +{ + out_be32(base + (reg >> 2), value); +} + +static inline struct ipic * ipic_from_irq(unsigned int virq) +{ + return primary_ipic; +} + +static void ipic_unmask_irq(struct irq_data *d) +{ + struct ipic *ipic = ipic_from_irq(d->irq); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 temp; + + raw_spin_lock_irqsave(&ipic_lock, flags); + + temp = ipic_read(ipic->regs, ipic_info[src].mask); + temp |= (1 << (31 - ipic_info[src].bit)); + ipic_write(ipic->regs, ipic_info[src].mask, temp); + + raw_spin_unlock_irqrestore(&ipic_lock, flags); +} + +static void ipic_mask_irq(struct irq_data *d) +{ + struct ipic *ipic = ipic_from_irq(d->irq); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 temp; + + raw_spin_lock_irqsave(&ipic_lock, flags); + + temp = ipic_read(ipic->regs, ipic_info[src].mask); + temp &= ~(1 << (31 - ipic_info[src].bit)); + ipic_write(ipic->regs, ipic_info[src].mask, temp); + + /* mb() can't guarantee that masking is finished. But it does finish + * for nearly all cases. */ + mb(); + + raw_spin_unlock_irqrestore(&ipic_lock, flags); +} + +static void ipic_ack_irq(struct irq_data *d) +{ + struct ipic *ipic = ipic_from_irq(d->irq); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 temp; + + raw_spin_lock_irqsave(&ipic_lock, flags); + + temp = 1 << (31 - ipic_info[src].bit); + ipic_write(ipic->regs, ipic_info[src].ack, temp); + + /* mb() can't guarantee that ack is finished. But it does finish + * for nearly all cases. 
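+	 * If a hard guarantee were wanted, reading the register back
+	 * would force the posted write to complete; an untested sketch,
+	 * not part of the original code:
+	 *
+	 *	(void)ipic_read(ipic->regs, ipic_info[src].ack);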
*/ + mb(); + + raw_spin_unlock_irqrestore(&ipic_lock, flags); +} + +static void ipic_mask_irq_and_ack(struct irq_data *d) +{ + struct ipic *ipic = ipic_from_irq(d->irq); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 temp; + + raw_spin_lock_irqsave(&ipic_lock, flags); + + temp = ipic_read(ipic->regs, ipic_info[src].mask); + temp &= ~(1 << (31 - ipic_info[src].bit)); + ipic_write(ipic->regs, ipic_info[src].mask, temp); + + temp = 1 << (31 - ipic_info[src].bit); + ipic_write(ipic->regs, ipic_info[src].ack, temp); + + /* mb() can't guarantee that ack is finished. But it does finish + * for nearly all cases. */ + mb(); + + raw_spin_unlock_irqrestore(&ipic_lock, flags); +} + +static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) +{ + struct ipic *ipic = ipic_from_irq(d->irq); + unsigned int src = irqd_to_hwirq(d); + unsigned int vold, vnew, edibit; + + if (flow_type == IRQ_TYPE_NONE) + flow_type = IRQ_TYPE_LEVEL_LOW; + + /* ipic supports only low assertion and high-to-low change senses + */ + if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) { + printk(KERN_ERR "ipic: sense type 0x%x not supported\n", + flow_type); + return -EINVAL; + } + /* ipic supports only edge mode on external interrupts */ + if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) { + printk(KERN_ERR "ipic: edge sense not supported on internal " + "interrupts\n"); + return -EINVAL; + + } + + irqd_set_trigger_type(d, flow_type); + if (flow_type & IRQ_TYPE_LEVEL_LOW) { + __irq_set_handler_locked(d->irq, handle_level_irq); + d->chip = &ipic_level_irq_chip; + } else { + __irq_set_handler_locked(d->irq, handle_edge_irq); + d->chip = &ipic_edge_irq_chip; + } + + /* only EXT IRQ senses are programmable on ipic + * internal IRQ senses are LEVEL_LOW + */ + if (src == IPIC_IRQ_EXT0) + edibit = 15; + else + if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7) + edibit = (14 - (src - IPIC_IRQ_EXT1)); + else + return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 
0 : -EINVAL; + + vold = ipic_read(ipic->regs, IPIC_SECNR); + if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) { + vnew = vold | (1 << edibit); + } else { + vnew = vold & ~(1 << edibit); + } + if (vold != vnew) + ipic_write(ipic->regs, IPIC_SECNR, vnew); + return IRQ_SET_MASK_OK_NOCOPY; +} + +/* level interrupts and edge interrupts have different ack operations */ +static struct irq_chip ipic_level_irq_chip = { + .name = "IPIC", + .irq_unmask = ipic_unmask_irq, + .irq_mask = ipic_mask_irq, + .irq_mask_ack = ipic_mask_irq, + .irq_set_type = ipic_set_irq_type, +}; + +static struct irq_chip ipic_edge_irq_chip = { + .name = "IPIC", + .irq_unmask = ipic_unmask_irq, + .irq_mask = ipic_mask_irq, + .irq_mask_ack = ipic_mask_irq_and_ack, + .irq_ack = ipic_ack_irq, + .irq_set_type = ipic_set_irq_type, +}; + +static int ipic_host_match(struct irq_host *h, struct device_node *node) +{ + /* Exact match, unless ipic node is NULL */ + return h->of_node == NULL || h->of_node == node; +} + +static int ipic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct ipic *ipic = h->host_data; + + irq_set_chip_data(virq, ipic); + irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); + + /* Set default irq type */ + irq_set_irq_type(virq, IRQ_TYPE_NONE); + + return 0; +} + +static int ipic_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) + +{ + /* interrupt sense values coming from the device tree equal either + * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change) + */ + *out_hwirq = intspec[0]; + if (intsize > 1) + *out_flags = intspec[1]; + else + *out_flags = IRQ_TYPE_NONE; + return 0; +} + +static struct irq_host_ops ipic_host_ops = { + .match = ipic_host_match, + .map = ipic_host_map, + .xlate = ipic_host_xlate, +}; + +struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) +{ + struct ipic *ipic; + struct resource res; + u32 temp = 0, ret; + + ret = of_address_to_resource(node, 0, &res); + if (ret) + return NULL; + + ipic = kzalloc(sizeof(*ipic), GFP_KERNEL); + if (ipic == NULL) + return NULL; + + ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, + NR_IPIC_INTS, + &ipic_host_ops, 0); + if (ipic->irqhost == NULL) { + kfree(ipic); + return NULL; + } + + ipic->regs = ioremap(res.start, res.end - res.start + 1); + + ipic->irqhost->host_data = ipic; + + /* init hw */ + ipic_write(ipic->regs, IPIC_SICNR, 0x0); + + /* default priority scheme is grouped. 
If spread mode is required
+	 * configure SICFR accordingly */
+	if (flags & IPIC_SPREADMODE_GRP_A)
+		temp |= SICFR_IPSA;
+	if (flags & IPIC_SPREADMODE_GRP_B)
+		temp |= SICFR_IPSB;
+	if (flags & IPIC_SPREADMODE_GRP_C)
+		temp |= SICFR_IPSC;
+	if (flags & IPIC_SPREADMODE_GRP_D)
+		temp |= SICFR_IPSD;
+	if (flags & IPIC_SPREADMODE_MIX_A)
+		temp |= SICFR_MPSA;
+	if (flags & IPIC_SPREADMODE_MIX_B)
+		temp |= SICFR_MPSB;
+
+	ipic_write(ipic->regs, IPIC_SICFR, temp);
+
+	/* handle MCP route */
+	temp = 0;
+	if (flags & IPIC_DISABLE_MCP_OUT)
+		temp = SERCR_MCPR;
+	ipic_write(ipic->regs, IPIC_SERCR, temp);
+
+	/* handle routing of IRQ0 to MCP */
+	temp = ipic_read(ipic->regs, IPIC_SEMSR);
+
+	if (flags & IPIC_IRQ0_MCP)
+		temp |= SEMSR_SIRQ0;
+	else
+		temp &= ~SEMSR_SIRQ0;
+
+	ipic_write(ipic->regs, IPIC_SEMSR, temp);
+
+	primary_ipic = ipic;
+	irq_set_default_host(primary_ipic->irqhost);
+
+	ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
+	ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
+
+	printk("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
+			primary_ipic->regs);
+
+	return ipic;
+}
+
+int ipic_set_priority(unsigned int virq, unsigned int priority)
+{
+	struct ipic *ipic = ipic_from_irq(virq);
+	unsigned int src = virq_to_hw(virq);
+	u32 temp;
+
+	if (priority > 7)
+		return -EINVAL;
+	if (src > 127)
+		return -EINVAL;
+	if (ipic_info[src].prio == 0)
+		return -EINVAL;
+
+	temp = ipic_read(ipic->regs, ipic_info[src].prio);
+
+	if (priority < 4) {
+		temp &= ~(0x7 << (20 + (3 - priority) * 3));
+		temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
+	} else {
+		temp &= ~(0x7 << (4 + (7 - priority) * 3));
+		temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
+	}
+
+	ipic_write(ipic->regs, ipic_info[src].prio, temp);
+
+	return 0;
+}
+
+void ipic_set_highest_priority(unsigned int virq)
+{
+	struct ipic *ipic = ipic_from_irq(virq);
+	unsigned int src = virq_to_hw(virq);
+	u32 temp;
+
+	temp = ipic_read(ipic->regs, IPIC_SICFR);
+
+	/* clear the HPI field, then set it to the new source */
+	temp &= ~0x7f000000;
+	temp |= (src & 0x7f) << 24;
+
+	ipic_write(ipic->regs, IPIC_SICFR, temp);
+}
+
+void ipic_set_default_priority(void)
+{
+	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
+	ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
+	ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
+	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
+	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
+	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
+}
+
+void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq)
+{
+	struct ipic *ipic = primary_ipic;
+	u32 temp;
+
+	temp = ipic_read(ipic->regs, IPIC_SERMR);
+	temp |= (1 << (31 - mcp_irq));
+	ipic_write(ipic->regs, IPIC_SERMR, temp);
+}
+
+void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
+{
+	struct ipic *ipic = primary_ipic;
+	u32 temp;
+
+	temp = ipic_read(ipic->regs, IPIC_SERMR);
+	temp &= ~(1 << (31 - mcp_irq));	/* clear this source's enable bit */
+	ipic_write(ipic->regs, IPIC_SERMR, temp);
+}
+
+u32 ipic_get_mcp_status(void)
+{
+	return ipic_read(primary_ipic->regs, IPIC_SERMR);
+}
+
+void ipic_clear_mcp_status(u32 mask)
+{
+	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
+}
+
+/* Return an interrupt vector or NO_IRQ if no interrupt is pending.
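+ * The low seven bits of SIVCR (IPIC_SIVCR_VECTOR_MASK below) hold the
+ * vector of the highest-priority pending source. Platform code hooks
+ * this routine up as the machine's interrupt getter, typically in its
+ * define_machine() block, e.g. (as on the 83xx boards):
+ *
+ *	.get_irq	= ipic_get_irq,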
*/ +unsigned int ipic_get_irq(void) +{ + int irq; + + BUG_ON(primary_ipic == NULL); + +#define IPIC_SIVCR_VECTOR_MASK 0x7f + irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK; + + if (irq == 0) /* 0 --> no irq is pending */ + return NO_IRQ; + + return irq_linear_revmap(primary_ipic->irqhost, irq); +} + +#ifdef CONFIG_SUSPEND +static struct { + u32 sicfr; + u32 siprr[2]; + u32 simsr[2]; + u32 sicnr; + u32 smprr[2]; + u32 semsr; + u32 secnr; + u32 sermr; + u32 sercr; +} ipic_saved_state; + +static int ipic_suspend(void) +{ + struct ipic *ipic = primary_ipic; + + ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR); + ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A); + ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D); + ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H); + ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L); + ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR); + ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A); + ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B); + ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR); + ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR); + ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR); + ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR); + + if (fsl_deep_sleep()) { + /* In deep sleep, make sure there can be no + * pending interrupts, as this can cause + * problems on 831x. + */ + ipic_write(ipic->regs, IPIC_SIMSR_H, 0); + ipic_write(ipic->regs, IPIC_SIMSR_L, 0); + ipic_write(ipic->regs, IPIC_SEMSR, 0); + ipic_write(ipic->regs, IPIC_SERMR, 0); + } + + return 0; +} + +static void ipic_resume(void) +{ + struct ipic *ipic = primary_ipic; + + ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr); + ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]); + ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]); + ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]); + ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]); + ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr); + ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]); + ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]); + ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr); + ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); + ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); + ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); +} +#else +#define ipic_suspend NULL +#define ipic_resume NULL +#endif + +static struct syscore_ops ipic_syscore_ops = { + .suspend = ipic_suspend, + .resume = ipic_resume, +}; + +static int __init init_ipic_syscore(void) +{ + if (!primary_ipic || !primary_ipic->regs) + return -ENODEV; + + printk(KERN_DEBUG "Registering ipic system core operations\n"); + register_syscore_ops(&ipic_syscore_ops); + + return 0; +} + +subsys_initcall(init_ipic_syscore); diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h new file mode 100644 index 00000000..9391c57b --- /dev/null +++ b/arch/powerpc/sysdev/ipic.h @@ -0,0 +1,60 @@ +/* + * IPIC private definitions and structure. 
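+ *
+ * The register #defines below are byte offsets from the IPIC base;
+ * the accessors in ipic.c index a u32 __iomem pointer and therefore
+ * read in_be32(base + (reg >> 2)) rather than adding the raw offset.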
+ * + * Maintainer: Kumar Gala <galak@kernel.crashing.org> + * + * Copyright 2005 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __IPIC_H__ +#define __IPIC_H__ + +#include <asm/ipic.h> + +#define NR_IPIC_INTS 128 + +/* External IRQS */ +#define IPIC_IRQ_EXT0 48 +#define IPIC_IRQ_EXT1 17 +#define IPIC_IRQ_EXT7 23 + +/* Default Priority Registers */ +#define IPIC_PRIORITY_DEFAULT 0x05309770 + +/* System Global Interrupt Configuration Register */ +#define SICFR_IPSA 0x00010000 +#define SICFR_IPSB 0x00020000 +#define SICFR_IPSC 0x00040000 +#define SICFR_IPSD 0x00080000 +#define SICFR_MPSA 0x00200000 +#define SICFR_MPSB 0x00400000 + +/* System External Interrupt Mask Register */ +#define SEMSR_SIRQ0 0x00008000 + +/* System Error Control Register */ +#define SERCR_MCPR 0x00000001 + +struct ipic { + volatile u32 __iomem *regs; + + /* The remapper for this IPIC */ + struct irq_host *irqhost; +}; + +struct ipic_info { + u8 ack; /* pending register offset from base if the irq + supports ack operation */ + u8 mask; /* mask register offset from base */ + u8 prio; /* priority register offset from base */ + u8 force; /* force register offset from base */ + u8 bit; /* register bit position (as per doc) + bit mask = 1 << (31 - bit) */ + u8 prio_mask; /* priority mask value */ +}; + +#endif /* __IPIC_H__ */ diff --git a/arch/powerpc/sysdev/micropatch.c b/arch/powerpc/sysdev/micropatch.c new file mode 100644 index 00000000..c0bb76ef --- /dev/null +++ b/arch/powerpc/sysdev/micropatch.c @@ -0,0 +1,749 @@ + +/* Microcode patches for the CPM as supplied by Motorola. + * This is the one for IIC/SPI. There is a newer one that + * also relocates SMC2, but this would require additional changes + * to uart.c, so I am holding off on that for a moment. + */ +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <asm/irq.h> +#include <asm/mpc8xx.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/8xx_immap.h> +#include <asm/cpm.h> +#include <asm/cpm1.h> + +/* + * I2C/SPI relocation patch arrays. 
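+ *
+ * cpm_load_patch() below copies patch_2000[] to offset 0 of the CPM
+ * dual-port RAM and patch_2f00[] to offset 0xf00, holding RCCR at 0
+ * for the copy and writing it last, so the microcode only takes
+ * effect once it is fully in place.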
+ */ + +#ifdef CONFIG_I2C_SPI_UCODE_PATCH + +static uint patch_2000[] __initdata = { + 0x7FFFEFD9, + 0x3FFD0000, + 0x7FFB49F7, + 0x7FF90000, + 0x5FEFADF7, + 0x5F89ADF7, + 0x5FEFAFF7, + 0x5F89AFF7, + 0x3A9CFBC8, + 0xE7C0EDF0, + 0x77C1E1BB, + 0xF4DC7F1D, + 0xABAD932F, + 0x4E08FDCF, + 0x6E0FAFF8, + 0x7CCF76CF, + 0xFD1FF9CF, + 0xABF88DC6, + 0xAB5679F7, + 0xB0937383, + 0xDFCE79F7, + 0xB091E6BB, + 0xE5BBE74F, + 0xB3FA6F0F, + 0x6FFB76CE, + 0xEE0DF9CF, + 0x2BFBEFEF, + 0xCFEEF9CF, + 0x76CEAD24, + 0x90B2DF9A, + 0x7FDDD0BF, + 0x4BF847FD, + 0x7CCF76CE, + 0xCFEF7E1F, + 0x7F1D7DFD, + 0xF0B6EF71, + 0x7FC177C1, + 0xFBC86079, + 0xE722FBC8, + 0x5FFFDFFF, + 0x5FB2FFFB, + 0xFBC8F3C8, + 0x94A67F01, + 0x7F1D5F39, + 0xAFE85F5E, + 0xFFDFDF96, + 0xCB9FAF7D, + 0x5FC1AFED, + 0x8C1C5FC1, + 0xAFDD5FC3, + 0xDF9A7EFD, + 0xB0B25FB2, + 0xFFFEABAD, + 0x5FB2FFFE, + 0x5FCE600B, + 0xE6BB600B, + 0x5FCEDFC6, + 0x27FBEFDF, + 0x5FC8CFDE, + 0x3A9CE7C0, + 0xEDF0F3C8, + 0x7F0154CD, + 0x7F1D2D3D, + 0x363A7570, + 0x7E0AF1CE, + 0x37EF2E68, + 0x7FEE10EC, + 0xADF8EFDE, + 0xCFEAE52F, + 0x7D0FE12B, + 0xF1CE5F65, + 0x7E0A4DF8, + 0xCFEA5F72, + 0x7D0BEFEE, + 0xCFEA5F74, + 0xE522EFDE, + 0x5F74CFDA, + 0x0B627385, + 0xDF627E0A, + 0x30D8145B, + 0xBFFFF3C8, + 0x5FFFDFFF, + 0xA7F85F5E, + 0xBFFE7F7D, + 0x10D31450, + 0x5F36BFFF, + 0xAF785F5E, + 0xBFFDA7F8, + 0x5F36BFFE, + 0x77FD30C0, + 0x4E08FDCF, + 0xE5FF6E0F, + 0xAFF87E1F, + 0x7E0FFD1F, + 0xF1CF5F1B, + 0xABF80D5E, + 0x5F5EFFEF, + 0x79F730A2, + 0xAFDD5F34, + 0x47F85F34, + 0xAFED7FDD, + 0x50B24978, + 0x47FD7F1D, + 0x7DFD70AD, + 0xEF717EC1, + 0x6BA47F01, + 0x2D267EFD, + 0x30DE5F5E, + 0xFFFD5F5E, + 0xFFEF5F5E, + 0xFFDF0CA0, + 0xAFED0A9E, + 0xAFDD0C3A, + 0x5F3AAFBD, + 0x7FBDB082, + 0x5F8247F8 +}; + +static uint patch_2f00[] __initdata = { + 0x3E303430, + 0x34343737, + 0xABF7BF9B, + 0x994B4FBD, + 0xBD599493, + 0x349FFF37, + 0xFB9B177D, + 0xD9936956, + 0xBBFDD697, + 0xBDD2FD11, + 0x31DB9BB3, + 0x63139637, + 0x93733693, + 0x193137F7, + 0x331737AF, + 0x7BB9B999, + 0xBB197957, + 0x7FDFD3D5, + 0x73B773F7, + 0x37933B99, + 0x1D115316, + 0x99315315, + 0x31694BF4, + 0xFBDBD359, + 0x31497353, + 0x76956D69, + 0x7B9D9693, + 0x13131979, + 0x79376935 +}; +#endif + +/* + * I2C/SPI/SMC1 relocation patch arrays. 
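+ *
+ * This variant additionally loads patch_2e00[] at dual-port RAM offset
+ * 0xe00 and relocates the SMC1 parameter area to 0x1fc0 (smc_rpbase),
+ * setting RCCR to 3 instead of 1 once the copy is complete.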
+ */ + +#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH + +static uint patch_2000[] __initdata = { + 0x3fff0000, + 0x3ffd0000, + 0x3ffb0000, + 0x3ff90000, + 0x5f13eff8, + 0x5eb5eff8, + 0x5f88adf7, + 0x5fefadf7, + 0x3a9cfbc8, + 0x77cae1bb, + 0xf4de7fad, + 0xabae9330, + 0x4e08fdcf, + 0x6e0faff8, + 0x7ccf76cf, + 0xfdaff9cf, + 0xabf88dc8, + 0xab5879f7, + 0xb0925d8d, + 0xdfd079f7, + 0xb090e6bb, + 0xe5bbe74f, + 0x9e046f0f, + 0x6ffb76ce, + 0xee0cf9cf, + 0x2bfbefef, + 0xcfeef9cf, + 0x76cead23, + 0x90b3df99, + 0x7fddd0c1, + 0x4bf847fd, + 0x7ccf76ce, + 0xcfef77ca, + 0x7eaf7fad, + 0x7dfdf0b7, + 0xef7a7fca, + 0x77cafbc8, + 0x6079e722, + 0xfbc85fff, + 0xdfff5fb3, + 0xfffbfbc8, + 0xf3c894a5, + 0xe7c9edf9, + 0x7f9a7fad, + 0x5f36afe8, + 0x5f5bffdf, + 0xdf95cb9e, + 0xaf7d5fc3, + 0xafed8c1b, + 0x5fc3afdd, + 0x5fc5df99, + 0x7efdb0b3, + 0x5fb3fffe, + 0xabae5fb3, + 0xfffe5fd0, + 0x600be6bb, + 0x600b5fd0, + 0xdfc827fb, + 0xefdf5fca, + 0xcfde3a9c, + 0xe7c9edf9, + 0xf3c87f9e, + 0x54ca7fed, + 0x2d3a3637, + 0x756f7e9a, + 0xf1ce37ef, + 0x2e677fee, + 0x10ebadf8, + 0xefdecfea, + 0xe52f7d9f, + 0xe12bf1ce, + 0x5f647e9a, + 0x4df8cfea, + 0x5f717d9b, + 0xefeecfea, + 0x5f73e522, + 0xefde5f73, + 0xcfda0b61, + 0x5d8fdf61, + 0xe7c9edf9, + 0x7e9a30d5, + 0x1458bfff, + 0xf3c85fff, + 0xdfffa7f8, + 0x5f5bbffe, + 0x7f7d10d0, + 0x144d5f33, + 0xbfffaf78, + 0x5f5bbffd, + 0xa7f85f33, + 0xbffe77fd, + 0x30bd4e08, + 0xfdcfe5ff, + 0x6e0faff8, + 0x7eef7e9f, + 0xfdeff1cf, + 0x5f17abf8, + 0x0d5b5f5b, + 0xffef79f7, + 0x309eafdd, + 0x5f3147f8, + 0x5f31afed, + 0x7fdd50af, + 0x497847fd, + 0x7f9e7fed, + 0x7dfd70a9, + 0xef7e7ece, + 0x6ba07f9e, + 0x2d227efd, + 0x30db5f5b, + 0xfffd5f5b, + 0xffef5f5b, + 0xffdf0c9c, + 0xafed0a9a, + 0xafdd0c37, + 0x5f37afbd, + 0x7fbdb081, + 0x5f8147f8, + 0x3a11e710, + 0xedf0ccdd, + 0xf3186d0a, + 0x7f0e5f06, + 0x7fedbb38, + 0x3afe7468, + 0x7fedf4fc, + 0x8ffbb951, + 0xb85f77fd, + 0xb0df5ddd, + 0xdefe7fed, + 0x90e1e74d, + 0x6f0dcbf7, + 0xe7decfed, + 0xcb74cfed, + 0xcfeddf6d, + 0x91714f74, + 0x5dd2deef, + 0x9e04e7df, + 0xefbb6ffb, + 0xe7ef7f0e, + 0x9e097fed, + 0xebdbeffa, + 0xeb54affb, + 0x7fea90d7, + 0x7e0cf0c3, + 0xbffff318, + 0x5fffdfff, + 0xac59efea, + 0x7fce1ee5, + 0xe2ff5ee1, + 0xaffbe2ff, + 0x5ee3affb, + 0xf9cc7d0f, + 0xaef8770f, + 0x7d0fb0c6, + 0xeffbbfff, + 0xcfef5ede, + 0x7d0fbfff, + 0x5ede4cf8, + 0x7fddd0bf, + 0x49f847fd, + 0x7efdf0bb, + 0x7fedfffd, + 0x7dfdf0b7, + 0xef7e7e1e, + 0x5ede7f0e, + 0x3a11e710, + 0xedf0ccab, + 0xfb18ad2e, + 0x1ea9bbb8, + 0x74283b7e, + 0x73c2e4bb, + 0x2ada4fb8, + 0xdc21e4bb, + 0xb2a1ffbf, + 0x5e2c43f8, + 0xfc87e1bb, + 0xe74ffd91, + 0x6f0f4fe8, + 0xc7ba32e2, + 0xf396efeb, + 0x600b4f78, + 0xe5bb760b, + 0x53acaef8, + 0x4ef88b0e, + 0xcfef9e09, + 0xabf8751f, + 0xefef5bac, + 0x741f4fe8, + 0x751e760d, + 0x7fdbf081, + 0x741cafce, + 0xefcc7fce, + 0x751e70ac, + 0x741ce7bb, + 0x3372cfed, + 0xafdbefeb, + 0xe5bb760b, + 0x53f2aef8, + 0xafe8e7eb, + 0x4bf8771e, + 0x7e247fed, + 0x4fcbe2cc, + 0x7fbc30a9, + 0x7b0f7a0f, + 0x34d577fd, + 0x308b5db7, + 0xde553e5f, + 0xaf78741f, + 0x741f30f0, + 0xcfef5e2c, + 0x741f3eac, + 0xafb8771e, + 0x5e677fed, + 0x0bd3e2cc, + 0x741ccfec, + 0xe5ca53cd, + 0x6fcb4f74, + 0x5dadde4b, + 0x2ab63d38, + 0x4bb3de30, + 0x751f741c, + 0x6c42effa, + 0xefea7fce, + 0x6ffc30be, + 0xefec3fca, + 0x30b3de2e, + 0xadf85d9e, + 0xaf7daefd, + 0x5d9ede2e, + 0x5d9eafdd, + 0x761f10ac, + 0x1da07efd, + 0x30adfffe, + 0x4908fb18, + 0x5fffdfff, + 0xafbb709b, + 0x4ef85e67, + 0xadf814ad, + 0x7a0f70ad, + 0xcfef50ad, + 0x7a0fde30, + 0x5da0afed, + 0x3c12780f, + 0xefef780f, + 0xefef790f, + 0xa7f85e0f, + 0xffef790f, + 
0xefef790f, + 0x14adde2e, + 0x5d9eadfd, + 0x5e2dfffb, + 0xe79addfd, + 0xeff96079, + 0x607ae79a, + 0xddfceff9, + 0x60795dff, + 0x607acfef, + 0xefefefdf, + 0xefbfef7f, + 0xeeffedff, + 0xebffe7ff, + 0xafefafdf, + 0xafbfaf7f, + 0xaeffadff, + 0xabffa7ff, + 0x6fef6fdf, + 0x6fbf6f7f, + 0x6eff6dff, + 0x6bff67ff, + 0x2fef2fdf, + 0x2fbf2f7f, + 0x2eff2dff, + 0x2bff27ff, + 0x4e08fd1f, + 0xe5ff6e0f, + 0xaff87eef, + 0x7e0ffdef, + 0xf11f6079, + 0xabf8f542, + 0x7e0af11c, + 0x37cfae3a, + 0x7fec90be, + 0xadf8efdc, + 0xcfeae52f, + 0x7d0fe12b, + 0xf11c6079, + 0x7e0a4df8, + 0xcfea5dc4, + 0x7d0befec, + 0xcfea5dc6, + 0xe522efdc, + 0x5dc6cfda, + 0x4e08fd1f, + 0x6e0faff8, + 0x7c1f761f, + 0xfdeff91f, + 0x6079abf8, + 0x761cee24, + 0xf91f2bfb, + 0xefefcfec, + 0xf91f6079, + 0x761c27fb, + 0xefdf5da7, + 0xcfdc7fdd, + 0xd09c4bf8, + 0x47fd7c1f, + 0x761ccfcf, + 0x7eef7fed, + 0x7dfdf093, + 0xef7e7f1e, + 0x771efb18, + 0x6079e722, + 0xe6bbe5bb, + 0xae0ae5bb, + 0x600bae85, + 0xe2bbe2bb, + 0xe2bbe2bb, + 0xaf02e2bb, + 0xe2bb2ff9, + 0x6079e2bb +}; + +static uint patch_2f00[] __initdata = { + 0x30303030, + 0x3e3e3434, + 0xabbf9b99, + 0x4b4fbdbd, + 0x59949334, + 0x9fff37fb, + 0x9b177dd9, + 0x936956bb, + 0xfbdd697b, + 0xdd2fd113, + 0x1db9f7bb, + 0x36313963, + 0x79373369, + 0x3193137f, + 0x7331737a, + 0xf7bb9b99, + 0x9bb19795, + 0x77fdfd3d, + 0x573b773f, + 0x737933f7, + 0xb991d115, + 0x31699315, + 0x31531694, + 0xbf4fbdbd, + 0x35931497, + 0x35376956, + 0xbd697b9d, + 0x96931313, + 0x19797937, + 0x6935af78, + 0xb9b3baa3, + 0xb8788683, + 0x368f78f7, + 0x87778733, + 0x3ffffb3b, + 0x8e8f78b8, + 0x1d118e13, + 0xf3ff3f8b, + 0x6bd8e173, + 0xd1366856, + 0x68d1687b, + 0x3daf78b8, + 0x3a3a3f87, + 0x8f81378f, + 0xf876f887, + 0x77fd8778, + 0x737de8d6, + 0xbbf8bfff, + 0xd8df87f7, + 0xfd876f7b, + 0x8bfff8bd, + 0x8683387d, + 0xb873d87b, + 0x3b8fd7f8, + 0xf7338883, + 0xbb8ee1f8, + 0xef837377, + 0x3337b836, + 0x817d11f8, + 0x7378b878, + 0xd3368b7d, + 0xed731b7d, + 0x833731f3, + 0xf22f3f23 +}; + +static uint patch_2e00[] __initdata = { + 0x27eeeeee, + 0xeeeeeeee, + 0xeeeeeeee, + 0xeeeeeeee, + 0xee4bf4fb, + 0xdbd259bb, + 0x1979577f, + 0xdfd2d573, + 0xb773f737, + 0x4b4fbdbd, + 0x25b9b177, + 0xd2d17376, + 0x956bbfdd, + 0x697bdd2f, + 0xff9f79ff, + 0xff9ff22f +}; +#endif + +/* + * USB SOF patch arrays. + */ + +#ifdef CONFIG_USB_SOF_UCODE_PATCH + +static uint patch_2000[] __initdata = { + 0x7fff0000, + 0x7ffd0000, + 0x7ffb0000, + 0x49f7ba5b, + 0xba383ffb, + 0xf9b8b46d, + 0xe5ab4e07, + 0xaf77bffe, + 0x3f7bbf79, + 0xba5bba38, + 0xe7676076, + 0x60750000 +}; + +static uint patch_2f00[] __initdata = { + 0x3030304c, + 0xcab9e441, + 0xa1aaf220 +}; +#endif + +void __init cpm_load_patch(cpm8xx_t *cp) +{ + volatile uint *dp; /* Dual-ported RAM. 
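+				 * The copy loops below advance it one
+				 * 32-bit word at a time.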
*/ + volatile cpm8xx_t *commproc; +#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \ + defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH) + volatile iic_t *iip; + volatile struct spi_pram *spp; +#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH + volatile smc_uart_t *smp; +#endif +#endif + int i; + + commproc = cp; + +#ifdef CONFIG_USB_SOF_UCODE_PATCH + commproc->cp_rccr = 0; + + dp = (uint *)(commproc->cp_dpmem); + for (i=0; i<(sizeof(patch_2000)/4); i++) + *dp++ = patch_2000[i]; + + dp = (uint *)&(commproc->cp_dpmem[0x0f00]); + for (i=0; i<(sizeof(patch_2f00)/4); i++) + *dp++ = patch_2f00[i]; + + commproc->cp_rccr = 0x0009; + + printk("USB SOF microcode patch installed\n"); +#endif /* CONFIG_USB_SOF_UCODE_PATCH */ + +#if defined(CONFIG_I2C_SPI_UCODE_PATCH) || \ + defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH) + + commproc->cp_rccr = 0; + + dp = (uint *)(commproc->cp_dpmem); + for (i=0; i<(sizeof(patch_2000)/4); i++) + *dp++ = patch_2000[i]; + + dp = (uint *)&(commproc->cp_dpmem[0x0f00]); + for (i=0; i<(sizeof(patch_2f00)/4); i++) + *dp++ = patch_2f00[i]; + + iip = (iic_t *)&commproc->cp_dparam[PROFF_IIC]; +# define RPBASE 0x0500 + iip->iic_rpbase = RPBASE; + + /* Put SPI above the IIC, also 32-byte aligned. + */ + i = (RPBASE + sizeof(iic_t) + 31) & ~31; + spp = (struct spi_pram *)&commproc->cp_dparam[PROFF_SPI]; + spp->rpbase = i; + +# if defined(CONFIG_I2C_SPI_UCODE_PATCH) + commproc->cp_cpmcr1 = 0x802a; + commproc->cp_cpmcr2 = 0x8028; + commproc->cp_cpmcr3 = 0x802e; + commproc->cp_cpmcr4 = 0x802c; + commproc->cp_rccr = 1; + + printk("I2C/SPI microcode patch installed.\n"); +# endif /* CONFIG_I2C_SPI_UCODE_PATCH */ + +# if defined(CONFIG_I2C_SPI_SMC1_UCODE_PATCH) + + dp = (uint *)&(commproc->cp_dpmem[0x0e00]); + for (i=0; i<(sizeof(patch_2e00)/4); i++) + *dp++ = patch_2e00[i]; + + commproc->cp_cpmcr1 = 0x8080; + commproc->cp_cpmcr2 = 0x808a; + commproc->cp_cpmcr3 = 0x8028; + commproc->cp_cpmcr4 = 0x802a; + commproc->cp_rccr = 3; + + smp = (smc_uart_t *)&commproc->cp_dparam[PROFF_SMC1]; + smp->smc_rpbase = 0x1FC0; + + printk("I2C/SPI/SMC1 microcode patch installed.\n"); +# endif /* CONFIG_I2C_SPI_SMC1_UCODE_PATCH) */ + +#endif /* some variation of the I2C/SPI patch was selected */ +} + +/* + * Take this entire routine out, since no one calls it and its + * logic is suspect. + */ + +#if 0 +void +verify_patch(volatile immap_t *immr) +{ + volatile uint *dp; + volatile cpm8xx_t *commproc; + int i; + + commproc = (cpm8xx_t *)&immr->im_cpm; + + printk("cp_rccr %x\n", commproc->cp_rccr); + commproc->cp_rccr = 0; + + dp = (uint *)(commproc->cp_dpmem); + for (i=0; i<(sizeof(patch_2000)/4); i++) + if (*dp++ != patch_2000[i]) { + printk("patch_2000 bad at %d\n", i); + dp--; + printk("found 0x%X, wanted 0x%X\n", *dp, patch_2000[i]); + break; + } + + dp = (uint *)&(commproc->cp_dpmem[0x0f00]); + for (i=0; i<(sizeof(patch_2f00)/4); i++) + if (*dp++ != patch_2f00[i]) { + printk("patch_2f00 bad at %d\n", i); + dp--; + printk("found 0x%X, wanted 0x%X\n", *dp, patch_2f00[i]); + break; + } + + commproc->cp_rccr = 0x0009; +} +#endif diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c new file mode 100644 index 00000000..ddc877a3 --- /dev/null +++ b/arch/powerpc/sysdev/mmio_nvram.c @@ -0,0 +1,158 @@ +/* + * memory mapped NVRAM + * + * (C) Copyright IBM Corp. 
2005 + * + * Authors : Utz Bacher <utz.bacher@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/machdep.h> +#include <asm/nvram.h> +#include <asm/prom.h> + +static void __iomem *mmio_nvram_start; +static long mmio_nvram_len; +static DEFINE_SPINLOCK(mmio_nvram_lock); + +static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index) +{ + unsigned long flags; + + if (*index >= mmio_nvram_len) + return 0; + if (*index + count > mmio_nvram_len) + count = mmio_nvram_len - *index; + + spin_lock_irqsave(&mmio_nvram_lock, flags); + + memcpy_fromio(buf, mmio_nvram_start + *index, count); + + spin_unlock_irqrestore(&mmio_nvram_lock, flags); + + *index += count; + return count; +} + +static unsigned char mmio_nvram_read_val(int addr) +{ + unsigned long flags; + unsigned char val; + + if (addr >= mmio_nvram_len) + return 0xff; + + spin_lock_irqsave(&mmio_nvram_lock, flags); + + val = ioread8(mmio_nvram_start + addr); + + spin_unlock_irqrestore(&mmio_nvram_lock, flags); + + return val; +} + +static ssize_t mmio_nvram_write(char *buf, size_t count, loff_t *index) +{ + unsigned long flags; + + if (*index >= mmio_nvram_len) + return 0; + if (*index + count > mmio_nvram_len) + count = mmio_nvram_len - *index; + + spin_lock_irqsave(&mmio_nvram_lock, flags); + + memcpy_toio(mmio_nvram_start + *index, buf, count); + + spin_unlock_irqrestore(&mmio_nvram_lock, flags); + + *index += count; + return count; +} + +void mmio_nvram_write_val(int addr, unsigned char val) +{ + unsigned long flags; + + if (addr < mmio_nvram_len) { + spin_lock_irqsave(&mmio_nvram_lock, flags); + + iowrite8(val, mmio_nvram_start + addr); + + spin_unlock_irqrestore(&mmio_nvram_lock, flags); + } +} + +static ssize_t mmio_nvram_get_size(void) +{ + return mmio_nvram_len; +} + +int __init mmio_nvram_init(void) +{ + struct device_node *nvram_node; + unsigned long nvram_addr; + struct resource r; + int ret; + + nvram_node = of_find_node_by_type(NULL, "nvram"); + if (!nvram_node) + nvram_node = of_find_compatible_node(NULL, NULL, "nvram"); + if (!nvram_node) { + printk(KERN_WARNING "nvram: no node found in device-tree\n"); + return -ENODEV; + } + + ret = of_address_to_resource(nvram_node, 0, &r); + if (ret) { + printk(KERN_WARNING "nvram: failed to get address (err %d)\n", + ret); + goto out; + } + nvram_addr = r.start; + mmio_nvram_len = r.end - r.start + 1; + if ( (!mmio_nvram_len) || (!nvram_addr) ) { + printk(KERN_WARNING "nvram: address or length is 0\n"); + ret = -EIO; + goto out; + } + + mmio_nvram_start = ioremap(nvram_addr, mmio_nvram_len); + if (!mmio_nvram_start) { + printk(KERN_WARNING "nvram: failed to ioremap\n"); + ret = -ENOMEM; + goto out; + } + + printk(KERN_INFO "mmio NVRAM, %luk at 0x%lx mapped to %p\n", + mmio_nvram_len 
>> 10, nvram_addr, mmio_nvram_start); + + ppc_md.nvram_read_val = mmio_nvram_read_val; + ppc_md.nvram_write_val = mmio_nvram_write_val; + ppc_md.nvram_read = mmio_nvram_read; + ppc_md.nvram_write = mmio_nvram_write; + ppc_md.nvram_size = mmio_nvram_get_size; + +out: + of_node_put(nvram_node); + return ret; +} diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c new file mode 100644 index 00000000..34e12f99 --- /dev/null +++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c @@ -0,0 +1,33 @@ +/** + * mpc5xxx_get_bus_frequency - Find the bus frequency for a device + * @node: device node + * + * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx), + * or 0 if the bus frequency cannot be found. + */ + +#include <linux/kernel.h> +#include <linux/of_platform.h> + +unsigned int +mpc5xxx_get_bus_frequency(struct device_node *node) +{ + struct device_node *np; + const unsigned int *p_bus_freq = NULL; + + of_node_get(node); + while (node) { + p_bus_freq = of_get_property(node, "bus-frequency", NULL); + if (p_bus_freq) + break; + + np = of_get_parent(node); + of_node_put(node); + node = np; + } + if (node) + of_node_put(node); + + return p_bus_freq ? *p_bus_freq : 0; +} +EXPORT_SYMBOL(mpc5xxx_get_bus_frequency); diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c new file mode 100644 index 00000000..20924f22 --- /dev/null +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -0,0 +1,187 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/stddef.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/irq.h> +#include <linux/dma-mapping.h> +#include <asm/prom.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/8xx_immap.h> + +#include "mpc8xx_pic.h" + + +#define PIC_VEC_SPURRIOUS 15 + +extern int cpm_get_irq(struct pt_regs *regs); + +static struct irq_host *mpc8xx_pic_host; +#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) +static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +static sysconf8xx_t __iomem *siu_reg; + +int cpm_get_irq(struct pt_regs *regs); + +static void mpc8xx_unmask_irq(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); + + bit = irq_nr & 0x1f; + word = irq_nr >> 5; + + ppc_cached_irq_mask[word] |= (1 << (31-bit)); + out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); +} + +static void mpc8xx_mask_irq(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); + + bit = irq_nr & 0x1f; + word = irq_nr >> 5; + + ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); + out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); +} + +static void mpc8xx_ack(struct irq_data *d) +{ + int bit; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); + + bit = irq_nr & 0x1f; + out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); +} + +static void mpc8xx_end_irq(struct irq_data *d) +{ + int bit, word; + unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); + + bit = irq_nr & 0x1f; + word = irq_nr >> 5; + + ppc_cached_irq_mask[word] |= (1 << (31-bit)); + out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); +} + +static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) +{ + if (flow_type & IRQ_TYPE_EDGE_FALLING) { + irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d); + unsigned int siel = in_be32(&siu_reg->sc_siel); + + /* only external IRQ senses are programmable */ + if ((hw & 1) == 0) { + siel |= (0x80000000 >> hw); + out_be32(&siu_reg->sc_siel, siel); + __irq_set_handler_locked(d->irq, 
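+ /* falling-edge externals need the edge flow handler (map defaults to level) */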
handle_edge_irq); + } + } + return 0; +} + +static struct irq_chip mpc8xx_pic = { + .name = "MPC8XX SIU", + .irq_unmask = mpc8xx_unmask_irq, + .irq_mask = mpc8xx_mask_irq, + .irq_ack = mpc8xx_ack, + .irq_eoi = mpc8xx_end_irq, + .irq_set_type = mpc8xx_set_irq_type, +}; + +unsigned int mpc8xx_get_irq(void) +{ + int irq; + + /* For MPC8xx, read the SIVEC register and shift the bits down + * to get the irq number. + */ + irq = in_be32(&siu_reg->sc_sivec) >> 26; + + if (irq == PIC_VEC_SPURRIOUS) + irq = NO_IRQ; + + return irq_linear_revmap(mpc8xx_pic_host, irq); + +} + +static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); + + /* Set default irq handle */ + irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); + return 0; +} + + +static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) +{ + static unsigned char map_pic_senses[4] = { + IRQ_TYPE_EDGE_RISING, + IRQ_TYPE_LEVEL_LOW, + IRQ_TYPE_LEVEL_HIGH, + IRQ_TYPE_EDGE_FALLING, + }; + + *out_hwirq = intspec[0]; + if (intsize > 1 && intspec[1] < 4) + *out_flags = map_pic_senses[intspec[1]]; + else + *out_flags = IRQ_TYPE_NONE; + + return 0; +} + + +static struct irq_host_ops mpc8xx_pic_host_ops = { + .map = mpc8xx_pic_host_map, + .xlate = mpc8xx_pic_host_xlate, +}; + +int mpc8xx_pic_init(void) +{ + struct resource res; + struct device_node *np; + int ret; + + np = of_find_compatible_node(NULL, NULL, "fsl,pq1-pic"); + if (np == NULL) + np = of_find_node_by_type(NULL, "mpc8xx-pic"); + if (np == NULL) { + printk(KERN_ERR "Could not find fsl,pq1-pic node\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(np, 0, &res); + if (ret) + goto out; + + siu_reg = ioremap(res.start, res.end - res.start + 1); + if (siu_reg == NULL) { + ret = -EINVAL; + goto out; + } + + mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, + 64, &mpc8xx_pic_host_ops, 64); + if (mpc8xx_pic_host == NULL) { + printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); + ret = -ENOMEM; + goto out; + } + return 0; + +out: + of_node_put(np); + return ret; +} diff --git a/arch/powerpc/sysdev/mpc8xx_pic.h b/arch/powerpc/sysdev/mpc8xx_pic.h new file mode 100644 index 00000000..9fe00eeb --- /dev/null +++ b/arch/powerpc/sysdev/mpc8xx_pic.h @@ -0,0 +1,19 @@ +#ifndef _PPC_KERNEL_MPC8xx_H +#define _PPC_KERNEL_MPC8xx_H + +#include <linux/irq.h> +#include <linux/interrupt.h> + +int mpc8xx_pic_init(void); +unsigned int mpc8xx_get_irq(void); + +/* + * Some internal interrupt registers use an 8-bit mask for the interrupt + * level instead of a number. + */ +static inline uint mk_int_int_mask(uint mask) +{ + return (1 << (7 - (mask/2))); +} + +#endif /* _PPC_KERNEL_PPC8xx_H */ diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c new file mode 100644 index 00000000..fb4963ab --- /dev/null +++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c @@ -0,0 +1,395 @@ +/* + * GPIOs on MPC512x/8349/8572/8610 and compatible + * + * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/slab.h> +#include <linux/irq.h> + +#define MPC8XXX_GPIO_PINS 32 + +#define GPIO_DIR 0x00 +#define GPIO_ODR 0x04 +#define GPIO_DAT 0x08 +#define GPIO_IER 0x0c +#define GPIO_IMR 0x10 +#define GPIO_ICR 0x14 +#define GPIO_ICR2 0x18 + +struct mpc8xxx_gpio_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; + + /* + * shadowed data register to be able to clear/set output pins in + * open drain mode safely + */ + u32 data; + struct irq_host *irq; + void *of_dev_id_data; +}; + +static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) +{ + return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio); +} + +static inline struct mpc8xxx_gpio_chip * +to_mpc8xxx_gpio_chip(struct of_mm_gpio_chip *mm) +{ + return container_of(mm, struct mpc8xxx_gpio_chip, mm_gc); +} + +static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + + mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); +} + +/* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs + * defined as output cannot be determined by reading GPDAT register, + * so we use shadow data register instead. The status of input pins + * is determined by reading GPDAT register. + */ +static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + u32 val; + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + + val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); + + return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); +} + +static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); + + return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio); +} + +static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + unsigned long flags; + + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + + if (val) + mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio); + else + mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio); + + out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data); + + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); +} + +static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + unsigned long flags; + + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + + clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); + + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + + return 0; +} + +static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + unsigned long flags; + + mpc8xxx_gpio_set(gc, gpio, val); + + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + + setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio)); + + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + + return 0; +} + +static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) +{ + struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); + struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); + + if (mpc8xxx_gc->irq && offset < 
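+ /* pins are numbered 0..31 MSB-first, e.g. mpc8xxx_gpio2mask(0) == 0x80000000 */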
MPC8XXX_GPIO_PINS) + return irq_create_mapping(mpc8xxx_gc->irq, offset); + else + return -ENXIO; +} + +static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; + unsigned int mask; + + mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR); + if (mask) + generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, + 32 - ffs(mask))); +} + +static void mpc8xxx_irq_unmask(struct irq_data *d) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; + unsigned long flags; + + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + + setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); + + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); +} + +static void mpc8xxx_irq_mask(struct irq_data *d) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; + unsigned long flags; + + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + + clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); + + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); +} + +static void mpc8xxx_irq_ack(struct irq_data *d) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; + + out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); +} + +static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; + unsigned long flags; + + switch (flow_type) { + case IRQ_TYPE_EDGE_FALLING: + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + setbits32(mm->regs + GPIO_ICR, + mpc8xxx_gpio2mask(irqd_to_hwirq(d))); + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + break; + + case IRQ_TYPE_EDGE_BOTH: + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + clrbits32(mm->regs + GPIO_ICR, + mpc8xxx_gpio2mask(irqd_to_hwirq(d))); + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); + struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; + unsigned long gpio = irqd_to_hwirq(d); + void __iomem *reg; + unsigned int shift; + unsigned long flags; + + if (gpio < 16) { + reg = mm->regs + GPIO_ICR; + shift = (15 - gpio) * 2; + } else { + reg = mm->regs + GPIO_ICR2; + shift = (15 - (gpio % 16)) * 2; + } + + switch (flow_type) { + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_LEVEL_LOW: + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + clrsetbits_be32(reg, 3 << shift, 2 << shift); + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + break; + + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_LEVEL_HIGH: + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + clrsetbits_be32(reg, 3 << shift, 1 << shift); + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + break; + + case IRQ_TYPE_EDGE_BOTH: + spin_lock_irqsave(&mpc8xxx_gc->lock, flags); + clrbits32(reg, 3 << shift); + spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static struct irq_chip mpc8xxx_irq_chip = { + .name = "mpc8xxx-gpio", + .irq_unmask = mpc8xxx_irq_unmask, + .irq_mask = mpc8xxx_irq_mask, + .irq_ack = mpc8xxx_irq_ack, + 
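+ /* .irq_set_type below is replaced with mpc512x_irq_set_type at map time
+ * on MPC512x parts (see mpc8xxx_gpio_irq_map and mpc8xxx_gpio_ids) */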
.irq_set_type = mpc8xxx_irq_set_type, +}; + +static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; + + if (mpc8xxx_gc->of_dev_id_data) + mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; + + irq_set_chip_data(virq, h->host_data); + irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); + irq_set_irq_type(virq, IRQ_TYPE_NONE); + + return 0; +} + +static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, + unsigned int *out_flags) + +{ + /* interrupt sense values coming from the device tree equal either + * EDGE_FALLING or EDGE_BOTH + */ + *out_hwirq = intspec[0]; + *out_flags = intspec[1]; + + return 0; +} + +static struct irq_host_ops mpc8xxx_gpio_irq_ops = { + .map = mpc8xxx_gpio_irq_map, + .xlate = mpc8xxx_gpio_irq_xlate, +}; + +static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { + { .compatible = "fsl,mpc8349-gpio", }, + { .compatible = "fsl,mpc8572-gpio", }, + { .compatible = "fsl,mpc8610-gpio", }, + { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, }, + { .compatible = "fsl,qoriq-gpio", }, + {} +}; + +static void __init mpc8xxx_add_controller(struct device_node *np) +{ + struct mpc8xxx_gpio_chip *mpc8xxx_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + const struct of_device_id *id; + unsigned hwirq; + int ret; + + mpc8xxx_gc = kzalloc(sizeof(*mpc8xxx_gc), GFP_KERNEL); + if (!mpc8xxx_gc) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&mpc8xxx_gc->lock); + + mm_gc = &mpc8xxx_gc->mm_gc; + gc = &mm_gc->gc; + + mm_gc->save_regs = mpc8xxx_gpio_save_regs; + gc->ngpio = MPC8XXX_GPIO_PINS; + gc->direction_input = mpc8xxx_gpio_dir_in; + gc->direction_output = mpc8xxx_gpio_dir_out; + if (of_device_is_compatible(np, "fsl,mpc8572-gpio")) + gc->get = mpc8572_gpio_get; + else + gc->get = mpc8xxx_gpio_get; + gc->set = mpc8xxx_gpio_set; + gc->to_irq = mpc8xxx_gpio_to_irq; + + ret = of_mm_gpiochip_add(np, mm_gc); + if (ret) + goto err; + + hwirq = irq_of_parse_and_map(np, 0); + if (hwirq == NO_IRQ) + goto skip_irq; + + mpc8xxx_gc->irq = + irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS, + &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS); + if (!mpc8xxx_gc->irq) + goto skip_irq; + + id = of_match_node(mpc8xxx_gpio_ids, np); + if (id) + mpc8xxx_gc->of_dev_id_data = id->data; + + mpc8xxx_gc->irq->host_data = mpc8xxx_gc; + + /* ack and mask all irqs */ + out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); + out_be32(mm_gc->regs + GPIO_IMR, 0); + + irq_set_handler_data(hwirq, mpc8xxx_gc); + irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); + +skip_irq: + return; + +err: + pr_err("%s: registration failed with status %d\n", + np->full_name, ret); + kfree(mpc8xxx_gc); + + return; +} + +static int __init mpc8xxx_add_gpiochips(void) +{ + struct device_node *np; + + for_each_matching_node(np, mpc8xxx_gpio_ids) + mpc8xxx_add_controller(np); + + return 0; +} +arch_initcall(mpc8xxx_add_gpiochips); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c new file mode 100644 index 00000000..58d7a534 --- /dev/null +++ b/arch/powerpc/sysdev/mpic.c @@ -0,0 +1,1872 @@ +/* + * arch/powerpc/kernel/mpic.c + * + * Driver for interrupt controllers following the OpenPIC standard, the + * common implementation beeing IBM's MPIC. This driver also can deal + * with various broken implementations of this HW. 
+ * + * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. + * Copyright 2010-2011 Freescale Semiconductor, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +#undef DEBUG +#undef DEBUG_IPI +#undef DEBUG_IRQ +#undef DEBUG_LOW + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/bootmem.h> +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/syscore_ops.h> +#include <linux/ratelimit.h> + +#include <asm/ptrace.h> +#include <asm/signal.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/irq.h> +#include <asm/machdep.h> +#include <asm/mpic.h> +#include <asm/smp.h> + +#include "mpic.h" + +#ifdef DEBUG +#define DBG(fmt...) printk(fmt) +#else +#define DBG(fmt...) +#endif + +static struct mpic *mpics; +static struct mpic *mpic_primary; +static DEFINE_RAW_SPINLOCK(mpic_lock); + +#ifdef CONFIG_PPC32 /* XXX for now */ +#ifdef CONFIG_IRQ_ALL_CPUS +#define distribute_irqs (1) +#else +#define distribute_irqs (0) +#endif +#endif + +#ifdef CONFIG_MPIC_WEIRD +static u32 mpic_infos[][MPIC_IDX_END] = { + [0] = { /* Original OpenPIC compatible MPIC */ + MPIC_GREG_BASE, + MPIC_GREG_FEATURE_0, + MPIC_GREG_GLOBAL_CONF_0, + MPIC_GREG_VENDOR_ID, + MPIC_GREG_IPI_VECTOR_PRI_0, + MPIC_GREG_IPI_STRIDE, + MPIC_GREG_SPURIOUS, + MPIC_GREG_TIMER_FREQ, + + MPIC_TIMER_BASE, + MPIC_TIMER_STRIDE, + MPIC_TIMER_CURRENT_CNT, + MPIC_TIMER_BASE_CNT, + MPIC_TIMER_VECTOR_PRI, + MPIC_TIMER_DESTINATION, + + MPIC_CPU_BASE, + MPIC_CPU_STRIDE, + MPIC_CPU_IPI_DISPATCH_0, + MPIC_CPU_IPI_DISPATCH_STRIDE, + MPIC_CPU_CURRENT_TASK_PRI, + MPIC_CPU_WHOAMI, + MPIC_CPU_INTACK, + MPIC_CPU_EOI, + MPIC_CPU_MCACK, + + MPIC_IRQ_BASE, + MPIC_IRQ_STRIDE, + MPIC_IRQ_VECTOR_PRI, + MPIC_VECPRI_VECTOR_MASK, + MPIC_VECPRI_POLARITY_POSITIVE, + MPIC_VECPRI_POLARITY_NEGATIVE, + MPIC_VECPRI_SENSE_LEVEL, + MPIC_VECPRI_SENSE_EDGE, + MPIC_VECPRI_POLARITY_MASK, + MPIC_VECPRI_SENSE_MASK, + MPIC_IRQ_DESTINATION + }, + [1] = { /* Tsi108/109 PIC */ + TSI108_GREG_BASE, + TSI108_GREG_FEATURE_0, + TSI108_GREG_GLOBAL_CONF_0, + TSI108_GREG_VENDOR_ID, + TSI108_GREG_IPI_VECTOR_PRI_0, + TSI108_GREG_IPI_STRIDE, + TSI108_GREG_SPURIOUS, + TSI108_GREG_TIMER_FREQ, + + TSI108_TIMER_BASE, + TSI108_TIMER_STRIDE, + TSI108_TIMER_CURRENT_CNT, + TSI108_TIMER_BASE_CNT, + TSI108_TIMER_VECTOR_PRI, + TSI108_TIMER_DESTINATION, + + TSI108_CPU_BASE, + TSI108_CPU_STRIDE, + TSI108_CPU_IPI_DISPATCH_0, + TSI108_CPU_IPI_DISPATCH_STRIDE, + TSI108_CPU_CURRENT_TASK_PRI, + TSI108_CPU_WHOAMI, + TSI108_CPU_INTACK, + TSI108_CPU_EOI, + TSI108_CPU_MCACK, + + TSI108_IRQ_BASE, + TSI108_IRQ_STRIDE, + TSI108_IRQ_VECTOR_PRI, + TSI108_VECPRI_VECTOR_MASK, + TSI108_VECPRI_POLARITY_POSITIVE, + TSI108_VECPRI_POLARITY_NEGATIVE, + TSI108_VECPRI_SENSE_LEVEL, + TSI108_VECPRI_SENSE_EDGE, + TSI108_VECPRI_POLARITY_MASK, + TSI108_VECPRI_SENSE_MASK, + TSI108_IRQ_DESTINATION + }, +}; + +#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name] + +#else /* CONFIG_MPIC_WEIRD */ + +#define MPIC_INFO(name) MPIC_##name + +#endif /* CONFIG_MPIC_WEIRD */ + +static inline unsigned int mpic_processor_id(struct mpic *mpic) +{ + unsigned int cpu = 0; + + if (mpic->flags & MPIC_PRIMARY) + cpu = hard_smp_processor_id(); + + return cpu; +} + +/* + * Register accessor functions + */ + + +static inline u32 _mpic_read(enum mpic_reg_type 
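+ /* dcr, mmio_be or mmio_le */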
type, + struct mpic_reg_bank *rb, + unsigned int reg) +{ + switch(type) { +#ifdef CONFIG_PPC_DCR + case mpic_access_dcr: + return dcr_read(rb->dhost, reg); +#endif + case mpic_access_mmio_be: + return in_be32(rb->base + (reg >> 2)); + case mpic_access_mmio_le: + default: + return in_le32(rb->base + (reg >> 2)); + } +} + +static inline void _mpic_write(enum mpic_reg_type type, + struct mpic_reg_bank *rb, + unsigned int reg, u32 value) +{ + switch(type) { +#ifdef CONFIG_PPC_DCR + case mpic_access_dcr: + dcr_write(rb->dhost, reg, value); + break; +#endif + case mpic_access_mmio_be: + out_be32(rb->base + (reg >> 2), value); + break; + case mpic_access_mmio_le: + default: + out_le32(rb->base + (reg >> 2), value); + break; + } +} + +static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) +{ + enum mpic_reg_type type = mpic->reg_type; + unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + + (ipi * MPIC_INFO(GREG_IPI_STRIDE)); + + if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le) + type = mpic_access_mmio_be; + return _mpic_read(type, &mpic->gregs, offset); +} + +static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) +{ + unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + + (ipi * MPIC_INFO(GREG_IPI_STRIDE)); + + _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); +} + +static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) +{ + unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + + ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); + + if (tm >= 4) + offset += 0x1000 / 4; + + return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); +} + +static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) +{ + unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + + ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); + + if (tm >= 4) + offset += 0x1000 / 4; + + _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); +} + +static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) +{ + unsigned int cpu = mpic_processor_id(mpic); + + return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); +} + +static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) +{ + unsigned int cpu = mpic_processor_id(mpic); + + _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); +} + +static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) +{ + unsigned int isu = src_no >> mpic->isu_shift; + unsigned int idx = src_no & mpic->isu_mask; + unsigned int val; + + val = _mpic_read(mpic->reg_type, &mpic->isus[isu], + reg + (idx * MPIC_INFO(IRQ_STRIDE))); +#ifdef CONFIG_MPIC_BROKEN_REGREAD + if (reg == 0) + val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) | + mpic->isu_reg0_shadow[src_no]; +#endif + return val; +} + +static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, + unsigned int reg, u32 value) +{ + unsigned int isu = src_no >> mpic->isu_shift; + unsigned int idx = src_no & mpic->isu_mask; + + _mpic_write(mpic->reg_type, &mpic->isus[isu], + reg + (idx * MPIC_INFO(IRQ_STRIDE)), value); + +#ifdef CONFIG_MPIC_BROKEN_REGREAD + if (reg == 0) + mpic->isu_reg0_shadow[src_no] = + value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY); +#endif +} + +#define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r)) +#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) +#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) +#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) +#define mpic_tm_read(i) _mpic_tm_read(mpic,(i)) +#define 
mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v)) +#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) +#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) +#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) +#define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) + + +/* + * Low level utility functions + */ + + +static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, + struct mpic_reg_bank *rb, unsigned int offset, + unsigned int size) +{ + rb->base = ioremap(phys_addr + offset, size); + BUG_ON(rb->base == NULL); +} + +#ifdef CONFIG_PPC_DCR +static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node, + struct mpic_reg_bank *rb, + unsigned int offset, unsigned int size) +{ + const u32 *dbasep; + + dbasep = of_get_property(node, "dcr-reg", NULL); + + rb->dhost = dcr_map(node, *dbasep + offset, size); + BUG_ON(!DCR_MAP_OK(rb->dhost)); +} + +static inline void mpic_map(struct mpic *mpic, struct device_node *node, + phys_addr_t phys_addr, struct mpic_reg_bank *rb, + unsigned int offset, unsigned int size) +{ + if (mpic->flags & MPIC_USES_DCR) + _mpic_map_dcr(mpic, node, rb, offset, size); + else + _mpic_map_mmio(mpic, phys_addr, rb, offset, size); +} +#else /* CONFIG_PPC_DCR */ +#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s) +#endif /* !CONFIG_PPC_DCR */ + + + +/* Check if we have one of those nice broken MPICs with a flipped endian on + * reads from IPI registers + */ +static void __init mpic_test_broken_ipi(struct mpic *mpic) +{ + u32 r; + + mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK); + r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0)); + + if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { + printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); + mpic->flags |= MPIC_BROKEN_IPI; + } +} + +#ifdef CONFIG_MPIC_U3_HT_IRQS + +/* Test if an interrupt is sourced from HyperTransport (used on broken U3s) + * to force the edge setting on the MPIC and do the ack workaround. 
+ */ +static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) +{ + if (source >= 128 || !mpic->fixups) + return 0; + return mpic->fixups[source].base != NULL; +} + + +static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) +{ + struct mpic_irq_fixup *fixup = &mpic->fixups[source]; + + if (fixup->applebase) { + unsigned int soff = (fixup->index >> 3) & ~3; + unsigned int mask = 1U << (fixup->index & 0x1f); + writel(mask, fixup->applebase + soff); + } else { + raw_spin_lock(&mpic->fixup_lock); + writeb(0x11 + 2 * fixup->index, fixup->base + 2); + writel(fixup->data, fixup->base + 4); + raw_spin_unlock(&mpic->fixup_lock); + } +} + +static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, + bool level) +{ + struct mpic_irq_fixup *fixup = &mpic->fixups[source]; + unsigned long flags; + u32 tmp; + + if (fixup->base == NULL) + return; + + DBG("startup_ht_interrupt(0x%x) index: %d\n", + source, fixup->index); + raw_spin_lock_irqsave(&mpic->fixup_lock, flags); + /* Enable and configure */ + writeb(0x10 + 2 * fixup->index, fixup->base + 2); + tmp = readl(fixup->base + 4); + tmp &= ~(0x23U); + if (level) + tmp |= 0x22; + writel(tmp, fixup->base + 4); + raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); + +#ifdef CONFIG_PM + /* use the lowest bit inverted to the actual HW, + * set if this fixup was enabled, clear otherwise */ + mpic->save_data[source].fixup_data = tmp | 1; +#endif +} + +static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source) +{ + struct mpic_irq_fixup *fixup = &mpic->fixups[source]; + unsigned long flags; + u32 tmp; + + if (fixup->base == NULL) + return; + + DBG("shutdown_ht_interrupt(0x%x)\n", source); + + /* Disable */ + raw_spin_lock_irqsave(&mpic->fixup_lock, flags); + writeb(0x10 + 2 * fixup->index, fixup->base + 2); + tmp = readl(fixup->base + 4); + tmp |= 1; + writel(tmp, fixup->base + 4); + raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); + +#ifdef CONFIG_PM + /* use the lowest bit inverted to the actual HW, + * set if this fixup was enabled, clear otherwise */ + mpic->save_data[source].fixup_data = tmp & ~1; +#endif +} + +#ifdef CONFIG_PCI_MSI +static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, + unsigned int devfn) +{ + u8 __iomem *base; + u8 pos, flags; + u64 addr = 0; + + for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; + pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { + u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); + if (id == PCI_CAP_ID_HT) { + id = readb(devbase + pos + 3); + if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING) + break; + } + } + + if (pos == 0) + return; + + base = devbase + pos; + + flags = readb(base + HT_MSI_FLAGS); + if (!(flags & HT_MSI_FLAGS_FIXED)) { + addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK; + addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); + } + + printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", + PCI_SLOT(devfn), PCI_FUNC(devfn), + flags & HT_MSI_FLAGS_ENABLE ? 
"enabled" : "disabled", addr); + + if (!(flags & HT_MSI_FLAGS_ENABLE)) + writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); +} +#else +static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, + unsigned int devfn) +{ + return; +} +#endif + +static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, + unsigned int devfn, u32 vdid) +{ + int i, irq, n; + u8 __iomem *base; + u32 tmp; + u8 pos; + + for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; + pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { + u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); + if (id == PCI_CAP_ID_HT) { + id = readb(devbase + pos + 3); + if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ) + break; + } + } + if (pos == 0) + return; + + base = devbase + pos; + writeb(0x01, base + 2); + n = (readl(base + 4) >> 16) & 0xff; + + printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x" + " has %d irqs\n", + devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); + + for (i = 0; i <= n; i++) { + writeb(0x10 + 2 * i, base + 2); + tmp = readl(base + 4); + irq = (tmp >> 16) & 0xff; + DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp); + /* mask it , will be unmasked later */ + tmp |= 0x1; + writel(tmp, base + 4); + mpic->fixups[irq].index = i; + mpic->fixups[irq].base = base; + /* Apple HT PIC has a non-standard way of doing EOIs */ + if ((vdid & 0xffff) == 0x106b) + mpic->fixups[irq].applebase = devbase + 0x60; + else + mpic->fixups[irq].applebase = NULL; + writeb(0x11 + 2 * i, base + 2); + mpic->fixups[irq].data = readl(base + 4) | 0x80000000; + } +} + + +static void __init mpic_scan_ht_pics(struct mpic *mpic) +{ + unsigned int devfn; + u8 __iomem *cfgspace; + + printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n"); + + /* Allocate fixups array */ + mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL); + BUG_ON(mpic->fixups == NULL); + + /* Init spinlock */ + raw_spin_lock_init(&mpic->fixup_lock); + + /* Map U3 config space. We assume all IO-APICs are on the primary bus + * so we only need to map 64kB. + */ + cfgspace = ioremap(0xf2000000, 0x10000); + BUG_ON(cfgspace == NULL); + + /* Now we scan all slots. We do a very quick scan, we read the header + * type, vendor ID and device ID only, that's plenty enough + */ + for (devfn = 0; devfn < 0x100; devfn++) { + u8 __iomem *devbase = cfgspace + (devfn << 8); + u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); + u32 l = readl(devbase + PCI_VENDOR_ID); + u16 s; + + DBG("devfn %x, l: %x\n", devfn, l); + + /* If no device, skip */ + if (l == 0xffffffff || l == 0x00000000 || + l == 0x0000ffff || l == 0xffff0000) + goto next; + /* Check if is supports capability lists */ + s = readw(devbase + PCI_STATUS); + if (!(s & PCI_STATUS_CAP_LIST)) + goto next; + + mpic_scan_ht_pic(mpic, devbase, devfn, l); + mpic_scan_ht_msi(mpic, devbase, devfn); + + next: + /* next device, if function 0 */ + if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0) + devfn += 7; + } +} + +#else /* CONFIG_MPIC_U3_HT_IRQS */ + +static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) +{ + return 0; +} + +static void __init mpic_scan_ht_pics(struct mpic *mpic) +{ +} + +#endif /* CONFIG_MPIC_U3_HT_IRQS */ + +#ifdef CONFIG_SMP +static int irq_choose_cpu(const struct cpumask *mask) +{ + int cpuid; + + if (cpumask_equal(mask, cpu_all_mask)) { + static int irq_rover = 0; + static DEFINE_RAW_SPINLOCK(irq_rover_lock); + unsigned long flags; + + /* Round-robin distribution... 
*/ + do_round_robin: + raw_spin_lock_irqsave(&irq_rover_lock, flags); + + irq_rover = cpumask_next(irq_rover, cpu_online_mask); + if (irq_rover >= nr_cpu_ids) + irq_rover = cpumask_first(cpu_online_mask); + + cpuid = irq_rover; + + raw_spin_unlock_irqrestore(&irq_rover_lock, flags); + } else { + cpuid = cpumask_first_and(mask, cpu_online_mask); + if (cpuid >= nr_cpu_ids) + goto do_round_robin; + } + + return get_hard_smp_processor_id(cpuid); +} +#else +static int irq_choose_cpu(const struct cpumask *mask) +{ + return hard_smp_processor_id(); +} +#endif + +/* Find an mpic associated with a given linux interrupt */ +static struct mpic *mpic_find(unsigned int irq) +{ + if (irq < NUM_ISA_INTERRUPTS) + return NULL; + + return irq_get_chip_data(irq); +} + +/* Determine if the linux irq is an IPI */ +static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) +{ + unsigned int src = virq_to_hw(irq); + + return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); +} + +/* Determine if the linux irq is a timer */ +static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) +{ + unsigned int src = virq_to_hw(irq); + + return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); +} + +/* Convert a cpu mask from logical to physical cpu numbers. */ +static inline u32 mpic_physmask(u32 cpumask) +{ + int i; + u32 mask = 0; + + for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) + mask |= (cpumask & 1) << get_hard_smp_processor_id(i); + return mask; +} + +#ifdef CONFIG_SMP +/* Get the mpic structure from the IPI number */ +static inline struct mpic * mpic_from_ipi(struct irq_data *d) +{ + return irq_data_get_irq_chip_data(d); +} +#endif + +/* Get the mpic structure from the irq number */ +static inline struct mpic * mpic_from_irq(unsigned int irq) +{ + return irq_get_chip_data(irq); +} + +/* Get the mpic structure from the irq data */ +static inline struct mpic * mpic_from_irq_data(struct irq_data *d) +{ + return irq_data_get_irq_chip_data(d); +} + +/* Send an EOI */ +static inline void mpic_eoi(struct mpic *mpic) +{ + mpic_cpu_write(MPIC_INFO(CPU_EOI), 0); + (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI)); +} + +/* + * Linux descriptor level callbacks + */ + + +void mpic_unmask_irq(struct irq_data *d) +{ + unsigned int loops = 100000; + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + + DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); + + mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), + mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & + ~MPIC_VECPRI_MASK); + /* make sure mask gets to controller before we return to user */ + do { + if (!loops--) { + printk(KERN_ERR "%s: timeout on hwirq %u\n", + __func__, src); + break; + } + } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); +} + +void mpic_mask_irq(struct irq_data *d) +{ + unsigned int loops = 100000; + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + + DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); + + mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), + mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | + MPIC_VECPRI_MASK); + + /* make sure mask gets to controller before we return to user */ + do { + if (!loops--) { + printk(KERN_ERR "%s: timeout on hwirq %u\n", + __func__, src); + break; + } + } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); +} + +void mpic_end_irq(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + +#ifdef DEBUG_IRQ + DBG("%s: end_irq: 
%d\n", mpic->name, d->irq); +#endif + /* We always EOI on end_irq() even for edge interrupts since that + * should only lower the priority, the MPIC should have properly + * latched another edge interrupt coming in anyway + */ + + mpic_eoi(mpic); +} + +#ifdef CONFIG_MPIC_U3_HT_IRQS + +static void mpic_unmask_ht_irq(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + + mpic_unmask_irq(d); + + if (irqd_is_level_type(d)) + mpic_ht_end_irq(mpic, src); +} + +static unsigned int mpic_startup_ht_irq(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + + mpic_unmask_irq(d); + mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); + + return 0; +} + +static void mpic_shutdown_ht_irq(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + + mpic_shutdown_ht_interrupt(mpic, src); + mpic_mask_irq(d); +} + +static void mpic_end_ht_irq(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + +#ifdef DEBUG_IRQ + DBG("%s: end_irq: %d\n", mpic->name, d->irq); +#endif + /* We always EOI on end_irq() even for edge interrupts since that + * should only lower the priority, the MPIC should have properly + * latched another edge interrupt coming in anyway + */ + + if (irqd_is_level_type(d)) + mpic_ht_end_irq(mpic, src); + mpic_eoi(mpic); +} +#endif /* !CONFIG_MPIC_U3_HT_IRQS */ + +#ifdef CONFIG_SMP + +static void mpic_unmask_ipi(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_ipi(d); + unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0]; + + DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src); + mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); +} + +static void mpic_mask_ipi(struct irq_data *d) +{ + /* NEVER disable an IPI... that's just plain wrong! */ +} + +static void mpic_end_ipi(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_ipi(d); + + /* + * IPIs are marked IRQ_PER_CPU. This has the side effect of + * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from + * applying to them. We EOI them late to avoid re-entering. + * We mark IPI's with IRQF_DISABLED as they must run with + * irqs disabled. 
+ */ + mpic_eoi(mpic); +} + +#endif /* CONFIG_SMP */ + +static void mpic_unmask_tm(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; + + DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, irq, src); + mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); + mpic_tm_read(src); +} + +static void mpic_mask_tm(struct irq_data *d) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; + + mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); + mpic_tm_read(src); +} + +int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, + bool force) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + + if (mpic->flags & MPIC_SINGLE_DEST_CPU) { + int cpuid = irq_choose_cpu(cpumask); + + mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); + } else { + u32 mask = cpumask_bits(cpumask)[0]; + + mask &= cpumask_bits(cpu_online_mask)[0]; + + mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), + mpic_physmask(mask)); + } + + return 0; +} + +static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) +{ + /* Now convert sense value */ + switch(type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_RISING: + return MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE); + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_EDGE_BOTH: + return MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE); + case IRQ_TYPE_LEVEL_HIGH: + return MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE); + case IRQ_TYPE_LEVEL_LOW: + default: + return MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE); + } +} + +int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) +{ + struct mpic *mpic = mpic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned int vecpri, vold, vnew; + + DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", + mpic, d->irq, src, flow_type); + + if (src >= mpic->irq_count) + return -EINVAL; + + if (flow_type == IRQ_TYPE_NONE) + if (mpic->senses && src < mpic->senses_count) + flow_type = mpic->senses[src]; + if (flow_type == IRQ_TYPE_NONE) + flow_type = IRQ_TYPE_LEVEL_LOW; + + irqd_set_trigger_type(d, flow_type); + + if (mpic_is_ht_interrupt(mpic, src)) + vecpri = MPIC_VECPRI_POLARITY_POSITIVE | + MPIC_VECPRI_SENSE_EDGE; + else + vecpri = mpic_type_to_vecpri(mpic, flow_type); + + vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); + vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | + MPIC_INFO(VECPRI_SENSE_MASK)); + vnew |= vecpri; + if (vold != vnew) + mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); + + return IRQ_SET_MASK_OK_NOCOPY;; +} + +void mpic_set_vector(unsigned int virq, unsigned int vector) +{ + struct mpic *mpic = mpic_from_irq(virq); + unsigned int src = virq_to_hw(virq); + unsigned int vecpri; + + DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", + mpic, virq, src, vector); + + if (src >= mpic->irq_count) + return; + + vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); + vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK); + vecpri |= vector; + mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); +} + +void mpic_set_destination(unsigned int virq, unsigned int cpuid) +{ + struct mpic *mpic = mpic_from_irq(virq); + unsigned int src = virq_to_hw(virq); + + DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", + mpic, virq, src, cpuid); + + if (src >= mpic->irq_count) 
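+ /* sources beyond this MPIC are silently ignored */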
+ return; + + mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); +} + +static struct irq_chip mpic_irq_chip = { + .irq_mask = mpic_mask_irq, + .irq_unmask = mpic_unmask_irq, + .irq_eoi = mpic_end_irq, + .irq_set_type = mpic_set_irq_type, +}; + +#ifdef CONFIG_SMP +static struct irq_chip mpic_ipi_chip = { + .irq_mask = mpic_mask_ipi, + .irq_unmask = mpic_unmask_ipi, + .irq_eoi = mpic_end_ipi, +}; +#endif /* CONFIG_SMP */ + +static struct irq_chip mpic_tm_chip = { + .irq_mask = mpic_mask_tm, + .irq_unmask = mpic_unmask_tm, + .irq_eoi = mpic_end_irq, +}; + +#ifdef CONFIG_MPIC_U3_HT_IRQS +static struct irq_chip mpic_irq_ht_chip = { + .irq_startup = mpic_startup_ht_irq, + .irq_shutdown = mpic_shutdown_ht_irq, + .irq_mask = mpic_mask_irq, + .irq_unmask = mpic_unmask_ht_irq, + .irq_eoi = mpic_end_ht_irq, + .irq_set_type = mpic_set_irq_type, +}; +#endif /* CONFIG_MPIC_U3_HT_IRQS */ + + +static int mpic_host_match(struct irq_host *h, struct device_node *node) +{ + /* Exact match, unless mpic node is NULL */ + return h->of_node == NULL || h->of_node == node; +} + +static int mpic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct mpic *mpic = h->host_data; + struct irq_chip *chip; + + DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw); + + if (hw == mpic->spurious_vec) + return -EINVAL; + if (mpic->protected && test_bit(hw, mpic->protected)) + return -EINVAL; + +#ifdef CONFIG_SMP + else if (hw >= mpic->ipi_vecs[0]) { + WARN_ON(!(mpic->flags & MPIC_PRIMARY)); + + DBG("mpic: mapping as IPI\n"); + irq_set_chip_data(virq, mpic); + irq_set_chip_and_handler(virq, &mpic->hc_ipi, + handle_percpu_irq); + return 0; + } +#endif /* CONFIG_SMP */ + + if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { + WARN_ON(!(mpic->flags & MPIC_PRIMARY)); + + DBG("mpic: mapping as timer\n"); + irq_set_chip_data(virq, mpic); + irq_set_chip_and_handler(virq, &mpic->hc_tm, + handle_fasteoi_irq); + return 0; + } + + if (hw >= mpic->irq_count) + return -EINVAL; + + mpic_msi_reserve_hwirq(mpic, hw); + + /* Default chip */ + chip = &mpic->hc_irq; + +#ifdef CONFIG_MPIC_U3_HT_IRQS + /* Check for HT interrupts, override vecpri */ + if (mpic_is_ht_interrupt(mpic, hw)) + chip = &mpic->hc_ht_irq; +#endif /* CONFIG_MPIC_U3_HT_IRQS */ + + DBG("mpic: mapping to irq chip @%p\n", chip); + + irq_set_chip_data(virq, mpic); + irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq); + + /* Set default irq type */ + irq_set_irq_type(virq, IRQ_TYPE_NONE); + + /* If the MPIC was reset, then all vectors have already been + * initialized. Otherwise, a per source lazy initialization + * is done here. + */ + if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) { + mpic_set_vector(virq, hw); + mpic_set_destination(virq, mpic_processor_id(mpic)); + mpic_irq_set_priority(virq, 8); + } + + return 0; +} + +static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) + +{ + struct mpic *mpic = h->host_data; + static unsigned char map_mpic_senses[4] = { + IRQ_TYPE_EDGE_RISING, + IRQ_TYPE_LEVEL_LOW, + IRQ_TYPE_LEVEL_HIGH, + IRQ_TYPE_EDGE_FALLING, + }; + + *out_hwirq = intspec[0]; + if (intsize >= 4 && (mpic->flags & MPIC_FSL)) { + /* + * Freescale MPIC with extended intspec: + * First two cells are as usual. Third specifies + * an "interrupt type". Fourth is type-specific data. 
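+ * (type 2 remaps intspec[0] onto the reserved IPI vectors and type 3
+ * onto the timer vectors; types 0 and 1 are plain/shared sources)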
+ * + * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt + */ + switch (intspec[2]) { + case 0: + case 1: /* no EISR/EIMR support for now, treat as shared IRQ */ + break; + case 2: + if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) + return -EINVAL; + + *out_hwirq = mpic->ipi_vecs[intspec[0]]; + break; + case 3: + if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs)) + return -EINVAL; + + *out_hwirq = mpic->timer_vecs[intspec[0]]; + break; + default: + pr_debug("%s: unknown irq type %u\n", + __func__, intspec[2]); + return -EINVAL; + } + + *out_flags = map_mpic_senses[intspec[1] & 3]; + } else if (intsize > 1) { + u32 mask = 0x3; + + /* Apple invented a new race of encoding on machines with + * an HT APIC. They encode, among others, the index within + * the HT APIC. We don't care about it here since thankfully, + * it appears that they have the APIC already properly + * configured, and thus our current fixup code that reads the + * APIC config works fine. However, we still need to mask out + * bits in the specifier to make sure we only get bit 0 which + * is the level/edge bit (the only sense bit exposed by Apple), + * as their bit 1 means something else. + */ + if (machine_is(powermac)) + mask = 0x1; + *out_flags = map_mpic_senses[intspec[1] & mask]; + } else + *out_flags = IRQ_TYPE_NONE; + + DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n", + intsize, intspec[0], intspec[1], *out_hwirq, *out_flags); + + return 0; +} + +static struct irq_host_ops mpic_host_ops = { + .match = mpic_host_match, + .map = mpic_host_map, + .xlate = mpic_host_xlate, +}; + +static int mpic_reset_prohibited(struct device_node *node) +{ + return node && of_get_property(node, "pic-no-reset", NULL); +} + +/* + * Exported functions + */ + +struct mpic * __init mpic_alloc(struct device_node *node, + phys_addr_t phys_addr, + unsigned int flags, + unsigned int isu_size, + unsigned int irq_count, + const char *name) +{ + struct mpic *mpic; + u32 greg_feature; + const char *vers; + int i; + int intvec_top; + u64 paddr = phys_addr; + + mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); + if (mpic == NULL) + return NULL; + + mpic->name = name; + + mpic->hc_irq = mpic_irq_chip; + mpic->hc_irq.name = name; + if (flags & MPIC_PRIMARY) + mpic->hc_irq.irq_set_affinity = mpic_set_affinity; +#ifdef CONFIG_MPIC_U3_HT_IRQS + mpic->hc_ht_irq = mpic_irq_ht_chip; + mpic->hc_ht_irq.name = name; + if (flags & MPIC_PRIMARY) + mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; +#endif /* CONFIG_MPIC_U3_HT_IRQS */ + +#ifdef CONFIG_SMP + mpic->hc_ipi = mpic_ipi_chip; + mpic->hc_ipi.name = name; +#endif /* CONFIG_SMP */ + + mpic->hc_tm = mpic_tm_chip; + mpic->hc_tm.name = name; + + mpic->flags = flags; + mpic->isu_size = isu_size; + mpic->irq_count = irq_count; + mpic->num_sources = 0; /* so far */ + + if (flags & MPIC_LARGE_VECTORS) + intvec_top = 2047; + else + intvec_top = 255; + + mpic->timer_vecs[0] = intvec_top - 12; + mpic->timer_vecs[1] = intvec_top - 11; + mpic->timer_vecs[2] = intvec_top - 10; + mpic->timer_vecs[3] = intvec_top - 9; + mpic->timer_vecs[4] = intvec_top - 8; + mpic->timer_vecs[5] = intvec_top - 7; + mpic->timer_vecs[6] = intvec_top - 6; + mpic->timer_vecs[7] = intvec_top - 5; + mpic->ipi_vecs[0] = intvec_top - 4; + mpic->ipi_vecs[1] = intvec_top - 3; + mpic->ipi_vecs[2] = intvec_top - 2; + mpic->ipi_vecs[3] = intvec_top - 1; + mpic->spurious_vec = intvec_top; + + /* Check for "big-endian" in device-tree */ + if (node && of_get_property(node, "big-endian", NULL) != NULL) + mpic->flags |= 
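+ /* big-endian registers are accessed with in_be32()/out_be32() */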
MPIC_BIG_ENDIAN; + if (node && of_device_is_compatible(node, "fsl,mpic")) + mpic->flags |= MPIC_FSL; + + /* Look for protected sources */ + if (node) { + int psize; + unsigned int bits, mapsize; + const u32 *psrc = + of_get_property(node, "protected-sources", &psize); + if (psrc) { + psize /= 4; + bits = intvec_top + 1; + mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long); + mpic->protected = kzalloc(mapsize, GFP_KERNEL); + BUG_ON(mpic->protected == NULL); + for (i = 0; i < psize; i++) { + if (psrc[i] > intvec_top) + continue; + __set_bit(psrc[i], mpic->protected); + } + } + } + +#ifdef CONFIG_MPIC_WEIRD + mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)]; +#endif + + /* default register type */ + mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ? + mpic_access_mmio_be : mpic_access_mmio_le; + + /* If no physical address is passed in, a device-node is mandatory */ + BUG_ON(paddr == 0 && node == NULL); + + /* If no physical address passed in, check if it's dcr based */ + if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) { +#ifdef CONFIG_PPC_DCR + mpic->flags |= MPIC_USES_DCR; + mpic->reg_type = mpic_access_dcr; +#else + BUG(); +#endif /* CONFIG_PPC_DCR */ + } + + /* If the MPIC is not DCR based, and no physical address was passed + * in, try to obtain one + */ + if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) { + const u32 *reg = of_get_property(node, "reg", NULL); + BUG_ON(reg == NULL); + paddr = of_translate_address(node, reg); + BUG_ON(paddr == OF_BAD_ADDR); + } + + /* Map the global registers */ + mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000); + mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); + + /* Reset */ + + /* When using a device-node, reset requests are only honored if the MPIC + * is allowed to reset. + */ + if (mpic_reset_prohibited(node)) + mpic->flags |= MPIC_NO_RESET; + + if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) { + printk(KERN_DEBUG "mpic: Resetting\n"); + mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), + mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) + | MPIC_GREG_GCONF_RESET); + while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) + & MPIC_GREG_GCONF_RESET) + mb(); + } + + /* CoreInt */ + if (flags & MPIC_ENABLE_COREINT) + mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), + mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) + | MPIC_GREG_GCONF_COREINT); + + if (flags & MPIC_ENABLE_MCK) + mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), + mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) + | MPIC_GREG_GCONF_MCK); + + /* Read feature register, calculate num CPUs and, for non-ISU + * MPICs, num sources as well. 
On ISU MPICs, sources are counted + * as ISUs are added + */ + greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0)); + mpic->num_cpus = ((greg_feature & MPIC_GREG_FEATURE_LAST_CPU_MASK) + >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1; + if (isu_size == 0) { + if (flags & MPIC_BROKEN_FRR_NIRQS) + mpic->num_sources = mpic->irq_count; + else + mpic->num_sources = + ((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK) + >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1; + } + + /* Map the per-CPU registers */ + for (i = 0; i < mpic->num_cpus; i++) { + mpic_map(mpic, node, paddr, &mpic->cpuregs[i], + MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE), + 0x1000); + } + + /* Initialize main ISU if none provided */ + if (mpic->isu_size == 0) { + mpic->isu_size = mpic->num_sources; + mpic_map(mpic, node, paddr, &mpic->isus[0], + MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); + } + mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); + mpic->isu_mask = (1 << mpic->isu_shift) - 1; + + mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, + isu_size ? isu_size : mpic->num_sources, + &mpic_host_ops, + flags & MPIC_LARGE_VECTORS ? 2048 : 256); + if (mpic->irqhost == NULL) + return NULL; + + mpic->irqhost->host_data = mpic; + + /* Display version */ + switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { + case 1: + vers = "1.0"; + break; + case 2: + vers = "1.2"; + break; + case 3: + vers = "1.3"; + break; + default: + vers = "<unknown>"; + break; + } + printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx," + " max %d CPUs\n", + name, vers, (unsigned long long)paddr, mpic->num_cpus); + printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", + mpic->isu_size, mpic->isu_shift, mpic->isu_mask); + + mpic->next = mpics; + mpics = mpic; + + if (flags & MPIC_PRIMARY) { + mpic_primary = mpic; + irq_set_default_host(mpic->irqhost); + } + + return mpic; +} + +void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, + phys_addr_t paddr) +{ + unsigned int isu_first = isu_num * mpic->isu_size; + + BUG_ON(isu_num >= MPIC_MAX_ISU); + + mpic_map(mpic, mpic->irqhost->of_node, + paddr, &mpic->isus[isu_num], 0, + MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); + + if ((isu_first + mpic->isu_size) > mpic->num_sources) + mpic->num_sources = isu_first + mpic->isu_size; +} + +void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) +{ + mpic->senses = senses; + mpic->senses_count = count; +} + +void __init mpic_init(struct mpic *mpic) +{ + int i; + int cpu; + + BUG_ON(mpic->num_sources == 0); + + printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); + + /* Set current processor priority to max */ + mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); + + /* Initialize timers to our reserved vectors and mask them for now */ + for (i = 0; i < 4; i++) { + mpic_write(mpic->tmregs, + i * MPIC_INFO(TIMER_STRIDE) + + MPIC_INFO(TIMER_DESTINATION), + 1 << hard_smp_processor_id()); + mpic_write(mpic->tmregs, + i * MPIC_INFO(TIMER_STRIDE) + + MPIC_INFO(TIMER_VECTOR_PRI), + MPIC_VECPRI_MASK | + (9 << MPIC_VECPRI_PRIORITY_SHIFT) | + (mpic->timer_vecs[0] + i)); + } + + /* Initialize IPIs to our reserved vectors and mark them disabled for now */ + mpic_test_broken_ipi(mpic); + for (i = 0; i < 4; i++) { + mpic_ipi_write(i, + MPIC_VECPRI_MASK | + (10 << MPIC_VECPRI_PRIORITY_SHIFT) | + (mpic->ipi_vecs[0] + i)); + } + + /* Initialize interrupt sources */ + if (mpic->irq_count == 0) + mpic->irq_count = mpic->num_sources; + + /* Do the HT PIC fixups on U3 broken mpic 
*/ + DBG("MPIC flags: %x\n", mpic->flags); + if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) { + mpic_scan_ht_pics(mpic); + mpic_u3msi_init(mpic); + } + + mpic_pasemi_msi_init(mpic); + + cpu = mpic_processor_id(mpic); + + if (!(mpic->flags & MPIC_NO_RESET)) { + for (i = 0; i < mpic->num_sources; i++) { + /* start with vector = source number, and masked */ + u32 vecpri = MPIC_VECPRI_MASK | i | + (8 << MPIC_VECPRI_PRIORITY_SHIFT); + + /* check if protected */ + if (mpic->protected && test_bit(i, mpic->protected)) + continue; + /* init hw */ + mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); + mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); + } + } + + /* Init spurious vector */ + mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec); + + /* Disable 8259 passthrough, if supported */ + if (!(mpic->flags & MPIC_NO_PTHROU_DIS)) + mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), + mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) + | MPIC_GREG_GCONF_8259_PTHROU_DIS); + + if (mpic->flags & MPIC_NO_BIAS) + mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), + mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) + | MPIC_GREG_GCONF_NO_BIAS); + + /* Set current processor priority to 0 */ + mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); + +#ifdef CONFIG_PM + /* allocate memory to save mpic state */ + mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data), + GFP_KERNEL); + BUG_ON(mpic->save_data == NULL); +#endif +} + +void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio) +{ + u32 v; + + v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); + v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK; + v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio); + mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); +} + +void __init mpic_set_serial_int(struct mpic *mpic, int enable) +{ + unsigned long flags; + u32 v; + + raw_spin_lock_irqsave(&mpic_lock, flags); + v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1); + if (enable) + v |= MPIC_GREG_GLOBAL_CONF_1_SIE; + else + v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE; + mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v); + raw_spin_unlock_irqrestore(&mpic_lock, flags); +} + +void mpic_irq_set_priority(unsigned int irq, unsigned int pri) +{ + struct mpic *mpic = mpic_find(irq); + unsigned int src = virq_to_hw(irq); + unsigned long flags; + u32 reg; + + if (!mpic) + return; + + raw_spin_lock_irqsave(&mpic_lock, flags); + if (mpic_is_ipi(mpic, irq)) { + reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & + ~MPIC_VECPRI_PRIORITY_MASK; + mpic_ipi_write(src - mpic->ipi_vecs[0], + reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); + } else if (mpic_is_tm(mpic, irq)) { + reg = mpic_tm_read(src - mpic->timer_vecs[0]) & + ~MPIC_VECPRI_PRIORITY_MASK; + mpic_tm_write(src - mpic->timer_vecs[0], + reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); + } else { + reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) + & ~MPIC_VECPRI_PRIORITY_MASK; + mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), + reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); + } + raw_spin_unlock_irqrestore(&mpic_lock, flags); +} + +void mpic_setup_this_cpu(void) +{ +#ifdef CONFIG_SMP + struct mpic *mpic = mpic_primary; + unsigned long flags; + u32 msk = 1 << hard_smp_processor_id(); + unsigned int i; + + BUG_ON(mpic == NULL); + + DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); + + raw_spin_lock_irqsave(&mpic_lock, flags); + + /* let the mpic know we want intrs. default affinity is 0xffffffff + * until changed via /proc. 
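+ * (each source's IRQ_DESTINATION gets every online cpu's bit set).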
That's how it's done on x86. If we want + * it differently, then we should make sure we also change the default + * values of irq_desc[].affinity in irq.c. + */ + if (distribute_irqs) { + for (i = 0; i < mpic->num_sources ; i++) + mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), + mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); + } + + /* Set current processor priority to 0 */ + mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0); + + raw_spin_unlock_irqrestore(&mpic_lock, flags); +#endif /* CONFIG_SMP */ +} + +int mpic_cpu_get_priority(void) +{ + struct mpic *mpic = mpic_primary; + + return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI)); +} + +void mpic_cpu_set_priority(int prio) +{ + struct mpic *mpic = mpic_primary; + + prio &= MPIC_CPU_TASKPRI_MASK; + mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio); +} + +void mpic_teardown_this_cpu(int secondary) +{ + struct mpic *mpic = mpic_primary; + unsigned long flags; + u32 msk = 1 << hard_smp_processor_id(); + unsigned int i; + + BUG_ON(mpic == NULL); + + DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); + raw_spin_lock_irqsave(&mpic_lock, flags); + + /* let the mpic know we don't want intrs. */ + for (i = 0; i < mpic->num_sources ; i++) + mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), + mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk); + + /* Set current processor priority to max */ + mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); + /* We need to EOI the IPI since not all platforms reset the MPIC + * on boot and new interrupts wouldn't get delivered otherwise. + */ + mpic_eoi(mpic); + + raw_spin_unlock_irqrestore(&mpic_lock, flags); +} + + +static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) +{ + u32 src; + + src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK); +#ifdef DEBUG_LOW + DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src); +#endif + if (unlikely(src == mpic->spurious_vec)) { + if (mpic->flags & MPIC_SPV_EOI) + mpic_eoi(mpic); + return NO_IRQ; + } + if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { + printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", + mpic->name, (int)src); + mpic_eoi(mpic); + return NO_IRQ; + } + + return irq_linear_revmap(mpic->irqhost, src); +} + +unsigned int mpic_get_one_irq(struct mpic *mpic) +{ + return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK)); +} + +unsigned int mpic_get_irq(void) +{ + struct mpic *mpic = mpic_primary; + + BUG_ON(mpic == NULL); + + return mpic_get_one_irq(mpic); +} + +unsigned int mpic_get_coreint_irq(void) +{ +#ifdef CONFIG_BOOKE + struct mpic *mpic = mpic_primary; + u32 src; + + BUG_ON(mpic == NULL); + + src = mfspr(SPRN_EPR); + + if (unlikely(src == mpic->spurious_vec)) { + if (mpic->flags & MPIC_SPV_EOI) + mpic_eoi(mpic); + return NO_IRQ; + } + if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { + printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", + mpic->name, (int)src); + return NO_IRQ; + } + + return irq_linear_revmap(mpic->irqhost, src); +#else + return NO_IRQ; +#endif +} + +unsigned int mpic_get_mcirq(void) +{ + struct mpic *mpic = mpic_primary; + + BUG_ON(mpic == NULL); + + return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK)); +} + +#ifdef CONFIG_SMP +void mpic_request_ipis(void) +{ + struct mpic *mpic = mpic_primary; + int i; + BUG_ON(mpic == NULL); + + printk(KERN_INFO "mpic: requesting IPIs...\n"); + + for (i = 0; i < 4; i++) { + unsigned int vipi = irq_create_mapping(mpic->irqhost, + mpic->ipi_vecs[0] + i); + if (vipi == NO_IRQ) { + 
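/* no virq could be created for this vector; warn and skip this IPI */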
printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]); + continue; + } + smp_request_message_ipi(vipi, i); + } +} + +void smp_mpic_message_pass(int cpu, int msg) +{ + struct mpic *mpic = mpic_primary; + u32 physmask; + + BUG_ON(mpic == NULL); + + /* make sure we're sending something that translates to an IPI */ + if ((unsigned int)msg > 3) { + printk("SMP %d: smp_message_pass: unknown msg %d\n", + smp_processor_id(), msg); + return; + } + +#ifdef DEBUG_IPI + DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg); +#endif + + physmask = 1 << get_hard_smp_processor_id(cpu); + + mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + + msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask); +} + +int __init smp_mpic_probe(void) +{ + int nr_cpus; + + DBG("smp_mpic_probe()...\n"); + + nr_cpus = cpumask_weight(cpu_possible_mask); + + DBG("nr_cpus: %d\n", nr_cpus); + + if (nr_cpus > 1) + mpic_request_ipis(); + + return nr_cpus; +} + +void __devinit smp_mpic_setup_cpu(int cpu) +{ + mpic_setup_this_cpu(); +} + +void mpic_reset_core(int cpu) +{ + struct mpic *mpic = mpic_primary; + u32 pir; + int cpuid = get_hard_smp_processor_id(cpu); + + /* Set target bit for core reset */ + pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); + pir |= (1 << cpuid); + mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir); + mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); + + /* Restore target bit after reset complete */ + pir &= ~(1 << cpuid); + mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir); + mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT)); +} +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_PM +static void mpic_suspend_one(struct mpic *mpic) +{ + int i; + + for (i = 0; i < mpic->num_sources; i++) { + mpic->save_data[i].vecprio = + mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI)); + mpic->save_data[i].dest = + mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); + } +} + +static int mpic_suspend(void) +{ + struct mpic *mpic = mpics; + + while (mpic) { + mpic_suspend_one(mpic); + mpic = mpic->next; + } + + return 0; +} + +static void mpic_resume_one(struct mpic *mpic) +{ + int i; + + for (i = 0; i < mpic->num_sources; i++) { + mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), + mpic->save_data[i].vecprio); + mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), + mpic->save_data[i].dest); + +#ifdef CONFIG_MPIC_U3_HT_IRQS + if (mpic->fixups) { + struct mpic_irq_fixup *fixup = &mpic->fixups[i]; + + if (fixup->base) { + /* we use the lowest bit in an inverted meaning */ + if ((mpic->save_data[i].fixup_data & 1) == 0) + continue; + + /* Enable and configure */ + writeb(0x10 + 2 * fixup->index, fixup->base + 2); + + writel(mpic->save_data[i].fixup_data & ~1, + fixup->base + 4); + } + } +#endif + } /* end for loop */ +} + +static void mpic_resume(void) +{ + struct mpic *mpic = mpics; + + while (mpic) { + mpic_resume_one(mpic); + mpic = mpic->next; + } +} + +static struct syscore_ops mpic_syscore_ops = { + .resume = mpic_resume, + .suspend = mpic_suspend, +}; + +static int mpic_init_sys(void) +{ + register_syscore_ops(&mpic_syscore_ops); + return 0; +} + +device_initcall(mpic_init_sys); +#endif diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h new file mode 100644 index 00000000..13f3e891 --- /dev/null +++ b/arch/powerpc/sysdev/mpic.h @@ -0,0 +1,43 @@ +#ifndef _POWERPC_SYSDEV_MPIC_H +#define _POWERPC_SYSDEV_MPIC_H + +/* + * Copyright 2006-2007, Michael Ellerman, IBM Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#ifdef CONFIG_PCI_MSI
+extern void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq);
+extern int mpic_msi_init_allocator(struct mpic *mpic);
+extern int mpic_u3msi_init(struct mpic *mpic);
+extern int mpic_pasemi_msi_init(struct mpic *mpic);
+#else
+static inline void mpic_msi_reserve_hwirq(struct mpic *mpic,
+					  irq_hw_number_t hwirq)
+{
+	return;
+}
+
+static inline int mpic_u3msi_init(struct mpic *mpic)
+{
+	return -1;
+}
+
+static inline int mpic_pasemi_msi_init(struct mpic *mpic)
+{
+	return -1;
+}
+#endif
+
+extern int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type);
+extern void mpic_set_vector(unsigned int virq, unsigned int vector);
+extern int mpic_set_affinity(struct irq_data *d,
+			     const struct cpumask *cpumask, bool force);
+extern void mpic_reset_core(int cpu);
+
+#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
new file mode 100644
index 00000000..0f67cd79
--- /dev/null
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2006-2007, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/bitmap.h>
+#include <linux/msi.h>
+#include <asm/mpic.h>
+#include <asm/prom.h>
+#include <asm/hw_irq.h>
+#include <asm/ppc-pci.h>
+#include <asm/msi_bitmap.h>
+
+#include <sysdev/mpic.h>
+
+void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
+{
+	/* The mpic calls this even when there is no allocator setup */
+	if (!mpic->msi_bitmap.bitmap)
+		return;
+
+	msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
+}
+
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
+{
+	irq_hw_number_t hwirq;
+	struct irq_host_ops *ops = mpic->irqhost->ops;
+	struct device_node *np;
+	int flags, index, i;
+	struct of_irq oirq;
+
+	pr_debug("mpic: found U3, guessing msi allocator setup\n");
+
+	/* Reserve source numbers we know are reserved in the HW.
+	 *
+	 * This is a bit of a mix of U3 and U4 reserves but that's going
+	 * to work fine, we have plenty enough numbers left so let's just
+	 * mark anything we don't like reserved.
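+	 * (that means the fixed ranges below, plus anything the device
+	 * tree already maps to an interrupt source)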
+ */ + for (i = 0; i < 8; i++) + msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); + + for (i = 42; i < 46; i++) + msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); + + for (i = 100; i < 105; i++) + msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); + + for (i = 124; i < mpic->irq_count; i++) + msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); + + + np = NULL; + while ((np = of_find_all_nodes(np))) { + pr_debug("mpic: mapping hwirqs for %s\n", np->full_name); + + index = 0; + while (of_irq_map_one(np, index++, &oirq) == 0) { + ops->xlate(mpic->irqhost, NULL, oirq.specifier, + oirq.size, &hwirq, &flags); + msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq); + } + } + + return 0; +} +#else +static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) +{ + return -1; +} +#endif + +int mpic_msi_init_allocator(struct mpic *mpic) +{ + int rc; + + rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->irq_count, + mpic->irqhost->of_node); + if (rc) + return rc; + + rc = msi_bitmap_reserve_dt_hwirqs(&mpic->msi_bitmap); + if (rc > 0) { + if (mpic->flags & MPIC_U3_HT_IRQS) + rc = mpic_msi_reserve_u3_hwirqs(mpic); + + if (rc) { + msi_bitmap_free(&mpic->msi_bitmap); + return rc; + } + } + + return 0; +} diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c new file mode 100644 index 00000000..38e62382 --- /dev/null +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c @@ -0,0 +1,175 @@ +/* + * Copyright 2007, Olof Johansson, PA Semi + * + * Based on arch/powerpc/sysdev/mpic_u3msi.c: + * + * Copyright 2006, Segher Boessenkool, IBM Corporation. + * Copyright 2006-2007, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + */ + +#undef DEBUG + +#include <linux/irq.h> +#include <linux/bootmem.h> +#include <linux/msi.h> +#include <asm/mpic.h> +#include <asm/prom.h> +#include <asm/hw_irq.h> +#include <asm/ppc-pci.h> +#include <asm/msi_bitmap.h> + +#include "mpic.h" + +/* Allocate 16 interrupts per device, to give an alignment of 16, + * since that's the size of the grouping w.r.t. affinity. If someone + * needs more than 32 MSI's down the road we'll have to rethink this, + * but it should be OK for now. + */ +#define ALLOC_CHUNK 16 + +#define PASEMI_MSI_ADDR 0xfc080000 + +/* A bit ugly, can we get this from the pci_dev somehow? 
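+ * (a single static pointer suffices here, since only one MPIC ever
+ * provides MSIs on these platforms)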
*/ +static struct mpic *msi_mpic; + + +static void mpic_pasemi_msi_mask_irq(struct irq_data *data) +{ + pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); + mask_msi_irq(data); + mpic_mask_irq(data); +} + +static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) +{ + pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); + mpic_unmask_irq(data); + unmask_msi_irq(data); +} + +static struct irq_chip mpic_pasemi_msi_chip = { + .irq_shutdown = mpic_pasemi_msi_mask_irq, + .irq_mask = mpic_pasemi_msi_mask_irq, + .irq_unmask = mpic_pasemi_msi_unmask_irq, + .irq_eoi = mpic_end_irq, + .irq_set_type = mpic_set_irq_type, + .irq_set_affinity = mpic_set_affinity, + .name = "PASEMI-MSI", +}; + +static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) +{ + if (type == PCI_CAP_ID_MSIX) + pr_debug("pasemi_msi: MSI-X untested, trying anyway\n"); + + return 0; +} + +static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) +{ + struct msi_desc *entry; + + pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); + + list_for_each_entry(entry, &pdev->msi_list, list) { + if (entry->irq == NO_IRQ) + continue; + + irq_set_msi_desc(entry->irq, NULL); + msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, + virq_to_hw(entry->irq), ALLOC_CHUNK); + irq_dispose_mapping(entry->irq); + } + + return; +} + +static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + unsigned int virq; + struct msi_desc *entry; + struct msi_msg msg; + int hwirq; + + pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n", + pdev, nvec, type); + + msg.address_hi = 0; + msg.address_lo = PASEMI_MSI_ADDR; + + list_for_each_entry(entry, &pdev->msi_list, list) { + /* Allocate 16 interrupts for now, since that's the grouping for + * affinity. This can be changed later if it turns out 32 is too + * few MSIs for someone, but restrictions will apply to how the + * sources can be changed independently. + */ + hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, + ALLOC_CHUNK); + if (hwirq < 0) { + pr_debug("pasemi_msi: failed allocating hwirq\n"); + return hwirq; + } + + virq = irq_create_mapping(msi_mpic->irqhost, hwirq); + if (virq == NO_IRQ) { + pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n", + hwirq); + msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, + ALLOC_CHUNK); + return -ENOSPC; + } + + /* Vector on MSI is really an offset, the hardware adds + * it to the value written at the magic address. So set + * it to 0 to remain sane. 
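+		 * (the MSI number the device actually signals is carried
+		 * in msg.data, set up below)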
+ */ + mpic_set_vector(virq, 0); + + irq_set_msi_desc(virq, entry); + irq_set_chip(virq, &mpic_pasemi_msi_chip); + irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); + + pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ + "addr 0x%x\n", virq, hwirq, msg.address_lo); + + /* Likewise, the device writes [0...511] into the target + * register to generate MSI [512...1023] + */ + msg.data = hwirq-0x200; + write_msi_msg(virq, &msg); + } + + return 0; +} + +int mpic_pasemi_msi_init(struct mpic *mpic) +{ + int rc; + + if (!mpic->irqhost->of_node || + !of_device_is_compatible(mpic->irqhost->of_node, + "pasemi,pwrficient-openpic")) + return -ENODEV; + + rc = mpic_msi_init_allocator(mpic); + if (rc) { + pr_debug("pasemi_msi: Error allocating bitmap!\n"); + return rc; + } + + pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n"); + + msi_mpic = mpic; + WARN_ON(ppc_md.setup_msi_irqs); + ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs; + ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs; + ppc_md.msi_check_device = pasemi_msi_check_device; + + return 0; +} diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c new file mode 100644 index 00000000..9a7aa0ed --- /dev/null +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -0,0 +1,208 @@ +/* + * Copyright 2006, Segher Boessenkool, IBM Corporation. + * Copyright 2006-2007, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + */ + +#include <linux/irq.h> +#include <linux/bootmem.h> +#include <linux/msi.h> +#include <asm/mpic.h> +#include <asm/prom.h> +#include <asm/hw_irq.h> +#include <asm/ppc-pci.h> +#include <asm/msi_bitmap.h> + +#include "mpic.h" + +/* A bit ugly, can we get this from the pci_dev somehow? */ +static struct mpic *msi_mpic; + +static void mpic_u3msi_mask_irq(struct irq_data *data) +{ + mask_msi_irq(data); + mpic_mask_irq(data); +} + +static void mpic_u3msi_unmask_irq(struct irq_data *data) +{ + mpic_unmask_irq(data); + unmask_msi_irq(data); +} + +static struct irq_chip mpic_u3msi_chip = { + .irq_shutdown = mpic_u3msi_mask_irq, + .irq_mask = mpic_u3msi_mask_irq, + .irq_unmask = mpic_u3msi_unmask_irq, + .irq_eoi = mpic_end_irq, + .irq_set_type = mpic_set_irq_type, + .irq_set_affinity = mpic_set_affinity, + .name = "MPIC-U3MSI", +}; + +static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) +{ + u8 flags; + u32 tmp; + u64 addr; + + pci_read_config_byte(pdev, pos + HT_MSI_FLAGS, &flags); + + if (flags & HT_MSI_FLAGS_FIXED) + return HT_MSI_FIXED_ADDR; + + pci_read_config_dword(pdev, pos + HT_MSI_ADDR_LO, &tmp); + addr = tmp & HT_MSI_ADDR_LO_MASK; + pci_read_config_dword(pdev, pos + HT_MSI_ADDR_HI, &tmp); + addr = addr | ((u64)tmp << 32); + + return addr; +} + +static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq) +{ + struct pci_bus *bus; + unsigned int pos; + + for (bus = pdev->bus; bus && bus->self; bus = bus->parent) { + pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING); + if (pos) + return read_ht_magic_addr(bus->self, pos); + } + + return 0; +} + +static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + + /* U4 PCIe MSIs need to write to the special register in + * the bridge that generates interrupts. 
There should be
+	 * theoretically a register at 0xf8005000 where you just write
+	 * the MSI number and that triggers the right interrupt, but
+	 * unfortunately, this is busted in HW, the bridge endian swaps
+	 * the value and hits the wrong nibble in the register.
+	 *
+	 * So instead we use another register set which is used normally
+	 * for converting HT interrupts to MPIC interrupts, which decodes
+	 * the interrupt number as part of the low address bits
+	 *
+	 * This will not work if we ever use more than one legacy MSI in
+	 * a block but we never do. For one MSI or multiple MSI-X where
+	 * each interrupt address can be specified separately, it works
+	 * just fine.
+	 */
+	if (of_device_is_compatible(hose->dn, "u4-pcie") ||
+	    of_device_is_compatible(hose->dn, "U4-pcie"))
+		return 0xf8004000 | (hwirq << 4);
+
+	return 0;
+}
+
+static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+{
+	if (type == PCI_CAP_ID_MSIX)
+		pr_debug("u3msi: MSI-X untested, trying anyway.\n");
+
+	/* If we can't find a magic address then MSI ain't gonna work */
+	if (find_ht_magic_addr(pdev, 0) == 0 &&
+	    find_u4_magic_addr(pdev, 0) == 0) {
+		pr_debug("u3msi: no magic address found for %s\n",
+			 pci_name(pdev));
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
+{
+	struct msi_desc *entry;
+
+	list_for_each_entry(entry, &pdev->msi_list, list) {
+		if (entry->irq == NO_IRQ)
+			continue;
+
+		irq_set_msi_desc(entry->irq, NULL);
+		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+				       virq_to_hw(entry->irq), 1);
+		irq_dispose_mapping(entry->irq);
+	}
+
+	return;
+}
+
+static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+	unsigned int virq;
+	struct msi_desc *entry;
+	struct msi_msg msg;
+	u64 addr;
+	int hwirq;
+
+	list_for_each_entry(entry, &pdev->msi_list, list) {
+		hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
+		if (hwirq < 0) {
+			pr_debug("u3msi: failed allocating hwirq\n");
+			return hwirq;
+		}
+
+		addr = find_ht_magic_addr(pdev, hwirq);
+		if (addr == 0)
+			addr = find_u4_magic_addr(pdev, hwirq);
+		msg.address_lo = addr & 0xFFFFFFFF;
+		msg.address_hi = addr >> 32;
+
+		virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
+		if (virq == NO_IRQ) {
+			pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq);
+			msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
+			return -ENOSPC;
+		}
+
+		irq_set_msi_desc(virq, entry);
+		irq_set_chip(virq, &mpic_u3msi_chip);
+		irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+
+		pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
+			 virq, hwirq, (unsigned long)addr);
+
+		msg.data = hwirq;
+		write_msi_msg(virq, &msg);
+
+		hwirq++;
+	}
+
+	return 0;
+}
+
+int mpic_u3msi_init(struct mpic *mpic)
+{
+	int rc;
+
+	rc = mpic_msi_init_allocator(mpic);
+	if (rc) {
+		pr_debug("u3msi: Error allocating bitmap!\n");
+		return rc;
+	}
+
+	pr_debug("u3msi: Registering MPIC U3 MSI callbacks.\n");
+
+	BUG_ON(msi_mpic);
+	msi_mpic = mpic;
+
+	WARN_ON(ppc_md.setup_msi_irqs);
+	ppc_md.setup_msi_irqs = u3msi_setup_msi_irqs;
+	ppc_md.teardown_msi_irqs = u3msi_teardown_msi_irqs;
+	ppc_md.msi_check_device = u3msi_msi_check_device;
+
+	return 0;
+}
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
new file mode 100644
index 00000000..5287e95c
--- /dev/null
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2006-2008, Michael Ellerman, IBM Corporation.
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/bitmap.h> +#include <asm/msi_bitmap.h> + +int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num) +{ + unsigned long flags; + int offset, order = get_count_order(num); + + spin_lock_irqsave(&bmp->lock, flags); + /* + * This is fast, but stricter than we need. We might want to add + * a fallback routine which does a linear search with no alignment. + */ + offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order); + spin_unlock_irqrestore(&bmp->lock, flags); + + pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n", + num, order, offset); + + return offset; +} + +void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, + unsigned int num) +{ + unsigned long flags; + int order = get_count_order(num); + + pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n", + num, order, offset); + + spin_lock_irqsave(&bmp->lock, flags); + bitmap_release_region(bmp->bitmap, offset, order); + spin_unlock_irqrestore(&bmp->lock, flags); +} + +void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq) +{ + unsigned long flags; + + pr_debug("msi_bitmap: reserving hwirq 0x%x\n", hwirq); + + spin_lock_irqsave(&bmp->lock, flags); + bitmap_allocate_region(bmp->bitmap, hwirq, 0); + spin_unlock_irqrestore(&bmp->lock, flags); +} + +/** + * msi_bitmap_reserve_dt_hwirqs - Reserve irqs specified in the device tree. + * @bmp: pointer to the MSI bitmap. + * + * Looks in the device tree to see if there is a property specifying which + * irqs can be used for MSI. If found those irqs reserved in the device tree + * are reserved in the bitmap. + * + * Returns 0 for success, < 0 if there was an error, and > 0 if no property + * was found in the device tree. 
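+ *
+ * For example, "msi-available-ranges = <64 32>;" leaves hwirqs 64..95
+ * free for MSIs and reserves everything else in the bitmap.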
+ **/ +int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp) +{ + int i, j, len; + const u32 *p; + + if (!bmp->of_node) + return 1; + + p = of_get_property(bmp->of_node, "msi-available-ranges", &len); + if (!p) { + pr_debug("msi_bitmap: no msi-available-ranges property " \ + "found on %s\n", bmp->of_node->full_name); + return 1; + } + + if (len % (2 * sizeof(u32)) != 0) { + printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges" + " property on %s\n", bmp->of_node->full_name); + return -EINVAL; + } + + bitmap_allocate_region(bmp->bitmap, 0, get_count_order(bmp->irq_count)); + + spin_lock(&bmp->lock); + + /* Format is: (<u32 start> <u32 count>)+ */ + len /= 2 * sizeof(u32); + for (i = 0; i < len; i++, p += 2) { + for (j = 0; j < *(p + 1); j++) + bitmap_release_region(bmp->bitmap, *p + j, 0); + } + + spin_unlock(&bmp->lock); + + return 0; +} + +int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, + struct device_node *of_node) +{ + int size; + + if (!irq_count) + return -EINVAL; + + size = BITS_TO_LONGS(irq_count) * sizeof(long); + pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size); + + bmp->bitmap = zalloc_maybe_bootmem(size, GFP_KERNEL); + if (!bmp->bitmap) { + pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n"); + return -ENOMEM; + } + + /* We zalloc'ed the bitmap, so all irqs are free by default */ + spin_lock_init(&bmp->lock); + bmp->of_node = of_node_get(of_node); + bmp->irq_count = irq_count; + + return 0; +} + +void msi_bitmap_free(struct msi_bitmap *bmp) +{ + /* we can't free the bitmap we don't know if it's bootmem etc. */ + of_node_put(bmp->of_node); + bmp->bitmap = NULL; +} + +#ifdef CONFIG_MSI_BITMAP_SELFTEST + +#define check(x) \ + if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__); + +void __init test_basics(void) +{ + struct msi_bitmap bmp; + int i, size = 512; + + /* Can't allocate a bitmap of 0 irqs */ + check(msi_bitmap_alloc(&bmp, 0, NULL) != 0); + + /* of_node may be NULL */ + check(0 == msi_bitmap_alloc(&bmp, size, NULL)); + + /* Should all be free by default */ + check(0 == bitmap_find_free_region(bmp.bitmap, size, + get_count_order(size))); + bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); + + /* With no node, there's no msi-available-ranges, so expect > 0 */ + check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); + + /* Should all still be free */ + check(0 == bitmap_find_free_region(bmp.bitmap, size, + get_count_order(size))); + bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); + + /* Check we can fill it up and then no more */ + for (i = 0; i < size; i++) + check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); + + check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); + + /* Should all be allocated */ + check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0); + + /* And if we free one we can then allocate another */ + msi_bitmap_free_hwirqs(&bmp, size / 2, 1); + check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); + + msi_bitmap_free(&bmp); + + /* Clients may check bitmap == NULL for "not-allocated" */ + check(bmp.bitmap == NULL); + + kfree(bmp.bitmap); +} + +void __init test_of_node(void) +{ + u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 }; + const char *expected_str = "0-9,20-24,28-39,41-99,220-255"; + char *prop_name = "msi-available-ranges"; + char *node_name = "/fakenode"; + struct device_node of_node; + struct property prop; + struct msi_bitmap bmp; + int size = 256; + DECLARE_BITMAP(expected, size); + + /* There should really be a struct device_node allocator */ + 
memset(&of_node, 0, sizeof(of_node));
+	kref_init(&of_node.kref);
+	of_node.full_name = node_name;
+
+	check(0 == msi_bitmap_alloc(&bmp, size, &of_node));
+
+	/* No msi-available-ranges, so expect > 0 */
+	check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);
+
+	/* Should all still be free */
+	check(0 == bitmap_find_free_region(bmp.bitmap, size,
+					   get_count_order(size)));
+	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
+
+	/* Now create a fake msi-available-ranges property */
+
+	/* There should really .. oh whatever */
+	memset(&prop, 0, sizeof(prop));
+	prop.name = prop_name;
+	prop.value = &prop_data;
+	prop.length = sizeof(prop_data);
+
+	of_node.properties = &prop;
+
+	/* msi-available-ranges, so expect == 0 */
+	check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0);
+
+	/* Check we got the expected result */
+	check(0 == bitmap_parselist(expected_str, expected, size));
+	check(bitmap_equal(expected, bmp.bitmap, size));
+
+	msi_bitmap_free(&bmp);
+	kfree(bmp.bitmap);
+}
+
+int __init msi_bitmap_selftest(void)
+{
+	printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n");
+
+	test_basics();
+	test_of_node();
+
+	return 0;
+}
+late_initcall(msi_bitmap_selftest);
+#endif /* CONFIG_MSI_BITMAP_SELFTEST */
diff --git a/arch/powerpc/sysdev/mv64x60.h b/arch/powerpc/sysdev/mv64x60.h
new file mode 100644
index 00000000..4f618fa4
--- /dev/null
+++ b/arch/powerpc/sysdev/mv64x60.h
@@ -0,0 +1,12 @@
+#ifndef __MV64X60_H__
+#define __MV64X60_H__
+
+#include <linux/init.h>
+
+extern void __init mv64x60_init_irq(void);
+extern unsigned int mv64x60_get_irq(void);
+
+extern void __init mv64x60_pci_init(void);
+extern void __init mv64x60_init_early(void);
+
+#endif /* __MV64X60_H__ */
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
new file mode 100644
index 00000000..0f6af41e
--- /dev/null
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -0,0 +1,523 @@
+/*
+ * Platform device setup for Marvell mv64360/mv64460 host bridges (Discovery)
+ *
+ * Author: Dale Farnsworth <dale@farnsworth.org>
+ *
+ * 2007 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/mv643xx.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/prom.h>
+
+/* These functions provide the necessary setup for the mv64x60 drivers.
*/ + +static struct of_device_id __initdata of_mv64x60_devices[] = { + { .compatible = "marvell,mv64306-devctrl", }, + {} +}; + +/* + * Create MPSC platform devices + */ +static int __init mv64x60_mpsc_register_shared_pdev(struct device_node *np) +{ + struct platform_device *pdev; + struct resource r[2]; + struct mpsc_shared_pdata pdata; + const phandle *ph; + struct device_node *mpscrouting, *mpscintr; + int err; + + ph = of_get_property(np, "mpscrouting", NULL); + mpscrouting = of_find_node_by_phandle(*ph); + if (!mpscrouting) + return -ENODEV; + + err = of_address_to_resource(mpscrouting, 0, &r[0]); + of_node_put(mpscrouting); + if (err) + return err; + + ph = of_get_property(np, "mpscintr", NULL); + mpscintr = of_find_node_by_phandle(*ph); + if (!mpscintr) + return -ENODEV; + + err = of_address_to_resource(mpscintr, 0, &r[1]); + of_node_put(mpscintr); + if (err) + return err; + + memset(&pdata, 0, sizeof(pdata)); + + pdev = platform_device_alloc(MPSC_SHARED_NAME, 0); + if (!pdev) + return -ENOMEM; + + err = platform_device_add_resources(pdev, r, 2); + if (err) + goto error; + + err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); + if (err) + goto error; + + err = platform_device_add(pdev); + if (err) + goto error; + + return 0; + +error: + platform_device_put(pdev); + return err; +} + + +static int __init mv64x60_mpsc_device_setup(struct device_node *np, int id) +{ + struct resource r[5]; + struct mpsc_pdata pdata; + struct platform_device *pdev; + const unsigned int *prop; + const phandle *ph; + struct device_node *sdma, *brg; + int err; + int port_number; + + /* only register the shared platform device the first time through */ + if (id == 0 && (err = mv64x60_mpsc_register_shared_pdev(np))) + return err; + + memset(r, 0, sizeof(r)); + + err = of_address_to_resource(np, 0, &r[0]); + if (err) + return err; + + of_irq_to_resource(np, 0, &r[4]); + + ph = of_get_property(np, "sdma", NULL); + sdma = of_find_node_by_phandle(*ph); + if (!sdma) + return -ENODEV; + + of_irq_to_resource(sdma, 0, &r[3]); + err = of_address_to_resource(sdma, 0, &r[1]); + of_node_put(sdma); + if (err) + return err; + + ph = of_get_property(np, "brg", NULL); + brg = of_find_node_by_phandle(*ph); + if (!brg) + return -ENODEV; + + err = of_address_to_resource(brg, 0, &r[2]); + of_node_put(brg); + if (err) + return err; + + prop = of_get_property(np, "cell-index", NULL); + if (!prop) + return -ENODEV; + port_number = *(int *)prop; + + memset(&pdata, 0, sizeof(pdata)); + + pdata.cache_mgmt = 1; /* All current revs need this set */ + + pdata.max_idle = 40; /* default */ + prop = of_get_property(np, "max_idle", NULL); + if (prop) + pdata.max_idle = *prop; + + prop = of_get_property(brg, "current-speed", NULL); + if (prop) + pdata.default_baud = *prop; + + /* Default is 8 bits, no parity, no flow control */ + pdata.default_bits = 8; + pdata.default_parity = 'n'; + pdata.default_flow = 'n'; + + prop = of_get_property(np, "chr_1", NULL); + if (prop) + pdata.chr_1_val = *prop; + + prop = of_get_property(np, "chr_2", NULL); + if (prop) + pdata.chr_2_val = *prop; + + prop = of_get_property(np, "chr_10", NULL); + if (prop) + pdata.chr_10_val = *prop; + + prop = of_get_property(np, "mpcr", NULL); + if (prop) + pdata.mpcr_val = *prop; + + prop = of_get_property(brg, "bcr", NULL); + if (prop) + pdata.bcr_val = *prop; + + pdata.brg_can_tune = 1; /* All current revs need this set */ + + prop = of_get_property(brg, "clock-src", NULL); + if (prop) + pdata.brg_clk_src = *prop; + + prop = of_get_property(brg, "clock-frequency", 
NULL); + if (prop) + pdata.brg_clk_freq = *prop; + + pdev = platform_device_alloc(MPSC_CTLR_NAME, port_number); + if (!pdev) + return -ENOMEM; + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + + err = platform_device_add_resources(pdev, r, 5); + if (err) + goto error; + + err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); + if (err) + goto error; + + err = platform_device_add(pdev); + if (err) + goto error; + + return 0; + +error: + platform_device_put(pdev); + return err; +} + +/* + * Create mv64x60_eth platform devices + */ +static struct platform_device * __init mv64x60_eth_register_shared_pdev( + struct device_node *np, int id) +{ + struct platform_device *pdev; + struct resource r[1]; + int err; + + err = of_address_to_resource(np, 0, &r[0]); + if (err) + return ERR_PTR(err); + + pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id, + r, 1); + return pdev; +} + +static int __init mv64x60_eth_device_setup(struct device_node *np, int id, + struct platform_device *shared_pdev) +{ + struct resource r[1]; + struct mv643xx_eth_platform_data pdata; + struct platform_device *pdev; + struct device_node *phy; + const u8 *mac_addr; + const int *prop; + const phandle *ph; + int err; + + memset(r, 0, sizeof(r)); + of_irq_to_resource(np, 0, &r[0]); + + memset(&pdata, 0, sizeof(pdata)); + + pdata.shared = shared_pdev; + + prop = of_get_property(np, "reg", NULL); + if (!prop) + return -ENODEV; + pdata.port_number = *prop; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata.mac_addr, mac_addr, 6); + + prop = of_get_property(np, "speed", NULL); + if (prop) + pdata.speed = *prop; + + prop = of_get_property(np, "tx_queue_size", NULL); + if (prop) + pdata.tx_queue_size = *prop; + + prop = of_get_property(np, "rx_queue_size", NULL); + if (prop) + pdata.rx_queue_size = *prop; + + prop = of_get_property(np, "tx_sram_addr", NULL); + if (prop) + pdata.tx_sram_addr = *prop; + + prop = of_get_property(np, "tx_sram_size", NULL); + if (prop) + pdata.tx_sram_size = *prop; + + prop = of_get_property(np, "rx_sram_addr", NULL); + if (prop) + pdata.rx_sram_addr = *prop; + + prop = of_get_property(np, "rx_sram_size", NULL); + if (prop) + pdata.rx_sram_size = *prop; + + ph = of_get_property(np, "phy", NULL); + if (!ph) + return -ENODEV; + + phy = of_find_node_by_phandle(*ph); + if (phy == NULL) + return -ENODEV; + + prop = of_get_property(phy, "reg", NULL); + if (prop) + pdata.phy_addr = MV643XX_ETH_PHY_ADDR(*prop); + + of_node_put(phy); + + pdev = platform_device_alloc(MV643XX_ETH_NAME, id); + if (!pdev) + return -ENOMEM; + + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + err = platform_device_add_resources(pdev, r, 1); + if (err) + goto error; + + err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); + if (err) + goto error; + + err = platform_device_add(pdev); + if (err) + goto error; + + return 0; + +error: + platform_device_put(pdev); + return err; +} + +/* + * Create mv64x60_i2c platform devices + */ +static int __init mv64x60_i2c_device_setup(struct device_node *np, int id) +{ + struct resource r[2]; + struct platform_device *pdev; + struct mv64xxx_i2c_pdata pdata; + const unsigned int *prop; + int err; + + memset(r, 0, sizeof(r)); + + err = of_address_to_resource(np, 0, &r[0]); + if (err) + return err; + + of_irq_to_resource(np, 0, &r[1]); + + memset(&pdata, 0, sizeof(pdata)); + + pdata.freq_m = 8; /* default */ + prop = of_get_property(np, "freq_m", NULL); + if (prop) + pdata.freq_m = *prop; + + pdata.freq_n = 3; /* default */ + prop = of_get_property(np, 
"freq_n", NULL); + if (prop) + pdata.freq_n = *prop; + + pdata.timeout = 1000; /* default: 1 second */ + + pdev = platform_device_alloc(MV64XXX_I2C_CTLR_NAME, id); + if (!pdev) + return -ENOMEM; + + err = platform_device_add_resources(pdev, r, 2); + if (err) + goto error; + + err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); + if (err) + goto error; + + err = platform_device_add(pdev); + if (err) + goto error; + + return 0; + +error: + platform_device_put(pdev); + return err; +} + +/* + * Create mv64x60_wdt platform devices + */ +static int __init mv64x60_wdt_device_setup(struct device_node *np, int id) +{ + struct resource r; + struct platform_device *pdev; + struct mv64x60_wdt_pdata pdata; + const unsigned int *prop; + int err; + + err = of_address_to_resource(np, 0, &r); + if (err) + return err; + + memset(&pdata, 0, sizeof(pdata)); + + pdata.timeout = 10; /* Default: 10 seconds */ + + np = of_get_parent(np); + if (!np) + return -ENODEV; + + prop = of_get_property(np, "clock-frequency", NULL); + of_node_put(np); + if (!prop) + return -ENODEV; + pdata.bus_clk = *prop / 1000000; /* wdt driver wants freq in MHz */ + + pdev = platform_device_alloc(MV64x60_WDT_NAME, id); + if (!pdev) + return -ENOMEM; + + err = platform_device_add_resources(pdev, &r, 1); + if (err) + goto error; + + err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); + if (err) + goto error; + + err = platform_device_add(pdev); + if (err) + goto error; + + return 0; + +error: + platform_device_put(pdev); + return err; +} + +static int __init mv64x60_device_setup(void) +{ + struct device_node *np, *np2; + struct platform_device *pdev; + int id, id2; + int err; + + id = 0; + for_each_compatible_node(np, "serial", "marvell,mv64360-mpsc") { + err = mv64x60_mpsc_device_setup(np, id++); + if (err) + printk(KERN_ERR "Failed to initialize MV64x60 " + "serial device %s: error %d.\n", + np->full_name, err); + } + + id = 0; + id2 = 0; + for_each_compatible_node(np, NULL, "marvell,mv64360-eth-group") { + pdev = mv64x60_eth_register_shared_pdev(np, id++); + if (IS_ERR(pdev)) { + err = PTR_ERR(pdev); + printk(KERN_ERR "Failed to initialize MV64x60 " + "network block %s: error %d.\n", + np->full_name, err); + continue; + } + for_each_child_of_node(np, np2) { + if (!of_device_is_compatible(np2, + "marvell,mv64360-eth")) + continue; + err = mv64x60_eth_device_setup(np2, id2++, pdev); + if (err) + printk(KERN_ERR "Failed to initialize " + "MV64x60 network device %s: " + "error %d.\n", + np2->full_name, err); + } + } + + id = 0; + for_each_compatible_node(np, "i2c", "marvell,mv64360-i2c") { + err = mv64x60_i2c_device_setup(np, id++); + if (err) + printk(KERN_ERR "Failed to initialize MV64x60 I2C " + "bus %s: error %d.\n", + np->full_name, err); + } + + /* support up to one watchdog timer */ + np = of_find_compatible_node(np, NULL, "marvell,mv64360-wdt"); + if (np) { + if ((err = mv64x60_wdt_device_setup(np, id))) + printk(KERN_ERR "Failed to initialize MV64x60 " + "Watchdog %s: error %d.\n", + np->full_name, err); + of_node_put(np); + } + + /* Now add every node that is on the device bus */ + for_each_compatible_node(np, NULL, "marvell,mv64360") + of_platform_bus_probe(np, of_mv64x60_devices, NULL); + + return 0; +} +arch_initcall(mv64x60_device_setup); + +static int __init mv64x60_add_mpsc_console(void) +{ + struct device_node *np = NULL; + const char *prop; + + prop = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (prop == NULL) + goto not_mpsc; + + np = of_find_node_by_path(prop); + if (!np) + goto not_mpsc; 
+ + if (!of_device_is_compatible(np, "marvell,mv64360-mpsc")) + goto not_mpsc; + + prop = of_get_property(np, "cell-index", NULL); + if (!prop) + goto not_mpsc; + + add_preferred_console("ttyMM", *(int *)prop, NULL); + +not_mpsc: + return 0; +} +console_initcall(mv64x60_add_mpsc_console); diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c new file mode 100644 index 00000000..77bb3f4d --- /dev/null +++ b/arch/powerpc/sysdev/mv64x60_pci.c @@ -0,0 +1,170 @@ +/* + * PCI bus setup for Marvell mv64360/mv64460 host bridges (Discovery) + * + * Author: Dale Farnsworth <dale@farnsworth.org> + * + * 2007 (c) MontaVista, Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> + +#include <asm/prom.h> +#include <asm/pci-bridge.h> + +#define PCI_HEADER_TYPE_INVALID 0x7f /* Invalid PCI header type */ + +#ifdef CONFIG_SYSFS +/* 32-bit hex or dec stringified number + '\n' */ +#define MV64X60_VAL_LEN_MAX 11 +#define MV64X60_PCICFG_CPCI_HOTSWAP 0x68 + +static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct pci_dev *phb; + u32 v; + + if (off > 0) + return 0; + if (count < MV64X60_VAL_LEN_MAX) + return -EINVAL; + + phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (!phb) + return -ENODEV; + pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v); + pci_dev_put(phb); + + return sprintf(buf, "0x%08x\n", v); +} + +static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct pci_dev *phb; + u32 v; + + if (off > 0) + return 0; + if (count <= 0) + return -EINVAL; + + if (sscanf(buf, "%i", &v) != 1) + return -EINVAL; + + phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (!phb) + return -ENODEV; + pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v); + pci_dev_put(phb); + + return count; +} + +static struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */ + .attr = { + .name = "hs_reg", + .mode = S_IRUGO | S_IWUSR, + }, + .size = MV64X60_VAL_LEN_MAX, + .read = mv64x60_hs_reg_read, + .write = mv64x60_hs_reg_write, +}; + +static int __init mv64x60_sysfs_init(void) +{ + struct device_node *np; + struct platform_device *pdev; + const unsigned int *prop; + + np = of_find_compatible_node(NULL, NULL, "marvell,mv64360"); + if (!np) + return 0; + + prop = of_get_property(np, "hs_reg_valid", NULL); + of_node_put(np); + + pdev = platform_device_register_simple("marvell,mv64360", 0, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + return sysfs_create_bin_file(&pdev->dev.kobj, &mv64x60_hs_reg_attr); +} + +subsys_initcall(mv64x60_sysfs_init); + +#endif /* CONFIG_SYSFS */ + +static void __init mv64x60_pci_fixup_early(struct pci_dev *dev) +{ + /* + * Set the host bridge hdr_type to an invalid value so that + * pci_setup_device() will ignore the host bridge. 
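+	 * (otherwise the host bridge would be enumerated and set up as an
+	 * ordinary PCI device)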
+ */ + dev->hdr_type = PCI_HEADER_TYPE_INVALID; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360, + mv64x60_pci_fixup_early); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64460, + mv64x60_pci_fixup_early); + +static int __init mv64x60_add_bridge(struct device_node *dev) +{ + int len; + struct pci_controller *hose; + struct resource rsrc; + const int *bus_range; + int primary; + + memset(&rsrc, 0, sizeof(rsrc)); + + /* Fetch host bridge registers address */ + if (of_address_to_resource(dev, 0, &rsrc)) { + printk(KERN_ERR "No PCI reg property in device tree\n"); + return -ENODEV; + } + + /* Get bus range if any */ + bus_range = of_get_property(dev, "bus-range", &len); + if (bus_range == NULL || len < 2 * sizeof(int)) + printk(KERN_WARNING "Can't get bus-range for %s, assume" + " bus 0\n", dev->full_name); + + hose = pcibios_alloc_controller(dev); + if (!hose) + return -ENOMEM; + + hose->first_busno = bus_range ? bus_range[0] : 0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + setup_indirect_pci(hose, rsrc.start, rsrc.start + 4, 0); + hose->self_busno = hose->first_busno; + + printk(KERN_INFO "Found MV64x60 PCI host bridge at 0x%016llx. " + "Firmware bus number: %d->%d\n", + (unsigned long long)rsrc.start, hose->first_busno, + hose->last_busno); + + /* Interpret the "ranges" property */ + /* This also maps the I/O region and sets isa_io/mem_base */ + primary = (hose->first_busno == 0); + pci_process_bridge_OF_ranges(hose, dev, primary); + + return 0; +} + +void __init mv64x60_pci_init(void) +{ + struct device_node *np; + + for_each_compatible_node(np, "pci", "marvell,mv64360-pci") + mv64x60_add_bridge(np); +} diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c new file mode 100644 index 00000000..14d13026 --- /dev/null +++ b/arch/powerpc/sysdev/mv64x60_pic.c @@ -0,0 +1,298 @@ +/* + * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery) + * + * Author: Dale Farnsworth <dale@farnsworth.org> + * + * 2007 (c) MontaVista, Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> + +#include <asm/byteorder.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/irq.h> + +#include "mv64x60.h" + +/* Interrupt Controller Interface Registers */ +#define MV64X60_IC_MAIN_CAUSE_LO 0x0004 +#define MV64X60_IC_MAIN_CAUSE_HI 0x000c +#define MV64X60_IC_CPU0_INTR_MASK_LO 0x0014 +#define MV64X60_IC_CPU0_INTR_MASK_HI 0x001c +#define MV64X60_IC_CPU0_SELECT_CAUSE 0x0024 + +#define MV64X60_HIGH_GPP_GROUPS 0x0f000000 +#define MV64X60_SELECT_CAUSE_HIGH 0x40000000 + +/* General Purpose Pins Controller Interface Registers */ +#define MV64x60_GPP_INTR_CAUSE 0x0008 +#define MV64x60_GPP_INTR_MASK 0x000c + +#define MV64x60_LEVEL1_LOW 0 +#define MV64x60_LEVEL1_HIGH 1 +#define MV64x60_LEVEL1_GPP 2 + +#define MV64x60_LEVEL1_MASK 0x00000060 +#define MV64x60_LEVEL1_OFFSET 5 + +#define MV64x60_LEVEL2_MASK 0x0000001f + +#define MV64x60_NUM_IRQS 96 + +static DEFINE_SPINLOCK(mv64x60_lock); + +static void __iomem *mv64x60_irq_reg_base; +static void __iomem *mv64x60_gpp_reg_base; + +/* + * Interrupt Controller Handling + * + * The interrupt controller handles three groups of interrupts: + * main low: IRQ0-IRQ31 + * main high: IRQ32-IRQ63 + * gpp: IRQ64-IRQ95 + * + * This code handles interrupts in two levels. Level 1 selects the + * interrupt group, and level 2 selects an IRQ within that group. + * Each group has its own irq_chip structure. + */ + +static u32 mv64x60_cached_low_mask; +static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS; +static u32 mv64x60_cached_gpp_mask; + +static struct irq_host *mv64x60_irq_host; + +/* + * mv64x60_chip_low functions + */ + +static void mv64x60_mask_low(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_low_mask &= ~(1 << level2); + out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO, + mv64x60_cached_low_mask); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); +} + +static void mv64x60_unmask_low(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_low_mask |= 1 << level2; + out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO, + mv64x60_cached_low_mask); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); +} + +static struct irq_chip mv64x60_chip_low = { + .name = "mv64x60_low", + .irq_mask = mv64x60_mask_low, + .irq_mask_ack = mv64x60_mask_low, + .irq_unmask = mv64x60_unmask_low, +}; + +/* + * mv64x60_chip_high functions + */ + +static void mv64x60_mask_high(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_high_mask &= ~(1 << level2); + out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI, + mv64x60_cached_high_mask); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); +} + +static void mv64x60_unmask_high(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_high_mask |= 1 << level2; + out_le32(mv64x60_irq_reg_base + 
MV64X60_IC_CPU0_INTR_MASK_HI, + mv64x60_cached_high_mask); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); +} + +static struct irq_chip mv64x60_chip_high = { + .name = "mv64x60_high", + .irq_mask = mv64x60_mask_high, + .irq_mask_ack = mv64x60_mask_high, + .irq_unmask = mv64x60_unmask_high, +}; + +/* + * mv64x60_chip_gpp functions + */ + +static void mv64x60_mask_gpp(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_gpp_mask &= ~(1 << level2); + out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, + mv64x60_cached_gpp_mask); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); +} + +static void mv64x60_mask_ack_gpp(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_gpp_mask &= ~(1 << level2); + out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, + mv64x60_cached_gpp_mask); + out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, + ~(1 << level2)); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE); +} + +static void mv64x60_unmask_gpp(struct irq_data *d) +{ + int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; + unsigned long flags; + + spin_lock_irqsave(&mv64x60_lock, flags); + mv64x60_cached_gpp_mask |= 1 << level2; + out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, + mv64x60_cached_gpp_mask); + spin_unlock_irqrestore(&mv64x60_lock, flags); + (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); +} + +static struct irq_chip mv64x60_chip_gpp = { + .name = "mv64x60_gpp", + .irq_mask = mv64x60_mask_gpp, + .irq_mask_ack = mv64x60_mask_ack_gpp, + .irq_unmask = mv64x60_unmask_gpp, +}; + +/* + * mv64x60_host_ops functions + */ + +static struct irq_chip *mv64x60_chips[] = { + [MV64x60_LEVEL1_LOW] = &mv64x60_chip_low, + [MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high, + [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp, +}; + +static int mv64x60_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hwirq) +{ + int level1; + + irq_set_status_flags(virq, IRQ_LEVEL); + + level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; + BUG_ON(level1 > MV64x60_LEVEL1_GPP); + irq_set_chip_and_handler(virq, mv64x60_chips[level1], + handle_level_irq); + + return 0; +} + +static struct irq_host_ops mv64x60_host_ops = { + .map = mv64x60_host_map, +}; + +/* + * Global functions + */ + +void __init mv64x60_init_irq(void) +{ + struct device_node *np; + phys_addr_t paddr; + unsigned int size; + const unsigned int *reg; + unsigned long flags; + + np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp"); + reg = of_get_property(np, "reg", &size); + paddr = of_translate_address(np, reg); + mv64x60_gpp_reg_base = ioremap(paddr, reg[1]); + of_node_put(np); + + np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic"); + reg = of_get_property(np, "reg", &size); + paddr = of_translate_address(np, reg); + mv64x60_irq_reg_base = ioremap(paddr, reg[1]); + + mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, + MV64x60_NUM_IRQS, + &mv64x60_host_ops, MV64x60_NUM_IRQS); + + spin_lock_irqsave(&mv64x60_lock, flags); + out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, + mv64x60_cached_gpp_mask); + out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO, + 
mv64x60_cached_low_mask); + out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI, + mv64x60_cached_high_mask); + + out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0); + out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0); + out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0); + spin_unlock_irqrestore(&mv64x60_lock, flags); +} + +unsigned int mv64x60_get_irq(void) +{ + u32 cause; + int level1; + irq_hw_number_t hwirq; + int virq = NO_IRQ; + + cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE); + if (cause & MV64X60_SELECT_CAUSE_HIGH) { + cause &= mv64x60_cached_high_mask; + level1 = MV64x60_LEVEL1_HIGH; + if (cause & MV64X60_HIGH_GPP_GROUPS) { + cause = in_le32(mv64x60_gpp_reg_base + + MV64x60_GPP_INTR_CAUSE); + cause &= mv64x60_cached_gpp_mask; + level1 = MV64x60_LEVEL1_GPP; + } + } else { + cause &= mv64x60_cached_low_mask; + level1 = MV64x60_LEVEL1_LOW; + } + if (cause) { + hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause); + virq = irq_linear_revmap(mv64x60_irq_host, hwirq); + } + + return virq; +} diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c new file mode 100644 index 00000000..2792dc8b --- /dev/null +++ b/arch/powerpc/sysdev/mv64x60_udbg.c @@ -0,0 +1,152 @@ +/* + * udbg serial input/output routines for the Marvell MV64x60 (Discovery). + * + * Author: Dale Farnsworth <dale@farnsworth.org> + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/udbg.h> + +#include <sysdev/mv64x60.h> + +#define MPSC_0_CR1_OFFSET 0x000c + +#define MPSC_0_CR2_OFFSET 0x0010 +#define MPSC_CHR_2_TCS (1 << 9) + +#define MPSC_0_CHR_10_OFFSET 0x0030 + +#define MPSC_INTR_CAUSE_OFF_0 0x0004 +#define MPSC_INTR_CAUSE_OFF_1 0x000c +#define MPSC_INTR_CAUSE_RCC (1<<6) + +static void __iomem *mpsc_base; +static void __iomem *mpsc_intr_cause; + +static void mv64x60_udbg_putc(char c) +{ + if (c == '\n') + mv64x60_udbg_putc('\r'); + + while(in_le32(mpsc_base + MPSC_0_CR2_OFFSET) & MPSC_CHR_2_TCS) + ; + out_le32(mpsc_base + MPSC_0_CR1_OFFSET, c); + out_le32(mpsc_base + MPSC_0_CR2_OFFSET, MPSC_CHR_2_TCS); +} + +static int mv64x60_udbg_testc(void) +{ + return (in_le32(mpsc_intr_cause) & MPSC_INTR_CAUSE_RCC) != 0; +} + +static int mv64x60_udbg_getc(void) +{ + int cause = 0; + int c; + + while (!mv64x60_udbg_testc()) + ; + + c = in_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2); + out_8(mpsc_base + MPSC_0_CHR_10_OFFSET + 2, c); + out_le32(mpsc_intr_cause, cause & ~MPSC_INTR_CAUSE_RCC); + return c; +} + +static int mv64x60_udbg_getc_poll(void) +{ + if (!mv64x60_udbg_testc()) + return -1; + + return mv64x60_udbg_getc(); +} + +static void mv64x60_udbg_init(void) +{ + struct device_node *np, *mpscintr, *stdout = NULL; + const char *path; + const phandle *ph; + struct resource r[2]; + const int *block_index; + int intr_cause_offset; + int err; + + path = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (!path) + return; + + stdout = of_find_node_by_path(path); + if (!stdout) + return; + + for_each_compatible_node(np, "serial", "marvell,mv64360-mpsc") { + if (np == stdout) + break; + } + + of_node_put(stdout); + if (!np) + return; + + block_index = of_get_property(np, "cell-index", NULL); + if (!block_index) + goto error; + + switch (*block_index) { + case 0: + intr_cause_offset = 
MPSC_INTR_CAUSE_OFF_0; + break; + case 1: + intr_cause_offset = MPSC_INTR_CAUSE_OFF_1; + break; + default: + goto error; + } + + err = of_address_to_resource(np, 0, &r[0]); + if (err) + goto error; + + ph = of_get_property(np, "mpscintr", NULL); + mpscintr = of_find_node_by_phandle(*ph); + if (!mpscintr) + goto error; + + err = of_address_to_resource(mpscintr, 0, &r[1]); + of_node_put(mpscintr); + if (err) + goto error; + + of_node_put(np); + + mpsc_base = ioremap(r[0].start, r[0].end - r[0].start + 1); + if (!mpsc_base) + return; + + mpsc_intr_cause = ioremap(r[1].start, r[1].end - r[1].start + 1); + if (!mpsc_intr_cause) { + iounmap(mpsc_base); + return; + } + mpsc_intr_cause += intr_cause_offset; + + udbg_putc = mv64x60_udbg_putc; + udbg_getc = mv64x60_udbg_getc; + udbg_getc_poll = mv64x60_udbg_getc_poll; + + return; + +error: + of_node_put(np); +} + +void mv64x60_init_early(void) +{ + mv64x60_udbg_init(); +} diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c new file mode 100644 index 00000000..c9e803f3 --- /dev/null +++ b/arch/powerpc/sysdev/of_rtc.c @@ -0,0 +1,60 @@ +/* + * Instantiate mmio-mapped RTC chips based on device tree information + * + * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/init.h> +#include <linux/of_platform.h> +#include <linux/slab.h> + +static __initdata struct { + const char *compatible; + char *plat_name; +} of_rtc_table[] = { + { "ds1743-nvram", "rtc-ds1742" }, +}; + +void __init of_instantiate_rtc(void) +{ + struct device_node *node; + int err; + int i; + + for (i = 0; i < ARRAY_SIZE(of_rtc_table); i++) { + char *plat_name = of_rtc_table[i].plat_name; + + for_each_compatible_node(node, NULL, + of_rtc_table[i].compatible) { + struct resource *res; + + res = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + printk(KERN_ERR "OF RTC: Out of memory " + "allocating resource structure for %s\n", + node->full_name); + continue; + } + + err = of_address_to_resource(node, 0, res); + if (err) { + printk(KERN_ERR "OF RTC: Error " + "translating resources for %s\n", + node->full_name); + continue; + } + + printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n", + node->full_name, plat_name, + (unsigned long long)res->start, + (unsigned long long)res->end); + platform_device_register_simple(plat_name, -1, res, 1); + } + } +} diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c new file mode 100644 index 00000000..8ce4fc3d --- /dev/null +++ b/arch/powerpc/sysdev/pmi.c @@ -0,0 +1,292 @@ +/* + * pmi driver + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * PMI (Platform Management Interrupt) is a way to communicate + * with the BMC (Baseboard Management Controller) via interrupts. + * Unlike IPMI it is bidirectional and has a low latency. + * + * Author: Christian Krafft <krafft@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> + +#include <asm/io.h> +#include <asm/pmi.h> +#include <asm/prom.h> + +struct pmi_data { + struct list_head handler; + spinlock_t handler_spinlock; + spinlock_t pmi_spinlock; + struct mutex msg_mutex; + pmi_message_t msg; + struct completion *completion; + struct platform_device *dev; + int irq; + u8 __iomem *pmi_reg; + struct work_struct work; +}; + +static struct pmi_data *data; + +static irqreturn_t pmi_irq_handler(int irq, void *dev_id) +{ + u8 type; + int rc; + + spin_lock(&data->pmi_spinlock); + + type = ioread8(data->pmi_reg + PMI_READ_TYPE); + pr_debug("pmi: got message of type %d\n", type); + + if (type & PMI_ACK && !data->completion) { + printk(KERN_WARNING "pmi: got unexpected ACK message.\n"); + rc = -EIO; + goto unlock; + } + + if (data->completion && !(type & PMI_ACK)) { + printk(KERN_WARNING "pmi: expected ACK, but got %d\n", type); + rc = -EIO; + goto unlock; + } + + data->msg.type = type; + data->msg.data0 = ioread8(data->pmi_reg + PMI_READ_DATA0); + data->msg.data1 = ioread8(data->pmi_reg + PMI_READ_DATA1); + data->msg.data2 = ioread8(data->pmi_reg + PMI_READ_DATA2); + rc = 0; +unlock: + spin_unlock(&data->pmi_spinlock); + + if (rc == -EIO) { + rc = IRQ_HANDLED; + goto out; + } + + if (data->msg.type & PMI_ACK) { + complete(data->completion); + rc = IRQ_HANDLED; + goto out; + } + + schedule_work(&data->work); + + rc = IRQ_HANDLED; +out: + return rc; +} + + +static struct of_device_id pmi_match[] = { + { .type = "ibm,pmi", .name = "ibm,pmi" }, + { .type = "ibm,pmi" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, pmi_match); + +static void pmi_notify_handlers(struct work_struct *work) +{ + struct pmi_handler *handler; + + spin_lock(&data->handler_spinlock); + list_for_each_entry(handler, &data->handler, node) { + pr_debug("pmi: notifying handler %p\n", handler); + if (handler->type == data->msg.type) + handler->handle_pmi_message(data->msg); + } + spin_unlock(&data->handler_spinlock); +} + +static int pmi_of_probe(struct platform_device *dev) +{ + struct device_node *np = dev->dev.of_node; + int rc; + + if (data) { + printk(KERN_ERR "pmi: driver has already been initialized.\n"); + rc = -EBUSY; + goto out; + } + + data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL); + if (!data) { + printk(KERN_ERR "pmi: could not allocate memory.\n"); + rc = -ENOMEM; + goto out; + } + + data->pmi_reg = of_iomap(np, 0); + if (!data->pmi_reg) { + printk(KERN_ERR "pmi: invalid register address.\n"); + rc = -EFAULT; + goto error_cleanup_data; + } + + INIT_LIST_HEAD(&data->handler); + + mutex_init(&data->msg_mutex); + spin_lock_init(&data->pmi_spinlock); + spin_lock_init(&data->handler_spinlock); + + INIT_WORK(&data->work, pmi_notify_handlers); + + data->dev = dev; + + data->irq = irq_of_parse_and_map(np, 0); + if (data->irq == NO_IRQ) { + printk(KERN_ERR "pmi: invalid interrupt.\n"); + rc = -EFAULT; + goto error_cleanup_iomap; + } + + rc = 
request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL); + if (rc) { + printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n", + data->irq, rc); + goto error_cleanup_iomap; + } + + printk(KERN_INFO "pmi: found pmi device at addr %p.\n", data->pmi_reg); + + goto out; + +error_cleanup_iomap: + iounmap(data->pmi_reg); + +error_cleanup_data: + kfree(data); + +out: + return rc; +} + +static int pmi_of_remove(struct platform_device *dev) +{ + struct pmi_handler *handler, *tmp; + + free_irq(data->irq, NULL); + iounmap(data->pmi_reg); + + spin_lock(&data->handler_spinlock); + + list_for_each_entry_safe(handler, tmp, &data->handler, node) + list_del(&handler->node); + + spin_unlock(&data->handler_spinlock); + + kfree(data); + data = NULL; + + return 0; +} + +static struct platform_driver pmi_of_platform_driver = { + .probe = pmi_of_probe, + .remove = pmi_of_remove, + .driver = { + .name = "pmi", + .owner = THIS_MODULE, + .of_match_table = pmi_match, + }, +}; + +static int __init pmi_module_init(void) +{ + return platform_driver_register(&pmi_of_platform_driver); +} +module_init(pmi_module_init); + +static void __exit pmi_module_exit(void) +{ + platform_driver_unregister(&pmi_of_platform_driver); +} +module_exit(pmi_module_exit); + +int pmi_send_message(pmi_message_t msg) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(completion); + + if (!data) + return -ENODEV; + + mutex_lock(&data->msg_mutex); + + data->msg = msg; + pr_debug("pmi_send_message: msg is %08x\n", *(u32*)&msg); + + data->completion = &completion; + + spin_lock_irqsave(&data->pmi_spinlock, flags); + iowrite8(msg.data0, data->pmi_reg + PMI_WRITE_DATA0); + iowrite8(msg.data1, data->pmi_reg + PMI_WRITE_DATA1); + iowrite8(msg.data2, data->pmi_reg + PMI_WRITE_DATA2); + iowrite8(msg.type, data->pmi_reg + PMI_WRITE_TYPE); + spin_unlock_irqrestore(&data->pmi_spinlock, flags); + + pr_debug("pmi_send_message: wait for completion\n"); + + wait_for_completion_interruptible_timeout(data->completion, + PMI_TIMEOUT); + + data->completion = NULL; + + mutex_unlock(&data->msg_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(pmi_send_message); + +int pmi_register_handler(struct pmi_handler *handler) +{ + if (!data) + return -ENODEV; + + spin_lock(&data->handler_spinlock); + list_add_tail(&handler->node, &data->handler); + spin_unlock(&data->handler_spinlock); + + return 0; +} +EXPORT_SYMBOL_GPL(pmi_register_handler); + +void pmi_unregister_handler(struct pmi_handler *handler) +{ + if (!data) + return; + + pr_debug("pmi: unregistering handler %p\n", handler); + + spin_lock(&data->handler_spinlock); + list_del(&handler->node); + spin_unlock(&data->handler_spinlock); +} +EXPORT_SYMBOL_GPL(pmi_unregister_handler); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); +MODULE_DESCRIPTION("IBM Platform Management Interrupt driver"); diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c new file mode 100644 index 00000000..73b86cc5 --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_cpm.c @@ -0,0 +1,346 @@ +/* + * PowerPC 4xx Clock and Power Management + * + * Copyright (C) 2010, Applied Micro Circuits Corporation + * Victor Gallardo (vgallardo@apm.com) + * + * Based on arch/powerpc/platforms/44x/idle.c: + * Jerone Young <jyoung5@us.ibm.com> + * Copyright 2008 IBM Corp. + * + * Based on arch/powerpc/sysdev/fsl_pmc.c: + * Anton Vorontsov <avorontsov@ru.mvista.com> + * Copyright 2009 MontaVista Software, Inc. + * + * See file CREDITS for list of people who contributed to this + * project. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/suspend.h>
+#include <asm/dcr.h>
+#include <asm/dcr-native.h>
+#include <asm/machdep.h>
+
+#define CPM_ER	0
+#define CPM_FR	1
+#define CPM_SR	2
+
+#define CPM_IDLE_WAIT	0
+#define CPM_IDLE_DOZE	1
+
+struct cpm {
+	dcr_host_t	dcr_host;
+	unsigned int	dcr_offset[3];
+	unsigned int	powersave_off;
+	unsigned int	unused;
+	unsigned int	idle_doze;
+	unsigned int	standby;
+	unsigned int	suspend;
+};
+
+static struct cpm cpm;
+
+struct cpm_idle_mode {
+	unsigned int enabled;
+	const char  *name;
+};
+
+static struct cpm_idle_mode idle_mode[] = {
+	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
+	[CPM_IDLE_DOZE] = { 0, "doze" },
+};
+
+static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
+{
+	unsigned int value;
+
+	/* The CPM controller supports 3 different types of sleep interface,
+	 * known as class 1, 2 and 3.  Class 1 units are unconditionally
+	 * put to sleep when the corresponding CPM bit is set.  For class 2
+	 * and 3 units this is not the case; if they can be put to sleep,
+	 * they will be.  Here we do not verify, we just set the bits and
+	 * expect the units to eventually go off when they can.
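+	 *
+	 * (Editorial illustration, not from the original comment: the
+	 * save/restore pattern used by cpm_idle_sleep() below is
+	 *
+	 *	er_save = cpm_set(CPM_ER, mask);
+	 *	cpm_idle_wait();
+	 *	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
+	 *
+	 * i.e. the value returned here is the pre-sleep register state.)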
+ */ + value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]); + dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask); + + /* return old state, to restore later if needed */ + return value; +} + +static void cpm_idle_wait(void) +{ + unsigned long msr_save; + + /* save off initial state */ + msr_save = mfmsr(); + /* sync required when CPM0_ER[CPU] is set */ + mb(); + /* set wait state MSR */ + mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE); + isync(); + /* return to initial state */ + mtmsr(msr_save); + isync(); +} + +static void cpm_idle_sleep(unsigned int mask) +{ + unsigned int er_save; + + /* update CPM_ER state */ + er_save = cpm_set(CPM_ER, mask); + + /* go to wait state so that CPM0_ER[CPU] can take effect */ + cpm_idle_wait(); + + /* restore CPM_ER state */ + dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save); +} + +static void cpm_idle_doze(void) +{ + cpm_idle_sleep(cpm.idle_doze); +} + +static void cpm_idle_config(int mode) +{ + int i; + + if (idle_mode[mode].enabled) + return; + + for (i = 0; i < ARRAY_SIZE(idle_mode); i++) + idle_mode[i].enabled = 0; + + idle_mode[mode].enabled = 1; +} + +static ssize_t cpm_idle_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int i; + + for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { + if (idle_mode[i].enabled) + s += sprintf(s, "[%s] ", idle_mode[i].name); + else + s += sprintf(s, "%s ", idle_mode[i].name); + } + + *(s-1) = '\n'; /* convert the last space to a newline */ + + return s - buf; +} + +static ssize_t cpm_idle_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t n) +{ + int i; + char *p; + int len; + + p = memchr(buf, '\n', n); + len = p ? p - buf : n; + + for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { + if (strncmp(buf, idle_mode[i].name, len) == 0) { + cpm_idle_config(i); + return n; + } + } + + return -EINVAL; +} + +static struct kobj_attribute cpm_idle_attr = + __ATTR(idle, 0644, cpm_idle_show, cpm_idle_store); + +static void cpm_idle_config_sysfs(void) +{ + struct sys_device *sys_dev; + unsigned long ret; + + sys_dev = get_cpu_sysdev(0); + + ret = sysfs_create_file(&sys_dev->kobj, + &cpm_idle_attr.attr); + if (ret) + printk(KERN_WARNING + "cpm: failed to create idle sysfs entry\n"); +} + +static void cpm_idle(void) +{ + if (idle_mode[CPM_IDLE_DOZE].enabled) + cpm_idle_doze(); + else + cpm_idle_wait(); +} + +static int cpm_suspend_valid(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + return !!cpm.standby; + case PM_SUSPEND_MEM: + return !!cpm.suspend; + default: + return 0; + } +} + +static void cpm_suspend_standby(unsigned int mask) +{ + unsigned long tcr_save; + + /* disable decrement interrupt */ + tcr_save = mfspr(SPRN_TCR); + mtspr(SPRN_TCR, tcr_save & ~TCR_DIE); + + /* go to sleep state */ + cpm_idle_sleep(mask); + + /* restore decrement interrupt */ + mtspr(SPRN_TCR, tcr_save); +} + +static int cpm_suspend_enter(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + cpm_suspend_standby(cpm.standby); + break; + case PM_SUSPEND_MEM: + cpm_suspend_standby(cpm.suspend); + break; + } + + return 0; +} + +static struct platform_suspend_ops cpm_suspend_ops = { + .valid = cpm_suspend_valid, + .enter = cpm_suspend_enter, +}; + +static int cpm_get_uint_property(struct device_node *np, + const char *name) +{ + int len; + const unsigned int *prop = of_get_property(np, name, &len); + + if (prop == NULL || len < sizeof(u32)) + return 0; + + return *prop; +} + +static int __init cpm_init(void) +{ + struct 
device_node *np; + int dcr_base, dcr_len; + int ret = 0; + + if (!cpm.powersave_off) { + cpm_idle_config(CPM_IDLE_WAIT); + ppc_md.power_save = &cpm_idle; + } + + np = of_find_compatible_node(NULL, NULL, "ibm,cpm"); + if (!np) { + ret = -EINVAL; + goto out; + } + + dcr_base = dcr_resource_start(np, 0); + dcr_len = dcr_resource_len(np, 0); + + if (dcr_base == 0 || dcr_len == 0) { + printk(KERN_ERR "cpm: could not parse dcr property for %s\n", + np->full_name); + ret = -EINVAL; + goto out; + } + + cpm.dcr_host = dcr_map(np, dcr_base, dcr_len); + + if (!DCR_MAP_OK(cpm.dcr_host)) { + printk(KERN_ERR "cpm: failed to map dcr property for %s\n", + np->full_name); + ret = -EINVAL; + goto out; + } + + /* All 4xx SoCs with a CPM controller have one of two + * different order for the CPM registers. Some have the + * CPM registers in the following order (ER,FR,SR). The + * others have them in the following order (SR,ER,FR). + */ + + if (cpm_get_uint_property(np, "er-offset") == 0) { + cpm.dcr_offset[CPM_ER] = 0; + cpm.dcr_offset[CPM_FR] = 1; + cpm.dcr_offset[CPM_SR] = 2; + } else { + cpm.dcr_offset[CPM_ER] = 1; + cpm.dcr_offset[CPM_FR] = 2; + cpm.dcr_offset[CPM_SR] = 0; + } + + /* Now let's see what IPs to turn off for the following modes */ + + cpm.unused = cpm_get_uint_property(np, "unused-units"); + cpm.idle_doze = cpm_get_uint_property(np, "idle-doze"); + cpm.standby = cpm_get_uint_property(np, "standby"); + cpm.suspend = cpm_get_uint_property(np, "suspend"); + + /* If some IPs are unused let's turn them off now */ + + if (cpm.unused) { + cpm_set(CPM_ER, cpm.unused); + cpm_set(CPM_FR, cpm.unused); + } + + /* Now let's export interfaces */ + + if (!cpm.powersave_off && cpm.idle_doze) + cpm_idle_config_sysfs(); + + if (cpm.standby || cpm.suspend) + suspend_set_ops(&cpm_suspend_ops); +out: + if (np) + of_node_put(np); + return ret; +} + +late_initcall(cpm_init); + +static int __init cpm_powersave_off(char *arg) +{ + cpm.powersave_off = 1; + return 0; +} +__setup("powersave=off", cpm_powersave_off); diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/sysdev/ppc4xx_gpio.c new file mode 100644 index 00000000..fc65ad1b --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_gpio.c @@ -0,0 +1,215 @@ +/* + * PPC4xx gpio driver + * + * Copyright (c) 2008 Harris Corporation + * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix + * Copyright (c) MontaVista Software, Inc. 2008. + * + * Author: Steve Falco <sfalco@harris.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/types.h> +#include <linux/slab.h> + +#define GPIO_MASK(gpio) (0x80000000 >> (gpio)) +#define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2)) + +/* Physical GPIO register layout */ +struct ppc4xx_gpio { + __be32 or; + __be32 tcr; + __be32 osrl; + __be32 osrh; + __be32 tsrl; + __be32 tsrh; + __be32 odr; + __be32 ir; + __be32 rr1; + __be32 rr2; + __be32 rr3; + __be32 reserved1; + __be32 isr1l; + __be32 isr1h; + __be32 isr2l; + __be32 isr2h; + __be32 isr3l; + __be32 isr3h; +}; + +struct ppc4xx_gpio_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; +}; + +/* + * GPIO LIB API implementation for GPIOs + * + * There are a maximum of 32 gpios in each gpio controller. + */ + +static inline struct ppc4xx_gpio_chip * +to_ppc4xx_gpiochip(struct of_mm_gpio_chip *mm_gc) +{ + return container_of(mm_gc, struct ppc4xx_gpio_chip, mm_gc); +} + +static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + + return in_be32(®s->ir) & GPIO_MASK(gpio); +} + +static inline void +__ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + + if (val) + setbits32(®s->or, GPIO_MASK(gpio)); + else + clrbits32(®s->or, GPIO_MASK(gpio)); +} + +static void +ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc); + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + __ppc4xx_gpio_set(gc, gpio, val); + + spin_unlock_irqrestore(&chip->lock, flags); + + pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); +} + +static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + /* Disable open-drain function */ + clrbits32(®s->odr, GPIO_MASK(gpio)); + + /* Float the pin */ + clrbits32(®s->tcr, GPIO_MASK(gpio)); + + /* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */ + if (gpio < 16) { + clrbits32(®s->osrl, GPIO_MASK2(gpio)); + clrbits32(®s->tsrl, GPIO_MASK2(gpio)); + } else { + clrbits32(®s->osrh, GPIO_MASK2(gpio)); + clrbits32(®s->tsrh, GPIO_MASK2(gpio)); + } + + spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int +ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc); + struct ppc4xx_gpio __iomem *regs = mm_gc->regs; + unsigned long flags; + + spin_lock_irqsave(&chip->lock, flags); + + /* First set initial value */ + __ppc4xx_gpio_set(gc, gpio, val); + + /* Disable open-drain function */ + clrbits32(®s->odr, GPIO_MASK(gpio)); + + /* Drive the pin */ + setbits32(®s->tcr, GPIO_MASK(gpio)); + + /* Bits 0-15 use TSRL, bits 
16-31 use TSRH */ + if (gpio < 16) { + clrbits32(®s->osrl, GPIO_MASK2(gpio)); + clrbits32(®s->tsrl, GPIO_MASK2(gpio)); + } else { + clrbits32(®s->osrh, GPIO_MASK2(gpio)); + clrbits32(®s->tsrh, GPIO_MASK2(gpio)); + } + + spin_unlock_irqrestore(&chip->lock, flags); + + pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); + + return 0; +} + +static int __init ppc4xx_add_gpiochips(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") { + int ret; + struct ppc4xx_gpio_chip *ppc4xx_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL); + if (!ppc4xx_gc) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&ppc4xx_gc->lock); + + mm_gc = &ppc4xx_gc->mm_gc; + gc = &mm_gc->gc; + + gc->ngpio = 32; + gc->direction_input = ppc4xx_gpio_dir_in; + gc->direction_output = ppc4xx_gpio_dir_out; + gc->get = ppc4xx_gpio_get; + gc->set = ppc4xx_gpio_set; + + ret = of_mm_gpiochip_add(np, mm_gc); + if (ret) + goto err; + continue; +err: + pr_err("%s: registration failed with status %d\n", + np->full_name, ret); + kfree(ppc4xx_gc); + /* try others anyway */ + } + return 0; +} +arch_initcall(ppc4xx_add_gpiochips); diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c new file mode 100644 index 00000000..367af024 --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_msi.c @@ -0,0 +1,276 @@ +/* + * Adding PCI-E MSI support for PPC4XX SoCs. + * + * Copyright (c) 2010, Applied Micro Circuits Corporation + * Authors: Tirumala R Marri <tmarri@apm.com> + * Feng Kan <fkan@apm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <asm/prom.h>
+#include <asm/hw_irq.h>
+#include <asm/ppc-pci.h>
+#include <boot/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/msi_bitmap.h>
+
+#define PEIH_TERMADH	0x00
+#define PEIH_TERMADL	0x08
+#define PEIH_MSIED	0x10
+#define PEIH_MSIMK	0x18
+#define PEIH_MSIASS	0x20
+#define PEIH_FLUSH0	0x30
+#define PEIH_FLUSH1	0x38
+#define PEIH_CNTRST	0x48
+#define NR_MSI_IRQS	4
+
+struct ppc4xx_msi {
+	u32 msi_addr_lo;
+	u32 msi_addr_hi;
+	void __iomem *msi_regs;
+	int msi_virqs[NR_MSI_IRQS];
+	struct msi_bitmap bitmap;
+	struct device_node *msi_dev;
+};
+
+static struct ppc4xx_msi ppc4xx_msi;
+
+static int ppc4xx_msi_init_allocator(struct platform_device *dev,
+				     struct ppc4xx_msi *msi_data)
+{
+	int err;
+
+	err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
+			       dev->dev.of_node);
+	if (err)
+		return err;
+
+	err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
+	if (err < 0) {
+		msi_bitmap_free(&msi_data->bitmap);
+		return err;
+	}
+
+	return 0;
+}
+
+static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	int int_no = -ENOMEM;
+	unsigned int virq;
+	struct msi_msg msg;
+	struct msi_desc *entry;
+	struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
+		if (int_no < 0) {
+			pr_debug("%s: fail allocating msi interrupt\n",
+				 __func__);
+			return int_no;
+		}
+		virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
+		if (virq == NO_IRQ) {
+			dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
+			msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
+			return -ENOSPC;
+		}
+		dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
+
+		/* Setup msi address space */
+		msg.address_hi = msi_data->msi_addr_hi;
+		msg.address_lo = msi_data->msi_addr_lo;
+
+		irq_set_msi_desc(virq, entry);
+		msg.data = int_no;
+		write_msi_msg(virq, &msg);
+	}
+	return 0;
+}
+
+void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+	struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+
+	dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
+
+	list_for_each_entry(entry, &dev->msi_list, list) {
+		if (entry->irq == NO_IRQ)
+			continue;
+		irq_set_msi_desc(entry->irq, NULL);
+		msi_bitmap_free_hwirqs(&msi_data->bitmap,
+				       virq_to_hw(entry->irq), 1);
+		irq_dispose_mapping(entry->irq);
+	}
+}
+
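/*
 * Editorial note (an editor's reading of the code in this file, not part
 * of the original source): write_msi_msg() above points the PCI function
 * at the termination address that ppc4xx_setup_pcieh_hw() below programs
 * into PEIH_TERMADH/PEIH_TERMADL, with int_no as the message payload.
 * When the device DMA-writes that payload to the termination address,
 * the interrupt handler block compares it against the expected-data and
 * mask registers (PEIH_MSIED/PEIH_MSIMK) and raises one of the
 * NR_MSI_IRQS interrupt lines, which irq_of_parse_and_map() resolves
 * through the "interrupts" property of the ppc4xx-msi node.
 */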
+static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+{
+	dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
+		__func__, nvec, type);
+	if (type == PCI_CAP_ID_MSIX)
+		pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
+
+	return 0;
+}
+
+static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
+				 struct resource res, struct ppc4xx_msi *msi)
+{
+	const u32 *msi_data;
+	const u32 *msi_mask;
+	const u32 *sdr_addr;
+	dma_addr_t msi_phys;
+	void *msi_virt;
+
+	sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
+	if (!sdr_addr)
+		return -1;
+
+	SDR0_WRITE(sdr_addr, (u64)res.start >> 32);	  /* High addr */
+	SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */
+
+	msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
+	if (!msi->msi_dev)
+		return -ENODEV;
+
+	msi->msi_regs = of_iomap(msi->msi_dev, 0);
+	if (!msi->msi_regs) {
+		dev_err(&dev->dev, "of_iomap failed\n");
+		return -ENOMEM;
+	}
+	dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
+		(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
+
+	msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
+	if (!msi_virt)
+		return -ENOMEM;
+	msi->msi_addr_hi = 0x0;
+	msi->msi_addr_lo = (u32) msi_phys;
+	dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo);
+
+	/* Program the interrupt handler termination address registers */
+	out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
+	out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
+
+	msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
+	if (!msi_data)
+		return -1;
+	msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
+	if (!msi_mask)
+		return -1;
+	/* Program MSI expected data and mask bits */
+	out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
+	out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
+
+	return 0;
+}
+
+static int ppc4xx_of_msi_remove(struct platform_device *dev)
+{
+	struct ppc4xx_msi *msi = dev->dev.platform_data;
+	int i;
+	int virq;
+
+	for (i = 0; i < NR_MSI_IRQS; i++) {
+		virq = msi->msi_virqs[i];
+		if (virq != NO_IRQ)
+			irq_dispose_mapping(virq);
+	}
+
+	if (msi->bitmap.bitmap)
+		msi_bitmap_free(&msi->bitmap);
+	iounmap(msi->msi_regs);
+	of_node_put(msi->msi_dev);
+	kfree(msi);
+
+	return 0;
+}
+
+static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
+{
+	struct ppc4xx_msi *msi;
+	struct resource res;
+	int err = 0;
+
+	dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
+
+	msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
+	if (!msi) {
+		dev_err(&dev->dev, "No memory for MSI structure\n");
+		return -ENOMEM;
+	}
+	dev->dev.platform_data = msi;
+
+	/* Get MSI ranges */
+	err = of_address_to_resource(dev->dev.of_node, 0, &res);
+	if (err) {
+		dev_err(&dev->dev, "%s resource error!\n",
+			dev->dev.of_node->full_name);
+		goto error_out;
+	}
+
+	err = ppc4xx_setup_pcieh_hw(dev, res, msi);
+	if (err)
+		goto error_out;
+
+	err = ppc4xx_msi_init_allocator(dev, msi);
+	if (err) {
+		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
+		goto error_out;
+	}
+
+	/* Keep the msi data for further use; the ppc_md hooks installed
+	 * below operate on this static copy. */
+	ppc4xx_msi = *msi;
+
+	ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
+	ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
+	ppc_md.msi_check_device = ppc4xx_msi_check_device;
+	return err;
+
+error_out:
+	ppc4xx_of_msi_remove(dev);
+	return err;
+}
+static const struct of_device_id ppc4xx_msi_ids[] = {
+	{
+		.compatible = "amcc,ppc4xx-msi",
+	},
+	{}
+};
+static struct platform_driver ppc4xx_msi_driver = {
+	.probe = ppc4xx_msi_probe,
+	.remove = ppc4xx_of_msi_remove,
+	.driver = {
+		   .name = "ppc4xx-msi",
+		   .owner = THIS_MODULE,
+		   .of_match_table = ppc4xx_msi_ids,
+	},
+
+};
+
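/*
 * Editorial sketch (not part of this file): how an ordinary PCI device
 * driver ends up in ppc4xx_setup_msi_irqs() above.  pci_enable_msi()
 * calls through arch_setup_msi_irqs() into the ppc_md.setup_msi_irqs
 * hook installed by the probe routine, and the virq chosen there becomes
 * visible as pdev->irq.  The names foo_isr and foo_enable_msi are
 * hypothetical.
 */
static irqreturn_t foo_isr(int irq, void *data)
{
	/* device-specific acknowledgement would go here */
	return IRQ_HANDLED;
}

static int foo_enable_msi(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_msi(pdev);	/* reaches ppc4xx_setup_msi_irqs() */
	if (rc)
		return rc;		/* caller may fall back to INTx */

	return request_irq(pdev->irq, foo_isr, 0, "foo", pdev);
}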
+static __init int ppc4xx_msi_init(void) +{ + return platform_driver_register(&ppc4xx_msi_driver); +} + +subsys_initcall(ppc4xx_msi_init); diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c new file mode 100644 index 00000000..156aa7d3 --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_pci.c @@ -0,0 +1,1977 @@ +/* + * PCI / PCI-X / PCI-Express support for 4xx parts + * + * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. + * + * Most PCI Express code is coming from Stefan Roese implementation for + * arch/ppc in the Denx tree, slightly reworked by me. + * + * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de> + * + * Some of that comes itself from a previous implementation for 440SPE only + * by Roland Dreier: + * + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Roland Dreier <rolandd@cisco.com> + * + */ + +#undef DEBUG + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/bootmem.h> +#include <linux/delay.h> +#include <linux/slab.h> + +#include <asm/io.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> +#include <asm/dcr.h> +#include <asm/dcr-regs.h> +#include <mm/mmu_decl.h> + +#include "ppc4xx_pci.h" + +static int dma_offset_set; + +#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL)) +#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32)) + +#define RES_TO_U32_LOW(val) \ + ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val)) +#define RES_TO_U32_HIGH(val) \ + ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0)) + +static inline int ppc440spe_revA(void) +{ + /* Catch both 440SPe variants, with and without RAID6 support */ + if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890) + return 1; + else + return 0; +} + +static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev) +{ + struct pci_controller *hose; + int i; + + if (dev->devfn != 0 || dev->bus->self != NULL) + return; + + hose = pci_bus_to_host(dev->bus); + if (hose == NULL) + return; + + if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") && + !of_device_is_compatible(hose->dn, "ibm,plb-pcix") && + !of_device_is_compatible(hose->dn, "ibm,plb-pci")) + return; + + if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") || + of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) { + hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM; + } + + /* Hide the PCI host BARs from the kernel as their content doesn't + * fit well in the resource management + */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + + printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n", + pci_name(dev)); +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge); + +static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose, + void __iomem *reg, + struct resource *res) +{ + u64 size; + const u32 *ranges; + int rlen; + int pna = of_n_addr_cells(hose->dn); + int np = pna + 5; + + /* Default */ + res->start = 0; + size = 0x80000000; + res->end = size - 1; + res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; + + /* Get dma-ranges property */ + ranges = of_get_property(hose->dn, "dma-ranges", &rlen); + if (ranges == NULL) + goto out; + + /* Walk it */ + while ((rlen -= np * 4) >= 0) { + u32 pci_space = ranges[0]; + u64 pci_addr = of_read_number(ranges + 1, 2); + u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3); + size = 
of_read_number(ranges + pna + 3, 2); + ranges += np; + if (cpu_addr == OF_BAD_ADDR || size == 0) + continue; + + /* We only care about memory */ + if ((pci_space & 0x03000000) != 0x02000000) + continue; + + /* We currently only support memory at 0, and pci_addr + * within 32 bits space + */ + if (cpu_addr != 0 || pci_addr > 0xffffffff) { + printk(KERN_WARNING "%s: Ignored unsupported dma range" + " 0x%016llx...0x%016llx -> 0x%016llx\n", + hose->dn->full_name, + pci_addr, pci_addr + size - 1, cpu_addr); + continue; + } + + /* Check if not prefetchable */ + if (!(pci_space & 0x40000000)) + res->flags &= ~IORESOURCE_PREFETCH; + + + /* Use that */ + res->start = pci_addr; + /* Beware of 32 bits resources */ + if (sizeof(resource_size_t) == sizeof(u32) && + (pci_addr + size) > 0x100000000ull) + res->end = 0xffffffff; + else + res->end = res->start + size - 1; + break; + } + + /* We only support one global DMA offset */ + if (dma_offset_set && pci_dram_offset != res->start) { + printk(KERN_ERR "%s: dma-ranges(s) mismatch\n", + hose->dn->full_name); + return -ENXIO; + } + + /* Check that we can fit all of memory as we don't support + * DMA bounce buffers + */ + if (size < total_memory) { + printk(KERN_ERR "%s: dma-ranges too small " + "(size=%llx total_memory=%llx)\n", + hose->dn->full_name, size, (u64)total_memory); + return -ENXIO; + } + + /* Check we are a power of 2 size and that base is a multiple of size*/ + if ((size & (size - 1)) != 0 || + (res->start & (size - 1)) != 0) { + printk(KERN_ERR "%s: dma-ranges unaligned\n", + hose->dn->full_name); + return -ENXIO; + } + + /* Check that we are fully contained within 32 bits space */ + if (res->end > 0xffffffff) { + printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n", + hose->dn->full_name); + return -ENXIO; + } + out: + dma_offset_set = 1; + pci_dram_offset = res->start; + + printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n", + pci_dram_offset); + return 0; +} + +/* + * 4xx PCI 2.x part + */ + +static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose, + void __iomem *reg, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 ma, pcila, pciha; + + /* Hack warning ! The "old" PCI 2.x cell only let us configure the low + * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit + * address are actually hard wired to a value that appears to depend + * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx. + * + * The trick here is we just crop those top bits and ignore them when + * programming the chip. 
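+	 * (Editorial illustration, not from the original comment: on a part
+	 * whose top nibble is hard wired to 1, a PLB window at 0x1_8000_0000
+	 * is programmed here as plain 0x80000000, the cropped bits being
+	 * supplied by the wiring.)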
That means the device-tree has to be right + * for the specific part used (we don't print a warning if it's wrong + * but on the other hand, you'll crash quickly enough), but at least + * this code should work whatever the hard coded value is + */ + plb_addr &= 0xffffffffull; + + /* Note: Due to the above hack, the test below doesn't actually test + * if you address is above 4G, but it tests that address and + * (address + size) are both contained in the same 4G + */ + if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) || + size < 0x1000 || (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%s: Resource out of range\n", + hose->dn->full_name); + return -1; + } + ma = (0xffffffffu << ilog2(size)) | 1; + if (flags & IORESOURCE_PREFETCH) + ma |= 2; + + pciha = RES_TO_U32_HIGH(pci_addr); + pcila = RES_TO_U32_LOW(pci_addr); + + writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index)); + writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index)); + writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index)); + writel(ma, reg + PCIL0_PMM0MA + (0x10 * index)); + + return 0; +} + +static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose, + void __iomem *reg) +{ + int i, j, found_isa_hole = 0; + + /* Setup outbound memory windows */ + for (i = j = 0; i < 3; i++) { + struct resource *res = &hose->mem_resources[i]; + + /* we only care about memory windows */ + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (j > 2) { + printk(KERN_WARNING "%s: Too many ranges\n", + hose->dn->full_name); + break; + } + + /* Configure the resource */ + if (ppc4xx_setup_one_pci_PMM(hose, reg, + res->start, + res->start - hose->pci_mem_offset, + res->end + 1 - res->start, + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == hose->pci_mem_offset) + found_isa_hole = 1; + } + } + + /* Handle ISA memory hole if not already covered */ + if (j <= 2 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", + hose->dn->full_name); +} + +static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose, + void __iomem *reg, + const struct resource *res) +{ + resource_size_t size = res->end - res->start + 1; + u32 sa; + + /* Calculate window size */ + sa = (0xffffffffu << ilog2(size)) | 1; + sa |= 0x1; + + /* RAM is always at 0 local for now */ + writel(0, reg + PCIL0_PTM1LA); + writel(sa, reg + PCIL0_PTM1MS); + + /* Map on PCI side */ + early_write_config_dword(hose, hose->first_busno, 0, + PCI_BASE_ADDRESS_1, res->start); + early_write_config_dword(hose, hose->first_busno, 0, + PCI_BASE_ADDRESS_2, 0x00000000); + early_write_config_word(hose, hose->first_busno, 0, + PCI_COMMAND, 0x0006); +} + +static void __init ppc4xx_probe_pci_bridge(struct device_node *np) +{ + /* NYI */ + struct resource rsrc_cfg; + struct resource rsrc_reg; + struct resource dma_window; + struct pci_controller *hose = NULL; + void __iomem *reg = NULL; + const int *bus_range; + int primary = 0; + + /* Check if device is enabled */ + if (!of_device_is_available(np)) { + printk(KERN_INFO "%s: Port disabled via device-tree\n", + np->full_name); + return; + } + + /* Fetch config space registers address */ + if (of_address_to_resource(np, 0, &rsrc_cfg)) { + printk(KERN_ERR "%s: Can't get PCI config register base !", + np->full_name); + return; + } + /* Fetch host bridge internal registers address */ + if 
(of_address_to_resource(np, 3, &rsrc_reg)) { + printk(KERN_ERR "%s: Can't get PCI internal register base !", + np->full_name); + return; + } + + /* Check if primary bridge */ + if (of_get_property(np, "primary", NULL)) + primary = 1; + + /* Get bus range if any */ + bus_range = of_get_property(np, "bus-range", NULL); + + /* Map registers */ + reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start); + if (reg == NULL) { + printk(KERN_ERR "%s: Can't map registers !", np->full_name); + goto fail; + } + + /* Allocate the host controller data structure */ + hose = pcibios_alloc_controller(np); + if (!hose) + goto fail; + + hose->first_busno = bus_range ? bus_range[0] : 0x0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + /* Setup config space */ + setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0); + + /* Disable all windows */ + writel(0, reg + PCIL0_PMM0MA); + writel(0, reg + PCIL0_PMM1MA); + writel(0, reg + PCIL0_PMM2MA); + writel(0, reg + PCIL0_PTM1MS); + writel(0, reg + PCIL0_PTM2MS); + + /* Parse outbound mapping resources */ + pci_process_bridge_OF_ranges(hose, np, primary); + + /* Parse inbound mapping resources */ + if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0) + goto fail; + + /* Configure outbound ranges POMs */ + ppc4xx_configure_pci_PMMs(hose, reg); + + /* Configure inbound ranges PIMs */ + ppc4xx_configure_pci_PTMs(hose, reg, &dma_window); + + /* We don't need the registers anymore */ + iounmap(reg); + return; + + fail: + if (hose) + pcibios_free_controller(hose); + if (reg) + iounmap(reg); +} + +/* + * 4xx PCI-X part + */ + +static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose, + void __iomem *reg, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 lah, lal, pciah, pcial, sa; + + if (!is_power_of_2(size) || size < 0x1000 || + (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%s: Resource out of range\n", + hose->dn->full_name); + return -1; + } + + /* Calculate register values */ + lah = RES_TO_U32_HIGH(plb_addr); + lal = RES_TO_U32_LOW(plb_addr); + pciah = RES_TO_U32_HIGH(pci_addr); + pcial = RES_TO_U32_LOW(pci_addr); + sa = (0xffffffffu << ilog2(size)) | 0x1; + + /* Program register values */ + if (index == 0) { + writel(lah, reg + PCIX0_POM0LAH); + writel(lal, reg + PCIX0_POM0LAL); + writel(pciah, reg + PCIX0_POM0PCIAH); + writel(pcial, reg + PCIX0_POM0PCIAL); + writel(sa, reg + PCIX0_POM0SA); + } else { + writel(lah, reg + PCIX0_POM1LAH); + writel(lal, reg + PCIX0_POM1LAL); + writel(pciah, reg + PCIX0_POM1PCIAH); + writel(pcial, reg + PCIX0_POM1PCIAL); + writel(sa, reg + PCIX0_POM1SA); + } + + return 0; +} + +static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose, + void __iomem *reg) +{ + int i, j, found_isa_hole = 0; + + /* Setup outbound memory windows */ + for (i = j = 0; i < 3; i++) { + struct resource *res = &hose->mem_resources[i]; + + /* we only care about memory windows */ + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (j > 1) { + printk(KERN_WARNING "%s: Too many ranges\n", + hose->dn->full_name); + break; + } + + /* Configure the resource */ + if (ppc4xx_setup_one_pcix_POM(hose, reg, + res->start, + res->start - hose->pci_mem_offset, + res->end + 1 - res->start, + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == hose->pci_mem_offset) + found_isa_hole = 1; + } + } + + /* Handle ISA memory hole if not already covered */ + if (j <= 1 && !found_isa_hole 
&& hose->isa_mem_size) + if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", + hose->dn->full_name); +} + +static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose, + void __iomem *reg, + const struct resource *res, + int big_pim, + int enable_msi_hole) +{ + resource_size_t size = res->end - res->start + 1; + u32 sa; + + /* RAM is always at 0 */ + writel(0x00000000, reg + PCIX0_PIM0LAH); + writel(0x00000000, reg + PCIX0_PIM0LAL); + + /* Calculate window size */ + sa = (0xffffffffu << ilog2(size)) | 1; + sa |= 0x1; + if (res->flags & IORESOURCE_PREFETCH) + sa |= 0x2; + if (enable_msi_hole) + sa |= 0x4; + writel(sa, reg + PCIX0_PIM0SA); + if (big_pim) + writel(0xffffffff, reg + PCIX0_PIM0SAH); + + /* Map on PCI side */ + writel(0x00000000, reg + PCIX0_BAR0H); + writel(res->start, reg + PCIX0_BAR0L); + writew(0x0006, reg + PCIX0_COMMAND); +} + +static void __init ppc4xx_probe_pcix_bridge(struct device_node *np) +{ + struct resource rsrc_cfg; + struct resource rsrc_reg; + struct resource dma_window; + struct pci_controller *hose = NULL; + void __iomem *reg = NULL; + const int *bus_range; + int big_pim = 0, msi = 0, primary = 0; + + /* Fetch config space registers address */ + if (of_address_to_resource(np, 0, &rsrc_cfg)) { + printk(KERN_ERR "%s:Can't get PCI-X config register base !", + np->full_name); + return; + } + /* Fetch host bridge internal registers address */ + if (of_address_to_resource(np, 3, &rsrc_reg)) { + printk(KERN_ERR "%s: Can't get PCI-X internal register base !", + np->full_name); + return; + } + + /* Check if it supports large PIMs (440GX) */ + if (of_get_property(np, "large-inbound-windows", NULL)) + big_pim = 1; + + /* Check if we should enable MSIs inbound hole */ + if (of_get_property(np, "enable-msi-hole", NULL)) + msi = 1; + + /* Check if primary bridge */ + if (of_get_property(np, "primary", NULL)) + primary = 1; + + /* Get bus range if any */ + bus_range = of_get_property(np, "bus-range", NULL); + + /* Map registers */ + reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start); + if (reg == NULL) { + printk(KERN_ERR "%s: Can't map registers !", np->full_name); + goto fail; + } + + /* Allocate the host controller data structure */ + hose = pcibios_alloc_controller(np); + if (!hose) + goto fail; + + hose->first_busno = bus_range ? bus_range[0] : 0x0; + hose->last_busno = bus_range ? 
bus_range[1] : 0xff; + + /* Setup config space */ + setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, + PPC_INDIRECT_TYPE_SET_CFG_TYPE); + + /* Disable all windows */ + writel(0, reg + PCIX0_POM0SA); + writel(0, reg + PCIX0_POM1SA); + writel(0, reg + PCIX0_POM2SA); + writel(0, reg + PCIX0_PIM0SA); + writel(0, reg + PCIX0_PIM1SA); + writel(0, reg + PCIX0_PIM2SA); + if (big_pim) { + writel(0, reg + PCIX0_PIM0SAH); + writel(0, reg + PCIX0_PIM2SAH); + } + + /* Parse outbound mapping resources */ + pci_process_bridge_OF_ranges(hose, np, primary); + + /* Parse inbound mapping resources */ + if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0) + goto fail; + + /* Configure outbound ranges POMs */ + ppc4xx_configure_pcix_POMs(hose, reg); + + /* Configure inbound ranges PIMs */ + ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi); + + /* We don't need the registers anymore */ + iounmap(reg); + return; + + fail: + if (hose) + pcibios_free_controller(hose); + if (reg) + iounmap(reg); +} + +#ifdef CONFIG_PPC4xx_PCI_EXPRESS + +/* + * 4xx PCI-Express part + * + * We support 3 parts currently based on the compatible property: + * + * ibm,plb-pciex-440spe + * ibm,plb-pciex-405ex + * ibm,plb-pciex-460ex + * + * Anything else will be rejected for now as they are all subtly + * different unfortunately. + * + */ + +#define MAX_PCIE_BUS_MAPPED 0x40 + +struct ppc4xx_pciex_port +{ + struct pci_controller *hose; + struct device_node *node; + unsigned int index; + int endpoint; + int link; + int has_ibpre; + unsigned int sdr_base; + dcr_host_t dcrs; + struct resource cfg_space; + struct resource utl_regs; + void __iomem *utl_base; +}; + +static struct ppc4xx_pciex_port *ppc4xx_pciex_ports; +static unsigned int ppc4xx_pciex_port_count; + +struct ppc4xx_pciex_hwops +{ + int (*core_init)(struct device_node *np); + int (*port_init_hw)(struct ppc4xx_pciex_port *port); + int (*setup_utl)(struct ppc4xx_pciex_port *port); +}; + +static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops; + +#ifdef CONFIG_44x + +/* Check various reset bits of the 440SPe PCIe core */ +static int __init ppc440spe_pciex_check_reset(struct device_node *np) +{ + u32 valPE0, valPE1, valPE2; + int err = 0; + + /* SDR0_PEGPLLLCT1 reset */ + if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) { + /* + * the PCIe core was probably already initialised + * by firmware - let's re-reset RCSSET regs + * + * -- Shouldn't we also re-reset the whole thing ? 
-- BenH + */ + pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n"); + mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000); + mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000); + mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000); + } + + valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET); + valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET); + valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET); + + /* SDR0_PExRCSSET rstgu */ + if (!(valPE0 & 0x01000000) || + !(valPE1 & 0x01000000) || + !(valPE2 & 0x01000000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n"); + err = -1; + } + + /* SDR0_PExRCSSET rstdl */ + if (!(valPE0 & 0x00010000) || + !(valPE1 & 0x00010000) || + !(valPE2 & 0x00010000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n"); + err = -1; + } + + /* SDR0_PExRCSSET rstpyn */ + if ((valPE0 & 0x00001000) || + (valPE1 & 0x00001000) || + (valPE2 & 0x00001000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n"); + err = -1; + } + + /* SDR0_PExRCSSET hldplb */ + if ((valPE0 & 0x10000000) || + (valPE1 & 0x10000000) || + (valPE2 & 0x10000000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n"); + err = -1; + } + + /* SDR0_PExRCSSET rdy */ + if ((valPE0 & 0x00100000) || + (valPE1 & 0x00100000) || + (valPE2 & 0x00100000)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n"); + err = -1; + } + + /* SDR0_PExRCSSET shutdown */ + if ((valPE0 & 0x00000100) || + (valPE1 & 0x00000100) || + (valPE2 & 0x00000100)) { + printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n"); + err = -1; + } + + return err; +} + +/* Global PCIe core initializations for 440SPe core */ +static int __init ppc440spe_pciex_core_init(struct device_node *np) +{ + int time_out = 20; + + /* Set PLL clock receiver to LVPECL */ + dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28); + + /* Shouldn't we do all the calibration stuff etc... here ? 
*/ + if (ppc440spe_pciex_check_reset(np)) + return -ENXIO; + + if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) { + printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration " + "failed (0x%08x)\n", + mfdcri(SDR0, PESDR0_PLLLCT2)); + return -1; + } + + /* De-assert reset of PCIe PLL, wait for lock */ + dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0); + udelay(3); + + while (time_out) { + if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) { + time_out--; + udelay(1); + } else + break; + } + if (!time_out) { + printk(KERN_INFO "PCIE: VCO output not locked\n"); + return -1; + } + + pr_debug("PCIE initialization OK\n"); + + return 3; +} + +static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val = 1 << 24; + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT << 20; + else + val = PTYPE_ROOT_PORT << 20; + + if (port->index == 0) + val |= LNKW_X8 << 12; + else + val |= LNKW_X4 << 12; + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222); + if (ppc440spe_revA()) + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000); + if (port->index == 0) { + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1, + 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1, + 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1, + 0x35000000); + mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1, + 0x35000000); + } + dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, + (1 << 24) | (1 << 16), 1 << 12); + + return 0; +} + +static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + return ppc440spe_pciex_init_port_hw(port); +} + +static int ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + int rc = ppc440spe_pciex_init_port_hw(port); + + port->has_ibpre = 1; + + return rc; +} + +static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + /* XXX Check what that value means... I hate magic */ + dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800); + + /* + * Set buffer allocations and then assert VRB and TXE. 
+ */ + out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000); + out_be32(port->utl_base + PEUTL_INTR, 0x02000000); + out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000); + out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000); + out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000); + out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000); + out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000); + out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); + + return 0; +} + +static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + /* Report CRS to the operating system */ + out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000); + + return 0; +} + +static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata = +{ + .core_init = ppc440spe_pciex_core_init, + .port_init_hw = ppc440speA_pciex_init_port_hw, + .setup_utl = ppc440speA_pciex_init_utl, +}; + +static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata = +{ + .core_init = ppc440spe_pciex_core_init, + .port_init_hw = ppc440speB_pciex_init_port_hw, + .setup_utl = ppc440speB_pciex_init_utl, +}; + +static int __init ppc460ex_pciex_core_init(struct device_node *np) +{ + /* Nothing to do, return 2 ports */ + return 2; +} + +static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val; + u32 utlset1; + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT << 20; + else + val = PTYPE_ROOT_PORT << 20; + + if (port->index == 0) { + val |= LNKW_X1 << 12; + utlset1 = 0x20000000; + } else { + val |= LNKW_X4 << 12; + utlset1 = 0x20101101; + } + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000); + + switch (port->index) { + case 0: + mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130); + mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006); + + mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000); + break; + + case 1: + mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230); + mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130); + mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006); + mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006); + mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006); + mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006); + + mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000); + break; + } + + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, + mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) | + (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN)); + + /* Poll for PHY reset */ + /* XXX FIXME add timeout */ + switch (port->index) { + case 0: + while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1)) + udelay(10); + break; + case 1: + while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1)) + udelay(10); + break; + } + + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, + (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) & + ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) | + PESDRx_RCSSET_RSTPYN); + + port->has_ibpre = 1; + + return 0; +} + +static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0); + + /* + * Set buffer allocations and then assert VRB and TXE. 
+ */ + out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c); + out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000); + out_be32(port->utl_base + PEUTL_INTR, 0x02000000); + out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000); + out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000); + out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000); + out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000); + out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000); + out_be32(port->utl_base + PEUTL_PCTL, 0x80800066); + + return 0; +} + +static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata = +{ + .core_init = ppc460ex_pciex_core_init, + .port_init_hw = ppc460ex_pciex_init_port_hw, + .setup_utl = ppc460ex_pciex_init_utl, +}; + +static int __init ppc460sx_pciex_core_init(struct device_node *np) +{ + /* HSS drive amplitude */ + mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211); + mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211); + + mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211); + mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211); + mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211); + mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211); + + mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211); + mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211); + mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211); + mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211); + + /* HSS TX pre-emphasis */ + mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987); + + mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987); + + mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987); + mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987); + + /* HSS TX calibration control */ + mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222); + mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000); + mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000); + + /* HSS TX slew control */ + mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF); + mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000); + mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000); + + udelay(100); + + /* De-assert PLLRESET */ + dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0); + + /* Reset DL, UTL, GPL before configuration */ + mtdcri(SDR0, PESDR0_460SX_RCSSET, + PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU); + mtdcri(SDR0, PESDR1_460SX_RCSSET, + PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU); + mtdcri(SDR0, PESDR2_460SX_RCSSET, + PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU); + + udelay(100); + + /* + * If bifurcation is not enabled, u-boot would have disabled the + * third PCIe port + */ + if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) == + 0x00000001)) { + printk(KERN_INFO "PCI: PCIE bifurcation setup 
successfully.\n"); + printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n"); + return 3; + } + + printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n"); + return 2; +} + +static int ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + + if (port->endpoint) + dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2, + 0x01000000, 0); + else + dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2, + 0, 0x01000000); + + /*Gen-1*/ + mtdcri(SDR0, port->sdr_base + PESDRn_460SX_RCEI, 0x08000000); + + dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, + (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL), + PESDRx_RCSSET_RSTPYN); + + port->has_ibpre = 1; + + return 0; +} + +static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + /* Max 128 Bytes */ + out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000); + return 0; +} + +static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = { + .core_init = ppc460sx_pciex_core_init, + .port_init_hw = ppc460sx_pciex_init_port_hw, + .setup_utl = ppc460sx_pciex_init_utl, +}; + +#endif /* CONFIG_44x */ + +#ifdef CONFIG_40x + +static int __init ppc405ex_pciex_core_init(struct device_node *np) +{ + /* Nothing to do, return 2 ports */ + return 2; +} + +static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port) +{ + /* Assert the PE0_PHY reset */ + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000); + msleep(1); + + /* deassert the PE0_hotreset */ + if (port->endpoint) + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000); + else + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000); + + /* poll for phy !reset */ + /* XXX FIXME add timeout */ + while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000)) + ; + + /* deassert the PE0_gpl_utl_reset */ + mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000); +} + +static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port) +{ + u32 val; + + if (port->endpoint) + val = PTYPE_LEGACY_ENDPOINT; + else + val = PTYPE_ROOT_PORT; + + mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, + 1 << 24 | val << 20 | LNKW_X1 << 12); + + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000); + mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000); + mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000); + mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003); + + /* + * Only reset the PHY when no link is currently established. + * This is for the Atheros PCIe board which has problems to establish + * the link (again) after this PHY reset. All other currently tested + * PCIe boards don't show this problem. + * This has to be re-tested and fixed in a later release! + */ + val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP); + if (!(val & 0x00001000)) + ppc405ex_pcie_phy_reset(port); + + dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */ + + port->has_ibpre = 1; + + return 0; +} + +static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port) +{ + dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0); + + /* + * Set buffer allocations and then assert VRB and TXE. 
+ */
+	out_be32(port->utl_base + PEUTL_OUTTR,   0x02000000);
+	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
+	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x04000000);
+	out_be32(port->utl_base + PEUTL_PBBSZ,   0x21000000);
+	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x02000000);
+	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x04000000);
+	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
+	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);
+
+	out_be32(port->utl_base + PEUTL_PBCTL,   0x08000000);
+
+	return 0;
+}
+
+static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
+{
+	.core_init	= ppc405ex_pciex_core_init,
+	.port_init_hw	= ppc405ex_pciex_init_port_hw,
+	.setup_utl	= ppc405ex_pciex_init_utl,
+};
+
+#endif /* CONFIG_40x */
+
+
+/* Check that the core has been initialized and, if not, do it */
+static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
+{
+	static int core_init;
+	int count = -ENODEV;
+
+	if (core_init++)
+		return 0;
+
+#ifdef CONFIG_44x
+	if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
+		if (ppc440spe_revA())
+			ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
+		else
+			ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
+	}
+	if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
+		ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
+	if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
+		ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
+#endif /* CONFIG_44x */
+#ifdef CONFIG_40x
+	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
+		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
+#endif
+	if (ppc4xx_pciex_hwops == NULL) {
+		printk(KERN_WARNING "PCIE: unknown host type %s\n",
+		       np->full_name);
+		return -ENODEV;
+	}
+
+	count = ppc4xx_pciex_hwops->core_init(np);
+	if (count > 0) {
+		ppc4xx_pciex_ports =
+		       kzalloc(count * sizeof(struct ppc4xx_pciex_port),
+			       GFP_KERNEL);
+		if (ppc4xx_pciex_ports) {
+			ppc4xx_pciex_port_count = count;
+			return 0;
+		}
+		printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
+		return -ENOMEM;
+	}
+	return -ENODEV;
+}
+
+static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
+{
+	/* We map PCI Express configuration based on the reg property */
+	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
+		  RES_TO_U32_HIGH(port->cfg_space.start));
+	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
+		  RES_TO_U32_LOW(port->cfg_space.start));
+
+	/* XXX FIXME: Use size from reg property.
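+	 * (The 0xe0000001 written below is presumably ~(512M - 1) in the
+	 * upper bits plus the enable bit, the same encoding as the sa
+	 * value computed in ppc4xx_setup_one_pciex_POM() further down.)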
For now, map 512M */ + dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001); + + /* We map UTL registers based on the reg property */ + dcr_write(port->dcrs, DCRO_PEGPL_REGBAH, + RES_TO_U32_HIGH(port->utl_regs.start)); + dcr_write(port->dcrs, DCRO_PEGPL_REGBAL, + RES_TO_U32_LOW(port->utl_regs.start)); + + /* XXX FIXME: Use size from reg property */ + dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001); + + /* Disable all other outbound windows */ + dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0); + dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0); +} + +static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port, + unsigned int sdr_offset, + unsigned int mask, + unsigned int value, + int timeout_ms) +{ + u32 val; + + while(timeout_ms--) { + val = mfdcri(SDR0, port->sdr_base + sdr_offset); + if ((val & mask) == value) { + pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n", + port->index, sdr_offset, timeout_ms, val); + return 0; + } + msleep(1); + } + return -1; +} + +static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port) +{ + int rc = 0; + + /* Init HW */ + if (ppc4xx_pciex_hwops->port_init_hw) + rc = ppc4xx_pciex_hwops->port_init_hw(port); + if (rc != 0) + return rc; + + printk(KERN_INFO "PCIE%d: Checking link...\n", + port->index); + + /* Wait for reset to complete */ + if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) { + printk(KERN_WARNING "PCIE%d: PGRST failed\n", + port->index); + return -1; + } + + /* Check for card presence detect if supported, if not, just wait for + * link unconditionally. + * + * note that we don't fail if there is no link, we just filter out + * config space accesses. That way, it will be easier to implement + * hotplug later on. + */ + if (!port->has_ibpre || + !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP, + 1 << 28, 1 << 28, 100)) { + printk(KERN_INFO + "PCIE%d: Device detected, waiting for link...\n", + port->index); + if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP, + 0x1000, 0x1000, 2000)) + printk(KERN_WARNING + "PCIE%d: Link up failed\n", port->index); + else { + printk(KERN_INFO + "PCIE%d: link is up !\n", port->index); + port->link = 1; + } + } else + printk(KERN_INFO "PCIE%d: No device detected.\n", port->index); + + /* + * Initialize mapping: disable all regions and configure + * CFG and REG regions based on resources in the device tree + */ + ppc4xx_pciex_port_init_mapping(port); + + /* + * Map UTL + */ + port->utl_base = ioremap(port->utl_regs.start, 0x100); + BUG_ON(port->utl_base == NULL); + + /* + * Setup UTL registers --BenH. + */ + if (ppc4xx_pciex_hwops->setup_utl) + ppc4xx_pciex_hwops->setup_utl(port); + + /* + * Check for VC0 active and assert RDY. + */ + if (port->link && + ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, + 1 << 16, 1 << 16, 5000)) { + printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index); + port->link = 0; + } + + dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20); + msleep(100); + + return 0; +} + +static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port, + struct pci_bus *bus, + unsigned int devfn) +{ + static int message; + + /* Endpoint can not generate upstream(remote) config cycles */ + if (port->endpoint && bus->number != port->hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Check we are within the mapped range */ + if (bus->number > port->hose->last_busno) { + if (!message) { + printk(KERN_WARNING "Warning! 
Probing bus %u" + " out of range !\n", bus->number); + message++; + } + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* The root complex has only one device / function */ + if (bus->number == port->hose->first_busno && devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* The other side of the RC has only one device as well */ + if (bus->number == (port->hose->first_busno + 1) && + PCI_SLOT(devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Check if we have a link */ + if ((bus->number != port->hose->first_busno) && !port->link) + return PCIBIOS_DEVICE_NOT_FOUND; + + return 0; +} + +static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port, + struct pci_bus *bus, + unsigned int devfn) +{ + int relbus; + + /* Remove the casts when we finally remove the stupid volatile + * in struct pci_controller + */ + if (bus->number == port->hose->first_busno) + return (void __iomem *)port->hose->cfg_addr; + + relbus = bus->number - (port->hose->first_busno + 1); + return (void __iomem *)port->hose->cfg_data + + ((relbus << 20) | (devfn << 12)); +} + +static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + struct ppc4xx_pciex_port *port = + &ppc4xx_pciex_ports[hose->indirect_type]; + void __iomem *addr; + u32 gpl_cfg; + + BUG_ON(hose != port->hose); + + if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + addr = ppc4xx_pciex_get_config_base(port, bus, devfn); + + /* + * Reading from configuration space of non-existing device can + * generate transaction errors. For the read duration we suppress + * assertion of machine check exceptions to avoid those. + */ + gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG); + dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA); + + /* Make sure no CRS is recorded */ + out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000); + + switch (len) { + case 1: + *val = in_8((u8 *)(addr + offset)); + break; + case 2: + *val = in_le16((u16 *)(addr + offset)); + break; + default: + *val = in_le32((u32 *)(addr + offset)); + break; + } + + pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x" + " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n", + bus->number, hose->first_busno, hose->last_busno, + devfn, offset, len, addr + offset, *val); + + /* Check for CRS (440SPe rev B does that for us but heh ..) */ + if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) { + pr_debug("Got CRS !\n"); + if (len != 4 || offset != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + *val = 0xffff0001; + } + + dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg); + + return PCIBIOS_SUCCESSFUL; +} + +static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + struct ppc4xx_pciex_port *port = + &ppc4xx_pciex_ports[hose->indirect_type]; + void __iomem *addr; + u32 gpl_cfg; + + if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + addr = ppc4xx_pciex_get_config_base(port, bus, devfn); + + /* + * Reading from configuration space of non-existing device can + * generate transaction errors. For the read duration we suppress + * assertion of machine check exceptions to avoid those. 
+ */ + gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG); + dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA); + + pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x" + " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n", + bus->number, hose->first_busno, hose->last_busno, + devfn, offset, len, addr + offset, val); + + switch (len) { + case 1: + out_8((u8 *)(addr + offset), val); + break; + case 2: + out_le16((u16 *)(addr + offset), val); + break; + default: + out_le32((u32 *)(addr + offset), val); + break; + } + + dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops ppc4xx_pciex_pci_ops = +{ + .read = ppc4xx_pciex_read_config, + .write = ppc4xx_pciex_write_config, +}; + +static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port, + struct pci_controller *hose, + void __iomem *mbase, + u64 plb_addr, + u64 pci_addr, + u64 size, + unsigned int flags, + int index) +{ + u32 lah, lal, pciah, pcial, sa; + + if (!is_power_of_2(size) || + (index < 2 && size < 0x100000) || + (index == 2 && size < 0x100) || + (plb_addr & (size - 1)) != 0) { + printk(KERN_WARNING "%s: Resource out of range\n", + hose->dn->full_name); + return -1; + } + + /* Calculate register values */ + lah = RES_TO_U32_HIGH(plb_addr); + lal = RES_TO_U32_LOW(plb_addr); + pciah = RES_TO_U32_HIGH(pci_addr); + pcial = RES_TO_U32_LOW(pci_addr); + sa = (0xffffffffu << ilog2(size)) | 0x1; + + /* Program register values */ + switch (index) { + case 0: + out_le32(mbase + PECFG_POM0LAH, pciah); + out_le32(mbase + PECFG_POM0LAL, pcial); + dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah); + dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal); + dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff); + /* Note that 3 here means enabled | single region */ + dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3); + break; + case 1: + out_le32(mbase + PECFG_POM1LAH, pciah); + out_le32(mbase + PECFG_POM1LAL, pcial); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal); + dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff); + /* Note that 3 here means enabled | single region */ + dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3); + break; + case 2: + out_le32(mbase + PECFG_POM2LAH, pciah); + out_le32(mbase + PECFG_POM2LAL, pcial); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal); + dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff); + /* Note that 3 here means enabled | IO space !!! 
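+		 * As an illustration of the sa encoding shared by all three
+		 * windows: a 1MB window gives sa = (0xffffffffu << 20) | 1
+		 * = 0xfff00001.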
*/ + dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3); + break; + } + + return 0; +} + +static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port, + struct pci_controller *hose, + void __iomem *mbase) +{ + int i, j, found_isa_hole = 0; + + /* Setup outbound memory windows */ + for (i = j = 0; i < 3; i++) { + struct resource *res = &hose->mem_resources[i]; + + /* we only care about memory windows */ + if (!(res->flags & IORESOURCE_MEM)) + continue; + if (j > 1) { + printk(KERN_WARNING "%s: Too many ranges\n", + port->node->full_name); + break; + } + + /* Configure the resource */ + if (ppc4xx_setup_one_pciex_POM(port, hose, mbase, + res->start, + res->start - hose->pci_mem_offset, + res->end + 1 - res->start, + res->flags, + j) == 0) { + j++; + + /* If the resource PCI address is 0 then we have our + * ISA memory hole + */ + if (res->start == hose->pci_mem_offset) + found_isa_hole = 1; + } + } + + /* Handle ISA memory hole if not already covered */ + if (j <= 1 && !found_isa_hole && hose->isa_mem_size) + if (ppc4xx_setup_one_pciex_POM(port, hose, mbase, + hose->isa_mem_phys, 0, + hose->isa_mem_size, 0, j) == 0) + printk(KERN_INFO "%s: Legacy ISA memory support enabled\n", + hose->dn->full_name); + + /* Configure IO, always 64K starting at 0. We hard wire it to 64K ! + * Note also that it -has- to be region index 2 on this HW + */ + if (hose->io_resource.flags & IORESOURCE_IO) + ppc4xx_setup_one_pciex_POM(port, hose, mbase, + hose->io_base_phys, 0, + 0x10000, IORESOURCE_IO, 2); +} + +static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port, + struct pci_controller *hose, + void __iomem *mbase, + struct resource *res) +{ + resource_size_t size = res->end - res->start + 1; + u64 sa; + + if (port->endpoint) { + resource_size_t ep_addr = 0; + resource_size_t ep_size = 32 << 20; + + /* Currently we map a fixed 64MByte window to PLB address + * 0 (SDRAM). This should probably be configurable via a dts + * property. + */ + + /* Calculate window size */ + sa = (0xffffffffffffffffull << ilog2(ep_size)); + + /* Setup BAR0 */ + out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) | + PCI_BASE_ADDRESS_MEM_TYPE_64); + + /* Disable BAR1 & BAR2 */ + out_le32(mbase + PECFG_BAR1MPA, 0); + out_le32(mbase + PECFG_BAR2HMPA, 0); + out_le32(mbase + PECFG_BAR2LMPA, 0); + + out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa)); + + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr)); + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr)); + } else { + /* Calculate window size */ + sa = (0xffffffffffffffffull << ilog2(size)); + if (res->flags & IORESOURCE_PREFETCH) + sa |= 0x8; + + out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa)); + out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa)); + + /* The setup of the split looks weird to me ... 
let's see + * if it works + */ + out_le32(mbase + PECFG_PIM0LAL, 0x00000000); + out_le32(mbase + PECFG_PIM0LAH, 0x00000000); + out_le32(mbase + PECFG_PIM1LAL, 0x00000000); + out_le32(mbase + PECFG_PIM1LAH, 0x00000000); + out_le32(mbase + PECFG_PIM01SAH, 0xffff0000); + out_le32(mbase + PECFG_PIM01SAL, 0x00000000); + + out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start)); + out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start)); + } + + /* Enable inbound mapping */ + out_le32(mbase + PECFG_PIMEN, 0x1); + + /* Enable I/O, Mem, and Busmaster cycles */ + out_le16(mbase + PCI_COMMAND, + in_le16(mbase + PCI_COMMAND) | + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); +} + +static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port) +{ + struct resource dma_window; + struct pci_controller *hose = NULL; + const int *bus_range; + int primary = 0, busses; + void __iomem *mbase = NULL, *cfg_data = NULL; + const u32 *pval; + u32 val; + + /* Check if primary bridge */ + if (of_get_property(port->node, "primary", NULL)) + primary = 1; + + /* Get bus range if any */ + bus_range = of_get_property(port->node, "bus-range", NULL); + + /* Allocate the host controller data structure */ + hose = pcibios_alloc_controller(port->node); + if (!hose) + goto fail; + + /* We stick the port number in "indirect_type" so the config space + * ops can retrieve the port data structure easily + */ + hose->indirect_type = port->index; + + /* Get bus range */ + hose->first_busno = bus_range ? bus_range[0] : 0x0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + /* Because of how big mapping the config space is (1M per bus), we + * limit how many busses we support. In the long run, we could replace + * that with something akin to kmap_atomic instead. We set aside 1 bus + * for the host itself too. + */ + busses = hose->last_busno - hose->first_busno; /* This is off by 1 */ + if (busses > MAX_PCIE_BUS_MAPPED) { + busses = MAX_PCIE_BUS_MAPPED; + hose->last_busno = hose->first_busno + busses; + } + + if (!port->endpoint) { + /* Only map the external config space in cfg_data for + * PCIe root-complexes. External space is 1M per bus + */ + cfg_data = ioremap(port->cfg_space.start + + (hose->first_busno + 1) * 0x100000, + busses * 0x100000); + if (cfg_data == NULL) { + printk(KERN_ERR "%s: Can't map external config space !", + port->node->full_name); + goto fail; + } + hose->cfg_data = cfg_data; + } + + /* Always map the host config space in cfg_addr. 
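+	 * (cfg_addr then addresses the root port's own registers, while
+	 * cfg_data, mapped above for root-complex mode only, covers the
+	 * downstream buses -- see ppc4xx_pciex_get_config_base().)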
+ * Internal space is 4K + */ + mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000); + if (mbase == NULL) { + printk(KERN_ERR "%s: Can't map internal config space !", + port->node->full_name); + goto fail; + } + hose->cfg_addr = mbase; + + pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name, + hose->first_busno, hose->last_busno); + pr_debug(" config space mapped at: root @0x%p, other @0x%p\n", + hose->cfg_addr, hose->cfg_data); + + /* Setup config space */ + hose->ops = &ppc4xx_pciex_pci_ops; + port->hose = hose; + mbase = (void __iomem *)hose->cfg_addr; + + if (!port->endpoint) { + /* + * Set bus numbers on our root port + */ + out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno); + out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1); + out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno); + } + + /* + * OMRs are already reset, also disable PIMs + */ + out_le32(mbase + PECFG_PIMEN, 0); + + /* Parse outbound mapping resources */ + pci_process_bridge_OF_ranges(hose, port->node, primary); + + /* Parse inbound mapping resources */ + if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0) + goto fail; + + /* Configure outbound ranges POMs */ + ppc4xx_configure_pciex_POMs(port, hose, mbase); + + /* Configure inbound ranges PIMs */ + ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window); + + /* The root complex doesn't show up if we don't set some vendor + * and device IDs into it. The defaults below are the same bogus + * one that the initial code in arch/ppc had. This can be + * overwritten by setting the "vendor-id/device-id" properties + * in the pciex node. + */ + + /* Get the (optional) vendor-/device-id from the device-tree */ + pval = of_get_property(port->node, "vendor-id", NULL); + if (pval) { + val = *pval; + } else { + if (!port->endpoint) + val = 0xaaa0 + port->index; + else + val = 0xeee0 + port->index; + } + out_le16(mbase + 0x200, val); + + pval = of_get_property(port->node, "device-id", NULL); + if (pval) { + val = *pval; + } else { + if (!port->endpoint) + val = 0xbed0 + port->index; + else + val = 0xfed0 + port->index; + } + out_le16(mbase + 0x202, val); + + if (!port->endpoint) { + /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */ + out_le32(mbase + 0x208, 0x06040001); + + printk(KERN_INFO "PCIE%d: successfully set as root-complex\n", + port->index); + } else { + /* Set Class Code to Processor/PPC */ + out_le32(mbase + 0x208, 0x0b200001); + + printk(KERN_INFO "PCIE%d: successfully set as endpoint\n", + port->index); + } + + return; + fail: + if (hose) + pcibios_free_controller(hose); + if (cfg_data) + iounmap(cfg_data); + if (mbase) + iounmap(mbase); +} + +static void __init ppc4xx_probe_pciex_bridge(struct device_node *np) +{ + struct ppc4xx_pciex_port *port; + const u32 *pval; + int portno; + unsigned int dcrs; + const char *val; + + /* First, proceed to core initialization as we assume there's + * only one PCIe core in the system + */ + if (ppc4xx_pciex_check_core_init(np)) + return; + + /* Get the port number from the device-tree */ + pval = of_get_property(np, "port", NULL); + if (pval == NULL) { + printk(KERN_ERR "PCIE: Can't find port number for %s\n", + np->full_name); + return; + } + portno = *pval; + if (portno >= ppc4xx_pciex_port_count) { + printk(KERN_ERR "PCIE: port number out of range for %s\n", + np->full_name); + return; + } + port = &ppc4xx_pciex_ports[portno]; + port->index = portno; + + /* + * Check if device is enabled + */ + if (!of_device_is_available(np)) { + printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", 
port->index); + return; + } + + port->node = of_node_get(np); + pval = of_get_property(np, "sdr-base", NULL); + if (pval == NULL) { + printk(KERN_ERR "PCIE: missing sdr-base for %s\n", + np->full_name); + return; + } + port->sdr_base = *pval; + + /* Check if device_type property is set to "pci" or "pci-endpoint". + * Resulting from this setup this PCIe port will be configured + * as root-complex or as endpoint. + */ + val = of_get_property(port->node, "device_type", NULL); + if (!strcmp(val, "pci-endpoint")) { + port->endpoint = 1; + } else if (!strcmp(val, "pci")) { + port->endpoint = 0; + } else { + printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n", + np->full_name); + return; + } + + /* Fetch config space registers address */ + if (of_address_to_resource(np, 0, &port->cfg_space)) { + printk(KERN_ERR "%s: Can't get PCI-E config space !", + np->full_name); + return; + } + /* Fetch host bridge internal registers address */ + if (of_address_to_resource(np, 1, &port->utl_regs)) { + printk(KERN_ERR "%s: Can't get UTL register base !", + np->full_name); + return; + } + + /* Map DCRs */ + dcrs = dcr_resource_start(np, 0); + if (dcrs == 0) { + printk(KERN_ERR "%s: Can't get DCR register base !", + np->full_name); + return; + } + port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); + + /* Initialize the port specific registers */ + if (ppc4xx_pciex_port_init(port)) { + printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index); + return; + } + + /* Setup the linux hose data structure */ + ppc4xx_pciex_port_setup_hose(port); +} + +#endif /* CONFIG_PPC4xx_PCI_EXPRESS */ + +static int __init ppc4xx_pci_find_bridges(void) +{ + struct device_node *np; + + ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0; + +#ifdef CONFIG_PPC4xx_PCI_EXPRESS + for_each_compatible_node(np, NULL, "ibm,plb-pciex") + ppc4xx_probe_pciex_bridge(np); +#endif + for_each_compatible_node(np, NULL, "ibm,plb-pcix") + ppc4xx_probe_pcix_bridge(np); + for_each_compatible_node(np, NULL, "ibm,plb-pci") + ppc4xx_probe_pci_bridge(np); + + return 0; +} +arch_initcall(ppc4xx_pci_find_bridges); + diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h new file mode 100644 index 00000000..c39a134e --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_pci.h @@ -0,0 +1,486 @@ +/* + * PCI / PCI-X / PCI-Express support for 4xx parts + * + * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. + * + * Bits and pieces extracted from arch/ppc support by + * + * Matt Porter <mporter@kernel.crashing.org> + * + * Copyright 2002-2005 MontaVista Software Inc. 
+ */ +#ifndef __PPC4XX_PCI_H__ +#define __PPC4XX_PCI_H__ + +/* + * 4xx PCI-X bridge register definitions + */ +#define PCIX0_VENDID 0x000 +#define PCIX0_DEVID 0x002 +#define PCIX0_COMMAND 0x004 +#define PCIX0_STATUS 0x006 +#define PCIX0_REVID 0x008 +#define PCIX0_CLS 0x009 +#define PCIX0_CACHELS 0x00c +#define PCIX0_LATTIM 0x00d +#define PCIX0_HDTYPE 0x00e +#define PCIX0_BIST 0x00f +#define PCIX0_BAR0L 0x010 +#define PCIX0_BAR0H 0x014 +#define PCIX0_BAR1 0x018 +#define PCIX0_BAR2L 0x01c +#define PCIX0_BAR2H 0x020 +#define PCIX0_BAR3 0x024 +#define PCIX0_CISPTR 0x028 +#define PCIX0_SBSYSVID 0x02c +#define PCIX0_SBSYSID 0x02e +#define PCIX0_EROMBA 0x030 +#define PCIX0_CAP 0x034 +#define PCIX0_RES0 0x035 +#define PCIX0_RES1 0x036 +#define PCIX0_RES2 0x038 +#define PCIX0_INTLN 0x03c +#define PCIX0_INTPN 0x03d +#define PCIX0_MINGNT 0x03e +#define PCIX0_MAXLTNCY 0x03f +#define PCIX0_BRDGOPT1 0x040 +#define PCIX0_BRDGOPT2 0x044 +#define PCIX0_ERREN 0x050 +#define PCIX0_ERRSTS 0x054 +#define PCIX0_PLBBESR 0x058 +#define PCIX0_PLBBEARL 0x05c +#define PCIX0_PLBBEARH 0x060 +#define PCIX0_POM0LAL 0x068 +#define PCIX0_POM0LAH 0x06c +#define PCIX0_POM0SA 0x070 +#define PCIX0_POM0PCIAL 0x074 +#define PCIX0_POM0PCIAH 0x078 +#define PCIX0_POM1LAL 0x07c +#define PCIX0_POM1LAH 0x080 +#define PCIX0_POM1SA 0x084 +#define PCIX0_POM1PCIAL 0x088 +#define PCIX0_POM1PCIAH 0x08c +#define PCIX0_POM2SA 0x090 +#define PCIX0_PIM0SAL 0x098 +#define PCIX0_PIM0SA PCIX0_PIM0SAL +#define PCIX0_PIM0LAL 0x09c +#define PCIX0_PIM0LAH 0x0a0 +#define PCIX0_PIM1SA 0x0a4 +#define PCIX0_PIM1LAL 0x0a8 +#define PCIX0_PIM1LAH 0x0ac +#define PCIX0_PIM2SAL 0x0b0 +#define PCIX0_PIM2SA PCIX0_PIM2SAL +#define PCIX0_PIM2LAL 0x0b4 +#define PCIX0_PIM2LAH 0x0b8 +#define PCIX0_OMCAPID 0x0c0 +#define PCIX0_OMNIPTR 0x0c1 +#define PCIX0_OMMC 0x0c2 +#define PCIX0_OMMA 0x0c4 +#define PCIX0_OMMUA 0x0c8 +#define PCIX0_OMMDATA 0x0cc +#define PCIX0_OMMEOI 0x0ce +#define PCIX0_PMCAPID 0x0d0 +#define PCIX0_PMNIPTR 0x0d1 +#define PCIX0_PMC 0x0d2 +#define PCIX0_PMCSR 0x0d4 +#define PCIX0_PMCSRBSE 0x0d6 +#define PCIX0_PMDATA 0x0d7 +#define PCIX0_PMSCRR 0x0d8 +#define PCIX0_CAPID 0x0dc +#define PCIX0_NIPTR 0x0dd +#define PCIX0_CMD 0x0de +#define PCIX0_STS 0x0e0 +#define PCIX0_IDR 0x0e4 +#define PCIX0_CID 0x0e8 +#define PCIX0_RID 0x0ec +#define PCIX0_PIM0SAH 0x0f8 +#define PCIX0_PIM2SAH 0x0fc +#define PCIX0_MSGIL 0x100 +#define PCIX0_MSGIH 0x104 +#define PCIX0_MSGOL 0x108 +#define PCIX0_MSGOH 0x10c +#define PCIX0_IM 0x1f8 + +/* + * 4xx PCI bridge register definitions + */ +#define PCIL0_PMM0LA 0x00 +#define PCIL0_PMM0MA 0x04 +#define PCIL0_PMM0PCILA 0x08 +#define PCIL0_PMM0PCIHA 0x0c +#define PCIL0_PMM1LA 0x10 +#define PCIL0_PMM1MA 0x14 +#define PCIL0_PMM1PCILA 0x18 +#define PCIL0_PMM1PCIHA 0x1c +#define PCIL0_PMM2LA 0x20 +#define PCIL0_PMM2MA 0x24 +#define PCIL0_PMM2PCILA 0x28 +#define PCIL0_PMM2PCIHA 0x2c +#define PCIL0_PTM1MS 0x30 +#define PCIL0_PTM1LA 0x34 +#define PCIL0_PTM2MS 0x38 +#define PCIL0_PTM2LA 0x3c + +/* + * 4xx PCIe bridge register definitions + */ + +/* DCR offsets */ +#define DCRO_PEGPL_CFGBAH 0x00 +#define DCRO_PEGPL_CFGBAL 0x01 +#define DCRO_PEGPL_CFGMSK 0x02 +#define DCRO_PEGPL_MSGBAH 0x03 +#define DCRO_PEGPL_MSGBAL 0x04 +#define DCRO_PEGPL_MSGMSK 0x05 +#define DCRO_PEGPL_OMR1BAH 0x06 +#define DCRO_PEGPL_OMR1BAL 0x07 +#define DCRO_PEGPL_OMR1MSKH 0x08 +#define DCRO_PEGPL_OMR1MSKL 0x09 +#define DCRO_PEGPL_OMR2BAH 0x0a +#define DCRO_PEGPL_OMR2BAL 0x0b +#define DCRO_PEGPL_OMR2MSKH 0x0c +#define DCRO_PEGPL_OMR2MSKL 0x0d +#define 
DCRO_PEGPL_OMR3BAH 0x0e +#define DCRO_PEGPL_OMR3BAL 0x0f +#define DCRO_PEGPL_OMR3MSKH 0x10 +#define DCRO_PEGPL_OMR3MSKL 0x11 +#define DCRO_PEGPL_REGBAH 0x12 +#define DCRO_PEGPL_REGBAL 0x13 +#define DCRO_PEGPL_REGMSK 0x14 +#define DCRO_PEGPL_SPECIAL 0x15 +#define DCRO_PEGPL_CFG 0x16 +#define DCRO_PEGPL_ESR 0x17 +#define DCRO_PEGPL_EARH 0x18 +#define DCRO_PEGPL_EARL 0x19 +#define DCRO_PEGPL_EATR 0x1a + +/* DMER mask */ +#define GPL_DMER_MASK_DISA 0x02000000 + +/* + * System DCRs (SDRs) + */ +#define PESDR0_PLLLCT1 0x03a0 +#define PESDR0_PLLLCT2 0x03a1 +#define PESDR0_PLLLCT3 0x03a2 + +/* + * 440SPe additional DCRs + */ +#define PESDR0_440SPE_UTLSET1 0x0300 +#define PESDR0_440SPE_UTLSET2 0x0301 +#define PESDR0_440SPE_DLPSET 0x0302 +#define PESDR0_440SPE_LOOP 0x0303 +#define PESDR0_440SPE_RCSSET 0x0304 +#define PESDR0_440SPE_RCSSTS 0x0305 +#define PESDR0_440SPE_HSSL0SET1 0x0306 +#define PESDR0_440SPE_HSSL0SET2 0x0307 +#define PESDR0_440SPE_HSSL0STS 0x0308 +#define PESDR0_440SPE_HSSL1SET1 0x0309 +#define PESDR0_440SPE_HSSL1SET2 0x030a +#define PESDR0_440SPE_HSSL1STS 0x030b +#define PESDR0_440SPE_HSSL2SET1 0x030c +#define PESDR0_440SPE_HSSL2SET2 0x030d +#define PESDR0_440SPE_HSSL2STS 0x030e +#define PESDR0_440SPE_HSSL3SET1 0x030f +#define PESDR0_440SPE_HSSL3SET2 0x0310 +#define PESDR0_440SPE_HSSL3STS 0x0311 +#define PESDR0_440SPE_HSSL4SET1 0x0312 +#define PESDR0_440SPE_HSSL4SET2 0x0313 +#define PESDR0_440SPE_HSSL4STS 0x0314 +#define PESDR0_440SPE_HSSL5SET1 0x0315 +#define PESDR0_440SPE_HSSL5SET2 0x0316 +#define PESDR0_440SPE_HSSL5STS 0x0317 +#define PESDR0_440SPE_HSSL6SET1 0x0318 +#define PESDR0_440SPE_HSSL6SET2 0x0319 +#define PESDR0_440SPE_HSSL6STS 0x031a +#define PESDR0_440SPE_HSSL7SET1 0x031b +#define PESDR0_440SPE_HSSL7SET2 0x031c +#define PESDR0_440SPE_HSSL7STS 0x031d +#define PESDR0_440SPE_HSSCTLSET 0x031e +#define PESDR0_440SPE_LANE_ABCD 0x031f +#define PESDR0_440SPE_LANE_EFGH 0x0320 + +#define PESDR1_440SPE_UTLSET1 0x0340 +#define PESDR1_440SPE_UTLSET2 0x0341 +#define PESDR1_440SPE_DLPSET 0x0342 +#define PESDR1_440SPE_LOOP 0x0343 +#define PESDR1_440SPE_RCSSET 0x0344 +#define PESDR1_440SPE_RCSSTS 0x0345 +#define PESDR1_440SPE_HSSL0SET1 0x0346 +#define PESDR1_440SPE_HSSL0SET2 0x0347 +#define PESDR1_440SPE_HSSL0STS 0x0348 +#define PESDR1_440SPE_HSSL1SET1 0x0349 +#define PESDR1_440SPE_HSSL1SET2 0x034a +#define PESDR1_440SPE_HSSL1STS 0x034b +#define PESDR1_440SPE_HSSL2SET1 0x034c +#define PESDR1_440SPE_HSSL2SET2 0x034d +#define PESDR1_440SPE_HSSL2STS 0x034e +#define PESDR1_440SPE_HSSL3SET1 0x034f +#define PESDR1_440SPE_HSSL3SET2 0x0350 +#define PESDR1_440SPE_HSSL3STS 0x0351 +#define PESDR1_440SPE_HSSCTLSET 0x0352 +#define PESDR1_440SPE_LANE_ABCD 0x0353 + +#define PESDR2_440SPE_UTLSET1 0x0370 +#define PESDR2_440SPE_UTLSET2 0x0371 +#define PESDR2_440SPE_DLPSET 0x0372 +#define PESDR2_440SPE_LOOP 0x0373 +#define PESDR2_440SPE_RCSSET 0x0374 +#define PESDR2_440SPE_RCSSTS 0x0375 +#define PESDR2_440SPE_HSSL0SET1 0x0376 +#define PESDR2_440SPE_HSSL0SET2 0x0377 +#define PESDR2_440SPE_HSSL0STS 0x0378 +#define PESDR2_440SPE_HSSL1SET1 0x0379 +#define PESDR2_440SPE_HSSL1SET2 0x037a +#define PESDR2_440SPE_HSSL1STS 0x037b +#define PESDR2_440SPE_HSSL2SET1 0x037c +#define PESDR2_440SPE_HSSL2SET2 0x037d +#define PESDR2_440SPE_HSSL2STS 0x037e +#define PESDR2_440SPE_HSSL3SET1 0x037f +#define PESDR2_440SPE_HSSL3SET2 0x0380 +#define PESDR2_440SPE_HSSL3STS 0x0381 +#define PESDR2_440SPE_HSSCTLSET 0x0382 +#define PESDR2_440SPE_LANE_ABCD 0x0383 + +/* + * 405EX additional DCRs + */ +#define PESDR0_405EX_UTLSET1 0x0400 
+#define PESDR0_405EX_UTLSET2 0x0401 +#define PESDR0_405EX_DLPSET 0x0402 +#define PESDR0_405EX_LOOP 0x0403 +#define PESDR0_405EX_RCSSET 0x0404 +#define PESDR0_405EX_RCSSTS 0x0405 +#define PESDR0_405EX_PHYSET1 0x0406 +#define PESDR0_405EX_PHYSET2 0x0407 +#define PESDR0_405EX_BIST 0x0408 +#define PESDR0_405EX_LPB 0x040B +#define PESDR0_405EX_PHYSTA 0x040C + +#define PESDR1_405EX_UTLSET1 0x0440 +#define PESDR1_405EX_UTLSET2 0x0441 +#define PESDR1_405EX_DLPSET 0x0442 +#define PESDR1_405EX_LOOP 0x0443 +#define PESDR1_405EX_RCSSET 0x0444 +#define PESDR1_405EX_RCSSTS 0x0445 +#define PESDR1_405EX_PHYSET1 0x0446 +#define PESDR1_405EX_PHYSET2 0x0447 +#define PESDR1_405EX_BIST 0x0448 +#define PESDR1_405EX_LPB 0x044B +#define PESDR1_405EX_PHYSTA 0x044C + +/* + * 460EX additional DCRs + */ +#define PESDR0_460EX_L0BIST 0x0308 +#define PESDR0_460EX_L0BISTSTS 0x0309 +#define PESDR0_460EX_L0CDRCTL 0x030A +#define PESDR0_460EX_L0DRV 0x030B +#define PESDR0_460EX_L0REC 0x030C +#define PESDR0_460EX_L0LPB 0x030D +#define PESDR0_460EX_L0CLK 0x030E +#define PESDR0_460EX_PHY_CTL_RST 0x030F +#define PESDR0_460EX_RSTSTA 0x0310 +#define PESDR0_460EX_OBS 0x0311 +#define PESDR0_460EX_L0ERRC 0x0320 + +#define PESDR1_460EX_L0BIST 0x0348 +#define PESDR1_460EX_L1BIST 0x0349 +#define PESDR1_460EX_L2BIST 0x034A +#define PESDR1_460EX_L3BIST 0x034B +#define PESDR1_460EX_L0BISTSTS 0x034C +#define PESDR1_460EX_L1BISTSTS 0x034D +#define PESDR1_460EX_L2BISTSTS 0x034E +#define PESDR1_460EX_L3BISTSTS 0x034F +#define PESDR1_460EX_L0CDRCTL 0x0350 +#define PESDR1_460EX_L1CDRCTL 0x0351 +#define PESDR1_460EX_L2CDRCTL 0x0352 +#define PESDR1_460EX_L3CDRCTL 0x0353 +#define PESDR1_460EX_L0DRV 0x0354 +#define PESDR1_460EX_L1DRV 0x0355 +#define PESDR1_460EX_L2DRV 0x0356 +#define PESDR1_460EX_L3DRV 0x0357 +#define PESDR1_460EX_L0REC 0x0358 +#define PESDR1_460EX_L1REC 0x0359 +#define PESDR1_460EX_L2REC 0x035A +#define PESDR1_460EX_L3REC 0x035B +#define PESDR1_460EX_L0LPB 0x035C +#define PESDR1_460EX_L1LPB 0x035D +#define PESDR1_460EX_L2LPB 0x035E +#define PESDR1_460EX_L3LPB 0x035F +#define PESDR1_460EX_L0CLK 0x0360 +#define PESDR1_460EX_L1CLK 0x0361 +#define PESDR1_460EX_L2CLK 0x0362 +#define PESDR1_460EX_L3CLK 0x0363 +#define PESDR1_460EX_PHY_CTL_RST 0x0364 +#define PESDR1_460EX_RSTSTA 0x0365 +#define PESDR1_460EX_OBS 0x0366 +#define PESDR1_460EX_L0ERRC 0x0368 +#define PESDR1_460EX_L1ERRC 0x0369 +#define PESDR1_460EX_L2ERRC 0x036A +#define PESDR1_460EX_L3ERRC 0x036B +#define PESDR0_460EX_IHS1 0x036C +#define PESDR0_460EX_IHS2 0x036D + +/* + * 460SX additional DCRs + */ +#define PESDRn_460SX_RCEI 0x02 + +#define PESDR0_460SX_HSSL0DAMP 0x320 +#define PESDR0_460SX_HSSL1DAMP 0x321 +#define PESDR0_460SX_HSSL2DAMP 0x322 +#define PESDR0_460SX_HSSL3DAMP 0x323 +#define PESDR0_460SX_HSSL4DAMP 0x324 +#define PESDR0_460SX_HSSL5DAMP 0x325 +#define PESDR0_460SX_HSSL6DAMP 0x326 +#define PESDR0_460SX_HSSL7DAMP 0x327 + +#define PESDR1_460SX_HSSL0DAMP 0x354 +#define PESDR1_460SX_HSSL1DAMP 0x355 +#define PESDR1_460SX_HSSL2DAMP 0x356 +#define PESDR1_460SX_HSSL3DAMP 0x357 + +#define PESDR2_460SX_HSSL0DAMP 0x384 +#define PESDR2_460SX_HSSL1DAMP 0x385 +#define PESDR2_460SX_HSSL2DAMP 0x386 +#define PESDR2_460SX_HSSL3DAMP 0x387 + +#define PESDR0_460SX_HSSL0COEFA 0x328 +#define PESDR0_460SX_HSSL1COEFA 0x329 +#define PESDR0_460SX_HSSL2COEFA 0x32A +#define PESDR0_460SX_HSSL3COEFA 0x32B +#define PESDR0_460SX_HSSL4COEFA 0x32C +#define PESDR0_460SX_HSSL5COEFA 0x32D +#define PESDR0_460SX_HSSL6COEFA 0x32E +#define PESDR0_460SX_HSSL7COEFA 0x32F + +#define PESDR1_460SX_HSSL0COEFA 
0x358 +#define PESDR1_460SX_HSSL1COEFA 0x359 +#define PESDR1_460SX_HSSL2COEFA 0x35A +#define PESDR1_460SX_HSSL3COEFA 0x35B + +#define PESDR2_460SX_HSSL0COEFA 0x388 +#define PESDR2_460SX_HSSL1COEFA 0x389 +#define PESDR2_460SX_HSSL2COEFA 0x38A +#define PESDR2_460SX_HSSL3COEFA 0x38B + +#define PESDR0_460SX_HSSL1CALDRV 0x339 +#define PESDR1_460SX_HSSL1CALDRV 0x361 +#define PESDR2_460SX_HSSL1CALDRV 0x391 + +#define PESDR0_460SX_HSSSLEW 0x338 +#define PESDR1_460SX_HSSSLEW 0x360 +#define PESDR2_460SX_HSSSLEW 0x390 + +#define PESDR0_460SX_HSSCTLSET 0x31E +#define PESDR1_460SX_HSSCTLSET 0x352 +#define PESDR2_460SX_HSSCTLSET 0x382 + +#define PESDR0_460SX_RCSSET 0x304 +#define PESDR1_460SX_RCSSET 0x344 +#define PESDR2_460SX_RCSSET 0x374 +/* + * Of the above, some are common offsets from the base + */ +#define PESDRn_UTLSET1 0x00 +#define PESDRn_UTLSET2 0x01 +#define PESDRn_DLPSET 0x02 +#define PESDRn_LOOP 0x03 +#define PESDRn_RCSSET 0x04 +#define PESDRn_RCSSTS 0x05 + +/* 440spe only */ +#define PESDRn_440SPE_HSSL0SET1 0x06 +#define PESDRn_440SPE_HSSL0SET2 0x07 +#define PESDRn_440SPE_HSSL0STS 0x08 +#define PESDRn_440SPE_HSSL1SET1 0x09 +#define PESDRn_440SPE_HSSL1SET2 0x0a +#define PESDRn_440SPE_HSSL1STS 0x0b +#define PESDRn_440SPE_HSSL2SET1 0x0c +#define PESDRn_440SPE_HSSL2SET2 0x0d +#define PESDRn_440SPE_HSSL2STS 0x0e +#define PESDRn_440SPE_HSSL3SET1 0x0f +#define PESDRn_440SPE_HSSL3SET2 0x10 +#define PESDRn_440SPE_HSSL3STS 0x11 + +/* 440spe port 0 only */ +#define PESDRn_440SPE_HSSL4SET1 0x12 +#define PESDRn_440SPE_HSSL4SET2 0x13 +#define PESDRn_440SPE_HSSL4STS 0x14 +#define PESDRn_440SPE_HSSL5SET1 0x15 +#define PESDRn_440SPE_HSSL5SET2 0x16 +#define PESDRn_440SPE_HSSL5STS 0x17 +#define PESDRn_440SPE_HSSL6SET1 0x18 +#define PESDRn_440SPE_HSSL6SET2 0x19 +#define PESDRn_440SPE_HSSL6STS 0x1a +#define PESDRn_440SPE_HSSL7SET1 0x1b +#define PESDRn_440SPE_HSSL7SET2 0x1c +#define PESDRn_440SPE_HSSL7STS 0x1d + +/* 405ex only */ +#define PESDRn_405EX_PHYSET1 0x06 +#define PESDRn_405EX_PHYSET2 0x07 +#define PESDRn_405EX_PHYSTA 0x0c + +/* + * UTL register offsets + */ +#define PEUTL_PBCTL 0x00 +#define PEUTL_PBBSZ 0x20 +#define PEUTL_OPDBSZ 0x68 +#define PEUTL_IPHBSZ 0x70 +#define PEUTL_IPDBSZ 0x78 +#define PEUTL_OUTTR 0x90 +#define PEUTL_INTR 0x98 +#define PEUTL_PCTL 0xa0 +#define PEUTL_RCSTA 0xB0 +#define PEUTL_RCIRQEN 0xb8 + +/* + * Config space register offsets + */ +#define PECFG_ECRTCTL 0x074 + +#define PECFG_BAR0LMPA 0x210 +#define PECFG_BAR0HMPA 0x214 +#define PECFG_BAR1MPA 0x218 +#define PECFG_BAR2LMPA 0x220 +#define PECFG_BAR2HMPA 0x224 + +#define PECFG_PIMEN 0x33c +#define PECFG_PIM0LAL 0x340 +#define PECFG_PIM0LAH 0x344 +#define PECFG_PIM1LAL 0x348 +#define PECFG_PIM1LAH 0x34c +#define PECFG_PIM01SAL 0x350 +#define PECFG_PIM01SAH 0x354 + +#define PECFG_POM0LAL 0x380 +#define PECFG_POM0LAH 0x384 +#define PECFG_POM1LAL 0x388 +#define PECFG_POM1LAH 0x38c +#define PECFG_POM2LAL 0x390 +#define PECFG_POM2LAH 0x394 + +/* SDR Bit Mappings */ +#define PESDRx_RCSSET_HLDPLB 0x10000000 +#define PESDRx_RCSSET_RSTGU 0x01000000 +#define PESDRx_RCSSET_RDY 0x00100000 +#define PESDRx_RCSSET_RSTDL 0x00010000 +#define PESDRx_RCSSET_RSTPYN 0x00001000 + +enum +{ + PTYPE_ENDPOINT = 0x0, + PTYPE_LEGACY_ENDPOINT = 0x1, + PTYPE_ROOT_PORT = 0x4, + + LNKW_X1 = 0x1, + LNKW_X4 = 0x4, + LNKW_X8 = 0x8 +}; + + +#endif /* __PPC4XX_PCI_H__ */ diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c new file mode 100644 index 00000000..d3d6ce3c --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_soc.c @@ -0,0 +1,221 
@@ +/* + * IBM/AMCC PPC4xx SoC setup code + * + * Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de> + * + * L2 cache routines cloned from arch/ppc/syslib/ibm440gx_common.c which is: + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * Copyright (c) 2003 - 2006 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/of_platform.h> + +#include <asm/dcr.h> +#include <asm/dcr-regs.h> +#include <asm/reg.h> + +static u32 dcrbase_l2c; + +/* + * L2-cache + */ + +/* Issue L2C diagnostic command */ +static inline u32 l2c_diag(u32 addr) +{ + mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, addr); + mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_DIAG); + while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC)) + ; + + return mfdcr(dcrbase_l2c + DCRN_L2C0_DATA); +} + +static irqreturn_t l2c_error_handler(int irq, void *dev) +{ + u32 sr = mfdcr(dcrbase_l2c + DCRN_L2C0_SR); + + if (sr & L2C_SR_CPE) { + /* Read cache trapped address */ + u32 addr = l2c_diag(0x42000000); + printk(KERN_EMERG "L2C: Cache Parity Error, addr[16:26] = 0x%08x\n", + addr); + } + if (sr & L2C_SR_TPE) { + /* Read tag trapped address */ + u32 addr = l2c_diag(0x82000000) >> 16; + printk(KERN_EMERG "L2C: Tag Parity Error, addr[16:26] = 0x%08x\n", + addr); + } + + /* Clear parity errors */ + if (sr & (L2C_SR_CPE | L2C_SR_TPE)){ + mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0); + mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE); + } else { + printk(KERN_EMERG "L2C: LRU error\n"); + } + + return IRQ_HANDLED; +} + +static int __init ppc4xx_l2c_probe(void) +{ + struct device_node *np; + u32 r; + unsigned long flags; + int irq; + const u32 *dcrreg; + u32 dcrbase_isram; + int len; + const u32 *prop; + u32 l2_size; + + np = of_find_compatible_node(NULL, NULL, "ibm,l2-cache"); + if (!np) + return 0; + + /* Get l2 cache size */ + prop = of_get_property(np, "cache-size", NULL); + if (prop == NULL) { + printk(KERN_ERR "%s: Can't get cache-size!\n", np->full_name); + of_node_put(np); + return -ENODEV; + } + l2_size = prop[0]; + + /* Map DCRs */ + dcrreg = of_get_property(np, "dcr-reg", &len); + if (!dcrreg || (len != 4 * sizeof(u32))) { + printk(KERN_ERR "%s: Can't get DCR register base !", + np->full_name); + of_node_put(np); + return -ENODEV; + } + dcrbase_isram = dcrreg[0]; + dcrbase_l2c = dcrreg[2]; + + /* Get and map irq number from device tree */ + irq = irq_of_parse_and_map(np, 0); + if (irq == NO_IRQ) { + printk(KERN_ERR "irq_of_parse_and_map failed\n"); + of_node_put(np); + return -ENODEV; + } + + /* Install error handler */ + if (request_irq(irq, l2c_error_handler, IRQF_DISABLED, "L2C", 0) < 0) { + printk(KERN_ERR "Cannot install L2C error handler" + ", cache is not enabled\n"); + of_node_put(np); + return -ENODEV; + } + + local_irq_save(flags); + asm volatile ("sync" ::: "memory"); + + /* Disable SRAM */ + mtdcr(dcrbase_isram + DCRN_SRAM0_DPC, + mfdcr(dcrbase_isram + DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE); + mtdcr(dcrbase_isram + DCRN_SRAM0_SB0CR, + mfdcr(dcrbase_isram + DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(dcrbase_isram + DCRN_SRAM0_SB1CR, + mfdcr(dcrbase_isram + DCRN_SRAM0_SB1CR) & 
~SRAM_SBCR_BU_MASK); + mtdcr(dcrbase_isram + DCRN_SRAM0_SB2CR, + mfdcr(dcrbase_isram + DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(dcrbase_isram + DCRN_SRAM0_SB3CR, + mfdcr(dcrbase_isram + DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK); + + /* Enable L2_MODE without ICU/DCU */ + r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG) & + ~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK); + r |= L2C_CFG_L2M | L2C_CFG_SS_256; + mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r); + + mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0); + + /* Hardware Clear Command */ + mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_HCC); + while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC)) + ; + + /* Clear Cache Parity and Tag Errors */ + mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE); + + /* Enable 64G snoop region starting at 0 */ + r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP0) & + ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); + r |= L2C_SNP_SSR_32G | L2C_SNP_ESR; + mtdcr(dcrbase_l2c + DCRN_L2C0_SNP0, r); + + r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP1) & + ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); + r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR; + mtdcr(dcrbase_l2c + DCRN_L2C0_SNP1, r); + + asm volatile ("sync" ::: "memory"); + + /* Enable ICU/DCU ports */ + r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG); + r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM + | L2C_CFG_TPEI | L2C_CFG_CPEI | L2C_CFG_NAM | L2C_CFG_NBRM); + r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN + | L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM | L2C_CFG_SMCM; + + /* Check for 460EX/GT special handling */ + if (of_device_is_compatible(np, "ibm,l2-cache-460ex") || + of_device_is_compatible(np, "ibm,l2-cache-460gt")) + r |= L2C_CFG_RDBW; + + mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r); + + asm volatile ("sync; isync" ::: "memory"); + local_irq_restore(flags); + + printk(KERN_INFO "%dk L2-cache enabled\n", l2_size >> 10); + + of_node_put(np); + return 0; +} +arch_initcall(ppc4xx_l2c_probe); + +/* + * Apply a system reset. Alternatively a board specific value may be + * provided via the "reset-type" property in the cpu node. 
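+ *
+ * A board wanting, say, a chip reset instead of the default system
+ * reset would carry a device-tree snippet along these lines (node
+ * name illustrative; the accepted values are listed in the check
+ * below: 1 core, 2 chip, 3 system):
+ *
+ *	cpu@0 {
+ *		reset-type = <2>;
+ *	};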
+ */ +void ppc4xx_reset_system(char *cmd) +{ + struct device_node *np; + u32 reset_type = DBCR0_RST_SYSTEM; + const u32 *prop; + + np = of_find_node_by_type(NULL, "cpu"); + if (np) { + prop = of_get_property(np, "reset-type", NULL); + + /* + * Check if property exists and if it is in range: + * 1 - PPC4xx core reset + * 2 - PPC4xx chip reset + * 3 - PPC4xx system reset (default) + */ + if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3))) + reset_type = prop[0] << 28; + } + + mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type); + + while (1) + ; /* Just in case the reset doesn't work */ +} diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig new file mode 100644 index 00000000..41ac3dfa --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/Kconfig @@ -0,0 +1,27 @@ +# +# QE Communication options +# + +config UCC_SLOW + bool + default y if SERIAL_QE + help + This option provides qe_lib support to UCC slow + protocols: UART, BISYNC, QMC + +config UCC_FAST + bool + default y if UCC_GETH + help + This option provides qe_lib support to UCC fast + protocols: HDLC, Ethernet, ATM, transparent + +config UCC + bool + default y if UCC_FAST || UCC_SLOW + +config QE_USB + bool + default y if USB_GADGET_FSL_QE + help + QE USB Controller support diff --git a/arch/powerpc/sysdev/qe_lib/Makefile b/arch/powerpc/sysdev/qe_lib/Makefile new file mode 100644 index 00000000..f1855c18 --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the linux ppc-specific parts of QE +# +obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o + +obj-$(CONFIG_UCC) += ucc.o +obj-$(CONFIG_UCC_SLOW) += ucc_slow.o +obj-$(CONFIG_UCC_FAST) += ucc_fast.o +obj-$(CONFIG_QE_USB) += usb.o +obj-$(CONFIG_QE_GPIO) += gpio.o diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c new file mode 100644 index 00000000..36bf845d --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/gpio.c @@ -0,0 +1,342 @@ +/* + * QUICC Engine GPIOs + * + * Copyright (c) MontaVista Software, Inc. 2008. + * + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/slab.h> +#include <asm/qe.h> + +struct qe_gpio_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; + + unsigned long pin_flags[QE_PIO_PINS]; +#define QE_PIN_REQUESTED 0 + + /* shadowed data register to clear/set bits safely */ + u32 cpdata; + + /* saved_regs used to restore dedicated functions */ + struct qe_pio_regs saved_regs; +}; + +static inline struct qe_gpio_chip * +to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc) +{ + return container_of(mm_gc, struct qe_gpio_chip, mm_gc); +} + +static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) +{ + struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); + struct qe_pio_regs __iomem *regs = mm_gc->regs; + + qe_gc->cpdata = in_be32(®s->cpdata); + qe_gc->saved_regs.cpdata = qe_gc->cpdata; + qe_gc->saved_regs.cpdir1 = in_be32(®s->cpdir1); + qe_gc->saved_regs.cpdir2 = in_be32(®s->cpdir2); + qe_gc->saved_regs.cppar1 = in_be32(®s->cppar1); + qe_gc->saved_regs.cppar2 = in_be32(®s->cppar2); + qe_gc->saved_regs.cpodr = in_be32(®s->cpodr); +} + +static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct qe_pio_regs __iomem *regs = mm_gc->regs; + u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); + + return in_be32(®s->cpdata) & pin_mask; +} + +static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); + struct qe_pio_regs __iomem *regs = mm_gc->regs; + unsigned long flags; + u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); + + spin_lock_irqsave(&qe_gc->lock, flags); + + if (val) + qe_gc->cpdata |= pin_mask; + else + qe_gc->cpdata &= ~pin_mask; + + out_be32(®s->cpdata, qe_gc->cpdata); + + spin_unlock_irqrestore(&qe_gc->lock, flags); +} + +static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); + unsigned long flags; + + spin_lock_irqsave(&qe_gc->lock, flags); + + __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0); + + spin_unlock_irqrestore(&qe_gc->lock, flags); + + return 0; +} + +static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc); + unsigned long flags; + + qe_gpio_set(gc, gpio, val); + + spin_lock_irqsave(&qe_gc->lock, flags); + + __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0); + + spin_unlock_irqrestore(&qe_gc->lock, flags); + + return 0; +} + +struct qe_pin { + /* + * The qe_gpio_chip name is unfortunate, we should change that to + * something like qe_pio_controller. Someday. + */ + struct qe_gpio_chip *controller; + int num; +}; + +/** + * qe_pin_request - Request a QE pin + * @np: device node to get a pin from + * @index: index of a pin in the device tree + * Context: non-atomic + * + * This function return qe_pin so that you could use it with the rest of + * the QE Pin Multiplexing API. 
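+ *
+ * A typical call sequence, sketched with error handling elided (np is
+ * whatever device node carries the "gpios" property):
+ *
+ *	qe_pin = qe_pin_request(np, 0);
+ *	if (!IS_ERR(qe_pin))
+ *		qe_pin_set_gpio(qe_pin);
+ *	...
+ *	qe_pin_set_dedicated(qe_pin);
+ *	qe_pin_free(qe_pin);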
+ */ +struct qe_pin *qe_pin_request(struct device_node *np, int index) +{ + struct qe_pin *qe_pin; + struct device_node *gpio_np; + struct gpio_chip *gc; + struct of_mm_gpio_chip *mm_gc; + struct qe_gpio_chip *qe_gc; + int err; + int size; + const void *gpio_spec; + const u32 *gpio_cells; + unsigned long flags; + + qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL); + if (!qe_pin) { + pr_debug("%s: can't allocate memory\n", __func__); + return ERR_PTR(-ENOMEM); + } + + err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, + &gpio_np, &gpio_spec); + if (err) { + pr_debug("%s: can't parse gpios property\n", __func__); + goto err0; + } + + if (!of_device_is_compatible(gpio_np, "fsl,mpc8323-qe-pario-bank")) { + pr_debug("%s: tried to get a non-qe pin\n", __func__); + err = -EINVAL; + goto err1; + } + + gc = of_node_to_gpiochip(gpio_np); + if (!gc) { + pr_debug("%s: gpio controller %s isn't registered\n", + np->full_name, gpio_np->full_name); + err = -ENODEV; + goto err1; + } + + gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size); + if (!gpio_cells || size != sizeof(*gpio_cells) || + *gpio_cells != gc->of_gpio_n_cells) { + pr_debug("%s: wrong #gpio-cells for %s\n", + np->full_name, gpio_np->full_name); + err = -EINVAL; + goto err1; + } + + err = gc->of_xlate(gc, np, gpio_spec, NULL); + if (err < 0) + goto err1; + + mm_gc = to_of_mm_gpio_chip(gc); + qe_gc = to_qe_gpio_chip(mm_gc); + + spin_lock_irqsave(&qe_gc->lock, flags); + + if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) { + qe_pin->controller = qe_gc; + qe_pin->num = err; + err = 0; + } else { + err = -EBUSY; + } + + spin_unlock_irqrestore(&qe_gc->lock, flags); + + if (!err) + return qe_pin; +err1: + of_node_put(gpio_np); +err0: + kfree(qe_pin); + pr_debug("%s failed with status %d\n", __func__, err); + return ERR_PTR(err); +} +EXPORT_SYMBOL(qe_pin_request); + +/** + * qe_pin_free - Free a pin + * @qe_pin: pointer to the qe_pin structure + * Context: any + * + * This function frees the qe_pin structure and makes a pin available + * for further qe_pin_request() calls. + */ +void qe_pin_free(struct qe_pin *qe_pin) +{ + struct qe_gpio_chip *qe_gc = qe_pin->controller; + unsigned long flags; + const int pin = qe_pin->num; + + spin_lock_irqsave(&qe_gc->lock, flags); + test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]); + spin_unlock_irqrestore(&qe_gc->lock, flags); + + kfree(qe_pin); +} +EXPORT_SYMBOL(qe_pin_free); + +/** + * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode + * @qe_pin: pointer to the qe_pin structure + * Context: any + * + * This function resets a pin to a dedicated peripheral function that + * has been set up by the firmware. 
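+ * ("Dedicated" state here is whatever qe_gpio_save_regs() captured in
+ * saved_regs when the GPIO bank was registered at boot.)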
+ */ +void qe_pin_set_dedicated(struct qe_pin *qe_pin) +{ + struct qe_gpio_chip *qe_gc = qe_pin->controller; + struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; + struct qe_pio_regs *sregs = &qe_gc->saved_regs; + int pin = qe_pin->num; + u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1)); + u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2); + bool second_reg = pin > (QE_PIO_PINS / 2) - 1; + unsigned long flags; + + spin_lock_irqsave(&qe_gc->lock, flags); + + if (second_reg) { + clrsetbits_be32(®s->cpdir2, mask2, sregs->cpdir2 & mask2); + clrsetbits_be32(®s->cppar2, mask2, sregs->cppar2 & mask2); + } else { + clrsetbits_be32(®s->cpdir1, mask2, sregs->cpdir1 & mask2); + clrsetbits_be32(®s->cppar1, mask2, sregs->cppar1 & mask2); + } + + if (sregs->cpdata & mask1) + qe_gc->cpdata |= mask1; + else + qe_gc->cpdata &= ~mask1; + + out_be32(®s->cpdata, qe_gc->cpdata); + clrsetbits_be32(®s->cpodr, mask1, sregs->cpodr & mask1); + + spin_unlock_irqrestore(&qe_gc->lock, flags); +} +EXPORT_SYMBOL(qe_pin_set_dedicated); + +/** + * qe_pin_set_gpio - Set a pin to the GPIO mode + * @qe_pin: pointer to the qe_pin structure + * Context: any + * + * This function sets a pin to the GPIO mode. + */ +void qe_pin_set_gpio(struct qe_pin *qe_pin) +{ + struct qe_gpio_chip *qe_gc = qe_pin->controller; + struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; + unsigned long flags; + + spin_lock_irqsave(&qe_gc->lock, flags); + + /* Let's make it input by default, GPIO API is able to change that. */ + __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0); + + spin_unlock_irqrestore(&qe_gc->lock, flags); +} +EXPORT_SYMBOL(qe_pin_set_gpio); + +static int __init qe_add_gpiochips(void) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") { + int ret; + struct qe_gpio_chip *qe_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL); + if (!qe_gc) { + ret = -ENOMEM; + goto err; + } + + spin_lock_init(&qe_gc->lock); + + mm_gc = &qe_gc->mm_gc; + gc = &mm_gc->gc; + + mm_gc->save_regs = qe_gpio_save_regs; + gc->ngpio = QE_PIO_PINS; + gc->direction_input = qe_gpio_dir_in; + gc->direction_output = qe_gpio_dir_out; + gc->get = qe_gpio_get; + gc->set = qe_gpio_set; + + ret = of_mm_gpiochip_add(np, mm_gc); + if (ret) + goto err; + continue; +err: + pr_err("%s: registration failed with status %d\n", + np->full_name, ret); + kfree(qe_gc); + /* try others anyway */ + } + return 0; +} +arch_initcall(qe_add_gpiochips); diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c new file mode 100644 index 00000000..904c6cba --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/qe.c @@ -0,0 +1,687 @@ +/* + * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net) + * + * Description: + * General Purpose functions for the global management of the + * QUICC Engine (QE). + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/string.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/interrupt.h> +#include <linux/bootmem.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/ioport.h> +#include <linux/crc32.h> +#include <linux/mod_devicetable.h> +#include <linux/of_platform.h> +#include <asm/irq.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/immap_qe.h> +#include <asm/qe.h> +#include <asm/prom.h> +#include <asm/rheap.h> + +static void qe_snums_init(void); +static int qe_sdma_init(void); + +static DEFINE_SPINLOCK(qe_lock); +DEFINE_SPINLOCK(cmxgcr_lock); +EXPORT_SYMBOL(cmxgcr_lock); + +/* QE snum state */ +enum qe_snum_state { + QE_SNUM_STATE_USED, + QE_SNUM_STATE_FREE +}; + +/* QE snum */ +struct qe_snum { + u8 num; + enum qe_snum_state state; +}; + +/* We allocate this here because it is used almost exclusively for + * the communication processor devices. + */ +struct qe_immap __iomem *qe_immr; +EXPORT_SYMBOL(qe_immr); + +static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */ +static unsigned int qe_num_of_snum; + +static phys_addr_t qebase = -1; + +phys_addr_t get_qe_base(void) +{ + struct device_node *qe; + int size; + const u32 *prop; + + if (qebase != -1) + return qebase; + + qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (!qe) { + qe = of_find_node_by_type(NULL, "qe"); + if (!qe) + return qebase; + } + + prop = of_get_property(qe, "reg", &size); + if (prop && size >= sizeof(*prop)) + qebase = of_translate_address(qe, prop); + of_node_put(qe); + + return qebase; +} + +EXPORT_SYMBOL(get_qe_base); + +void qe_reset(void) +{ + if (qe_immr == NULL) + qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE); + + qe_snums_init(); + + qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + + /* Reclaim the MURAM memory for our use. */ + qe_muram_init(); + + if (qe_sdma_init()) + panic("sdma init failed!"); +} + +int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) +{ + unsigned long flags; + u8 mcn_shift = 0, dev_shift = 0; + u32 ret; + + spin_lock_irqsave(&qe_lock, flags); + if (cmd == QE_RESET) { + out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG)); + } else { + if (cmd == QE_ASSIGN_PAGE) { + /* Here device is the SNUM, not sub-block */ + dev_shift = QE_CR_SNUM_SHIFT; + } else if (cmd == QE_ASSIGN_RISC) { + /* Here device is the SNUM, and mcnProtocol is + * e_QeCmdRiscAssignment value */ + dev_shift = QE_CR_SNUM_SHIFT; + mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT; + } else { + if (device == QE_CR_SUBBLOCK_USB) + mcn_shift = QE_CR_MCN_USB_SHIFT; + else + mcn_shift = QE_CR_MCN_NORMAL_SHIFT; + } + + out_be32(&qe_immr->cp.cecdr, cmd_input); + out_be32(&qe_immr->cp.cecr, + (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) + mcn_protocol << mcn_shift)); + } + + /* wait for the QE_CR_FLG to clear */ + ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0, + 100, 0); + /* On timeout (e.g. failure), the expression will be false (ret == 0), + otherwise it will be true (ret == 1). */ + spin_unlock_irqrestore(&qe_lock, flags); + + return ret == 1; +} +EXPORT_SYMBOL(qe_issue_cmd); + +/* Set a baud rate generator. This needs lots of work. There are + * 16 BRGs, which can be connected to the QE channels or output + * as clocks. The BRGs are in two different block of internal + * memory mapped space. + * The BRG clock is the QE clock divided by 2. 
+ * It was set up long ago during the initial boot phase and is
+ * given to us.
+ * Baud rate clocks are zero-based in the driver code (as that maps
+ * to port numbers). Documentation uses 1-based numbering.
+ */
+static unsigned int brg_clk = 0;
+
+unsigned int qe_get_brg_clk(void)
+{
+ struct device_node *qe;
+ int size;
+ const u32 *prop;
+
+ if (brg_clk)
+ return brg_clk;
+
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return brg_clk;
+ }
+
+ prop = of_get_property(qe, "brg-frequency", &size);
+ if (prop && size == sizeof(*prop))
+ brg_clk = *prop;
+
+ of_node_put(qe);
+
+ return brg_clk;
+}
+EXPORT_SYMBOL(qe_get_brg_clk);
+
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ */
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
+{
+ u32 divisor, tempval;
+ u32 div16 = 0;
+
+ if ((brg < QE_BRG1) || (brg > QE_BRG16))
+ return -EINVAL;
+
+ divisor = qe_get_brg_clk() / (rate * multiplier);
+
+ if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
+ div16 = QE_BRGC_DIV16;
+ divisor /= 16;
+ }
+
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
+ if (!div16 && (divisor & 1))
+ divisor++;
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+ out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
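+ *
+ * For example, "brg5" maps to QE_BRG5, "clk16" to QE_CLK16 and "none"
+ * to QE_CLK_NONE; an unrecognized string, or a BRG/CLK index outside
+ * brg1-brg16 or clk1-clk24, yields QE_CLK_DUMMY.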
+*/ +enum qe_clock qe_clock_source(const char *source) +{ + unsigned int i; + + if (strcasecmp(source, "none") == 0) + return QE_CLK_NONE; + + if (strncasecmp(source, "brg", 3) == 0) { + i = simple_strtoul(source + 3, NULL, 10); + if ((i >= 1) && (i <= 16)) + return (QE_BRG1 - 1) + i; + else + return QE_CLK_DUMMY; + } + + if (strncasecmp(source, "clk", 3) == 0) { + i = simple_strtoul(source + 3, NULL, 10); + if ((i >= 1) && (i <= 24)) + return (QE_CLK1 - 1) + i; + else + return QE_CLK_DUMMY; + } + + return QE_CLK_DUMMY; +} +EXPORT_SYMBOL(qe_clock_source); + +/* Initialize SNUMs (thread serial numbers) according to + * QE Module Control chapter, SNUM table + */ +static void qe_snums_init(void) +{ + int i; + static const u8 snum_init[] = { + 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D, + 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89, + 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9, + 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19, + 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59, + 0x68, 0x69, 0x78, 0x79, 0x80, 0x81, + }; + + qe_num_of_snum = qe_get_num_of_snums(); + + for (i = 0; i < qe_num_of_snum; i++) { + snums[i].num = snum_init[i]; + snums[i].state = QE_SNUM_STATE_FREE; + } +} + +int qe_get_snum(void) +{ + unsigned long flags; + int snum = -EBUSY; + int i; + + spin_lock_irqsave(&qe_lock, flags); + for (i = 0; i < qe_num_of_snum; i++) { + if (snums[i].state == QE_SNUM_STATE_FREE) { + snums[i].state = QE_SNUM_STATE_USED; + snum = snums[i].num; + break; + } + } + spin_unlock_irqrestore(&qe_lock, flags); + + return snum; +} +EXPORT_SYMBOL(qe_get_snum); + +void qe_put_snum(u8 snum) +{ + int i; + + for (i = 0; i < qe_num_of_snum; i++) { + if (snums[i].num == snum) { + snums[i].state = QE_SNUM_STATE_FREE; + break; + } + } +} +EXPORT_SYMBOL(qe_put_snum); + +static int qe_sdma_init(void) +{ + struct sdma __iomem *sdma = &qe_immr->sdma; + static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM; + + if (!sdma) + return -ENODEV; + + /* allocate 2 internal temporary buffers (512 bytes size each) for + * the SDMA */ + if (IS_ERR_VALUE(sdma_buf_offset)) { + sdma_buf_offset = qe_muram_alloc(512 * 2, 4096); + if (IS_ERR_VALUE(sdma_buf_offset)) + return -ENOMEM; + } + + out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK); + out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | + (0x1 << QE_SDMR_CEN_SHIFT))); + + return 0; +} + +/* The maximum number of RISCs we support */ +#define MAX_QE_RISC 4 + +/* Firmware information stored here for qe_get_firmware_info() */ +static struct qe_firmware_info qe_firmware_info; + +/* + * Set to 1 if QE firmware has been uploaded, and therefore + * qe_firmware_info contains valid data. + */ +static int qe_firmware_uploaded; + +/* + * Upload a QE microcode + * + * This function is a worker function for qe_upload_firmware(). It does + * the actual uploading of the microcode. 
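+ *
+ * A hypothetical caller of the public qe_upload_firmware() below might
+ * fetch the blob with request_firmware() (an illustrative sketch only;
+ * the firmware file name and the "dev" pointer are assumptions):
+ *
+ *   const struct firmware *fw;
+ *
+ *   if (!request_firmware(&fw, "fsl_qe_ucode.bin", dev)) {
+ *           qe_upload_firmware((const struct qe_firmware *)fw->data);
+ *           release_firmware(fw);
+ *   }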
+ */ +static void qe_upload_microcode(const void *base, + const struct qe_microcode *ucode) +{ + const __be32 *code = base + be32_to_cpu(ucode->code_offset); + unsigned int i; + + if (ucode->major || ucode->minor || ucode->revision) + printk(KERN_INFO "qe-firmware: " + "uploading microcode '%s' version %u.%u.%u\n", + ucode->id, ucode->major, ucode->minor, ucode->revision); + else + printk(KERN_INFO "qe-firmware: " + "uploading microcode '%s'\n", ucode->id); + + /* Use auto-increment */ + out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) | + QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR); + + for (i = 0; i < be32_to_cpu(ucode->count); i++) + out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i])); +} + +/* + * Upload a microcode to the I-RAM at a specific address. + * + * See Documentation/powerpc/qe-firmware.txt for information on QE microcode + * uploading. + * + * Currently, only version 1 is supported, so the 'version' field must be + * set to 1. + * + * The SOC model and revision are not validated, they are only displayed for + * informational purposes. + * + * 'calc_size' is the calculated size, in bytes, of the firmware structure and + * all of the microcode structures, minus the CRC. + * + * 'length' is the size that the structure says it is, including the CRC. + */ +int qe_upload_firmware(const struct qe_firmware *firmware) +{ + unsigned int i; + unsigned int j; + u32 crc; + size_t calc_size = sizeof(struct qe_firmware); + size_t length; + const struct qe_header *hdr; + + if (!firmware) { + printk(KERN_ERR "qe-firmware: invalid pointer\n"); + return -EINVAL; + } + + hdr = &firmware->header; + length = be32_to_cpu(hdr->length); + + /* Check the magic */ + if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') || + (hdr->magic[2] != 'F')) { + printk(KERN_ERR "qe-firmware: not a microcode\n"); + return -EPERM; + } + + /* Check the version */ + if (hdr->version != 1) { + printk(KERN_ERR "qe-firmware: unsupported version\n"); + return -EPERM; + } + + /* Validate some of the fields */ + if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) { + printk(KERN_ERR "qe-firmware: invalid data\n"); + return -EINVAL; + } + + /* Validate the length and check if there's a CRC */ + calc_size += (firmware->count - 1) * sizeof(struct qe_microcode); + + for (i = 0; i < firmware->count; i++) + /* + * For situations where the second RISC uses the same microcode + * as the first, the 'code_offset' and 'count' fields will be + * zero, so it's okay to add those. + */ + calc_size += sizeof(__be32) * + be32_to_cpu(firmware->microcode[i].count); + + /* Validate the length */ + if (length != calc_size + sizeof(__be32)) { + printk(KERN_ERR "qe-firmware: invalid length\n"); + return -EPERM; + } + + /* Validate the CRC */ + crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size)); + if (crc != crc32(0, firmware, calc_size)) { + printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n"); + return -EIO; + } + + /* + * If the microcode calls for it, split the I-RAM. + */ + if (!firmware->split) + setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR); + + if (firmware->soc.model) + printk(KERN_INFO + "qe-firmware: firmware '%s' for %u V%u.%u\n", + firmware->id, be16_to_cpu(firmware->soc.model), + firmware->soc.major, firmware->soc.minor); + else + printk(KERN_INFO "qe-firmware: firmware '%s'\n", + firmware->id); + + /* + * The QE only supports one microcode per RISC, so clear out all the + * saved microcode information and put in the new. 
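+ * The per-RISC loop below uploads each microcode that is present,
+ * writes its 16 virtual traps into the TIBCR registers, and then
+ * enables the traps through ECCR.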
+ */ + memset(&qe_firmware_info, 0, sizeof(qe_firmware_info)); + strcpy(qe_firmware_info.id, firmware->id); + qe_firmware_info.extended_modes = firmware->extended_modes; + memcpy(qe_firmware_info.vtraps, firmware->vtraps, + sizeof(firmware->vtraps)); + + /* Loop through each microcode. */ + for (i = 0; i < firmware->count; i++) { + const struct qe_microcode *ucode = &firmware->microcode[i]; + + /* Upload a microcode if it's present */ + if (ucode->code_offset) + qe_upload_microcode(firmware, ucode); + + /* Program the traps for this processor */ + for (j = 0; j < 16; j++) { + u32 trap = be32_to_cpu(ucode->traps[j]); + + if (trap) + out_be32(&qe_immr->rsp[i].tibcr[j], trap); + } + + /* Enable traps */ + out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr)); + } + + qe_firmware_uploaded = 1; + + return 0; +} +EXPORT_SYMBOL(qe_upload_firmware); + +/* + * Get info on the currently-loaded firmware + * + * This function also checks the device tree to see if the boot loader has + * uploaded a firmware already. + */ +struct qe_firmware_info *qe_get_firmware_info(void) +{ + static int initialized; + struct property *prop; + struct device_node *qe; + struct device_node *fw = NULL; + const char *sprop; + unsigned int i; + + /* + * If we haven't checked yet, and a driver hasn't uploaded a firmware + * yet, then check the device tree for information. + */ + if (qe_firmware_uploaded) + return &qe_firmware_info; + + if (initialized) + return NULL; + + initialized = 1; + + /* + * Newer device trees have an "fsl,qe" compatible property for the QE + * node, but we still need to support older device trees. + */ + qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (!qe) { + qe = of_find_node_by_type(NULL, "qe"); + if (!qe) + return NULL; + } + + /* Find the 'firmware' child node */ + for_each_child_of_node(qe, fw) { + if (strcmp(fw->name, "firmware") == 0) + break; + } + + of_node_put(qe); + + /* Did we find the 'firmware' node? */ + if (!fw) + return NULL; + + qe_firmware_uploaded = 1; + + /* Copy the data into qe_firmware_info*/ + sprop = of_get_property(fw, "id", NULL); + if (sprop) + strncpy(qe_firmware_info.id, sprop, + sizeof(qe_firmware_info.id) - 1); + + prop = of_find_property(fw, "extended-modes", NULL); + if (prop && (prop->length == sizeof(u64))) { + const u64 *iprop = prop->value; + + qe_firmware_info.extended_modes = *iprop; + } + + prop = of_find_property(fw, "virtual-traps", NULL); + if (prop && (prop->length == 32)) { + const u32 *iprop = prop->value; + + for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++) + qe_firmware_info.vtraps[i] = iprop[i]; + } + + of_node_put(fw); + + return &qe_firmware_info; +} +EXPORT_SYMBOL(qe_get_firmware_info); + +unsigned int qe_get_num_of_risc(void) +{ + struct device_node *qe; + int size; + unsigned int num_of_risc = 0; + const u32 *prop; + + qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (!qe) { + /* Older devices trees did not have an "fsl,qe" + * compatible property, so we need to look for + * the QE node by name. 
+ */ + qe = of_find_node_by_type(NULL, "qe"); + if (!qe) + return num_of_risc; + } + + prop = of_get_property(qe, "fsl,qe-num-riscs", &size); + if (prop && size == sizeof(*prop)) + num_of_risc = *prop; + + of_node_put(qe); + + return num_of_risc; +} +EXPORT_SYMBOL(qe_get_num_of_risc); + +unsigned int qe_get_num_of_snums(void) +{ + struct device_node *qe; + int size; + unsigned int num_of_snums; + const u32 *prop; + + num_of_snums = 28; /* The default number of snum for threads is 28 */ + qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); + if (!qe) { + /* Older devices trees did not have an "fsl,qe" + * compatible property, so we need to look for + * the QE node by name. + */ + qe = of_find_node_by_type(NULL, "qe"); + if (!qe) + return num_of_snums; + } + + prop = of_get_property(qe, "fsl,qe-num-snums", &size); + if (prop && size == sizeof(*prop)) { + num_of_snums = *prop; + if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) { + /* No QE ever has fewer than 28 SNUMs */ + pr_err("QE: number of snum is invalid\n"); + of_node_put(qe); + return -EINVAL; + } + } + + of_node_put(qe); + + return num_of_snums; +} +EXPORT_SYMBOL(qe_get_num_of_snums); + +#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) +static int qe_resume(struct platform_device *ofdev) +{ + if (!qe_alive_during_sleep()) + qe_reset(); + return 0; +} + +static int qe_probe(struct platform_device *ofdev) +{ + return 0; +} + +static const struct of_device_id qe_ids[] = { + { .compatible = "fsl,qe", }, + { }, +}; + +static struct platform_driver qe_driver = { + .driver = { + .name = "fsl-qe", + .owner = THIS_MODULE, + .of_match_table = qe_ids, + }, + .probe = qe_probe, + .resume = qe_resume, +}; + +static int __init qe_drv_init(void) +{ + return platform_driver_register(&qe_driver); +} +device_initcall(qe_drv_init); +#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c new file mode 100644 index 00000000..b2acda07 --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c @@ -0,0 +1,515 @@ +/* + * arch/powerpc/sysdev/qe_lib/qe_ic.c + * + * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * + * Author: Li Yang <leoli@freescale.com> + * Based on code from Shlomi Gridish <gridish@freescale.com> + * + * QUICC ENGINE Interrupt Controller + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/reboot.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/sysdev.h> +#include <linux/device.h> +#include <linux/bootmem.h> +#include <linux/spinlock.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/qe_ic.h> + +#include "qe_ic.h" + +static DEFINE_RAW_SPINLOCK(qe_ic_lock); + +static struct qe_ic_info qe_ic_info[] = { + [1] = { + .mask = 0x00008000, + .mask_reg = QEIC_CIMR, + .pri_code = 0, + .pri_reg = QEIC_CIPWCC, + }, + [2] = { + .mask = 0x00004000, + .mask_reg = QEIC_CIMR, + .pri_code = 1, + .pri_reg = QEIC_CIPWCC, + }, + [3] = { + .mask = 0x00002000, + .mask_reg = QEIC_CIMR, + .pri_code = 2, + .pri_reg = QEIC_CIPWCC, + }, + [10] = { + .mask = 0x00000040, + .mask_reg = QEIC_CIMR, + .pri_code = 1, + .pri_reg = QEIC_CIPZCC, + }, + [11] = { + .mask = 0x00000020, + .mask_reg = QEIC_CIMR, + .pri_code = 2, + .pri_reg = QEIC_CIPZCC, + }, + [12] = { + .mask = 0x00000010, + .mask_reg = QEIC_CIMR, + .pri_code = 3, + .pri_reg = QEIC_CIPZCC, + }, + [13] = { + .mask = 0x00000008, + .mask_reg = QEIC_CIMR, + .pri_code = 4, + .pri_reg = QEIC_CIPZCC, + }, + [14] = { + .mask = 0x00000004, + .mask_reg = QEIC_CIMR, + .pri_code = 5, + .pri_reg = QEIC_CIPZCC, + }, + [15] = { + .mask = 0x00000002, + .mask_reg = QEIC_CIMR, + .pri_code = 6, + .pri_reg = QEIC_CIPZCC, + }, + [20] = { + .mask = 0x10000000, + .mask_reg = QEIC_CRIMR, + .pri_code = 3, + .pri_reg = QEIC_CIPRTA, + }, + [25] = { + .mask = 0x00800000, + .mask_reg = QEIC_CRIMR, + .pri_code = 0, + .pri_reg = QEIC_CIPRTB, + }, + [26] = { + .mask = 0x00400000, + .mask_reg = QEIC_CRIMR, + .pri_code = 1, + .pri_reg = QEIC_CIPRTB, + }, + [27] = { + .mask = 0x00200000, + .mask_reg = QEIC_CRIMR, + .pri_code = 2, + .pri_reg = QEIC_CIPRTB, + }, + [28] = { + .mask = 0x00100000, + .mask_reg = QEIC_CRIMR, + .pri_code = 3, + .pri_reg = QEIC_CIPRTB, + }, + [32] = { + .mask = 0x80000000, + .mask_reg = QEIC_CIMR, + .pri_code = 0, + .pri_reg = QEIC_CIPXCC, + }, + [33] = { + .mask = 0x40000000, + .mask_reg = QEIC_CIMR, + .pri_code = 1, + .pri_reg = QEIC_CIPXCC, + }, + [34] = { + .mask = 0x20000000, + .mask_reg = QEIC_CIMR, + .pri_code = 2, + .pri_reg = QEIC_CIPXCC, + }, + [35] = { + .mask = 0x10000000, + .mask_reg = QEIC_CIMR, + .pri_code = 3, + .pri_reg = QEIC_CIPXCC, + }, + [36] = { + .mask = 0x08000000, + .mask_reg = QEIC_CIMR, + .pri_code = 4, + .pri_reg = QEIC_CIPXCC, + }, + [40] = { + .mask = 0x00800000, + .mask_reg = QEIC_CIMR, + .pri_code = 0, + .pri_reg = QEIC_CIPYCC, + }, + [41] = { + .mask = 0x00400000, + .mask_reg = QEIC_CIMR, + .pri_code = 1, + .pri_reg = QEIC_CIPYCC, + }, + [42] = { + .mask = 0x00200000, + .mask_reg = QEIC_CIMR, + .pri_code = 2, + .pri_reg = QEIC_CIPYCC, + }, + [43] = { + .mask = 0x00100000, + .mask_reg = QEIC_CIMR, + .pri_code = 3, + .pri_reg = QEIC_CIPYCC, + }, +}; + +static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg) +{ + return in_be32(base + (reg >> 2)); +} + +static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg, + u32 value) +{ + out_be32(base + (reg >> 2), value); +} + +static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) +{ + return irq_get_chip_data(virq); +} + +static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) +{ + return irq_data_get_irq_chip_data(d); +} + +static void qe_ic_unmask_irq(struct irq_data *d) +{ + struct qe_ic *qe_ic = 
qe_ic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 temp; + + raw_spin_lock_irqsave(&qe_ic_lock, flags); + + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); + qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, + temp | qe_ic_info[src].mask); + + raw_spin_unlock_irqrestore(&qe_ic_lock, flags); +} + +static void qe_ic_mask_irq(struct irq_data *d) +{ + struct qe_ic *qe_ic = qe_ic_from_irq_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 temp; + + raw_spin_lock_irqsave(&qe_ic_lock, flags); + + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); + qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, + temp & ~qe_ic_info[src].mask); + + /* Flush the above write before enabling interrupts; otherwise, + * spurious interrupts will sometimes happen. To be 100% sure + * that the write has reached the device before interrupts are + * enabled, the mask register would have to be read back; however, + * this is not required for correctness, only to avoid wasting + * time on a large number of spurious interrupts. In testing, + * a sync reduced the observed spurious interrupts to zero. + */ + mb(); + + raw_spin_unlock_irqrestore(&qe_ic_lock, flags); +} + +static struct irq_chip qe_ic_irq_chip = { + .name = "QEIC", + .irq_unmask = qe_ic_unmask_irq, + .irq_mask = qe_ic_mask_irq, + .irq_mask_ack = qe_ic_mask_irq, +}; + +static int qe_ic_host_match(struct irq_host *h, struct device_node *node) +{ + /* Exact match, unless qe_ic node is NULL */ + return h->of_node == NULL || h->of_node == node; +} + +static int qe_ic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct qe_ic *qe_ic = h->host_data; + struct irq_chip *chip; + + if (qe_ic_info[hw].mask == 0) { + printk(KERN_ERR "Can't map reserved IRQ\n"); + return -EINVAL; + } + /* Default chip */ + chip = &qe_ic->hc_irq; + + irq_set_chip_data(virq, qe_ic); + irq_set_status_flags(virq, IRQ_LEVEL); + + irq_set_chip_and_handler(virq, chip, handle_level_irq); + + return 0; +} + +static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 * intspec, unsigned int intsize, + irq_hw_number_t * out_hwirq, + unsigned int *out_flags) +{ + *out_hwirq = intspec[0]; + if (intsize > 1) + *out_flags = intspec[1]; + else + *out_flags = IRQ_TYPE_NONE; + return 0; +} + +static struct irq_host_ops qe_ic_host_ops = { + .match = qe_ic_host_match, + .map = qe_ic_host_map, + .xlate = qe_ic_host_xlate, +}; + +/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ +unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic) +{ + int irq; + + BUG_ON(qe_ic == NULL); + + /* get the interrupt source vector. */ + irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26; + + if (irq == 0) + return NO_IRQ; + + return irq_linear_revmap(qe_ic->irqhost, irq); +} + +/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ +unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic) +{ + int irq; + + BUG_ON(qe_ic == NULL); + + /* get the interrupt source vector. 
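+ * The vector number occupies the six most-significant bits of CHIVEC,
+ * hence the shift right by 26.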
*/ + irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26; + + if (irq == 0) + return NO_IRQ; + + return irq_linear_revmap(qe_ic->irqhost, irq); +} + +void __init qe_ic_init(struct device_node *node, unsigned int flags, + void (*low_handler)(unsigned int irq, struct irq_desc *desc), + void (*high_handler)(unsigned int irq, struct irq_desc *desc)) +{ + struct qe_ic *qe_ic; + struct resource res; + u32 temp = 0, ret, high_active = 0; + + ret = of_address_to_resource(node, 0, &res); + if (ret) + return; + + qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); + if (qe_ic == NULL) + return; + + qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, + NR_QE_IC_INTS, &qe_ic_host_ops, 0); + if (qe_ic->irqhost == NULL) { + kfree(qe_ic); + return; + } + + qe_ic->regs = ioremap(res.start, res.end - res.start + 1); + + qe_ic->irqhost->host_data = qe_ic; + qe_ic->hc_irq = qe_ic_irq_chip; + + qe_ic->virq_high = irq_of_parse_and_map(node, 0); + qe_ic->virq_low = irq_of_parse_and_map(node, 1); + + if (qe_ic->virq_low == NO_IRQ) { + printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); + kfree(qe_ic); + return; + } + + /* default priority scheme is grouped. If spread mode is */ + /* required, configure cicr accordingly. */ + if (flags & QE_IC_SPREADMODE_GRP_W) + temp |= CICR_GWCC; + if (flags & QE_IC_SPREADMODE_GRP_X) + temp |= CICR_GXCC; + if (flags & QE_IC_SPREADMODE_GRP_Y) + temp |= CICR_GYCC; + if (flags & QE_IC_SPREADMODE_GRP_Z) + temp |= CICR_GZCC; + if (flags & QE_IC_SPREADMODE_GRP_RISCA) + temp |= CICR_GRTA; + if (flags & QE_IC_SPREADMODE_GRP_RISCB) + temp |= CICR_GRTB; + + /* choose destination signal for highest priority interrupt */ + if (flags & QE_IC_HIGH_SIGNAL) { + temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT); + high_active = 1; + } + + qe_ic_write(qe_ic->regs, QEIC_CICR, temp); + + irq_set_handler_data(qe_ic->virq_low, qe_ic); + irq_set_chained_handler(qe_ic->virq_low, low_handler); + + if (qe_ic->virq_high != NO_IRQ && + qe_ic->virq_high != qe_ic->virq_low) { + irq_set_handler_data(qe_ic->virq_high, qe_ic); + irq_set_chained_handler(qe_ic->virq_high, high_handler); + } +} + +void qe_ic_set_highest_priority(unsigned int virq, int high) +{ + struct qe_ic *qe_ic = qe_ic_from_irq(virq); + unsigned int src = virq_to_hw(virq); + u32 temp = 0; + + temp = qe_ic_read(qe_ic->regs, QEIC_CICR); + + temp &= ~CICR_HP_MASK; + temp |= src << CICR_HP_SHIFT; + + temp &= ~CICR_HPIT_MASK; + temp |= (high ? 
SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT; + + qe_ic_write(qe_ic->regs, QEIC_CICR, temp); +} + +/* Set Priority level within its group, from 1 to 8 */ +int qe_ic_set_priority(unsigned int virq, unsigned int priority) +{ + struct qe_ic *qe_ic = qe_ic_from_irq(virq); + unsigned int src = virq_to_hw(virq); + u32 temp; + + if (priority > 8 || priority == 0) + return -EINVAL; + if (src > 127) + return -EINVAL; + if (qe_ic_info[src].pri_reg == 0) + return -EINVAL; + + temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg); + + if (priority < 4) { + temp &= ~(0x7 << (32 - priority * 3)); + temp |= qe_ic_info[src].pri_code << (32 - priority * 3); + } else { + temp &= ~(0x7 << (24 - priority * 3)); + temp |= qe_ic_info[src].pri_code << (24 - priority * 3); + } + + qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp); + + return 0; +} + +/* Set a QE priority to use high irq, only priority 1~2 can use high irq */ +int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high) +{ + struct qe_ic *qe_ic = qe_ic_from_irq(virq); + unsigned int src = virq_to_hw(virq); + u32 temp, control_reg = QEIC_CICNR, shift = 0; + + if (priority > 2 || priority == 0) + return -EINVAL; + + switch (qe_ic_info[src].pri_reg) { + case QEIC_CIPZCC: + shift = CICNR_ZCC1T_SHIFT; + break; + case QEIC_CIPWCC: + shift = CICNR_WCC1T_SHIFT; + break; + case QEIC_CIPYCC: + shift = CICNR_YCC1T_SHIFT; + break; + case QEIC_CIPXCC: + shift = CICNR_XCC1T_SHIFT; + break; + case QEIC_CIPRTA: + shift = CRICR_RTA1T_SHIFT; + control_reg = QEIC_CRICR; + break; + case QEIC_CIPRTB: + shift = CRICR_RTB1T_SHIFT; + control_reg = QEIC_CRICR; + break; + default: + return -EINVAL; + } + + shift += (2 - priority) * 2; + temp = qe_ic_read(qe_ic->regs, control_reg); + temp &= ~(SIGNAL_MASK << shift); + temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift; + qe_ic_write(qe_ic->regs, control_reg, temp); + + return 0; +} + +static struct sysdev_class qe_ic_sysclass = { + .name = "qe_ic", +}; + +static struct sys_device device_qe_ic = { + .id = 0, + .cls = &qe_ic_sysclass, +}; + +static int __init init_qe_ic_sysfs(void) +{ + int rc; + + printk(KERN_DEBUG "Registering qe_ic with sysfs...\n"); + + rc = sysdev_class_register(&qe_ic_sysclass); + if (rc) { + printk(KERN_ERR "Failed registering qe_ic sys class\n"); + return -ENODEV; + } + rc = sysdev_register(&device_qe_ic); + if (rc) { + printk(KERN_ERR "Failed registering qe_ic sys device\n"); + return -ENODEV; + } + return 0; +} + +subsys_initcall(init_qe_ic_sysfs); diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h new file mode 100644 index 00000000..c1361d00 --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h @@ -0,0 +1,103 @@ +/* + * arch/powerpc/sysdev/qe_lib/qe_ic.h + * + * QUICC ENGINE Interrupt Controller Header + * + * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * + * Author: Li Yang <leoli@freescale.com> + * Based on code from Shlomi Gridish <gridish@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _POWERPC_SYSDEV_QE_IC_H +#define _POWERPC_SYSDEV_QE_IC_H + +#include <asm/qe_ic.h> + +#define NR_QE_IC_INTS 64 + +/* QE IC registers offset */ +#define QEIC_CICR 0x00 +#define QEIC_CIVEC 0x04 +#define QEIC_CRIPNR 0x08 +#define QEIC_CIPNR 0x0c +#define QEIC_CIPXCC 0x10 +#define QEIC_CIPYCC 0x14 +#define QEIC_CIPWCC 0x18 +#define QEIC_CIPZCC 0x1c +#define QEIC_CIMR 0x20 +#define QEIC_CRIMR 0x24 +#define QEIC_CICNR 0x28 +#define QEIC_CIPRTA 0x30 +#define QEIC_CIPRTB 0x34 +#define QEIC_CRICR 0x3c +#define QEIC_CHIVEC 0x60 + +/* Interrupt priority registers */ +#define CIPCC_SHIFT_PRI0 29 +#define CIPCC_SHIFT_PRI1 26 +#define CIPCC_SHIFT_PRI2 23 +#define CIPCC_SHIFT_PRI3 20 +#define CIPCC_SHIFT_PRI4 13 +#define CIPCC_SHIFT_PRI5 10 +#define CIPCC_SHIFT_PRI6 7 +#define CIPCC_SHIFT_PRI7 4 + +/* CICR priority modes */ +#define CICR_GWCC 0x00040000 +#define CICR_GXCC 0x00020000 +#define CICR_GYCC 0x00010000 +#define CICR_GZCC 0x00080000 +#define CICR_GRTA 0x00200000 +#define CICR_GRTB 0x00400000 +#define CICR_HPIT_SHIFT 8 +#define CICR_HPIT_MASK 0x00000300 +#define CICR_HP_SHIFT 24 +#define CICR_HP_MASK 0x3f000000 + +/* CICNR */ +#define CICNR_WCC1T_SHIFT 20 +#define CICNR_ZCC1T_SHIFT 28 +#define CICNR_YCC1T_SHIFT 12 +#define CICNR_XCC1T_SHIFT 4 + +/* CRICR */ +#define CRICR_RTA1T_SHIFT 20 +#define CRICR_RTB1T_SHIFT 28 + +/* Signal indicator */ +#define SIGNAL_MASK 3 +#define SIGNAL_HIGH 2 +#define SIGNAL_LOW 0 + +struct qe_ic { + /* Control registers offset */ + volatile u32 __iomem *regs; + + /* The remapper for this QEIC */ + struct irq_host *irqhost; + + /* The "linux" controller struct */ + struct irq_chip hc_irq; + + /* VIRQ numbers of QE high/low irqs */ + unsigned int virq_high; + unsigned int virq_low; +}; + +/* + * QE interrupt controller internal structure + */ +struct qe_ic_info { + u32 mask; /* location of this source at the QIMR register. */ + u32 mask_reg; /* Mask register offset */ + u8 pri_code; /* for grouped interrupts sources - the interrupt + code as appears at the group priority register */ + u32 pri_reg; /* Group priority register offset */ +}; + +#endif /* _POWERPC_SYSDEV_QE_IC_H */ diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c new file mode 100644 index 00000000..77e4934b --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/qe_io.c @@ -0,0 +1,218 @@ +/* + * arch/powerpc/sysdev/qe_lib/qe_io.c + * + * QE Parallel I/O ports configuration routines + * + * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved. + * + * Author: Li Yang <LeoLi@freescale.com> + * Based on code from Shlomi Gridish <gridish@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/ioport.h> + +#include <asm/io.h> +#include <asm/qe.h> +#include <asm/prom.h> +#include <sysdev/fsl_soc.h> + +#undef DEBUG + +static struct qe_pio_regs __iomem *par_io; +static int num_par_io_ports = 0; + +int par_io_init(struct device_node *np) +{ + struct resource res; + int ret; + const u32 *num_ports; + + /* Map Parallel I/O ports registers */ + ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + par_io = ioremap(res.start, res.end - res.start + 1); + + num_ports = of_get_property(np, "num-ports", NULL); + if (num_ports) + num_par_io_ports = *num_ports; + + return 0; +} + +void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, + int open_drain, int assignment, int has_irq) +{ + u32 pin_mask1bit; + u32 pin_mask2bits; + u32 new_mask2bits; + u32 tmp_val; + + /* calculate pin location for single and 2 bits information */ + pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); + + /* Set open drain, if required */ + tmp_val = in_be32(&par_io->cpodr); + if (open_drain) + out_be32(&par_io->cpodr, pin_mask1bit | tmp_val); + else + out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val); + + /* define direction */ + tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? + in_be32(&par_io->cpdir2) : + in_be32(&par_io->cpdir1); + + /* get all bits mask for 2 bit per port */ + pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - + (pin % (QE_PIO_PINS / 2) + 1) * 2)); + + /* Get the final mask we need for the right definition */ + new_mask2bits = (u32) (dir << (QE_PIO_PINS - + (pin % (QE_PIO_PINS / 2) + 1) * 2)); + + /* clear and set 2 bits mask */ + if (pin > (QE_PIO_PINS / 2) - 1) { + out_be32(&par_io->cpdir2, + ~pin_mask2bits & tmp_val); + tmp_val &= ~pin_mask2bits; + out_be32(&par_io->cpdir2, new_mask2bits | tmp_val); + } else { + out_be32(&par_io->cpdir1, + ~pin_mask2bits & tmp_val); + tmp_val &= ~pin_mask2bits; + out_be32(&par_io->cpdir1, new_mask2bits | tmp_val); + } + /* define pin assignment */ + tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? 
+ in_be32(&par_io->cppar2) : + in_be32(&par_io->cppar1); + + new_mask2bits = (u32) (assignment << (QE_PIO_PINS - + (pin % (QE_PIO_PINS / 2) + 1) * 2)); + /* clear and set 2 bits mask */ + if (pin > (QE_PIO_PINS / 2) - 1) { + out_be32(&par_io->cppar2, + ~pin_mask2bits & tmp_val); + tmp_val &= ~pin_mask2bits; + out_be32(&par_io->cppar2, new_mask2bits | tmp_val); + } else { + out_be32(&par_io->cppar1, + ~pin_mask2bits & tmp_val); + tmp_val &= ~pin_mask2bits; + out_be32(&par_io->cppar1, new_mask2bits | tmp_val); + } +} +EXPORT_SYMBOL(__par_io_config_pin); + +int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, + int assignment, int has_irq) +{ + if (!par_io || port >= num_par_io_ports) + return -EINVAL; + + __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment, + has_irq); + return 0; +} +EXPORT_SYMBOL(par_io_config_pin); + +int par_io_data_set(u8 port, u8 pin, u8 val) +{ + u32 pin_mask, tmp_val; + + if (port >= num_par_io_ports) + return -EINVAL; + if (pin >= QE_PIO_PINS) + return -EINVAL; + /* calculate pin location */ + pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); + + tmp_val = in_be32(&par_io[port].cpdata); + + if (val == 0) /* clear */ + out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val); + else /* set */ + out_be32(&par_io[port].cpdata, pin_mask | tmp_val); + + return 0; +} +EXPORT_SYMBOL(par_io_data_set); + +int par_io_of_config(struct device_node *np) +{ + struct device_node *pio; + const phandle *ph; + int pio_map_len; + const unsigned int *pio_map; + + if (par_io == NULL) { + printk(KERN_ERR "par_io not initialized\n"); + return -1; + } + + ph = of_get_property(np, "pio-handle", NULL); + if (ph == NULL) { + printk(KERN_ERR "pio-handle not available\n"); + return -1; + } + + pio = of_find_node_by_phandle(*ph); + + pio_map = of_get_property(pio, "pio-map", &pio_map_len); + if (pio_map == NULL) { + printk(KERN_ERR "pio-map is not set!\n"); + return -1; + } + pio_map_len /= sizeof(unsigned int); + if ((pio_map_len % 6) != 0) { + printk(KERN_ERR "pio-map format wrong!\n"); + return -1; + } + + while (pio_map_len > 0) { + par_io_config_pin((u8) pio_map[0], (u8) pio_map[1], + (int) pio_map[2], (int) pio_map[3], + (int) pio_map[4], (int) pio_map[5]); + pio_map += 6; + pio_map_len -= 6; + } + of_node_put(pio); + return 0; +} +EXPORT_SYMBOL(par_io_of_config); + +#ifdef DEBUG +static void dump_par_io(void) +{ + unsigned int i; + + printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io); + for (i = 0; i < num_par_io_ports; i++) { + printk(KERN_INFO " cpodr[%u]=%08x\n", i, + in_be32(&par_io[i].cpodr)); + printk(KERN_INFO " cpdata[%u]=%08x\n", i, + in_be32(&par_io[i].cpdata)); + printk(KERN_INFO " cpdir1[%u]=%08x\n", i, + in_be32(&par_io[i].cpdir1)); + printk(KERN_INFO " cpdir2[%u]=%08x\n", i, + in_be32(&par_io[i].cpdir2)); + printk(KERN_INFO " cppar1[%u]=%08x\n", i, + in_be32(&par_io[i].cppar1)); + printk(KERN_INFO " cppar2[%u]=%08x\n", i, + in_be32(&par_io[i].cppar2)); + } + +} +EXPORT_SYMBOL(dump_par_io); +#endif /* DEBUG */ diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c new file mode 100644 index 00000000..fa589b21 --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/ucc.c @@ -0,0 +1,213 @@ +/* + * arch/powerpc/sysdev/qe_lib/ucc.c + * + * QE UCC API Set - UCC specific routines implementations. + * + * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. 
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+#include <asm/ucc.h>
+
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
+{
+ unsigned long flags;
+
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+ clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
+
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be configured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 - 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
+{
+ u8 __iomem *guemr;
+
+ /* The GUEMR register is at the same location for both slow and fast
+ devices, so we just use uccX.slow.guemr. */
+ switch (ucc_num) {
+ case 0: guemr = &qe_immr->ucc1.slow.guemr;
+ break;
+ case 1: guemr = &qe_immr->ucc2.slow.guemr;
+ break;
+ case 2: guemr = &qe_immr->ucc3.slow.guemr;
+ break;
+ case 3: guemr = &qe_immr->ucc4.slow.guemr;
+ break;
+ case 4: guemr = &qe_immr->ucc5.slow.guemr;
+ break;
+ case 5: guemr = &qe_immr->ucc6.slow.guemr;
+ break;
+ case 6: guemr = &qe_immr->ucc7.slow.guemr;
+ break;
+ case 7: guemr = &qe_immr->ucc8.slow.guemr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
+}
+
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+ unsigned int *reg_num, unsigned int *shift)
+{
+ unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+ *reg_num = cmx + 1;
+ *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+ *shift = 16 - 8 * (ucc_num & 2);
+}
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+
+ /* check if the UCC number is in range. */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ if (set)
+ setbits32(cmxucr, mask << shift);
+ else
+ clrbits32(cmxucr, mask << shift);
+
+ return 0;
+}
+
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+ u32 clock_bits = 0;
+
+ /* check if the UCC number is in range.
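+ * Each CMXUCR register packs the Rx/Tx clock-source fields for two
+ * UCCs, so get_cmxucr_reg() above resolves a UCC number to its
+ * register and byte shift; for instance, ucc_num 0 (UCC1) resolves to
+ * CMXUCR1 with a shift of 16.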
 */
+ if (ucc_num > UCC_MAX_NUM - 1)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
+ return -EINVAL;
+
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
+
+ switch (reg_num) {
+ case 1:
+ switch (clock) {
+ case QE_BRG1: clock_bits = 1; break;
+ case QE_BRG2: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK9: clock_bits = 5; break;
+ case QE_CLK10: clock_bits = 6; break;
+ case QE_CLK11: clock_bits = 7; break;
+ case QE_CLK12: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_BRG5: clock_bits = 1; break;
+ case QE_BRG6: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK13: clock_bits = 5; break;
+ case QE_CLK14: clock_bits = 6; break;
+ case QE_CLK19: clock_bits = 7; break;
+ case QE_CLK20: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_BRG9: clock_bits = 1; break;
+ case QE_BRG10: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK3: clock_bits = 5; break;
+ case QE_CLK4: clock_bits = 6; break;
+ case QE_CLK17: clock_bits = 7; break;
+ case QE_CLK18: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_BRG13: clock_bits = 1; break;
+ case QE_BRG14: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK5: clock_bits = 5; break;
+ case QE_CLK6: clock_bits = 6; break;
+ case QE_CLK21: clock_bits = 7; break;
+ case QE_CLK22: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
+ }
+ break;
+ default: break;
+ }
+
+ /* Check for invalid combination of clock and UCC number */
+ if (!clock_bits)
+ return -ENOENT;
+
+ if (mode == COMM_DIR_RX)
+ shift += 4;
+
+ clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
new file mode 100644
index 00000000..25fbbfaa
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ *
+ * Authors: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/module.h> + +#include <asm/io.h> +#include <asm/immap_qe.h> +#include <asm/qe.h> + +#include <asm/ucc.h> +#include <asm/ucc_fast.h> + +void ucc_fast_dump_regs(struct ucc_fast_private * uccf) +{ + printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num); + printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs); + + printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr)); + printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr)); + printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr)); + printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr)); + printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce)); + printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm)); + printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n", + &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs)); + printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb)); + printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs)); + printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet)); + printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset)); + printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb)); + printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs)); + printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet)); + printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt)); + printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n", + &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt)); + printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n", + &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry)); + printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n", + &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr)); +} +EXPORT_SYMBOL(ucc_fast_dump_regs); + +u32 ucc_fast_get_qe_cr_subblock(int uccf_num) +{ + switch (uccf_num) { + case 0: return QE_CR_SUBBLOCK_UCCFAST1; + case 1: return QE_CR_SUBBLOCK_UCCFAST2; + case 2: return QE_CR_SUBBLOCK_UCCFAST3; + case 3: return QE_CR_SUBBLOCK_UCCFAST4; + case 4: return QE_CR_SUBBLOCK_UCCFAST5; + case 5: return QE_CR_SUBBLOCK_UCCFAST6; + case 6: return QE_CR_SUBBLOCK_UCCFAST7; + case 7: return QE_CR_SUBBLOCK_UCCFAST8; + default: return QE_CR_SUBBLOCK_INVALID; + } +} +EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock); + +void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf) +{ + out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD); +} +EXPORT_SYMBOL(ucc_fast_transmit_on_demand); + +void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode) +{ + struct ucc_fast __iomem *uf_regs; + u32 gumr; + + uf_regs = uccf->uf_regs; + + /* Enable reception and/or transmission on this UCC. 
*/ + gumr = in_be32(&uf_regs->gumr); + if (mode & COMM_DIR_TX) { + gumr |= UCC_FAST_GUMR_ENT; + uccf->enabled_tx = 1; + } + if (mode & COMM_DIR_RX) { + gumr |= UCC_FAST_GUMR_ENR; + uccf->enabled_rx = 1; + } + out_be32(&uf_regs->gumr, gumr); +} +EXPORT_SYMBOL(ucc_fast_enable); + +void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode) +{ + struct ucc_fast __iomem *uf_regs; + u32 gumr; + + uf_regs = uccf->uf_regs; + + /* Disable reception and/or transmission on this UCC. */ + gumr = in_be32(&uf_regs->gumr); + if (mode & COMM_DIR_TX) { + gumr &= ~UCC_FAST_GUMR_ENT; + uccf->enabled_tx = 0; + } + if (mode & COMM_DIR_RX) { + gumr &= ~UCC_FAST_GUMR_ENR; + uccf->enabled_rx = 0; + } + out_be32(&uf_regs->gumr, gumr); +} +EXPORT_SYMBOL(ucc_fast_disable); + +int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret) +{ + struct ucc_fast_private *uccf; + struct ucc_fast __iomem *uf_regs; + u32 gumr; + int ret; + + if (!uf_info) + return -EINVAL; + + /* check if the UCC port number is in range. */ + if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) { + printk(KERN_ERR "%s: illegal UCC number\n", __func__); + return -EINVAL; + } + + /* Check that 'max_rx_buf_length' is properly aligned (4). */ + if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: max_rx_buf_length not aligned\n", + __func__); + return -EINVAL; + } + + /* Validate Virtual Fifo register values */ + if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) { + printk(KERN_ERR "%s: urfs is too small\n", __func__); + return -EINVAL; + } + + if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: urfs is not aligned\n", __func__); + return -EINVAL; + } + + if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: urfet is not aligned.\n", __func__); + return -EINVAL; + } + + if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: urfset is not aligned\n", __func__); + return -EINVAL; + } + + if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: utfs is not aligned\n", __func__); + return -EINVAL; + } + + if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: utfet is not aligned\n", __func__); + return -EINVAL; + } + + if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) { + printk(KERN_ERR "%s: utftt is not aligned\n", __func__); + return -EINVAL; + } + + uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL); + if (!uccf) { + printk(KERN_ERR "%s: Cannot allocate private data\n", + __func__); + return -ENOMEM; + } + + /* Fill fast UCC structure */ + uccf->uf_info = uf_info; + /* Set the PHY base address */ + uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast)); + if (uccf->uf_regs == NULL) { + printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); + kfree(uccf); + return -ENOMEM; + } + + uccf->enabled_tx = 0; + uccf->enabled_rx = 0; + uccf->stopped_tx = 0; + uccf->stopped_rx = 0; + uf_regs = uccf->uf_regs; + uccf->p_ucce = &uf_regs->ucce; + uccf->p_uccm = &uf_regs->uccm; +#ifdef CONFIG_UGETH_TX_ON_DEMAND + uccf->p_utodr = &uf_regs->utodr; +#endif +#ifdef STATISTICS + uccf->tx_frames = 0; + uccf->rx_frames = 0; + uccf->rx_discarded = 0; +#endif /* STATISTICS */ + + /* Set UCC to fast type */ + ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST); + if (ret) { + printk(KERN_ERR "%s: cannot set UCC type\n", __func__); + ucc_fast_free(uccf); + return ret; + } + + 
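+ /*
+ * From here on the UCC is programmed from the caller-supplied
+ * ucc_fast_info: the general mode register (GUMR) first, then the
+ * MURAM-backed virtual FIFOs, and finally the QE clock mux routing.
+ */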
uccf->mrblr = uf_info->max_rx_buf_length; + + /* Set GUMR */ + /* For more details see the hardware spec. */ + gumr = uf_info->ttx_trx; + if (uf_info->tci) + gumr |= UCC_FAST_GUMR_TCI; + if (uf_info->cdp) + gumr |= UCC_FAST_GUMR_CDP; + if (uf_info->ctsp) + gumr |= UCC_FAST_GUMR_CTSP; + if (uf_info->cds) + gumr |= UCC_FAST_GUMR_CDS; + if (uf_info->ctss) + gumr |= UCC_FAST_GUMR_CTSS; + if (uf_info->txsy) + gumr |= UCC_FAST_GUMR_TXSY; + if (uf_info->rsyn) + gumr |= UCC_FAST_GUMR_RSYN; + gumr |= uf_info->synl; + if (uf_info->rtsm) + gumr |= UCC_FAST_GUMR_RTSM; + gumr |= uf_info->renc; + if (uf_info->revd) + gumr |= UCC_FAST_GUMR_REVD; + gumr |= uf_info->tenc; + gumr |= uf_info->tcrc; + gumr |= uf_info->mode; + out_be32(&uf_regs->gumr, gumr); + + /* Allocate memory for Tx Virtual Fifo */ + uccf->ucc_fast_tx_virtual_fifo_base_offset = + qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); + if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) { + printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n", + __func__); + uccf->ucc_fast_tx_virtual_fifo_base_offset = 0; + ucc_fast_free(uccf); + return -ENOMEM; + } + + /* Allocate memory for Rx Virtual Fifo */ + uccf->ucc_fast_rx_virtual_fifo_base_offset = + qe_muram_alloc(uf_info->urfs + + UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR, + UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT); + if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) { + printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n", + __func__); + uccf->ucc_fast_rx_virtual_fifo_base_offset = 0; + ucc_fast_free(uccf); + return -ENOMEM; + } + + /* Set Virtual Fifo registers */ + out_be16(&uf_regs->urfs, uf_info->urfs); + out_be16(&uf_regs->urfet, uf_info->urfet); + out_be16(&uf_regs->urfset, uf_info->urfset); + out_be16(&uf_regs->utfs, uf_info->utfs); + out_be16(&uf_regs->utfet, uf_info->utfet); + out_be16(&uf_regs->utftt, uf_info->utftt); + /* utfb, urfb are offsets from MURAM base */ + out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset); + out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset); + + /* Mux clocking */ + /* Grant Support */ + ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support); + /* Breakpoint Support */ + ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support); + /* Set Tsa or NMSI mode. */ + ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa); + /* If NMSI (not Tsa), set Tx and Rx clock. */ + if (!uf_info->tsa) { + /* Rx clock routing */ + if ((uf_info->rx_clock != QE_CLK_NONE) && + ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock, + COMM_DIR_RX)) { + printk(KERN_ERR "%s: illegal value for RX clock\n", + __func__); + ucc_fast_free(uccf); + return -EINVAL; + } + /* Tx clock routing */ + if ((uf_info->tx_clock != QE_CLK_NONE) && + ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock, + COMM_DIR_TX)) { + printk(KERN_ERR "%s: illegal value for TX clock\n", + __func__); + ucc_fast_free(uccf); + return -EINVAL; + } + } + + /* Set interrupt mask register at UCC level. */ + out_be32(&uf_regs->uccm, uf_info->uccm_mask); + + /* First, clear anything pending at UCC level, + * otherwise, old garbage may come through + * as soon as the dam is opened. 
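+ * UCCE is a write-one-to-clear event register, so the write of all
+ * ones below acknowledges every stale event at once.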
*/ + + /* Writing '1' clears */ + out_be32(&uf_regs->ucce, 0xffffffff); + + *uccf_ret = uccf; + return 0; +} +EXPORT_SYMBOL(ucc_fast_init); + +void ucc_fast_free(struct ucc_fast_private * uccf) +{ + if (!uccf) + return; + + if (uccf->ucc_fast_tx_virtual_fifo_base_offset) + qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset); + + if (uccf->ucc_fast_rx_virtual_fifo_base_offset) + qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset); + + if (uccf->uf_regs) + iounmap(uccf->uf_regs); + + kfree(uccf); +} +EXPORT_SYMBOL(ucc_fast_free); diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c new file mode 100644 index 00000000..e1d6a134 --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c @@ -0,0 +1,380 @@ +/* + * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. + * + * Authors: Shlomi Gridish <gridish@freescale.com> + * Li Yang <leoli@freescale.com> + * + * Description: + * QE UCC Slow API Set - UCC Slow specific routines implementations. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/module.h> + +#include <asm/io.h> +#include <asm/immap_qe.h> +#include <asm/qe.h> + +#include <asm/ucc.h> +#include <asm/ucc_slow.h> + +u32 ucc_slow_get_qe_cr_subblock(int uccs_num) +{ + switch (uccs_num) { + case 0: return QE_CR_SUBBLOCK_UCCSLOW1; + case 1: return QE_CR_SUBBLOCK_UCCSLOW2; + case 2: return QE_CR_SUBBLOCK_UCCSLOW3; + case 3: return QE_CR_SUBBLOCK_UCCSLOW4; + case 4: return QE_CR_SUBBLOCK_UCCSLOW5; + case 5: return QE_CR_SUBBLOCK_UCCSLOW6; + case 6: return QE_CR_SUBBLOCK_UCCSLOW7; + case 7: return QE_CR_SUBBLOCK_UCCSLOW8; + default: return QE_CR_SUBBLOCK_INVALID; + } +} +EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock); + +void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs) +{ + out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD); +} + +void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs) +{ + struct ucc_slow_info *us_info = uccs->us_info; + u32 id; + + id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_TX, id, + QE_CR_PROTOCOL_UNSPECIFIED, 0); +} +EXPORT_SYMBOL(ucc_slow_graceful_stop_tx); + +void ucc_slow_stop_tx(struct ucc_slow_private * uccs) +{ + struct ucc_slow_info *us_info = uccs->us_info; + u32 id; + + id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); + qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); +} +EXPORT_SYMBOL(ucc_slow_stop_tx); + +void ucc_slow_restart_tx(struct ucc_slow_private * uccs) +{ + struct ucc_slow_info *us_info = uccs->us_info; + u32 id; + + id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); + qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); +} +EXPORT_SYMBOL(ucc_slow_restart_tx); + +void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode) +{ + struct ucc_slow *us_regs; + u32 gumr_l; + + us_regs = uccs->us_regs; + + /* Enable reception and/or transmission on this UCC. 
*/ + gumr_l = in_be32(&us_regs->gumr_l); + if (mode & COMM_DIR_TX) { + gumr_l |= UCC_SLOW_GUMR_L_ENT; + uccs->enabled_tx = 1; + } + if (mode & COMM_DIR_RX) { + gumr_l |= UCC_SLOW_GUMR_L_ENR; + uccs->enabled_rx = 1; + } + out_be32(&us_regs->gumr_l, gumr_l); +} +EXPORT_SYMBOL(ucc_slow_enable); + +void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode) +{ + struct ucc_slow *us_regs; + u32 gumr_l; + + us_regs = uccs->us_regs; + + /* Disable reception and/or transmission on this UCC. */ + gumr_l = in_be32(&us_regs->gumr_l); + if (mode & COMM_DIR_TX) { + gumr_l &= ~UCC_SLOW_GUMR_L_ENT; + uccs->enabled_tx = 0; + } + if (mode & COMM_DIR_RX) { + gumr_l &= ~UCC_SLOW_GUMR_L_ENR; + uccs->enabled_rx = 0; + } + out_be32(&us_regs->gumr_l, gumr_l); +} +EXPORT_SYMBOL(ucc_slow_disable); + +/* Initialize the UCC for Slow operations + * + * The caller should initialize the following us_info + */ +int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret) +{ + struct ucc_slow_private *uccs; + u32 i; + struct ucc_slow __iomem *us_regs; + u32 gumr; + struct qe_bd *bd; + u32 id; + u32 command; + int ret = 0; + + if (!us_info) + return -EINVAL; + + /* check if the UCC port number is in range. */ + if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) { + printk(KERN_ERR "%s: illegal UCC number\n", __func__); + return -EINVAL; + } + + /* + * Set mrblr + * Check that 'max_rx_buf_length' is properly aligned (4), unless + * rfw is 1, meaning that QE accepts one byte at a time, unlike normal + * case when QE accepts 32 bits at a time. + */ + if ((!us_info->rfw) && + (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { + printk(KERN_ERR "max_rx_buf_length not aligned.\n"); + return -EINVAL; + } + + uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); + if (!uccs) { + printk(KERN_ERR "%s: Cannot allocate private data\n", + __func__); + return -ENOMEM; + } + + /* Fill slow UCC structure */ + uccs->us_info = us_info; + /* Set the PHY base address */ + uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow)); + if (uccs->us_regs == NULL) { + printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__); + kfree(uccs); + return -ENOMEM; + } + + uccs->saved_uccm = 0; + uccs->p_rx_frame = 0; + us_regs = uccs->us_regs; + uccs->p_ucce = (u16 *) & (us_regs->ucce); + uccs->p_uccm = (u16 *) & (us_regs->uccm); +#ifdef STATISTICS + uccs->rx_frames = 0; + uccs->tx_frames = 0; + uccs->rx_discarded = 0; +#endif /* STATISTICS */ + + /* Get PRAM base */ + uccs->us_pram_offset = + qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); + if (IS_ERR_VALUE(uccs->us_pram_offset)) { + printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__); + ucc_slow_free(uccs); + return -ENOMEM; + } + id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); + qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol, + uccs->us_pram_offset); + + uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); + + /* Set UCC to slow type */ + ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW); + if (ret) { + printk(KERN_ERR "%s: cannot set UCC type", __func__); + ucc_slow_free(uccs); + return ret; + } + + out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); + + INIT_LIST_HEAD(&uccs->confQ); + + /* Allocate BDs. 
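	 *
	 * Each ring occupies ring_len * sizeof(struct qe_bd) bytes of
	 * MURAM, and the QE wraps from the descriptor carrying the
	 * R_W/T_W bit back to the base. For a hypothetical
	 * tx_bd_ring_len of 4, the init loops below leave:
	 *
	 *	bd[0..2] status = 0
	 *	bd[3]    status = T_W	(wrap bit, set after the loop)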
*/ + uccs->rx_base_offset = + qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), + QE_ALIGNMENT_OF_BD); + if (IS_ERR_VALUE(uccs->rx_base_offset)) { + printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__, + us_info->rx_bd_ring_len); + uccs->rx_base_offset = 0; + ucc_slow_free(uccs); + return -ENOMEM; + } + + uccs->tx_base_offset = + qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), + QE_ALIGNMENT_OF_BD); + if (IS_ERR_VALUE(uccs->tx_base_offset)) { + printk(KERN_ERR "%s: cannot allocate TX BDs", __func__); + uccs->tx_base_offset = 0; + ucc_slow_free(uccs); + return -ENOMEM; + } + + /* Init Tx bds */ + bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); + for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { + /* clear bd buffer */ + out_be32(&bd->buf, 0); + /* set bd status and length */ + out_be32((u32 *) bd, 0); + bd++; + } + /* for last BD set Wrap bit */ + out_be32(&bd->buf, 0); + out_be32((u32 *) bd, cpu_to_be32(T_W)); + + /* Init Rx bds */ + bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); + for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { + /* set bd status and length */ + out_be32((u32*)bd, 0); + /* clear bd buffer */ + out_be32(&bd->buf, 0); + bd++; + } + /* for last BD set Wrap bit */ + out_be32((u32*)bd, cpu_to_be32(R_W)); + out_be32(&bd->buf, 0); + + /* Set GUMR (For more details see the hardware spec.). */ + /* gumr_h */ + gumr = us_info->tcrc; + if (us_info->cdp) + gumr |= UCC_SLOW_GUMR_H_CDP; + if (us_info->ctsp) + gumr |= UCC_SLOW_GUMR_H_CTSP; + if (us_info->cds) + gumr |= UCC_SLOW_GUMR_H_CDS; + if (us_info->ctss) + gumr |= UCC_SLOW_GUMR_H_CTSS; + if (us_info->tfl) + gumr |= UCC_SLOW_GUMR_H_TFL; + if (us_info->rfw) + gumr |= UCC_SLOW_GUMR_H_RFW; + if (us_info->txsy) + gumr |= UCC_SLOW_GUMR_H_TXSY; + if (us_info->rtsm) + gumr |= UCC_SLOW_GUMR_H_RTSM; + out_be32(&us_regs->gumr_h, gumr); + + /* gumr_l */ + gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | + us_info->diag | us_info->mode; + if (us_info->tci) + gumr |= UCC_SLOW_GUMR_L_TCI; + if (us_info->rinv) + gumr |= UCC_SLOW_GUMR_L_RINV; + if (us_info->tinv) + gumr |= UCC_SLOW_GUMR_L_TINV; + if (us_info->tend) + gumr |= UCC_SLOW_GUMR_L_TEND; + out_be32(&us_regs->gumr_l, gumr); + + /* Function code registers */ + + /* if the data is in cachable memory, the 'global' */ + /* in the function code should be set. */ + uccs->us_pram->tbmr = UCC_BMR_BO_BE; + uccs->us_pram->rbmr = UCC_BMR_BO_BE; + + /* rbase, tbase are offsets from MURAM base */ + out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset); + out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset); + + /* Mux clocking */ + /* Grant Support */ + ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support); + /* Breakpoint Support */ + ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support); + /* Set Tsa or NMSI mode. */ + ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa); + /* If NMSI (not Tsa), set Tx and Rx clock. */ + if (!us_info->tsa) { + /* Rx clock routing */ + if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock, + COMM_DIR_RX)) { + printk(KERN_ERR "%s: illegal value for RX clock\n", + __func__); + ucc_slow_free(uccs); + return -EINVAL; + } + /* Tx clock routing */ + if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock, + COMM_DIR_TX)) { + printk(KERN_ERR "%s: illegal value for TX clock\n", + __func__); + ucc_slow_free(uccs); + return -EINVAL; + } + } + + /* Set interrupt mask register at UCC level. 
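	 *
	 * The event register written just below is write-one-to-clear,
	 * so an interrupt handler sketch acknowledges exactly the
	 * events it consumed:
	 *
	 *	u16 events = in_be16(&us_regs->ucce) & in_be16(&us_regs->uccm);
	 *	out_be16(&us_regs->ucce, events);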
*/ + out_be16(&us_regs->uccm, us_info->uccm_mask); + + /* First, clear anything pending at UCC level, + * otherwise, old garbage may come through + * as soon as the dam is opened. */ + + /* Writing '1' clears */ + out_be16(&us_regs->ucce, 0xffff); + + /* Issue QE Init command */ + if (us_info->init_tx && us_info->init_rx) + command = QE_INIT_TX_RX; + else if (us_info->init_tx) + command = QE_INIT_TX; + else + command = QE_INIT_RX; /* We know at least one is TRUE */ + + qe_issue_cmd(command, id, us_info->protocol, 0); + + *uccs_ret = uccs; + return 0; +} +EXPORT_SYMBOL(ucc_slow_init); + +void ucc_slow_free(struct ucc_slow_private * uccs) +{ + if (!uccs) + return; + + if (uccs->rx_base_offset) + qe_muram_free(uccs->rx_base_offset); + + if (uccs->tx_base_offset) + qe_muram_free(uccs->tx_base_offset); + + if (uccs->us_pram) + qe_muram_free(uccs->us_pram_offset); + + if (uccs->us_regs) + iounmap(uccs->us_regs); + + kfree(uccs); +} +EXPORT_SYMBOL(ucc_slow_free); + diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c new file mode 100644 index 00000000..81054620 --- /dev/null +++ b/arch/powerpc/sysdev/qe_lib/usb.c @@ -0,0 +1,55 @@ +/* + * QE USB routines + * + * Copyright (c) Freescale Semicondutor, Inc. 2006. + * Shlomi Gridish <gridish@freescale.com> + * Jerry Huang <Chang-Ming.Huang@freescale.com> + * Copyright (c) MontaVista Software, Inc. 2008. + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <asm/immap_qe.h> +#include <asm/qe.h> + +int qe_usb_clock_set(enum qe_clock clk, int rate) +{ + struct qe_mux __iomem *mux = &qe_immr->qmx; + unsigned long flags; + u32 val; + + switch (clk) { + case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break; + case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break; + case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break; + case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break; + case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break; + case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break; + case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break; + case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break; + case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break; + case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break; + default: + pr_err("%s: requested unknown clock %d\n", __func__, clk); + return -EINVAL; + } + + if (qe_clock_is_brg(clk)) + qe_setbrg(clk, rate, 1); + + spin_lock_irqsave(&cmxgcr_lock, flags); + + clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); + + spin_unlock_irqrestore(&cmxgcr_lock, flags); + + return 0; +} +EXPORT_SYMBOL(qe_usb_clock_set); diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c new file mode 100644 index 00000000..c1879ebf --- /dev/null +++ b/arch/powerpc/sysdev/rtc_cmos_setup.c @@ -0,0 +1,71 @@ +/* + * Setup code for PC-style Real-Time Clock. + * + * Author: Wade Farnsworth <wfarnsworth@mvista.com> + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
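 *
 * The probe below matches a device-tree node with compatible
 * "pnpPNP,b00"; an illustrative fragment (the cell values are made
 * up, not taken from a real board):
 *
 *	rtc@70 {
 *		compatible = "pnpPNP,b00";
 *		reg = <1 0x70 2>;
 *	};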
+ */ + +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/mc146818rtc.h> + +#include <asm/prom.h> + +static int __init add_rtc(void) +{ + struct device_node *np; + struct platform_device *pd; + struct resource res[2]; + unsigned int num_res = 1; + int ret; + + memset(&res, 0, sizeof(res)); + + np = of_find_compatible_node(NULL, NULL, "pnpPNP,b00"); + if (!np) + return -ENODEV; + + ret = of_address_to_resource(np, 0, &res[0]); + of_node_put(np); + if (ret) + return ret; + + /* + * RTC_PORT(x) is hardcoded in asm/mc146818rtc.h. Verify that the + * address provided by the device node matches. + */ + if (res[0].start != RTC_PORT(0)) + return -EINVAL; + + np = of_find_compatible_node(NULL, NULL, "chrp,iic"); + if (!np) + np = of_find_compatible_node(NULL, NULL, "pnpPNP,000"); + if (np) { + of_node_put(np); + /* + * Use a fixed interrupt value of 8 since on PPC if we are + * using this its off an i8259 which we ensure has interrupt + * numbers 0..15. + */ + res[1].start = 8; + res[1].end = 8; + res[1].flags = IORESOURCE_IRQ; + num_res++; + } + + pd = platform_device_register_simple("rtc_cmos", -1, + &res[0], num_res); + + if (IS_ERR(pd)) + return PTR_ERR(pd); + + return 0; +} +fs_initcall(add_rtc); + +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c new file mode 100644 index 00000000..b2593ce3 --- /dev/null +++ b/arch/powerpc/sysdev/scom.c @@ -0,0 +1,192 @@ +/* + * Copyright 2010 Benjamin Herrenschmidt, IBM Corp + * <benh@kernel.crashing.org> + * and David Gibson, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See + * the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <asm/prom.h> +#include <asm/scom.h> + +const struct scom_controller *scom_controller; +EXPORT_SYMBOL_GPL(scom_controller); + +struct device_node *scom_find_parent(struct device_node *node) +{ + struct device_node *par, *tmp; + const u32 *p; + + for (par = of_node_get(node); par;) { + if (of_get_property(par, "scom-controller", NULL)) + break; + p = of_get_property(par, "scom-parent", NULL); + tmp = par; + if (p == NULL) + par = of_get_parent(par); + else + par = of_find_node_by_phandle(*p); + of_node_put(tmp); + } + return par; +} +EXPORT_SYMBOL_GPL(scom_find_parent); + +scom_map_t scom_map_device(struct device_node *dev, int index) +{ + struct device_node *parent; + unsigned int cells, size; + const u32 *prop; + u64 reg, cnt; + scom_map_t ret; + + parent = scom_find_parent(dev); + + if (parent == NULL) + return 0; + + prop = of_get_property(parent, "#scom-cells", NULL); + cells = prop ? 
*prop : 1; + + prop = of_get_property(dev, "scom-reg", &size); + if (!prop) + return 0; + size >>= 2; + + if (index >= (size / (2*cells))) + return 0; + + reg = of_read_number(&prop[index * cells * 2], cells); + cnt = of_read_number(&prop[index * cells * 2 + cells], cells); + + ret = scom_map(parent, reg, cnt); + of_node_put(parent); + + return ret; +} +EXPORT_SYMBOL_GPL(scom_map_device); + +#ifdef CONFIG_SCOM_DEBUGFS +struct scom_debug_entry { + struct device_node *dn; + unsigned long addr; + scom_map_t map; + spinlock_t lock; + char name[8]; + struct debugfs_blob_wrapper blob; +}; + +static int scom_addr_set(void *data, u64 val) +{ + struct scom_debug_entry *ent = data; + + ent->addr = 0; + scom_unmap(ent->map); + + ent->map = scom_map(ent->dn, val, 1); + if (scom_map_ok(ent->map)) + ent->addr = val; + else + return -EFAULT; + + return 0; +} + +static int scom_addr_get(void *data, u64 *val) +{ + struct scom_debug_entry *ent = data; + *val = ent->addr; + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set, + "0x%llx\n"); + +static int scom_val_set(void *data, u64 val) +{ + struct scom_debug_entry *ent = data; + + if (!scom_map_ok(ent->map)) + return -EFAULT; + + scom_write(ent->map, 0, val); + + return 0; +} + +static int scom_val_get(void *data, u64 *val) +{ + struct scom_debug_entry *ent = data; + + if (!scom_map_ok(ent->map)) + return -EFAULT; + + *val = scom_read(ent->map, 0); + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set, + "0x%llx\n"); + +static int scom_debug_init_one(struct dentry *root, struct device_node *dn, + int i) +{ + struct scom_debug_entry *ent; + struct dentry *dir; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return -ENOMEM; + + ent->dn = of_node_get(dn); + ent->map = SCOM_MAP_INVALID; + spin_lock_init(&ent->lock); + snprintf(ent->name, 8, "scom%d", i); + ent->blob.data = dn->full_name; + ent->blob.size = strlen(dn->full_name); + + dir = debugfs_create_dir(ent->name, root); + if (!dir) { + of_node_put(dn); + kfree(ent); + return -1; + } + + debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops); + debugfs_create_file("value", 0600, dir, ent, &scom_val_fops); + debugfs_create_blob("path", 0400, dir, &ent->blob); + + return 0; +} + +static int scom_debug_init(void) +{ + struct device_node *dn; + struct dentry *root; + int i, rc; + + root = debugfs_create_dir("scom", powerpc_debugfs_root); + if (!root) + return -1; + + i = rc = 0; + for_each_node_with_property(dn, "scom-controller") + rc |= scom_debug_init_one(root, dn, i++); + + return rc; +} +device_initcall(scom_debug_init); +#endif /* CONFIG_SCOM_DEBUGFS */ diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c new file mode 100644 index 00000000..b6defda5 --- /dev/null +++ b/arch/powerpc/sysdev/simple_gpio.c @@ -0,0 +1,153 @@ +/* + * Simple Memory-Mapped GPIOs + * + * Copyright (c) MontaVista Software, Inc. 2008. + * + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
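 *
 * Board setup code registers banks by compatible string, e.g.
 * (the string here is hypothetical):
 *
 *	simple_gpiochip_init("fsl,mpc8349mitx-gpio-bank");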
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/slab.h> +#include <asm/prom.h> +#include "simple_gpio.h" + +struct u8_gpio_chip { + struct of_mm_gpio_chip mm_gc; + spinlock_t lock; + + /* shadowed data register to clear/set bits safely */ + u8 data; +}; + +static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc) +{ + return container_of(mm_gc, struct u8_gpio_chip, mm_gc); +} + +static u8 u8_pin2mask(unsigned int pin) +{ + return 1 << (8 - 1 - pin); +} + +static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + + return in_8(mm_gc->regs) & u8_pin2mask(gpio); +} + +static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) +{ + struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); + struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc); + unsigned long flags; + + spin_lock_irqsave(&u8_gc->lock, flags); + + if (val) + u8_gc->data |= u8_pin2mask(gpio); + else + u8_gc->data &= ~u8_pin2mask(gpio); + + out_8(mm_gc->regs, u8_gc->data); + + spin_unlock_irqrestore(&u8_gc->lock, flags); +} + +static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) +{ + return 0; +} + +static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +{ + u8_gpio_set(gc, gpio, val); + return 0; +} + +static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) +{ + struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc); + + u8_gc->data = in_8(mm_gc->regs); +} + +static int __init u8_simple_gpiochip_add(struct device_node *np) +{ + int ret; + struct u8_gpio_chip *u8_gc; + struct of_mm_gpio_chip *mm_gc; + struct gpio_chip *gc; + + u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL); + if (!u8_gc) + return -ENOMEM; + + spin_lock_init(&u8_gc->lock); + + mm_gc = &u8_gc->mm_gc; + gc = &mm_gc->gc; + + mm_gc->save_regs = u8_gpio_save_regs; + gc->ngpio = 8; + gc->direction_input = u8_gpio_dir_in; + gc->direction_output = u8_gpio_dir_out; + gc->get = u8_gpio_get; + gc->set = u8_gpio_set; + + ret = of_mm_gpiochip_add(np, mm_gc); + if (ret) + goto err; + return 0; +err: + kfree(u8_gc); + return ret; +} + +void __init simple_gpiochip_init(const char *compatible) +{ + struct device_node *np; + + for_each_compatible_node(np, NULL, compatible) { + int ret; + struct resource r; + + ret = of_address_to_resource(np, 0, &r); + if (ret) + goto err; + + switch (resource_size(&r)) { + case 1: + ret = u8_simple_gpiochip_add(np); + if (ret) + goto err; + break; + default: + /* + * Whenever you need support for GPIO bank width > 1, + * please just turn u8_ code into huge macros, and + * construct needed uX_ code with it. 
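			 *
			 * For instance, a 16-bit bank (sketch only)
			 * would start by mirroring u8_pin2mask():
			 *
			 *	static u16 u16_pin2mask(unsigned int pin)
			 *	{
			 *		return 1 << (16 - 1 - pin);
			 *	}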
+ */ + ret = -ENOSYS; + goto err; + } + continue; +err: + pr_err("%s: registration failed, status %d\n", + np->full_name, ret); + } +} diff --git a/arch/powerpc/sysdev/simple_gpio.h b/arch/powerpc/sysdev/simple_gpio.h new file mode 100644 index 00000000..3a7b0c51 --- /dev/null +++ b/arch/powerpc/sysdev/simple_gpio.h @@ -0,0 +1,12 @@ +#ifndef __SYSDEV_SIMPLE_GPIO_H +#define __SYSDEV_SIMPLE_GPIO_H + +#include <linux/errno.h> + +#ifdef CONFIG_SIMPLE_GPIO +extern void simple_gpiochip_init(const char *compatible); +#else +static inline void simple_gpiochip_init(const char *compatible) {} +#endif /* CONFIG_SIMPLE_GPIO */ + +#endif /* __SYSDEV_SIMPLE_GPIO_H */ diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c new file mode 100644 index 00000000..ee056807 --- /dev/null +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -0,0 +1,160 @@ +/* + * tsi108/109 device setup code + * + * Maintained by Roy Zang < tie-fei.zang@freescale.com > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/major.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/of_net.h> +#include <asm/tsi108.h> + +#include <asm/system.h> +#include <asm/atomic.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/prom.h> +#include <mm/mmu_decl.h> + +#undef DEBUG + +#ifdef DEBUG +#define DBG(fmt...) do { printk(fmt); } while(0) +#else +#define DBG(fmt...) 
do { } while(0) +#endif + +static phys_addr_t tsi108_csr_base = -1; + +phys_addr_t get_csrbase(void) +{ + struct device_node *tsi; + + if (tsi108_csr_base != -1) + return tsi108_csr_base; + + tsi = of_find_node_by_type(NULL, "tsi-bridge"); + if (tsi) { + unsigned int size; + const void *prop = of_get_property(tsi, "reg", &size); + tsi108_csr_base = of_translate_address(tsi, prop); + of_node_put(tsi); + }; + return tsi108_csr_base; +} + +u32 get_vir_csrbase(void) +{ + return (u32) (ioremap(get_csrbase(), 0x10000)); +} + +EXPORT_SYMBOL(get_csrbase); +EXPORT_SYMBOL(get_vir_csrbase); + +static int __init tsi108_eth_of_init(void) +{ + struct device_node *np; + unsigned int i = 0; + struct platform_device *tsi_eth_dev; + struct resource res; + int ret; + + for_each_compatible_node(np, "network", "tsi108-ethernet") { + struct resource r[2]; + struct device_node *phy, *mdio; + hw_info tsi_eth_data; + const unsigned int *phy_id; + const void *mac_addr; + const phandle *ph; + + memset(r, 0, sizeof(r)); + memset(&tsi_eth_data, 0, sizeof(tsi_eth_data)); + + ret = of_address_to_resource(np, 0, &r[0]); + DBG("%s: name:start->end = %s:%pR\n", + __func__, r[0].name, &r[0]); + if (ret) + goto err; + + r[1].name = "tx"; + r[1].start = irq_of_parse_and_map(np, 0); + r[1].end = irq_of_parse_and_map(np, 0); + r[1].flags = IORESOURCE_IRQ; + DBG("%s: name:start->end = %s:%pR\n", + __func__, r[1].name, &r[1]); + + tsi_eth_dev = + platform_device_register_simple("tsi-ethernet", i++, &r[0], + 1); + + if (IS_ERR(tsi_eth_dev)) { + ret = PTR_ERR(tsi_eth_dev); + goto err; + } + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(tsi_eth_data.mac_addr, mac_addr, 6); + + ph = of_get_property(np, "mdio-handle", NULL); + mdio = of_find_node_by_phandle(*ph); + ret = of_address_to_resource(mdio, 0, &res); + of_node_put(mdio); + if (ret) + goto unreg; + + ph = of_get_property(np, "phy-handle", NULL); + phy = of_find_node_by_phandle(*ph); + + if (phy == NULL) { + ret = -ENODEV; + goto unreg; + } + + phy_id = of_get_property(phy, "reg", NULL); + + tsi_eth_data.regs = r[0].start; + tsi_eth_data.phyregs = res.start; + tsi_eth_data.phy = *phy_id; + tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0); + + /* Some boards with the TSI108 bridge (e.g. Holly) + * have a miswiring of the ethernet PHYs which + * requires a workaround. The special + * "txc-rxc-delay-disable" property enables this + * workaround. FIXME: Need to port the tsi108_eth + * driver itself to phylib and use a non-misleading + * name for the workaround flag - it's not actually to + * do with the model of PHY in use */ + if (of_get_property(phy, "txc-rxc-delay-disable", NULL)) + tsi_eth_data.phy_type = TSI108_PHY_BCM54XX; + of_node_put(phy); + + ret = + platform_device_add_data(tsi_eth_dev, &tsi_eth_data, + sizeof(hw_info)); + if (ret) + goto unreg; + } + return 0; +unreg: + platform_device_unregister(tsi_eth_dev); +err: + of_node_put(np); + return ret; +} + +arch_initcall(tsi108_eth_of_init); diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c new file mode 100644 index 00000000..4d186581 --- /dev/null +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -0,0 +1,441 @@ +/* + * Common routines for Tundra Semiconductor TSI108 host bridge. + * + * 2004-2005 (c) Tundra Semiconductor Corp. 
+ * Author: Alex Bounine (alexandreb@tundra.com) + * Author: Roy Zang (tie-fei.zang@freescale.com) + * Add pci interrupt router host + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/irq.h> +#include <linux/interrupt.h> + +#include <asm/byteorder.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> +#include <asm/machdep.h> +#include <asm/pci-bridge.h> +#include <asm/tsi108.h> +#include <asm/tsi108_pci.h> +#include <asm/tsi108_irq.h> +#include <asm/prom.h> + +#undef DEBUG +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +#define tsi_mk_config_addr(bus, devfunc, offset) \ + ((((bus)<<16) | ((devfunc)<<8) | (offset & 0xfc)) + tsi108_pci_cfg_base) + +u32 tsi108_pci_cfg_base; +static u32 tsi108_pci_cfg_phys; +u32 tsi108_csr_vir_base; +static struct irq_host *pci_irq_host; + +extern u32 get_vir_csrbase(void); +extern u32 tsi108_read_reg(u32 reg_offset); +extern void tsi108_write_reg(u32 reg_offset, u32 val); + +int +tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfunc, + int offset, int len, u32 val) +{ + volatile unsigned char *cfg_addr; + struct pci_controller *hose = pci_bus_to_host(bus); + + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(hose, bus->number, devfunc)) + return PCIBIOS_DEVICE_NOT_FOUND; + + cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number, + devfunc, offset) | + (offset & 0x03)); + +#ifdef DEBUG + printk("PCI CFG write : "); + printk("%d:0x%x:0x%x ", bus->number, devfunc, offset); + printk("%d ADDR=0x%08x ", len, (uint) cfg_addr); + printk("data = 0x%08x\n", val); +#endif + + switch (len) { + case 1: + out_8((u8 *) cfg_addr, val); + break; + case 2: + out_le16((u16 *) cfg_addr, val); + break; + default: + out_le32((u32 *) cfg_addr, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +void tsi108_clear_pci_error(u32 pci_cfg_base) +{ + u32 err_stat, err_addr, pci_stat; + + /* + * Quietly clear PB and PCI error flags set as result + * of PCI/X configuration read requests. 
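	 *
	 * (Probing an empty slot terminates the configuration read with
	 * a master abort, which the bridge latches as an error even
	 * though it is the normal way to enumerate the bus; hence the
	 * quiet clear.)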
+ */ + + /* Read PB Error Log Registers */ + + err_stat = tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS); + err_addr = tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_AERR); + + if (err_stat & TSI108_PB_ERRCS_ES) { + /* Clear error flag */ + tsi108_write_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS, + TSI108_PB_ERRCS_ES); + + /* Clear read error reported in PB_ISR */ + tsi108_write_reg(TSI108_PB_OFFSET + TSI108_PB_ISR, + TSI108_PB_ISR_PBS_RD_ERR); + + /* Clear PCI/X bus cfg errors if applicable */ + if ((err_addr & 0xFF000000) == pci_cfg_base) { + pci_stat = + tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_CSR); + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_CSR, + pci_stat); + } + } + + return; +} + +#define __tsi108_read_pci_config(x, addr, op) \ + __asm__ __volatile__( \ + " "op" %0,0,%1\n" \ + "1: eieio\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: li %0,-1\n" \ + " b 2b\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".text" \ + : "=r"(x) : "r"(addr)) + +int +tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 * val) +{ + volatile unsigned char *cfg_addr; + struct pci_controller *hose = pci_bus_to_host(bus); + u32 temp; + + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(hose, bus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number, + devfn, + offset) | (offset & + 0x03)); + + switch (len) { + case 1: + __tsi108_read_pci_config(temp, cfg_addr, "lbzx"); + break; + case 2: + __tsi108_read_pci_config(temp, cfg_addr, "lhbrx"); + break; + default: + __tsi108_read_pci_config(temp, cfg_addr, "lwbrx"); + break; + } + + *val = temp; + +#ifdef DEBUG + if ((0xFFFFFFFF != temp) && (0xFFFF != temp) && (0xFF != temp)) { + printk("PCI CFG read : "); + printk("%d:0x%x:0x%x ", bus->number, devfn, offset); + printk("%d ADDR=0x%08x ", len, (uint) cfg_addr); + printk("data = 0x%x\n", *val); + } +#endif + return PCIBIOS_SUCCESSFUL; +} + +void tsi108_clear_pci_cfg_error(void) +{ + tsi108_clear_pci_error(tsi108_pci_cfg_phys); +} + +static struct pci_ops tsi108_direct_pci_ops = { + .read = tsi108_direct_read_config, + .write = tsi108_direct_write_config, +}; + +int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary) +{ + int len; + struct pci_controller *hose; + struct resource rsrc; + const int *bus_range; + int has_address = 0; + + /* PCI Config mapping */ + tsi108_pci_cfg_base = (u32)ioremap(cfg_phys, TSI108_PCI_CFG_SIZE); + tsi108_pci_cfg_phys = cfg_phys; + DBG("TSI_PCI: %s tsi108_pci_cfg_base=0x%x\n", __func__, + tsi108_pci_cfg_base); + + /* Fetch host bridge registers address */ + has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); + + /* Get bus range if any */ + bus_range = of_get_property(dev, "bus-range", &len); + if (bus_range == NULL || len < 2 * sizeof(int)) { + printk(KERN_WARNING "Can't get bus-range for %s, assume" + " bus 0\n", dev->full_name); + } + + hose = pcibios_alloc_controller(dev); + + if (!hose) { + printk("PCI Host bridge init failed\n"); + return -ENOMEM; + } + + hose->first_busno = bus_range ? bus_range[0] : 0; + hose->last_busno = bus_range ? bus_range[1] : 0xff; + + (hose)->ops = &tsi108_direct_pci_ops; + + printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. 
" + "Firmware bus number: %d->%d\n", + rsrc.start, hose->first_busno, hose->last_busno); + + /* Interpret the "ranges" property */ + /* This also maps the I/O region and sets isa_io/mem_base */ + pci_process_bridge_OF_ranges(hose, dev, primary); + return 0; +} + +/* + * Low level utility functions + */ + +static void tsi108_pci_int_mask(u_int irq) +{ + u_int irp_cfg; + int int_line = (irq - IRQ_PCI_INTAD_BASE); + + irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL); + mb(); + irp_cfg |= (1 << int_line); /* INTx_DIR = output */ + irp_cfg &= ~(3 << (8 + (int_line * 2))); /* INTx_TYPE = unused */ + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, irp_cfg); + mb(); + irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL); +} + +static void tsi108_pci_int_unmask(u_int irq) +{ + u_int irp_cfg; + int int_line = (irq - IRQ_PCI_INTAD_BASE); + + irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL); + mb(); + irp_cfg &= ~(1 << int_line); + irp_cfg |= (3 << (8 + (int_line * 2))); + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, irp_cfg); + mb(); +} + +static void init_pci_source(void) +{ + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, + 0x0000ff00); + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE, + TSI108_PCI_IRP_ENABLE_P_INT); + mb(); +} + +static inline unsigned int get_pci_source(void) +{ + u_int temp = 0; + int irq = -1; + int i; + u_int pci_irp_stat; + static int mask = 0; + + /* Read PCI/X block interrupt status register */ + pci_irp_stat = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT); + mb(); + + if (pci_irp_stat & TSI108_PCI_IRP_STAT_P_INT) { + /* Process Interrupt from PCI bus INTA# - INTD# lines */ + temp = + tsi108_read_reg(TSI108_PCI_OFFSET + + TSI108_PCI_IRP_INTAD) & 0xf; + mb(); + for (i = 0; i < 4; i++, mask++) { + if (temp & (1 << mask % 4)) { + irq = IRQ_PCI_INTA + mask % 4; + mask++; + break; + } + } + + /* Disable interrupts from PCI block */ + temp = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE); + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE, + temp & ~TSI108_PCI_IRP_ENABLE_P_INT); + mb(); + (void)tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE); + mb(); + } +#ifdef DEBUG + else { + printk("TSI108_PIC: error in TSI108_PCI_IRP_STAT\n"); + pci_irp_stat = + tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT); + temp = + tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_INTAD); + mb(); + printk(">> stat=0x%08x intad=0x%08x ", pci_irp_stat, temp); + temp = + tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL); + mb(); + printk("cfg_ctl=0x%08x ", temp); + temp = + tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE); + mb(); + printk("irp_enable=0x%08x\n", temp); + } +#endif /* end of DEBUG */ + + return irq; +} + + +/* + * Linux descriptor level callbacks + */ + +static void tsi108_pci_irq_unmask(struct irq_data *d) +{ + tsi108_pci_int_unmask(d->irq); + + /* Enable interrupts from PCI block */ + tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE, + tsi108_read_reg(TSI108_PCI_OFFSET + + TSI108_PCI_IRP_ENABLE) | + TSI108_PCI_IRP_ENABLE_P_INT); + mb(); +} + +static void tsi108_pci_irq_mask(struct irq_data *d) +{ + tsi108_pci_int_mask(d->irq); +} + +static void tsi108_pci_irq_ack(struct irq_data *d) +{ + tsi108_pci_int_mask(d->irq); +} + +/* + * Interrupt controller descriptor for cascaded PCI interrupt controller. 
+ */ + +static struct irq_chip tsi108_pci_irq = { + .name = "tsi108_PCI_int", + .irq_mask = tsi108_pci_irq_mask, + .irq_ack = tsi108_pci_irq_ack, + .irq_unmask = tsi108_pci_irq_unmask, +}; + +static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_flags) +{ + *out_hwirq = intspec[0]; + *out_flags = IRQ_TYPE_LEVEL_HIGH; + return 0; +} + +static int pci_irq_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ unsigned int irq; + DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); + if ((virq >= 1) && (virq <= 4)){ + irq = virq + IRQ_PCI_INTAD_BASE - 1; + irq_set_status_flags(irq, IRQ_LEVEL); + irq_set_chip(irq, &tsi108_pci_irq); + } + return 0; +} + +static struct irq_host_ops pci_irq_host_ops = { + .map = pci_irq_host_map, + .xlate = pci_irq_host_xlate, +}; + +/* + * Exported functions + */ + +/* + * The Tsi108 PCI interrupts initialization routine. + * + * The INTA# - INTD# interrupts on the PCI bus are reported by the PCI block + * to the MPIC using single interrupt source (IRQ_TSI108_PCI). Therefore the + * PCI block has to be treated as a cascaded interrupt controller connected + * to the MPIC. + */ + +void __init tsi108_pci_int_init(struct device_node *node) +{ + DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); + + pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, + 0, &pci_irq_host_ops, 0); + if (pci_irq_host == NULL) { + printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n"); + return; + } + + init_pci_source(); +} + +void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int cascade_irq = get_pci_source(); + + if (cascade_irq != NO_IRQ) + generic_handle_irq(cascade_irq); + + chip->irq_eoi(&desc->irq_data); +} diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c new file mode 100644 index 00000000..984cd202 --- /dev/null +++ b/arch/powerpc/sysdev/uic.c @@ -0,0 +1,350 @@ +/* + * arch/powerpc/sysdev/uic.c + * + * IBM PowerPC 4xx Universal Interrupt Controller + * + * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
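 *
 * All UIC state sits behind DCRs at uic->dcrbase + offset, so for
 * example masking every interrupt source is a single move-to-DCR
 * (the init code below does exactly this):
 *
 *	mtdcr(uic->dcrbase + UIC_ER, 0);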
+ */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/reboot.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/sched.h> +#include <linux/signal.h> +#include <linux/sysdev.h> +#include <linux/device.h> +#include <linux/bootmem.h> +#include <linux/spinlock.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> +#include <asm/irq.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/dcr.h> + +#define NR_UIC_INTS 32 + +#define UIC_SR 0x0 +#define UIC_ER 0x2 +#define UIC_CR 0x3 +#define UIC_PR 0x4 +#define UIC_TR 0x5 +#define UIC_MSR 0x6 +#define UIC_VR 0x7 +#define UIC_VCR 0x8 + +struct uic *primary_uic; + +struct uic { + int index; + int dcrbase; + + spinlock_t lock; + + /* The remapper for this UIC */ + struct irq_host *irqhost; +}; + +static void uic_unmask_irq(struct irq_data *d) +{ + struct uic *uic = irq_data_get_irq_chip_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 er, sr; + + sr = 1 << (31-src); + spin_lock_irqsave(&uic->lock, flags); + /* ack level-triggered interrupts here */ + if (irqd_is_level_type(d)) + mtdcr(uic->dcrbase + UIC_SR, sr); + er = mfdcr(uic->dcrbase + UIC_ER); + er |= sr; + mtdcr(uic->dcrbase + UIC_ER, er); + spin_unlock_irqrestore(&uic->lock, flags); +} + +static void uic_mask_irq(struct irq_data *d) +{ + struct uic *uic = irq_data_get_irq_chip_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 er; + + spin_lock_irqsave(&uic->lock, flags); + er = mfdcr(uic->dcrbase + UIC_ER); + er &= ~(1 << (31 - src)); + mtdcr(uic->dcrbase + UIC_ER, er); + spin_unlock_irqrestore(&uic->lock, flags); +} + +static void uic_ack_irq(struct irq_data *d) +{ + struct uic *uic = irq_data_get_irq_chip_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + + spin_lock_irqsave(&uic->lock, flags); + mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src)); + spin_unlock_irqrestore(&uic->lock, flags); +} + +static void uic_mask_ack_irq(struct irq_data *d) +{ + struct uic *uic = irq_data_get_irq_chip_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + u32 er, sr; + + sr = 1 << (31-src); + spin_lock_irqsave(&uic->lock, flags); + er = mfdcr(uic->dcrbase + UIC_ER); + er &= ~sr; + mtdcr(uic->dcrbase + UIC_ER, er); + /* On the UIC, acking (i.e. clearing the SR bit) + * a level irq will have no effect if the interrupt + * is still asserted by the device, even if + * the interrupt is already masked. 
Therefore + * we only ack the egde interrupts here, while + * level interrupts are ack'ed after the actual + * isr call in the uic_unmask_irq() + */ + if (!irqd_is_level_type(d)) + mtdcr(uic->dcrbase + UIC_SR, sr); + spin_unlock_irqrestore(&uic->lock, flags); +} + +static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) +{ + struct uic *uic = irq_data_get_irq_chip_data(d); + unsigned int src = irqd_to_hwirq(d); + unsigned long flags; + int trigger, polarity; + u32 tr, pr, mask; + + switch (flow_type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_NONE: + uic_mask_irq(d); + return 0; + + case IRQ_TYPE_EDGE_RISING: + trigger = 1; polarity = 1; + break; + case IRQ_TYPE_EDGE_FALLING: + trigger = 1; polarity = 0; + break; + case IRQ_TYPE_LEVEL_HIGH: + trigger = 0; polarity = 1; + break; + case IRQ_TYPE_LEVEL_LOW: + trigger = 0; polarity = 0; + break; + default: + return -EINVAL; + } + + mask = ~(1 << (31 - src)); + + spin_lock_irqsave(&uic->lock, flags); + tr = mfdcr(uic->dcrbase + UIC_TR); + pr = mfdcr(uic->dcrbase + UIC_PR); + tr = (tr & mask) | (trigger << (31-src)); + pr = (pr & mask) | (polarity << (31-src)); + + mtdcr(uic->dcrbase + UIC_PR, pr); + mtdcr(uic->dcrbase + UIC_TR, tr); + + spin_unlock_irqrestore(&uic->lock, flags); + + return 0; +} + +static struct irq_chip uic_irq_chip = { + .name = "UIC", + .irq_unmask = uic_unmask_irq, + .irq_mask = uic_mask_irq, + .irq_mask_ack = uic_mask_ack_irq, + .irq_ack = uic_ack_irq, + .irq_set_type = uic_set_irq_type, +}; + +static int uic_host_map(struct irq_host *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct uic *uic = h->host_data; + + irq_set_chip_data(virq, uic); + /* Despite the name, handle_level_irq() works for both level + * and edge irqs on UIC. FIXME: check this is correct */ + irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); + + /* Set default irq type */ + irq_set_irq_type(virq, IRQ_TYPE_NONE); + + return 0; +} + +static int uic_host_xlate(struct irq_host *h, struct device_node *ct, + const u32 *intspec, unsigned int intsize, + irq_hw_number_t *out_hwirq, unsigned int *out_type) + +{ + /* UIC intspecs must have 2 cells */ + BUG_ON(intsize != 2); + *out_hwirq = intspec[0]; + *out_type = intspec[1]; + return 0; +} + +static struct irq_host_ops uic_host_ops = { + .map = uic_host_map, + .xlate = uic_host_xlate, +}; + +void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_data *idata = irq_desc_get_irq_data(desc); + struct uic *uic = irq_get_handler_data(virq); + u32 msr; + int src; + int subvirq; + + raw_spin_lock(&desc->lock); + if (irqd_is_level_type(idata)) + chip->irq_mask(idata); + else + chip->irq_mask_ack(idata); + raw_spin_unlock(&desc->lock); + + msr = mfdcr(uic->dcrbase + UIC_MSR); + if (!msr) /* spurious interrupt */ + goto uic_irq_ret; + + src = 32 - ffs(msr); + + subvirq = irq_linear_revmap(uic->irqhost, src); + generic_handle_irq(subvirq); + +uic_irq_ret: + raw_spin_lock(&desc->lock); + if (irqd_is_level_type(idata)) + chip->irq_ack(idata); + if (!irqd_irq_disabled(idata) && chip->irq_unmask) + chip->irq_unmask(idata); + raw_spin_unlock(&desc->lock); +} + +static struct uic * __init uic_init_one(struct device_node *node) +{ + struct uic *uic; + const u32 *indexp, *dcrreg; + int len; + + BUG_ON(! of_device_is_compatible(node, "ibm,uic")); + + uic = kzalloc(sizeof(*uic), GFP_KERNEL); + if (! uic) + return NULL; /* FIXME: panic? 
*/ + + spin_lock_init(&uic->lock); + indexp = of_get_property(node, "cell-index", &len); + if (!indexp || (len != sizeof(u32))) { + printk(KERN_ERR "uic: Device node %s has missing or invalid " + "cell-index property\n", node->full_name); + return NULL; + } + uic->index = *indexp; + + dcrreg = of_get_property(node, "dcr-reg", &len); + if (!dcrreg || (len != 2*sizeof(u32))) { + printk(KERN_ERR "uic: Device node %s has missing or invalid " + "dcr-reg property\n", node->full_name); + return NULL; + } + uic->dcrbase = *dcrreg; + + uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, + NR_UIC_INTS, &uic_host_ops, -1); + if (! uic->irqhost) + return NULL; /* FIXME: panic? */ + + uic->irqhost->host_data = uic; + + /* Start with all interrupts disabled, level and non-critical */ + mtdcr(uic->dcrbase + UIC_ER, 0); + mtdcr(uic->dcrbase + UIC_CR, 0); + mtdcr(uic->dcrbase + UIC_TR, 0); + /* Clear any pending interrupts, in case the firmware left some */ + mtdcr(uic->dcrbase + UIC_SR, 0xffffffff); + + printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index, + NR_UIC_INTS, uic->dcrbase); + + return uic; +} + +void __init uic_init_tree(void) +{ + struct device_node *np; + struct uic *uic; + const u32 *interrupts; + + /* First locate and initialize the top-level UIC */ + for_each_compatible_node(np, NULL, "ibm,uic") { + interrupts = of_get_property(np, "interrupts", NULL); + if (!interrupts) + break; + } + + BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the + * top-level interrupt controller */ + primary_uic = uic_init_one(np); + if (!primary_uic) + panic("Unable to initialize primary UIC %s\n", np->full_name); + + irq_set_default_host(primary_uic->irqhost); + of_node_put(np); + + /* The scan again for cascaded UICs */ + for_each_compatible_node(np, NULL, "ibm,uic") { + interrupts = of_get_property(np, "interrupts", NULL); + if (interrupts) { + /* Secondary UIC */ + int cascade_virq; + + uic = uic_init_one(np); + if (! uic) + panic("Unable to initialize a secondary UIC %s\n", + np->full_name); + + cascade_virq = irq_of_parse_and_map(np, 0); + + irq_set_handler_data(cascade_virq, uic); + irq_set_chained_handler(cascade_virq, uic_irq_cascade); + + /* FIXME: setup critical cascade?? */ + } + } +} + +/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ +unsigned int uic_get_irq(void) +{ + u32 msr; + int src; + + BUG_ON(! primary_uic); + + msr = mfdcr(primary_uic->dcrbase + UIC_MSR); + src = 32 - ffs(msr); + + return irq_linear_revmap(primary_uic->irqhost, src); +} diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig new file mode 100644 index 00000000..0031eda3 --- /dev/null +++ b/arch/powerpc/sysdev/xics/Kconfig @@ -0,0 +1,13 @@ +config PPC_XICS + def_bool n + select PPC_SMP_MUXED_IPI + +config PPC_ICP_NATIVE + def_bool n + +config PPC_ICP_HV + def_bool n + +config PPC_ICS_RTAS + def_bool n + diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile new file mode 100644 index 00000000..b75a6059 --- /dev/null +++ b/arch/powerpc/sysdev/xics/Makefile @@ -0,0 +1,6 @@ +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-y += xics-common.o +obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o +obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o +obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c new file mode 100644 index 00000000..9518d367 --- /dev/null +++ b/arch/powerpc/sysdev/xics/icp-hv.c @@ -0,0 +1,164 @@ +/* + * Copyright 2011 IBM Corporation. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/io.h>
+#include <asm/hvcall.h>
+
+static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+
+	rc = plpar_hcall(H_XIRR, retbuf, cppr);
+	if (rc != H_SUCCESS)
+		panic("bad return code xirr - rc = %lx\n", rc);
+	return (unsigned int)retbuf[0];
+}
+
+static inline void icp_hv_set_xirr(unsigned int value)
+{
+	long rc = plpar_hcall_norets(H_EOI, value);
+	if (rc != H_SUCCESS)
+		panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
+}
+
+static inline void icp_hv_set_cppr(u8 value)
+{
+	long rc = plpar_hcall_norets(H_CPPR, value);
+	if (rc != H_SUCCESS)
+		panic("bad return code cppr - rc = %lx\n", rc);
+}
+
+static inline void icp_hv_set_qirr(int n_cpu, u8 value)
+{
+	long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
+				     value);
+	if (rc != H_SUCCESS)
+		panic("bad return code qirr - rc = %lx\n", rc);
+}
+
+static void icp_hv_eoi(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+	iosync();
+	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
+}
+
+static void icp_hv_teardown_cpu(void)
+{
+	int cpu = smp_processor_id();
+
+	/* Clear any pending IPI */
+	icp_hv_set_qirr(cpu, 0xff);
+}
+
+static void icp_hv_flush_ipi(void)
+{
+	/* We take the IPI irq but never return, so we need to EOI the
+	 * IPI while leaving our priority at 0.
+	 *
+	 * Should we check all the other interrupts too?
+	 * Should we be flagging the idle loop instead?
+	 * Or creating some task to be scheduled?
+	 */
+
+	icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_hv_get_irq(void)
+{
+	unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
+	unsigned int vec = xirr & 0x00ffffff;
+	unsigned int irq;
+
+	if (vec == XICS_IRQ_SPURIOUS)
+		return NO_IRQ;
+
+	irq = irq_radix_revmap_lookup(xics_host, vec);
+	if (likely(irq != NO_IRQ)) {
+		xics_push_cppr(vec);
+		return irq;
+	}
+
+	/* We don't have a linux mapping, so have RTAS mask it. */
+	xics_mask_unknown_vec(vec);
+
+	/* We might learn about it later, so EOI it */
+	icp_hv_set_xirr(xirr);
+
+	return NO_IRQ;
+}
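+
+/*
+ * A sketch of the presentation-controller protocol the helpers above
+ * drive: reading XIRR returns (old CPPR << 24 | vector) and raises
+ * the CPU priority to that of the accepted interrupt; writing the
+ * same word back is the EOI that restores the old priority:
+ *
+ *	xirr = icp_hv_get_xirr(cppr);
+ *	vec  = xirr & 0x00ffffff;
+ *	...handle vec...
+ *	icp_hv_set_xirr(xirr);
+ */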
+
+static void icp_hv_set_cpu_priority(unsigned char cppr)
+{
+	xics_set_base_cppr(cppr);
+	icp_hv_set_cppr(cppr);
+	iosync();
+}
+
+#ifdef CONFIG_SMP
+
+static void icp_hv_cause_ipi(int cpu, unsigned long data)
+{
+	icp_hv_set_qirr(cpu, IPI_PRIORITY);
+}
+
+static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	icp_hv_set_qirr(cpu, 0xff);
+
+	return smp_ipi_demux();
+}
+
+#endif /* CONFIG_SMP */
+
+static const struct icp_ops icp_hv_ops = {
+	.get_irq	= icp_hv_get_irq,
+	.eoi		= icp_hv_eoi,
+	.set_priority	= icp_hv_set_cpu_priority,
+	.teardown_cpu	= icp_hv_teardown_cpu,
+	.flush_ipi	= icp_hv_flush_ipi,
+#ifdef CONFIG_SMP
+	.ipi_action	= icp_hv_ipi_action,
+	.cause_ipi	= icp_hv_cause_ipi,
+#endif
+};
+
+int icp_hv_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
+	if (!np)
+		np = of_find_node_by_type(NULL,
+					  "PowerPC-External-Interrupt-Presentation");
+	if (!np)
+		return -ENODEV;
+
+	icp_ops = &icp_hv_ops;
+
+	return 0;
+}
+
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
new file mode 100644
index 00000000..1f15ad43
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+
+struct icp_ipl {
+	union {
+		u32 word;
+		u8 bytes[4];
+	} xirr_poll;
+	union {
+		u32 word;
+		u8 bytes[4];
+	} xirr;
+	u32 dummy;
+	union {
+		u32 word;
+		u8 bytes[4];
+	} qirr;
+	u32 link_a;
+	u32 link_b;
+	u32 link_c;
+};
+
+static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
+
+static inline unsigned int icp_native_get_xirr(void)
+{
+	int cpu = smp_processor_id();
+
+	return in_be32(&icp_native_regs[cpu]->xirr.word);
+}
+
+static inline void icp_native_set_xirr(unsigned int value)
+{
+	int cpu = smp_processor_id();
+
+	out_be32(&icp_native_regs[cpu]->xirr.word, value);
+}
+
+static inline void icp_native_set_cppr(u8 value)
+{
+	int cpu = smp_processor_id();
+
+	out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
+}
+
+static inline void icp_native_set_qirr(int n_cpu, u8 value)
+{
+	out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
+}
+
+static void icp_native_set_cpu_priority(unsigned char cppr)
+{
+	xics_set_base_cppr(cppr);
+	icp_native_set_cppr(cppr);
+	iosync();
+}
+
+static void icp_native_eoi(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+	iosync();
+	icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
+}
+
+static void icp_native_teardown_cpu(void)
+{
+	int cpu = smp_processor_id();
+
+	/* Clear any pending IPI */
+	icp_native_set_qirr(cpu, 0xff);
+}
+
+static void icp_native_flush_ipi(void)
+{
+	/* We take the IPI irq but never return, so we need to EOI the
+	 * IPI while leaving our priority at 0.
+	 *
+	 * Should we check all the other interrupts too?
+	 * Should we be flagging the idle loop instead?
+	 * Or creating some task to be scheduled?
+	 */
+
+	icp_native_set_xirr((0x00 << 24) | XICS_IPI);
+}
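+
+/*
+ * Note on the MMIO layout above: the most-significant byte of XIRR is
+ * the CPPR, so a priority-only update needs a single byte store, not
+ * a read-modify-write of the whole word; that is all
+ * icp_native_set_cppr() does:
+ *
+ *	out_8(&icp_native_regs[cpu]->xirr.bytes[0], cppr);
+ */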
+
+static unsigned int icp_native_get_irq(void)
+{
+	unsigned int xirr = icp_native_get_xirr();
+	unsigned int vec = xirr & 0x00ffffff;
+	unsigned int irq;
+
+	if (vec == XICS_IRQ_SPURIOUS)
+		return NO_IRQ;
+
+	irq = irq_radix_revmap_lookup(xics_host, vec);
+	if (likely(irq != NO_IRQ)) {
+		xics_push_cppr(vec);
+		return irq;
+	}
+
+	/* We don't have a linux mapping, so have RTAS mask it. */
+	xics_mask_unknown_vec(vec);
+
+	/* We might learn about it later, so EOI it */
+	icp_native_set_xirr(xirr);
+
+	return NO_IRQ;
+}
+
+#ifdef CONFIG_SMP
+
+static void icp_native_cause_ipi(int cpu, unsigned long data)
+{
+	icp_native_set_qirr(cpu, IPI_PRIORITY);
+}
+
+static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	icp_native_set_qirr(cpu, 0xff);
+
+	return smp_ipi_demux();
+}
+
+#endif /* CONFIG_SMP */
+
+static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
+					 unsigned long size)
+{
+	char *rname;
+	int i, cpu = -1;
+
+	/* This may look gross but it's good enough for now, we don't quite
+	 * have a hard -> linux processor id matching.
+	 */
+	for_each_possible_cpu(i) {
+		if (!cpu_present(i))
+			continue;
+		if (hw_id == get_hard_smp_processor_id(i)) {
+			cpu = i;
+			break;
+		}
+	}
+
+	/* Fail, skip that CPU. Don't print, it's normal, some XICS come up
+	 * with way more entries in there than you have CPUs
+	 */
+	if (cpu == -1)
+		return 0;
+
+	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
+			  cpu, hw_id);
+
+	if (!request_mem_region(addr, size, rname)) {
+		pr_warning("icp_native: Could not reserve ICP MMIO"
+			   " for CPU %d, interrupt server #0x%x\n",
+			   cpu, hw_id);
+		return -EBUSY;
+	}
+
+	icp_native_regs[cpu] = ioremap(addr, size);
+	if (!icp_native_regs[cpu]) {
+		pr_warning("icp_native: Failed ioremap for CPU %d, "
+			   "interrupt server #0x%x, addr %#lx\n",
+			   cpu, hw_id, addr);
+		release_mem_region(addr, size);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __init icp_native_init_one_node(struct device_node *np,
+					   unsigned int *indx)
+{
+	unsigned int ilen;
+	const u32 *ireg;
+	int i;
+	int reg_tuple_size;
+	int num_servers = 0;
+
+	/* This code makes the theoretically broken assumption that the
+	 * interrupt server numbers are the same as the hard CPU numbers.
+	 * This happens to be the case so far but we are playing with fire...
+	 * should be fixed one of these days. -BenH.
+	 */
+	ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);
+
+	/* Does that ever happen? We'll know soon enough... but even the
+	 * good old f80 has this property.
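+	 *
+	 * The property holds two cells, the first interrupt server number
+	 * and the server count; e.g. (illustrative value):
+	 *
+	 *	ibm,interrupt-server-ranges = <0x0 0x8>;
+	 *
+	 * describes servers 0 through 7, matching the *indx/num_servers
+	 * reads below.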
+ */ + WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32))); + + if (ireg) { + *indx = of_read_number(ireg, 1); + if (ilen >= 2*sizeof(u32)) + num_servers = of_read_number(ireg + 1, 1); + } + + ireg = of_get_property(np, "reg", &ilen); + if (!ireg) { + pr_err("icp_native: Can't find interrupt reg property"); + return -1; + } + + reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4; + if (((ilen % reg_tuple_size) != 0) + || (num_servers && (num_servers != (ilen / reg_tuple_size)))) { + pr_err("icp_native: ICP reg len (%d) != num servers (%d)", + ilen / reg_tuple_size, num_servers); + return -1; + } + + for (i = 0; i < (ilen / reg_tuple_size); i++) { + struct resource r; + int err; + + err = of_address_to_resource(np, i, &r); + if (err) { + pr_err("icp_native: Could not translate ICP MMIO" + " for interrupt server 0x%x (%d)\n", *indx, err); + return -1; + } + + if (icp_native_map_one_cpu(*indx, r.start, r.end - r.start)) + return -1; + + (*indx)++; + } + return 0; +} + +static const struct icp_ops icp_native_ops = { + .get_irq = icp_native_get_irq, + .eoi = icp_native_eoi, + .set_priority = icp_native_set_cpu_priority, + .teardown_cpu = icp_native_teardown_cpu, + .flush_ipi = icp_native_flush_ipi, +#ifdef CONFIG_SMP + .ipi_action = icp_native_ipi_action, + .cause_ipi = icp_native_cause_ipi, +#endif +}; + +int icp_native_init(void) +{ + struct device_node *np; + u32 indx = 0; + int found = 0; + + for_each_compatible_node(np, NULL, "ibm,ppc-xicp") + if (icp_native_init_one_node(np, &indx) == 0) + found = 1; + if (!found) { + for_each_node_by_type(np, + "PowerPC-External-Interrupt-Presentation") { + if (icp_native_init_one_node(np, &indx) == 0) + found = 1; + } + } + + if (found == 0) + return -ENODEV; + + icp_ops = &icp_native_ops; + + return 0; +} diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c new file mode 100644 index 00000000..c782f85c --- /dev/null +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -0,0 +1,240 @@ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/irq.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/msi.h> + +#include <asm/prom.h> +#include <asm/smp.h> +#include <asm/machdep.h> +#include <asm/irq.h> +#include <asm/errno.h> +#include <asm/xics.h> +#include <asm/rtas.h> + +/* RTAS service tokens */ +static int ibm_get_xive; +static int ibm_set_xive; +static int ibm_int_on; +static int ibm_int_off; + +static int ics_rtas_map(struct ics *ics, unsigned int virq); +static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec); +static long ics_rtas_get_server(struct ics *ics, unsigned long vec); +static int ics_rtas_host_match(struct ics *ics, struct device_node *node); + +/* Only one global & state struct ics */ +static struct ics ics_rtas = { + .map = ics_rtas_map, + .mask_unknown = ics_rtas_mask_unknown, + .get_server = ics_rtas_get_server, + .host_match = ics_rtas_host_match, +}; + +static void ics_rtas_unmask_irq(struct irq_data *d) +{ + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + int call_status; + int server; + + pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq); + + if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) + return; + + server = xics_get_irq_server(d->irq, d->affinity, 0); + + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, + DEFAULT_PRIORITY); + if (call_status != 0) { + printk(KERN_ERR + "%s: ibm_set_xive irq %u server %x 
+
+static void ics_rtas_unmask_irq(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+	int call_status;
+	int server;
+
+	pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
+
+	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+		return;
+
+	server = xics_get_irq_server(d->irq, d->affinity, 0);
+
+	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
+				DEFAULT_PRIORITY);
+	if (call_status != 0) {
+		printk(KERN_ERR
+			"%s: ibm_set_xive irq %u server %x returned %d\n",
+			__func__, hw_irq, server, call_status);
+		return;
+	}
+
+	/* Now unmask the interrupt (often a no-op) */
+	call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
+	if (call_status != 0) {
+		printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
+			__func__, hw_irq, call_status);
+		return;
+	}
+}
+
+static unsigned int ics_rtas_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+	/*
+	 * The generic MSI code returns with the interrupt disabled on the
+	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
+	 * at that level, so we do it here by hand.
+	 */
+	if (d->msi_desc)
+		unmask_msi_irq(d);
+#endif
+	/* unmask it */
+	ics_rtas_unmask_irq(d);
+	return 0;
+}
+
+static void ics_rtas_mask_real_irq(unsigned int hw_irq)
+{
+	int call_status;
+
+	if (hw_irq == XICS_IPI)
+		return;
+
+	call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
+	if (call_status != 0) {
+		printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
+			__func__, hw_irq, call_status);
+		return;
+	}
+
+	/* Have to set XIVE to 0xff to be able to remove a slot */
+	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
+				xics_default_server, 0xff);
+	if (call_status != 0) {
+		printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
+			__func__, hw_irq, call_status);
+		return;
+	}
+}
+
+static void ics_rtas_mask_irq(struct irq_data *d)
+{
+	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+	pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
+
+	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+		return;
+	ics_rtas_mask_real_irq(hw_irq);
+}
+
+static int ics_rtas_set_affinity(struct irq_data *d,
+				 const struct cpumask *cpumask,
+				 bool force)
+{
+	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+	int status;
+	int xics_status[2];
+	int irq_server;
+
+	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
+		return -1;
+
+	status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);
+
+	if (status) {
+		printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
+			__func__, hw_irq, status);
+		return -1;
+	}
+
+	irq_server = xics_get_irq_server(d->irq, cpumask, 1);
+	if (irq_server == -1) {
+		char cpulist[128];
+		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
+		printk(KERN_WARNING
+			"%s: No online cpus in the mask %s for irq %d\n",
+			__func__, cpulist, d->irq);
+		return -1;
+	}
+
+	status = rtas_call(ibm_set_xive, 3, 1, NULL,
+			   hw_irq, irq_server, xics_status[1]);
+
+	if (status) {
+		printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
+			__func__, hw_irq, status);
+		return -1;
+	}
+
+	return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_rtas_irq_chip = {
+	.name = "XICS",
+	.irq_startup = ics_rtas_startup,
+	.irq_mask = ics_rtas_mask_irq,
+	.irq_unmask = ics_rtas_unmask_irq,
+	.irq_eoi = NULL, /* Patched at init time */
+	.irq_set_affinity = ics_rtas_set_affinity
+};
+
+static int ics_rtas_map(struct ics *ics, unsigned int virq)
+{
+	unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
+	int status[2];
+	int rc;
+
+	if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
+		return -EINVAL;
+
+	/* Check if RTAS knows about this interrupt */
+	rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
+	if (rc)
+		return -ENXIO;
+
+	irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
+	irq_set_chip_data(virq, &ics_rtas);
+
+	return 0;
+}
+
+static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
+{
+	ics_rtas_mask_real_irq(vec);
+}
+
+static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
+{
+	int rc, status[2];
+
+	rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
+	if (rc)
+		return -1;
+	return status[0];
+}
+
+static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
+{
+	/* IBM machines have interrupt parents of various funky types for
+	 * things like vdevices, events, etc... The trick we use here is to
+	 * match everything except the legacy 8259, which is compatible
+	 * with "chrp,iic".
+	 */
+	return !of_device_is_compatible(node, "chrp,iic");
+}
+
+int ics_rtas_init(void)
+{
+	ibm_get_xive = rtas_token("ibm,get-xive");
+	ibm_set_xive = rtas_token("ibm,set-xive");
+	ibm_int_on = rtas_token("ibm,int-on");
+	ibm_int_off = rtas_token("ibm,int-off");
+
+	/* We enable the RTAS "ICS" only if RTAS is present with the
+	 * appropriate tokens
+	 */
+	if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
+	    ibm_set_xive == RTAS_UNKNOWN_SERVICE)
+		return -ENODEV;
+
+	/* We need to patch our irq chip's EOI to point to the
+	 * right ICP
+	 */
+	ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;
+
+	/* Register ourselves */
+	xics_register_ics(&ics_rtas);
+
+	return 0;
+}
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
new file mode 100644
index 00000000..445c5a01
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright 2011 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/rtas.h>
+#include <asm/xics.h>
+#include <asm/firmware.h>
+
+/* Globals common to all ICP/ICS implementations */
+const struct icp_ops *icp_ops;
+
+unsigned int xics_default_server = 0xff;
+unsigned int xics_default_distrib_server = 0;
+unsigned int xics_interrupt_server_size = 8;
+
+DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
+
+struct irq_host *xics_host;
+
+static LIST_HEAD(ics_list);
+
+void xics_update_irq_servers(void)
+{
+	int i, j;
+	struct device_node *np;
+	u32 ilen;
+	const u32 *ireg;
+	u32 hcpuid;
+
+	/* Find the server numbers for the boot cpu. */
+	np = of_get_cpu_node(boot_cpuid, NULL);
+	BUG_ON(!np);
+
+	hcpuid = get_hard_smp_processor_id(boot_cpuid);
+	xics_default_server = xics_default_distrib_server = hcpuid;
+
+	pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);
+
+	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+	if (!ireg) {
+		of_node_put(np);
+		return;
+	}
+
+	i = ilen / sizeof(int);
+
+	/* The global interrupt distribution server is specified in the last
+	 * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the
+	 * entry for the current boot cpu id from this property and use it
+	 * as the default distribution server.
+	 */
+	for (j = 0; j < i; j += 2) {
+		if (ireg[j] == hcpuid) {
+			xics_default_distrib_server = ireg[j+1];
+			break;
+		}
+	}
+	pr_devel("xics: xics_default_distrib_server = 0x%x\n",
+		 xics_default_distrib_server);
+	of_node_put(np);
+}
+
+/* GIQ stuff, currently only supported on RTAS setups, will have
+ * to be sorted properly for bare metal
+ */
+void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
+{
+#ifdef CONFIG_PPC_RTAS
+	int index;
+	int status;
+
+	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
+		return;
+
+	index = (1UL << xics_interrupt_server_size) - 1 - gserver;
+
+	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
+
+	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
+	     GLOBAL_INTERRUPT_QUEUE, index, join, status);
+#endif
+}
+
+void xics_setup_cpu(void)
+{
+	icp_ops->set_priority(LOWEST_PRIORITY);
+
+	xics_set_cpu_giq(xics_default_distrib_server, 1);
+}
+
+void xics_mask_unknown_vec(unsigned int vec)
+{
+	struct ics *ics;
+
+	pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
+
+	list_for_each_entry(ics, &ics_list, link)
+		ics->mask_unknown(ics, vec);
+}
+
+
+#ifdef CONFIG_SMP
+
+static void xics_request_ipi(void)
+{
+	unsigned int ipi;
+
+	ipi = irq_create_mapping(xics_host, XICS_IPI);
+	BUG_ON(ipi == NO_IRQ);
+
+	/*
+	 * IPIs are marked IRQF_DISABLED as they must run with irqs
+	 * disabled, and PERCPU. The handler was set in map.
+	 */
+	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
+			   IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
+}
+
+int __init xics_smp_probe(void)
+{
+	/* Set up the cause_ipi callback based on which ICP is used */
+	smp_ops->cause_ipi = icp_ops->cause_ipi;
+
+	/* Register all the IPIs */
+	xics_request_ipi();
+
+	return cpumask_weight(cpu_possible_mask);
+}
+
+#endif /* CONFIG_SMP */
+
+void xics_teardown_cpu(void)
+{
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+	/*
+	 * we have to reset the cppr index to 0 because we're
+	 * not going to return from the IPI
+	 */
+	os_cppr->index = 0;
+	icp_ops->set_priority(0);
+	icp_ops->teardown_cpu();
+}
+
+void xics_kexec_teardown_cpu(int secondary)
+{
+	xics_teardown_cpu();
+
+	icp_ops->flush_ipi();
+
+	/*
+	 * Some machines need to have at least one cpu in the GIQ,
+	 * so leave the master cpu in the group.
+	 */
+	if (secondary)
+		xics_set_cpu_giq(xics_default_distrib_server, 0);
+}
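/*
 * Illustrative worked example (editor's note, not part of this commit):
 * with the default xics_interrupt_server_size of 8, the set-indicator
 * index computed by xics_set_cpu_giq() for a global server gserver is
 *
 *	index = (1 << 8) - 1 - gserver = 255 - gserver
 *
 * so joining the GIQ as global server 0 uses indicator index 255, and
 * server 1 uses index 254.
 */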
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Interrupts are disabled. */
+void xics_migrate_irqs_away(void)
+{
+	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
+	unsigned int irq, virq;
+
+	/* If we used to be the default server, move to the new "boot_cpuid" */
+	if (hw_cpu == xics_default_server)
+		xics_update_irq_servers();
+
+	/* Reject any interrupt that was queued to us... */
+	icp_ops->set_priority(0);
+
+	/* Remove ourselves from the global interrupt queue */
+	xics_set_cpu_giq(xics_default_distrib_server, 0);
+
+	/* Allow IPIs again... */
+	icp_ops->set_priority(DEFAULT_PRIORITY);
+
+	for_each_irq(virq) {
+		struct irq_desc *desc;
+		struct irq_chip *chip;
+		long server;
+		unsigned long flags;
+		struct ics *ics;
+
+		/* We can't set affinity on ISA interrupts */
+		if (virq < NUM_ISA_INTERRUPTS)
+			continue;
+		if (!virq_is_host(virq, xics_host))
+			continue;
+		irq = (unsigned int)virq_to_hw(virq);
+		/* We still need to get IPIs. */
+		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+			continue;
+		desc = irq_to_desc(virq);
+		/* We only need to migrate enabled IRQs */
+		if (!desc || !desc->action)
+			continue;
+		chip = irq_desc_get_chip(desc);
+		if (!chip || !chip->irq_set_affinity)
+			continue;
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+
+		/* Locate the interrupt server */
+		server = -1;
+		ics = irq_get_chip_data(virq);
+		if (ics)
+			server = ics->get_server(ics, irq);
+		if (server < 0) {
+			printk(KERN_ERR "%s: Can't find server for irq %d\n",
+			       __func__, irq);
+			goto unlock;
+		}
+
+		/* We only support delivery to all cpus or to one cpu.
+		 * The irq has to be migrated only in the single cpu
+		 * case.
+		 */
+		if (server != hw_cpu)
+			goto unlock;
+
+		/* This is expected during cpu offline. */
+		if (cpu_online(cpu))
+			pr_warning("IRQ %u affinity broken off cpu %u\n",
+				   virq, cpu);
+
+		/* Reset affinity to all cpus */
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+		irq_set_affinity(virq, cpu_all_mask);
+		continue;
+unlock:
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_SMP
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not, we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ *
+ * We need to fix this to implement support for the links.
+ */
+int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
+			unsigned int strict_check)
+{
+	if (!distribute_irqs)
+		return xics_default_server;
+
+	if (!cpumask_subset(cpu_possible_mask, cpumask)) {
+		int server = cpumask_first_and(cpu_online_mask, cpumask);
+
+		if (server < nr_cpu_ids)
+			return get_hard_smp_processor_id(server);
+
+		if (strict_check)
+			return -1;
+	}
+
+	/*
+	 * Work around an issue with some versions of JS20 firmware that
+	 * deliver interrupts to cpus which haven't been started. This
+	 * happens when using the maxcpus= boot option.
+	 */
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
+		return xics_default_distrib_server;
+
+	return xics_default_server;
+}
+#endif /* CONFIG_SMP */
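/*
 * Illustrative worked example (editor's note, not part of this commit),
 * assuming distribute_irqs is set, more than one cpu is possible, and all
 * present cpus are online:
 *
 *	xics_get_irq_server(virq, cpu_all_mask, 0)
 *		returns xics_default_distrib_server (global delivery);
 *	xics_get_irq_server(virq, cpumask_of(4), 0)
 *		returns get_hard_smp_processor_id(4) if cpu 4 is online;
 *	xics_get_irq_server(virq, <mask with no online cpu>, 1)
 *		returns -1, since the strict check forbids falling back.
 */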
+
+static int xics_host_match(struct irq_host *h, struct device_node *node)
+{
+	struct ics *ics;
+
+	list_for_each_entry(ics, &ics_list, link)
+		if (ics->host_match(ics, node))
+			return 1;
+
+	return 0;
+}
+
+/* Dummies */
+static void xics_ipi_unmask(struct irq_data *d) { }
+static void xics_ipi_mask(struct irq_data *d) { }
+
+static struct irq_chip xics_ipi_chip = {
+	.name = "XICS",
+	.irq_eoi = NULL, /* Patched at init time */
+	.irq_mask = xics_ipi_mask,
+	.irq_unmask = xics_ipi_unmask,
+};
+
+static int xics_host_map(struct irq_host *h, unsigned int virq,
+			 irq_hw_number_t hw)
+{
+	struct ics *ics;
+
+	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
+
+	/* Insert the interrupt mapping into the radix tree for fast lookup */
+	irq_radix_revmap_insert(xics_host, virq, hw);
+
+	/* They aren't all level sensitive but we just don't really know */
+	irq_set_status_flags(virq, IRQ_LEVEL);
+
+	/* Don't call into the ICS for IPIs */
+	if (hw == XICS_IPI) {
+		irq_set_chip_and_handler(virq, &xics_ipi_chip,
+					 handle_percpu_irq);
+		return 0;
+	}
+
+	/* Let the ICS set up the chip data */
+	list_for_each_entry(ics, &ics_list, link)
+		if (ics->map(ics, virq) == 0)
+			return 0;
+
+	return -EINVAL;
+}
+
+static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
+			   const u32 *intspec, unsigned int intsize,
+			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+	/* The current xics implementation translates everything
+	 * to level. It is not technically right for MSIs but this
+	 * is irrelevant at this point. We might get smarter in the future.
+	 */
+	*out_hwirq = intspec[0];
+	*out_flags = IRQ_TYPE_LEVEL_LOW;
+
+	return 0;
+}
+
+static struct irq_host_ops xics_host_ops = {
+	.match = xics_host_match,
+	.map = xics_host_map,
+	.xlate = xics_host_xlate,
+};
+
+static void __init xics_init_host(void)
+{
+	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
+				   XICS_IRQ_SPURIOUS);
+	BUG_ON(xics_host == NULL);
+	irq_set_default_host(xics_host);
+}
+
+void __init xics_register_ics(struct ics *ics)
+{
+	list_add(&ics->link, &ics_list);
+}
+
+static void __init xics_get_server_size(void)
+{
+	struct device_node *np;
+	const u32 *isize;
+
+	/* We fetch the interrupt server size from the first ICS node
+	 * we find, if any
+	 */
+	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
+	if (!np)
+		return;
+	isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
+	if (isize)
+		xics_interrupt_server_size = *isize;
+	of_node_put(np);
+}
+
+void __init xics_init(void)
+{
+	int rc = -1;
+
+	/* First locate the ICP */
+#ifdef CONFIG_PPC_ICP_HV
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		rc = icp_hv_init();
+#endif
+#ifdef CONFIG_PPC_ICP_NATIVE
+	if (rc < 0)
+		rc = icp_native_init();
+#endif
+	if (rc < 0) {
+		pr_warning("XICS: Cannot find a Presentation Controller!\n");
+		return;
+	}
+
+	/* Copy the get_irq callback over to ppc_md */
+	ppc_md.get_irq = icp_ops->get_irq;
+
+	/* Patch up the IPI chip EOI */
+	xics_ipi_chip.irq_eoi = icp_ops->eoi;
+
+	/* Now locate the ICS */
+#ifdef CONFIG_PPC_ICS_RTAS
+	rc = ics_rtas_init();
+#endif
+	if (rc < 0)
+		pr_warning("XICS: Cannot find a Source Controller!\n");
+
+	/* Initialize common bits */
+	xics_get_server_size();
+	xics_update_irq_servers();
+	xics_init_host();
+	xics_setup_cpu();
+}
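/*
 * Illustrative sketch (editor's note, not part of this commit): xics_init()
 * picks exactly one ICP backend to supply icp_ops, then lets each ICS
 * source controller register itself. A hypothetical extra ICS backend
 * would follow the same pattern as ics_rtas_init(), with stubbed-out
 * callbacks shown here only to make the shape of the interface clear:
 */
static int example_map(struct ics *ics, unsigned int virq)
{
	return -EINVAL;			/* stub: claims no interrupts */
}

static void example_mask_unknown(struct ics *ics, unsigned long vec)
{
}

static long example_get_server(struct ics *ics, unsigned long vec)
{
	return -1;			/* stub: no server known */
}

static int example_host_match(struct ics *ics, struct device_node *node)
{
	return 0;			/* stub: matches nothing */
}

static struct ics example_ics = {	/* hypothetical */
	.map		= example_map,
	.mask_unknown	= example_mask_unknown,
	.get_server	= example_get_server,
	.host_match	= example_host_match,
};

static int __init example_ics_init(void)
{
	xics_register_ics(&example_ics);	/* adds it to ics_list */
	return 0;
}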
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
new file mode 100644
index 00000000..61837997
--- /dev/null
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -0,0 +1,294 @@
+/*
+ * Interrupt controller driver for Xilinx Virtex FPGAs
+ *
+ * Copyright (C) 2007 Secret Lab Technologies Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+/*
+ * This is a driver for the interrupt controller typically found in
+ * Xilinx Virtex FPGA designs.
+ *
+ * The interrupt sense levels are hard coded into the FPGA design, with
+ * typically a 1:1 relationship between irq lines and devices (no shared
+ * irq lines). Therefore, this driver does not attempt to handle edge
+ * and level interrupts differently.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/i8259.h>
+#include <asm/irq.h>
+
+/*
+ * INTC Registers
+ */
+#define XINTC_ISR	0	/* Interrupt Status */
+#define XINTC_IPR	4	/* Interrupt Pending */
+#define XINTC_IER	8	/* Interrupt Enable */
+#define XINTC_IAR	12	/* Interrupt Acknowledge */
+#define XINTC_SIE	16	/* Set Interrupt Enable bits */
+#define XINTC_CIE	20	/* Clear Interrupt Enable bits */
+#define XINTC_IVR	24	/* Interrupt Vector */
+#define XINTC_MER	28	/* Master Enable */
+
+static struct irq_host *master_irqhost;
+
+#define XILINX_INTC_MAXIRQS	(32)
+
+/* The following table allows the interrupt type, edge or level,
+ * to be cached after being read from the device tree until the interrupt
+ * is mapped
+ */
+static int xilinx_intc_typetable[XILINX_INTC_MAXIRQS];
+
+/* Map the interrupt type from the device tree to the interrupt types
+ * used by the interrupt subsystem
+ */
+static unsigned char xilinx_intc_map_senses[] = {
+	IRQ_TYPE_EDGE_RISING,
+	IRQ_TYPE_EDGE_FALLING,
+	IRQ_TYPE_LEVEL_HIGH,
+	IRQ_TYPE_LEVEL_LOW,
+};
+
+/*
+ * The interrupt controller is set up such that it doesn't work well with
+ * the level interrupt handler in the kernel because the handler acks the
+ * interrupt before calling the application interrupt handler. To deal with
+ * that, we use 2 different irq chips so that different functions can be
+ * used for level and edge type interrupts.
+ *
+ * IRQ Chip common (across level and edge) operations
+ */
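/*
 * Illustrative sketch (editor's note, not part of this commit) of the
 * ordering problem described above. The generic handle_level_irq() flow
 * is roughly:
 *
 *	chip->irq_mask_ack(d);		// ack happens here, up front
 *	handle_irq_event(desc);		// device handler runs afterwards
 *	chip->irq_unmask(d);		// the level chip below acks here
 *
 * Acking this controller before the device handler has silenced a level
 * source would immediately re-latch the interrupt, so the level chip
 * defers the IAR write to its unmask hook.
 */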
+static void xilinx_intc_mask(struct irq_data *d)
+{
+	int irq = irqd_to_hwirq(d);
+	void *regs = irq_data_get_irq_chip_data(d);
+	pr_debug("mask: %d\n", irq);
+	out_be32(regs + XINTC_CIE, 1 << irq);
+}
+
+static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
+{
+	return 0;
+}
+
+/*
+ * IRQ Chip level operations
+ */
+static void xilinx_intc_level_unmask(struct irq_data *d)
+{
+	int irq = irqd_to_hwirq(d);
+	void *regs = irq_data_get_irq_chip_data(d);
+	pr_debug("unmask: %d\n", irq);
+	out_be32(regs + XINTC_SIE, 1 << irq);
+
+	/* ack level irqs here because they can't be acked in the ack
+	 * function: handle_level_irq acks the irq before calling the
+	 * interrupt handler
+	 */
+	out_be32(regs + XINTC_IAR, 1 << irq);
+}
+
+static struct irq_chip xilinx_intc_level_irqchip = {
+	.name = "Xilinx Level INTC",
+	.irq_mask = xilinx_intc_mask,
+	.irq_mask_ack = xilinx_intc_mask,
+	.irq_unmask = xilinx_intc_level_unmask,
+	.irq_set_type = xilinx_intc_set_type,
+};
+
+/*
+ * IRQ Chip edge operations
+ */
+static void xilinx_intc_edge_unmask(struct irq_data *d)
+{
+	int irq = irqd_to_hwirq(d);
+	void *regs = irq_data_get_irq_chip_data(d);
+	pr_debug("unmask: %d\n", irq);
+	out_be32(regs + XINTC_SIE, 1 << irq);
+}
+
+static void xilinx_intc_edge_ack(struct irq_data *d)
+{
+	int irq = irqd_to_hwirq(d);
+	void *regs = irq_data_get_irq_chip_data(d);
+	pr_debug("ack: %d\n", irq);
+	out_be32(regs + XINTC_IAR, 1 << irq);
+}
+
+static struct irq_chip xilinx_intc_edge_irqchip = {
+	.name = "Xilinx Edge INTC",
+	.irq_mask = xilinx_intc_mask,
+	.irq_unmask = xilinx_intc_edge_unmask,
+	.irq_ack = xilinx_intc_edge_ack,
+	.irq_set_type = xilinx_intc_set_type,
+};
+
+/*
+ * IRQ Host operations
+ */
+
+/**
+ * xilinx_intc_xlate - translate virq# from device tree interrupts property
+ */
+static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
+			     const u32 *intspec, unsigned int intsize,
+			     irq_hw_number_t *out_hwirq,
+			     unsigned int *out_flags)
+{
+	if ((intsize < 2) || (intspec[0] >= XILINX_INTC_MAXIRQS) ||
+	    (intspec[1] >= ARRAY_SIZE(xilinx_intc_map_senses)))
+		return -EINVAL;
+
+	/* keep a copy of the interrupt type until the interrupt is mapped
+	 */
+	xilinx_intc_typetable[intspec[0]] = xilinx_intc_map_senses[intspec[1]];
+
+	/* Xilinx uses 2 interrupt entries, the 1st being the h/w
+	 * interrupt number, the 2nd being the interrupt type, edge or level
+	 */
+	*out_hwirq = intspec[0];
+	*out_flags = xilinx_intc_map_senses[intspec[1]];
+
+	return 0;
+}
+
+static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
+			   irq_hw_number_t irq)
+{
+	irq_set_chip_data(virq, h->host_data);
+
+	if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
+	    xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
+		irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
+					 handle_level_irq);
+	} else {
+		irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
+					 handle_edge_irq);
+	}
+	return 0;
+}
+
+static struct irq_host_ops xilinx_intc_ops = {
+	.map = xilinx_intc_map,
+	.xlate = xilinx_intc_xlate,
+};
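/*
 * Illustrative worked example (editor's note, not part of this commit):
 * a device tree entry such as "interrupts = <3 0>;" reaches
 * xilinx_intc_xlate() as intspec[] = { 3, 0 }, i.e. hwirq 3 with sense
 * code 0, which the table above maps to IRQ_TYPE_EDGE_RISING:
 */
static void __init xilinx_intc_xlate_example(void)	/* hypothetical */
{
	u32 intspec[2] = { 3, 0 };	/* hwirq 3, sense code 0 */
	irq_hw_number_t hwirq;
	unsigned int flags;

	/* h and ct are unused by the xlate hook, so NULL is fine here */
	if (xilinx_intc_xlate(NULL, NULL, intspec, 2, &hwirq, &flags) == 0)
		pr_debug("hwirq %lu flags 0x%x\n", hwirq, flags);
}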
+
+struct irq_host * __init
+xilinx_intc_init(struct device_node *np)
+{
+	struct irq_host *irq;
+	void *regs;
+
+	/* Find and map the intc registers */
+	regs = of_iomap(np, 0);
+	if (!regs) {
+		pr_err("xilinx_intc: could not map registers\n");
+		return NULL;
+	}
+
+	/* Set up the interrupt controller */
+	out_be32(regs + XINTC_IER, 0);		/* disable all irqs */
+	out_be32(regs + XINTC_IAR, ~(u32) 0);	/* Acknowledge pending irqs */
+	out_be32(regs + XINTC_MER, 0x3UL);	/* Turn on the Master Enable. */
+
+	/* Allocate and initialize an irq_host structure. */
+	irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS,
+			     &xilinx_intc_ops, -1);
+	if (!irq)
+		panic(__FILE__ ": Cannot allocate IRQ host\n");
+	irq->host_data = regs;
+
+	return irq;
+}
+
+int xilinx_intc_get_irq(void)
+{
+	void *regs = master_irqhost->host_data;
+	pr_debug("get_irq:\n");
+	return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
+}
+
+#if defined(CONFIG_PPC_I8259)
+/*
+ * Support code for cascading to 8259 interrupt controllers
+ */
+static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int cascade_irq = i8259_irq();
+
+	if (cascade_irq)
+		generic_handle_irq(cascade_irq);
+
+	/* Let xilinx_intc end the interrupt */
+	chip->irq_unmask(&desc->irq_data);
+}
+
+static void __init xilinx_i8259_setup_cascade(void)
+{
+	struct device_node *cascade_node;
+	int cascade_irq;
+
+	/* Initialize the i8259 controller */
+	cascade_node = of_find_compatible_node(NULL, NULL, "chrp,iic");
+	if (!cascade_node)
+		return;
+
+	cascade_irq = irq_of_parse_and_map(cascade_node, 0);
+	if (!cascade_irq) {
+		pr_err("virtex_ml510: Failed to map cascade interrupt\n");
+		goto out;
+	}
+
+	i8259_init(cascade_node, 0);
+	irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade);
+
+	/* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
+	/* This looks like a dirty hack to me --gcl */
+	outb(0xc0, 0x4d0);
+	outb(0xc0, 0x4d1);
+
+ out:
+	of_node_put(cascade_node);
+}
+#else
+static inline void xilinx_i8259_setup_cascade(void) { return; }
+#endif /* defined(CONFIG_PPC_I8259) */
+
+static struct of_device_id xilinx_intc_match[] __initconst = {
+	{ .compatible = "xlnx,opb-intc-1.00.c", },
+	{ .compatible = "xlnx,xps-intc-1.00.a", },
+	{}
+};
+
+/*
+ * Initialize the master Xilinx interrupt controller
+ */
+void __init xilinx_intc_init_tree(void)
+{
+	struct device_node *np;
+
+	/* find the top level interrupt controller */
+	for_each_matching_node(np, xilinx_intc_match) {
+		if (!of_get_property(np, "interrupts", NULL))
+			break;
+	}
+	BUG_ON(!np);
+
+	master_irqhost = xilinx_intc_init(np);
+	BUG_ON(!master_irqhost);
+
+	irq_set_default_host(master_irqhost);
+	of_node_put(np);
+
+	xilinx_i8259_setup_cascade();
+}
diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c
new file mode 100644
index 00000000..1453b0ee
--- /dev/null
+++ b/arch/powerpc/sysdev/xilinx_pci.c
@@ -0,0 +1,132 @@
+/*
+ * PCI support for the Xilinx plbv46_pci soft-core which can be used on
+ * Xilinx Virtex ML410 / ML510 boards.
+ *
+ * Copyright 2009 Roderick Colenbrander
+ * Copyright 2009 Secret Lab Technologies Ltd.
+ *
+ * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
+ * by Benjamin Herrenschmidt.
+ * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+ */ + +#include <linux/ioport.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <mm/mmu_decl.h> +#include <asm/io.h> +#include <asm/xilinx_pci.h> + +#define XPLB_PCI_ADDR 0x10c +#define XPLB_PCI_DATA 0x110 +#define XPLB_PCI_BUS 0x114 + +#define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY + +static struct of_device_id xilinx_pci_match[] = { + { .compatible = "xlnx,plbv46-pci-1.03.a", }, + {} +}; + +/** + * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. + */ +static void xilinx_pci_fixup_bridge(struct pci_dev *dev) +{ + struct pci_controller *hose; + int i; + + if (dev->devfn || dev->bus->self) + return; + + hose = pci_bus_to_host(dev->bus); + if (!hose) + return; + + if (!of_match_node(xilinx_pci_match, hose->dn)) + return; + + /* Hide the PCI host BARs from the kernel as their content doesn't + * fit well in the resource management + */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + + dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", + pci_name(dev)); +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); + +/** + * xilinx_pci_exclude_device - Don't do config access for non-root bus + * + * This is a hack. Config access to any bus other than bus 0 does not + * currently work on the ML510 so we prevent it here. + */ +static int +xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) +{ + return (bus != 0); +} + +/** + * xilinx_pci_init - Find and register a Xilinx PCI host bridge + */ +void __init xilinx_pci_init(void) +{ + struct pci_controller *hose; + struct resource r; + void __iomem *pci_reg; + struct device_node *pci_node; + + pci_node = of_find_matching_node(NULL, xilinx_pci_match); + if(!pci_node) + return; + + if (of_address_to_resource(pci_node, 0, &r)) { + pr_err("xilinx-pci: cannot resolve base address\n"); + return; + } + + hose = pcibios_alloc_controller(pci_node); + if (!hose) { + pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); + return; + } + + /* Setup config space */ + setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, + r.start + XPLB_PCI_DATA, + PPC_INDIRECT_TYPE_SET_CFG_TYPE); + + /* According to the xilinx plbv46_pci documentation the soft-core starts + * a self-init when the bus master enable bit is set. Without this bit + * set the pci bus can't be scanned. + */ + early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); + + /* Set the max latency timer to 255 */ + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); + + /* Set the max bus number to 255 */ + pci_reg = of_iomap(pci_node, 0); + out_8(pci_reg + XPLB_PCI_BUS, 0xff); + iounmap(pci_reg); + + /* Nothing past the root bridge is working right now. By default + * exclude config access to anything except bus 0 */ + if (!ppc_md.pci_exclude_device) + ppc_md.pci_exclude_device = xilinx_pci_exclude_device; + + /* Register the host bridge with the linux kernel! */ + pci_process_bridge_OF_ranges(hose, pci_node, 1); + + pr_info("xilinx-pci: Registered PCI host bridge\n"); +} |