From 69b87e767b7dc9ffe8c3081e9921aa0891d068ee Mon Sep 17 00:00:00 2001 From: Imre Kaloz Date: Mon, 22 Nov 2010 13:29:58 +0000 Subject: [coldfire]: 2.6.31 support (WiP) git-svn-id: svn://svn.openwrt.org/openwrt/trunk@24087 3c298f89-4303-0410-b956-a3cf2f4a3e73 --- .../files-2.6.31/arch/m68k/include/asm/MCD_dma.h | 432 +++++++++++++ .../arch/m68k/include/asm/asm-offsets.h | 73 +++ .../arch/m68k/include/asm/cf_548x_cacheflush.h | 262 ++++++++ .../files-2.6.31/arch/m68k/include/asm/cf_bitops.h | 443 +++++++++++++ .../arch/m68k/include/asm/cf_cacheflush.h | 10 + .../files-2.6.31/arch/m68k/include/asm/cf_entry.h | 146 +++++ .../files-2.6.31/arch/m68k/include/asm/cf_io.h | 191 ++++++ .../files-2.6.31/arch/m68k/include/asm/cf_page.h | 186 ++++++ .../arch/m68k/include/asm/cf_pgalloc.h | 107 ++++ .../arch/m68k/include/asm/cf_pgtable.h | 360 +++++++++++ .../files-2.6.31/arch/m68k/include/asm/cf_raw_io.h | 181 ++++++ .../arch/m68k/include/asm/cf_tlbflush.h | 62 ++ .../arch/m68k/include/asm/cf_uaccess.h | 379 +++++++++++ .../arch/m68k/include/asm/cf_virtconvert.h | 58 ++ .../files-2.6.31/arch/m68k/include/asm/cfcache.h | 114 ++++ .../files-2.6.31/arch/m68k/include/asm/cfmmu.h | 105 ++++ .../files-2.6.31/arch/m68k/include/asm/m5485dma.h | 98 +++ .../files-2.6.31/arch/m68k/include/asm/m5485dspi.h | 146 +++++ .../files-2.6.31/arch/m68k/include/asm/m5485gpio.h | 695 +++++++++++++++++++++ .../files-2.6.31/arch/m68k/include/asm/m5485gpt.h | 90 +++ .../files-2.6.31/arch/m68k/include/asm/m5485pci.h | 380 +++++++++++ .../files-2.6.31/arch/m68k/include/asm/m5485psc.h | 475 ++++++++++++++ .../files-2.6.31/arch/m68k/include/asm/m5485sec.h | 177 ++++++ .../files-2.6.31/arch/m68k/include/asm/m5485sim.h | 345 ++++++++++ .../files-2.6.31/arch/m68k/include/asm/m5485sram.h | 12 + .../files-2.6.31/arch/m68k/include/asm/mcffec.h | 204 ++++++ 26 files changed, 5731 insertions(+) create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/MCD_dma.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/asm-offsets.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_548x_cacheflush.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_bitops.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_cacheflush.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_entry.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_io.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_page.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgalloc.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgtable.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_raw_io.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_tlbflush.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_uaccess.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_virtconvert.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cfcache.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cfmmu.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485dma.h create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485dspi.h create mode 100644 
target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485gpio.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485gpt.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485pci.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485psc.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485sec.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485sim.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/m5485sram.h
create mode 100644 target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/mcffec.h

diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/MCD_dma.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/MCD_dma.h
new file mode 100644
index 0000000000..700adf2754
--- /dev/null
+++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/MCD_dma.h
@@ -0,0 +1,432 @@
+/*********************************************************************
+ *
+ * Copyright (C) 2004 Motorola, Inc.
+ * MOTOROLA, INC. All Rights Reserved.
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ * Shrek Wu b16972@freescale.com
+ *
+ * You are hereby granted a copyright license to use
+ * the SOFTWARE so long as this entire notice is
+ * retained without alteration in any modified and/or redistributed
+ * versions, and that such modified versions are clearly identified
+ * as such. No licenses are granted by implication, estoppel or
+ * otherwise under any patents or trademarks of Motorola, Inc. This
+ * software is provided on an "AS IS" basis and without warranty.
+ *
+ * To the maximum extent permitted by applicable law, MOTOROLA
+ * DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED, INCLUDING
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR
+ * PURPOSE AND ANY WARRANTY AGAINST INFRINGEMENT WITH REGARD TO THE
+ * SOFTWARE (INCLUDING ANY MODIFIED VERSIONS THEREOF) AND ANY
+ * ACCOMPANYING WRITTEN MATERIALS.
+ *
+ * To the maximum extent permitted by applicable law, IN NO EVENT
+ * SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER (INCLUDING
+ * WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS
+ * INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY
+ * LOSS) ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+ *
+ * Motorola assumes no responsibility for the maintenance and support
+ * of this software
+ ********************************************************************/
+
+/*
+ * File: MCD_dma.h
+ * Purpose: Main header file for multi-channel DMA API.
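+ *
+ * Typical call sequence (an inference from the prototypes below, not a
+ * statement in the original header): MCD_initDma() once to install the
+ * task table, then MCD_startDma() per transfer, with MCD_dmaStatus(),
+ * MCD_pauseDma()/MCD_resumeDma() and MCD_killDma() available for
+ * monitoring and teardown.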
+ * + * Notes: + * + * Modifications: + */ +#ifndef _MCD_API_H +#define _MCD_API_H + +#include + +/* + * Turn Execution Unit tasks ON (#define) or OFF (#undef) + */ +#undef MCD_INCLUDE_EU + +/* + * Number of DMA channels + */ +#define NCHANNELS 16 + +/* + * Total number of variants + */ +#ifdef MCD_INCLUDE_EU +#define NUMOFVARIANTS 6 +#else +#define NUMOFVARIANTS 4 +#endif + +/* + * Define sizes of the various tables + */ +#define TASK_TABLE_SIZE (NCHANNELS*32) +#define VAR_TAB_SIZE (128) +#define CONTEXT_SAVE_SIZE (128) +#define FUNCDESC_TAB_SIZE (256) + +#ifdef MCD_INCLUDE_EU +#define FUNCDESC_TAB_NUM 16 +#else +#define FUNCDESC_TAB_NUM 1 +#endif + + +#ifndef DEFINESONLY + +/* + * Portability typedefs + */ + /* +#ifndef s32 +typedef int s32; +#endif +#ifndef u32 +typedef unsigned int u32; +#endif +#ifndef s16 +typedef short s16; +#endif +#ifndef u16 +typedef unsigned short u16; +#endif +#ifndef s8 +typedef char s8; +#endif +#ifndef u8 +typedef unsigned char u8; +#endif +*/ +/* + * These structures represent the internal registers of the + * multi-channel DMA + */ +struct dmaRegs_s { + u32 taskbar; /* task table base address register */ + u32 currPtr; + u32 endPtr; + u32 varTablePtr; + u16 dma_rsvd0; + u16 ptdControl; /* ptd control */ + u32 intPending; /* interrupt pending register */ + u32 intMask; /* interrupt mask register */ + u16 taskControl[16]; /* task control registers */ + u8 priority[32]; /* priority registers */ + u32 initiatorMux; /* initiator mux control */ + u32 taskSize0; /* task size control register 0. */ + u32 taskSize1; /* task size control register 1. */ + u32 dma_rsvd1; /* reserved */ + u32 dma_rsvd2; /* reserved */ + u32 debugComp1; /* debug comparator 1 */ + u32 debugComp2; /* debug comparator 2 */ + u32 debugControl; /* debug control */ + u32 debugStatus; /* debug status */ + u32 ptdDebug; /* priority task decode debug */ + u32 dma_rsvd3[31]; /* reserved */ +}; +typedef volatile struct dmaRegs_s dmaRegs; + +#endif + +/* + * PTD contrl reg bits + */ +#define PTD_CTL_TSK_PRI 0x8000 +#define PTD_CTL_COMM_PREFETCH 0x0001 + +/* + * Task Control reg bits and field masks + */ +#define TASK_CTL_EN 0x8000 +#define TASK_CTL_VALID 0x4000 +#define TASK_CTL_ALWAYS 0x2000 +#define TASK_CTL_INIT_MASK 0x1f00 +#define TASK_CTL_ASTRT 0x0080 +#define TASK_CTL_HIPRITSKEN 0x0040 +#define TASK_CTL_HLDINITNUM 0x0020 +#define TASK_CTL_ASTSKNUM_MASK 0x000f + +/* + * Priority reg bits and field masks + */ +#define PRIORITY_HLD 0x80 +#define PRIORITY_PRI_MASK 0x07 + +/* + * Debug Control reg bits and field masks + */ +#define DBG_CTL_BLOCK_TASKS_MASK 0xffff0000 +#define DBG_CTL_AUTO_ARM 0x00008000 +#define DBG_CTL_BREAK 0x00004000 +#define DBG_CTL_COMP1_TYP_MASK 0x00003800 +#define DBG_CTL_COMP2_TYP_MASK 0x00000070 +#define DBG_CTL_EXT_BREAK 0x00000004 +#define DBG_CTL_INT_BREAK 0x00000002 + +/* + * PTD Debug reg selector addresses + * This reg must be written with a value to show the contents of + * one of the desired internal register. 
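+ *
+ * A minimal illustrative sketch (assuming "dmap" points at the dmaRegs
+ * block defined above; reading the decoded state back through ptdDebug
+ * is an assumption, not taken from the original documentation):
+ *
+ *	dmap->ptdDebug = PTD_DBG_REQ;
+ *	state = dmap->ptdDebug;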
+ */ +#define PTD_DBG_REQ 0x00 /* shows the state of 31 initiators */ +#define PTD_DBG_TSK_VLD_INIT 0x01 /* shows which 16 tasks are valid and + have initiators asserted */ + + +/* + * General return values + */ +#define MCD_OK 0 +#define MCD_ERROR -1 +#define MCD_TABLE_UNALIGNED -2 +#define MCD_CHANNEL_INVALID -3 + +/* + * MCD_initDma input flags + */ +#define MCD_RELOC_TASKS 0x00000001 +#define MCD_NO_RELOC_TASKS 0x00000000 +#define MCD_COMM_PREFETCH_EN 0x00000002 +/* Commbus Prefetching - MCF547x/548x ONLY */ + +/* + * MCD_dmaStatus Status Values for each channel + */ +#define MCD_NO_DMA 1 /* No DMA has been requested since reset */ +#define MCD_IDLE 2 /* DMA active, but the initiator is currently inactive */ +#define MCD_RUNNING 3 /* DMA active, and the initiator is currently active */ +#define MCD_PAUSED 4 /* DMA active but it is currently paused */ +#define MCD_HALTED 5 +/* the most recent DMA has been killed with MCD_killTask() */ +#define MCD_DONE 6 /* the most recent DMA has completed. */ + + +/* + * MCD_startDma parameter defines + */ + +/* + * Constants for the funcDesc parameter + */ +/* Byte swapping: */ +#define MCD_NO_BYTE_SWAP 0x00045670 /* to disable byte swapping. */ +#define MCD_BYTE_REVERSE 0x00076540 +/* to reverse the bytes of each u32 of the DMAed data. */ +#define MCD_U16_REVERSE 0x00067450 /* to reverse the 16-bit halves of + each 32-bit data value being DMAed.*/ +#define MCD_U16_BYTE_REVERSE 0x00054760 /* to reverse the byte halves of each + 16-bit half of each 32-bit data value DMAed */ +#define MCD_NO_BIT_REV 0x00000000 +/* do not reverse the bits of each byte DMAed. */ +#define MCD_BIT_REV 0x00088880 /* reverse the bits of each byte DMAed */ +/* CRCing: */ +#define MCD_CRC16 0xc0100000 /* to perform CRC-16 on DMAed data. */ +#define MCD_CRCCCITT 0xc0200000 /* to perform CRC-CCITT on DMAed data. */ +#define MCD_CRC32 0xc0300000 /* to perform CRC-32 on DMAed data. */ +#define MCD_CSUMINET 0xc0400000 +/* to perform internet checksums on DMAed data.*/ +#define MCD_NO_CSUM 0xa0000000 /* to perform no checksumming. */ + +#define MCD_FUNC_NOEU1 (MCD_NO_BYTE_SWAP | MCD_NO_BIT_REV | MCD_NO_CSUM) +#define MCD_FUNC_NOEU2 (MCD_NO_BYTE_SWAP | MCD_NO_CSUM) + +/* + * Constants for the flags parameter + */ +#define MCD_TT_FLAGS_RL 0x00000001 /* Read line */ +#define MCD_TT_FLAGS_CW 0x00000002 /* Combine Writes */ +#define MCD_TT_FLAGS_SP 0x00000004 +/* Speculative prefetch(XLB) MCF547x/548x ONLY */ +#define MCD_TT_FLAGS_MASK 0x000000ff +#define MCD_TT_FLAGS_DEF (MCD_TT_FLAGS_RL | MCD_TT_FLAGS_CW) + +#define MCD_SINGLE_DMA 0x00000100 /* Unchained DMA */ +#define MCD_CHAIN_DMA /* TBD */ +#define MCD_EU_DMA /* TBD */ +#define MCD_FECTX_DMA 0x00001000 /* FEC TX ring DMA */ +#define MCD_FECRX_DMA 0x00002000 /* FEC RX ring DMA */ + + +/* these flags are valid for MCD_startDma and the chained buffer descriptors */ +#define MCD_BUF_READY 0x80000000 +/* indicates that this buffer is now under the DMA's control */ +#define MCD_WRAP 0x20000000 +/* to tell the FEC Dmas to wrap to the first BD */ +#define MCD_INTERRUPT 0x10000000 +/* to generate an interrupt after completion of the DMA. */ +#define MCD_END_FRAME 0x08000000 +/* tell the DMA to end the frame when transferring + last byte of data in buffer */ +#define MCD_CRC_RESTART 0x40000000 /* to empty out the accumulated checksum + prior to performing the DMA. 
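+
+   (Illustrative composition, inferred from the flag definitions above
+   rather than stated by the original header: an unchained transfer that
+   interrupts on completion and starts a fresh checksum would pass
+   MCD_SINGLE_DMA | MCD_INTERRUPT | MCD_CRC_RESTART in MCD_startDma()'s
+   "flags" argument.)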
 */
+
+/* Defines for the FEC buffer descriptor control/status word*/
+#define MCD_FEC_BUF_READY 0x8000
+#define MCD_FEC_WRAP 0x2000
+#define MCD_FEC_INTERRUPT 0x1000
+#define MCD_FEC_END_FRAME 0x0800
+
+
+/*
+ * Defines for general intuitiveness
+ */
+
+#define MCD_TRUE 1
+#define MCD_FALSE 0
+
+/*
+ * Three different cases for destination and source.
+ */
+#define MINUS1 -1
+#define ZERO 0
+#define PLUS1 1
+
+#ifndef DEFINESONLY
+
+/* Task Table Entry struct*/
+typedef struct {
+	u32 TDTstart;	/* task descriptor table start */
+	u32 TDTend;	/* task descriptor table end */
+	u32 varTab;	/* variable table start */
+	u32 FDTandFlags;	/* function descriptor table start and flags */
+	volatile u32 descAddrAndStatus;
+	volatile u32 modifiedVarTab;
+	u32 contextSaveSpace;	/* context save space start */
+	u32 literalBases;
+} TaskTableEntry;
+
+
+/* Chained buffer descriptor */
+typedef volatile struct MCD_bufDesc_struct MCD_bufDesc;
+struct MCD_bufDesc_struct {
+	u32 flags;	/* flags describing the DMA */
+	u32 csumResult;
+	/* checksum from checksumming performed since last checksum reset */
+	s8 *srcAddr;	/* the address to move data from */
+	s8 *destAddr;	/* the address to move data to */
+	s8 *lastDestAddr;	/* the last address written to */
+	u32 dmaSize;
+	/* the number of bytes to transfer independent of the transfer size */
+	MCD_bufDesc *next;	/* next buffer descriptor in chain */
+	u32 info;
+	/* private information about this descriptor; DMA does not affect it */
+};
+
+/* Progress Query struct */
+typedef volatile struct MCD_XferProg_struct {
+	s8 *lastSrcAddr;
+	/* the most-recent or last, post-increment source address */
+	s8 *lastDestAddr;
+	/* the most-recent or last, post-increment destination address */
+	u32 dmaSize;
+	/* the amount of data transferred for the current buffer */
+	MCD_bufDesc *currBufDesc;
+	/* pointer to the current buffer descriptor being DMAed */
+} MCD_XferProg;
+
+
+/* FEC buffer descriptor */
+typedef volatile struct MCD_bufDescFec_struct {
+	u16 statCtrl;
+	u16 length;
+	u32 dataPointer;
+} MCD_bufDescFec;
+
+
+/*************************************************************************/
+/*
+ * API function Prototypes - see MCD_dmaApi.c for further notes
+ */
+
+/*
+ * MCD_startDma starts a particular kind of DMA.
+ */
+int MCD_startDma(
+	int channel,	/* the channel on which to run the DMA */
+	s8 *srcAddr,
+	/* the address to move data from, or buffer-descriptor address */
+	s16 srcIncr,	/* the amount to increment the source address per transfer */
+	s8 *destAddr,	/* the address to move data to */
+	s16 destIncr,
+	/* the amount to increment the destination address per transfer */
+	u32 dmaSize,
+	/* the number of bytes to transfer independent of the transfer size */
+	u32 xferSize,	/* the number of bytes in each data movement (1, 2, or 4) */
+	u32 initiator,	/* what device initiates the DMA */
+	int priority,	/* priority of the DMA */
+	u32 flags,	/* flags describing the DMA */
+	u32 funcDesc
+/* a description of byte swapping, bit swapping, and CRC actions */
+);
+
+/*
+ * MCD_initDma() initializes the DMA API by setting up a pointer to the DMA
+ * registers, relocating and creating the appropriate task structures, and
+ * setting up some global settings
+ */
+int MCD_initDma(dmaRegs *sDmaBarAddr, void *taskTableDest, u32 flags);
+
+/*
+ * MCD_dmaStatus() returns the status of the DMA on the requested channel.
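+ *
+ * A hedged usage sketch; the channel number (2), initiator (0) and
+ * priority (3) below are hypothetical values, not taken from this header:
+ *
+ *	rc = MCD_startDma(2, src, 4, dst, 4, nbytes, 4, 0, 3,
+ *		MCD_SINGLE_DMA | MCD_TT_FLAGS_DEF, MCD_FUNC_NOEU1);
+ *	while (MCD_dmaStatus(2) != MCD_DONE)
+ *		;	(busy-wait; a real driver would use MCD_INTERRUPT)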
 */
+int MCD_dmaStatus(int channel);
+
+/*
+ * MCD_XferProgrQuery() returns the progress of the DMA on the requested
+ * channel
+ */
+int MCD_XferProgrQuery(int channel, MCD_XferProg *progRep);
+
+/*
+ * MCD_killDma() halts the DMA on the requested channel, without any
+ * intention of resuming the DMA.
+ */
+int MCD_killDma(int channel);
+
+/*
+ * MCD_continDma() continues a DMA which has stopped due to encountering an
+ * unready buffer descriptor.
+ */
+int MCD_continDma(int channel);
+
+/*
+ * MCD_pauseDma() pauses the DMA on the given channel (if any DMA is
+ * running on that channel).
+ */
+int MCD_pauseDma(int channel);
+
+/*
+ * MCD_resumeDma() resumes the DMA on a given channel (if any DMA is
+ * running on that channel).
+ */
+int MCD_resumeDma(int channel);
+
+/*
+ * MCD_csumQuery provides the checksum/CRC after performing a non-chained DMA
+ */
+int MCD_csumQuery(int channel, u32 *csum);
+
+/*
+ * MCD_getCodeSize provides the packed size required by the microcoded task
+ * and structures.
+ */
+int MCD_getCodeSize(void);
+
+/*
+ * MCD_getVersion provides a pointer to a version string and returns a
+ * version number.
+ */
+int MCD_getVersion(char **longVersion);
+
+/* macro for setting a location in the variable table */
+#define MCD_SET_VAR(taskTab, idx, value) ((u32 *)(taskTab)->varTab)[idx] = value
+	/* Note that MCD_SET_VAR() is invoked many times in firing up a DMA function,
+	   so I'm avoiding surrounding it with "do {} while(0)" */
+
+#endif /* DEFINESONLY */
+
+#endif /* _MCD_API_H */
diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/asm-offsets.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/asm-offsets.h
new file mode 100644
index 0000000000..dffe187b7c
--- /dev/null
+++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/asm-offsets.h
@@ -0,0 +1,73 @@
+#ifndef __ASM_OFFSETS_H__
+#define __ASM_OFFSETS_H__
+/*
+ * DO NOT MODIFY.
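+ *
+ * (Each constant below is a byte offset into a kernel C structure,
+ * emitted at build time from expressions of the form, illustratively,
+ * DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); so that
+ * assembly code can address C structures symbolically. The generator
+ * source itself is not part of this patch.)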
+ * + * This file was generated by Kbuild + * + */ + +#define TASK_STATE 0 /* offsetof(struct task_struct, state) | */ +#define TASK_FLAGS 12 /* offsetof(struct task_struct, flags) | */ +#define TASK_PTRACE 16 /* offsetof(struct task_struct, ptrace) | */ +#define TASK_THREAD 476 /* offsetof(struct task_struct, thread) | */ +#define TASK_INFO 638 /* offsetof(struct task_struct, thread.info) | */ +#define TASK_MM 178 /* offsetof(struct task_struct, mm) | */ +#define TASK_ACTIVE_MM 182 /* offsetof(struct task_struct, active_mm) | */ +#define THREAD_KSP 0 /* offsetof(struct thread_struct, ksp) | */ +#define THREAD_USP 4 /* offsetof(struct thread_struct, usp) | */ +#define THREAD_SR 8 /* offsetof(struct thread_struct, sr) | */ +#define THREAD_FS 10 /* offsetof(struct thread_struct, fs) | */ +#define THREAD_CRP 14 /* offsetof(struct thread_struct, crp) | */ +#define THREAD_ESP0 22 /* offsetof(struct thread_struct, esp0) | */ +#define THREAD_FPREG 38 /* offsetof(struct thread_struct, fp) | */ +#define THREAD_FPCNTL 134 /* offsetof(struct thread_struct, fpcntl) | */ +#define THREAD_FPSTATE 146 /* offsetof(struct thread_struct, fpstate) | */ +#define TINFO_PREEMPT 12 /* offsetof(struct thread_info, preempt_count) | */ +#define TINFO_FLAGS 4 /* offsetof(struct thread_info, flags) | */ +#define PT_D0 32 /* offsetof(struct pt_regs, d0) | */ +#define PT_ORIG_D0 36 /* offsetof(struct pt_regs, orig_d0) | */ +#define PT_D1 0 /* offsetof(struct pt_regs, d1) | */ +#define PT_D2 4 /* offsetof(struct pt_regs, d2) | */ +#define PT_D3 8 /* offsetof(struct pt_regs, d3) | */ +#define PT_D4 12 /* offsetof(struct pt_regs, d4) | */ +#define PT_D5 16 /* offsetof(struct pt_regs, d5) | */ +#define PT_A0 20 /* offsetof(struct pt_regs, a0) | */ +#define PT_A1 24 /* offsetof(struct pt_regs, a1) | */ +#define PT_A2 28 /* offsetof(struct pt_regs, a2) | */ +#define PT_PC 56 /* offsetof(struct pt_regs, pc) | */ +#define PT_SR 54 /* offsetof(struct pt_regs, sr) | */ +#define MM_CONTEXT 328 /* offsetof(struct mm_struct, context) | */ +#define PT_VECTOR 52 /* offsetof(struct pt_regs, pc) - 4 | */ +#define IRQ_HANDLER 0 /* offsetof(struct irq_node, handler) | */ +#define IRQ_DEVID 4 /* offsetof(struct irq_node, dev_id) | */ +#define IRQ_NEXT 8 /* offsetof(struct irq_node, next) | */ +#define STAT_IRQ 72 /* offsetof(struct kernel_stat, irqs) | */ +#define CPUSTAT_SOFTIRQ_PENDING 0 /* offsetof(irq_cpustat_t, __softirq_pending) | */ +#define BIR_TAG 0 /* offsetof(struct bi_record, tag) | */ +#define BIR_SIZE 2 /* offsetof(struct bi_record, size) | */ +#define BIR_DATA 4 /* offsetof(struct bi_record, data) | */ +#define FONT_DESC_IDX 0 /* offsetof(struct font_desc, idx) | */ +#define FONT_DESC_NAME 4 /* offsetof(struct font_desc, name) | */ +#define FONT_DESC_WIDTH 8 /* offsetof(struct font_desc, width) | */ +#define FONT_DESC_HEIGHT 12 /* offsetof(struct font_desc, height) | */ +#define FONT_DESC_DATA 16 /* offsetof(struct font_desc, data) | */ +#define FONT_DESC_PREF 20 /* offsetof(struct font_desc, pref) | */ +#define SIGSEGV 11 /* SIGSEGV | */ +#define SEGV_MAPERR 196609 /* SEGV_MAPERR | */ +#define SIGTRAP 5 /* SIGTRAP | */ +#define TRAP_TRACE 196610 /* TRAP_TRACE | */ +#define CUSTOMBASE -2132807680 /* &amiga_custom | */ +#define C_INTENAR 28 /* offsetof(struct CUSTOM, intenar) | */ +#define C_INTREQR 30 /* offsetof(struct CUSTOM, intreqr) | */ +#define C_INTENA 154 /* offsetof(struct CUSTOM, intena) | */ +#define C_INTREQ 156 /* offsetof(struct CUSTOM, intreq) | */ +#define C_SERDATR 24 /* offsetof(struct CUSTOM, serdatr) | */ 
+#define C_SERDAT 48 /* offsetof(struct CUSTOM, serdat) | */ +#define C_SERPER 50 /* offsetof(struct CUSTOM, serper) | */ +#define CIAABASE -2134908927 /* &ciaa | */ +#define CIABBASE -2134913024 /* &ciab | */ +#define C_PRA 0 /* offsetof(struct CIA, pra) | */ +#define ZTWOBASE -2147483648 /* zTwoBase | */ + +#endif diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_548x_cacheflush.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_548x_cacheflush.h new file mode 100644 index 0000000000..bc19dfc15c --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_548x_cacheflush.h @@ -0,0 +1,262 @@ +/* + * arch/m68k/include/asm/cf_548x_cacheflush.h - Coldfire 547x/548x Cache + * + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Kurt Mahan kmahan@freescale.com + * Shrek Wu b16972@freescale.com + * + * Based on include/asm-m68k/cacheflush.h + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef M68K_CF_548x_CACHEFLUSH_H +#define M68K_CF_548x_CACHEFLUSH_H + +#include +/* + * Cache handling functions + */ + +#define flush_icache() \ +({ \ + unsigned long set; \ + unsigned long start_set; \ + unsigned long end_set; \ + \ + start_set = 0; \ + end_set = (unsigned long)LAST_DCACHE_ADDR; \ + \ + for (set = start_set; set <= end_set; set += (0x10 - 3)) {\ + asm volatile("cpushl %%ic,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%ic,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%ic,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \ + } \ +}) + +#define flush_dcache() \ +({ \ + unsigned long set; \ + unsigned long start_set; \ + unsigned long end_set; \ + \ + start_set = 0; \ + end_set = (unsigned long)LAST_DCACHE_ADDR; \ + \ + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \ + asm volatile("cpushl %%dc,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%dc,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%dc,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \ + } \ +}) + +#define flush_bcache() \ +({ \ + unsigned long set; \ + unsigned long start_set; \ + unsigned long end_set; \ + \ + start_set = 0; \ + end_set = (unsigned long)LAST_DCACHE_ADDR; \ + \ + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \ + asm volatile("cpushl %%bc,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%bc,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%bc,(%0)\n" \ + "\taddq%.l #1,%0\n" \ + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \ + } \ +}) + +/* + * invalidate the cache for the specified memory range. + * It starts at the physical address specified for + * the given number of bytes. + */ +extern void cache_clear(unsigned long paddr, int len); +/* + * push any dirty cache in the specified memory range. + * It starts at the physical address specified for + * the given number of bytes. + */ +extern void cache_push(unsigned long paddr, int len); + +/* + * push and invalidate pages in the specified user virtual + * memory range. + */ +extern void cache_push_v(unsigned long vaddr, int len); + +/* This is needed whenever the virtual mapping of the current + process changes. 
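+
+   A note on the cpushl loops in this file (an observation about the
+   code, not from the original author): each asm block issues four
+   cpushl pushes, the three addq #1 instructions advance the address
+   between pushes, and the loop stride of (0x10 - 3) supplies the rest
+   of the 0x10 step to the next cache set.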
 */
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+	if (mm == current->mm)
+		flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_all() flush_bcache()
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end)
+{
+	if (vma->vm_mm == current->mm)
+		flush_bcache();
+/*cf_cache_flush_range(start, end);*/
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr: virtual address of the page
+ * @pfn: page number
+ *
+ * flush_cache_page must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+	unsigned long vmaddr, unsigned long pfn)
+{
+	if (vma->vm_mm == current->mm)
+		flush_bcache();
+/*cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);*/
+}
+
+/* Push the page at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
+extern inline void __flush_page_to_ram(void *address)
+{
+	unsigned long set;
+	unsigned long start_set;
+	unsigned long end_set;
+	unsigned long addr = (unsigned long) address;
+
+	addr &= ~(PAGE_SIZE - 1);
+	/* round down to page start address */
+
+	start_set = addr & _ICACHE_SET_MASK;
+	end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
+
+	if (start_set > end_set) {
+		/* from the beginning to the lowest address */
+		for (set = 0; set <= end_set; set += (0x10 - 3)) {
+			asm volatile("cpushl %%bc,(%0)\n"
+				"\taddq%.l #1,%0\n"
+				"\tcpushl %%bc,(%0)\n"
+				"\taddq%.l #1,%0\n"
+				"\tcpushl %%bc,(%0)\n"
+				"\taddq%.l #1,%0\n"
+				"\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+		}
+		/* next loop will finish the cache ie pass the hole */
+		end_set = LAST_ICACHE_ADDR;
+	}
+
+	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+		asm volatile("cpushl %%bc,(%0)\n"
+			"\taddq%.l #1,%0\n"
+			"\tcpushl %%bc,(%0)\n"
+			"\taddq%.l #1,%0\n"
+			"\tcpushl %%bc,(%0)\n"
+			"\taddq%.l #1,%0\n"
+			"\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+	}
+}
+
+/* Use __flush_page_to_ram() for flush_dcache_page; all values are the same - MW */
+#define flush_dcache_page(page) \
+	__flush_page_to_ram((void *) page_address(page))
+#define flush_icache_page(vma, pg) \
+	__flush_page_to_ram((void *) page_address(pg))
+#define flush_icache_user_range(adr, len) \
+	do { } while (0)
+/* NL */
+#define flush_icache_user_page(vma, page, addr, len) \
+	do { } while (0)
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+extern inline void flush_icache_range(unsigned long address,
+	unsigned long endaddr)
+{
+	unsigned long set;
+	unsigned long start_set;
+	unsigned long end_set;
+
+	start_set = address & _ICACHE_SET_MASK;
+	end_set = endaddr & _ICACHE_SET_MASK;
+
+	if (start_set > end_set) {
+		/* from the beginning to the lowest address */
+		for (set = 0; set <= end_set; set += (0x10 - 3)) {
+			asm volatile("cpushl %%ic,(%0)\n"
+				"\taddq%.l #1,%0\n"
+				"\tcpushl %%ic,(%0)\n"
+				"\taddq%.l #1,%0\n"
+				"\tcpushl %%ic,(%0)\n"
+				"\taddq%.l #1,%0\n"
+				"\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+		}
+		/* next loop
will finish the cache ie pass the hole */ + end_set = LAST_ICACHE_ADDR; + } + for (set = start_set; set <= end_set; set += (0x10 - 3)) { + asm volatile("cpushl %%ic,(%0)\n" + "\taddq%.l #1,%0\n" + "\tcpushl %%ic,(%0)\n" + "\taddq%.l #1,%0\n" + "\tcpushl %%ic,(%0)\n" + "\taddq%.l #1,%0\n" + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); + } +} + +static inline void copy_to_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, void *src, int len) +{ + memcpy(dst, src, len); + flush_icache_user_page(vma, page, vaddr, len); +} +static inline void copy_from_user_page(struct vm_area_struct *vma, + struct page *page, unsigned long vaddr, + void *dst, void *src, int len) +{ + memcpy(dst, src, len); +} + +#define flush_cache_vmap(start, end) flush_cache_all() +#define flush_cache_vunmap(start, end) flush_cache_all() +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +#endif /* M68K_CF_548x_CACHEFLUSH_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_bitops.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_bitops.h new file mode 100644 index 0000000000..6f9e9c0990 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_bitops.h @@ -0,0 +1,443 @@ +/* + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ +#ifndef __CF_BITOPS__ +#define __CF_BITOPS__ + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include + +#define test_and_set_bit(nr,vaddr) \ + (__builtin_constant_p(nr) ? \ + __constant_coldfire_test_and_set_bit(nr, vaddr) : \ + __generic_coldfire_test_and_set_bit(nr, vaddr)) + +static __inline__ int __constant_coldfire_test_and_set_bit(int nr, + volatile void *vaddr) +{ + char retval; + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3]; + __asm__ __volatile__ ("bset %2,(%4); sne %0" + : "=d" (retval), "=m" (*p) + : "di" (nr & 7), "m" (*p), "a" (p)); + return retval; +} + +static __inline__ int __generic_coldfire_test_and_set_bit(int nr, + volatile void *vaddr) +{ + char retval; + + __asm__ __volatile__ ("bset %2,%1; sne %0" + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3]) + : "d" (nr) + : "memory"); + return retval; +} +#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr) + +#define set_bit(nr,vaddr) \ + (__builtin_constant_p(nr) ? \ + __constant_coldfire_set_bit(nr, vaddr) : \ + __generic_coldfire_set_bit(nr, vaddr)) + +static __inline__ void __constant_coldfire_set_bit(int nr, + volatile void *vaddr) +{ + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3]; + __asm__ __volatile__ ("bset %1,(%3)" + : "=m" (*p) : "di" (nr & 7), "m" (*p), "a" (p)); +} + +static __inline__ void __generic_coldfire_set_bit(int nr, + volatile void *vaddr) +{ + __asm__ __volatile__ ("bset %1,%0" + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3]) + : "d" (nr) + : "memory"); +} +#define __set_bit(nr, vaddr) set_bit(nr, vaddr) + +#define test_and_clear_bit(nr, vaddr) \ + (__builtin_constant_p(nr) ? 
\ + __constant_coldfire_test_and_clear_bit(nr, vaddr) : \ + __generic_coldfire_test_and_clear_bit(nr, vaddr)) + +static __inline__ int __constant_coldfire_test_and_clear_bit(int nr, + volatile void *vaddr) +{ + char retval; + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3]; + + __asm__ __volatile__ ("bclr %2,(%4); sne %0" + : "=d" (retval), "=m" (*p) + : "id" (nr & 7), "m" (*p), "a" (p)); + + return retval; +} + +static __inline__ int __generic_coldfire_test_and_clear_bit(int nr, + volatile void *vaddr) +{ + char retval; + + __asm__ __volatile__ ("bclr %2,%1; sne %0" + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3]) + : "d" (nr & 7) + : "memory"); + + return retval; +} +#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr) + +/* + * clear_bit() doesn't provide any barrier for the compiler. + */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +#define clear_bit(nr,vaddr) \ + (__builtin_constant_p(nr) ? \ + __constant_coldfire_clear_bit(nr, vaddr) : \ + __generic_coldfire_clear_bit(nr, vaddr)) + +static __inline__ void __constant_coldfire_clear_bit(int nr, + volatile void *vaddr) +{ + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3]; + __asm__ __volatile__ ("bclr %1,(%3)" + : "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p)); +} + +static __inline__ void __generic_coldfire_clear_bit(int nr, + volatile void *vaddr) +{ + __asm__ __volatile__ ("bclr %1,%0" + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3]) + : "d" (nr) + : "memory"); +} +#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr) + +#define test_and_change_bit(nr, vaddr) \ + (__builtin_constant_p(nr) ? \ + __constant_coldfire_test_and_change_bit(nr, vaddr) : \ + __generic_coldfire_test_and_change_bit(nr, vaddr)) + +static __inline__ int __constant_coldfire_test_and_change_bit(int nr, + volatile void *vaddr) +{ + char retval; + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3]; + + __asm__ __volatile__ ("bchg %2,(%4); sne %0" + : "=d" (retval), "=m" (*p) + : "id" (nr & 7), "m" (*p), "a" (p)); + + return retval; +} + +static __inline__ int __generic_coldfire_test_and_change_bit(int nr, + volatile void *vaddr) +{ + char retval; + + __asm__ __volatile__ ("bchg %2,%1; sne %0" + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3]) + : "id" (nr) + : "memory"); + + return retval; +} +#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr) +#define __change_bit(nr, vaddr) change_bit(nr, vaddr) + +#define change_bit(nr,vaddr) \ + (__builtin_constant_p(nr) ? \ + __constant_coldfire_change_bit(nr, vaddr) : \ + __generic_coldfire_change_bit(nr, vaddr)) + +static __inline__ void __constant_coldfire_change_bit(int nr, + volatile void *vaddr) +{ + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3]; + __asm__ __volatile__ ("bchg %1,(%3)" + : "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p)); +} + +static __inline__ void __generic_coldfire_change_bit(int nr, + volatile void *vaddr) +{ + __asm__ __volatile__ ("bchg %1,%0" + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3]) + : "d" (nr) + : "memory"); +} + +static inline int test_bit(int nr, const unsigned long *vaddr) +{ + return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; +} + +static __inline__ unsigned long ffz(unsigned long word) +{ + unsigned long result = 0; + + while (word & 1) { + result++; + word >>= 1; + } + return result; +} + +/* find_next_zero_bit() finds the first zero bit in a bit string of length + * 'size' bits, starting the search at bit 'offset'. 
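+ * (Usage sketch, illustrative: find_next_zero_bit(map, 64, 32) returns
+ * the index of the first clear bit at or above bit 32, or the size (64)
+ * if every remaining bit is set.)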
This is largely based + * on Linus's ALPHA routines. + */ +static __inline__ unsigned long find_next_zero_bit(void *addr, + unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (32-offset); + if (size < 32) + goto found_first; + if (~tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size & ~31UL) { + tmp = *(p++); + if (~tmp) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL >> size; +found_middle: + return result + ffz(tmp); +} + +#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \ + (size), 0) + +/* Ported from included/linux/bitops.h */ +static __inline__ int ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} +#define __ffs(x) (ffs(x) - 1) + +/* find_next_bit - find the next set bit in a memory region + * (from asm-ppc/bitops.h) + */ +static __inline__ unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset) +{ + unsigned int *p = ((unsigned int *) addr) + (offset >> 5); + unsigned int result = offset & ~31UL; + unsigned int tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *p++; + tmp &= ~0UL << offset; + if (size < 32) + goto found_first; + if (tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size >= 32) { + tmp = *p++; + if (tmp != 0) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= ~0UL >> (32 - size); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. 
*/ +found_middle: + return result + __ffs(tmp); +} + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifdef __KERNEL__ + +/* Ported from include/linux/bitops.h */ +static __inline__ int fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +static inline int __fls(int x) +{ + return fls(x) - 1; +} + +#include +#include +#include +#include + +#define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \ + (size), 0) +#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \ + (unsigned long *)(addr)) +#define minix_set_bit(nr, addr) set_bit((nr), \ + (unsigned long *)(addr)) +#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \ + (unsigned long *)(addr)) + +static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr) +{ + int *a = (int *)vaddr; + int mask; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + return ((mask & *a) != 0); +} + +#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \ + (unsigned long *)(addr)) +#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \ + (unsigned long *)(addr)) +#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \ + (unsigned long *)(addr)) +#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \ + (unsigned long *)(addr)) + +static inline int ext2_test_bit(int nr, const void *vaddr) +{ + const unsigned char *p = vaddr; + return (p[nr >> 3] & (1U << (nr & 7))) != 0; +} + +static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size) +{ + const unsigned long *p = vaddr, *addr = vaddr; + int res; + + if (!size) + return 0; + + size = (size >> 5) + ((size & 31) > 0); + while (*p++ == ~0UL) { + if (--size == 0) + return (p - addr) << 5; + } + + --p; + for (res = 0; res < 32; res++) + if (!ext2_test_bit (res, p)) + break; + return (p - addr) * 32 + res; +} + +static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size, + unsigned offset) +{ + const unsigned long *addr = vaddr; + const unsigned long *p = addr + (offset >> 5); + int bit = offset & 31UL, res; + + if (offset >= size) + return size; + + if (bit) { + /* Look for zero in first longword */ + for (res = bit; res < 32; res++) + if (!ext2_test_bit (res, p)) + return (p - addr) * 32 + res; + p++; + } + /* No zero yet, search remaining full bytes for a zero */ + res = ext2_find_first_zero_bit(p, size - 32 * (p - addr)); + return (p - addr) * 32 + res; +} + +#endif /* KERNEL */ + +#endif /* __CF_BITOPS__ */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_cacheflush.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_cacheflush.h new file mode 100644 index 0000000000..49c5c4cca7 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_cacheflush.h @@ -0,0 +1,10 @@ +#ifndef M68K_CF_CACHEFLUSH_H +#define M68K_CF_CACHEFLUSH_H + +#ifdef CONFIG_M5445X +#include "cf_5445x_cacheflush.h" +#else +#include "cf_548x_cacheflush.h" +#endif + +#endif /* M68K_CF_CACHEFLUSH_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_entry.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_entry.h new file mode 100644 index 0000000000..c8f37a4105 --- /dev/null +++ 
b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_entry.h @@ -0,0 +1,146 @@ +#ifndef __CF_M68K_ENTRY_H +#define __CF_M68K_ENTRY_H + +#include +#include +#include +#include +#include + +/* + * Stack layout in 'ret_from_exception': + * + * This allows access to the syscall arguments in registers d1-d5 + * + * 0(sp) - d1 + * 4(sp) - d2 + * 8(sp) - d3 + * C(sp) - d4 + * 10(sp) - d5 + * 14(sp) - a0 + * 18(sp) - a1 + * 1C(sp) - a2 + * 20(sp) - d0 + * 24(sp) - orig_d0 + * 28(sp) - stack adjustment + * 2C(sp) - sr + * 2E(sp) - pc + * 32(sp) - format & vector + * 36(sp) - MMUSR + * 3A(sp) - MMUAR + */ + +/* + * 97/05/14 Andreas: Register %a2 is now set to the current task throughout + * the whole kernel. + */ + +/* the following macro is used when enabling interrupts */ +/* portable version */ +#define ALLOWINT (~0x700) +#define MAX_NOINT_IPL 0 + +#ifdef __ASSEMBLY__ + +#define curptr a2 + +LFLUSH_I_AND_D = 0x00000808 +LSIGTRAP = 5 + +/* process bits for task_struct.ptrace */ +PT_TRACESYS_OFF = 3 +PT_TRACESYS_BIT = 1 +PT_PTRACED_OFF = 3 +PT_PTRACED_BIT = 0 +PT_DTRACE_OFF = 3 +PT_DTRACE_BIT = 2 + +#define SAVE_ALL_INT save_all_int +#define SAVE_ALL_SYS save_all_sys +#define RESTORE_ALL restore_all +/* + * This defines the normal kernel pt-regs layout. + * + * regs a3-a6 and d6-d7 are preserved by C code + * the kernel doesn't mess with usp unless it needs to + */ + +/* + * a -1 in the orig_d0 field signifies + * that the stack frame is NOT for syscall + */ +.macro save_all_int + movel MMUSR,%sp@- + movel MMUAR,%sp@- + clrl %sp@- | stk_adj + pea -1:w | orig d0 + movel %d0,%sp@- | d0 + subal #(8*4), %sp + moveml %d1-%d5/%a0-%a1/%curptr,%sp@ +.endm + +.macro save_all_sys + movel MMUSR,%sp@- + movel MMUAR,%sp@- + clrl %sp@- | stk_adj + movel %d0,%sp@- | orig d0 + movel %d0,%sp@- | d0 + subal #(8*4), %sp + moveml %d1-%d5/%a0-%a1/%curptr,%sp@ +.endm + +.macro restore_all + moveml %sp@,%a0-%a1/%curptr/%d1-%d5 + addal #(8*4), %sp + movel %sp@+,%d0 | d0 + addql #4,%sp | orig d0 + addl %sp@+,%sp | stk_adj + addql #8,%sp | MMUAR & MMUSR + rte +.endm + +#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */ + +#define SAVE_SWITCH_STACK save_switch_stack +#define RESTORE_SWITCH_STACK restore_switch_stack +#define GET_CURRENT(tmp) get_current tmp + +.macro save_switch_stack + subal #(6*4), %sp + moveml %a3-%a6/%d6-%d7,%sp@ +.endm + +.macro restore_switch_stack + moveml %sp@,%a3-%a6/%d6-%d7 + addal #(6*4), %sp +.endm + +.macro get_current reg=%d0 + movel %sp,\reg + andl #-THREAD_SIZE,\reg + movel \reg,%curptr + movel %curptr@,%curptr +.endm + +#else /* C source */ + +#define STR(X) STR1(X) +#define STR1(X) #X + +#define PT_OFF_ORIG_D0 0x24 +#define PT_OFF_FORMATVEC 0x32 +#define PT_OFF_SR 0x2C +#define SAVE_ALL_INT \ + "clrl %%sp@-;" /* stk_adj */ \ + "pea -1:w;" /* orig d0 = -1 */ \ + "movel %%d0,%%sp@-;" /* d0 */ \ + "subal #(8*4),%sp" \ + "moveml %%d1-%%d5/%%a0-%%a2,%%sp@" +#define GET_CURRENT(tmp) \ + "movel %%sp,"#tmp"\n\t" \ + "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \ + "movel "#tmp",%%a2\n\t" + +#endif + +#endif /* __CF_M68K_ENTRY_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_io.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_io.h new file mode 100644 index 0000000000..b2c7a46481 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_io.h @@ -0,0 +1,191 @@ +/* + * linux/include/asm-m68k/cf_io.h + * + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. 
+ * 9/30/08 JKM - Separated Coldfire pieces out from m68k. + */ + +#ifndef __CF_IO__ +#define __CF_IO__ + +#ifdef __KERNEL__ + +#include +#include +#include + +#include + +/* + * These should be valid on any ioremap()ed region + */ +#define readb(addr) in_8(addr) +#define writeb(val,addr) out_8((addr),(val)) +#define readw(addr) in_le16(addr) +#define writew(val,addr) out_le16((addr),(val)) +#define readl(addr) in_le32(addr) +#define writel(val,addr) out_le32((addr),(val)) + +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) + +#ifdef CONFIG_PCI + +/* + * IO space in Coldfire + */ +/*#define HAVE_ARCH_PIO_SIZE 1 +#define PIO_OFFSET 0x00000000UL +#define PIO_RESERVED 0x100000000UL +#define PIO_MASK (PIO_RESERVED - 1) +*/ +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl + +#ifndef CONFIG_COLDFIRE +#define inb(port) in_8(port) +#define outb(val,port) out_8((port),(val)) +#define inw(port) in_le16(port) +#define outw(val,port) out_le16((port),(val)) +#define inl(port) in_le32(port) +#define outl(val,port) out_le32((port),(val)) +#define insb(port, buf, nr) \ + raw_insb((u8 *)(port), (u8 *)(buf), (nr)) +#define outsb(port, buf, nr) \ + raw_outsb((u8 *)(port), (u8 *)(buf), (nr)) +#define insw(port, buf, nr) \ + raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)) +#define outsw(port, buf, nr) \ + raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)) +#define insl(port, buf, nr) \ + raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1) +#define outsl(port, buf, nr) \ + raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1) +#else +#define inb(port) pci_inb(port) +#define outb(val, port) pci_outb((val), (port)) +#define inw(port) pci_inw(port) +#define outw(val, port) pci_outw((val), (port)) +#define insb(a, b, c) \ + pci_insb((volatile unsigned char *)a, (unsigned char *)b, c) +#define insw(a, b, c) \ + pci_insw((volatile unsigned short *)a, (const unsigned short *)b, c) +#define insl(a, b, c) \ + pci_insl((volatile unsigned long *)a, (const unsigned long *)b, c) +#define outsb(a, b, c) \ + pci_outsb((volatile unsigned char *)a, (const unsigned char *)b, c) +#define outsw(a, b, c) \ + pci_outsw((volatile unsigned short *)a, (const unsigned short *)b, c) +#define outsl(a, b, c) \ + pci_outsl((volatile unsigned long *)a, (const unsigned long *)b, c) +#define inl(port) pci_inl(port) +#define outl(val, port) pci_outl((val), (port)) +#endif + +#else +/* no pci */ + +#define inb(port) in_8(port) +#define outb(val, port) out_8((port), (val)) +#define inw(port) in_le16(port) +#define outw(val, port) out_le16((port), (val)) +#define inl(port) in_le32(port) +#define outl(val, port) out_le32((port), (val)) +#define insb(port, buf, nr) \ + raw_insb((u8 *)(port), (u8 *)(buf), (nr)) +#define outsb(port, buf, nr) \ + raw_outsb((u8 *)(port), (u8 *)(buf), (nr)) +#define insw(port, buf, nr) \ + raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)) +#define outsw(port, buf, nr) \ + raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)) +#define insl(port, buf, nr) \ + raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1) +#define outsl(port, buf, nr) \ + raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1) + +#endif /* CONFIG_PCI */ + +#define mmiowb() + +static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} +static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size) +{ + 
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} +static inline void __iomem *ioremap_writethrough(unsigned long physaddr, + unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); +} +static inline void __iomem *ioremap_fullcache(unsigned long physaddr, + unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_FULL_CACHING); +} + +static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) +{ + __builtin_memset((void __force *) addr, val, count); +} +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) +{ + __builtin_memcpy(dst, (void __force *) src, count); +} +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) +{ + __builtin_memcpy((void __force *) dst, src, count); +} + +#define IO_SPACE_LIMIT 0xffffffff + +#endif /* __KERNEL__ */ + +#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1 + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#define __raw_readb(addr) \ + ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; }) +#define __raw_readw(addr) \ + ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; }) +#define __raw_readl(addr) \ + ({ unsigned long __v = (*(volatile unsigned long *) (addr)); __v; }) +#define __raw_writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b)) +#define __raw_writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b)) +#define __raw_writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b)) + +#define memset_io(a, b, c) memset((void *)(a), (b), (c)) +#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) +#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) + +#if !defined(readb) +#define readb(addr) \ + ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; }) +#define readw(addr) \ + ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; }) +#define readl(addr) \ + ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; }) +#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b)) +#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b)) +#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b)) +#endif /* readb */ + +#endif /* _IO_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_page.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_page.h new file mode 100644 index 0000000000..1421f49437 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_page.h @@ -0,0 +1,186 @@ +/* + * linux/include/asm-m68k/cf_page.h + * + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. 
+ * Based on linux/include/asm-m68k/page.h + * + * 10/09/08 JKM: split Coldfire pieces into separate file + */ +#ifndef __CF_PAGE__ +#define __CF_PAGE__ + +#include +#include +#include + +/* Virtual base page location */ +#define PAGE_OFFSET (PAGE_OFFSET_RAW) + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT (13) /* 8K pages */ +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#define THREAD_SIZE PAGE_SIZE + +#ifndef __ASSEMBLY__ +#include +#include + +#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) +#define free_user_page(page, addr) free_page(addr) + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(addr, vaddr, page) \ + do { clear_page(addr); \ + flush_dcache_page(page); \ + } while (0) + +#define copy_user_page(to, from, vaddr, page) \ + do { copy_page(to, from); \ + flush_dcache_page(page); \ + } while (0) + +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd[16]; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((&x)->pmd[0]) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +/* to align the pointer to the (next) page boundary */ +/*Defined in linux/mm.h*/ +/*#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)*/ + +extern unsigned long m68k_memoffset; + +#define WANT_PAGE_VIRTUAL + +extern unsigned long cf_dma_base; +extern unsigned long cf_dma_end; + +/* + * Convert a virt to a phys + */ +static inline unsigned long ___pa(void *vaddr) +{ +#if CONFIG_SDRAM_BASE != PAGE_OFFSET + return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE); +#else + if ((unsigned long)vaddr >= CONFIG_DMA_BASE && + (unsigned long)vaddr < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) { + /* address is in carved out DMA range */ + return ((unsigned long)vaddr - CONFIG_DMA_BASE) + CONFIG_SDRAM_BASE; + } + else if ((unsigned long)vaddr >= PAGE_OFFSET && + (unsigned long)vaddr < (PAGE_OFFSET + CONFIG_SDRAM_SIZE)) { + /* normal mapping */ + return ((unsigned long)vaddr - PAGE_OFFSET) + CONFIG_SDRAM_BASE; + } + + return (unsigned long)vaddr; +#endif +} +#define __pa(vaddr) ___pa((void *)(vaddr)) + +/* + * Convert a phys to a virt + */ +static inline void *__va(unsigned long paddr) +{ +#if CONFIG_SDRAM_BASE != PAGE_OFFSET + return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET); +#else + if (paddr >= cf_dma_base && paddr <= cf_dma_end) { + /* mapped address for DMA */ + return (void *)((paddr - CONFIG_SDRAM_BASE) + CONFIG_DMA_BASE); + } + else if (paddr >= cf_dma_end && + paddr < (CONFIG_SDRAM_BASE + CONFIG_SDRAM_SIZE)) { + /* normal mapping */ + return (void *)((paddr - CONFIG_SDRAM_BASE) + PAGE_OFFSET); + } + return (void *)paddr; +#endif +} + +/* + * NOTE: virtual isn't really correct, actually it should be the offset into the + * memory node, but we have no highmem, so that works for now. + * + * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots + * of the shifts unnecessary. + * + * PFNs are used to map physical pages. So PFN[0] maps to the base phys addr. 
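+ *
+ * Concretely (restating ___pa()/__va() above): an address in the normal
+ * kernel mapping translates as __pa(vaddr) == (vaddr - PAGE_OFFSET) +
+ * CONFIG_SDRAM_BASE, while an address in the carved-out DMA window
+ * translates through CONFIG_DMA_BASE instead; virt_to_pfn(kaddr) is then
+ * simply __pa(kaddr) >> PAGE_SHIFT.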
+ */ +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +extern int m68k_virt_to_node_shift; + +#ifdef CONFIG_SINGLE_MEMORY_CHUNK +#define __virt_to_node(addr) (&pg_data_map[0]) +#else +extern struct pglist_data *pg_data_table[]; + +static inline __attribute_const__ int __virt_to_node_shift(void) +{ + return m68k_virt_to_node_shift; +} + +#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()]) +#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */ + +#define virt_to_page(addr) ({ \ + pfn_to_page(virt_to_pfn(addr)); \ +}) +#define page_to_virt(page) ({ \ + pfn_to_virt(page_to_pfn(page)); \ +}) + +#define pfn_to_page(pfn) ({ \ + unsigned long __pfn = (pfn); \ + struct pglist_data *pgdat; \ + pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \ + pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ +}) +#define page_to_pfn(_page) ({ \ + struct page *__p = (_page); \ + struct pglist_data *pgdat; \ + pgdat = &pg_data_map[page_to_nid(__p)]; \ + ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ +}) + +#define virt_addr_valid(kaddr) ( ((void *)(kaddr) >= (void *)PAGE_OFFSET && \ + (void *)(kaddr) < high_memory) || \ + ((void *)(kaddr) >= (void*)CONFIG_DMA_BASE && \ + (void *)(kaddr) < (void*)(CONFIG_DMA_BASE+CONFIG_DMA_SIZE))) + +#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) + +#endif /* __ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include + +#ifdef CONFIG_VDSO +#define __HAVE_ARCH_GATE_AREA +#endif + +#endif /* __CF_PAGE__ */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgalloc.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgalloc.h new file mode 100644 index 0000000000..de3916762a --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgalloc.h @@ -0,0 +1,107 @@ +/* + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. 
+ */ + +#ifndef M68K_CF_PGALLOC_H +#define M68K_CF_PGALLOC_H +#include +#include +#include +#include +#include + +extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long) pte); +} + +extern const char bad_pmd_string[]; + +extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT); + + if (!page) + return NULL; + + memset((void *)page, 0, PAGE_SIZE); + return (pte_t *) (page); +} + +extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address) +{ + return (pmd_t *) pgd; +} + +#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) +#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) + +#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) + +#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ + (unsigned long)(page_address(page))) + +#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte)) + +#define pmd_pgtable(pmd) pmd_page(pmd) + +static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page, unsigned long address) +{ + __free_page(page); +} + +#define __pmd_free_tlb(tlb, pmd, addr) do { } while (0) + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); + pte_t *pte; + + if (!page) + return NULL; + + pte = kmap(page); +// if (pte) { + clear_page(pte); + __flush_page_to_ram(pte); + flush_tlb_kernel_page(pte); + nocache_page(pte); +// } + kunmap(page); + + return page; +} + +extern inline void pte_free(struct mm_struct *mm, struct page *page) +{ + __free_page(page); +} + +/* + * In our implementation, each pgd entry contains 1 pmd that is never allocated + * or freed. pgd_present is always 1, so this should never be called. -NL + */ +#define pmd_free(mm, pmd) BUG() + +extern inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_page((unsigned long) pgd); +} + +extern inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *new_pgd; + + new_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_NOWARN); + if (!new_pgd) + return NULL; + memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); + memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT); + return new_pgd; +} + +#define pgd_populate(mm, pmd, pte) BUG() + +#endif /* M68K_CF_PGALLOC_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgtable.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgtable.h new file mode 100644 index 0000000000..65668381d6 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_pgtable.h @@ -0,0 +1,360 @@ +/* + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + */ + +#ifndef _CF_PGTABLE_H +#define _CF_PGTABLE_H + +#include +#include + +#ifndef __ASSEMBLY__ +#include +#include + +/* For virtual address to physical address conversion */ +#define VTOP(addr) __pa(addr) +#define PTOV(addr) __va(addr) + + +#endif /* !__ASSEMBLY__ */ + +/* Page protection values within PTE. */ + +/* MMUDR bits, in proper place. 
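+   That is, each CF_PAGE_* value in this first group already sits at the
+   bit position the ColdFire MMU Data Register (MMUDR) expects, so a PTE
+   can be handed to the hardware without shifting; the MMUTR-related bits
+   further down need the CF_PAGE_MMUTR_SHIFT adjustment instead.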
*/ +#define CF_PAGE_LOCKED (0x00000002) +#define CF_PAGE_EXEC (0x00000004) +#define CF_PAGE_WRITABLE (0x00000008) +#define CF_PAGE_READABLE (0x00000010) +#define CF_PAGE_SYSTEM (0x00000020) +#define CF_PAGE_COPYBACK (0x00000040) +#define CF_PAGE_NOCACHE (0x00000080) + +#define CF_CACHEMASK (~0x00000040) +#define CF_PAGE_MMUDR_MASK (0x000000fe) + +#define _PAGE_NOCACHE030 (CF_PAGE_NOCACHE) + +/* MMUTR bits, need shifting down. */ +#define CF_PAGE_VALID (0x00000400) +#define CF_PAGE_SHARED (0x00000800) + +#define CF_PAGE_MMUTR_MASK (0x00000c00) +#define CF_PAGE_MMUTR_SHIFT (10) +#define CF_ASID_MMU_SHIFT (2) + +/* Fake bits, not implemented in CF, will get masked out before + hitting hardware, and might go away altogether once this port is + complete. */ +#if PAGE_SHIFT < 13 +#error COLDFIRE Error: Pages must be at least 8k in size +#endif +#define CF_PAGE_ACCESSED (0x00001000) +#define CF_PAGE_FILE (0x00000200) +#define CF_PAGE_DIRTY (0x00000001) + +#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */ +#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */ +#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */ +#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */ +#define _DESCTYPE_MASK 0x003 +#define _CACHEMASK040 (~0x060) +#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */ + + +/* Externally used page protection values. */ +#define _PAGE_PRESENT (CF_PAGE_VALID) +#define _PAGE_ACCESSED (CF_PAGE_ACCESSED) +#define _PAGE_DIRTY (CF_PAGE_DIRTY) +#define _PAGE_READWRITE (CF_PAGE_WRITABLE \ + | CF_PAGE_READABLE \ + | CF_PAGE_SHARED \ + | CF_PAGE_SYSTEM) + +/* Compound page protection values. */ +#define PAGE_NONE __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED) + +#define PAGE_SHARED __pgprot(CF_PAGE_VALID \ + | CF_PAGE_READABLE \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_ACCESSED) + +#define PAGE_INIT __pgprot(CF_PAGE_VALID \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_READABLE \ + | CF_PAGE_EXEC \ + | CF_PAGE_SYSTEM \ + | CF_PAGE_SHARED) + +#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_READABLE \ + | CF_PAGE_EXEC \ + | CF_PAGE_SYSTEM \ + | CF_PAGE_SHARED \ + | CF_PAGE_ACCESSED) + +#define PAGE_COPY __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_READABLE) + +/* + * Page protections for initialising protection_map. See mm/mmap.c + * for use. In general, the bit positions are xwr, and P-items are + * private, the S-items are shared. 
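+ *
+ * For example, __P101 (private, readable and executable but not writable)
+ * expands to CF_PAGE_VALID | CF_PAGE_ACCESSED | CF_PAGE_READABLE | CF_PAGE_EXEC.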
+ */ + +#define __P000 PAGE_NONE +#define __P100 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_EXEC) +#define __P010 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_ACCESSED) +#define __P110 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_EXEC) +#define __P001 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_READABLE) +#define __P101 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_READABLE \ + | CF_PAGE_EXEC) +#define __P011 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_READABLE \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_ACCESSED) +#define __P111 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_READABLE \ + | CF_PAGE_EXEC) + +#define __S000 PAGE_NONE +#define __S100 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_EXEC) +#define __S010 PAGE_SHARED +#define __S110 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_EXEC) +#define __S001 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_READABLE) +#define __S101 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_READABLE \ + | CF_PAGE_EXEC) +#define __S011 PAGE_SHARED +#define __S111 __pgprot(CF_PAGE_VALID \ + | CF_PAGE_ACCESSED \ + | CF_PAGE_READABLE \ + | CF_PAGE_WRITABLE \ + | CF_PAGE_EXEC) + +#define PTE_MASK PAGE_MASK +#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY) + +#ifndef __ASSEMBLY__ + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +#define pmd_set(pmdp, ptep) do {} while (0) + +static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) +{ + pgd_val(*pgdp) = virt_to_phys(pmdp); +} + +#define __pte_page(pte) \ + ((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET)) +#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd))) + +extern inline int pte_none(pte_t pte) +{ + return !pte_val(pte); +} +extern inline int pte_present(pte_t pte) +{ + return pte_val(pte) & CF_PAGE_VALID; +} +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_val(*ptep) = 0; +} + +#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) +#define pte_page(pte) virt_to_page(__pte_page(pte)) + +extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); } +#define pmd_none(pmd) pmd_none2(&(pmd)) +extern inline int pmd_bad2(pmd_t *pmd) { return 0; } +#define pmd_bad(pmd) pmd_bad2(&(pmd)) +#define pmd_present(pmd) (!pmd_none2(&(pmd))) +extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; } + +extern inline int pgd_none(pgd_t pgd) { return 0; } +extern inline int pgd_bad(pgd_t pgd) { return 0; } +extern inline int pgd_present(pgd_t pgd) { return 1; } +extern inline void pgd_clear(pgd_t *pgdp) {} + + +#define pte_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ + __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \ + __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \ + __FILE__, __LINE__, pgd_val(e)) + + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not... 
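+ * Each accessor below simply tests or updates one CF_PAGE_* bit in the
+ * pte value.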
+ * [we have the full set here even if they don't change from m68k] + */ +extern inline int pte_read(pte_t pte) + { return pte_val(pte) & CF_PAGE_READABLE; } +extern inline int pte_write(pte_t pte) + { return pte_val(pte) & CF_PAGE_WRITABLE; } +extern inline int pte_exec(pte_t pte) + { return pte_val(pte) & CF_PAGE_EXEC; } +extern inline int pte_dirty(pte_t pte) + { return pte_val(pte) & CF_PAGE_DIRTY; } +extern inline int pte_young(pte_t pte) + { return pte_val(pte) & CF_PAGE_ACCESSED; } +extern inline int pte_file(pte_t pte) + { return pte_val(pte) & CF_PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } + + +extern inline pte_t pte_wrprotect(pte_t pte) + { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; } +extern inline pte_t pte_rdprotect(pte_t pte) + { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; } +extern inline pte_t pte_exprotect(pte_t pte) + { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; } +extern inline pte_t pte_mkclean(pte_t pte) + { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; } +extern inline pte_t pte_mkold(pte_t pte) + { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; } +extern inline pte_t pte_mkwrite(pte_t pte) + { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; } +extern inline pte_t pte_mkread(pte_t pte) + { pte_val(pte) |= CF_PAGE_READABLE; return pte; } +extern inline pte_t pte_mkexec(pte_t pte) + { pte_val(pte) |= CF_PAGE_EXEC; return pte; } +extern inline pte_t pte_mkdirty(pte_t pte) + { pte_val(pte) |= CF_PAGE_DIRTY; return pte; } +extern inline pte_t pte_mkyoung(pte_t pte) + { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; } +extern inline pte_t pte_mknocache(pte_t pte) + { pte_val(pte) = (pte_val(pte) & CF_CACHEMASK) | CF_PAGE_NOCACHE; return pte; } +extern inline pte_t pte_mkcache(pte_t pte) + { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + + +#define swapper_pg_dir kernel_pg_dir +extern pgd_t kernel_pg_dir[PTRS_PER_PGD]; + +/* Find an entry in a pagetable directory. */ +#define pgd_index(address) ((address) >> PGDIR_SHIFT) + +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +/* Find an entry in a kernel pagetable directory. */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* Find an entry in the second-level pagetable. */ +extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address) +{ + return (pmd_t *) pgd; +} + +/* Find an entry in the third-level pagetable. */ +#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \ + __pte_offset(address)) + +/* Disable caching for page at given kernel virtual address. */ +static inline void nocache_page(void *vaddr) +{ + pgd_t *dir; + pmd_t *pmdp; + pte_t *ptep; + unsigned long addr = (unsigned long)vaddr; + + dir = pgd_offset_k(addr); + pmdp = pmd_offset(dir, addr); + ptep = pte_offset_kernel(pmdp, addr); + *ptep = pte_mknocache(*ptep); +} + +/* Enable caching for page at given kernel virtual address.
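+ * Mirrors nocache_page() above: walks the pgd/pmd to the PTE that maps
+ * the address and applies pte_mkcache(), clearing CF_PAGE_NOCACHE.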
*/ +static inline void cache_page(void *vaddr) +{ + pgd_t *dir; + pmd_t *pmdp; + pte_t *ptep; + unsigned long addr = (unsigned long)vaddr; + + dir = pgd_offset_k(addr); + pmdp = pmd_offset(dir, addr); + ptep = pte_offset_kernel(pmdp, addr); + *ptep = pte_mkcache(*ptep); +} + +#define PTE_FILE_MAX_BITS 21 +#define PTE_FILE_SHIFT 11 + +static inline unsigned long pte_to_pgoff(pte_t pte) +{ + return pte_val(pte) >> PTE_FILE_SHIFT; +} + +static inline pte_t pgoff_to_pte(unsigned pgoff) +{ + pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE); + return pte; +} + +/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */ +#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \ + (offset << PTE_FILE_SHIFT) }) +#define __swp_type(x) ((x).val & 0xFF) +#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) (__pte((x).val)) + +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) + +#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) + \ + __pte_offset(address)) +#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address) +#define pte_unmap(pte) kunmap(pte) +#define pte_unmap_nested(pte) kunmap(pte) + +#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) + + +#endif /* !__ASSEMBLY__ */ +#endif /* !_CF_PGTABLE_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_raw_io.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_raw_io.h new file mode 100644 index 0000000000..246a45568d --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_raw_io.h @@ -0,0 +1,181 @@ +/* + * linux/include/asm-m68k/cf_raw_io.h + * + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * 09/30/08 JKM: split Coldfire pieces into separate file + */ +#ifndef __CF_RAW_IO__ +#define __CF_RAW_IO__ + +#ifdef __KERNEL__ + +#include + +/* Values for nocacheflag and cmode */ +#define IOMAP_FULL_CACHING 0 +#define IOMAP_NOCACHE_SER 1 +#define IOMAP_NOCACHE_NONSER 2 +#define IOMAP_WRITETHROUGH 3 + +extern void iounmap(void __iomem *addr); + +extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, + int cacheflag); +extern void __iounmap(void *addr, unsigned long size); + + +/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates + * two accesses to memory, which may be undesirable for some devices. 
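+ * With the temporaries, each accessor below performs exactly one volatile
+ * access to the MMIO location.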
+ */ +#define in_8(addr) \ + ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; }) +#define in_be16(addr) \ + ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; }) +#define in_be32(addr) \ + ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; }) +#define in_le16(addr) \ + ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; }) +#define in_le32(addr) \ + ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; }) + +#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b)) +#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w)) +#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l)) +#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w)) +#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l)) + + +#ifdef CONFIG_PCI +/* pci */ +unsigned char pci_inb(long addr); +unsigned short pci_inw(long addr); +unsigned long pci_inl(long addr); + +void pci_outb(unsigned char val, long addr); +void pci_outw(unsigned short val, long addr); +void pci_outl(unsigned long val, long addr); + +void pci_insb(volatile unsigned char *addr, + unsigned char *buf, int len); +void pci_insw(volatile unsigned short *addr, + unsigned short *buf, int len); +void pci_insl(volatile unsigned long *addr, + unsigned long *buf, int len); + +void pci_outsb(volatile unsigned char *addr, + const unsigned char *buf, int len); +void pci_outsw(volatile unsigned short *addr, + const unsigned short *buf, int len); +void pci_outsl(volatile unsigned long *addr, + const unsigned long *buf, int len); + +unsigned short pci_raw_inw(long addr); +unsigned long pci_raw_inl(long addr); +void pci_raw_outw(unsigned short val, long addr); +void pci_raw_outl(unsigned long val, long addr); + +#define raw_inb(port) pci_inb((long)((volatile unsigned char *)(port))) +#define raw_inw(port) pci_raw_inw((long)((volatile unsigned short *)(port))) +#define raw_inl(port) pci_raw_inl((long)((volatile unsigned long *)(port))) + +#define raw_outb(val, port) \ + pci_outb((val), (long)((volatile unsigned char *)(port))) +#define raw_outw(val, port) \ + pci_raw_outw((val), (long)((volatile unsigned short *)(port))) +#define raw_outl(val, port) \ + pci_raw_outl((val), (long)((volatile unsigned long *)(port))) + +#define swap_inw(port) pci_inw((long)((volatile unsigned short *)(port))) +#define swap_outw(val, port) \ + pci_outw((val), (long)((volatile unsigned short *)(port))) + +#else +/* non-pci */ +#define raw_inb in_8 +#define raw_inw in_be16 +#define raw_inl in_be32 + +#define raw_outb(val,port) out_8((port),(val)) +#define raw_outw(val,port) out_be16((port),(val)) +#define raw_outl(val,port) out_be32((port),(val)) + +#define swap_inw(port) in_le16((port)) +#define swap_outw(val,port) out_le16((port),(val)) +#endif + +static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + *buf++ = in_8(port); +} + +static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf, + unsigned int len) +{ + unsigned int i; + + for (i = 0; i < len; i++) + out_8(port, *buf++); +} + +static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++) + *buf++ = raw_inw(port); +} + +static inline void raw_outsw(volatile u16 *port, const u16 *buf, + unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++, buf++) + raw_outw(*buf, port); +} + +static inline void raw_insl(volatile u32 *port, u32 
*buf, unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++) + *buf++ = raw_inl(port); +} + +static inline void raw_outsl(volatile u32 *port, const u32 *buf, + unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++, buf++) + raw_outl(*buf, port); +} + +static inline void raw_insw_swapw(volatile u16 *port, u16 *buf, + unsigned int nr) +{ +#ifdef UNDEF + unsigned int i; + + for (i = 0; i < nr; i++) + *buf++ = in_le16(port); +#endif +} + +static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf, + unsigned int nr) +{ +#ifdef UNDEF + unsigned int i; + + for (i = 0; i < nr; i++, buf++) + out_le16(port, *buf); +#endif +} + +#endif /* __KERNEL__ */ + +#endif /* __CF_RAW_IO__ */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_tlbflush.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_tlbflush.h new file mode 100644 index 0000000000..ae253d4eef --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_tlbflush.h @@ -0,0 +1,62 @@ +/* + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + */ +#ifndef M68K_CF_TLBFLUSH_H +#define M68K_CF_TLBFLUSH_H + +#include + +/* Flush all userspace mappings. */ +static inline void flush_tlb_all(void) +{ + preempt_disable(); + *MMUOR = MMUOR_CNL; + preempt_enable(); +} + +/* Clear user TLB entries within the context named in mm */ +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + *MMUOR = MMUOR_CNL; + preempt_enable(); +} + +/* Flush a single TLB page. */ +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long addr) +{ + preempt_disable(); + *MMUOR = MMUOR_CNL; + preempt_enable(); +} +/* Flush a range of pages from TLB. */ + +static inline void flush_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + preempt_disable(); + *MMUOR = MMUOR_CNL; + preempt_enable(); +} + +/* Flush kernel page from TLB. */ +static inline void flush_tlb_kernel_page(void *addr) +{ + preempt_disable(); + *MMUOR = MMUOR_CNL; + preempt_enable(); +} + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_all(); +} + +extern inline void flush_tlb_pgtables(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +#endif /* M68K_CF_TLBFLUSH_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_uaccess.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_uaccess.h new file mode 100644 index 0000000000..d667097dd0 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_uaccess.h @@ -0,0 +1,379 @@ +/* + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + */ +#ifndef __M68K_CF_UACCESS_H +#define __M68K_CF_UACCESS_H + +/* + * User space memory access functions + */ + +/* The "moves" command is not available in the CF instruction set. */ +#include +#include +#include +#include +#include + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* We let the MMU do all checking */ +#define access_ok(type, addr, size) 1 + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. 
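+ * (The fixup code is collected in the .fixup section and the fault/fixup
+ * address pairs in __ex_table, as the asm statements below lay out.)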
This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +extern int __put_user_bad(void); +extern int __get_user_bad(void); + +#define __put_user_asm(res, x, ptr, bwl, reg, err) \ +asm volatile ("\n" \ + "1: move."#bwl" %2,%1\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: moveq.l %3,%0\n" \ + " jra 2b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .long 2b,10b\n" \ + " .previous" \ + : "+d" (res), "=m" (*(ptr)) \ + : #reg (x), "i" (err)) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + */ + +#define __put_user(x, ptr) \ +({ \ + typeof(*(ptr)) __pu_val = (x); \ + int __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (sizeof (*(ptr))) { \ + case 1: \ + __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ + break; \ + case 2: \ + __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \ + break; \ + case 4: \ + __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ + break; \ + case 8: \ + { \ + const void __user *__pu_ptr = (ptr); \ + asm volatile ("\n" \ + "1: move.l %2,(%1)+\n" \ + "2: move.l %R2,(%1)\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: movel %3,%0\n" \ + " jra 3b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .long 2b,10b\n" \ + " .long 3b,10b\n" \ + " .previous" \ + : "+d" (__pu_err), "+a" (__pu_ptr) \ + : "r" (__pu_val), "i" (-EFAULT) \ + : "memory"); \ + break; \ + } \ + default: \ + __pu_err = __put_user_bad(); \ + break; \ + } \ + __pu_err; \ +}) +#define put_user(x, ptr) __put_user(x, ptr) + + +#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ + type __gu_val; \ + asm volatile ("\n" \ + "1: move."#bwl" %2,%1\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: move.l %3,%0\n" \ + " subl %1,%1\n" \ + " jra 2b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .previous" \ + : "+d" (res), "=&" #reg (__gu_val) \ + : "m" (*(ptr)), "i" (err)); \ + (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \ +}) + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ + break; \ + case 2: \ + __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \ + break; \ + case 4: \ + __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ + break; \ +/* case 8: disabled because gcc-4.1 has a broken typeof \ + { \ + const void *__gu_ptr = (ptr); \ + u64 __gu_val; \ + asm volatile ("\n" \ + "1: move.l (%2)+,%1\n" \ + "2: move.l (%2),%R1\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: move.l %3,%0\n" \ + " subl %1,%1\n" \ + " subl %R1,%R1\n" \ + " jra 3b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .long 2b,10b\n" \ + " .previous" \ + : "+d" (__gu_err), "=&r" (__gu_val), \ + "+a" (__gu_ptr) \ + : "i" (-EFAULT) \ + : "memory"); \ + (x) = (typeof(*(ptr)))__gu_val; \ + break; \ + } */ \ + default: \ + __gu_err = __get_user_bad(); \ + break; \ + } \ + __gu_err; \ +}) +#define get_user(x, ptr) __get_user(x, ptr) + +unsigned long __generic_copy_from_user(void *to, const void 
__user *from, + unsigned long n); +unsigned long __generic_copy_to_user(void __user *to, const void *from, + unsigned long n); + +#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ + asm volatile ("\n" \ + "1: move."#s1" (%2)+,%3\n" \ + " move."#s1" %3,(%1)+\n" \ + "2: move."#s2" (%2)+,%3\n" \ + " move."#s2" %3,(%1)+\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + "3: move."#s3" (%2)+,%3\n" \ + " move."#s3" %3,(%1)+\n" \ + " .endif\n" \ + "4:\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10f\n" \ + " .long 2b,20f\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + " .long 3b,30f\n" \ + " .endif\n" \ + " .previous\n" \ + "\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: clr."#s1" (%1)+\n" \ + "20: clr."#s2" (%1)+\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + "30: clr."#s3" (%1)+\n" \ + " .endif\n" \ + " moveq.l #"#n",%0\n" \ + " jra 4b\n" \ + " .previous\n" \ + : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ + : : "memory") + +static __always_inline unsigned long +__constant_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned long res = 0, tmp; + + switch (n) { + case 1: + __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); + break; + case 2: + __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, + d, 2); + break; + case 3: + __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); + break; + case 4: + __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, + r, 4); + break; + case 5: + __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); + break; + case 6: + __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); + break; + case 7: + __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); + break; + case 8: + __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); + break; + case 9: + __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); + break; + case 10: + __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w); + break; + case 12: + __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l); + break; + default: + /* we limit the inlined version to 3 moves */ + return __generic_copy_from_user(to, from, n); + } + + return res; +} + +#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \ + asm volatile ("\n" \ + " move."#s1" (%2)+,%3\n" \ + "11: move."#s1" %3,(%1)+\n" \ + "12: move."#s2" (%2)+,%3\n" \ + "21: move."#s2" %3,(%1)+\n" \ + "22:\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + " move."#s3" (%2)+,%3\n" \ + "31: move."#s3" %3,(%1)+\n" \ + "32:\n" \ + " .endif\n" \ + "4:\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 11b,5f\n" \ + " .long 12b,5f\n" \ + " .long 21b,5f\n" \ + " .long 22b,5f\n" \ + " .ifnc \""#s3"\",\"\"\n" \ + " .long 31b,5f\n" \ + " .long 32b,5f\n" \ + " .endif\n" \ + " .previous\n" \ + "\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "5: moveq.l #"#n",%0\n" \ + " jra 4b\n" \ + " .previous\n" \ + : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \ + : : "memory") + +static __always_inline unsigned long +__constant_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + unsigned long res = 0, tmp; + + switch (n) { + case 1: + __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); + break; + case 2: + __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2); + break; + case 3: + __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); + break; + case 4: + __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); + break; + case 5: + __constant_copy_to_user_asm(res, to, 
from, tmp, 5, l, b,); + break; + case 6: + __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,); + break; + case 7: + __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b); + break; + case 8: + __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,); + break; + case 9: + __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b); + break; + case 10: + __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w); + break; + case 12: + __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l); + break; + default: + /* we limit the inlined version to 3 moves */ + return __generic_copy_to_user(to, from, n); + } + + return res; +} + +#define __copy_from_user(to, from, n) \ +(__builtin_constant_p(n) ? \ + __constant_copy_from_user(to, from, n) : \ + __generic_copy_from_user(to, from, n)) + +#define __copy_to_user(to, from, n) \ +(__builtin_constant_p(n) ? \ + __constant_copy_to_user(to, from, n) : \ + __generic_copy_to_user(to, from, n)) + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +#define copy_from_user(to, from, n) __copy_from_user(to, from, n) +#define copy_to_user(to, from, n) __copy_to_user(to, from, n) + +long strncpy_from_user(char *dst, const char __user *src, long count); +long strnlen_user(const char __user *src, long n); +unsigned long __clear_user(void __user *to, unsigned long n); + +#define clear_user __clear_user + +#define strlen_user(str) strnlen_user(str, 32767) + +#endif /* __M68K_CF_UACCESS_H */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_virtconvert.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_virtconvert.h new file mode 100644 index 0000000000..ed95887da2 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cf_virtconvert.h @@ -0,0 +1,58 @@ +/* + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + */ +#ifndef __CF_VIRTCONVERT__ +#define __CF_VIRTCONVERT__ + +/* + * Macros used for converting between virtual and physical mappings. + * + * Coldfire Specific + */ + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* + * Change virtual addresses to physical addresses and vice versa. + */ +static inline unsigned long virt_to_phys(void *address) +{ + return __pa(address); +} + +static inline void *phys_to_virt(unsigned long address) +{ + return __va(address); +} + +/* Permanent address of a page. */ +#ifdef CONFIG_SINGLE_MEMORY_CHUNK +#define page_to_phys(page) \ + __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT)) +#else +#define page_to_phys(_page) ({ \ + struct page *__page = _page; \ + page_to_pfn(__page) << PAGE_SHIFT; \ +}) +#endif + +/* + * IO bus memory addresses are 1:1 with the physical address. + */ +#ifdef CONFIG_PCI +#define virt_to_bus(a) ((a) + PCI_DMA_BASE) +#define bus_to_virt(a) ((a) - PCI_DMA_BASE) +#else +#define virt_to_bus(a) (a) +#define bus_to_virt(a) (a) +#endif + +#endif /* __KERNEL__ */ +#endif /* __CF_VIRTCONVERT__ */ diff --git a/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cfcache.h b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cfcache.h new file mode 100644 index 0000000000..730ee97770 --- /dev/null +++ b/target/linux/coldfire/files-2.6.31/arch/m68k/include/asm/cfcache.h @@ -0,0 +1,114 @@ +/* + * include/asm-m68k/cfcache.h - Coldfire Cache Controller + * + * Kurt Mahan kmahan@freescale.com + * + * Copyright Freescale Semiconductor, Inc.
2007 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef CF_CFCACHE_H +#define CF_CFCACHE_H + +/* + * CACR Cache Control Register + */ +#define CF_CACR_DEC (0x80000000) /* Data Cache Enable */ +#define CF_CACR_DW (0x40000000) /* Data default Write-protect */ +#define CF_CACR_DESB (0x20000000) /* Data Enable Store Buffer */ +#define CF_CACR_DPI (0x10000000) /* Data Disable CPUSHL Invalidate */ +#define CF_CACR_DHLCK (0x08000000) /* 1/2 Data Cache Lock Mode */ +#define CF_CACR_DDCM_00 (0x00000000) /* Cacheable writethrough imprecise */ +#define CF_CACR_DDCM_01 (0x02000000) /* Cacheable copyback */ +#define CF_CACR_DDCM_10 (0x04000000) /* Noncacheable precise */ +#define CF_CACR_DDCM_11 (0x06000000) /* Noncacheable imprecise */ +#define CF_CACR_DCINVA (0x01000000) /* Data Cache Invalidate All */ +#define CF_CACR_DDSP (0x00800000) /* Data default supervisor-protect */ +#define CF_CACR_IVO (0x00100000) /* Invalidate only */ +#define CF_CACR_BEC (0x00080000) /* Branch Cache Enable */ +#define CF_CACR_BCINVA (0x00040000) /* Branch Cache Invalidate All */ +#define CF_CACR_IEC (0x00008000) /* Instruction Cache Enable */ +#define CF_CACR_SPA (0x00004000) /* Search by Physical Address */ +#define CF_CACR_DNFB (0x00002000) /* Default cache-inhibited fill buf */ +#define CF_CACR_IDPI (0x00001000) /* Instr Disable CPUSHL Invalidate */ +#define CF_CACR_IHLCK (0x00000800) /* 1/2 Instruction Cache Lock Mode */ +#define CF_CACR_IDCM (0x00000400) /* Noncacheable Instr default mode */ +#define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */ +#define CF_CACR_IDSP (0x00000080) /* Ins default supervisor-protect */ +#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */ + +#ifdef CONFIG_M5445X +/* + * M5445x Cache Configuration + * - cache line size is 16 bytes + * - cache is 4-way set associative + * - each cache has 256 sets (64k / 16bytes / 4way) + * - I-Cache size is 16KB + * - D-Cache size is 16KB + */ +#define ICACHE_SIZE 0x4000 /* instruction - 16k */ +#define DCACHE_SIZE 0x4000 /* data - 16k */ + +#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */ +#define CACHE_SETS 0x0100 /* 256 sets */ +#define CACHE_WAYS 0x0004 /* 4 way */ + +#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \ + CF_CACR_BCINVA+ \ + CF_CACR_ICINVA) + +#ifndef CONFIG_M5445X_DISABLE_CACHE +#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \ + CF_CACR_BEC+ \ + CF_CACR_IEC+ \ + CF_CACR_DESB+ \ + CF_CACR_EUSP) +#else +/* cache disabled for testing */ +#define CACHE_INITIAL_MODE (CF_CACR_EUSP) +#endif /* CONFIG_M5445X_DISABLE_CACHE */ + +#elif defined(CONFIG_M547X_8X) +/* + * * M547x/M548x Cache Configuration + * * - cache line size is 16 bytes + * * - cache is 4-way set associative + * * - each cache has 512 sets (128k / 16bytes / 4way) + * * - I-Cache size is 32KB + * * - D-Cache size is 32KB + * */ +#define ICACHE_SIZE 0x8000 /* instruction - 32k */ +#define DCACHE_SIZE 0x8000 /* data - 32k */ + +#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */ +#define CACHE_SETS 0x0200 /* 512 sets */ +#define CACHE_WAYS 0x0004 /* 4 way */ + +/* in for the old cpushl caching code */ +#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)< + +#ifndef __CF_MMU_H__ +#define __CF_MMU_H__ + + +#define MMU_BASE 0xE8000000 + + +#define MMUCR (MMU_BASE+0x00) +#define MMUCR_ASMN 1 +#define MMUCR_ASM (1< + * + * This program is free software; you can 
redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + */ +#ifndef __MCFFEC_H__ +#define __MCFFEC_H__ +#include +#include +#include +#include +#include +#include +#include + +/* The FEC stores dest/src/type, data, and checksum for receive packets. + */ +#define PKT_MAXBUF_SIZE 1518 + +/* + * The 5270/5271/5280/5282/532x RX control register also contains maximum frame + * size bits. Other FEC hardware does not, so we need to take that into + * account when setting it. + */ +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ + defined(CONFIG_M537x) || defined(CONFIG_M5301x) || \ + defined(CONFIG_M5445X) +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) +#else +#define OPT_FRAME_SIZE 0 +#endif +/* + * Some hardware gets its MAC address out of local flash memory. + * If this is non-zero then assume it is the address to get the MAC from. + */ +#if defined(CONFIG_NETtel) +#define FEC_FLASHMAC 0xf0006006 +#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) +#define FEC_FLASHMAC 0xf0006000 +#elif defined(CONFIG_CANCam) +#define FEC_FLASHMAC 0xf0020000 +#elif defined(CONFIG_M5272C3) +#define FEC_FLASHMAC (0xffe04000 + 4) +#elif defined(CONFIG_MOD5272) +#define FEC_FLASHMAC 0xffc0406b +#else +#define FEC_FLASHMAC 0 +#endif + +#ifdef CONFIG_FEC_DMA_USE_SRAM +#define TX_RING_SIZE 8 /* Must be power of two */ +#define TX_RING_MOD_MASK 7 /* for this to work */ +#else +#define TX_RING_SIZE 16 /* Must be power of two */ +#define TX_RING_MOD_MASK 15 /* for this to work */ +#endif + +typedef struct fec { + unsigned long fec_reserved0; + unsigned long fec_ievent; /* Interrupt event reg */ + unsigned long fec_imask; /* Interrupt mask reg */ + unsigned long fec_reserved1; + unsigned long fec_r_des_active; /* Receive descriptor reg */ + unsigned long fec_x_des_active; /* Transmit descriptor reg */ + unsigned long fec_reserved2[3]; + unsigned long fec_ecntrl; /* Ethernet control reg */ + unsigned long fec_reserved3[6]; + unsigned long fec_mii_data; /* MII manage frame reg */ + unsigned long fec_mii_speed; /* MII speed control reg */ + unsigned long fec_reserved4[7]; + unsigned long fec_mib_ctrlstat; /* MIB control/status reg */ + unsigned long fec_reserved5[7]; + unsigned long fec_r_cntrl; /* Receive control reg */ + unsigned long fec_reserved6[15]; + unsigned long fec_x_cntrl; /* Transmit Control reg */ + unsigned long fec_reserved7[7]; + unsigned long fec_addr_low; /* Low 32bits MAC address */ + unsigned long fec_addr_high; /* High 16bits MAC address */ + unsigned long fec_opd; /* Opcode + Pause duration */ + unsigned long fec_reserved8[10]; + unsigned long fec_hash_table_high; /* High 32bits hash table */ + unsigned long fec_hash_table_low; /* Low 32bits hash table */ + unsigned long fec_grp_hash_table_high;/* High 32bits hash table */ + unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */ +
unsigned long fec_reserved9[7]; + unsigned long fec_x_wmrk; /* FIFO transmit water mark */ + unsigned long fec_reserved10; + unsigned long fec_r_bound; /* FIFO receive bound reg */ + unsigned long fec_r_fstart; /* FIFO receive start reg */ + unsigned long fec_reserved11[11]; + unsigned long fec_r_des_start; /* Receive descriptor ring */ + unsigned long fec_x_des_start; /* Transmit descriptor ring */ + unsigned long fec_r_buff_size; /* Maximum receive buff size */ +} fec_t; + +/* + * Define the buffer descriptor structure. + */ +typedef struct bufdesc { + unsigned short cbd_sc; /* Control and status info */ + unsigned short cbd_datlen; /* Data length */ + unsigned long cbd_bufaddr; /* Buffer address */ +} cbd_t; + +/* Forward declarations of some structures to support different PHYs + */ +typedef struct { + uint mii_data; + void (*funct)(uint mii_reg, struct net_device *dev); +} phy_cmd_t; + +typedef struct { + uint id; + char *name; + + const phy_cmd_t *config; + const phy_cmd_t *startup; + const phy_cmd_t *ack_int; + const phy_cmd_t *shutdown; +} phy_info_t; + +/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct fec_enet_private { + /* Hardware registers of the FEC device */ + volatile fec_t *hwp; + + struct net_device *netdev; + struct platform_device *pdev; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + unsigned char *tx_bounce[TX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + ushort skb_cur; + ushort skb_dirty; + + /* CPM dual port RAM relative addresses. + */ + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ + cbd_t *dirty_tx; /* The ring entries to be free()ed. */ + uint tx_full; + /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ + spinlock_t hw_lock; + + /* hold while accessing the mii_list_t() elements */ + spinlock_t mii_lock; + struct mii_bus *mdio_bus; + struct phy_device *phydev; + + uint phy_id; + uint phy_id_done; + uint phy_status; + uint phy_speed; + phy_info_t const *phy; + struct work_struct phy_task; + volatile fec_t *phy_hwp; + + uint sequence_done; + uint mii_phy_task_queued; + + uint phy_addr; + + int index; + int opened; + int link; + int old_link; + int full_duplex; + int duplex; + int speed; + int msg_enable; +}; + +struct fec_platform_private { + struct platform_device *pdev; + + unsigned long quirks; + int num_slots; /* Slots on controller */ + struct fec_enet_private *fep_host[0]; /* Pointers to hosts */ +}; + +#endif
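As a reading aid for the cur_tx/dirty_tx comment above, here is a minimal sketch of how the empty-versus-full ambiguity is resolved. The BD_TX_READY mask and the helper name are illustrative stand-ins, not definitions from this header; the driver's real ready-bit constant lives in its descriptor-flag definitions.

/* Sketch only: BD_TX_READY stands in for the driver's real
 * "descriptor still owned by the controller" flag in cbd_sc. */
#define BD_TX_READY 0x8000

static int fec_tx_ring_full(const struct fec_enet_private *fep)
{
	/* cur_tx == dirty_tx is ambiguous: the ring is either completely
	 * empty or completely full.  The descriptor that dirty_tx points
	 * at breaks the tie: if the controller still owns it, every slot
	 * is in flight, so the ring is full. */
	if (fep->cur_tx != fep->dirty_tx)
		return 0;
	return (fep->dirty_tx->cbd_sc & BD_TX_READY) != 0;
}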