/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */

#include <linux/sched.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/bundle.h>
#include <asm/types.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <public/event_channel.h>
#include <public/xen.h>
#include <linux/event.h>
#include <xen/domain.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>

#define HVM_BUFFERED_IO_RANGE_NR 1

struct hvm_buffered_io_range {
    unsigned long start_addr;
    unsigned long length;
};

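/* The only buffered range so far: the legacy VGA window 0xA0000 - 0xBFFFF. */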
static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
static struct hvm_buffered_io_range
*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
{
    &buffered_stdvga_range
};

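/*
 * Try to queue a guest write into the buffered-I/O ring shared with the
 * device model.  Returns 1 if the request was queued and needs no reply,
 * 0 if it must go through the ordinary synchronous ioreq path.
 */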
int hvm_buffered_io_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    spinlock_t  *buffered_io_lock;
    buffered_iopage_t *buffered_iopage =
        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
    unsigned long tmp_write_pointer = 0;
    int i;

    /* Only writes can be buffered; reads need a synchronous reply. */
    if ( p->dir == IOREQ_READ )
        return 0;

    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
                                     hvm_buffered_io_ranges[i]->length )
            break;
    }

    if ( i == HVM_BUFFERED_IO_RANGE_NR )
        return 0;

    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
    spin_lock(buffered_io_lock);

    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
        /*
         * The queue is full; send this packet through the normal path
         * instead.  NOTE: the unsigned arithmetic handles write_pointer
         * wraparound correctly.
         */
        spin_unlock(buffered_io_lock);
        return 0;
    }

    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;

    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));

    /* Make the queued ioreq_t visible before updating write_pointer. */
    wmb();
    buffered_iopage->write_pointer++;

    spin_unlock(buffered_io_lock);

    return 1;
}


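/*
 * Forward a low-MMIO access to the device model through the per-vcpu
 * ioreq page, using the buffered ring as a fast path for writes.
 */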
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL)
        panic_domain(NULL, "bad shared page: %lx\n", (unsigned long)vio);
    p = &vio->vp_ioreq;
    p->addr = pa;
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    p->data_is_ptr = 0;
    p->type = 1;                /* IOREQ_TYPE_COPY */
    p->df = 0;

    p->io_count++;
    if (hvm_buffered_io_intercept(p)) {
        /* The write was queued; complete the request without waiting. */
        p->state = STATE_IORESP_READY;
        vmx_io_assist(v);
        return;
    }

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;
}
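
/*
 * Inverse of the ia64 sparse I/O port encoding, where each 4KB page of
 * the 64MB legacy I/O window carries one 4-byte port group:
 * port = ((offset >> 12) << 2) | (offset & 3).
 */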
#define TO_LEGACY_IO(pa)  (((pa)>>12<<2)|((pa)&0x3))

static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (vio == NULL)
        panic_domain(NULL, "bad shared page\n");
    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);   /* offset within the window */
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    p->data_is_ptr = 0;
    p->type = 0;                /* IOREQ_TYPE_PIO */
    p->df = 0;

    p->io_count++;

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;

#ifdef DEBUG_PCI
    /* Braces matter here: without them the "else" binds to the inner "if". */
    if (dir == IOREQ_WRITE) {
        if (p->addr == 0xcf8UL)
            printk("Write 0xcf8, with val [0x%lx]\n", p->data);
    } else {
        if (p->addr == 0xcfcUL)
            printk("Read 0xcfc, with val [0x%lx]\n", p->data);
    }
#endif /* DEBUG_PCI */
}

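/*
 * Dispatch an emulated access by the I/O classification of the target
 * guest frame: virtual LSAPIC and IOSAPIC accesses are handled inside
 * the hypervisor, everything else is forwarded to the device model.
 */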
static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    struct virtual_platform_def *v_plat;
    unsigned long iot;

    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
    v_plat = vmx_vcpu_get_plat(vcpu);

    perfc_incra(vmx_mmio_access, iot >> 56);
    switch (iot) {
    case GPFN_PIB:
        if (ma != 4)    /* 4 == UC memory attribute */
            panic_domain(NULL, "Access PIB not with UC attribute\n");

        if (!dir)
            vlsapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = vlsapic_read(vcpu, src_pa, s);
        break;
    case GPFN_GFW:
        break;
    case GPFN_IOSAPIC:
        if (!dir)
            viosapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = viosapic_read(vcpu, src_pa, s);
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    default:
        panic_domain(NULL, "Bad I/O access\n");
        break;
    }
}

/*
 * dir: 1 == read, 0 == write
 * inst_type: 0 == integer, 1 == floating point
 */
#define SL_INTEGER     0    /* integer store/load */
#define SL_FLOATING    1    /* floating-point store/load */

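/*
 * Decode the instruction at the faulting IP, emulate its memory access
 * (including any base-register post-update), and step the guest past it.
 */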
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir=0, inst_type;
    size_t size;
    u64 data, post_update, slot1a, slot1b, temp;
    INST64 inst;
    regs = vcpu_regs(vcpu);
    if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
        /* If the instruction fetch fails, return and let the guest retry. */
        return;
    }
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot)
        inst.inst = bundle.slot0;
    else if (slot == 1) {
        /* Slot 1 straddles the two bundle words; reassemble its 41 bits. */
        slot1a = bundle.slot1a;
        slot1b = bundle.slot1b;
        inst.inst = slot1a + (slot1b << 18);
    }
    else if (slot == 2)
        inst.inst = bundle.slot2;

    // Integer Load/Store
    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = SL_INTEGER;
        size = (inst.M1.x6 & 0x3);              /* low 2 bits: log2(size) */
        if ((inst.M1.x6 >> 2) > 0xb) {          /* store */
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
        } else if ((inst.M1.x6 >> 2) < 0xb) {   /* load */
            dir = IOREQ_READ;
        }
    }
    // Integer Load + Reg update
    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;
        size = (inst.M2.x6 & 0x3);
        /* Post-update form: r3 += r2. */
        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
        vcpu_get_gr_nat(vcpu, inst.M2.r2, &post_update);
        temp += post_update;
        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
    }
    // Integer Load/Store + Imm update
    else if (inst.M3.major == 5) {
        inst_type = SL_INTEGER;
        size = (inst.M3.x6 & 0x3);
        if ((inst.M5.x6 >> 2) > 0xb) {          /* store */
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
            post_update = (inst.M5.i << 7) + inst.M5.imm7;
            if (inst.M5.s)
                temp -= post_update;
            else
                temp += post_update;
            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
        } else if ((inst.M3.x6 >> 2) < 0xb) {   /* load */
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
            post_update = (inst.M3.i << 7) + inst.M3.imm7;
            if (inst.M3.s)
                temp -= post_update;
            else
                temp += post_update;
            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
        }
    }
    // Floating-point spill
    else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B &&
             inst.M9.m == 0 && inst.M9.x == 0) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
        /* Write high word.
           FIXME: this is a kludge!  */
        v.u.bits[1] &= 0x3ffff;
        mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
        data = v.u.bits[0];
        size = 3;
    }
    // Floating-point spill + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        post_update = (inst.M10.i << 7) + inst.M10.imm7;
        if (inst.M10.s)
            temp -= post_update;
        else
            temp += post_update;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

        /* Write high word.
           FIXME: this is a kludge!  */
        v.u.bits[1] &= 0x3ffff;
        mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
        data = v.u.bits[0];
        size = 3;
    }
    // Floating-point stf8 + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        size = 3;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        data = v.u.bits[0];     /* Significand.  */
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        post_update = (inst.M10.i << 7) + inst.M10.imm7;
        if (inst.M10.s)
            temp -= post_update;
        else
            temp += post_update;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
    }
//    else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
//        inst_type=SL_FLOATING;  //fp
//        dir=IOREQ_READ;
//        size=3;     //ldfd
//    }
    // lfetch - do not perform accesses.
    else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c && inst.M15.x6 <= 0x2f) {
        /* Only the base-register post-update is architecturally visible. */
        vcpu_get_gr_nat(vcpu, inst.M15.r3, &temp);
        post_update = (inst.M15.i << 7) + inst.M15.imm7;
        if (inst.M15.s)
            temp -= post_update;
        else
            temp += post_update;
        vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

        vmx_vcpu_increment_iip(vcpu);
        return;
    }
    // Floating-point Load Pair + Imm ldfp8 M12
    else if (inst.M12.major == 6 && inst.M12.m == 1 &&
             inst.M12.x == 1 && inst.M12.x6 == 1) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_READ;
        size = 8;               /* two 8-byte loads */
        mmio_access(vcpu, padr, &data, size, ma, dir);
        v.u.bits[0] = data;
        v.u.bits[1] = 0x1003E;  /* exponent of a register-format integer */
        vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
        padr += 8;
        mmio_access(vcpu, padr, &data, size, ma, dir);
        v.u.bits[0] = data;
        v.u.bits[1] = 0x1003E;
        vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
        padr += 8;
        vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
        vmx_vcpu_increment_iip(vcpu);
        return;
    }
    else {
        panic_domain(NULL,
            "This memory access instr can't be emulated: %lx pc=%lx\n",
            inst.inst, regs->cr_iip);
    }

    size = 1 << size;           /* decode log2(size) into bytes */
    if (dir == IOREQ_WRITE) {
        mmio_access(vcpu, padr, &data, size, ma, dir);
    } else {
        mmio_access(vcpu, padr, &data, size, ma, dir);
        if (inst_type == SL_INTEGER) {
            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else {
            panic_domain(NULL, "ldfd emulation is not supported yet\n");
/*            switch(inst.M6.f1){

            case 6:
                regs->f6=(struct ia64_fpreg)data;
            case 7:
                regs->f7=(struct ia64_fpreg)data;
            case 8:
                regs->f8=(struct ia64_fpreg)data;
            case 9:
                regs->f9=(struct ia64_fpreg)data;
            case 10:
                regs->f10=(struct ia64_fpreg)data;
            case 11:
                regs->f11=(struct ia64_fpreg)data;
            default :
                ia64_ldfs(inst.M6.f1,&data);
            }
*/
        }
    }
    vmx_vcpu_increment_iip(vcpu);
}