/* * Virtualized CPU functions * * Copyright (C) 2004 Hewlett-Packard Co. * Dan Magenheimer (dan.magenheimer@hp.com) * */ #include #include #include #include #include #include #include #include #ifdef CONFIG_VTI #include #endif // CONFIG_VTI typedef union { struct ia64_psr ia64_psr; unsigned long i64; } PSR; //typedef struct pt_regs REGS; //typedef struct domain VCPU; // this def for vcpu_regs won't work if kernel stack is present #define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs) #define PSCB(x,y) VCPU(x,y) #define PSCBX(x,y) x->arch.y #define TRUE 1 #define FALSE 0 #define IA64_PTA_SZ_BIT 2 #define IA64_PTA_VF_BIT 8 #define IA64_PTA_BASE_BIT 15 #define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT) #define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT) #define STATIC #ifdef PRIVOP_ADDR_COUNT struct privop_addr_count privop_addr_counter[PRIVOP_COUNT_NINSTS] = { { "=ifa", { 0 }, { 0 }, 0 }, { "thash", { 0 }, { 0 }, 0 }, 0 }; extern void privop_count_addr(unsigned long addr, int inst); #define PRIVOP_COUNT_ADDR(regs,inst) privop_count_addr(regs->cr_iip,inst) #else #define PRIVOP_COUNT_ADDR(x,y) do {} while (0) #endif unsigned long dtlb_translate_count = 0; unsigned long tr_translate_count = 0; unsigned long phys_translate_count = 0; unsigned long vcpu_verbose = 0; #define verbose(a...) 
do {if (vcpu_verbose) printf(a);} while(0) extern TR_ENTRY *match_tr(VCPU *vcpu, unsigned long ifa); extern TR_ENTRY *match_dtlb(VCPU *vcpu, unsigned long ifa); /************************************************************************** VCPU general register access routines **************************************************************************/ UINT64 vcpu_get_gr(VCPU *vcpu, unsigned reg) { REGS *regs = vcpu_regs(vcpu); UINT64 val; if (!reg) return 0; getreg(reg,&val,0,regs); // FIXME: handle NATs later return val; } // returns: // IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault // IA64_NO_FAULT otherwise IA64FAULT vcpu_set_gr(VCPU *vcpu, unsigned reg, UINT64 value) { REGS *regs = vcpu_regs(vcpu); long sof = (regs->cr_ifs) & 0x7f; if (!reg) return IA64_ILLOP_FAULT; if (reg >= sof + 32) return IA64_ILLOP_FAULT; setreg(reg,value,0,regs); // FIXME: handle NATs later return IA64_NO_FAULT; } /************************************************************************** VCPU privileged application register access routines **************************************************************************/ IA64FAULT vcpu_set_ar(VCPU *vcpu, UINT64 reg, UINT64 val) { if (reg == 44) return (vcpu_set_itc(vcpu,val)); else if (reg == 27) return (IA64_ILLOP_FAULT); else if (reg == 24) printf("warning: setting ar.eflg is a no-op; no IA-32 support\n"); else if (reg > 7) return (IA64_ILLOP_FAULT); else PSCB(vcpu,krs[reg]) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_get_ar(VCPU *vcpu, UINT64 reg, UINT64 *val) { if (reg == 24) printf("warning: getting ar.eflg is a no-op; no IA-32 support\n"); else if (reg > 7) return (IA64_ILLOP_FAULT); else *val = PSCB(vcpu,krs[reg]); return IA64_NO_FAULT; } /************************************************************************** VCPU processor status register access routines **************************************************************************/ void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode) { /* only do 
something if mode changes */ if (!!newmode ^ !!PSCB(vcpu,metaphysical_mode)) { if (newmode) set_metaphysical_rr0(); else if (PSCB(vcpu,rrs[0]) != -1) set_one_rr(0, PSCB(vcpu,rrs[0])); PSCB(vcpu,metaphysical_mode) = newmode; } } IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu) { vcpu_set_metaphysical_mode(vcpu,TRUE); return IA64_NO_FAULT; } IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24) { struct ia64_psr psr, imm, *ipsr; REGS *regs = vcpu_regs(vcpu); //PRIVOP_COUNT_ADDR(regs,_RSM); // TODO: All of these bits need to be virtualized // TODO: Only allowed for current vcpu __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory"); ipsr = (struct ia64_psr *)®s->cr_ipsr; imm = *(struct ia64_psr *)&imm24; // interrupt flag if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0; if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 0; // interrupt collection flag //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0; // just handle psr.up and psr.pp for now if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH)) return (IA64_ILLOP_FAULT); if (imm.dfh) ipsr->dfh = 0; if (imm.dfl) ipsr->dfl = 0; if (imm.pp) { ipsr->pp = 0; psr.pp = 0; } if (imm.up) { ipsr->up = 0; psr.up = 0; } if (imm.sp) { ipsr->sp = 0; psr.sp = 0; } if (imm.be) ipsr->be = 0; if (imm.dt) vcpu_set_metaphysical_mode(vcpu,TRUE); __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory"); return IA64_NO_FAULT; } extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu); #define SPURIOUS_VECTOR 0xf IA64FAULT vcpu_set_psr_dt(VCPU *vcpu) { vcpu_set_metaphysical_mode(vcpu,FALSE); return IA64_NO_FAULT; } IA64FAULT vcpu_set_psr_i(VCPU *vcpu) { PSCB(vcpu,interrupt_delivery_enabled) = 1; PSCB(vcpu,interrupt_collection_enabled) = 1; return IA64_NO_FAULT; } IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24) { struct ia64_psr psr, imm, *ipsr; REGS *regs = vcpu_regs(vcpu); UINT64 mask, enabling_interrupts = 0; 
//PRIVOP_COUNT_ADDR(regs,_SSM); // TODO: All of these bits need to be virtualized __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory"); imm = *(struct ia64_psr *)&imm24; ipsr = (struct ia64_psr *)®s->cr_ipsr; // just handle psr.sp,pp and psr.i,ic (and user mask) for now mask = IA64_PSR_PP|IA64_PSR_SP|IA64_PSR_I|IA64_PSR_IC|IA64_PSR_UM | IA64_PSR_DT|IA64_PSR_DFL|IA64_PSR_DFH; if (imm24 & ~mask) return (IA64_ILLOP_FAULT); if (imm.dfh) ipsr->dfh = 1; if (imm.dfl) ipsr->dfl = 1; if (imm.pp) { ipsr->pp = 1; psr.pp = 1; } if (imm.sp) { ipsr->sp = 1; psr.sp = 1; } if (imm.i) { if (!PSCB(vcpu,interrupt_delivery_enabled)) { //printf("vcpu_set_psr_sm: psr.ic 0->1 "); enabling_interrupts = 1; } PSCB(vcpu,interrupt_delivery_enabled) = 1; } if (imm.ic) PSCB(vcpu,interrupt_collection_enabled) = 1; // TODO: do this faster if (imm.mfl) { ipsr->mfl = 1; psr.mfl = 1; } if (imm.mfh) { ipsr->mfh = 1; psr.mfh = 1; } if (imm.ac) { ipsr->ac = 1; psr.ac = 1; } if (imm.up) { ipsr->up = 1; psr.up = 1; } if (imm.be) { printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n"); return (IA64_ILLOP_FAULT); } if (imm.dt) vcpu_set_metaphysical_mode(vcpu,FALSE); __asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory"); #if 0 // now done with deliver_pending_interrupts if (enabling_interrupts) { if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) { //printf("with interrupts pending\n"); return IA64_EXTINT_VECTOR; } //else printf("but nothing pending\n"); } #endif if (enabling_interrupts && vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) PSCB(vcpu,pending_interruption) = 1; return IA64_NO_FAULT; } IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UINT64 val) { struct ia64_psr psr, newpsr, *ipsr; REGS *regs = vcpu_regs(vcpu); UINT64 enabling_interrupts = 0; // TODO: All of these bits need to be virtualized __asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory"); newpsr = *(struct ia64_psr *)&val; ipsr = (struct ia64_psr *)®s->cr_ipsr; // just handle psr.up and psr.pp for now //if 
(val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP)) return (IA64_ILLOP_FAULT); // however trying to set other bits can't be an error as it is in ssm if (newpsr.dfh) ipsr->dfh = 1; if (newpsr.dfl) ipsr->dfl = 1; if (newpsr.pp) { ipsr->pp = 1; psr.pp = 1; } if (newpsr.up) { ipsr->up = 1; psr.up = 1; } if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; } if (newpsr.i) { if (!PSCB(vcpu,interrupt_delivery_enabled)) enabling_interrupts = 1; PSCB(vcpu,interrupt_delivery_enabled) = 1; } if (newpsr.ic) PSCB(vcpu,interrupt_collection_enabled) = 1; if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; } if (newpsr.mfh) { ipsr->mfh = 1; psr.mfh = 1; } if (newpsr.ac) { ipsr->ac = 1; psr.ac = 1; } if (newpsr.up) { ipsr->up = 1; psr.up = 1; } if (newpsr.dt && newpsr.rt) vcpu_set_metaphysical_mode(vcpu,FALSE); else vcpu_set_metaphysical_mode(vcpu,TRUE); if (newpsr.be) { printf("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n"); return (IA64_ILLOP_FAULT); } //__asm__ __volatile (";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory"); #if 0 // now done with deliver_pending_interrupts if (enabling_interrupts) { if (vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) return IA64_EXTINT_VECTOR; } #endif if (enabling_interrupts && vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR) PSCB(vcpu,pending_interruption) = 1; return IA64_NO_FAULT; } IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT64 *pval) { UINT64 psr; struct ia64_psr newpsr; // TODO: This needs to return a "filtered" view of // the psr, not the actual psr. Probably the psr needs // to be a field in regs (in addition to ipsr). 
__asm__ __volatile ("mov %0=psr;;" : "=r"(psr) :: "memory"); newpsr = *(struct ia64_psr *)&psr; if (newpsr.cpl == 2) newpsr.cpl = 0; if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1; else newpsr.i = 0; if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1; else newpsr.ic = 0; *pval = *(unsigned long *)&newpsr; return IA64_NO_FAULT; } BOOLEAN vcpu_get_psr_ic(VCPU *vcpu) { return !!PSCB(vcpu,interrupt_collection_enabled); } BOOLEAN vcpu_get_psr_i(VCPU *vcpu) { return !!PSCB(vcpu,interrupt_delivery_enabled); } UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr) { UINT64 dcr = PSCBX(vcpu,dcr); PSR psr = {0}; //printf("*** vcpu_get_ipsr_int_state (0x%016lx)...",prevpsr); psr.i64 = prevpsr; psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1; psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1; psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled); psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled); psr.ia64_psr.bn = PSCB(vcpu,banknum); psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1; if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! 
fool domain // psr.pk = 1; //printf("returns 0x%016lx...",psr.i64); return psr.i64; } /************************************************************************** VCPU control register access routines **************************************************************************/ IA64FAULT vcpu_get_dcr(VCPU *vcpu, UINT64 *pval) { extern unsigned long privop_trace; //privop_trace=0; //verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip)); // Reads of cr.dcr on Xen always have the sign bit set, so // a domain can differentiate whether it is running on SP or not *pval = PSCBX(vcpu,dcr) | 0x8000000000000000L; return (IA64_NO_FAULT); } IA64FAULT vcpu_get_iva(VCPU *vcpu, UINT64 *pval) { *pval = PSCBX(vcpu,iva) & ~0x7fffL; return (IA64_NO_FAULT); } IA64FAULT vcpu_get_pta(VCPU *vcpu, UINT64 *pval) { *pval = PSCB(vcpu,pta); return (IA64_NO_FAULT); } IA64FAULT vcpu_get_ipsr(VCPU *vcpu, UINT64 *pval) { //REGS *regs = vcpu_regs(vcpu); //*pval = regs->cr_ipsr; *pval = PSCB(vcpu,ipsr); return (IA64_NO_FAULT); } IA64FAULT vcpu_get_isr(VCPU *vcpu, UINT64 *pval) { *pval = PSCB(vcpu,isr); return (IA64_NO_FAULT); } IA64FAULT vcpu_get_iip(VCPU *vcpu, UINT64 *pval) { //REGS *regs = vcpu_regs(vcpu); //*pval = regs->cr_iip; *pval = PSCB(vcpu,iip); return (IA64_NO_FAULT); } IA64FAULT vcpu_get_ifa(VCPU *vcpu, UINT64 *pval) { UINT64 val = PSCB(vcpu,ifa); REGS *regs = vcpu_regs(vcpu); PRIVOP_COUNT_ADDR(regs,_GET_IFA); *pval = val; return (IA64_NO_FAULT); } unsigned long vcpu_get_rr_ps(VCPU *vcpu,UINT64 vadr) { ia64_rr rr; rr.rrval = PSCB(vcpu,rrs)[vadr>>61]; return(rr.ps); } unsigned long vcpu_get_rr_rid(VCPU *vcpu,UINT64 vadr) { ia64_rr rr; rr.rrval = PSCB(vcpu,rrs)[vadr>>61]; return(rr.rid); } unsigned long vcpu_get_itir_on_fault(VCPU *vcpu, UINT64 ifa) { ia64_rr rr; rr.rrval = 0; rr.ps = vcpu_get_rr_ps(vcpu,ifa); rr.rid = vcpu_get_rr_rid(vcpu,ifa); return (rr.rrval); } IA64FAULT vcpu_get_itir(VCPU *vcpu, UINT64 *pval) { UINT64 val = PSCB(vcpu,itir); *pval = val; return (IA64_NO_FAULT); } 
IA64FAULT vcpu_get_iipa(VCPU *vcpu, UINT64 *pval) { UINT64 val = PSCB(vcpu,iipa); // SP entry code does not save iipa yet nor does it get // properly delivered in the pscb printf("*** vcpu_get_iipa: cr.iipa not fully implemented yet!!\n"); *pval = val; return (IA64_NO_FAULT); } IA64FAULT vcpu_get_ifs(VCPU *vcpu, UINT64 *pval) { //PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs; //*pval = PSCB(vcpu,regs).cr_ifs; *pval = PSCB(vcpu,ifs); PSCB(vcpu,incomplete_regframe) = 0; return (IA64_NO_FAULT); } IA64FAULT vcpu_get_iim(VCPU *vcpu, UINT64 *pval) { UINT64 val = PSCB(vcpu,iim); *pval = val; return (IA64_NO_FAULT); } IA64FAULT vcpu_get_iha(VCPU *vcpu, UINT64 *pval) { //return vcpu_thash(vcpu,PSCB(vcpu,ifa),pval); UINT64 val = PSCB(vcpu,iha); REGS *regs = vcpu_regs(vcpu); PRIVOP_COUNT_ADDR(regs,_THASH); *pval = val; return (IA64_NO_FAULT); } IA64FAULT vcpu_set_dcr(VCPU *vcpu, UINT64 val) { extern unsigned long privop_trace; //privop_trace=1; // Reads of cr.dcr on SP always have the sign bit set, so // a domain can differentiate whether it is running on SP or not // Thus, writes of DCR should ignore the sign bit //verbose("vcpu_set_dcr: called\n"); PSCBX(vcpu,dcr) = val & ~0x8000000000000000L; return (IA64_NO_FAULT); } IA64FAULT vcpu_set_iva(VCPU *vcpu, UINT64 val) { PSCBX(vcpu,iva) = val & ~0x7fffL; return (IA64_NO_FAULT); } IA64FAULT vcpu_set_pta(VCPU *vcpu, UINT64 val) { if (val & IA64_PTA_LFMT) { printf("*** No support for VHPT long format yet!!\n"); return (IA64_ILLOP_FAULT); } if (val & (0x3f<<9)) /* reserved fields */ return IA64_RSVDREG_FAULT; if (val & 2) /* reserved fields */ return IA64_RSVDREG_FAULT; PSCB(vcpu,pta) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_ipsr(VCPU *vcpu, UINT64 val) { PSCB(vcpu,ipsr) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_isr(VCPU *vcpu, UINT64 val) { PSCB(vcpu,isr) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_iip(VCPU *vcpu, UINT64 val) { PSCB(vcpu,iip) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_increment_iip(VCPU *vcpu) 
{ REGS *regs = vcpu_regs(vcpu); struct ia64_psr *ipsr = (struct ia64_psr *)®s->cr_ipsr; if (ipsr->ri == 2) { ipsr->ri=0; regs->cr_iip += 16; } else ipsr->ri++; return (IA64_NO_FAULT); } IA64FAULT vcpu_set_ifa(VCPU *vcpu, UINT64 val) { PSCB(vcpu,ifa) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_itir(VCPU *vcpu, UINT64 val) { PSCB(vcpu,itir) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_iipa(VCPU *vcpu, UINT64 val) { // SP entry code does not save iipa yet nor does it get // properly delivered in the pscb printf("*** vcpu_set_iipa: cr.iipa not fully implemented yet!!\n"); PSCB(vcpu,iipa) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_ifs(VCPU *vcpu, UINT64 val) { //REGS *regs = vcpu_regs(vcpu); PSCB(vcpu,ifs) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_iim(VCPU *vcpu, UINT64 val) { PSCB(vcpu,iim) = val; return IA64_NO_FAULT; } IA64FAULT vcpu_set_iha(VCPU *vcpu, UINT64 val) { PSCB(vcpu,iha) = val; return IA64_NO_FAULT; } /************************************************************************** VCPU interrupt control register access routines **************************************************************************/ void vcpu_pend_unspecified_interrupt(VCPU *vcpu) { PSCB(vcpu,pending_interruption) = 1; } void vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector) { if (vector & ~0xff) { printf("vcpu_pend_interrupt: bad vector\n"); return; } #ifdef CONFIG_VTI if ( VMX_DOMAIN(vcpu) ) { set_bit(vector,VPD_CR(vcpu,irr)); } else #endif // CONFIG_VTI { /* if (!test_bit(vector,PSCB(vcpu,delivery_mask))) return; */ if (test_bit(vector,PSCBX(vcpu,irr))) { //printf("vcpu_pend_interrupt: overrun\n"); } set_bit(vector,PSCBX(vcpu,irr)); PSCB(vcpu,pending_interruption) = 1; } } void early_tick(VCPU *vcpu) { UINT64 *p = &PSCBX(vcpu,irr[3]); printf("vcpu_check_pending: about to deliver early tick\n"); printf("&irr[0]=%p, irr[0]=0x%lx\n",p,*p); } #define IA64_TPR_MMI 0x10000 #define IA64_TPR_MIC 0x000f0 /* checks to see if a VCPU has any unmasked pending interrupts * if so, 
returns the highest, else returns SPURIOUS_VECTOR */ /* NOTE: Since this gets called from vcpu_get_ivr() and the * semantics of "mov rx=cr.ivr" ignore the setting of the psr.i bit, * this routine also ignores pscb.interrupt_delivery_enabled * and this must be checked independently; see vcpu_deliverable interrupts() */ UINT64 vcpu_check_pending_interrupts(VCPU *vcpu) { UINT64 *p, *q, *r, bits, bitnum, mask, i, vector; p = &PSCBX(vcpu,irr[3]); /* q = &PSCB(vcpu,delivery_mask[3]); */ r = &PSCBX(vcpu,insvc[3]); for (i = 3; ; p--, q--, r--, i--) { bits = *p /* & *q */; if (bits) break; // got a potential interrupt if (*r) { // nothi
/* 
 * The MIT License (MIT)
 *
 * Copyright (c) 2019 Ha Thach (tinyusb.org)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * This file is part of the TinyUSB stack.
 */

#include "tusb_option.h"

#if (TUSB_OPT_HOST_ENABLED && CFG_TUH_HUB)

#include "usbh.h"
#include "usbh_classdriver.h"
#include "hub.h"

//--------------------------------------------------------------------+
// MACRO CONSTANT TYPEDEF
//--------------------------------------------------------------------+
// Per-hub state, one slot per attached hub (see hub_data[] / get_itf()).
typedef struct
{
  uint8_t itf_num;       // bInterfaceNumber of the hub interface
  uint8_t ep_in;         // interrupt IN (status change) endpoint address
  uint8_t port_count;    // number of downstream ports, from the hub descriptor
  uint8_t status_change; // data from status change interrupt endpoint

  // Most recent GET_STATUS response for the port currently being serviced
  hub_port_status_response_t port_status;
} hub_interface_t;

CFG_TUSB_MEM_SECTION static hub_interface_t hub_data[CFG_TUH_HUB];
CFG_TUSB_MEM_SECTION TU_ATTR_ALIGNED(4) static uint8_t _hub_buffer[sizeof(descriptor_hub_desc_t)];

TU_ATTR_ALWAYS_INLINE
static inline hub_interface_t* get_itf(uint8_t dev_addr)
{
  // Hub device addresses are assigned after the CFG_TUH_DEVICE_MAX regular
  // device addresses, so map the address to a zero-based hub_data[] slot.
  uint8_t const slot = (uint8_t) (dev_addr - 1 - CFG_TUH_DEVICE_MAX);
  return &hub_data[slot];
}

#if CFG_TUSB_DEBUG
// Human-readable names for hub port features, indexed by HUB_FEATURE_* value
// (debug logging only).
static char const* const _hub_feature_str[] =
{
  [HUB_FEATURE_PORT_CONNECTION          ] = "PORT_CONNECTION",
  [HUB_FEATURE_PORT_ENABLE              ] = "PORT_ENABLE",
  [HUB_FEATURE_PORT_SUSPEND             ] = "PORT_SUSPEND",
  [HUB_FEATURE_PORT_OVER_CURRENT        ] = "PORT_OVER_CURRENT",
  [HUB_FEATURE_PORT_RESET               ] = "PORT_RESET",
  [HUB_FEATURE_PORT_POWER               ] = "PORT_POWER",
  [HUB_FEATURE_PORT_LOW_SPEED           ] = "PORT_LOW_SPEED",
  [HUB_FEATURE_PORT_CONNECTION_CHANGE   ] = "PORT_CONNECTION_CHANGE",
  [HUB_FEATURE_PORT_ENABLE_CHANGE       ] = "PORT_ENABLE_CHANGE",
  [HUB_FEATURE_PORT_SUSPEND_CHANGE      ] = "PORT_SUSPEND_CHANGE",
  [HUB_FEATURE_PORT_OVER_CURRENT_CHANGE ] = "PORT_OVER_CURRENT_CHANGE",
  [HUB_FEATURE_PORT_RESET_CHANGE        ] = "PORT_RESET_CHANGE",
  [HUB_FEATURE_PORT_TEST                ] = "PORT_TEST",
  [HUB_FEATURE_PORT_INDICATOR           ] = "PORT_INDICATOR",
};
#endif

//--------------------------------------------------------------------+
// HUB
//--------------------------------------------------------------------+
// Send a class-specific CLEAR_FEATURE request addressed to one hub port.
// complete_cb is invoked when the control transfer finishes.
bool hub_port_clear_feature(uint8_t hub_addr, uint8_t hub_port, uint8_t feature, tuh_control_complete_cb_t complete_cb)
{
  tusb_control_request_t const req =
  {
    .bmRequestType_bit = { .recipient = TUSB_REQ_RCPT_OTHER,   // "other" = hub port
                           .type      = TUSB_REQ_TYPE_CLASS,
                           .direction = TUSB_DIR_OUT },
    .bRequest = HUB_REQUEST_CLEAR_FEATURE,
    .wValue   = feature,
    .wIndex   = hub_port,
    .wLength  = 0
  };

  TU_LOG2("HUB Clear Feature: %s, addr = %u port = %u\r\n", _hub_feature_str[feature], hub_addr, hub_port);
  TU_ASSERT( tuh_control_xfer(hub_addr, &req, NULL, complete_cb) );
  return true;
}

// Send a class-specific SET_FEATURE request addressed to one hub port.
// complete_cb is invoked when the control transfer finishes.
bool hub_port_set_feature(uint8_t hub_addr, uint8_t hub_port, uint8_t feature, tuh_control_complete_cb_t complete_cb)
{
  tusb_control_request_t const req =
  {
    .bmRequestType_bit = { .recipient = TUSB_REQ_RCPT_OTHER,   // "other" = hub port
                           .type      = TUSB_REQ_TYPE_CLASS,
                           .direction = TUSB_DIR_OUT },
    .bRequest = HUB_REQUEST_SET_FEATURE,
    .wValue   = feature,
    .wIndex   = hub_port,
    .wLength  = 0
  };

  TU_LOG2("HUB Set Feature: %s, addr = %u port = %u\r\n", _hub_feature_str[feature], hub_addr, hub_port);
  TU_ASSERT( tuh_control_xfer(hub_addr, &req, NULL, complete_cb) );
  return true;
}

// Reset a downstream port: this is just SET_FEATURE(PORT_RESET) on that port.
bool hub_port_reset(uint8_t hub_addr, uint8_t hub_port, tuh_control_complete_cb_t complete_cb)
{
  bool const ok = hub_port_set_feature(hub_addr, hub_port, HUB_FEATURE_PORT_RESET, complete_cb);
  return ok;
}

// Query a port's status + change bits with a class-specific GET_STATUS.
// resp receives the 4-byte response; complete_cb runs when the transfer ends.
bool hub_port_get_status(uint8_t hub_addr, uint8_t hub_port, void* resp, tuh_control_complete_cb_t complete_cb)
{
  tusb_control_request_t const req =
  {
    .bmRequestType_bit = { .recipient = TUSB_REQ_RCPT_OTHER,   // "other" = hub port
                           .type      = TUSB_REQ_TYPE_CLASS,
                           .direction = TUSB_DIR_IN },
    .bRequest = HUB_REQUEST_GET_STATUS,
    .wValue   = 0,
    .wIndex   = hub_port,
    .wLength  = 4  // wPortStatus + wPortChange
  };

  TU_LOG2("HUB Get Port Status: addr = %u port = %u\r\n", hub_addr, hub_port);
  TU_ASSERT( tuh_control_xfer( hub_addr, &req, resp, complete_cb) );
  return true;
}

//--------------------------------------------------------------------+
// CLASS-USBH API (don't require to verify parameters)
//--------------------------------------------------------------------+
// Initialize the hub class driver: mark every hub interface slot unused.
void hub_init(void)
{
  tu_memclr(hub_data, sizeof(hub_data));
}

// Open a hub interface: validate the descriptor, open the interrupt status
// endpoint and record itf_num/ep_in in this hub's slot.
bool hub_open(uint8_t rhport, uint8_t dev_addr, tusb_desc_interface_t const *itf_desc, uint16_t max_len)
{
  TU_VERIFY(TUSB_CLASS_HUB == itf_desc->bInterfaceClass &&
            0              == itf_desc->bInterfaceSubClass);

  // hub driver does not support multiple TT yet
  TU_VERIFY(itf_desc->bInterfaceProtocol <= 1);

  // hub driver length is fixed: interface descriptor + one endpoint descriptor
  uint16_t const drv_len = sizeof(tusb_desc_interface_t) + sizeof(tusb_desc_endpoint_t);
  TU_ASSERT(drv_len <= max_len);

  //------------- Interrupt Status endpoint -------------//
  tusb_desc_endpoint_t const *desc_ep = (tusb_desc_endpoint_t const *) tu_desc_next(itf_desc);

  TU_ASSERT(TUSB_DESC_ENDPOINT  == desc_ep->bDescriptorType &&
            TUSB_XFER_INTERRUPT == desc_ep->bmAttributes.xfer, 0);

  TU_ASSERT(usbh_edpt_open(rhport, dev_addr, desc_ep));

  hub_interface_t* p_hub = get_itf(dev_addr);

  p_hub->itf_num = itf_desc->bInterfaceNumber;
  p_hub->ep_in   = desc_ep->bEndpointAddress;

  return true;
}

// Close a hub interface and release its slot.
void hub_close(uint8_t dev_addr)
{
  // Only addresses above CFG_TUH_DEVICE_MAX can belong to hubs.
  TU_VERIFY(dev_addr > CFG_TUH_DEVICE_MAX, );

  hub_interface_t* p_hub = get_itf(dev_addr);
  if (p_hub->ep_in != 0)
  {
    tu_memclr(p_hub, sizeof(hub_interface_t));
  }
}

// Re-arm the interrupt IN endpoint for the 1-byte status change bitmap.
bool hub_status_pipe_queue(uint8_t dev_addr)
{
  hub_interface_t* p_hub = get_itf(dev_addr);
  return usbh_edpt_xfer(dev_addr, p_hub->ep_in, &p_hub->status_change, 1);
}


//--------------------------------------------------------------------+
// Set Configure
//--------------------------------------------------------------------+

static bool config_set_port_power (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result);
static bool config_port_power_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result);

// SET_CONFIGURATION step for a hub: first fetch the hub descriptor so we
// learn the port count, then continue in config_set_port_power().
bool hub_set_config(uint8_t dev_addr, uint8_t itf_num)
{
  hub_interface_t* p_hub = get_itf(dev_addr);
  TU_ASSERT(itf_num == p_hub->itf_num);

  tusb_control_request_t const req =
  {
    .bmRequestType_bit = { .recipient = TUSB_REQ_RCPT_DEVICE,
                           .type      = TUSB_REQ_TYPE_CLASS,
                           .direction = TUSB_DIR_IN },
    .bRequest = HUB_REQUEST_GET_DESCRIPTOR,
    .wValue   = 0,
    .wIndex   = 0,
    .wLength  = sizeof(descriptor_hub_desc_t)
  };

  TU_ASSERT( tuh_control_xfer(dev_addr, &req, _hub_buffer, config_set_port_power) );

  return true;
}

// Hub descriptor has arrived in _hub_buffer: record the port count, then
// power each downstream port in turn (continued in config_port_power_complete).
static bool config_set_port_power (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result)
{
  (void) request;
  TU_ASSERT(XFER_RESULT_SUCCESS == result);

  // only use number of ports in hub descriptor
  descriptor_hub_desc_t const* desc_hub = (descriptor_hub_desc_t const*) _hub_buffer;
  hub_interface_t* p_hub = get_itf(dev_addr);
  p_hub->port_count = desc_hub->bNbrPorts;

  // May need to GET_STATUS

  // Set Port Power to be able to detect connection, starting with port 1
  uint8_t const first_port = 1;
  return hub_port_set_feature(dev_addr, first_port, HUB_FEATURE_PORT_POWER, config_port_power_complete);
}

// Completion callback for each PORT_POWER SET_FEATURE during configuration.
// Powers the next port, or — once every port is powered — starts status
// polling and completes SET_CONFIGURATION.
static bool config_port_power_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result)
{
  TU_ASSERT(XFER_RESULT_SUCCESS == result);
  hub_interface_t* p_hub = get_itf(dev_addr);

  // request->wIndex is the port we just powered
  if (request->wIndex != p_hub->port_count)
  {
    // power next port
    uint8_t const hub_port = (uint8_t) (request->wIndex + 1);
    return hub_port_set_feature(dev_addr, hub_port, HUB_FEATURE_PORT_POWER, config_port_power_complete);
  }

  // All ports are powered -> queue notification status endpoint and
  // complete the SET CONFIGURATION.
  // Use the shared helper instead of duplicating the usbh_edpt_xfer() call.
  TU_ASSERT( hub_status_pipe_queue(dev_addr) );
  usbh_driver_set_config_complete(dev_addr, p_hub->itf_num);

  return true;
}

//--------------------------------------------------------------------+
// Connection Changes
//--------------------------------------------------------------------+

static bool connection_get_status_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result);
static bool connection_clear_conn_change_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result);
static bool connection_port_reset_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result);

// callback as response of interrupt endpoint polling
bool hub_xfer_cb(uint8_t dev_addr, uint8_t ep_addr, xfer_result_t result, uint32_t xferred_bytes)
{
  (void) xferred_bytes; // TODO can be more than 1 for hub with lots of ports
  (void) ep_addr;
  TU_ASSERT(result == XFER_RESULT_SUCCESS);

  hub_interface_t* p_hub = get_itf(dev_addr);

  TU_LOG2("  Port Status Change = 0x%02X\r\n", p_hub->status_change);

  // Hub ignore bit0 in status change
  for (uint8_t port=1; port <= p_hub->port_count; port++)
  {
    if ( tu_bit_test(p_hub->status_change, port) )
    {
      hub_port_get_status(dev_addr, port, &p_hub->port_status, connection_get_status_complete);
      break;
    }
  }

  // NOTE: next status transfer is queued by usbh.c after handling this request

  return true;
}

// Completion of the port GET_STATUS issued from hub_xfer_cb(): decide how
// to react to the reported change bits.
static bool connection_get_status_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result)
{
  TU_ASSERT(result == XFER_RESULT_SUCCESS);

  hub_interface_t* p_hub = get_itf(dev_addr);
  uint8_t const port_num = (uint8_t) request->wIndex;

  if ( !p_hub->port_status.change.connection )
  {
    // Other changes are: Enable, Suspend, Over Current, Reset, L1 state
    // TODO clear change

    // prepare for next hub status
    // TODO continue with status_change, or maybe we can do it again with status
    hub_status_pipe_queue(dev_addr);
    return true;
  }

  // Connection change: a device was attached or detached.
  // Port is powered and enabled
  //TU_VERIFY(port_status.status_current.port_power && port_status.status_current.port_enable, );

  // Acknowledge Port Connection Change
  hub_port_clear_feature(dev_addr, port_num, HUB_FEATURE_PORT_CONNECTION_CHANGE, connection_clear_conn_change_complete);

  return true;
}

// Connection change has been acknowledged: reset the port on attach, or
// report a removal event to the host stack on detach.
static bool connection_clear_conn_change_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result)
{
  TU_ASSERT(result == XFER_RESULT_SUCCESS);

  hub_interface_t* p_hub = get_itf(dev_addr);
  uint8_t const port_num = (uint8_t) request->wIndex;

  if ( !p_hub->port_status.status.connection )
  {
    // submit detach event
    hcd_event_t event =
    {
      .rhport     = usbh_get_rhport(dev_addr),
      .event_id   = HCD_EVENT_DEVICE_REMOVE,
      .connection = { .hub_addr = dev_addr, .hub_port = port_num }
    };

    hcd_event_handler(&event, false);
    return true;
  }

  // Reset port if attach event; enumeration continues in
  // connection_port_reset_complete().
  hub_port_reset(dev_addr, port_num, connection_port_reset_complete);

  return true;
}

// Port reset has been issued for a newly attached device: hand the attach
// event to the host stack so enumeration can begin.
static bool connection_port_reset_complete (uint8_t dev_addr, tusb_control_request_t const * request, xfer_result_t result)
{
  TU_ASSERT(result == XFER_RESULT_SUCCESS);

  uint8_t const port_num = (uint8_t) request->wIndex;

  // submit attach event
  hcd_event_t event =
  {
    .rhport     = usbh_get_rhport(dev_addr),
    .event_id   = HCD_EVENT_DEVICE_ATTACH,
    .connection = { .hub_addr = dev_addr, .hub_port = port_num }
  };

  hcd_event_handler(&event, false);

  return true;
}

#endif