/*
             LUFA Library
     Copyright (C) Dean Camera, 2017.

  dean [at] fourwalledcubicle [dot] com
           www.lufa-lib.org
*/

/*
  Copyright 2017  Dean Camera (dean [at] fourwalledcubicle [dot] com)

  Permission to use, copy, modify, distribute, and sell this
  software and its documentation for any purpose is hereby granted
  without fee, provided that the above copyright notice appear in
  all copies and that both that the copyright notice and this
  permission notice and warranty disclaimer appear in supporting
  documentation, and that the name of the author not be used in
  advertising or publicity pertaining to distribution of the
  software without specific, written prior permission.

  The author disclaims all warranties with regard to this
  software, including all implied warranties of merchantability
  and fitness.  In no event shall the author be liable for any
  special, indirect or consequential damages or any damages
  whatsoever resulting from loss of use, data or profits, whether
  in an action of contract, negligence or other tortious action,
  arising out of or in connection with the use or performance of
  this software.
*/

/** \file
 *
 *  USB Device Descriptors, for library use when in USB device mode. Descriptors are special
 *  computer-readable structures which the host requests upon device enumeration, to determine
 *  the device's capabilities and functions.
 */

#include "AVRISPDescriptors.h"

/** Device descriptor structure. This descriptor, located in FLASH memory, describes the overall
 *  device characteristics, including the supported USB version, control endpoint size and the
 *  number of device configurations. The descriptor is read out by the USB host when the enumeration
 *  process begins.
 */
const USB_Descriptor_Device_t PROGMEM AVRISP_DeviceDescriptor =
{
	.Header                 = {.Size = sizeof(USB_Descriptor_Device_t), .Type = DTYPE_Device},

	.USBSpecification       = VERSION_BCD(1,1,0),
	.Class                  = USB_CSCP_VendorSpecificClass,
	.SubClass               = USB_CSCP_NoDeviceSubclass,
	.Protocol               = USB_CSCP_NoDeviceProtocol,

	.Endpoint0Size          = FIXED_CONTROL_ENDPOINT_SIZE,

	.VendorID               = 0x03EB,
	.ProductID              = 0x2104,
	.ReleaseNumber          = VERSION_BCD(2,0,0),

	.ManufacturerStrIndex   = AVRISP_STRING_ID_Manufacturer,
	.ProductStrIndex        = AVRISP_STRING_ID_Product,
	.SerialNumStrIndex      = AVRISP_STRING_ID_Serial,

	.NumberOfConfigurations = FIXED_NUM_CONFIGURATIONS
};
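
/* VERSION_BCD() packs its arguments in binary-coded decimal, so the
 * VERSION_BCD(1,1,0) above encodes as 0x0110 (USB 1.1) and VERSION_BCD(2,0,0)
 * as a 0x0200 device release number. VID 0x03EB is Atmel's USB vendor ID and
 * PID 0x2104 the stock AVRISP mkII product ID, which lets host tools
 * recognise this firmware as the original programmer. */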

/** Configuration descriptor structure. This descriptor, located in FLASH memory, describes the usage
 *  of the device in one of its supported configurations, including information about any device interfaces
 *  and endpoints. The descriptor is read out by the USB host during the enumeration process when selecting
 *  a configuration so that the host may correctly communicate with the USB device.
 */
const AVRISP_USB_Descriptor_Configuration_t PROGMEM AVRISP_ConfigurationDescriptor =
{
	.Config =
		{
			.Header                 = {.Size = sizeof(USB_Descriptor_Configuration_Header_t), .Type = DTYPE_Configuration},

			.TotalConfigurationSize = sizeof(AVRISP_USB_Descriptor_Configuration_t),
			.TotalInterfaces        = 1,

			.ConfigurationNumber    = 1,
			.ConfigurationStrIndex  = NO_DESCRIPTOR,

			.ConfigAttributes       = (USB_CONFIG_ATTR_RESERVED | USB_CONFIG_ATTR_SELFPOWERED),

			.MaxPowerConsumption    = USB_CONFIG_POWER_MA(100)
		},

	.AVRISP_Interface =
		{
			.Header                 = {.Size = sizeof(USB_Descriptor_Interface_t), .Type = DTYPE_Interface},

			.InterfaceNumber        = INTERFACE_ID_AVRISP,
			.AlternateSetting       = 0,

			.TotalEndpoints         = 2,

			.Class                  = USB_CSCP_VendorSpecificClass,
			.SubClass               = USB_CSCP_NoDeviceSubclass,
			.Protocol               = USB_CSCP_NoDeviceProtocol,

			.InterfaceStrIndex      = NO_DESCRIPTOR
		},

	.AVRISP_DataInEndpoint =
		{
			.Header                 = {.Size = sizeof(USB_Descriptor_Endpoint_t), .Type = DTYPE_Endpoint},

			.EndpointAddress        = AVRISP_DATA_IN_EPADDR,
			.Attributes             = (EP_TYPE_BULK | ENDPOINT_ATTR_NO_SYNC | ENDPOINT_USAGE_DATA),
			.EndpointSize           = AVRISP_DATA_EPSIZE,
			.PollingIntervalMS      = 0x0A
		},

	.AVRISP_DataOutEndpoint =
		{
			.Header                 = {.Size = sizeof(USB_Descriptor_Endpoint_t), .Type = DTYPE_Endpoint},

			.EndpointAddress        = AVRISP_DATA_OUT_EPADDR,
			.Attributes             = (EP_TYPE_BULK | ENDPOINT_ATTR_NO_SYNC | ENDPOINT_USAGE_DATA),
			.EndpointSize           = AVRISP_DATA_EPSIZE,
			.PollingIntervalMS      = 0x0A
		},
};
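
/* The configuration bundle above totals 9 (configuration) + 9 (interface) +
 * 7 + 7 (two endpoint descriptors) = 32 bytes; with LUFA's packed descriptor
 * layouts this is the wTotalLength value that
 * sizeof(AVRISP_USB_Descriptor_Configuration_t) yields. USB_CONFIG_POWER_MA(100)
 * stores bMaxPower in the USB unit of 2 mA steps, i.e. a raw value of 50. The
 * 0x0A polling interval is ignored by the host for full-speed bulk endpoints
 * such as these. */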

/** Language descriptor structure. This descriptor, located in FLASH memory, is returned when the host requests
 *  the string descriptor with index 0 (the first index). It is actually an array of 16-bit integers, which indicate
 *  via the language ID table available at USB.org what languages the device supports for its string descriptors.
 */
const USB_Descriptor_String_t PROGMEM AVRISP_LanguageString = USB_STRING_DESCRIPTOR_ARRAY(LANGUAGE_ID_ENG);
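
/* LANGUAGE_ID_ENG is 0x0409 (English - United States), so the descriptor
 * returned for string index 0 is the four bytes 0x04 0x03 0x09 0x04:
 * bLength = 4, bDescriptorType = string (0x03), then the LANGID in
 * little-endian order. */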

/** Manufacturer descriptor string. This is a Unicode string containing the manufacturer's details in human readable
 *  form, and is read out upon request by the host when the appropriate string ID is requested, listed in the Device
 *  Descriptor.
 */
const USB_Descriptor_String_t PROGMEM AVRISP_ManufacturerString = USB_STRING_DESCRIPTOR(L"ATMEL");

/** Product descriptor string. This is a Unicode string containing the product's details in human readable form,
 *  and is read out upon request by the host when the appropriate string ID is requested, listed in the Device
 *  Descriptor.
 */
const USB_Descriptor_String_t PROGMEM AVRISP_ProductString = USB_STRING_DESCRIPTOR(L"AVRISP mkII");

/** Serial number string. This is a Unicode string containing the device's unique serial number, expressed as a
 *  series of uppercase hexadecimal digits.
 */
const USB_Descriptor_String_t PROGMEM AVRISP_SerialString = USB_STRING_DESCRIPTOR(L"000200012345\0"
    // Note: the real AVRISP-MKII firmware ships its serial string with this
    // embedded NUL byte (presumably a firmware bug); it is reproduced here so
    // the descriptor matches the original device byte-for-byte.
);

/** This function is called by the library when in device mode, and must be overridden (see library "USB Descriptors"
 *  documentation) by the application code so that the address and size of a requested descriptor can be given
 *  to the USB library. When the device receives a Get Descriptor request on the control endpoint, this function
 *  is called so that the descriptor details can be passed back and the appropriate descriptor sent back to the
 *  USB host.
 */
uint16_t AVRISP_GetDescriptor(const uint16_t wValue,
                              const uint16_t wIndex,
                              const void** const DescriptorAddress)
{
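	/* A GET_DESCRIPTOR control request carries the descriptor type in the
	 * high byte of wValue and the descriptor index (significant only for
	 * string descriptors) in the low byte, which the two masks below
	 * separate out. */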
	const uint8_t  DescriptorType   = (wValue >> 8);
	const uint8_t  DescriptorNumber = (wValue & 0xFF);

	const void* Address = NULL;
	uint16_t    Size    = NO_DESCRIPTOR;

	switch (DescriptorType)
	{
		case DTYPE_Device:
			Address = &AVRISP_DeviceDescriptor;
			Size    = sizeof(USB_Descriptor_Device_t);
			break;
		case DTYPE_Configuration:
			Address = &AVRISP_ConfigurationDescriptor;
			Size    = sizeof(AVRISP_USB_Descriptor_Configuration_t);
			break;
		case DTYPE_String:
			switch (DescriptorNumber)
			{
				case AVRISP_STRING_ID_Language:
					Address = &AVRISP_LanguageString;
					Size    = pgm_read_byte(&AVRISP_LanguageString.Header.Size);
					break;
				case AVRISP_STRING_ID_Manufacturer:
					Address = &AVRISP_ManufacturerString;
					Size    = pgm_read_byte(&AVRISP_ManufacturerString.Header.Size);
					break;
				case AVRISP_STRING_ID_Product:
					Address = &AVRISP_ProductString;
					Size    = pgm_read_byte(&AVRISP_ProductString.Header.Size);
					break;
				case AVRISP_STRING_ID_Serial:
					Address = &AVRISP_SerialString;
					Size    = pgm_read_byte(&AVRISP_SerialString.Header.Size);
					break;
			}

			break;
	}

	*DescriptorAddress = Address;
	return Size;
}
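
/* Illustrative sketch only: one way an application could route LUFA's
 * descriptor callback through the lookup above. The actual AVRISP-MKII
 * project wires this up elsewhere (and the callback signature can differ
 * with the library's descriptor-memory configuration), so this hypothetical
 * example is excluded from the build by an equally hypothetical guard macro. */
#if defined(AVRISP_DESCRIPTOR_CALLBACK_SKETCH)
uint16_t CALLBACK_USB_GetDescriptor(const uint16_t wValue,
                                    const uint16_t wIndex,
                                    const void** const DescriptorAddress)
{
	/* Delegate straight to the project-level lookup defined above. */
	return AVRISP_GetDescriptor(wValue, wIndex, DescriptorAddress);
}
#endif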
cpf"><asm/ptrace.h> #include <xen/delay.h> #include <xen/perfc.h> #include <xen/mm.h> #include <asm/system.h> #include <asm/processor.h> #include <xen/irq.h> #include <xen/event.h> #include <asm/privop.h> #include <asm/vcpu.h> #include <asm/ia64_int.h> #include <asm/dom_fw.h> #include <asm/vhpt.h> #include <asm/debugger.h> #include <asm/fpswa.h> #include <asm/bundle.h> #include <asm/asm-xsi-offsets.h> #include <asm/shadow.h> #include <asm/uaccess.h> #include <asm/p2m_entry.h> extern void die_if_kernel(char *str, struct pt_regs *regs, long err); /* FIXME: where these declarations shold be there ? */ extern int ia64_hyperprivop(unsigned long, REGS *); extern IA64FAULT ia64_hypercall(struct pt_regs *regs); extern void do_ssc(unsigned long ssc, struct pt_regs *regs); // should never panic domain... if it does, stack may have been overrun static void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector) { struct vcpu *v = current; if (!(PSCB(v, ipsr) & IA64_PSR_DT)) { panic_domain(regs, "psr.dt off, trying to deliver nested dtlb!\n"); } vector &= ~0xf; if (vector != IA64_DATA_TLB_VECTOR && vector != IA64_ALT_DATA_TLB_VECTOR && vector != IA64_VHPT_TRANS_VECTOR) { panic_domain(regs, "psr.ic off, delivering fault=%lx," "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n", vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa), isr, PSCB(v, iip)); } } static void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long vector) { struct vcpu *v = current; if (!PSCB(v, interrupt_collection_enabled)) check_bad_nested_interruption(isr, regs, vector); PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed? PSCB(v, precover_ifs) = regs->cr_ifs; PSCB(v, ipsr) = vcpu_get_psr(v); vcpu_bsw0(v); PSCB(v, isr) = isr; PSCB(v, iip) = regs->cr_iip; PSCB(v, ifs) = 0; regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL; regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET; regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT); if (PSCB(v, dcr) & IA64_DCR_BE) regs->cr_ipsr |= IA64_PSR_BE; else regs->cr_ipsr &= ~IA64_PSR_BE; if (PSCB(v, hpsr_dfh)) regs->cr_ipsr |= IA64_PSR_DFH; PSCB(v, vpsr_dfh) = 0; v->vcpu_info->evtchn_upcall_mask = 1; PSCB(v, interrupt_collection_enabled) = 0; perfc_incra(slow_reflect, vector >> 8); debugger_event(vector == IA64_EXTINT_VECTOR ? XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT); } void reflect_event(void) { struct vcpu *v = current; struct pt_regs *regs; unsigned long isr; if (!event_pending(v)) return; /* Sanity check */ if (is_idle_vcpu(v)) { //printk("WARN: invocation to reflect_event in nested xen\n"); return; } regs = vcpu_regs(v); isr = regs->cr_ipsr & IA64_PSR_RI; if (!PSCB(v, interrupt_collection_enabled)) printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx," "isr=%lx,viip=0x%lx\n", regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip)); PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed? 
PSCB(v, precover_ifs) = regs->cr_ifs; PSCB(v, ipsr) = vcpu_get_psr(v); vcpu_bsw0(v); PSCB(v, isr) = isr; PSCB(v, iip) = regs->cr_iip; PSCB(v, ifs) = 0; regs->cr_iip = v->arch.event_callback_ip; regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET; regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT); if (PSCB(v, dcr) & IA64_DCR_BE) regs->cr_ipsr |= IA64_PSR_BE; else regs->cr_ipsr &= ~IA64_PSR_BE; if (PSCB(v, hpsr_dfh)) regs->cr_ipsr |= IA64_PSR_DFH; PSCB(v, vpsr_dfh) = 0; v->vcpu_info->evtchn_upcall_mask = 1; PSCB(v, interrupt_collection_enabled) = 0; debugger_event(XEN_IA64_DEBUG_ON_EVENT); } static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs) { if (!PSCB(v, interrupt_collection_enabled)) { PSCB(v, ifs) = regs->cr_ifs; regs->cr_ifs = 0; perfc_incr(lazy_cover); return 1; // retry same instruction with cr.ifs off } return 0; } void ia64_do_page_fault(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir) { unsigned long iip = regs->cr_iip, iha; // FIXME should validate address here unsigned long pteval; unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL); IA64FAULT fault; int is_ptc_l_needed = 0; ia64_itir_t _itir = {.itir = itir}; if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to a speculative load or lfetch.fault, * set the "ed" bit in the psr to ensure forward progress. * (Target register will get a NaT for ld.s, lfetch will be * canceled.) */ ia64_psr(regs)->ed = 1; return; } again: fault = vcpu_translate(current, address, is_data, &pteval, &itir, &iha); if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) { struct p2m_entry entry; unsigned long m_pteval; m_pteval = translate_domain_pte(pteval, address, itir, &(_itir.itir), &entry); vcpu_itc_no_srlz(current, is_data ? 2 : 1, address, m_pteval, pteval, _itir.itir, &entry); if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) || p2m_entry_retry(&entry)) { /* dtlb has been purged in-between. This dtlb was matching. Undo the work. */ vcpu_flush_tlb_vhpt_range(address, _itir.ps); // the stale entry which we inserted above // may remains in tlb cache. // we don't purge it now hoping next itc purges it. is_ptc_l_needed = 1; goto again; } return; } if (is_ptc_l_needed) vcpu_ptc_l(current, address, _itir.ps); if (!guest_mode(regs)) { /* The fault occurs inside Xen. */ if (!ia64_done_with_exception(regs)) { // should never happen. 
If it does, region 0 addr may // indicate a bad xen pointer printk("*** xen_handle_domain_access: exception table" " lookup failed, iip=0x%lx, addr=0x%lx, " "spinning...\n", iip, address); panic_domain(regs, "*** xen_handle_domain_access: " "exception table lookup failed, " "iip=0x%lx, addr=0x%lx, spinning...\n", iip, address); } return; } if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs)) return; if (!PSCB(current, interrupt_collection_enabled)) { check_bad_nested_interruption(isr, regs, fault); //printk("Delivering NESTED DATA TLB fault\n"); fault = IA64_DATA_NESTED_TLB_VECTOR; regs->cr_iip = ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL; regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET; regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT); if (PSCB(current, dcr) & IA64_DCR_BE) regs->cr_ipsr |= IA64_PSR_BE; else regs->cr_ipsr &= ~IA64_PSR_BE; if (PSCB(current, hpsr_dfh)) regs->cr_ipsr |= IA64_PSR_DFH; PSCB(current, vpsr_dfh) = 0; perfc_incra(slow_reflect, fault >> 8); return; } PSCB(current, itir) = itir; PSCB(current, iha) = iha; PSCB(current, ifa) = address; reflect_interruption(isr, regs, fault); } fpswa_interface_t *fpswa_interface = 0; void __init trap_init(void) { if (ia64_boot_param->fpswa) /* FPSWA fixup: make the interface pointer a virtual address */ fpswa_interface = __va(ia64_boot_param->fpswa); else printk("No FPSWA supported.\n"); } static fpswa_ret_t fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr, unsigned long *fpsr, unsigned long *isr, unsigned long *pr, unsigned long *ifs, struct pt_regs *regs) { fp_state_t fp_state; fpswa_ret_t ret; XEN_EFI_RR_DECLARE(rr6, rr7); if (!fpswa_interface) return (fpswa_ret_t) {-1, 0, 0, 0}; memset(&fp_state, 0, sizeof(fp_state_t)); /* * compute fp_state. only FP registers f6 - f11 are used by the * kernel, so set those bits in the mask and set the low volatile * pointer to point to these registers. */ fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */ fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6; /* * unsigned long (*EFI_FPSWA) ( * unsigned long trap_type, * void *Bundle, * unsigned long *pipsr, * unsigned long *pfsr, * unsigned long *pisr, * unsigned long *ppreds, * unsigned long *pifs, * void *fp_state); */ XEN_EFI_RR_ENTER(rr6, rr7); ret = (*fpswa_interface->fpswa) (fp_fault, bundle, ipsr, fpsr, isr, pr, ifs, &fp_state); XEN_EFI_RR_LEAVE(rr6, rr7); return ret; } /* * Handle floating-point assist faults and traps for domain. */ unsigned long handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr) { IA64_BUNDLE bundle; unsigned long fault_ip; fpswa_ret_t ret; unsigned long rc; fault_ip = regs->cr_iip; /* * When the FP trap occurs, the trapping instruction is completed. * If ipsr.ri == 0, there is the trapping instruction in previous * bundle. */ if (!fp_fault && (ia64_psr(regs)->ri == 0)) fault_ip -= 16; if (VMX_DOMAIN(current)) { rc = __vmx_get_domain_bundle(fault_ip, &bundle); } else { rc = 0; if (vcpu_get_domain_bundle(current, regs, fault_ip, &bundle) == 0) rc = IA64_RETRY; } if (rc == IA64_RETRY) { PSCBX(current, fpswa_ret) = (fpswa_ret_t){IA64_RETRY, 0, 0, 0}; gdprintk(XENLOG_DEBUG, "%s(%s): floating-point bundle at 0x%lx not mapped\n", __FUNCTION__, fp_fault ? 
"fault" : "trap", fault_ip); return IA64_RETRY; } ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, &regs->cr_ifs, regs); if (ret.status) { PSCBX(current, fpswa_ret) = ret; gdprintk(XENLOG_ERR, "%s(%s): fp_emulate() returned %ld\n", __FUNCTION__, fp_fault ? "fault" : "trap", ret.status); } return ret.status; } void ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa, unsigned long iim, unsigned long itir, unsigned long arg5, unsigned long arg6, unsigned long arg7, unsigned long stack) { struct pt_regs *regs = (struct pt_regs *)&stack; unsigned long code; static const char *const reason[] = { "IA-64 Illegal Operation fault", "IA-64 Privileged Operation fault", "IA-64 Privileged Register fault", "IA-64 Reserved Register/Field fault", "Disabled Instruction Set Transition fault", "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault", "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12", "Unknown fault 13", "Unknown fault 14", "Unknown fault 15" }; printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, " "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa, regs->cr_iip, regs->cr_ipsr, isr); if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to lfetch.fault, set "ed" bit in the * psr to cancel the lfetch. */ ia64_psr(regs)->ed = 1; printk("ia64_fault: handled lfetch.fault\n"); return; } switch (vector) { case 0: printk("VHPT Translation.\n"); break; case 4: printk("Alt DTLB.\n"); break; case 6: printk("Instruction Key Miss.\n"); break; case 7: printk("Data Key Miss.\n"); break; case 8: printk("Dirty-bit.\n"); break; case 10: /* __domain_get_bundle() may cause fault. */ if (ia64_done_with_exception(regs)) return; printk("Data Access-bit.\n"); break; case 20: printk("Page Not Found.\n"); break; case 21: printk("Key Permission.\n"); break; case 22: printk("Instruction Access Rights.\n"); break; case 24: /* General Exception */ code = (isr >> 4) & 0xf; printk("General Exception: %s%s.\n", reason[code], (code == 3) ? ((isr & (1UL << 37)) ? 
" (RSE access)" : " (data access)") : ""); if (code == 8) { #ifdef CONFIG_IA64_PRINT_HAZARDS printk("%s[%d]: possible hazard @ ip=%016lx " "(pr = %016lx)\n", current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, regs->pr); #endif printk("ia64_fault: returning on hazard\n"); return; } break; case 25: printk("Disabled FP-Register.\n"); break; case 26: printk("NaT consumption.\n"); break; case 29: printk("Debug.\n"); break; case 30: printk("Unaligned Reference.\n"); break; case 31: printk("Unsupported data reference.\n"); break; case 32: printk("Floating-Point Fault.\n"); break; case 33: printk("Floating-Point Trap.\n"); break; case 34: printk("Lower Privilege Transfer Trap.\n"); break; case 35: printk("Taken Branch Trap.\n"); break; case 36: printk("Single Step Trap.\n"); break; case 45: printk("IA-32 Exception.\n"); break; case 46: printk("IA-32 Intercept.\n"); break; case 47: printk("IA-32 Interrupt.\n"); break; default: printk("Fault %lu\n", vector); break; } show_registers(regs); panic("Fault in Xen.\n"); } /* Also read in hyperprivop.S */ int first_break = 0; void ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim) { struct domain *d = current->domain; struct vcpu *v = current; IA64FAULT vector; /* FIXME: don't hardcode constant */ if ((iim == 0x80001 || iim == 0x80002) && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) { do_ssc(vcpu_get_gr(current, 36), regs); } #ifdef CRASH_DEBUG else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) { if (iim == 0) show_registers(regs); debugger_trap_fatal(0 /* don't care */ , regs); regs_increment_iip(regs); } #endif else if (iim == d->arch.breakimm && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) { /* by default, do not continue */ v->arch.hypercall_continuation = 0; if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) { if (!PSCBX(v, hypercall_continuation)) vcpu_increment_iip(current); } else reflect_interruption(isr, regs, vector); } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) { if (ia64_hyperprivop(iim, regs)) vcpu_increment_iip(current); } else { if (iim == 0) die_if_kernel("bug check", regs, iim); PSCB(v, iim) = iim; reflect_interruption(isr, regs, IA64_BREAK_VECTOR); } } void ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long itir) { IA64FAULT vector; vector = priv_emulate(current, regs, isr); if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) { // Note: if a path results in a vector to reflect that requires // iha/itir (e.g. vcpu_force_data_miss), they must be set there /* * IA64_GENEX_VECTOR may contain in the lowest byte an ISR.code * see IA64_ILLOP_FAULT, ... 
*/ if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) { isr = vector & 0xffUL; vector = IA64_GENEX_VECTOR; } reflect_interruption(isr, regs, vector); } } void ia64_lazy_load_fpu(struct vcpu *v) { if (PSCB(v, hpsr_dfh)) { PSCB(v, hpsr_dfh) = 0; PSCB(v, hpsr_mfh) = 1; if (__ia64_per_cpu_var(fp_owner) != v) __ia64_load_fpu(v->arch._thread.fph); } } void ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector) { struct vcpu *v = current; unsigned long check_lazy_cover = 0; unsigned long psr = regs->cr_ipsr; unsigned long status; /* Following faults shouldn't be seen from Xen itself */ BUG_ON(!(psr & IA64_PSR_CPL)); switch (vector) { case 6: vector = IA64_INST_KEY_MISS_VECTOR; break; case 7: vector = IA64_DATA_KEY_MISS_VECTOR; break; case 8: vector = IA64_DIRTY_BIT_VECTOR; break; case 9: vector = IA64_INST_ACCESS_BIT_VECTOR; break; case 10: check_lazy_cover = 1; vector = IA64_DATA_ACCESS_BIT_VECTOR; break; case 20: check_lazy_cover = 1; vector = IA64_PAGE_NOT_PRESENT_VECTOR; break; case 21: vector = IA64_KEY_PERMISSION_VECTOR; break; case 22: vector = IA64_INST_ACCESS_RIGHTS_VECTOR; break; case 23: check_lazy_cover = 1; vector = IA64_DATA_ACCESS_RIGHTS_VECTOR; break; case 24: vector = IA64_GENEX_VECTOR; break; case 25: ia64_lazy_load_fpu(v); if (!PSCB(v, vpsr_dfh)) { regs->cr_ipsr &= ~IA64_PSR_DFH; return; } vector = IA64_DISABLED_FPREG_VECTOR; break; case 26: if (((isr >> 4L) & 0xfL) == 1) { /* Fault is due to a register NaT consumption fault. */ //regs->eml_unat = 0; FIXME: DO WE NEED THIS?? vector = IA64_NAT_CONSUMPTION_VECTOR; break; } #if 1 // pass null pointer dereferences through with no error // but retain debug output for non-zero ifa if (!ifa) { vector = IA64_NAT_CONSUMPTION_VECTOR; break; } #endif #ifdef CONFIG_PRIVIFY /* Some privified operations are coded using reg+64 instead of reg. */ printk("*** NaT fault... attempting to handle as privop\n"); printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n", isr, ifa, regs->cr_iip, psr); //regs->eml_unat = 0; FIXME: DO WE NEED THIS??? // certain NaT faults are higher priority than privop faults vector = priv_emulate(v, regs, isr); if (vector == IA64_NO_FAULT) { printk("*** Handled privop masquerading as NaT " "fault\n"); return; } #endif vector = IA64_NAT_CONSUMPTION_VECTOR; break; case 27: //printk("*** Handled speculation vector, itc=%lx!\n", // ia64_get_itc()); PSCB(current, iim) = iim; vector = IA64_SPECULATION_VECTOR; break; case 29: vector = IA64_DEBUG_VECTOR; if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG)) return; break; case 30: // FIXME: Should we handle unaligned refs in Xen?? vector = IA64_UNALIGNED_REF_VECTOR; break; case 32: status = handle_fpu_swa(1, regs, isr); if (!status) { vcpu_increment_iip(v); return; } vector = IA64_FP_FAULT_VECTOR; break; case 33: status = handle_fpu_swa(0, regs, isr); if (!status) return; vector = IA64_FP_TRAP_VECTOR; break; case 34: if (isr & (1UL << 4)) printk("ia64_handle_reflection: handling " "unimplemented instruction address %s\n", (isr & (1UL<<32)) ? 
"fault" : "trap"); vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR; break; case 35: vector = IA64_TAKEN_BRANCH_TRAP_VECTOR; if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_TBRANCH)) return; break; case 36: vector = IA64_SINGLE_STEP_TRAP_VECTOR; if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP)) return; break; default: panic_domain(regs, "ia64_handle_reflection: " "unhandled vector=0x%lx\n", vector); return; } if (check_lazy_cover && (isr & IA64_ISR_IR) && handle_lazy_cover(v, regs)) return; PSCB(current, ifa) = ifa; PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa); reflect_interruption(isr, regs, vector); } void ia64_shadow_fault(unsigned long ifa, unsigned long itir, unsigned long isr, struct pt_regs *regs) { struct vcpu *v = current; struct domain *d = current->domain; unsigned long gpfn; unsigned long pte = 0; struct vhpt_lf_entry *vlfe; /* * v->arch.vhpt_pg_shift shouldn't be used here. * Currently dirty page logging bitmap is allocated based * on PAGE_SIZE. This is part of xen_domctl_shadow_op ABI. * If we want to log dirty pages in finer grained when * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to * revise the ABI and update this function and the related * tool stack (live relocation). */ unsigned long vhpt_pg_shift = PAGE_SHIFT; /* There are 2 jobs to do: - marking the page as dirty (the metaphysical address must be extracted to do that). - reflecting or not the fault (the virtual Dirty bit must be extracted to decide). Unfortunatly these informations are not immediatly available! */ /* Extract the metaphysical address. Try to get it from VHPT and M2P as we need the flags. */ vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa); pte = vlfe->page_flags; if (vlfe->ti_tag == ia64_ttag(ifa)) { /* The VHPT entry is valid. */ gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift); BUG_ON(gpfn == INVALID_M2P_ENTRY); } else { unsigned long itir, iha; IA64FAULT fault; /* The VHPT entry is not valid. */ vlfe = NULL; /* FIXME: gives a chance to tpa, as the TC was valid. */ fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha); /* Try again! */ if (fault != IA64_NO_FAULT) { /* This will trigger a dtlb miss. */ ia64_ptcl(ifa, vhpt_pg_shift << 2); return; } gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift); if (pte & _PAGE_D) pte |= _PAGE_VIRT_D; } /* Set the dirty bit in the bitmap. */ shadow_mark_page_dirty(d, gpfn); /* Update the local TC/VHPT and decides wether or not the fault should be reflected. SMP note: we almost ignore the other processors. The shadow_bitmap has been atomically updated. If the dirty fault happen on another processor, it will do its job. */ if (pte != 0) { /* We will know how to handle the fault. */ if (pte & _PAGE_VIRT_D) { /* Rewrite VHPT entry. There is no race here because only the cpu VHPT owner can write page_flags. */ if (vlfe) vlfe->page_flags = pte | _PAGE_D; /* Purge the TC locally. It will be reloaded from the VHPT iff the VHPT entry is still valid. */ ia64_ptcl(ifa, vhpt_pg_shift << 2); atomic64_inc(&d->arch.shadow_fault_count); } else { /* Reflect. In this case there is no need to purge. */ ia64_handle_reflection(ifa, regs, isr, 0, 8); } } else { /* We don't know wether or not the fault must be reflected. The VHPT entry is not valid. */ /* FIXME: in metaphysical mode, we could do an ITC now. */ ia64_ptcl(ifa, vhpt_pg_shift << 2); } }