/*
             LUFA Library
     Copyright (C) Dean Camera, 2017.

  dean [at] fourwalledcubicle [dot] com
           www.lufa-lib.org
*/

/*
  Copyright 2017  Dean Camera (dean [at] fourwalledcubicle [dot] com)

  Permission to use, copy, modify, distribute, and sell this
  software and its documentation for any purpose is hereby granted
  without fee, provided that the above copyright notice appear in
  all copies and that both that the copyright notice and this
  permission notice and warranty disclaimer appear in supporting
  documentation, and that the name of the author not be used in
  advertising or publicity pertaining to distribution of the
  software without specific, written prior permission.

  The author disclaims all warranties with regard to this
  software, including all implied warranties of merchantability
  and fitness.  In no event shall the author be liable for any
  special, indirect or consequential damages or any damages
  whatsoever resulting from loss of use, data or profits, whether
  in an action of contract, negligence or other tortious action,
  arising out of or in connection with the use or performance of
  this software.
*/

/** \file
 *
 *  USB Device Configuration Descriptor processing routines, to determine the correct pipe configurations
 *  needed to communicate with an attached USB device. Descriptors are special computer-readable structures
 *  which the host requests upon device enumeration, to determine the device's capabilities and functions.
 */

#include "ConfigDescriptor.h"

/** Reads and processes an attached device's descriptors, to determine compatibility and pipe configurations. This
 *  routine will read in the entire configuration descriptor, and configure the host's pipes to correctly communicate
 *  with compatible devices.
 *
 *  This routine searches for a HID interface descriptor containing at least one Interrupt type IN endpoint.
 *
 *  \return An error code from the GenericHIDHost_GetConfigDescriptorDataCodes_t enum.
 */
uint8_t ProcessConfigurationDescriptor(void)
{
	uint8_t  ConfigDescriptorData[512];
	void*    CurrConfigLocation = ConfigDescriptorData;
	uint16_t CurrConfigBytesRem;

	USB_Descriptor_Interface_t* HIDInterface    = NULL;
	USB_Descriptor_Endpoint_t*  DataINEndpoint  = NULL;
	USB_Descriptor_Endpoint_t*  DataOUTEndpoint = NULL;

	/* Retrieve the entire configuration descriptor into the allocated buffer */
	switch (USB_Host_GetDeviceConfigDescriptor(1, &CurrConfigBytesRem, ConfigDescriptorData, sizeof(ConfigDescriptorData)))
	{
		case HOST_GETCONFIG_Successful:
			break;
		case HOST_GETCONFIG_InvalidData:
			return InvalidConfigDataReturned;
		case HOST_GETCONFIG_BuffOverflow:
			return DescriptorTooLarge;
		default:
			return ControlError;
	}

	while (!(DataINEndpoint) || !(DataOUTEndpoint))
	{
		/* See if we've found a likely compatible interface, and if there is an endpoint within that interface */
		if (!(HIDInterface) ||
		    USB_GetNextDescriptorComp(&CurrConfigBytesRem, &CurrConfigLocation,
		                              DComp_NextHIDInterfaceDataEndpoint) != DESCRIPTOR_SEARCH_COMP_Found)
		{
			/* Not all HID devices have an OUT endpoint - if we've reached the end of the HID descriptor
			 * but only found the mandatory IN endpoint, it's safe to continue with the device enumeration */
			if (DataINEndpoint)
			  break;

			/* Get the next HID interface from the configuration descriptor */
			if (USB_GetNextDescriptorComp(&CurrConfigBytesRem, &CurrConfigLocation,
			                              DComp_NextHIDInterface) != DESCRIPTOR_SEARCH_COMP_Found)
			{
				/* Descriptor not found, error out */
				return NoCompatibleInterfaceFound;
			}

			/* Save the interface in case we need to refer back to it later */
			HIDInterface = DESCRIPTOR_PCAST(CurrConfigLocation, USB_Descriptor_Interface_t);

			/* Clear any found endpoints */
			DataOUTEndpoint = NULL;

			/* Skip the remainder of the loop as we have not found an endpoint yet */
			continue;
		}

		/* Retrieve the endpoint address from the endpoint descriptor */
		USB_Descriptor_Endpoint_t* EndpointData = DESCRIPTOR_PCAST(CurrConfigLocation, USB_Descriptor_Endpoint_t);

	/* If the endpoint is an IN type endpoint */
		if ((EndpointData->EndpointAddress & ENDPOINT_DIR_MASK) == ENDPOINT_DIR_IN)
		  DataINEndpoint  = EndpointData;
		else
		  DataOUTEndpoint = EndpointData;
	}

	/* Configure the HID data IN pipe */
	Pipe_ConfigurePipe(HID_DATA_IN_PIPE, EP_TYPE_INTERRUPT, DataINEndpoint->EndpointAddress, DataINEndpoint->EndpointSize, 1);
	Pipe_SetInterruptPeriod(DataINEndpoint->PollingIntervalMS);

	/* Check if the HID interface contained an optional OUT data endpoint */
	if (DataOUTEndpoint)
	{
		/* Configure the HID data OUT pipe */
		Pipe_ConfigurePipe(HID_DATA_OUT_PIPE, EP_TYPE_INTERRUPT, DataOUTEndpoint->EndpointAddress, DataOUTEndpoint->EndpointSize, 1);
	}

	/* Valid data found, return success */
	return SuccessfulConfigRead;
}
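
/* Illustrative sketch, not part of the original file: one way the host application
 * might call ProcessConfigurationDescriptor() once device enumeration completes.
 * EVENT_USB_Host_DeviceEnumerationComplete() and USB_Host_SetDeviceConfiguration()
 * are standard LUFA host-mode APIs; the minimal error handling here is an example
 * only and is kept inside #if 0 so it is never compiled into this module.
 */
#if 0
void EVENT_USB_Host_DeviceEnumerationComplete(void)
{
	/* Read and parse the configuration descriptor, configuring the HID pipes */
	if (ProcessConfigurationDescriptor() != SuccessfulConfigRead)
	  return;

	/* Select the device's first configuration so the HID interface becomes active */
	if (USB_Host_SetDeviceConfiguration(1) != HOST_SENDCONTROL_Successful)
	  return;

	/* Device is now ready for HID report transfers on the configured pipes */
}
#endif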

/** Descriptor comparator function. This comparator function can be called while processing an attached USB device's
 *  configuration descriptor, to search for a specific sub descriptor. It can also be used to abort the configuration
 *  descriptor processing if an incompatible descriptor configuration is found.
 *
 *  This comparator searches for the next Interface descriptor of the correct HID Class value.
 *
 *  \return A value from the DSEARCH_Return_ErrorCodes_t enum
 */
uint8_t DComp_NextHIDInterface(void* CurrentDescriptor)
{
	USB_Descriptor_Header_t* Header = DESCRIPTOR_PCAST(CurrentDescriptor, USB_Descriptor_Header_t);

	/* Determine if the current descriptor is an interface descriptor */
	if (Header->Type == DTYPE_Interface)
	{
		USB_Descriptor_Interface_t* Interface = DESCRIPTOR_PCAST(CurrentDescriptor, USB_Descriptor_Interface_t);

		/* Check the interface's class, break out if a HID class interface has been found */
		if (Interface->Class == HID_CLASS)
		{
			/* Indicate that the descriptor being searched for has been found */
			return DESCRIPTOR_SEARCH_Found;
		}
	}

	/* Current descriptor does not match what this comparator is looking for */
	return DESCRIPTOR_SEARCH_NotFound;
}
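
/* Illustrative sketch, not part of the original file: the same comparator pattern
 * can be narrowed further when only one specific HID interface is wanted. This
 * hypothetical variant additionally matches the interface protocol field (0x02 is
 * the HID boot-mouse protocol value from the HID specification, not a constant
 * from this project); it is not referenced anywhere in this demo.
 */
#if 0
uint8_t DComp_Example_NextHIDMouseInterface(void* CurrentDescriptor)
{
	USB_Descriptor_Header_t* Header = DESCRIPTOR_PCAST(CurrentDescriptor, USB_Descriptor_Header_t);

	if (Header->Type == DTYPE_Interface)
	{
		USB_Descriptor_Interface_t* Interface = DESCRIPTOR_PCAST(CurrentDescriptor, USB_Descriptor_Interface_t);

		/* Match on both the interface class and the (boot) protocol value */
		if ((Interface->Class == HID_CLASS) && (Interface->Protocol == 0x02))
		  return DESCRIPTOR_SEARCH_Found;
	}

	return DESCRIPTOR_SEARCH_NotFound;
}
#endif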

/** Descriptor comparator function. This comparator function can be called while processing an attached USB device's
 *  configuration descriptor, to search for a specific sub descriptor. It can also be used to abort the configuration
 *  descriptor processing if an incompatible descriptor configuration is found.
 *
 *  This comparator searches for the next Endpoint descriptor inside the current interface descriptor,
 *  aborting the search if another interface descriptor is found before the required endpoint.
 *
 *  \return A value from the DSEARCH_Return_ErrorCodes_t enum
 */
uint8_t DComp_NextHIDInterfaceDataEndpoint(void* CurrentDescriptor)
{
	USB_Descriptor_Header_t* Header = DESCRIPTOR_PCAST(CurrentDescriptor, USB_Descriptor_Header_t);

	/* Determine the type of the current descriptor */
	if (Header->Type == DTYPE_Endpoint)
	{
		/* Indicate that the descriptor being searched for has been found */
		return DESCRIPTOR_SEARCH_Found;
	}
	else if (Header->Type == DTYPE_Interface)
	{
		/* Indicate that the search has failed prematurely and should be aborted */
		return DESCRIPTOR_SEARCH_Fail;
	}
	else
	{
		/* Current descriptor does not match what this comparator is looking for */
		return DESCRIPTOR_SEARCH_NotFound;
	}
}
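
/* Illustrative sketch, not part of the original file: once the pipes above have been
 * configured, the host task might poll the IN pipe roughly as follows. The Pipe_*()
 * calls are standard LUFA host pipe APIs; ReportBuffer and its size are assumptions
 * for the example, and real code must match the attached device's report layout.
 */
#if 0
void Example_ReadNextReport(void)
{
	uint8_t ReportBuffer[8];

	/* Select and unfreeze the HID data IN pipe configured earlier */
	Pipe_SelectPipe(HID_DATA_IN_PIPE);
	Pipe_Unfreeze();

	/* If a report has arrived, read it out and release the pipe bank */
	if (Pipe_IsINReceived())
	{
		uint16_t ReportLength = Pipe_BytesInPipe();

		if (ReportLength > sizeof(ReportBuffer))
		  ReportLength = sizeof(ReportBuffer);

		if (ReportLength)
		  Pipe_Read_Stream_LE(ReportBuffer, ReportLength, NULL);

		Pipe_ClearIN();
	}

	/* Refreeze the pipe until the next poll */
	Pipe_Freeze();
}
#endif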

#include <xeno/vbd.h>
#include <xeno/slab.h>

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
#define MAX_PENDING_REQS 64
#define BATCH_PER_DOMAIN 16

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 *
 * We can't allocate pending_req's in order, since they may complete out of
 * order. We therefore maintain an allocation ring. This ring also indicates
 * when enough work has been passed down -- at that point the allocation ring
 * will be empty.
 */
static pending_req_t pending_reqs[MAX_PENDING_REQS];
static unsigned char pending_ring[MAX_PENDING_REQS];
static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
/* NB. We use a different index type to differentiate from shared blk rings. */
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)

static kmem_cache_t *buffer_head_cachep;

static struct buffer_head *completed_bhs[NR_CPUS] __cacheline_aligned;

static int lock_buffer(struct task_struct *p,
                       unsigned long buffer,
                       unsigned short size,
                       int writeable_buffer);
static void unlock_buffer(unsigned long buffer,
                          unsigned short size,
                          int writeable_buffer);

static void io_schedule(unsigned long unused);
static int do_block_io_op_domain(struct task_struct *p, int max_to_do);
static void dispatch_rw_block_io(struct task_struct *p,
                                 blk_ring_req_entry_t *req);
static void make_response(struct task_struct *p, unsigned long id,
                          unsigned short op, unsigned long st);


/******************************************************************
 * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
 */

static struct list_head io_schedule_list;
static spinlock_t io_schedule_list_lock;

static int __on_blkdev_list(struct task_struct *p)
{
    return p->blkdev_list.next != NULL;
}

static void remove_from_blkdev_list(struct task_struct *p)
{
    unsigned long flags;
    if ( !__on_blkdev_list(p) ) return;
    spin_lock_irqsave(&io_schedule_list_lock, flags);
    if ( __on_blkdev_list(p) )
    {
        list_del(&p->blkdev_list);
        p->blkdev_list.next = NULL;
        put_task_struct(p);
    }
    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
}

static void add_to_blkdev_list_tail(struct task_struct *p)
{
    unsigned long flags;
    if ( __on_blkdev_list(p) ) return;
    spin_lock_irqsave(&io_schedule_list_lock, flags);
    if ( !__on_blkdev_list(p) )
    {
        list_add_tail(&p->blkdev_list, &io_schedule_list);
        get_task_struct(p);
    }
    spin_unlock_irqrestore(&io_schedule_list_lock, flags);
}


/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static DECLARE_TASKLET(io_schedule_tasklet, io_schedule, 0);

static void io_schedule(unsigned long unused)
{
    struct task_struct *p;
    struct list_head *ent;

    /* Queue up a batch of requests. */
    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
            !list_empty(&io_schedule_list) )
    {
        ent = io_schedule_list.next;
        p = list_entry(ent, struct task_struct, blkdev_list);
        get_task_struct(p);
        remove_from_blkdev_list(p);
        if ( do_block_io_op_domain(p, BATCH_PER_DOMAIN) )
            add_to_blkdev_list_tail(p);
        put_task_struct(p);
    }

    /* Push the batch through to disc. */
    run_task_queue(&tq_disk);
}

static void maybe_trigger_io_schedule(void)
{
    /*
     * Needed so that two processes, who together make the following predicate
     * true, don't both read stale values and evaluate the predicate
     * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
     */
    smp_mb();

    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
         !list_empty(&io_schedule_list) )
        tasklet_schedule(&io_schedule_tasklet);
}


/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */

static void end_block_io_op_softirq(struct softirq_action *h)
{
    pending_req_t *pending_req;
    struct buffer_head *bh, *nbh;
    unsigned int cpu = smp_processor_id();

    local_irq_disable();
    bh = completed_bhs[cpu];
    completed_bhs[cpu] = NULL;
    local_irq_enable();

    while ( bh != NULL )
    {
        pending_req = bh->pending_req;

        unlock_buffer(virt_to_phys(bh->b_data), bh->b_size,
                      (pending_req->operation==READ));

        if ( atomic_dec_and_test(&pending_req->pendcnt) )
        {
            make_response(pending_req->domain, pending_req->id,
                          pending_req->operation, pending_req->status);
            put_task_struct(pending_req->domain);
            spin_lock(&pend_prod_lock);
            pending_ring[MASK_PEND_IDX(pending_prod)] =
                pending_req - pending_reqs;
            pending_prod++;
            spin_unlock(&pend_prod_lock);
            maybe_trigger_io_schedule();
        }

        nbh = bh->b_reqnext;
        kmem_cache_free(buffer_head_cachep, bh);
        bh = nbh;
    }
}

static void end_block_io_op(struct buffer_head *bh, int uptodate)
{
    unsigned long flags;
    unsigned int cpu = smp_processor_id();

    /* An error fails the entire request. */
    if ( !uptodate )
    {
        DPRINTK("Buffer not up-to-date at end of operation\n");
        bh->pending_req->status = 2;
    }

    local_irq_save(flags);
    bh->b_reqnext = completed_bhs[cpu];
    completed_bhs[cpu] = bh;
    local_irq_restore(flags);

    __cpu_raise_softirq(cpu, BLKDEV_RESPONSE_SOFTIRQ);
}


/* ----[ Syscall Interface ]------------------------------------------------*/

long do_block_io_op(block_io_op_t *u_block_io_op)
{
    long ret = 0;
    block_io_op_t op;
    struct task_struct *p = current;

    if ( unlikely(copy_from_user(&op, u_block_io_op, sizeof(op)) != 0) )
        return -EFAULT;

    switch ( op.cmd )
    {
    case BLOCK_IO_OP_SIGNAL:
        /* simply indicates there're reqs outstanding => add current to list */
        add_to_blkdev_list_tail(p);
        maybe_trigger_io_schedule();
        break;

    case BLOCK_IO_OP_RESET:
        /* Avoid a race with the tasklet. */
        remove_from_blkdev_list(p);
        if ( p->blk_req_cons != p->blk_resp_prod )
        {
            /* Interface isn't quiescent. */
            ret = -EINVAL;
        }
        else
        {
            p->blk_req_cons = p->blk_resp_prod = 0;
            ret = 0;
        }
        break;

    case BLOCK_IO_OP_RING_ADDRESS:
        op.u.ring_mfn = virt_to_phys(p->blk_ring_base) >> PAGE_SHIFT;
        ret = copy_to_user(u_block_io_op, &op, sizeof(op)) ? -EFAULT : 0;
        break;

    case BLOCK_IO_OP_VBD_CREATE:
        /* create a new VBD */
        ret = vbd_create(&op.u.create_params);
        break;

    case BLOCK_IO_OP_VBD_GROW:
        /* append an extent to a VBD */
        ret = vbd_grow(&op.u.grow_params);
        break;

    case BLOCK_IO_OP_VBD_SHRINK:
        /* remove the final extent from a VBD */
        ret = vbd_shrink(&op.u.shrink_params);
        break;

    case BLOCK_IO_OP_VBD_SET_EXTENTS:
        /* a fresh extent list for the given VBD */
        ret = vbd_setextents(&op.u.setextents_params);
        break;

    case BLOCK_IO_OP_VBD_DELETE:
        /* delete a VBD */
        ret = vbd_delete(&op.u.delete_params);
        break;

    case BLOCK_IO_OP_VBD_PROBE:
        /* query VBD information for self or others (or all) */
        if ( (ret = vbd_probe(&op.u.probe_params)) == 0 )
            copy_to_user(u_block_io_op, &op, sizeof(op));
        break;

    case BLOCK_IO_OP_VBD_INFO:
        /* query information about a particular VBD */
        if ( (ret = vbd_info(&op.u.info_params)) == 0 )
            copy_to_user(u_block_io_op, &op, sizeof(op));
        break;

    default:
        ret = -ENOSYS;
    }

    return ret;
}


/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

static int lock_buffer(struct task_struct *p,
                       unsigned long buffer,
                       unsigned short size,
                       int writeable_buffer)
{
    unsigned long pfn;
    struct pfn_info *page;

    for ( pfn = buffer >> PAGE_SHIFT;
          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
          pfn++ )
    {
        if ( unlikely(pfn >= max_page) )
            goto fail;

        page = &frame_table[pfn];

        if ( unlikely(!get_page(page, p)) )
            goto fail;

        if ( writeable_buffer &&
             unlikely(!get_page_type(page, PGT_writeable_page)) )
        {
            put_page(page);
            goto fail;
        }
    }

    return 1;

 fail:
    while ( pfn-- > (buffer >> PAGE_SHIFT) )
    {
        if ( writeable_buffer )
            put_page_type(&frame_table[pfn]);
        put_page(&frame_table[pfn]);
    }
    return 0;
}

static void unlock_buffer(unsigned long buffer,
                          unsigned short size,
                          int writeable_buffer)
{
    unsigned long pfn;

    for ( pfn = buffer >> PAGE_SHIFT;
          pfn < ((buffer + size + PAGE_SIZE - 1) >> PAGE_SHIFT);
          pfn++ )
    {
        if ( writeable_buffer )
            put_page_type(&frame_table[pfn]);
        put_page(&frame_table[pfn]);
    }
}

static int do_block_io_op_domain(struct task_struct *p, int max_to_do)
{
    blk_ring_t *blk_ring = p->blk_ring_base;
    blk_ring_req_entry_t *req;
    BLK_RING_IDX i;
    int more_to_do = 0;

    /* Take items off the comms ring, taking care not to overflow. */
    for ( i = p->blk_req_cons;
          (i != blk_ring->req_prod) &&
              ((i-p->blk_resp_prod) != BLK_RING_SIZE);
          i++ )
    {
        if ( (max_to_do-- == 0) ||
             (NR_PENDING_REQS == MAX_PENDING_REQS) )
        {
            more_to_do = 1;
            break;
        }

        req = &blk_ring->ring[MASK_BLK_IDX(i)].req;
        switch ( req->operation )
        {
        case XEN_BLOCK_READ:
        case XEN_BLOCK_WRITE:
            dispatch_rw_block_io(p, req);
            break;

        default:
            DPRINTK("error: unknown block io operation [%d]\n",
                    blk_ring->ring[i].req.operation);
            make_response(p, blk_ring->ring[i].req.id,
                          blk_ring->ring[i].req.operation, 1);
            break;
        }
    }

    p->blk_req_cons = i;
    return more_to_do;
}

static void dispatch_rw_block_io(struct task_struct *p,
                                 blk_ring_req_entry_t *req)
{
    extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
    struct buffer_head *bh;
    int operation = (req->operation == XEN_BLOCK_WRITE) ? WRITE : READ;
    unsigned short nr_sects;
    unsigned long buffer;
    int i, tot_sects;
    pending_req_t *pending_req;

    /* We map virtual scatter/gather segments to physical segments. */
    int new_segs, nr_psegs = 0;
    phys_seg_t phys_seg[MAX_BLK_SEGS * 2];

    /* Check that number of segments is sane. */
    if ( unlikely(req->nr_segments == 0) ||
         unlikely(req->nr_segments > MAX_BLK_SEGS) )
    {
        DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
        goto bad_descriptor;
    }

    /*
     * Check each address/size pair is sane, and convert into a
     * physical device and block offset. Note that if the offset and size
     * crosses a virtual extent boundary, we may end up with more
     * physical scatter/gather segments than virtual segments.
     */
    for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
    {
        buffer   = req->buffer_and_sects[i] & ~0x1FF;
        nr_sects = req->buffer_and_sects[i] &  0x1FF;

        if ( unlikely(nr_sects == 0) )
        {
            DPRINTK("zero-sized data request\n");
            goto bad_descriptor;
        }

        phys_seg[nr_psegs].dev           = req->device;
        phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
        phys_seg[nr_psegs].buffer        = buffer;
        phys_seg[nr_psegs].nr_sects      = nr_sects;