path: root/lib/lufa/Projects/Webserver/Lib/DHCPClientApp.c
/*
             LUFA Library
     Copyright (C) Dean Camera, 2017.

  dean [at] fourwalledcubicle [dot] com
           www.lufa-lib.org
*/

/*
  Copyright 2017  Dean Camera (dean [at] fourwalledcubicle [dot] com)

  Permission to use, copy, modify, distribute, and sell this
  software and its documentation for any purpose is hereby granted
  without fee, provided that the above copyright notice appear in
  all copies and that both that the copyright notice and this
  permission notice and warranty disclaimer appear in supporting
  documentation, and that the name of the author not be used in
  advertising or publicity pertaining to distribution of the
  software without specific, written prior permission.

  The author disclaims all warranties with regard to this
  software, including all implied warranties of merchantability
  and fitness.  In no event shall the author be liable for any
  special, indirect or consequential damages or any damages
  whatsoever resulting from loss of use, data or profits, whether
  in an action of contract, negligence or other tortious action,
  arising out of or in connection with the use or performance of
  this software.
*/

/** \file
 *
 *  DHCP Client Application. When connected to the uIP stack, this will retrieve IP configuration settings from the
 *  DHCP server on the network.
 */

#define  INCLUDE_FROM_DHCPCLIENTAPP_C
#include "DHCPClientApp.h"

#if defined(ENABLE_DHCP_CLIENT) || defined(__DOXYGEN__)

/** Initialization function for the DHCP client. */
void DHCPClientApp_Init(void)
{
	/* Create a new UDP connection to the DHCP server port for the DHCP solicitation */
	struct uip_udp_conn* Connection = uip_udp_new(&uip_broadcast_addr, HTONS(DHCP_SERVER_PORT));

	/* If the connection was successfully created, bind it to the local DHCP client port */
	if (Connection != NULL)
	{
		uip_udp_appstate_t* const AppState = &Connection->appstate;
		uip_udp_bind(Connection, HTONS(DHCP_CLIENT_PORT));

		/* Set the initial client state */
		AppState->DHCPClient.CurrentState = DHCP_STATE_SendDiscover;

		/* Set timeout period to half a second for a DHCP server to respond */
		timer_set(&AppState->DHCPClient.Timeout, CLOCK_SECOND / 2);
	}
}
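
/* Usage sketch (illustrative; the surrounding initialization calls are
 * assumptions, not part of this module): the DHCP client is started with a
 * single call once the uIP stack is up, for example:
 *
 *     uip_init();
 *     #if defined(ENABLE_DHCP_CLIENT)
 *     DHCPClientApp_Init();
 *     #endif
 *
 * From then on the client runs entirely from DHCPClientApp_Callback().
 */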

/** uIP stack application callback for the DHCP client. This function must be called each time the TCP/IP stack
 *  needs a UDP packet to be processed.
 */
void DHCPClientApp_Callback(void)
{
	uip_udp_appstate_t* const AppState    = &uip_udp_conn->appstate;
	DHCP_Header_t*      const AppData     = (DHCP_Header_t*)uip_appdata;
	uint16_t                  AppDataSize = 0;

	switch (AppState->DHCPClient.CurrentState)
	{
		case DHCP_STATE_SendDiscover:
			/* Clear all DHCP settings, reset client IP address */
			memset(&AppState->DHCPClient.DHCPOffer_Data, 0x00, sizeof(AppState->DHCPClient.DHCPOffer_Data));
			uip_sethostaddr((uip_ipaddr_t*)&AppState->DHCPClient.DHCPOffer_Data.AllocatedIP);

			/* Fill out the DHCP packet header */
			AppDataSize += DHCPClientApp_FillDHCPHeader(AppData, DHCP_DISCOVER, AppState);

			/* Add the required DHCP options list to the packet */
			uint8_t RequiredOptionList[] = {DHCP_OPTION_SUBNET_MASK, DHCP_OPTION_ROUTER, DHCP_OPTION_DNS_SERVER};
			AppDataSize += DHCPCommon_SetOption(AppData->Options, DHCP_OPTION_REQ_LIST, sizeof(RequiredOptionList),
			                                    RequiredOptionList);

			/* Send the DHCP DISCOVER packet */
			uip_udp_send(AppDataSize);

			/* Reset the timeout timer, progress to next state */
			timer_reset(&AppState->DHCPClient.Timeout);
			AppState->DHCPClient.CurrentState = DHCP_STATE_WaitForOffer;

			break;
		case DHCP_STATE_WaitForOffer:
			if (!(uip_newdata()))
			{
				/* Check if the DHCP timeout period has expired while waiting for a response */
				if (timer_expired(&AppState->DHCPClient.Timeout))
				  AppState->DHCPClient.CurrentState = DHCP_STATE_SendDiscover;

				break;
			}

			uint8_t OfferResponse_MessageType;
			if ((AppData->TransactionID == DHCP_TRANSACTION_ID) &&
			    DHCPCommon_GetOption(AppData->Options, DHCP_OPTION_MSG_TYPE, &OfferResponse_MessageType) &&
			    (OfferResponse_MessageType == DHCP_OFFER))
			{
				/* Received a DHCP offer for an IP address, copy over values for later request */
				memcpy(&AppState->DHCPClient.DHCPOffer_Data.AllocatedIP, &AppData->YourIP, sizeof(uip_ipaddr_t));
				DHCPCommon_GetOption(AppData->Options, DHCP_OPTION_SUBNET_MASK, &AppState->DHCPClient.DHCPOffer_Data.Netmask);
				DHCPCommon_GetOption(AppData->Options, DHCP_OPTION_ROUTER,      &AppState->DHCPClient.DHCPOffer_Data.GatewayIP);
				DHCPCommon_GetOption(AppData->Options, DHCP_OPTION_SERVER_ID,   &AppState->DHCPClient.DHCPOffer_Data.ServerIP);

				timer_reset(&AppState->DHCPClient.Timeout);
				AppState->DHCPClient.CurrentState = DHCP_STATE_SendRequest;
			}

			break;
		case DHCP_STATE_SendRequest:
			/* Fill out the DHCP packet header */
			AppDataSize += DHCPClientApp_FillDHCPHeader(AppData, DHCP_REQUEST, AppState);

			/* Add the DHCP REQUESTED IP ADDRESS option to the packet */
			AppDataSize += DHCPCommon_SetOption(AppData->Options, DHCP_OPTION_REQ_IPADDR, sizeof(uip_ipaddr_t),
			                                    &AppState->DHCPClient.DHCPOffer_Data.AllocatedIP);

			/* Add the DHCP SERVER IP ADDRESS option to the packet */
			AppDataSize += DHCPCommon_SetOption(AppData->Options, DHCP_OPTION_SERVER_ID, sizeof(uip_ipaddr_t),
			                                    &AppState->DHCPClient.DHCPOffer_Data.ServerIP);

			/* Send the DHCP REQUEST packet */
			uip_udp_send(AppDataSize);

			/* Reset the timeout timer, progress to next state */
			timer_reset(&AppState->DHCPClient.Timeout);
			AppState->DHCPClient.CurrentState = DHCP_STATE_WaitForACK;

			break;
		case DHCP_STATE_WaitForACK:
			if (!(uip_newdata()))
			{
				/* Check if the DHCP timeout period has expired while waiting for a response */
				if (timer_expired(&AppState->DHCPClient.Timeout))
				  AppState->DHCPClient.CurrentState = DHCP_STATE_SendDiscover;

				break;
			}

			uint8_t RequestResponse_MessageType;
			if ((AppData->TransactionID == DHCP_TRANSACTION_ID) &&
			    DHCPCommon_GetOption(AppData->Options, DHCP_OPTION_MSG_TYPE, &RequestResponse_MessageType) &&
			    (RequestResponse_MessageType == DHCP_ACK))
			{
				/* Set the new network parameters from the DHCP server */
				uip_sethostaddr((uip_ipaddr_t*)&AppState->DHCPClient.DHCPOffer_Data.AllocatedIP);
				uip_setnetmask((uip_ipaddr_t*)&AppState->DHCPClient.DHCPOffer_Data.Netmask);
				uip_setdraddr((uip_ipaddr_t*)&AppState->DHCPClient.DHCPOffer_Data.GatewayIP);

				AppState->DHCPClient.CurrentState = DHCP_STATE_AddressLeased;
			}

			break;
	}
}
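
/* Dispatch sketch (illustrative): uIP delivers all UDP events through the
 * single UIP_UDP_APPCALL hook, so a port-based dispatcher along these lines
 * is assumed to route DHCP traffic to this callback:
 *
 *     void uIPManagement_UDPCallback(void)
 *     {
 *         if (uip_udp_conn->lport == HTONS(DHCP_CLIENT_PORT))
 *           DHCPClientApp_Callback();
 *     }
 */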

/** Fills the DHCP packet response with the appropriate BOOTP header for DHCP. This fills out all the required
 *  fields, leaving only the additional DHCP options to be added to the packet before it is sent to the DHCP server.
 *
 *  \param[out] DHCPHeader       Location in the packet buffer where the BOOTP header should be written to
 *  \param[in]  DHCPMessageType  DHCP Message type, such as DHCP_DISCOVER
 *  \param[in]  AppState         Application state of the current UDP connection
 *
 *  \return Size in bytes of the created DHCP packet
 */
static uint16_t DHCPClientApp_FillDHCPHeader(DHCP_Header_t* const DHCPHeader,
                                             const uint8_t DHCPMessageType,
                                             uip_udp_appstate_t* const AppState)
{
	/* Erase existing packet data so that we start with all 0x00 DHCP header data */
	memset(DHCPHeader, 0, sizeof(DHCP_Header_t));

	/* Fill out the DHCP packet header */
	DHCPHeader->Operation             = DHCP_OP_BOOTREQUEST;
	DHCPHeader->HardwareType          = DHCP_HTYPE_ETHERNET;
	DHCPHeader->HardwareAddressLength = sizeof(MACAddress);
	DHCPHeader->Hops                  = 0;
	DHCPHeader->TransactionID         = DHCP_TRANSACTION_ID;
	DHCPHeader->ElapsedSeconds        = 0;
	DHCPHeader->Flags                 = HTONS(BOOTP_BROADCAST);
	memcpy(&DHCPHeader->ClientIP,     &uip_hostaddr,        sizeof(uip_ipaddr_t));
	memcpy(&DHCPHeader->YourIP,       &AppState->DHCPClient.DHCPOffer_Data.AllocatedIP, sizeof(uip_ipaddr_t));
	memcpy(&DHCPHeader->NextServerIP, &AppState->DHCPClient.DHCPOffer_Data.ServerIP,    sizeof(uip_ipaddr_t));
	memcpy(&DHCPHeader->ClientHardwareAddress, &MACAddress, sizeof(struct uip_eth_addr));
	DHCPHeader->Cookie                = DHCP_MAGIC_COOKIE;

	/* Add the DHCP message type and terminator options to the start of the DHCP options field */
	DHCPHeader->Options[0]            = DHCP_OPTION_MSG_TYPE;
	DHCPHeader->Options[1]            = 1;
	DHCPHeader->Options[2]            = DHCPMessageType;
	DHCPHeader->Options[3]            = DHCP_OPTION_END;

	/* Calculate the total number of bytes added to the outgoing packet */
	return (sizeof(DHCP_Header_t) + 4);
}
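
/* Option layout note: DHCP options are {code, length, value...} triplets
 * terminated by a single DHCP_OPTION_END byte, so the header above always
 * carries exactly four option bytes, e.g. for a DISCOVER:
 *
 *     Options[0] = DHCP_OPTION_MSG_TYPE   (option code 53)
 *     Options[1] = 1                      (value length)
 *     Options[2] = DHCP_DISCOVER
 *     Options[3] = DHCP_OPTION_END        (code 255)
 *
 * which accounts for the fixed "+ 4" in the returned packet size; later
 * DHCPCommon_SetOption() calls are assumed to insert before the terminator.
 */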

#endif