/******************************************************************************
 * arch/x86/irq.c
 *
 * Portions of this file are:
 *  Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/errno.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/perfc.h>
#include <xen/sched.h>
#include <xen/keyhandler.h>
#include <asm/current.h>
#include <asm/smpboot.h>
#include <asm/hvm/irq.h>

/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);

irq_desc_t irq_desc[NR_IRQS];

static void __do_IRQ_guest(int vector);

void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { }

static void enable_none(unsigned int vector) { }
static unsigned int startup_none(unsigned int vector) { return 0; }
static void disable_none(unsigned int vector) { }
static void ack_none(unsigned int vector)
{
    ack_bad_irq(vector);
}

#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
    "none",
    startup_none,
    shutdown_none,
    enable_none,
    disable_none,
    ack_none,
    end_none
};

atomic_t irq_err_count;

asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
    unsigned int      vector = regs->entry_vector;
    irq_desc_t       *desc = &irq_desc[vector];
    struct irqaction *action;

    perfc_incr(irqs);

    spin_lock(&desc->lock);
    desc->handler->ack(vector);

    if ( likely(desc->status & IRQ_GUEST) )
    {
        __do_IRQ_guest(vector);
        spin_unlock(&desc->lock);
        return;
    }

    desc->status &= ~IRQ_REPLAY;
    desc->status |= IRQ_PENDING;

    /*
     * Since we set PENDING, if another processor is handling a different
     * instance of this same irq, the other processor will take care of it.
     */
    if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
        goto out;

    desc->status |= IRQ_INPROGRESS;

    action = desc->action;
    while ( desc->status & IRQ_PENDING )
    {
        desc->status &= ~IRQ_PENDING;
        irq_enter();
        spin_unlock_irq(&desc->lock);
        action->handler(vector_to_irq(vector), action->dev_id, regs);
        spin_lock_irq(&desc->lock);
        irq_exit();
    }

    desc->status &= ~IRQ_INPROGRESS;

 out:
    desc->handler->end(vector);
    spin_unlock(&desc->lock);
}
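
/*
 * Example of the PENDING/INPROGRESS protocol above: if CPU1 takes this
 * vector while CPU0 is already inside the handler (IRQ_INPROGRESS set,
 * desc->lock dropped), CPU1 merely sets IRQ_PENDING and exits. CPU0's
 * while-loop then observes IRQ_PENDING and re-runs the handler, so no
 * instance is lost and the handler is never entered concurrently.
 */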

int request_irq(unsigned int irq,
        void (*handler)(int, void *, struct cpu_user_regs *),
        unsigned long irqflags, const char * devname, void *dev_id)
{
    struct irqaction *action;
    int retval;

    /*
     * Sanity-check: shared interrupts must pass in a real dev-ID,
     * otherwise we'll have trouble later trying to figure out
     * which interrupt is which (messes up the interrupt freeing
     * logic etc).
     */
    if (irq >= NR_IRQS)
        return -EINVAL;
    if (!handler)
        return -EINVAL;

    action = xmalloc(struct irqaction);
    if (!action)
        return -ENOMEM;

    action->handler = handler;
    action->name = devname;
    action->dev_id = dev_id;

    retval = setup_irq(irq, action);
    if (retval)
        xfree(action);

    return retval;
}

void free_irq(unsigned int irq)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);
    desc->action  = NULL;
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->handler->shutdown(vector);
    spin_unlock_irqrestore(&desc->lock, flags);

    /* Wait to make sure it's not being used on another CPU */
    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
}

int setup_irq(unsigned int irq, struct irqaction *new)
{
    unsigned int  vector = irq_to_vector(irq);
    irq_desc_t   *desc = &irq_desc[vector];
    unsigned long flags;

    spin_lock_irqsave(&desc->lock, flags);

    if ( desc->action != NULL )
    {
        spin_unlock_irqrestore(&desc->lock, flags);
        return -EBUSY;
    }

    desc->action  = new;
    desc->depth   = 0;
    desc->status &= ~IRQ_DISABLED;
    desc->handler->startup(vector);

    spin_unlock_irqrestore(&desc->lock, flags);

    return 0;
}


/*
 * HANDLING OF GUEST-BOUND PHYSICAL IRQS
 */

#define IRQ_MAX_GUESTS 7
typedef struct {
    u8 nr_guests;
    u8 in_flight;
    u8 shareable;
    u8 ack_type;
#define ACKTYPE_NONE   0     /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1     /* Unmask PIC hardware (from any CPU)   */
#define ACKTYPE_EOI    2     /* EOI on the CPU that was interrupted  */
    cpumask_t cpu_eoi_map;   /* CPUs that need to EOI this interrupt */
    struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;

/*
 * Stack of interrupts awaiting EOI on each CPU. These must be popped in
 * order, as only the current highest-priority pending irq can be EOIed.
 */
struct pending_eoi {
    u8 vector; /* Vector awaiting EOI */
    u8 ready;  /* Ready for EOI now?  */
};
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)

static void __do_IRQ_guest(int vector)
{
    unsigned int        irq = vector_to_irq(vector);
    irq_desc_t         *desc = &irq_desc[vector];
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct domain      *d;
    int                 i, sp;
    struct pending_eoi *peoi = this_cpu(pending_eoi);

    if ( unlikely(action->nr_guests == 0) )
    {
        /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        desc->handler->end(vector);
        return;
    }

    if ( action->ack_type == ACKTYPE_EOI )
    {
        sp = pending_eoi_sp(peoi);
        ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
        ASSERT(sp < (NR_VECTORS-1));
        peoi[sp].vector = vector;
        peoi[sp].ready = 0;
        pending_eoi_sp(peoi) = sp+1;
        cpu_set(smp_processor_id(), action->cpu_eoi_map);
    }

    for ( i = 0; i < action->nr_guests; i++ )
    {
        d = action->guest[i];
        if ( (action->ack_type != ACKTYPE_NONE) &&
             !test_and_set_bit(irq, d->pirq_mask) )
            action->in_flight++;
        if ( !hvm_do_IRQ_dpci(d, irq) )
            send_guest_pirq(d, irq);
    }
}
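
/*
 * Worked example of the pending-EOI stack discipline: vectors are pushed
 * strictly in the order the local APIC delivers them, i.e. in increasing
 * priority of the nested interrupts. If vector 0x58 nests inside 0x30,
 * the stack holds [0x30, 0x58] and 0x58 must be EOIed first; the ASSERT
 * in __do_IRQ_guest (peoi[sp-1].vector < vector) checks this ordering,
 * and flush_ready_eoi() below only ever pops from the top.
 */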

/* Flush all ready EOIs from the top of this CPU's pending-EOI stack. */
static void flush_ready_eoi(void *unused)
{
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    irq_desc_t         *desc;
    int                 vector, sp;

    ASSERT(!local_irq_is_enabled());

    sp = pending_eoi_sp(peoi);

    while ( (--sp >= 0) && peoi[sp].ready )
    {
        vector = peoi[sp].vector;
        desc = &irq_desc[vector];
        spin_lock(&desc->lock);
        desc->handler->end(vector);
        spin_unlock(&desc->lock);
    }

    pending_eoi_sp(peoi) = sp+1;
}

static void __set_eoi_ready(irq_desc_t *desc)
{
    irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
    struct pending_eoi *peoi = this_cpu(pending_eoi);
    int                 vector, sp;

    vector = desc - irq_desc;

    if ( !(desc->status & IRQ_GUEST) ||
         (action->in_flight != 0) ||
         !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
        return;

    sp = pending_eoi_sp(peoi);
    do {
        ASSERT(sp > 0);
    } while ( peoi[--sp].vector != vector );
    ASSERT(!peoi[sp].ready);
    peoi[sp].ready = 1;
}

/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
static void set_eoi_ready(void *data)
{
    irq_desc_t *desc = data;

    ASSERT(!local_irq_is_enabled());

    spin_lock(&desc->lock);
    __set_eoi_ready(desc);
    spin_unlock(&desc->lock);

    flush_ready_eoi(NULL);
}

static void __pirq_guest_eoi(struct domain *d, int irq)
{
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    cpumask_t           cpu_eoi_map;

    desc   = &irq_desc[irq_to_vector(irq)];
    action = (irq_guest_action_t *)desc->action;

    spin_lock_irq(&desc->lock);

    ASSERT(!test_bit(irq, d->pirq_mask) ||
           (action->ack_type != ACKTYPE_NONE));

    if ( unlikely(!test_and_clear_bit(irq, d->pirq_mask)) ||
         unlikely(--action->in_flight != 0) )
    {
        spin_unlock_irq(&desc->lock);
        return;
    }

    if ( action->ack_type == ACKTYPE_UNMASK )
    {
        ASSERT(cpus_empty(action->cpu_eoi_map));
        desc->handler->end(irq_to_vector(irq));
        spin_unlock_irq(&desc->lock);
        return;
    }

    ASSERT(action->ack_type == ACKTYPE_EOI);

    cpu_eoi_map = action->cpu_eoi_map;

    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
    {
        __set_eoi_ready(desc);
        spin_unlock(&desc->lock);
        flush_ready_eoi(NULL);
        local_irq_enable();
    }
    else
    {
        spin_unlock_irq(&desc->lock);
    }

    if ( !cpus_empty(cpu_eoi_map) )
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
}

int pirq_guest_eoi(struct domain *d, int irq)
{
    if ( (irq < 0) || (irq >= NR_IRQS) )
        return -EINVAL;

    __pirq_guest_eoi(d, irq);

    return 0;
}

int pirq_guest_unmask(struct domain *d)
{
    unsigned int   irq;
    shared_info_t *s = d->shared_info;

    for ( irq = find_first_bit(d->pirq_mask, NR_IRQS);
          irq < NR_IRQS;
          irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) )
    {
        if ( !test_bit(d->pirq_to_evtchn[irq],
                       __shared_info_addr(d, s, evtchn_mask)) )
            __pirq_guest_eoi(d, irq);
    }

    return 0;
}
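
/*
 * How the three ack types play out in practice: a level-triggered IO-APIC
 * line bound with ACKTYPE_EOI stays outstanding until every guest sharing
 * it has called pirq_guest_eoi(); only when in_flight drops to zero is the
 * vector marked ready on each CPU in cpu_eoi_map and finally EOIed by
 * flush_ready_eoi(). An ACKTYPE_UNMASK line is simply re-enabled from
 * whichever CPU sees in_flight reach zero, and an ACKTYPE_NONE line needs
 * no completion step at all because it was acked during do_IRQ().
 */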

extern int ioapic_ack_new;
int pirq_acktype(int irq)
{
    irq_desc_t  *desc;
    unsigned int vector;

    vector = irq_to_vector(irq);
    if ( vector == 0 )
        return ACKTYPE_NONE;

    desc = &irq_desc[vector];

    if ( desc->handler == &no_irq_type )
        return ACKTYPE_NONE;

    /*
     * Edge-triggered IO-APIC and LAPIC interrupts need no final
     * acknowledgement: we ACK early during interrupt processing.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-edge") ||
         !strcmp(desc->handler->typename, "local-APIC-edge") )
        return ACKTYPE_NONE;

    /*
     * Level-triggered IO-APIC interrupts need to be acknowledged on the CPU
     * on which they were received. This is because we tickle the LAPIC to
     * EOI.
     */
    if ( !strcmp(desc->handler->typename, "IO-APIC-level") )
        return ioapic_ack_new ? ACKTYPE_EOI : ACKTYPE_UNMASK;

    /* Legacy PIC interrupts can be acknowledged from any CPU. */
    if ( !strcmp(desc->handler->typename, "XT-PIC") )
        return ACKTYPE_UNMASK;

    if ( strstr(desc->handler->typename, "MPIC") )
    {
        if ( desc->status & IRQ_LEVEL )
            return (desc->status & IRQ_PER_CPU) ? ACKTYPE_EOI : ACKTYPE_UNMASK;
        return ACKTYPE_NONE; /* edge-triggered => no final EOI */
    }

    printk("Unknown PIC type '%s' for IRQ %d\n", desc->handler->typename, irq);
    BUG();

    return 0;
}

int pirq_shared(int irq)
{
    unsigned int        vector;
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    unsigned long       flags;
    int                 shared;

    vector = irq_to_vector(irq);
    if ( vector == 0 )
        return 0;

    desc = &irq_desc[vector];

    spin_lock_irqsave(&desc->lock, flags);
    action = (irq_guest_action_t *)desc->action;
    shared = ((desc->status & IRQ_GUEST) && (action->nr_guests > 1));
    spin_unlock_irqrestore(&desc->lock, flags);

    return shared;
}

int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
    unsigned int        vector;
    irq_desc_t         *desc;
    irq_guest_action_t *action;
    unsigned long       flags;
    int                 rc = 0;
    cpumask_t           cpumask = CPU_MASK_NONE;

 retry:
    vector = irq_to_vector(irq);
    if ( vector == 0 )
        return -EINVAL;

    desc = &irq_desc[vector];

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    if ( !(desc->status & IRQ_GUEST) )
    {
        if ( desc->action != NULL )
        {
            gdprintk(XENLOG_INFO,
                     "Cannot bind IRQ %d to guest. In use by '%s'.\n",
                     irq, desc->action->name);
            rc = -EBUSY;
            goto out;
        }

        action = xmalloc(irq_guest_action_t);
        if ( (desc->action = (struct irqaction *)action) == NULL )
        {
            gdprintk(XENLOG_INFO,
                     "Cannot bind IRQ %d to guest. Out of memory.\n", irq);
            rc = -ENOMEM;
            goto out;
        }

        action->nr_guests = 0;
        action->in_flight = 0;
        action->shareable = will_share;
        action->ack_type  = pirq_acktype(irq);
        cpus_clear(action->cpu_eoi_map);

        desc->depth = 0;
        desc->status |= IRQ_GUEST;
        desc->status &= ~IRQ_DISABLED;
        desc->handler->startup(vector);

        /* Attempt to bind the interrupt target to the correct CPU. */
        cpu_set(v->processor, cpumask);
        if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
            desc->handler->set_affinity(vector, cpumask);
    }
    else if ( !will_share || !action->shareable )
    {
        gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
                 "Will not share with others.\n", irq);
        rc = -EBUSY;
        goto out;
    }
    else if ( action->nr_guests == 0 )
    {
        /*
         * Indicates that an ACKTYPE_EOI interrupt is being released.
         * Wait for that to happen before continuing.
         */
        ASSERT(action->ack_type == ACKTYPE_EOI);
        ASSERT(desc->status & IRQ_DISABLED);
        spin_unlock_irqrestore(&desc->lock, flags);
        cpu_relax();
        goto retry;
    }

    if ( action->nr_guests == IRQ_MAX_GUESTS )
    {
        gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
                 "Already at max share.\n", irq);
        rc = -EBUSY;
        goto out;
    }

    action->guest[action->nr_guests++] = v->domain;

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return rc;
}
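
/*
 * Minimal sketch of a caller's view of the bind/unbind lifecycle, assuming
 * a hypothetical physdev-op handler. Everything here except
 * pirq_guest_bind(), pirq_guest_eoi() and pirq_guest_unbind() is
 * illustrative only and not part of this file's API.
 */
#if 0
static int example_bind_pirq(struct vcpu *v, int pirq)
{
    /* -EBUSY: driver-owned or unshareable; -EINVAL: no vector assigned. */
    int rc = pirq_guest_bind(v, pirq, 1 /* willing to share */);
    if ( rc != 0 )
        return rc;

    /* ... events are delivered; the guest acks each via pirq_guest_eoi() ... */

    return pirq_guest_unbind(v->domain, pirq);
}
#endif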

int pirq_guest_unbind(struct domain *d, int irq)
{
    unsigned int        vector = irq_to_vector(irq);
    irq_desc_t         *desc = &irq_desc[vector];
    irq_guest_action_t *action;
    cpumask_t           cpu_eoi_map;
    unsigned long       flags;
    int                 i;

    BUG_ON(vector == 0);

    spin_lock_irqsave(&desc->lock, flags);

    action = (irq_guest_action_t *)desc->action;

    i = 0;
    while ( action->guest[i] && (action->guest[i] != d) )
        i++;
    memmove(&action->guest[i], &action->guest[i+1],
            (IRQ_MAX_GUESTS-i-1) * sizeof(action->guest[0]));
    action->nr_guests--;

    switch ( action->ack_type )
    {
    case ACKTYPE_UNMASK:
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) )
            desc->handler->end(vector);
        break;
    case ACKTYPE_EOI:
        /* NB. If #guests == 0 then we clear the eoi_map later on. */
        if ( test_and_clear_bit(irq, d->pirq_mask) &&
             (--action->in_flight == 0) &&
             (action->nr_guests != 0) )
        {
            cpu_eoi_map = action->cpu_eoi_map;
            spin_unlock_irqrestore(&desc->lock, flags);
            on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 0);
            spin_lock_irqsave(&desc->lock, flags);
        }
        break;
    }

    /*
     * The guest cannot re-bind to this IRQ until this function returns. So,
     * when we have flushed this IRQ from pirq_mask, it should remain flushed.
     */
    BUG_ON(test_bit(irq, d->pirq_mask));

    if ( action->nr_guests != 0 )
        goto out;

    BUG_ON(action->in_flight != 0);

    /*
     * Last binding is gone: clear the eoi_map deferred above. Flush any
     * CPUs still holding this vector on their pending-EOI stacks, waiting
     * for completion so the action is not freed under them.
     */
    cpu_eoi_map = action->cpu_eoi_map;
    if ( !cpus_empty(cpu_eoi_map) )
    {
        spin_unlock_irqrestore(&desc->lock, flags);
        on_selected_cpus(cpu_eoi_map, set_eoi_ready, desc, 1, 1);
        spin_lock_irqsave(&desc->lock, flags);
    }

    /* Tear the IRQ down, mirroring the setup in pirq_guest_bind(). */
    desc->action = NULL;
    xfree(action);
    desc->depth   = 1;
    desc->status |= IRQ_DISABLED;
    desc->status &= ~IRQ_GUEST;
    desc->handler->shutdown(vector);

 out:
    spin_unlock_irqrestore(&desc->lock, flags);
    return 0;
}

/*
 *  Button Hotplug driver
 *
 *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
 *
 *  Based on the diag.c - GPIO interface driver for Broadcom boards
 *    Copyright (C) 2006 Mike Baker <mbm@openwrt.org>,
 *    Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
 *    Copyright (C) 2008 Andy Boyett <agb@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/input.h>

#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <net/sock.h>

#define DRV_NAME	"button-hotplug"
#define DRV_VERSION	"0.4.0"
#define DRV_DESC	"Button Hotplug driver"

#define BH_SKB_SIZE	2048

#define PFX	DRV_NAME ": "

#undef BH_DEBUG

#ifdef BH_DEBUG
#define BH_DBG(fmt, args...) printk(KERN_DEBUG "%s: " fmt, DRV_NAME, ##args)
#else
#define BH_DBG(fmt, args...) do {} while (0)
#endif

#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, DRV_NAME, ##args)

#ifndef BIT_MASK
#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
#endif

struct bh_priv {
	unsigned long		*seen;
	struct input_handle	handle;
};

struct bh_event {
	const char		*name;
	char			*action;
	unsigned long		seen;

	struct sk_buff		*skb;
	struct work_struct	work;
};

struct bh_map {
	unsigned int	code;
	const char	*name;
};

extern struct sock *uevent_sock;
extern u64 uevent_next_seqnum(void);
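
/*
 * Note: mainline kernels do not export uevent_sock or uevent_next_seqnum()
 * to modules; this driver relies on the target kernel being patched (as
 * OpenWrt's is) to make them available.
 */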

#define BH_MAP(_code, _name)		\
	{				\
		.code = (_code),	\
		.name = (_name),	\
	}

static struct bh_map button_map[] = {
	BH_MAP(BTN_0,		"BTN_0"),
	BH_MAP(BTN_1,		"BTN_1"),
	BH_MAP(BTN_2,		"BTN_2"),
	BH_MAP(BTN_3,		"BTN_3"),
	BH_MAP(BTN_4,		"BTN_4"),
	BH_MAP(BTN_5,		"BTN_5"),
	BH_MAP(BTN_6,		"BTN_6"),
	BH_MAP(BTN_7,		"BTN_7"),
	BH_MAP(BTN_8,		"BTN_8"),
	BH_MAP(BTN_9,		"BTN_9"),
	BH_MAP(KEY_RESTART,	"reset"),
#ifdef KEY_WPS_BUTTON
	BH_MAP(KEY_WPS_BUTTON,	"wps"),
#endif /* KEY_WPS_BUTTON */
};
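
/*
 * Input events are matched against this table by key code; the name is
 * what userspace receives in the BUTTON variable. KEY_WPS_BUTTON is
 * guarded because older kernel headers do not define it. Further codes
 * can be mapped the same way, e.g. BH_MAP(KEY_RFKILL, "rfkill") on
 * kernels that define KEY_RFKILL.
 */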

/* -------------------------------------------------------------------------*/

static int bh_event_add_var(struct bh_event *event, int argv,
		const char *format, ...)
{
	char buf[128];	/* on-stack: concurrent work items must not share a buffer */
	char *s;
	va_list args;
	int len;

	if (argv)
		return 0;

	va_start(args, format);
	len = vsnprintf(buf, sizeof(buf), format, args);
	va_end(args);

	if (len >= sizeof(buf)) {
		BH_ERR("buffer size too small\n");
		WARN_ON(1);
		return -ENOMEM;
	}

	s = skb_put(event->skb, len + 1);
	strcpy(s, buf);

	BH_DBG("added variable '%s'\n", s);

	return 0;
}
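
/*
 * Each successful call appends one NUL-terminated "KEY=value" string to
 * event->skb, which is exactly the payload layout of the kernel's own
 * kobject uevent netlink messages: an "<action>@<path>" header followed
 * by a sequence of NUL-separated environment variables.
 */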

static int button_hotplug_fill_event(struct bh_event *event)
{
	int ret;

	ret = bh_event_add_var(event, 0, "HOME=%s", "/");
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "PATH=%s",
					"/sbin:/bin:/usr/sbin:/usr/bin");
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "SUBSYSTEM=%s", "button");
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "ACTION=%s", event->action);
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "BUTTON=%s", event->name);
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "SEEN=%ld", event->seen);
	if (ret)
		return ret;

	ret = bh_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());

	return ret;
}
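
/*
 * A complete "reset pressed" message therefore looks like this on the
 * wire (NUL bytes shown as \0; SEEN and SEQNUM values are illustrative):
 *
 *   pressed@\0HOME=/\0PATH=/sbin:/bin:/usr/sbin:/usr/bin\0
 *   SUBSYSTEM=button\0ACTION=pressed\0BUTTON=reset\0SEEN=3\0SEQNUM=42
 */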

static void button_hotplug_work(struct work_struct *work)
{
	struct bh_event *event = container_of(work, struct bh_event, work);
	int ret = 0;

	if (!uevent_sock)
		goto out_free_event;

	event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL);
	if (!event->skb)
		goto out_free_event;

	ret = bh_event_add_var(event, 0, "%s@", event->action);
	if (ret)
		goto out_free_skb;

	ret = button_hotplug_fill_event(event);
	if (ret)
		goto out_free_skb;

	NETLINK_CB(event->skb).dst_group = 1;
	netlink_broadcast(uevent_sock, event->skb, 0, 1, GFP_KERNEL);

 out_free_skb:
	if (ret) {
		BH_ERR("work error %d\n", ret);
		kfree_skb(event->skb);
	}
 out_free_event:
	kfree(event);
}
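
/*
 * Note on skb ownership: netlink_broadcast() consumes the skb whether or
 * not any listener receives it, so the kfree_skb() above only runs for
 * failures that occur before the broadcast (ret != 0).
 */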

static int button_hotplug_create_event(const char *name, unsigned long seen,
		int pressed)
{
	struct bh_event *event;

	BH_DBG("create event, name=%s, seen=%lu, pressed=%d\n",
		name, seen, pressed);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	event->name = name;
	event->seen = seen;
	event->action = pressed ? "pressed" : "released";

	INIT_WORK(&event->work, button_hotplug_work);
	schedule_work(&event->work);

	return 0;
}

/* -------------------------------------------------------------------------*/

#ifdef	CONFIG_HOTPLUG
static int button_get_index(unsigned int code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(button_map); i++)
		if (button_map[i].code == code)
			return i;

	return -1;
}
static void button_hotplug_event(struct input_handle *handle,
			   unsigned int type, unsigned int code, int value)
{
	struct bh_priv *priv = handle->private;
	unsigned long seen = jiffies;
	int btn;

	BH_DBG("event type=%u, code=%u, value=%d\n", type, code, value);

	if (type != EV_KEY)
		return;

	btn = button_get_index(code);
	if (btn < 0)
		return;

	button_hotplug_create_event(button_map[btn].name,
			(seen - priv->seen[btn]) / HZ, value);
	priv->seen[btn] = seen;
}
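
/*
 * SEEN is the age of the previous event on the same button, in whole
 * seconds. Because priv->seen[] starts zeroed, the value reported with
 * the very first event on a button is meaningless and scripts should
 * treat it accordingly.
 */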
#else
static void button_hotplug_event(struct input_handle *handle,
			   unsigned int type, unsigned int code, int value)
{
}
#endif	/* CONFIG_HOTPLUG */

static int button_hotplug_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct bh_priv *priv;
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(button_map); i++)
		if (test_bit(button_map[i].code, dev->keybit))
			break;

	if (i == ARRAY_SIZE(button_map))
		return -ENODEV;

	priv = kzalloc(sizeof(*priv) +
		       (sizeof(unsigned long) * ARRAY_SIZE(button_map)),
		       GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->seen = (unsigned long *) &priv[1];
	priv->handle.private = priv;
	priv->handle.dev = dev;
	priv->handle.handler = handler;
	priv->handle.name = DRV_NAME;

	ret = input_register_handle(&priv->handle);
	if (ret)
		goto err_free_priv;

	ret = input_open_device(&priv->handle);
	if (ret)
		goto err_unregister_handle;

	BH_DBG("connected to %s\n", dev->name);

	return 0;

 err_unregister_handle:
	input_unregister_handle(&priv->handle);

 err_free_priv:
	kfree(priv);
	return ret;
}

static void button_hotplug_disconnect(struct input_handle *handle)
{
	struct bh_priv *priv = handle->private;

	input_close_device(handle);
	input_unregister_handle(handle);

	kfree(priv);
}

static const struct input_device_id button_hotplug_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{
		/* Terminating entry */
	},
};
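
/*
 * The single entry above matches any input device advertising EV_KEY
 * capability; filtering down to the mapped buttons happens in
 * button_hotplug_connect().
 */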

MODULE_DEVICE_TABLE(input, button_hotplug_ids);

static struct input_handler button_hotplug_handler = {
	.event =	button_hotplug_event,
	.connect =	button_hotplug_connect,
	.disconnect =	button_hotplug_disconnect,
	.name =		DRV_NAME,
	.id_table =	button_hotplug_ids,
};

/* -------------------------------------------------------------------------*/

static int __init button_hotplug_init(void)
{
	int ret;

	printk(KERN_INFO DRV_DESC " version " DRV_VERSION "\n");
	ret = input_register_handler(&button_hotplug_handler);
	if (ret)
		BH_ERR("unable to register input handler\n");

	return ret;
}
module_init(button_hotplug_init);

static void __exit button_hotplug_exit(void)
{
	input_unregister_handler(&button_hotplug_handler);
}
module_exit(button_hotplug_exit);

MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_LICENSE("GPL v2");