/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
 */


#include <xen/sched.h>
#include <xen/iommu.h>
#include <xen/time.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include "iommu.h"
#include "dmar.h"
#include "vtd.h"
#include "extern.h"

int qinval_enabled;

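/* Dump the invalidation queue registers (IQA/IQH/IQT), for diagnostics. */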
static void print_qi_regs(struct iommu *iommu)
{
    u64 val;

    val = dmar_readq(iommu->reg, DMAR_IQA_REG);
    printk("DMAR_IQA_REG = %"PRIx64"\n", val);

    val = dmar_readq(iommu->reg, DMAR_IQH_REG);
    printk("DMAR_IQH_REG = %"PRIx64"\n", val);

    val = dmar_readq(iommu->reg, DMAR_IQT_REG);
    printk("DMAR_IQT_REG = %"PRIx64"\n", val);
}

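/*
 * Return the index of the next free queue slot, i.e. the current queue tail.
 * The IQT register holds the tail offset in 128-bit descriptor units, hence
 * the shift by 4.  Note that this implementation never reports a full queue
 * (-1), although some callers check for it.
 */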
static int qinval_next_index(struct iommu *iommu)
{
    u64 val;
    val = dmar_readq(iommu->reg, DMAR_IQT_REG);
    return (val >> 4);
}

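/*
 * Advance the queue tail past @index (wrapping at QINVAL_ENTRY_NR) and write
 * it to the IQT register, which hands the new descriptor to hardware.
 */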
static int qinval_update_qtail(struct iommu *iommu, int index)
{
    u64 val;

    /* An ASSERT is needed here to ensure that the register lock is held. */
    val = (index < (QINVAL_ENTRY_NR-1)) ? (index + 1) : 0;
    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));
    return 0;
}

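/* Fill queue slot @index with a Context-cache Invalidate Descriptor. */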
static int gen_cc_inv_dsc(struct iommu *iommu, int index,
    u16 did, u16 source_id, u8 function_mask, u8 granu)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
    qinval_entry->q.cc_inv_dsc.lo.granu = granu;
    qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.cc_inv_dsc.lo.did = did;
    qinval_entry->q.cc_inv_dsc.lo.sid = source_id;
    qinval_entry->q.cc_inv_dsc.lo.fm = function_mask;
    qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.cc_inv_dsc.hi.res = 0;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);

    return 0;
}

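/*
 * Queue a context-cache invalidation for the given domain id, source id and
 * function mask at the requested granularity, and kick the hardware by
 * updating the queue tail.
 */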
int queue_invalidate_context(struct iommu *iommu,
    u16 did, u16 source_id, u8 function_mask, u8 granu)
{
    int ret = -1;
    unsigned long flags;
    int index = -1;

    spin_lock_irqsave(&iommu->register_lock, flags);
    index = qinval_next_index(iommu);
    if ( index == -1 )
    {
        spin_unlock_irqrestore(&iommu->register_lock, flags);
        return -EBUSY;
    }
    ret = gen_cc_inv_dsc(iommu, index, did, source_id,
                         function_mask, granu);
    ret |= qinval_update_qtail(iommu, index);
    spin_unlock_irqrestore(&iommu->register_lock, flags);
    return ret;
}

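/*
 * Fill queue slot @index with an IOTLB Invalidate Descriptor.  The dr/dw
 * (read/write drain) arguments are accepted but not written into the
 * descriptor; both drain hint bits are left at 0 here.
 */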
static int gen_iotlb_inv_dsc(struct iommu *iommu, int index,
    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
    qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
    qinval_entry->q.iotlb_inv_dsc.lo.dr = 0;
    qinval_entry->q.iotlb_inv_dsc.lo.dw = 0;
    qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.iotlb_inv_dsc.lo.did = did;
    qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0;

    qinval_entry->q.iotlb_inv_dsc.hi.am = am;
    qinval_entry->q.iotlb_inv_dsc.hi.ih = ih;
    qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
    qinval_entry->q.iotlb_inv_dsc.hi.addr = addr;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}

int queue_invalidate_iotlb(struct iommu *iommu,
    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
    int ret = -1;
    unsigned long flags;
    int index = -1;

    spin_lock_irqsave(&iommu->register_lock, flags);

    index = qinval_next_index(iommu);
    ret = gen_iotlb_inv_dsc(iommu, index, granu, dr, dw, did,
                            am, ih, addr);
    ret |= qinval_update_qtail(iommu, index);
    spin_unlock_irqrestore(&iommu->register_lock, flags);
    return ret;
}

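/*
 * Fill queue slot @index with an Invalidation Wait Descriptor.  When
 * hardware reaches this descriptor it writes @sdata to @saddr (if @sw is
 * set), which the caller can poll for completion.  The status address field
 * holds bits 63:2 of the physical address, hence the shift by 2.
 */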
static int gen_wait_dsc(struct iommu *iommu, int index,
    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
    qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
    qinval_entry->q.inv_wait_dsc.lo.sw = sw;
    qinval_entry->q.inv_wait_dsc.lo.fn = fn;
    qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
    qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
    qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
    qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}

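/*
 * Queue a wait descriptor and, when status writeback (@sw) is requested,
 * poll the status word until hardware has written it, i.e. until all
 * previously queued descriptors have been processed.
 */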
static int queue_invalidate_wait(struct iommu *iommu,
    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
{
    unsigned long flags;
    s_time_t start_time;
    int index = -1;
    int ret = -1;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    spin_lock_irqsave(&qi_ctrl->qinval_poll_lock, flags);
    spin_lock(&iommu->register_lock);
    index = qinval_next_index(iommu);
    if ( *saddr == 1 )
        *saddr = 0;
    ret = gen_wait_dsc(iommu, index, iflag, sw, fn, sdata, saddr);
    ret |= qinval_update_qtail(iommu, index);
    spin_unlock(&iommu->register_lock);

    /* The interrupt-based completion method is not supported yet, so poll. */
    if ( sw )
    {
        /* All wait descriptors write the same data to the same address. */
        start_time = NOW();
        while ( *saddr != 1 )
        {
            if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            {
                print_qi_regs(iommu);
                panic("queue invalidate wait descriptor was not executed\n");
            }
            cpu_relax();
        }
    }
    spin_unlock_irqrestore(&qi_ctrl->qinval_poll_lock, flags);
    return ret;
}

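/*
 * Wait for all descriptors queued so far on this IOMMU to complete, using a
 * status-write wait descriptor on the per-IOMMU poll word.
 */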
int invalidate_sync(struct iommu *iommu)
{
    int ret = -1;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( qi_ctrl->qinval_maddr != 0 )
    {
        ret = queue_invalidate_wait(iommu,
            0, 1, 1, 1, &qi_ctrl->qinval_poll_status);
        return ret;
    }
    return 0;
}

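/*
 * Fill queue slot @index with a Device-IOTLB Invalidate Descriptor, used to
 * invalidate the IOTLB of an ATS-capable endpoint identified by @sid.
 */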
static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, int index,
    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = sid;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;

    qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
    qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}

int qinval_device_iotlb(struct iommu *iommu,
    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
{
    int ret = -1;
    unsigned long flags;
    int index = -1;

    spin_lock_irqsave(&iommu->register_lock, flags);
    index = qinval_next_index(iommu);
    ret = gen_dev_iotlb_inv_dsc(iommu, index, max_invs_pend,
                                sid, size, addr);
    ret |= qinval_update_qtail(iommu, index);
    spin_unlock_irqrestore(&iommu->register_lock, flags);
    return ret;
}

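/*
 * Fill queue slot @index with an Interrupt Entry Cache Invalidate
 * Descriptor, covering either the whole cache or the index range given by
 * @iidx/@im.
 */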
static int gen_iec_inv_dsc(struct iommu *iommu, int index,
    u8 granu, u8 im, u16 iidx)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
    qinval_entry->q.iec_inv_dsc.lo.granu = granu;
    qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.iec_inv_dsc.lo.im = im;
    qinval_entry->q.iec_inv_dsc.lo.iidx = iidx;
    qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.iec_inv_dsc.hi.res = 0;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}

int queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
{
    int ret;
    unsigned long flags;
    int index = -1;

    spin_lock_irqsave(&iommu->register_lock, flags);
    index = qinval_next_index(iommu);
    ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
    ret |= qinval_update_qtail(iommu, index);
    spin_unlock_irqrestore(&iommu->register_lock, flags);
    return ret;
}

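/* Queue an IEC invalidation and synchronously wait for it to complete. */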
int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
{
    int ret;
    ret = queue_invalidate_iec(iommu, granu, im, iidx);
    ret |= invalidate_sync(iommu);

    /*
     * Reading a VT-d architecture register ensures that draining happens
     * in an implementation-independent way.
     */
    (void)dmar_readq(iommu->reg, DMAR_CAP_REG);
    return ret;
}

int iommu_flush_iec_global(struct iommu *iommu)
{
    return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
}

int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
{
    return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
}

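/*
 * Context-cache flush callback used when queued invalidation is enabled;
 * installed into the iommu_flush vector by enable_qinval().
 */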
static int flush_context_qi(
    void *_iommu, u16 did, u16 sid, u8 fm, u64 type,
    int flush_non_present_entry)
{
    int ret = 0;
    struct iommu *iommu = (struct iommu *)_iommu;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    /*
     * In the non-present entry flush case: if the hardware does not cache
     * non-present entries, there is nothing to do; if it does, flush the
     * entries of domain 0 (domain id 0 is used to cache any non-present
     * entries).
     */
    if ( flush_non_present_entry )
    {
        if ( !cap_caching_mode(iommu->cap) )
            return 1;
        else
            did = 0;
    }

    if ( qi_ctrl->qinval_maddr != 0 )
    {
        ret = queue_invalidate_context(iommu, did, sid, fm,
                                       type >> DMA_CCMD_INVL_GRANU_OFFSET);
        ret |= invalidate_sync(iommu);
    }
    return ret;
}

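/*
 * IOTLB flush callback used when queued invalidation is enabled; also
 * issues Device-IOTLB invalidations when @flush_dev_iotlb is set.
 */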
static int flush_iotlb_qi(
    void *_iommu, u16 did,
    u64 addr, unsigned int size_order, u64 type,
    int flush_non_present_entry, int flush_dev_iotlb)
{
    u8 dr = 0, dw = 0;
    int ret = 0;
    struct iommu *iommu = (struct iommu *)_iommu;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    /*
     * In the non-present entry flush case: if the hardware does not cache
     * non-present entries, there is nothing to do; if it does, flush the
     * entries of domain 0 (domain id 0 is used to cache any non-present
     * entries).
     */
    if ( flush_non_present_entry )
    {
        if ( !cap_caching_mode(iommu->cap) )
            return 1;
        else
            did = 0;
    }

    if ( qi_ctrl->qinval_maddr != 0 )
    {
        /* use queued invalidation */
        if ( cap_write_drain(iommu->cap) )
            dw = 1;
        if ( cap_read_drain(iommu->cap) )
            dr = 1;
        /* The ih (invalidation hint) bit still needs to be considered later. */
        ret = queue_invalidate_iotlb(iommu,
                  (type >> DMA_TLB_FLUSH_GRANU_OFFSET), dr,
                  dw, did, (u8)size_order, 0, addr);
        if ( flush_dev_iotlb )
            ret |= dev_invalidate_iotlb(iommu, did, addr, size_order, type);
        ret |= invalidate_sync(iommu);
    }
    return ret;
}

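/*
 * Allocate the invalidation queue (if not already done), install the
 * queued-invalidation flush callbacks, program the IQA register and set
 * GCMD.QIE, then wait for hardware to report the queue as enabled.
 */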
int enable_qinval(struct iommu *iommu)
{
    s_time_t start_time;
    struct qi_ctrl *qi_ctrl;
    struct iommu_flush *flush;

    qi_ctrl = iommu_qi_ctrl(iommu);
    flush = iommu_get_flush(iommu);

    ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);

    if ( qi_ctrl->qinval_maddr == 0 )
    {
        qi_ctrl->qinval_maddr = alloc_pgtable_maddr(NULL, NUM_QINVAL_PAGES);
        if ( qi_ctrl->qinval_maddr == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Cannot allocate memory for qi_ctrl->qinval_maddr\n");
            return -ENOMEM;
        }
    }

    flush->context = flush_context_qi;
    flush->iotlb = flush_iotlb_qi;

    /*
     * Set up the Invalidation Queue Address (IQA) register with the address
     * of the page we just allocated.  The QS field at bits [2:0] indicates
     * that the queue size is one 4KB page, i.e. 256 entries.  The Queue
     * Head (IQH) and Queue Tail (IQT) registers are automatically reset to
     * 0 by a write to the IQA register.
     */
    qi_ctrl->qinval_maddr |= IQA_REG_QS;
    dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);

    dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);

    /* enable queued invalidation hardware */
    iommu->gcmd |= DMA_GCMD_QIE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    /* Make sure the hardware completes it */
    start_time = NOW();
    while ( !(dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_QIES) )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            panic("Cannot set QIE field for queue invalidation\n");
        cpu_relax();
    }

    qinval_enabled = 1;
    return 0;
}

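/*
 * Clear GCMD.QIE and wait for hardware to report queued invalidation as
 * disabled.
 */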
void disable_qinval(struct iommu *iommu)
{
    s_time_t start_time;

    ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);

    iommu->gcmd &= ~DMA_GCMD_QIE;
    dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);

    /* Make sure the hardware completes it */
    start_time = NOW();
    while ( dmar_readl(iommu->reg, DMAR_GSTS_REG) & DMA_GSTS_QIES )
    {
        if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
            panic("Cannot clear QIE field for queue invalidation\n");
        cpu_relax();
    }
}