aboutsummaryrefslogtreecommitdiffstats
path: root/tests/arch/anlogic/run-test.sh
blob: bf19b887d99406c7e7ddd5ea88297b69ca36745c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
#!/usr/bin/env bash
# Generate a makefile (run-test.mk) with one target per test in this
# directory, then exec make on it so test failures propagate as the exit
# status. NOTE: the echoed recipe lines embed literal tab characters, as
# required by make syntax.
set -e
{
echo "all::"
# One run-<name> target per Yosys script. -ql logs to <name>.log;
# -w downgrades the tri-state support warning so it does not fail the run.
for x in *.ys; do
	echo "all:: run-$x"
	echo "run-$x:"
	echo "	@echo 'Running $x..'"
	echo "	@../../../yosys -ql ${x%.ys}.log -w 'Yosys has only limited support for tri-state logic at the moment.' $x"
done
# One run-<name> target per auxiliary shell script, excluding this
# generator itself.
for s in *.sh; do
	if [ "$s" != "run-test.sh" ]; then
		echo "all:: run-$s"
		echo "run-$s:"
		echo "	@echo 'Running $s..'"
		echo "	@bash $s"
	fi
done
} > run-test.mk
exec ${MAKE:-make} -f run-test.mk
#n255'>255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707
/* -*-  Mode:C; c-basic-offset:4; tab-width:4 -*-
 ****************************************************************************
 * (C) 2004      Grzegorz Milos - University of Cambridge
 * Based on the implementation of the BVT scheduler by Rolf Neugebauer
 * and Mark Williamson (look in sched_bvt.c)
 ****************************************************************************
 *
 *        File: common/sched_fair_bvt.c
 *      Author: Grzegorz Milos
 *
 * Description: CPU scheduling
 *              implements the Fair Borrowed Virtual Time Scheduler.
 *              FBVT is a modification of BVT (see Duda & Cheriton, SOSP'99)
 *              which tries to allocate fair shares of the processor even
 *              when there is a mix of CPU-bound and I/O-bound domains.
 *              TODO - add more information about the scheduler here.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/perfc.h>
#include <xen/sched-if.h>
#include <xen/slab.h>
#include <xen/softirq.h>
#include <xen/trace.h>

/* For tracing - TODO - put all the defines in some common header file */
#define TRC_SCHED_FBVT_DO_SCHED             0x00020000
#define TRC_SCHED_FBVT_DO_SCHED_UPDATE      0x00020001

/*
 * Per-domain FBVT scheduling state. Allocated from dom_info_cache and
 * attached to the domain via d->sched_priv (see FBVT_INFO()).
 */
struct fbvt_dom_info
{
    struct domain       *domain;          /* domain this info belongs to */
    struct list_head    run_list;         /* linkage on the per-CPU runqueue */
    unsigned long       mcu_advance;      /* inverse of weight */
    u32                 avt;              /* actual virtual time */
    u32                 evt;              /* effective virtual time */
    u32                 time_slept;       /* amount of time slept */
    int                 warpback;         /* non-zero while warping is enabled */
    long                warp;             /* virtual time warp (subtracted from avt) */
    long                warpl;            /* warp limit */
    long                warpu;            /* unwarp time requirement */
    s_time_t            warped;           /* time it last started running warped */
    s_time_t            uwarped;          /* time it last stopped warping */
};

/* Per-CPU FBVT scheduling state, hung off schedule_data[cpu].sched_priv. */
struct fbvt_cpu_info
{
    spinlock_t          run_lock;  /* protects runqueue */
    struct list_head    runqueue;  /* runqueue for this CPU */
    unsigned long       svt;       /* system virtual time; XXX check this is unsigned long! */
    u32                 vtb;       /* virtual time bonus granted at last dispatch */
    u32                 r_time;    /* last time slice given to run */
};


/* Accessors for the FBVT state hung off each domain and each CPU. */
#define FBVT_INFO(p)  ((struct fbvt_dom_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
#define RUNLIST(p)    ((struct list_head *)&(FBVT_INFO(p)->run_list))
#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
#define CPU_SVT(cpu)  (CPU_INFO(cpu)->svt)
#define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
#define R_TIME(cpu)   (CPU_INFO(cpu)->r_time) 

#define MCU            (s32)MICROSECS(100)    /* Minimum unit of CPU accounting */
#define MCU_ADVANCE    10                     /* default weight */
#define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
static s32 ctx_allow = (s32)MILLISECS(5);     /* context switch allowance */
static s32 max_vtb   = (s32)MILLISECS(5);     /* maximum virtual time bonus */

/* SLAB cache for struct fbvt_dom_info objects */
static xmem_cache_t *dom_info_cache;


/*
 * Wrappers for run-queue management. Must be called with the run_lock
 * held.
 */
/* Insert @d at the head of its CPU's runqueue. */
static inline void __add_to_runqueue_head(struct domain *d)
{
    list_add(RUNLIST(d), RUNQUEUE(d->processor));
}

/* Insert @d at the tail of its CPU's runqueue. Caller holds run_lock. */
static inline void __add_to_runqueue_tail(struct domain *d)
{
    list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
}

/*
 * Remove @d from its runqueue. The list node's next pointer is nulled
 * afterwards: a NULL next is the "not queued" sentinel that
 * __task_on_runqueue() tests. Caller holds run_lock.
 */
static inline void __del_from_runqueue(struct domain *d)
{
    struct list_head *runlist = RUNLIST(d);
    list_del(runlist);
    runlist->next = NULL;
}

/* Non-zero iff @d is linked on a runqueue (see __del_from_runqueue()). */
static inline int __task_on_runqueue(struct domain *d)
{
    return (RUNLIST(d))->next != NULL;
}

/*
 * Compute a domain's effective virtual time (evt) from its actual virtual
 * time (avt), subtracting the warp while the warp limits still permit it.
 */
static void __calc_evt(struct fbvt_dom_info *inf)
{
    s_time_t now = NOW();

    /* Not warping: effective time simply tracks actual time. */
    if ( !inf->warpback )
    {
        inf->evt = inf->avt;
        return;
    }

    if ( ((now - inf->warped) < inf->warpl) &&
         ((now - inf->uwarped) > inf->warpu) )
    {
        /* Within the warp limit and past the unwarp requirement: warp. */
        inf->evt = inf->avt - inf->warp;
        return;
    }

    /* Warped for too long -> unwarp and record when we did so. */
    inf->evt      = inf->avt;
    inf->uwarped  = now;
    inf->warpback = 0;
}

/**
 * fbvt_alloc_task - allocate FBVT private structures for a task
 * @p:              task to allocate private structures for
 *
 * Attaches a fresh (uninitialised) fbvt_dom_info from the slab cache to
 * @p->sched_priv. Returns non-zero on allocation failure.
 */
int fbvt_alloc_task(struct domain *p)
{
    p->sched_priv = xmem_cache_alloc(dom_info_cache);
    return (p->sched_priv == NULL) ? -1 : 0;
}

/*
 * Add a domain: initialise its FBVT scheduling state.
 *
 * The fbvt_dom_info comes straight from the slab cache, which does NOT
 * zero memory, so every field must be set here. The original code left
 * warped/uwarped uninitialised on all paths (and time_slept/warp* on the
 * idle path); __calc_evt() reads warped/uwarped as soon as fbvt_wake()
 * enables warping, which would consume indeterminate values.
 */
void fbvt_add_task(struct domain *p) 
{
    struct fbvt_dom_info *inf = FBVT_INFO(p);

    ASSERT(p   != NULL);
    ASSERT(inf != NULL);

    inf->mcu_advance = MCU_ADVANCE;
    inf->domain      = p;

    /* Initialise every remaining field unconditionally (slab is unzeroed). */
    inf->time_slept  = 0;
    inf->warpback    = 0;
    inf->warp        = 0;
    inf->warpl       = 0;
    inf->warpu       = 0;
    inf->warped      = 0;
    inf->uwarped     = 0;

    if ( p->domain == IDLE_DOMAIN_ID )
    {
        /* Idle task must always sort last: give it the maximum evt/avt. */
        inf->avt = inf->evt = ~0U;
    }
    else
    {
        /* Start avt and evt at the current system virtual time. */
        inf->avt = CPU_SVT(p->processor);
        inf->evt = CPU_SVT(p->processor);
    }
}

/*
 * Set up the per-CPU idle task: allocate and initialise its FBVT state,
 * mark it running, and place it on its CPU's runqueue.
 * Returns non-zero on allocation failure.
 */
int fbvt_init_idle_task(struct domain *p)
{
    unsigned long flags;

    if ( fbvt_alloc_task(p) < 0 )
        return -1;

    fbvt_add_task(p);

    /* Queue manipulation must happen under the per-CPU run lock. */
    spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
    set_bit(DF_RUNNING, &p->flags);
    if ( !__task_on_runqueue(p) )
        __add_to_runqueue_head(p);
    spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);

    return 0;
}
                                        
/*
 * Wake a domain: put it on its CPU's runqueue, pull its virtual time
 * forward if it fell behind while asleep (leaving it slightly behind SVT
 * to favour I/O-bound domains), enable warping, and preempt or re-arm
 * the scheduler timer as appropriate.
 */
static void fbvt_wake(struct domain *d)
{
    unsigned long        flags;
    struct fbvt_dom_info *inf = FBVT_INFO(d);
    struct domain        *curr;
    s_time_t             now, min_time;
    int                  cpu = d->processor;
    s32                  io_warp;

    /* The runqueue accesses must be protected */
    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
    
    /* If on the runqueue already then someone has done the wakeup work. */
    if ( unlikely(__task_on_runqueue(d)) )
    {
        spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags); 
        return;
    }    
    
    __add_to_runqueue_head(d);
 
    now = NOW();

#if 0
    /*
     * XXX KAF: This was fbvt_unpause(). Not sure if it's the right thing
     * to do, in light of the stuff that fbvt_wake_up() does.
     * e.g., setting 'inf->avt = CPU_SVT(cpu);' would make the later test
     * 'inf->avt < CPU_SVT(cpu)' redundant!
     */
    if ( d->domain == IDLE_DOMAIN_ID )
    {
        inf->avt = inf->evt = ~0U;
    } 
    else 
    {
        /* Set avt to system virtual time. */
        inf->avt = CPU_SVT(cpu);
        /* Set some default values here. */
        LAST_VTB(cpu) = 0;
        __calc_evt(inf);
    }
#endif

    /* Set the BVT parameters. */
    if ( inf->avt < CPU_SVT(cpu) )
    {
        /*
         * We want IO-bound processes to gain dispatch precedence; this is
         * especially important for device driver domains. Therefore AVT
         * should not be updated to SVT but to a value marginally smaller.
         * Since frequently sleeping domains have high time_slept values,
         * the virtual time can be determined as:
         *   SVT - const * TIME_SLEPT
         */
        io_warp = inf->time_slept/2;
        if ( io_warp > 1000 )
            io_warp = 1000;

        /* time_slept absorbs the virtual time skipped over (minus io_warp). */
        ASSERT(inf->time_slept + CPU_SVT(cpu) > inf->avt + io_warp);
        inf->time_slept += CPU_SVT(cpu) - inf->avt - io_warp;
        inf->avt = CPU_SVT(cpu) - io_warp;
    }

    /* Deal with warping here. */
    inf->warpback  = 1;
    inf->warped    = now;
    __calc_evt(inf);
    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
    
    /* Access to schedule_data protected by schedule_lock */
    spin_lock_irqsave(&schedule_data[cpu].schedule_lock, flags);
    
 
    curr = schedule_data[cpu].curr;
 
    /* Currently-running domain should run at least for ctx_allow. */
    min_time = curr->lastschd + ctx_allow;
    
    /* Preempt immediately if allowed; otherwise make sure the scheduler
       timer fires no later than min_time (plus a little slop). */
    if ( is_idle_task(curr) || (min_time <= now) )
        cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    else if ( schedule_data[cpu].s_timer.expires > (min_time + TIME_SLOP) )
        mod_ac_timer(&schedule_data[cpu].s_timer, min_time);

    spin_unlock_irqrestore(&schedule_data[cpu].schedule_lock, flags);   
}


/*
 * Put a domain to sleep. If it is the currently-running task, just kick
 * the scheduler to deschedule it; otherwise remove it from the runqueue
 * directly (under the run lock).
 */
static void fbvt_sleep(struct domain *d)
{
    unsigned long flags;

    if ( test_bit(DF_RUNNING, &d->flags) )
    {
        cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
        return;
    }

    /* Runqueue manipulation must happen under the per-CPU run lock. */
    spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);
    if ( __task_on_runqueue(d) )
        __del_from_runqueue(d);
    spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
}


/**
 * fbvt_free_task - free FBVT private structures for a task
 * @p:             task
 *
 * Returns the fbvt_dom_info allocated by fbvt_alloc_task() to the slab
 * cache. p->sched_priv is left dangling; the caller must not use it.
 */
void fbvt_free_task(struct domain *p)
{
    ASSERT( p->sched_priv != NULL );
    xmem_cache_free( dom_info_cache, p->sched_priv );
}

/* 
 * Block the currently-executing domain until a pertinent event occurs.
 * Only warping is disabled here; removal from the runqueue is handled
 * by the scheduler path (see the domain_runnable() check after
 * __del_from_runqueue() in fbvt_do_schedule()).
 */
static void fbvt_do_block(struct domain *p)
{
    FBVT_INFO(p)->warpback = 0; 
}

/*
 * Control the scheduler: SCHED_INFO_PUT sets the global context-switch
 * allowance (and keeps max_vtb on the same order); any other direction
 * reads the current allowance back. Always returns 0.
 */
int fbvt_ctl(struct sched_ctl_cmd *cmd)
{
    struct fbvt_ctl *params = &cmd->u.fbvt;

    if ( cmd->direction != SCHED_INFO_PUT )
    {
        params->ctx_allow = ctx_allow;
        return 0;
    }

    ctx_allow = params->ctx_allow;
    /* The maximum virtual time bonus should be of the order of ctx_allow. */
    max_vtb = ctx_allow;
    return 0;
}

/*
 * Adjust scheduling parameters for a given domain.
 * SCHED_INFO_PUT writes mcu_advance/warp/warpl/warpu (rejecting a zero
 * mcu_adv, which would otherwise enable a divide-by-zero elsewhere);
 * SCHED_INFO_GET reads them back. Both directions take the per-CPU run
 * lock so the fields are updated/read consistently.
 */
int fbvt_adjdom(struct domain *p,
                struct sched_adjdom_cmd *cmd)
{
    struct fbvt_adjdom *params = &cmd->u.fbvt;
    unsigned long flags;

    if ( cmd->direction == SCHED_INFO_PUT )
    {
        unsigned long mcu_adv = params->mcu_adv,
            warp  = params->warp,
            warpl = params->warpl,
            warpu = params->warpu;
        
        struct fbvt_dom_info *inf = FBVT_INFO(p);
        
        /* Log the old values before overwriting them. */
        DPRINTK("Get domain %u fbvt mcu_adv=%ld, warp=%ld, "
                "warpl=%ld, warpu=%ld\n",
                p->domain, inf->mcu_advance, inf->warp,
                inf->warpl, inf->warpu );

        /* Sanity -- this can avoid divide-by-zero. */
        if ( mcu_adv == 0 )
            return -EINVAL;
        
        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);   
        inf->mcu_advance = mcu_adv;
        inf->warp = warp;
        inf->warpl = warpl;
        inf->warpu = warpu;

        /* Log the new values that just took effect. */
        DPRINTK("Set domain %u fbvt mcu_adv=%ld, warp=%ld, "
                "warpl=%ld, warpu=%ld\n",
                p->domain, inf->mcu_advance, inf->warp,
                inf->warpl, inf->warpu );

        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
    }
    else if ( cmd->direction == SCHED_INFO_GET )
    {
        struct fbvt_dom_info *inf = FBVT_INFO(p);

        spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);   
        params->mcu_adv = inf->mcu_advance;
        params->warp    = inf->warp;
        params->warpl   = inf->warpl;
        params->warpu   = inf->warpu;
        spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
    }
    
    return 0;
}


/* 
 * The main function
 * - deschedule the current domain.
 * - pick a new domain.
 *   i.e., the domain with lowest EVT.
 *   The runqueue should be ordered by EVT so that is easy.
 */
static task_slice_t fbvt_do_schedule(s_time_t now)
{
    unsigned long flags;
    struct domain *prev = current, *next = NULL, *next_prime, *p;
    struct list_head   *tmp;
    int                 cpu = prev->processor;
    s32                 r_time;     /* time for new dom to run */
    s32                 ranfor;     /* assume we never run longer than 2.1s! */
    s32                 mcus;
    u32                 next_evt, next_prime_evt, min_avt;
    u32                 sl_decrement;
    struct fbvt_dom_info *prev_inf       = FBVT_INFO(prev);
    struct fbvt_dom_info *p_inf          = NULL;
    struct fbvt_dom_info *next_inf       = NULL;
    struct fbvt_dom_info *next_prime_inf = NULL;
    task_slice_t        ret;

    ASSERT(prev->sched_priv != NULL);
    ASSERT(prev_inf != NULL);
    
    spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);

    ASSERT(__task_on_runqueue(prev));

    if ( likely(!is_idle_task(prev)) ) 
    {
        ranfor = (s32)(now - prev->lastschd);
        /* Calculate mcu and update avt. */
        mcus = (ranfor + MCU - 1) / MCU;
        
        TRACE_3D(TRC_SCHED_FBVT_DO_SCHED_UPDATE, prev->domain, 
                 mcus, LAST_VTB(cpu));
    
        sl_decrement = mcus * LAST_VTB(cpu) / R_TIME(cpu);
        prev_inf->time_slept -=  sl_decrement;
        prev_inf->avt += mcus * prev_inf->mcu_advance - sl_decrement;
  
        /*if(mcus * prev_inf->mcu_advance < LAST_VTB(cpu))
          {
          ASSERT(prev_inf->time_slept >= mcus * prev_inf->mcu_advance);
          prev_inf->time_slept -= mcus * prev_inf->mcu_advance;
          }
          else
          {
          prev_inf->avt += mcus * prev_inf->mcu_advance - LAST_VTB(cpu);
  
          ASSERT(prev_inf->time_slept >= LAST_VTB(cpu));
          prev_inf->time_slept -= LAST_VTB(cpu);
          }*/
        
        __calc_evt(prev_inf);
        
        __del_from_runqueue(prev);
        
        if ( domain_runnable(prev) )
            __add_to_runqueue_tail(prev);
    }

    /* We should at least have the idle task */
    ASSERT(!list_empty(RUNQUEUE(cpu)));

    /*
     * scan through the run queue and pick the task with the lowest evt
     * *and* the task the second lowest evt.
     * this code is O(n) but we expect n to be small.
     */
    next_inf        = FBVT_INFO(schedule_data[cpu].idle);
    next_prime_inf  = NULL;

    next_evt       = ~0U;
    next_prime_evt = ~0U;
    min_avt        = ~0U;

    list_for_each ( tmp, RUNQUEUE(cpu) )
    {
        p_inf = list_entry(tmp, struct fbvt_dom_info, run_list);

        if ( p_inf->evt < next_evt )
        {
            next_prime_inf  = next_inf;
            next_prime_evt  = next_evt;
            next_inf        = p_inf;
            next_evt        = p_inf->evt;
        }
        else if ( next_prime_evt == ~0U )
        {
            next_prime_evt  = p_inf->evt;
            next_prime_inf  = p_inf;
        }
        else if ( p_inf->evt < next_prime_evt )
        {
            next_prime_evt  = p_inf->evt;
            next_prime_inf  = p_inf;
        }

        /* Determine system virtual time. */
        if ( p_inf->avt < min_avt )
            min_avt = p_inf->avt;
    }

    spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);

    /* Extract the domain pointers from the dom infos */
    next        = next_inf->domain;
    next_prime  = next_prime_inf->domain;
     

    /* Update system virtual time. */
    if ( min_avt != ~0U )
        CPU_SVT(cpu) = min_avt;

    /* check for virtual time overrun on this cpu */
    if ( CPU_SVT(cpu) >= 0xf0000000 )
    {
        u_long t_flags; 
        write_lock_irqsave(&tasklist_lock, t_flags); 
        for_each_domain ( p )
        {
            if ( p->processor == cpu )
            {
                p_inf = FBVT_INFO(p);
                p_inf->evt -= 0xe0000000;
                p_inf->avt -= 0xe0000000;
            }
        } 
        write_unlock_irqrestore(&tasklist_lock, t_flags);