diff options
-rw-r--r-- | tools/xentrace/xentrace.c | 10
-rw-r--r-- | xen/common/trace.c       | 23
2 files changed, 22 insertions, 11 deletions
diff --git a/tools/xentrace/xentrace.c b/tools/xentrace/xentrace.c
index 4fa6c696e1..73846f39d1 100644
--- a/tools/xentrace/xentrace.c
+++ b/tools/xentrace/xentrace.c
@@ -131,19 +131,16 @@ struct t_buf *map_tbufs(unsigned long tbufs_phys, unsigned int num,
 {
     int dm_fd;                              /* file descriptor for /dev/mem */
     struct t_buf *tbufs_mapped;
-    unsigned int page_size = getpagesize();
-    unsigned int off_in_pg = (tbufs_phys % page_size);
-
-    tbufs_phys -= off_in_pg; /* correct tbufs_phys if not page-aligned */
 
     dm_fd = open("/dev/mem", O_RDONLY);
+
     if ( dm_fd < 0 )
     {
         PERROR("Open /dev/mem when mapping trace buffers\n");
         exit(EXIT_FAILURE);
     }
 
-    tbufs_mapped = (struct t_buf *)mmap(NULL, size * num + off_in_pg,
+    tbufs_mapped = (struct t_buf *)mmap(NULL, size * num,
                                         PROT_READ, MAP_SHARED,
                                         dm_fd, (off_t)tbufs_phys);
 
@@ -155,8 +152,7 @@ struct t_buf *map_tbufs(unsigned long tbufs_phys, unsigned int num,
         exit(EXIT_FAILURE);
     }
 
-    /* add offset to get buffers in case original address wasn't pg aligned */
-    return (struct t_buf *)((unsigned long)tbufs_mapped + off_in_pg);
+    return (struct t_buf *)tbufs_mapped;
 }
 
diff --git a/xen/common/trace.c b/xen/common/trace.c
index aa2e7b949f..76c39a67bd 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -49,8 +49,8 @@ int tb_init_done = 0;
 void init_trace_bufs(void)
 {
     extern int opt_tbuf_size;
-
-    int i;
+    int i, pages_order;
+    unsigned long total_size;
     char *rawbuf;
     struct t_buf *buf;
 
@@ -60,12 +60,27 @@ void init_trace_bufs(void)
         return;
     }
 
-    if ( (rawbuf = kmalloc(smp_num_cpus * opt_tbuf_size * PAGE_SIZE,
-                           GFP_KERNEL)) == NULL )
+    /* calculate page_order - we'll allocate 2^page_order pages */
+    pages_order = 0;
+    total_size = smp_num_cpus * opt_tbuf_size;
+
+    while( (total_size) >> ( pages_order + 1 ) )
+        pages_order++;
+
+    /* if total_size is not an exact power of two then over-allocate */
+    if( total_size & ~( 1 << pages_order ) )
+        pages_order++;
+
+    /* we allocate 2^page_order pages to hold the data */
+    if ( (rawbuf = (char *)__get_free_pages(GFP_KERNEL, pages_order)) == NULL )
     {
         printk("Xen trace buffers: memory allocation failed\n");
         return;
     }
+
+    /* share pages so that xentrace can map them */
+    for( i = 0; i < total_size; i++)
+        SHARE_PFN_WITH_DOMAIN( &frame_table[(__pa(rawbuf)>>PAGE_SHIFT)+i], 0);
 
     for ( i = 0; i < smp_num_cpus; i++ )
     {