From 650afa3995c32c50f3272dbe4514781abed8bdc0 Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Wed, 20 Jan 2010 20:33:35 +0000
Subject: xentrace: Per-cpu xentrace buffers

In the current xentrace configuration, xentrace buffers are all
allocated in a single contiguous chunk, and then divided among logical
cpus, one buffer per cpu.  The size of an allocatable chunk is fairly
limited, in my experience about 128 pages (512KiB).  As the number of
logical cores increases, this means a much smaller maximum trace buffer
per logical cpu; on my dual-socket quad-core Nehalem box with
hyperthreading (16 logical cpus), that comes to 8 pages per logical
cpu.

This patch addresses the issue by allocating the per-cpu buffers
separately.

Signed-off-by: George Dunlap
---
 tools/xentrace/xentrace.c | 133 +++++++++++++++++++++-------------------------
 1 file changed, 60 insertions(+), 73 deletions(-)

(limited to 'tools/xentrace')

diff --git a/tools/xentrace/xentrace.c b/tools/xentrace/xentrace.c
index 2b5647e6fb..b6da08e38c 100644
--- a/tools/xentrace/xentrace.c
+++ b/tools/xentrace/xentrace.c
@@ -61,6 +61,12 @@ typedef struct settings_st {
         disable_tracing:1;
 } settings_t;
 
+struct t_struct {
+    struct t_info *t_info;  /* Structure with information about individual buffers */
+    struct t_buf **meta;    /* Pointers to trace buffer metadata */
+    unsigned char **data;   /* Pointers to trace buffer data areas */
+};
+
 settings_t opts;
 
 int interrupted = 0; /* gets set if we get a SIGHUP */
@@ -446,22 +452,61 @@ static void get_tbufs(unsigned long *mfn, unsigned long *size)
  *
  * Maps the Xen trace buffers them into process address space.
  */
-static struct t_buf *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
-                               unsigned long size)
+static struct t_struct *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
+                                  unsigned long tinfo_size)
 {
-    struct t_buf *tbufs_mapped;
+    static struct t_struct tbufs = { 0 };
+    int i;
 
-    tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN,
-                                        size * num, PROT_READ | PROT_WRITE,
+    /* Map t_info metadata structure */
+    tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN,
+                                        tinfo_size, PROT_READ | PROT_WRITE,
                                         tbufs_mfn);
 
-    if ( tbufs_mapped == 0 )
+    if ( tbufs.t_info == 0 )
     {
        PERROR("Failed to mmap trace buffers");
        exit(EXIT_FAILURE);
     }
 
-    return tbufs_mapped;
+    if ( tbufs.t_info->tbuf_size == 0 )
+    {
+        fprintf(stderr, "%s: tbuf_size 0!\n", __func__);
+        exit(EXIT_FAILURE);
+    }
+
+    /* Map per-cpu buffers */
+    tbufs.meta = (struct t_buf **)calloc(num, sizeof(struct t_buf *));
+    tbufs.data = (unsigned char **)calloc(num, sizeof(unsigned char *));
+    if ( tbufs.meta == NULL || tbufs.data == NULL )
+    {
+        PERROR( "Failed to allocate memory for buffer pointers\n");
+        exit(EXIT_FAILURE);
+    }
+
+    for(i=0; i<num; i++)
+    {
+        uint32_t * mfn_list = ((uint32_t *)tbufs.t_info)
+                              + tbufs.t_info->mfn_offset[i];
+        int j;
+        xen_pfn_t pfn_list[tbufs.t_info->tbuf_size];
+
+        for ( j=0; j<tbufs.t_info->tbuf_size; j++)
+            pfn_list[j] = (xen_pfn_t)mfn_list[j];
+
+        tbufs.meta[i] = xc_map_foreign_batch(xc_handle, DOMID_XEN,
+                                             PROT_READ | PROT_WRITE,
+                                             pfn_list,
+                                             tbufs.t_info->tbuf_size);
+        if ( tbufs.meta[i] == NULL )
+        {
+            PERROR("Failed to map cpu buffer!");
+            exit(EXIT_FAILURE);
+        }
+        tbufs.data[i] = (unsigned char *)(tbufs.meta[i]+1);
+    }
+
+    return &tbufs;
 }
 
 /**
@@ -489,66 +534,6 @@ static void set_mask(uint32_t mask, int type)
     }
 }
 
-/**
- * init_bufs_ptrs - initialises an array of pointers to the trace buffers
- * @bufs_mapped:  the userspace address where the trace buffers are mapped
- * @num:          number of trace buffers
- * @size:         trace buffer size
- *
- * Initialises an array of pointers to individual trace buffers within the
- * mapped region containing all trace buffers.
- */
-static struct t_buf **init_bufs_ptrs(void *bufs_mapped, unsigned int num,
-                                     unsigned long size)
-{
-    int i;
-    struct t_buf **user_ptrs;
-
-    user_ptrs = (struct t_buf **)calloc(num, sizeof(struct t_buf *));
-    if ( user_ptrs == NULL )
-    {
-        PERROR( "Failed to allocate memory for buffer pointers\n");
-        exit(EXIT_FAILURE);
-    }
-
-    /* initialise pointers to the trace buffers - given the size of a trace
-     * buffer and the value of bufs_maped, we can easily calculate these */
-    for ( i = 0; i<num; i++ )
-        user_ptrs[i] = (struct t_buf *)((unsigned long)bufs_mapped + size * i);
-
-    return user_ptrs;
-}
-
-/**
- * init_rec_ptrs - initialises data area pointers to locations in user space
- * @meta:         array of user-space pointers to struct t_buf's of metadata
- * @num:          number of trace buffers
- *
- * Initialises data area pointers to the locations that the trace buffer
- * data areas have been mapped in user space.
- */
-static unsigned char **init_rec_ptrs(struct t_buf **meta, unsigned int num)
-{
-    int i;
-    unsigned char **data;
-
-    data = calloc(num, sizeof(unsigned char *));
-    if ( data == NULL )
-    {
-        PERROR("Failed to allocate memory for data pointers\n");
-        exit(EXIT_FAILURE);
-    }
-
-    for ( i = 0; i < num; i++ )
-        data[i] = (unsigned char *)(meta[i] + 1);
-
-    return data;
-}
@@ ... @@ static int monitor_tbufs(int outfd)
+    size = tbufs->t_info->tbuf_size * PAGE_SIZE;
+
     data_size = size - sizeof(struct t_buf);
 
-    /* build arrays of convenience ptrs */
-    meta = init_bufs_ptrs(tbufs_mapped, num, size);
-    data = init_rec_ptrs(meta, num);
+    meta = tbufs->meta;
+    data = tbufs->data;
 
     if ( opts.discard )
         for ( i = 0; i < num; i++ )
--
cgit v1.2.3
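
As a back-of-the-envelope check of the sizing argument in the commit message,
the short standalone program below works through the arithmetic. It is an
illustrative sketch only, not part of the patch: the 4KiB page size, the
128-page chunk limit, and the 16-cpu count are the figures quoted in the
message, hard-coded here as assumptions rather than queried from Xen.

/* sizing_sketch.c - illustrative only; the constants below are the figures
 * quoted in the commit message, not values read from the hypervisor. */
#include <stdio.h>

#define PAGE_SIZE        4096UL  /* x86 page size in bytes */
#define MAX_CHUNK_PAGES   128UL  /* approx. largest single contiguous allocation */
#define NR_LCPUS           16UL  /* dual-socket quad-core Nehalem with hyperthreading */

int main(void)
{
    /* Old scheme: one contiguous chunk is split evenly across all cpus. */
    unsigned long per_cpu_old = MAX_CHUNK_PAGES / NR_LCPUS;

    printf("chunk limit:          %lu pages = %lu KiB\n",
           MAX_CHUNK_PAGES, MAX_CHUNK_PAGES * PAGE_SIZE / 1024);
    printf("shared among %lu cpus: %lu pages (%lu KiB) per cpu\n",
           NR_LCPUS, per_cpu_old, per_cpu_old * PAGE_SIZE / 1024);

    /* New scheme: each cpu's buffer is its own allocation, so the per-cpu
     * size is bounded by the chunk limit itself, not by limit / nr_cpus. */
    printf("per-cpu allocation:   up to %lu pages (%lu KiB) per cpu\n",
           MAX_CHUNK_PAGES, MAX_CHUNK_PAGES * PAGE_SIZE / 1024);

    return 0;
}

Compiled with a plain cc, this prints the 512KiB chunk limit and the 8-page
(32KiB) per-cpu figure from the message; the per-cpu mapping loop added to
map_tbufs() above is what removes that divide-by-nr-cpus term.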