author     Álvaro Fernández Rojas <noltari@gmail.com>  2021-08-21 10:54:34 +0200
committer  Álvaro Fernández Rojas <noltari@gmail.com>  2021-08-21 19:07:07 +0200
commit     8299d1f057439f94c6a4412e2e5c5082b82a30c9 (patch)
tree       1bf678d61f11f7394493be464c7876e496f7faed /target/linux/bcm27xx/patches-5.10/950-0308-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch
parent     33b6885975ce376ff075362b7f0890326043111b (diff)
bcm27xx: add kernel 5.10 support
Rebased the RPi Foundation patches onto Linux 5.10.59; removed the patches that were
already applied or reverted upstream, as well as the wireless and defconfig patches.
bcm2708: boot tested on RPi B+ v1.2
bcm2709: boot tested on RPi 4B v1.1 4G
bcm2711: boot tested on RPi 4B v1.1 4G
Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com>
Diffstat (limited to 'target/linux/bcm27xx/patches-5.10/950-0308-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch')
-rw-r--r--   target/linux/bcm27xx/patches-5.10/950-0308-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch | 122
1 file changed, 122 insertions, 0 deletions
diff --git a/target/linux/bcm27xx/patches-5.10/950-0308-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch b/target/linux/bcm27xx/patches-5.10/950-0308-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch
new file mode 100644
index 0000000000..991c962ac6
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.10/950-0308-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch
@@ -0,0 +1,122 @@
+From afbc6c9f890bd3d877451138b2ffd5bd19bfdf61 Mon Sep 17 00:00:00 2001
+From: detule <ogjoneski@gmail.com>
+Date: Tue, 2 Oct 2018 04:10:08 -0400
+Subject: [PATCH] vchiq_2835_arm: Implement a DMA pool for small bulk
+ transfers (#2699)
+
+During a bulk transfer we request a DMA allocation to hold the
+scatter-gather list. Most of the time, this allocation is small
+(<< PAGE_SIZE), however it can be requested at a high enough frequency
+to cause fragmentation and/or stress the CMA allocator (think time
+spent in compaction here, or during allocations elsewhere).
+
+Implement a pool to serve up small DMA allocations, falling back
+to a coherent allocation if the request is greater than
+VCHIQ_DMA_POOL_SIZE.
+
+Signed-off-by: Oliver Gjoneski <ogjoneski@gmail.com>
+---
+ .../interface/vchiq_arm/vchiq_2835_arm.c | 36 ++++++++++++++++---
+ 1 file changed, 32 insertions(+), 4 deletions(-)
+
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+@@ -7,6 +7,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/pagemap.h>
+ #include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <linux/uaccess.h>
+@@ -29,6 +30,8 @@
+ #define BELL0 0x00
+ #define BELL2 0x08
+
++#define VCHIQ_DMA_POOL_SIZE PAGE_SIZE
++
+ struct vchiq_2835_state {
+ 	int inited;
+ 	struct vchiq_arm_state arm_state;
+@@ -38,6 +41,7 @@ struct vchiq_pagelist_info {
+ 	struct pagelist *pagelist;
+ 	size_t pagelist_buffer_size;
+ 	dma_addr_t dma_addr;
++	bool is_from_pool;
+ 	enum dma_data_direction dma_dir;
+ 	unsigned int num_pages;
+ 	unsigned int pages_need_release;
+@@ -58,6 +62,7 @@ static void __iomem *g_regs;
+  * of 32.
+  */
+ static unsigned int g_cache_line_size = 32;
++static struct dma_pool *g_dma_pool;
+ static unsigned int g_use_36bit_addrs = 0;
+ static unsigned int g_fragments_size;
+ static char *g_fragments_base;
+@@ -182,6 +187,13 @@ int vchiq_platform_init(struct platform_
+
+ 	g_dev = dev;
+ 	g_dma_dev = dma_dev ?: dev;
++	g_dma_pool = dmam_pool_create("vchiq_scatter_pool", dev,
++				      VCHIQ_DMA_POOL_SIZE, g_cache_line_size,
++				      0);
++	if (!g_dma_pool) {
++		dev_err(dev, "failed to create dma pool");
++		return -ENOMEM;
++	}
+
+ 	vchiq_log_info(vchiq_arm_log_level,
+ 		"vchiq_init - done (slots %pK, phys %pad)",
+@@ -314,8 +326,14 @@ cleanup_pagelistinfo(struct vchiq_pageli
+ 	if (pagelistinfo->pages_need_release)
+ 		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
+
+-	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
+-			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
++	if (pagelistinfo->is_from_pool) {
++		dma_pool_free(g_dma_pool, pagelistinfo->pagelist,
++			      pagelistinfo->dma_addr);
++	} else {
++		dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
++				  pagelistinfo->pagelist,
++				  pagelistinfo->dma_addr);
++	}
+ }
+
+ /* There is a potential problem with partial cache lines (pages?)
+@@ -336,6 +354,7 @@ create_pagelist(char *buf, char __user *
+ 	u32 *addrs;
+ 	unsigned int num_pages, offset, i, k;
+ 	int actual_pages;
++	bool is_from_pool;
+ 	size_t pagelist_size;
+ 	struct scatterlist *scatterlist, *sg;
+ 	int dma_buffers;
+@@ -365,8 +384,16 @@ create_pagelist(char *buf, char __user *
+ 	/* Allocate enough storage to hold the page pointers and the page
+ 	 * list
+ 	 */
+-	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
+-				      GFP_KERNEL);
++	if (pagelist_size > VCHIQ_DMA_POOL_SIZE) {
++		pagelist = dma_alloc_coherent(g_dev,
++					      pagelist_size,
++					      &dma_addr,
++					      GFP_KERNEL);
++		is_from_pool = false;
++	} else {
++		pagelist = dma_pool_alloc(g_dma_pool, GFP_KERNEL, &dma_addr);
++		is_from_pool = true;
++	}
+
+ 	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
+
+@@ -387,6 +414,7 @@ create_pagelist(char *buf, char __user *
+ 	pagelistinfo->pagelist = pagelist;
+ 	pagelistinfo->pagelist_buffer_size = pagelist_size;
+ 	pagelistinfo->dma_addr = dma_addr;
++	pagelistinfo->is_from_pool = is_from_pool;
+ 	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
+ 		DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ 	pagelistinfo->num_pages = num_pages;
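The core of the patch is a size-based fallback: scatter-gather list buffers no larger
than VCHIQ_DMA_POOL_SIZE come from a dma_pool created at init time, larger requests
still go through dma_alloc_coherent(), and the is_from_pool flag records which
allocator the buffer must be returned to. The minimal sketch below restates that
pattern outside the vchiq driver; the sg_buf structure and the sg_buf_alloc() /
sg_buf_free() helpers are illustrative names and assumptions, not part of the driver.

/*
 * Sketch of the pool-or-coherent allocation pattern used by the patch.
 * Assumes a struct device *dev and a struct dma_pool *pool created
 * elsewhere, e.g. in probe():
 *	pool = dmam_pool_create("sg_pool", dev, SG_POOL_BLOCK_SIZE,
 *				cache_line_size, 0);
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/types.h>

#define SG_POOL_BLOCK_SIZE PAGE_SIZE	/* analogous to VCHIQ_DMA_POOL_SIZE */

struct sg_buf {
	void		*cpu_addr;	/* kernel virtual address */
	dma_addr_t	dma_addr;	/* bus address handed to the device */
	size_t		size;
	bool		from_pool;	/* remembers which allocator was used */
};

static int sg_buf_alloc(struct device *dev, struct dma_pool *pool,
			size_t size, struct sg_buf *buf)
{
	buf->size = size;
	if (size > SG_POOL_BLOCK_SIZE) {
		/* Large request: bypass the pool, allocate coherent memory. */
		buf->cpu_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
						   GFP_KERNEL);
		buf->from_pool = false;
	} else {
		/* Small request: hand out a pre-sized block from the pool. */
		buf->cpu_addr = dma_pool_alloc(pool, GFP_KERNEL,
					       &buf->dma_addr);
		buf->from_pool = true;
	}
	return buf->cpu_addr ? 0 : -ENOMEM;
}

static void sg_buf_free(struct device *dev, struct dma_pool *pool,
			struct sg_buf *buf)
{
	/* Release with the same allocator that produced the buffer. */
	if (buf->from_pool)
		dma_pool_free(pool, buf->cpu_addr, buf->dma_addr);
	else
		dma_free_coherent(dev, buf->size, buf->cpu_addr,
				  buf->dma_addr);
}

In the patch itself the pool is created with dmam_pool_create() in
vchiq_platform_init(), so it is device-managed and is torn down automatically when
the device is unbound, which is why no explicit dma_pool_destroy() appears in the
diff.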