Diffstat (limited to 'target/linux/bcm27xx/patches-5.4/950-0735-dma-buf-heaps-Add-CMA-heap-to-dmabuf-heaps.patch')
-rw-r--r-- | target/linux/bcm27xx/patches-5.4/950-0735-dma-buf-heaps-Add-CMA-heap-to-dmabuf-heaps.patch | 251
1 file changed, 251 insertions(+), 0 deletions(-)
diff --git a/target/linux/bcm27xx/patches-5.4/950-0735-dma-buf-heaps-Add-CMA-heap-to-dmabuf-heaps.patch b/target/linux/bcm27xx/patches-5.4/950-0735-dma-buf-heaps-Add-CMA-heap-to-dmabuf-heaps.patch
new file mode 100644
index 0000000000..d1828702b4
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.4/950-0735-dma-buf-heaps-Add-CMA-heap-to-dmabuf-heaps.patch
@@ -0,0 +1,251 @@
+From d5e996267c71a9517b2c831d072e76bacb8f0e56 Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Tue, 3 Dec 2019 17:26:40 +0000
+Subject: [PATCH] dma-buf: heaps: Add CMA heap to dmabuf heaps
+
+Commit b61614ec318aae0c77ecd2816878d851dd61d9a6 upstream.
+
+This adds a CMA heap, which allows userspace to allocate
+a dma-buf of contiguous memory out of a CMA region.
+
+This code is an evolution of the Android ION implementation, so
+thanks to its original author and maintainers:
+ Benjamin Gaignard, Laura Abbott, and others!
+
+NOTE: This patch only adds the default CMA heap. We will enable
+selectively adding other CMA memory regions to the dmabuf heaps
+interface with a later patch (which requires a dt binding).
+
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Cc: Sumit Semwal <sumit.semwal@linaro.org>
+Cc: Liam Mark <lmark@codeaurora.org>
+Cc: Pratik Patel <pratikp@codeaurora.org>
+Cc: Brian Starkey <Brian.Starkey@arm.com>
+Cc: Vincent Donnefort <Vincent.Donnefort@arm.com>
+Cc: Sudipto Paul <Sudipto.Paul@arm.com>
+Cc: Andrew F. Davis <afd@ti.com>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Chenbo Feng <fengc@google.com>
+Cc: Alistair Strachan <astrachan@google.com>
+Cc: Hridya Valsaraju <hridya@google.com>
+Cc: Sandeep Patil <sspatil@google.com>
+Cc: Hillf Danton <hdanton@sina.com>
+Cc: Dave Airlie <airlied@gmail.com>
+Cc: dri-devel@lists.freedesktop.org
+Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Reviewed-by: Brian Starkey <brian.starkey@arm.com>
+Acked-by: Sandeep Patil <sspatil@android.com>
+Acked-by: Laura Abbott <labbott@redhat.com>
+Tested-by: Ayan Kumar Halder <ayan.halder@arm.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191203172641.66642-5-john.stultz@linaro.org
+---
+ drivers/dma-buf/heaps/Kconfig | 8 ++
+ drivers/dma-buf/heaps/Makefile | 1 +
+ drivers/dma-buf/heaps/cma_heap.c | 177 +++++++++++++++++++++++++++++++
+ 3 files changed, 186 insertions(+)
+ create mode 100644 drivers/dma-buf/heaps/cma_heap.c
+
+--- a/drivers/dma-buf/heaps/Kconfig
++++ b/drivers/dma-buf/heaps/Kconfig
+@@ -4,3 +4,11 @@ config DMABUF_HEAPS_SYSTEM
+ help
+ Choose this option to enable the system dmabuf heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
++
++config DMABUF_HEAPS_CMA
++ bool "DMA-BUF CMA Heap"
++ depends on DMABUF_HEAPS && DMA_CMA
++ help
++ Choose this option to enable dma-buf CMA heap. This heap is backed
++ by the Contiguous Memory Allocator (CMA). If your system has these
++ regions, you should say Y here.
+--- a/drivers/dma-buf/heaps/Makefile
++++ b/drivers/dma-buf/heaps/Makefile
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-y += heap-helpers.o
+ obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
++obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
+--- /dev/null
++++ b/drivers/dma-buf/heaps/cma_heap.c
+@@ -0,0 +1,177 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * DMABUF CMA heap exporter
++ *
++ * Copyright (C) 2012, 2019 Linaro Ltd.
++ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
++ */
++
++#include <linux/cma.h>
++#include <linux/device.h>
++#include <linux/dma-buf.h>
++#include <linux/dma-heap.h>
++#include <linux/dma-contiguous.h>
++#include <linux/err.h>
++#include <linux/errno.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/scatterlist.h>
++#include <linux/sched/signal.h>
++
++#include "heap-helpers.h"
++
++struct cma_heap {
++ struct dma_heap *heap;
++ struct cma *cma;
++};
++
++static void cma_heap_free(struct heap_helper_buffer *buffer)
++{
++ struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
++ unsigned long nr_pages = buffer->pagecount;
++ struct page *cma_pages = buffer->priv_virt;
++
++ /* free page list */
++ kfree(buffer->pages);
++ /* release memory */
++ cma_release(cma_heap->cma, cma_pages, nr_pages);
++ kfree(buffer);
++}
++
++/* dmabuf heap CMA operations functions */
++static int cma_heap_allocate(struct dma_heap *heap,
++ unsigned long len,
++ unsigned long fd_flags,
++ unsigned long heap_flags)
++{
++ struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
++ struct heap_helper_buffer *helper_buffer;
++ struct page *cma_pages;
++ size_t size = PAGE_ALIGN(len);
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++ unsigned long align = get_order(size);
++ struct dma_buf *dmabuf;
++ int ret = -ENOMEM;
++ pgoff_t pg;
++
++ if (align > CONFIG_CMA_ALIGNMENT)
++ align = CONFIG_CMA_ALIGNMENT;
++
++ helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
++ if (!helper_buffer)
++ return -ENOMEM;
++
++ init_heap_helper_buffer(helper_buffer, cma_heap_free);
++ helper_buffer->heap = heap;
++ helper_buffer->size = len;
++
++ cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
++ if (!cma_pages)
++ goto free_buf;
++
++ if (PageHighMem(cma_pages)) {
++ unsigned long nr_clear_pages = nr_pages;
++ struct page *page = cma_pages;
++
++ while (nr_clear_pages > 0) {
++ void *vaddr = kmap_atomic(page);
++
++ memset(vaddr, 0, PAGE_SIZE);
++ kunmap_atomic(vaddr);
++ /*
++ * Avoid wasting time zeroing memory if the process
++ * has been killed by SIGKILL
++ */
++ if (fatal_signal_pending(current))
++ goto free_cma;
++
++ page++;
++ nr_clear_pages--;
++ }
++ } else {
++ memset(page_address(cma_pages), 0, size);
++ }
++
++ helper_buffer->pagecount = nr_pages;
++ helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
++ sizeof(*helper_buffer->pages),
++ GFP_KERNEL);
++ if (!helper_buffer->pages) {
++ ret = -ENOMEM;
++ goto free_cma;
++ }
++
++ for (pg = 0; pg < helper_buffer->pagecount; pg++)
++ helper_buffer->pages[pg] = &cma_pages[pg];
++
++ /* create the dmabuf */
++ dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
++ if (IS_ERR(dmabuf)) {
++ ret = PTR_ERR(dmabuf);
++ goto free_pages;
++ }
++
++ helper_buffer->dmabuf = dmabuf;
++ helper_buffer->priv_virt = cma_pages;
++
++ ret = dma_buf_fd(dmabuf, fd_flags);
++ if (ret < 0) {
++ dma_buf_put(dmabuf);
++ /* just return, as put will call release and that will free */
++ return ret;
++ }
++
++ return ret;
++
++free_pages:
++ kfree(helper_buffer->pages);
++free_cma:
++ cma_release(cma_heap->cma, cma_pages, nr_pages);
++free_buf:
++ kfree(helper_buffer);
++ return ret;
++}
++
++static const struct dma_heap_ops cma_heap_ops = {
++ .allocate = cma_heap_allocate,
++};
++
++static int __add_cma_heap(struct cma *cma, void *data)
++{
++ struct cma_heap *cma_heap;
++ struct dma_heap_export_info exp_info;
++
++ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
++ if (!cma_heap)
++ return -ENOMEM;
++ cma_heap->cma = cma;
++
++ exp_info.name = cma_get_name(cma);
++ exp_info.ops = &cma_heap_ops;
++ exp_info.priv = cma_heap;
++
++ cma_heap->heap = dma_heap_add(&exp_info);
++ if (IS_ERR(cma_heap->heap)) {
++ int ret = PTR_ERR(cma_heap->heap);
++
++ kfree(cma_heap);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int add_default_cma_heap(void)
++{
++ struct cma *default_cma = dev_get_cma_area(NULL);
++ int ret = 0;
++
++ if (default_cma)
++ ret = __add_cma_heap(default_cma, NULL);
++
++ return ret;
++}
++module_init(add_default_cma_heap);
++MODULE_DESCRIPTION("DMA-BUF CMA Heap");
++MODULE_LICENSE("GPL v2");
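
For context, once this heap is registered, userspace allocates from it through a /dev/dma_heap/<name> character device using the DMA_HEAP_IOCTL_ALLOC ioctl from the dma-heap UAPI. Below is a minimal sketch, not part of the patch: the node name "reserved" (typically the default CMA area's name reported by cma_get_name()) and the 1 MiB size are assumptions for illustration and may differ on a given system.

/*
 * Minimal userspace sketch: allocate a contiguous buffer from the CMA
 * dmabuf heap added by this patch. The heap node name ("reserved") is
 * an assumption and may differ depending on the default CMA area.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>   /* struct dma_heap_allocation_data, DMA_HEAP_IOCTL_ALLOC */

int main(void)
{
	struct dma_heap_allocation_data alloc;
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/reserved", O_RDWR);
	if (heap_fd < 0) {
		perror("open");
		return 1;
	}

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 1 << 20;                  /* 1 MiB; the heap page-aligns the length */
	alloc.fd_flags = O_RDWR | O_CLOEXEC;  /* flags for the returned dma-buf fd */

	/* On success the heap returns a dma-buf fd in alloc.fd */
	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
	if (ret < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}

	printf("got dma-buf fd %u\n", alloc.fd);
	close(alloc.fd);
	close(heap_fd);
	return 0;
}

The returned fd can then be mmap'd or passed to other devices like any dma-buf; the allocation itself is freed when the last reference to the fd is dropped.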