/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
*
* Author: Weijie Gao <weijie.gao@mediatek.com>
*/

#ifndef _MTK_SNAND_OS_H_
#define _MTK_SNAND_OS_H_

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/iopoll.h>
#include <linux/hrtimer.h>
#include <linux/device.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/div64.h>
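
/*
 * Per-controller OS context: @dev backs the devm allocations and DMA
 * mappings below, and @done is the completion expected to be signalled
 * from the controller's interrupt handler via irq_completion_done().
 */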
struct mtk_snand_plat_dev {
	struct device *dev;
	struct completion done;
};

/* Polling helpers */
#define read16_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
	readw_poll_timeout((addr), (val), (cond), (sleep_us), (timeout_us))
#define read32_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((addr), (val), (cond), (sleep_us), (timeout_us))
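
/*
 * Usage sketch (REG_STA and STA_READY are hypothetical names): poll a
 * 32-bit register every 10 us until a status bit is set, giving up
 * after 1 ms:
 *
 *	u32 val;
 *	int ret = read32_poll_timeout(base + REG_STA, val,
 *				      val & STA_READY, 10, 1000);
 *
 * ret is 0 on success and -ETIMEDOUT if the condition never held.
 */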

/* Timer helpers */
typedef ktime_t mtk_snand_time_t;

static inline mtk_snand_time_t timer_get_ticks(void)
{
	return ktime_get();
}

static inline mtk_snand_time_t timer_time_to_tick(uint32_t timeout_us)
{
	return ktime_add_us(ktime_set(0, 0), timeout_us);
}

static inline bool timer_is_timeout(mtk_snand_time_t start_tick,
				    mtk_snand_time_t timeout_tick)
{
	ktime_t tmo = ktime_add(start_tick, timeout_tick);

	return ktime_compare(ktime_get(), tmo) > 0;
}
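
/*
 * Typical busy-wait pattern built from the helpers above (a sketch;
 * condition_met() stands in for a real hardware status check):
 *
 *	mtk_snand_time_t start = timer_get_ticks();
 *	mtk_snand_time_t tmo = timer_time_to_tick(timeout_us);
 *
 *	while (!condition_met()) {
 *		if (timer_is_timeout(start, tmo))
 *			return -ETIMEDOUT;
 *	}
 */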

/* Memory helpers */
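/*
 * generic_mem_alloc()/generic_mem_free() are device-managed (devm_*):
 * anything not freed explicitly is released automatically when the
 * device is unbound, so error paths may safely skip the free.
 */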
static inline void *generic_mem_alloc(struct mtk_snand_plat_dev *pdev,
				      size_t size)
{
	return devm_kzalloc(pdev->dev, size, GFP_KERNEL);
}

static inline void generic_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
{
	devm_kfree(pdev->dev, ptr);
}
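
/*
 * DMA buffers come from plain kzalloc(): kmalloc memory is physically
 * contiguous, as required by the streaming dma_map_single() mapping
 * set up in dma_mem_map() below.
 */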
static inline void *dma_mem_alloc(struct mtk_snand_plat_dev *pdev, size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}

static inline void dma_mem_free(struct mtk_snand_plat_dev *pdev, void *ptr)
{
	kfree(ptr);
}
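
/*
 * dma_mem_map() establishes a streaming DMA mapping and synchronizes
 * caches for the chosen direction. The CPU must not touch the buffer
 * between dma_mem_map() and the matching dma_mem_unmap() call, which
 * must use the same size and direction.
 */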
static inline int dma_mem_map(struct mtk_snand_plat_dev *pdev, void *vaddr,
			      uintptr_t *dma_addr, size_t size, bool to_device)
{
	dma_addr_t addr;
	int ret;

	addr = dma_map_single(pdev->dev, vaddr, size,
			      to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	ret = dma_mapping_error(pdev->dev, addr);
	if (ret)
		return ret;

	*dma_addr = (uintptr_t)addr;

	return 0;
}

static inline void dma_mem_unmap(struct mtk_snand_plat_dev *pdev,
				 uintptr_t dma_addr, size_t size,
				 bool to_device)
{
	dma_unmap_single(pdev->dev, dma_addr, size,
			 to_device ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

/* Interrupt helpers */
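/*
 * Expected call order (inferred from the helpers below):
 * irq_completion_init() once before enabling the interrupt,
 * irq_completion_done() from the interrupt handler, and
 * irq_completion_wait() from the thread that kicked off the operation.
 */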
static inline void irq_completion_done(struct mtk_snand_plat_dev *pdev)
{
	complete(&pdev->done);
}

static inline void irq_completion_init(struct mtk_snand_plat_dev *pdev)
{
	init_completion(&pdev->done);
}
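
/*
 * @reg and @bit are unused in this Linux implementation: the wait is
 * backed entirely by the completion. They are presumably kept in the
 * signature so that OS ports without interrupt support can poll the
 * register instead.
 */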
static inline int irq_completion_wait(struct mtk_snand_plat_dev *pdev,
				      void __iomem *reg, uint32_t bit,
				      uint32_t timeout_us)
{
	int ret;

	ret = wait_for_completion_timeout(&pdev->done,
					  usecs_to_jiffies(timeout_us));
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}
#endif /* _MTK_SNAND_OS_H_ */