drm/msm/dpu: merge base_off with blk_off in struct dpu_hw_blk_reg_map
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"

#define VBIF_VERSION                    0x0000
#define VBIF_CLK_FORCE_CTRL0            0x0008
#define VBIF_CLK_FORCE_CTRL1            0x000C
#define VBIF_QOS_REMAP_00               0x0020
#define VBIF_QOS_REMAP_01               0x0024
#define VBIF_QOS_REMAP_10               0x0028
#define VBIF_QOS_REMAP_11               0x002C
#define VBIF_WRITE_GATHER_EN            0x00AC
#define VBIF_IN_RD_LIM_CONF0            0x00B0
#define VBIF_IN_RD_LIM_CONF1            0x00B4
#define VBIF_IN_RD_LIM_CONF2            0x00B8
#define VBIF_IN_WR_LIM_CONF0            0x00C0
#define VBIF_IN_WR_LIM_CONF1            0x00C4
#define VBIF_IN_WR_LIM_CONF2            0x00C8
#define VBIF_OUT_RD_LIM_CONF0           0x00D0
#define VBIF_OUT_WR_LIM_CONF0           0x00D4
#define VBIF_OUT_AXI_AMEMTYPE_CONF0     0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1     0x0164
#define VBIF_XIN_PND_ERR                0x0190
#define VBIF_XIN_SRC_ERR                0x0194
#define VBIF_XIN_CLR_ERR                0x019C
#define VBIF_XIN_HALT_CTRL0             0x0200
#define VBIF_XIN_HALT_CTRL1             0x0204
#define VBIF_XINL_QOS_RP_REMAP_000      0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000(vbif)       (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size)
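/*
 * The QoS "level" remap bank sits directly after the priority (RP) remap
 * bank, so its start offset depends on the per-SoC RP bank size reported
 * by the catalog (cap->qos_rp_remap_size) and is computed at runtime
 * rather than hard-coded.
 */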

static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
		u32 *pnd_errors, u32 *src_errors)
{
	struct dpu_hw_blk_reg_map *c;
	u32 pnd, src;

	if (!vbif)
		return;
	c = &vbif->hw;
	pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
	src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);

	if (pnd_errors)
		*pnd_errors = pnd;
	if (src_errors)
		*src_errors = src;

	DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
}

static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
		u32 xin_id, u32 value)
{
	struct dpu_hw_blk_reg_map *c;
	u32 reg_off;
	u32 bit_off;
	u32 reg_val;

	/*
	 * Assume 4 bits per bit field, 8 fields per 32-bit register, so
	 * 16 bit fields maximum across two registers
	 */
	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
		return;

	c = &vbif->hw;

	if (xin_id >= 8) {
		xin_id -= 8;
		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
	} else {
		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
	}
	bit_off = (xin_id & 0x7) * 4;
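	/* fields are spaced 4 bits apart, but only the low 3 bits carry the memtype value */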
	reg_val = DPU_REG_READ(c, reg_off);
	reg_val &= ~(0x7 << bit_off);
	reg_val |= (value & 0x7) << bit_off;
	DPU_REG_WRITE(c, reg_off, reg_val);
}

static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
		u32 xin_id, bool rd, u32 limit)
{
	struct dpu_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;
	u32 reg_off;
	u32 bit_off;

	if (rd)
		reg_off = VBIF_IN_RD_LIM_CONF0;
	else
		reg_off = VBIF_IN_WR_LIM_CONF0;

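	/* each 32-bit LIM_CONF register packs four 8-bit limits, one per xin client */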
	reg_off += (xin_id / 4) * 4;
	bit_off = (xin_id % 4) * 8;
	reg_val = DPU_REG_READ(c, reg_off);
	reg_val &= ~(0xFF << bit_off);
	reg_val |= (limit) << bit_off;
	DPU_REG_WRITE(c, reg_off, reg_val);
}

static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
		u32 xin_id, bool rd)
{
	struct dpu_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;
	u32 reg_off;
	u32 bit_off;
	u32 limit;

	if (rd)
		reg_off = VBIF_IN_RD_LIM_CONF0;
	else
		reg_off = VBIF_IN_WR_LIM_CONF0;

	reg_off += (xin_id / 4) * 4;
	bit_off = (xin_id % 4) * 8;
	reg_val = DPU_REG_READ(c, reg_off);
	limit = (reg_val >> bit_off) & 0xFF;

	return limit;
}

static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
		u32 xin_id, bool enable)
{
	struct dpu_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;

	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);

	if (enable)
		reg_val |= BIT(xin_id);
	else
		reg_val &= ~BIT(xin_id);

	DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
}

static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
		u32 xin_id)
{
	struct dpu_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;

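	/* HALT_CTRL0 carries the per-client halt request bits; HALT_CTRL1 reflects the halt status */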
	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);

	return (reg_val & BIT(xin_id)) ? true : false;
}

static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
		u32 xin_id, u32 level, u32 remap_level)
{
	struct dpu_hw_blk_reg_map *c;
	u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;

	if (!vbif)
		return;

	c = &vbif->hw;

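	/*
	 * Each QoS level owns a pair of 32-bit remap registers (eight xin
	 * clients per register, a 4-bit field each); bit 3 of xin_id selects
	 * the high register of the pair, and only the low three bits of the
	 * field hold the remapped value.
	 */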
	reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif);
	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
	reg_shift = (xin_id & 0x7) * 4;

	reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
	reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);

	mask = 0x7 << reg_shift;

	reg_val &= ~mask;
	reg_val |= (remap_level << reg_shift) & mask;

	reg_val_lvl &= ~mask;
	reg_val_lvl |= (remap_level << reg_shift) & mask;

	DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
	DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
}

static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
{
	struct dpu_hw_blk_reg_map *c;
	u32 reg_val;

	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	c = &vbif->hw;

	reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
	reg_val |= BIT(xin_id);
	DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
}

static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
		unsigned long cap)
{
	ops->set_limit_conf = dpu_hw_set_limit_conf;
	ops->get_limit_conf = dpu_hw_get_limit_conf;
	ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
	ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
	if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
		ops->set_qos_remap = dpu_hw_set_qos_remap;
	ops->set_mem_type = dpu_hw_set_mem_type;
	ops->clear_errors = dpu_hw_clear_errors;
	ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}

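/*
 * Look up the catalog entry matching @vbif and point the block register
 * map at that instance's sub-range of the MDSS I/O region.
 */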
static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->vbif_count; i++) {
		if (vbif == m->vbif[i].id) {
			b->blk_addr = addr + m->vbif[i].base;
			b->log_mask = DPU_DBG_MASK_VBIF;
			return &m->vbif[i];
		}
	}

	return ERR_PTR(-EINVAL);
}

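/**
 * dpu_hw_vbif_init() - Initialize a VBIF hardware block driver
 * @idx:  VBIF instance index from the hardware catalog
 * @addr: Mapped base address of the MDSS register I/O region
 * @m:    Pointer to the MDSS hardware catalog
 *
 * Return: allocated dpu_hw_vbif context, or an ERR_PTR on allocation
 * failure or when @idx is not found in the catalog.
 */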
struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_vbif *c;
	const struct dpu_vbif_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Assign ops
	 */
	c->idx = idx;
	c->cap = cfg;
	_setup_vbif_ops(&c->ops, c->cap->features);

	/* no need to register sub-range in dpu dbg, dump entire vbif io base */

	return c;
}

void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
{
	kfree(vbif);
}
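
/*
 * Illustrative usage sketch (not part of this file): a caller such as the
 * KMS layer would typically create the block from the catalog and then go
 * through the ops table, checking the optional ops before calling them.
 * my_mmio, my_catalog, xin_id, lvl and remap_lvl below are placeholders
 * for caller-provided values.
 *
 *	struct dpu_hw_vbif *vbif;
 *
 *	vbif = dpu_hw_vbif_init(VBIF_RT, my_mmio, my_catalog);
 *	if (IS_ERR(vbif))
 *		return PTR_ERR(vbif);
 *
 *	if (vbif->ops.set_qos_remap)
 *		vbif->ops.set_qos_remap(vbif, xin_id, lvl, remap_lvl);
 *
 *	dpu_hw_vbif_destroy(vbif);
 */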