drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"

/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
{
        dev_dbg(ipc_imem->dev, "%s if id: %d",
                ipc_imem_phase_get_string(ipc_imem->phase), if_id);

        /* The network interface is only supported in the runtime phase. */
        if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
                dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
                        ipc_imem_phase_get_string(ipc_imem->phase));
                return -EIO;
        }

        return ipc_mux_open_session(ipc_imem->mux, if_id);
}

/* Release a net link to CP. */
void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
                             int channel_id)
{
        if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
            if_id <= IP_MUX_SESSION_END)
                ipc_mux_close_session(ipc_imem->mux, if_id);
}

/* Tasklet call to do uplink transfer. */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
                                  void *msg, size_t size)
{
        ipc_imem->ev_cdev_write_pending = false;
        ipc_imem_ul_send(ipc_imem);

        return 0;
}

/* Use the tasklet to do the sio write. */
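/* ev_cdev_write_pending guards against queueing the write task twice: it is
 * set here before the task is queued and cleared again in
 * ipc_imem_tq_cdev_write() once the task has run.
 */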
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
        if (ipc_imem->ev_cdev_write_pending)
                return -1;

        ipc_imem->ev_cdev_write_pending = true;

        return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
                                        NULL, 0, false);
}

/* Function to transfer UL data. */
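/* Returns -EINVAL for invalid arguments, -EIO when CP is not in the run
 * phase, otherwise the result of routing the packet through the IP MUX
 * layer.
 */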
int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
                               int if_id, int channel_id, struct sk_buff *skb)
{
        int ret = -EINVAL;

        if (!ipc_imem || channel_id < 0)
                goto out;

        /* Is CP Running? */
        if (ipc_imem->phase != IPC_P_RUN) {
                dev_dbg(ipc_imem->dev, "phase %s transmit",
                        ipc_imem_phase_get_string(ipc_imem->phase));
                ret = -EIO;
                goto out;
        }

        /* Route the UL packet through IP MUX Layer */
        ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
        return ret;
}

/* Initialize wwan channel */
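/* Reads the CP version from MMIO and aborts if it is invalid; otherwise
 * fetches the channel configuration for index ipc_imem->nr_of_channels via
 * ipc_chnl_cfg_get(), initializes a WWAN type channel and registers the
 * ipc_wwan interface on top of it.
 */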
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
                                enum ipc_mux_protocol mux_type)
{
        struct ipc_chnl_cfg chnl_cfg = { 0 };

        ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

        /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
        if (ipc_imem->cp_version == -1) {
                dev_err(ipc_imem->dev, "invalid CP version");
                return;
        }

        ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
        ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
                              IRQ_MOD_OFF);

        /* WWAN registration. */
        ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
        if (!ipc_imem->wwan)
                dev_err(ipc_imem->dev,
                        "failed to register the ipc_wwan interfaces");
}

/* Map SKB to DMA for transfer */
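/* The DMA mapping, direction and length are stored in the skb control block
 * (IPC_CB(skb)); the BUILD_BUG_ON() below makes sure that structure still
 * fits into skb->cb.
 */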
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
                                   struct sk_buff *skb)
{
        struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
        char *buf = skb->data;
        int len = skb->len;
        dma_addr_t mapping;
        int ret;

        ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);

        if (ret)
                goto err;

        BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

        IPC_CB(skb)->mapping = mapping;
        IPC_CB(skb)->direction = DMA_TO_DEVICE;
        IPC_CB(skb)->len = len;
        IPC_CB(skb)->op_type = (u8)UL_DEFAULT;

err:
        return ret;
}

/* Return true if the channel is ready for use. */
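/* In the RUN, PSI and EBL phases the channel must already be ACTIVE; in the
 * ROM phase a RESERVED channel is accepted because the PSI image is still
 * being prepared for the CP ROM driver. All other phases refuse uplink
 * actions.
 */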
static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
                                       struct ipc_mem_channel *channel)
{
        enum ipc_phase phase;

        /* Update the current operation phase. */
        phase = ipc_imem->phase;

        /* Select the operation depending on the execution stage. */
        switch (phase) {
        case IPC_P_RUN:
        case IPC_P_PSI:
        case IPC_P_EBL:
                break;

        case IPC_P_ROM:
                /* Prepare the PSI image for the CP ROM driver and
                 * suspend the flash app.
                 */
                if (channel->state != IMEM_CHANNEL_RESERVED) {
                        dev_err(ipc_imem->dev,
                                "ch[%d]: invalid channel state %d, expected %d",
                                channel->channel_id, channel->state,
                                IMEM_CHANNEL_RESERVED);
                        goto channel_unavailable;
                }
                goto channel_available;

        default:
                /* Ignore uplink actions in all other phases. */
                dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
                        channel->channel_id, phase);
                goto channel_unavailable;
        }
        /* Check the full availability of the channel. */
        if (channel->state != IMEM_CHANNEL_ACTIVE) {
                dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
                        channel->channel_id, channel->state);
                goto channel_unavailable;
        }

channel_available:
        return true;

channel_unavailable:
        return false;
}

/* Release a sio link to CP. */
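/* Waits for any pending UL and DL transfer descriptors to drain (bounded by
 * IPC_PEND_DATA_TIMEOUT), marks the channel as CLOSING, closes both pipes
 * and finally frees the channel. wait_for_completion_interruptible_timeout()
 * returns 0 on timeout, which is why a zero status is logged as
 * "Pend data Timeout".
 */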
void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
{
        struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
        struct ipc_mem_channel *channel = ipc_cdev->channel;
        enum ipc_phase curr_phase;
        int status = 0;
        u32 tail = 0;

        curr_phase = ipc_imem->phase;

        /* If the current phase is IPC_P_OFF or the SIO ID is negative then
         * the channel is already freed. Nothing to do.
         */
        if (curr_phase == IPC_P_OFF) {
                dev_err(ipc_imem->dev,
                        "nothing to do. Current Phase: %s",
                        ipc_imem_phase_get_string(curr_phase));
                return;
        }

        if (channel->state == IMEM_CHANNEL_FREE) {
                dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
                        channel->channel_id, channel->state);
                return;
        }

        /* If there are any pending UL TDs then wait for Timeout/Completion
         * before closing the pipe.
         */
        if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
                ipc_imem->app_notify_ul_pend = 1;

                /* Suspend the user app and wait a certain time for processing
                 * UL Data.
                 */
                status = wait_for_completion_interruptible_timeout
                         (&ipc_imem->ul_pend_sem,
                          msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
                if (status == 0) {
                        dev_dbg(ipc_imem->dev,
                                "Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
                                channel->ul_pipe.pipe_nr,
                                channel->ul_pipe.old_head,
                                channel->ul_pipe.old_tail);
                }

                ipc_imem->app_notify_ul_pend = 0;
        }

        /* If there are any pending DL TDs then wait for Timeout/Completion
         * before closing the pipe.
         */
        ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
                                         &channel->dl_pipe, NULL, &tail);

        if (tail != channel->dl_pipe.old_tail) {
                ipc_imem->app_notify_dl_pend = 1;

                /* Suspend the user app and wait a certain time for processing
                 * DL Data.
                 */
                status = wait_for_completion_interruptible_timeout
                         (&ipc_imem->dl_pend_sem,
                          msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
                if (status == 0) {
                        dev_dbg(ipc_imem->dev,
                                "Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
                                channel->dl_pipe.pipe_nr,
                                channel->dl_pipe.old_head,
                                channel->dl_pipe.old_tail);
                }

                ipc_imem->app_notify_dl_pend = 0;
        }

        /* Due to the wait for completion in messages, there is a small window
         * between closing the pipe and updating the channel state to closed.
         * In this small window there could be an HP update from the Host
         * Driver. Hence update the channel state as CLOSING to avoid an
         * unnecessary interrupt towards CP.
         */
        channel->state = IMEM_CHANNEL_CLOSING;

        ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
        ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

        ipc_imem_channel_free(channel);
}

/* Open a PORT link to CP and return the channel */
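/* A control channel id is first reserved via ipc_imem_channel_alloc() with
 * type IPC_CTYPE_CTRL and then opened via ipc_imem_channel_open() with the
 * given hp_id; NULL is returned if the phase is not RUN or if either step
 * fails.
 */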
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
                                               int chl_id, int hp_id)
{
        struct ipc_mem_channel *channel;
        int ch_id;

        /* The PORT interface is only supported in the runtime phase. */
        if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
                dev_err(ipc_imem->dev, "PORT open refused, phase %s",
                        ipc_imem_phase_get_string(ipc_imem->phase));
                return NULL;
        }

        ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);

        if (ch_id < 0) {
                dev_err(ipc_imem->dev, "reservation of a PORT chnl id failed");
                return NULL;
        }

        channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);

        if (!channel) {
                dev_err(ipc_imem->dev, "PORT channel id open failed");
                return NULL;
        }

        return channel;
}

/* Transfer the skb to the modem. */
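/* The skb is DMA mapped, appended to the channel uplink list and the write
 * task is scheduled; if scheduling fails the skb is removed from the list
 * again and an error is returned.
 */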
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
        struct ipc_mem_channel *channel = ipc_cdev->channel;
        struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
        int ret = -EIO;

        if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
            ipc_imem->phase == IPC_P_OFF_REQ)
                goto out;

        ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);

        if (ret)
                goto out;

        /* Add skb to the uplink skbuf accumulator. */
        skb_queue_tail(&channel->ul_list, skb);

        ret = ipc_imem_call_cdev_write(ipc_imem);

        if (ret) {
                skb_dequeue_tail(&channel->ul_list);
                dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
                        ipc_cdev->channel->channel_id);
        }
out:
        return ret;
}