/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into the send buffer, if send buffer space is
 * available.
 * Consumer:
 * Trigger RDMA write into the RMBE of the peer and send a CDC message,
 * if RMBE space is available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"

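/* delay for the deferred tx worker to retry, e.g. when no send work
 * request slot is immediately available
 */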
#define SMC_TX_WORK_DELAY	HZ

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked in smc_tx_wait_memory().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   POLLOUT | POLLWRNORM |
						   POLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wake up sndbuf producers that blocked in smc_tx_wait_memory().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space is available */
static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = !timeo;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
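		/* loop until at least one byte of sndbuf space is free, or
		 * until shutdown/error/abort, timeout expiry, or a pending
		 * signal ends the wait
		 */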
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space))
			break; /* at least 1 byte of free space available */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close_or_senddone(conn) ||
			      atomic_read(&conn->sndbuf_space),
			      &wait);
		sk->sk_write_pending--;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (!atomic_read(&conn->sndbuf_space)) {
			rc = smc_tx_wait_memory(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait_memory above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t,
				  copylen, conn->sndbuf_size - tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
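		/* copy user data into the ring buffer in at most two chunks:
		 * a first chunk up to the end of the buffer and, if the data
		 * wraps, a second chunk starting again at offset 0
		 */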
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_size, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_size */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		((conn->peer_conn_idx - 1) * conn->peer_rmbe_size) +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
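	/* a posting failure is treated as unrecoverable for this connection,
	 * so flag the connection as aborted
	 */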
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_size, sent, len);
}

/* sndbuf consumer: prepare all necessary (src & dst) chunks of the data
 * transmit; usable snd_wnd is the maximum transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
	size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
	union smc_host_cursor sent, prep, prod, cons;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	struct smc_link_group *lgr = conn->lgr;
	int to_send, rmbespace;
	struct smc_link *link;
	dma_addr_t dma_addr;
	int num_sges;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes, ask peer to advertise once it opens again */
	conn->local_tx_ctrl.prod_flags.write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	link = &lgr->lnk[SMC_SINGLE_LINK];
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	dst_len_sum = dst_len;
	src_off = sent.count;
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_size) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_size - sent.count;
	}
	src_len_sum = src_len;
	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
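	/* transmit in at most two RDMA writes, one per destination chunk if
	 * the free RMBE space wraps; each write gathers from up to two
	 * source chunks if the sndbuf area to send wraps
	 */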
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;
			src_off += src_len;
			if (src_off >= conn->sndbuf_size)
				src_off -= conn->sndbuf_size;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int,
				dst_len, conn->sndbuf_size - sent.count);
		src_len_sum = src_len;
	}

	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
							/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
							/* src: local sndbuf */

	return 0;
}

/* Wake up sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd is the max transmit
 */
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc < 0) {
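		/* -EBUSY means no free send work request slot right now;
		 * unless the socket has already failed, retry later via the
		 * delayed tx worker instead of reporting an error
		 */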
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0;
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}

	rc = smc_tx_rdma_writes(conn);
	if (rc) {
		smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
				   (struct smc_wr_tx_pend_priv *)pend);
		goto out_unlock;
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

/* Wake up sndbuf consumers from process context
 * since there is more data to transmit
 */
static void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn)
{
	union smc_host_cursor cfed, cons;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int to_confirm, rc;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons);

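	/* send a consumer cursor update to the peer if it requested one, or
	 * if enough receive space was freed: more than rmbe_update_limit and
	 * either more than half the RMBE or a write-blocked peer
	 */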
	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((to_confirm > (conn->rmbe_size / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
		if (!rc)
			rc = smc_cdc_msg_send(conn, wr_buf, pend);
		if (rc < 0) {
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	spin_lock_init(&smc->conn.send_lock);
}