// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;
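	/* Note the "+ 1" above: we're about to queue one more call, so we
	 * need at least one more preallocated conn and peer than there are
	 * calls already in the backlog.
	 */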

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}
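
/* The backlog rings above follow the one-producer/one-consumer pattern from
 * Documentation/core-api/circular-buffers.rst: the producer fills a slot and
 * then publishes the new head with smp_store_release(); the consumer pairs
 * that with smp_load_acquire() on the head before consuming the slot and
 * publishing the new tail.  As a minimal sketch of the producer side
 * (hypothetical ring of void pointers, reusing RXRPC_BACKLOG_MAX, which must
 * be a power of two):
 *
 *	unsigned int head = ring->head;
 *	unsigned int tail = READ_ONCE(ring->tail);
 *
 *	if (CIRC_SPACE(head, tail, RXRPC_BACKLOG_MAX) >= 1) {
 *		ring->slots[head] = item;
 *		smp_store_release(&ring->head,
 *				  (head + 1) & (RXRPC_BACKLOG_MAX - 1));
 *	}
 */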

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
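	/* Taking and immediately dropping incoming_lock acts as a barrier:
	 * any rxrpc_new_incoming_call() that was already inside its critical
	 * section has now left it, so nothing can still be consuming the
	 * backlog rings.
	 */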

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

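	/* Probe if we have fewer than three RTT samples or if we haven't
	 * sent an RTT probe within the last second.
	 */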
	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}
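
/* rxrpc_alloc_incoming_call() is the consumer side of the rings charged by
 * rxrpc_service_prealloc_one(): each smp_load_acquire() on a head above
 * pairs with the producer's smp_store_release() of that head, guaranteeing
 * that a slot's contents are visible before we consume it.
 */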

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
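
/* A rough usage sketch (not from this file; my_notify_rx, my_attach_call
 * and struct my_call are hypothetical names): a kernel service would
 * typically charge one call per expected incoming RPC and recharge from its
 * notify_new_call handler.  The attach function is invoked with the
 * socket's call_lock write-held (see rxrpc_service_prealloc_one() above),
 * so it must not sleep:
 *
 *	static void my_attach_call(struct rxrpc_call *rxcall,
 *				   unsigned long user_call_ID)
 *	{
 *		struct my_call *c = (struct my_call *)user_call_ID;
 *
 *		c->rxcall = rxcall;	// the user ID doubles as our record
 *	}
 *
 *	...
 *	ret = rxrpc_kernel_charge_accept(srv->sock, my_notify_rx,
 *					 my_attach_call, (unsigned long)c,
 *					 GFP_KERNEL, c->debug_id);
 *	if (ret < 0)
 *		kfree(c);
 */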