/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		srx->transport.sin6.sin6_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
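			/* Synthesize an IPv4-mapped IPv6 address
			 * (::ffff:a.b.c.d) from the IPv4 address carried in
			 * the ICMP payload.
			 */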
			srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
			srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
			srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);
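
	/* ee_info carries the next-hop MTU reported in the ICMP
	 * Fragmentation Needed message; it may be zero if the router
	 * didn't supply one.
	 */
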
	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
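
	/* An ICMP Fragmentation Needed report only updates the path MTU
	 * estimate; it doesn't complete any calls.
	 */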
	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	rxrpc_put_peer(peer);

	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;
	err = ee->ee_errno;
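
	/* Reports of ICMP origin complete calls as network errors; locally
	 * generated reports (and those with no origin) complete them as
	 * local errors.
	 */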
	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
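		/* error is a positive errno value here; the completion call
		 * takes the conventional negative form.
		 */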
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;
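
	/* The RTT cache is a fixed-size ring of recent samples;
	 * RXRPC_RTT_CACHE_SIZE must be a power of two for the cursor mask
	 * below to work.
	 */
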
	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		spin_unlock_bh(&rxnet->peer_hash_lock);

		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
		slot = keepalive_at - base;
		_debug("%02x peer %u t=%d {%pISp}",
		       cursor, peer->debug_id, slot, &peer->srx.transport);

		if (keepalive_at <= base ||
		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
			rxrpc_send_keepalive(peer);
			slot = RXRPC_KEEPALIVE_TIME;
		}

		/* A transmission to this peer occurred since last we examined
		 * it so put it into the appropriate future bucket.
		 */
		slot += cursor;
		slot &= mask;
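		/* For example, with a 32-bucket ring (mask 31) and the
		 * cursor at 30, a peer due in 4 seconds goes into bucket
		 * (30 + 4) & 31 == 2.  (Illustrative figures; the ring size
		 * is ARRAY_SIZE(rxnet->peer_keepalive).)
		 */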
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
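	/* The (s8) cast compares the u8 cursor and stop values modulo 256,
	 * so the loop below terminates even when the cursor wraps.
	 */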
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}