int i;
struct lt_sendlap cbuf;
+ unsigned char *hdr;
cbuf.command = LT_SENDLAP;
cbuf.dnode = skb->data[0];
printk("\n");
}
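+ /* Grab the transport (DDP) header pointer once; do_write() and the debug dump below both use it. */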
- do_write(dev,&cbuf,sizeof(cbuf),skb->h.raw,skb->len);
+ hdr = skb_transport_header(skb);
+ do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
if(debug & DEBUG_UPPER) {
printk("sent %d ddp bytes\n",skb->len);
- for(i=0;i<skb->len;i++) printk("%02x ",skb->h.raw[i]);
+ for (i = 0; i < skb->len; i++)
+ printk("%02x ", hdr[i]);
printk("\n");
}
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
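+ /* The SGL covers the linear data from the transport header to skb->tail, plus any page fragments. */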
- sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb_transport_header(skb),
adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
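+ /* Record how many linear bytes were DMA-mapped so the deferred destructor can unmap them later. */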
- ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
+ ((struct unmap_info *)skb->cb)->len = (skb->tail -
+ skb_transport_header(skb));
}
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
return 1; /* packet fits as immediate data */
flits = skb_transport_offset(skb) / 8; /* headers */
- if (skb->tail != skb->h.raw)
+ if (skb->tail != skb_transport_header(skb))
cnt++;
return flits_to_desc(flits + sgl_len(cnt));
}
eddp = qeth_eddp_create_eddp_data(qhdr,
skb_network_header(skb),
ip_hdrlen(skb),
- skb->h.raw,
+ skb_transport_header(skb),
tcp_hdrlen(skb));
else
eddp = qeth_eddp_create_eddp_data(qhdr,
skb_network_header(skb),
sizeof(struct ipv6hdr),
- skb->h.raw,
+ skb_transport_header(skb),
tcp_hdrlen(skb));
if (eddp == NULL) {
static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb)
{
- return (struct ddpehdr *)skb->h.raw;
+ return (struct ddpehdr *)skb_transport_header(skb);
}
/* AppleTalk AARP headers */
static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
{
- return (struct elapaarp *)skb->h.raw;
+ return (struct elapaarp *)skb_transport_header(skb);
}
/* Not specified - how long till we drop a resolved entry */
static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
{
- return (struct dccp_hdr *)skb->h.raw;
+ return (struct dccp_hdr *)skb_transport_header(skb);
}
static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
{
skb_push(skb, headlen);
skb_reset_transport_header(skb);
- return memset(skb->h.raw, 0, headlen);
+ return memset(skb_transport_header(skb), 0, headlen);
}
static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
{
- return (struct dccp_hdr_ext *)(skb->h.raw + sizeof(struct dccp_hdr));
+ return (struct dccp_hdr_ext *)(skb_transport_header(skb) +
+ sizeof(struct dccp_hdr));
}
static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
{
- return (struct dccp_hdr_request *)(skb->h.raw + dccp_basic_hdr_len(skb));
+ return (struct dccp_hdr_request *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
}
static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
{
- return (struct dccp_hdr_ack_bits *)(skb->h.raw + dccp_basic_hdr_len(skb));
+ return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
}
static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
{
- return (struct dccp_hdr_response *)(skb->h.raw + dccp_basic_hdr_len(skb));
+ return (struct dccp_hdr_response *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
}
static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
{
- return (struct dccp_hdr_reset *)(skb->h.raw + dccp_basic_hdr_len(skb));
+ return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
}
static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
- return (struct icmphdr *)skb->h.raw;
+ return (struct icmphdr *)skb_transport_header(skb);
}
#endif
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
- return (struct icmp6hdr *)skb->h.raw;
+ return (struct icmp6hdr *)skb_transport_header(skb);
}
#endif
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
{
- return (struct igmphdr *)skb->h.raw;
+ return (struct igmphdr *)skb_transport_header(skb);
}
static inline struct igmpv3_report *
igmpv3_report_hdr(const struct sk_buff *skb)
{
- return (struct igmpv3_report *)skb->h.raw;
+ return (struct igmpv3_report *)skb_transport_header(skb);
}
static inline struct igmpv3_query *
igmpv3_query_hdr(const struct sk_buff *skb)
{
- return (struct igmpv3_query *)skb->h.raw;
+ return (struct igmpv3_query *)skb_transport_header(skb);
}
#endif
static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
- return (struct iphdr *)skb->h.raw;
+ return (struct iphdr *)skb_transport_header(skb);
}
#endif
static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
{
- return (struct ipv6hdr *)skb->h.raw;
+ return (struct ipv6hdr *)skb_transport_header(skb);
}
/*
static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
{
- return (struct sctphdr *)skb->h.raw;
+ return (struct sctphdr *)skb_transport_header(skb);
}
#endif
skb->tail += len;
}
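+/*
+ * Accessor for the transport (layer 4) header; at this stage of the
+ * conversion it is simply a wrapper around the existing skb->h.raw pointer.
+ */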
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+ return skb->h.raw;
+}
+
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
skb->h.raw = skb->data;
static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
- return (struct tcphdr *)skb->h.raw;
+ return (struct tcphdr *)skb_transport_header(skb);
}
static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
- return (struct udphdr *)skb->h.raw;
+ return (struct udphdr *)skb_transport_header(skb);
}
#endif
static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
{
- return (struct ipxhdr *)skb->h.raw;
+ return (struct ipxhdr *)skb_transport_header(skb);
}
struct ipx_interface {
case TCF_LAYER_NETWORK:
return skb_network_header(skb);
case TCF_LAYER_TRANSPORT:
- return skb->h.raw;
+ return skb_transport_header(skb);
}
return NULL;
*/
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
- __wsum csum = csum_partial(skb->h.raw, sizeof(struct udphdr), 0);
-
+ __wsum csum = csum_partial(skb_transport_header(skb),
+ sizeof(struct udphdr), 0);
skb_queue_walk(&sk->sk_write_queue, skb) {
csum = csum_add(csum, skb->csum);
}
};
rcu_read_lock();
- proto = find_snap_client(skb->h.raw);
+ proto = find_snap_client(skb_transport_header(skb));
if (proto) {
/* Pass the frame on. */
skb->h.raw += 5;
skb_set_transport_header(skb, lv);
- SOCK_DEBUG(sk, "base=%p pos=%p\n", skb->data, skb->h.raw);
+ SOCK_DEBUG(sk, "base=%p pos=%p\n",
+ skb->data, skb_transport_header(skb));
- *skb->h.raw = AX25_UI;
+ *skb_transport_header(skb) = AX25_UI;
/* Datagram frames go straight out of the door as UI */
ax25_queue_xmit(skb, ax25->ax25_dev->dev);
skb_push(skb, HCI_ACL_HDR_SIZE);
skb_reset_transport_header(skb);
- hdr = (struct hci_acl_hdr *)skb->h.raw;
+ hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
hdr->dlen = cpu_to_le16(len);
}
skb_push(skb, HCI_SCO_HDR_SIZE);
skb_reset_transport_header(skb);
- memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
+ memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
BUG_ON(offset > (int)skb->len);
csum = skb_checksum(skb, offset, skb->len-offset, 0);
- offset = skb->tail - skb->h.raw;
+ offset = skb->tail - skb_transport_header(skb);
BUG_ON(offset <= 0);
BUG_ON(skb->csum_offset + 2 > offset);
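+ /* Fold the 32-bit sum and store it csum_offset bytes into the transport header. */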
- *(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum);
-
+ *(__sum16 *)(skb_transport_header(skb) +
+ skb->csum_offset) = csum_fold(csum);
out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
printk(KERN_DEBUG "AUN: recvfrom() error %d\n", -err);
}
- data = skb->h.raw + sizeof(struct udphdr);
+ data = skb_transport_header(skb) + sizeof(struct udphdr);
ah = (struct aunhdr *)data;
len = skb->len - sizeof(struct udphdr);
ip = ip_hdr(skb);
struct iphdr *pip = ip_hdr(skb);
struct igmphdr *pig = igmp_hdr(skb);
const int iplen = skb->tail - skb->nh.raw;
- const int igmplen = skb->tail - skb->h.raw;
+ const int igmplen = skb->tail - skb_transport_header(skb);
pip->tot_len = htons(iplen);
ip_send_check(pip);
skb_reset_mac_header(skb);
__pskb_pull(skb, offset);
skb_reset_network_header(skb);
- skb_postpull_rcsum(skb, skb->h.raw, offset);
+ skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (MULTICAST(iph->daddr)) {
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(skb_prev,
maxfraglen,
- skb->h.raw,
+ skb_transport_header(skb),
fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
&ipc, rt, MSG_DONTWAIT);
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
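+ /* csumoffset is in 16-bit words from the transport header, hence the __sum16 pointer arithmetic. */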
if (arg->csumoffset >= 0)
- *((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
+ *((__sum16 *)skb_transport_header(skb) +
+ arg->csumoffset) = csum_fold(csum_add(skb->csum,
+ arg->csum));
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk);
}
goto drop;
/* Basic sanity checks can be done without the lock. */
- rarp = (struct arphdr *)skb->h.raw;
+ rarp = (struct arphdr *)skb_transport_header(skb);
/* If this test doesn't pass, it's not IP, or we should
* ignore it anyway.
goto drop;
/* OK, it is all there and looks valid, process... */
- rarp = (struct arphdr *)skb->h.raw;
+ rarp = (struct arphdr *)skb_transport_header(skb);
rarp_ptr = (unsigned char *) (rarp + 1);
/* One reply at a time, please. */
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
goto drop;
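+ /* PIMv1 register messages reuse the IGMP header layout; the encapsulated IP header follows it. */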
- encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
+ encap = (struct iphdr *)(skb_transport_header(skb) +
+ sizeof(struct igmphdr));
/*
Check that:
a. packet is really destined to a multicast group
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
goto drop;
- pim = (struct pimreghdr*)skb->h.raw;
+ pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
(pim->flags&PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
goto drop;
/* check if the inner packet is destined to mcast group */
- encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
+ encap = (struct iphdr *)(skb_transport_header(skb) +
+ sizeof(struct pimreghdr));
if (!MULTICAST(encap->daddr) ||
encap->tot_len == 0 ||
ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
(__force u32)delta));
if (skb->ip_summed != CHECKSUM_PARTIAL)
- th->check = csum_fold(csum_partial(skb->h.raw, thlen,
- skb->csum));
+ th->check =
+ csum_fold(csum_partial(skb_transport_header(skb),
+ thlen, skb->csum));
seq += len;
skb = skb->next;
th->cwr = 0;
} while (skb->next);
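+ /* Fix up the last segment: its pseudo-header checksum must reflect the remaining payload length. */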
- delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
+ delta = htonl(oldlen + (skb->tail - skb_transport_header(skb)) +
+ skb->data_len);
th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
(__force u32)delta));
if (skb->ip_summed != CHECKSUM_PARTIAL)
- th->check = csum_fold(csum_partial(skb->h.raw, thlen,
- skb->csum));
+ th->check = csum_fold(csum_partial(skb_transport_header(skb),
+ thlen, skb->csum));
out:
return segs;
*
* "len" is invariant segment length, including TCP header.
*/
- len += skb->data - skb->h.raw;
+ len += skb->data - skb_transport_header(skb);
if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
/* If PSH is not set, packet should be
* full sized, provided peer TCP is not badly broken.
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
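+ /* ->sacked on the ACK skb holds the offset of the SACK option from the start of the TCP header. */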
- unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
+ unsigned char *ptr = (skb_transport_header(ack_skb) +
+ TCP_SKB_CB(ack_skb)->sacked);
struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
struct sk_buff *cached_skb;
int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
return;
skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
- skb_set_network_header(nskb,
- skb_network_header(skb) - skb->head);
- skb_set_transport_header(nskb, skb->h.raw - skb->head);
-
+ skb_set_network_header(nskb, (skb_network_header(skb) -
+ skb->head));
+ skb_set_transport_header(nskb, (skb_transport_header(skb) -
+ skb->head));
skb_reserve(nskb, header);
memcpy(nskb->head, skb->head, header);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
BUG_ON(optlen < 0);
- ph = (struct ip_beet_phdr *)skb->h.raw;
+ ph = (struct ip_beet_phdr *)skb_transport_header(skb);
ph->padlen = 4 - (optlen & 4);
ph->hdrlen = optlen / 8;
ph->nexthdr = top_iph->protocol;
*/
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
- int ihl = skb->data - skb->h.raw;
+ int ihl = skb->data - skb_transport_header(skb);
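+ /* Move the IP header over the stripped IPsec header so it ends up directly in front of the payload. */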
if (skb->h.raw != skb->nh.raw) {
- memmove(skb->h.raw, skb_network_header(skb), ihl);
+ memmove(skb_transport_header(skb),
+ skb_network_header(skb), ihl);
skb->nh.raw = skb->h.raw;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
goto error_free_iph;
}
- ah = (struct ip_auth_hdr *)skb->h.raw;
+ ah = (struct ip_auth_hdr *)skb_transport_header(skb);
ah->nexthdr = nexthdr;
top_iph->priority = 0;
pskb_put(skb, trailer, clen - skb->len);
top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
- esph = (struct ipv6_esp_hdr *)skb->h.raw;
+ esph = (struct ipv6_esp_hdr *)skb_transport_header(skb);
top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
*(u8 *)(trailer->tail - 1) = *skb_network_header(skb);
*skb_network_header(skb) = IPPROTO_ESP;
struct tlvtype_proc *curr;
const unsigned char *nh = skb_network_header(skb);
int off = skb->h.raw - skb->nh.raw;
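+ /* The extension header length field counts 8-octet units beyond the first 8 octets. */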
- int len = ((skb->h.raw[1]+1)<<3);
+ int len = (skb_transport_header(skb)[1] + 1) << 3;
if (skb_transport_offset(skb) + len > skb_headlen(skb))
goto bad;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
- ((skb->h.raw[1] + 1) << 3)))) {
+ ((skb_transport_header(skb)[1] + 1) << 3)))) {
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
- opt->lastopt = skb->h.raw - skb->nh.raw;
- opt->dst1 = skb->h.raw - skb->nh.raw;
+ opt->lastopt = opt->dst1 = skb->h.raw - skb->nh.raw;
#ifdef CONFIG_IPV6_MIP6
dstbuf = opt->dst1;
#endif
if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) {
dst_release(dst);
skb = *skbp;
- skb->h.raw += ((skb->h.raw[1]+1)<<3);
+ skb->h.raw += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
#ifdef CONFIG_IPV6_MIP6
opt->nhoff = dstbuf;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
- ((skb->h.raw[1] + 1) << 3)))) {
+ ((skb_transport_header(skb)[1] + 1) << 3)))) {
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
}
- hdr = (struct ipv6_rt_hdr *) skb->h.raw;
+ hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
switch (hdr->type) {
#ifdef CONFIG_IPV6_MIP6
break;
}
- opt->lastopt = skb->h.raw - skb->nh.raw;
- opt->srcrt = skb->h.raw - skb->nh.raw;
+ opt->lastopt = opt->srcrt = skb->h.raw - skb->nh.raw;
skb->h.raw += (hdr->hdrlen + 1) << 3;
opt->dst0 = opt->dst1;
opt->dst1 = 0;
* hop-by-hop options.
*/
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
- !pskb_may_pull(skb, sizeof(struct ipv6hdr) + ((skb->h.raw[1] + 1) << 3))) {
+ !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
+ ((skb_transport_header(skb)[1] + 1) << 3)))) {
kfree_skb(skb);
return -1;
}
opt->hop = sizeof(struct ipv6hdr);
if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) {
skb = *skbp;
- skb->h.raw += (skb->h.raw[1]+1)<<3;
+ skb->h.raw += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
opt->nhoff = sizeof(struct ipv6hdr);
return 1;
/* compression */
plen = skb->len - hdr_len;
dlen = IPCOMP_SCRATCH_SIZE;
- start = skb->h.raw;
+ start = skb_transport_header(skb);
cpu = get_cpu();
scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
in6_dev_put(idev);
return -EINVAL;
}
- mlh2 = (struct mld2_query *) skb->h.raw;
+ mlh2 = (struct mld2_query *)skb_transport_header(skb);
max_delay = (MLDV2_MRC(ntohs(mlh2->mrc))*HZ)/1000;
if (!max_delay)
max_delay = 1;
in6_dev_put(idev);
return -EINVAL;
}
- mlh2 = (struct mld2_query *) skb->h.raw;
+ mlh2 = (struct mld2_query *)skb_transport_header(skb);
mark = 1;
}
} else {
static void mld_sendpack(struct sk_buff *skb)
{
struct ipv6hdr *pip6 = ipv6_hdr(skb);
- struct mld2_report *pmr = (struct mld2_report *)skb->h.raw;
+ struct mld2_report *pmr =
+ (struct mld2_report *)skb_transport_header(skb);
int payload_len, mldlen;
struct inet6_dev *idev = in6_dev_get(skb->dev);
int err;
IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS);
payload_len = skb->tail - skb_network_header(skb) - sizeof(*pip6);
- mldlen = skb->tail - skb->h.raw;
+ mldlen = skb->tail - skb_transport_header(skb);
pip6->payload_len = htons(payload_len);
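+ /* The MLDv2 checksum covers only the MLD payload, combined with the IPv6 pseudo-header. */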
pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
- IPPROTO_ICMPV6, csum_partial(skb->h.raw, mldlen, 0));
+ IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb),
+ mldlen, 0));
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev,
mld_dev_queue_xmit);
if (!err) {
pgr->grec_auxwords = 0;
pgr->grec_nsrcs = 0;
pgr->grec_mca = pmc->mca_addr; /* structure copy */
- pmr = (struct mld2_report *)skb->h.raw;
+ pmr = (struct mld2_report *)skb_transport_header(skb);
pmr->ngrec = htons(ntohs(pmr->ngrec)+1);
*ppgr = pgr;
return skb;
if (!*psf_list)
goto empty_source;
- pmr = skb ? (struct mld2_report *)skb->h.raw : NULL;
+ pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
/* EX and TO_EX get a fresh packet, if needed */
if (truncate) {
if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
- ((skb->h.raw[1] + 1) << 3))))
+ ((skb_transport_header(skb)[1] + 1) << 3))))
return -1;
- mh = (struct ip6_mh *)skb->h.raw;
+ mh = (struct ip6_mh *)skb_transport_header(skb);
if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
nexthdr = *skb_network_header(skb);
*skb_network_header(skb) = IPPROTO_DSTOPTS;
- dstopt = (struct ipv6_destopt_hdr *)skb->h.raw;
+ dstopt = (struct ipv6_destopt_hdr *)skb_transport_header(skb);
dstopt->nexthdr = nexthdr;
hao = mip6_padn((char *)(dstopt + 1),
nexthdr = *skb_network_header(skb);
*skb_network_header(skb) = IPPROTO_ROUTING;
- rt2 = (struct rt2_hdr *)skb->h.raw;
+ rt2 = (struct rt2_hdr *)skb_transport_header(skb);
rt2->rt_hdr.nexthdr = nexthdr;
rt2->rt_hdr.hdrlen = (x->props.header_len >> 3) - 1;
rt2->rt_hdr.type = IPV6_SRCRT_TYPE_2;
static void ndisc_recv_ns(struct sk_buff *skb)
{
- struct nd_msg *msg = (struct nd_msg *)skb->h.raw;
+ struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
u8 *lladdr = NULL;
static void ndisc_recv_na(struct sk_buff *skb)
{
- struct nd_msg *msg = (struct nd_msg *)skb->h.raw;
+ struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
u8 *lladdr = NULL;
static void ndisc_recv_rs(struct sk_buff *skb)
{
- struct rs_msg *rs_msg = (struct rs_msg *) skb->h.raw;
+ struct rs_msg *rs_msg = (struct rs_msg *)skb_transport_header(skb);
unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
struct neighbour *neigh;
struct inet6_dev *idev;
static void ndisc_router_discovery(struct sk_buff *skb)
{
- struct ra_msg *ra_msg = (struct ra_msg *) skb->h.raw;
+ struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
struct neighbour *neigh = NULL;
struct inet6_dev *in6_dev;
struct rt6_info *rt = NULL;
__u8 * opt = (__u8 *)(ra_msg + 1);
- optlen = (skb->tail - skb->h.raw) - sizeof(struct ra_msg);
+ optlen = (skb->tail - skb_transport_header(skb)) -
+ sizeof(struct ra_msg);
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
ND_PRINTK2(KERN_WARNING
return;
}
- optlen = skb->tail - skb->h.raw;
+ optlen = skb->tail - skb_transport_header(skb);
optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
if (optlen < 0) {
if (!pskb_may_pull(skb, skb->len))
return 0;
- msg = (struct nd_msg *) skb->h.raw;
+ msg = (struct nd_msg *)skb_transport_header(skb);
- __skb_push(skb, skb->data-skb->h.raw);
+ __skb_push(skb, skb->data - skb_transport_header(skb));
if (ipv6_hdr(skb)->hop_limit != 255) {
ND_PRINTK2(KERN_WARNING
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
- amount = skb->tail - skb->h.raw;
+ amount = skb->tail - skb_transport_header(skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
}
hdr = ipv6_hdr(skb);
- fhdr = (struct frag_hdr *)skb->h.raw;
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
if (!(fhdr->frag_off & htons(0xFFF9))) {
/* It is not a fragmented frame */
*/
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
- int ihl = skb->data - skb->h.raw;
+ int ihl = skb->data - skb_transport_header(skb);
if (skb->h.raw != skb->nh.raw) {
- memmove(skb->h.raw, skb_network_header(skb), ihl);
+ memmove(skb_transport_header(skb),
+ skb_network_header(skb), ihl);
skb->nh.raw = skb->h.raw;
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
case IPPROTO_COMP:
if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
return -EINVAL;
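+ /* IPComp carries a 16-bit CPI at offset 2; widen it into the 32-bit SPI slot. */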
- *spi = htonl(ntohs(*(__be16*)(skb->h.raw + 2)));
+ *spi = htonl(ntohs(*(__be16*)(skb_transport_header(skb) + 2)));
*seq = 0;
return 0;
default:
if (!pskb_may_pull(skb, 16))
return -EINVAL;
- *spi = *(__be32*)(skb->h.raw + offset);
- *seq = *(__be32*)(skb->h.raw + offset_seq);
+ *spi = *(__be32*)(skb_transport_header(skb) + offset);
+ *seq = *(__be32*)(skb_transport_header(skb) + offset_seq);
return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);