Merge tag 'fixes-2020-08-18' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 18 Aug 2020 19:05:46 +0000 (12:05 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 18 Aug 2020 19:05:46 +0000 (12:05 -0700)
Pull ia64 page table fix from Mike Rapoport:
 "Fix regression in IA-64 caused by page table allocation refactoring

   The refactoring and consolidation of <asm/pgalloc.h> caused a
   regression on parisc and ia64. The fix for parisc made it into
   v5.9-rc1, while the fix for ia64 got delayed a bit and here it is"

* tag 'fixes-2020-08-18' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  arch/ia64: Restore arch-specific pgd_offset_k implementation

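For context, the fix reinstates ia64's own pgd_offset_k() in place of the
generic definition that the <asm/pgalloc.h> consolidation switched it to.
A minimal sketch of such an arch-specific override is shown below; it is an
illustration of the technique, not the verbatim patch (the real macro lives
in arch/ia64/include/asm/pgtable.h):

/*
 * Illustrative sketch only: kernel mappings on ia64 all live in a single
 * region, so the kernel pgd index can ignore the region bits that the
 * generic pgd_offset_k()/pgd_index() would otherwise fold in.
 */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
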
80 files changed:
.mailmap
Documentation/bpf/index.rst
Documentation/networking/bonding.rst
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/3com/3c574_cs.c
drivers/net/ethernet/8390/axnet_cs.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/sfc/ef100_rx.c
drivers/net/ethernet/sfc/ef100_rx.h
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx_common.c
drivers/net/fddi/skfp/cfm.c
drivers/net/fddi/skfp/fplustm.c
drivers/net/fddi/skfp/hwmtm.c
drivers/net/fddi/skfp/smt.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/wan/dlci.c
drivers/net/wan/hdlc.c
drivers/net/wan/hdlc_x25.c
include/linux/netfilter_ipv6.h
include/linux/phylink.h
include/linux/sched/user.h
kernel/bpf/stackmap.c
kernel/watch_queue.c
mm/memory.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/can/j1939/socket.c
net/can/j1939/transport.c
net/core/dev.c
net/core/devlink.c
net/core/filter.c
net/core/skbuff.c
net/ipv6/netfilter.c
net/mptcp/protocol.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_compat.c
net/netfilter/nft_exthdr.c
net/qrtr/qrtr.c
net/tipc/Kconfig
net/tipc/netlink_compat.c
tools/bpf/bpftool/btf_dumper.c
tools/bpf/bpftool/gen.c
tools/bpf/bpftool/link.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/prog.c
tools/lib/bpf/bpf_helpers.h
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/btf_dump.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.map
tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
tools/testing/selftests/bpf/prog_tests/btf_dump.c
tools/testing/selftests/bpf/prog_tests/core_extern.c
tools/testing/selftests/bpf/prog_tests/core_reloc.c
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/global_data.c
tools/testing/selftests/bpf/prog_tests/mmap.c
tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
tools/testing/selftests/bpf/prog_tests/sk_lookup.c
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
tools/testing/selftests/bpf/prog_tests/varlen.c
tools/testing/selftests/bpf/progs/core_reloc_types.h
tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
tools/testing/selftests/bpf/progs/test_varlen.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_progs.h
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/netfilter/nft_flowtable.sh

index 57fe008..97fd835 100644 (file)
--- a/.mailmap
+++ b/.mailmap
 Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
-Adrian Bunk <bunk@stusta.de>
 Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
+Adrian Bunk <bunk@stusta.de>
 Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
-Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
-Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
-Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
+Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
-Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
+Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
 Andreas Herrmann <aherrman@de.ibm.com>
-Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
-Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
+Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andy Adamson <andros@citi.umich.edu>
 Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
@@ -48,40 +48,42 @@ Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
-Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
+Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
-Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
-Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
-Christoph Hellwig <hch@lst.de>
 Christophe Ricard <christophe.ricard@gmail.com>
+Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
-Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
-Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
 Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
-Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
 David Brownell <david-b@pacbell.net>
 David Woodhouse <dwmw2@shinybook.infradead.org>
-Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
-Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
+Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
+Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
-Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
 Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
@@ -92,20 +94,22 @@ Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
 Filipe Lautert <filipe@icewall.org>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
-Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
-Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Gustavo Padovan <gustavo@las.ic.unicamp.br>
+Gustavo Padovan <padovan@profusion.mobi>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
 Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
 Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
@@ -115,32 +119,32 @@ Henrik Rydberg <rydberg@bitmath.org>
 Herbert Xu <herbert@gondor.apana.org.au>
 Jacob Shin <Jacob.Shin@amd.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@google.com>
-Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk.kim@samsung.com>
+Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
 Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
-James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
 James Hogan <jhogan@kernel.org> <james@albanarts.com>
+James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
 James Ketrenos <jketreno@io.(none)>
 Jan Glauber <jan.glauber@gmail.com> <jang@de.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com>
 Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
 Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
-Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
 Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
 Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
 Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
-Jean Tourrilhes <jt@hpl.hp.com>
 <jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
+Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
-Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
 Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
 Jeff Layton <jlayton@kernel.org> <jlayton@primarydata.com>
+Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
@@ -164,30 +168,30 @@ Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
 Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
-Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
-Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
-Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
-Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
 Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
+Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
+Leon Romanovsky <leon@kernel.org> <leonro@mellanox.com>
 Linas Vepstas <linas@austin.ibm.com>
-Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
-Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
+Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
-Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
+Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Mark Brown <broonie@sirena.org.uk>
 Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
-Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Mathieu Othacehe <m.othacehe@gmail.com>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
 Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
@@ -197,17 +201,17 @@ Matthew Wilcox <willy@infradead.org> <willy@debian.org>
 Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthieu CASTET <castet.matthieu@free.fr>
-Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
+Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
+Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
+Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
+Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@redhat.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
-Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
-Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
-Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
-Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
-Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
 Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
 Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
@@ -239,13 +243,13 @@ Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
 Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
 Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
+Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
 Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.ibm.com>
 Paul E. McKenney <paulmck@kernel.org> <paulmck@linux.vnet.ibm.com>
-Paul E. McKenney <paulmck@kernel.org> <paul.mckenney@linaro.org>
 Paul E. McKenney <paulmck@kernel.org> <paulmck@us.ibm.com>
 Peter A Jonsson <pj@ludd.ltu.se>
-Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
+Peter Oruba <peter@oruba.de>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
@@ -258,23 +262,23 @@ Ralf Baechle <ralf@linux-mips.org>
 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
 Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
 Rémi Denis-Courmont <rdenis@simphalempin.com>
-Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
+Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
-Sarangdhar Joshi <spjoshi@codeaurora.org>
+Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
 Sam Ravnborg <sam@mars.ravnborg.org>
-Santosh Shilimkar <ssantosh@kernel.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>
+Santosh Shilimkar <ssantosh@kernel.org>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
-Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
 Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
-Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
+Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
@@ -285,18 +289,21 @@ Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
 Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
 Subhash Jadavani <subhashj@codeaurora.org>
 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
+Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
 Thomas Pedersen <twp@codeaurora.org>
 Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com>
 Todor Tomov <todor.too@gmail.com> <todor.tomov@linaro.org>
 Tony Luck <tony.luck@intel.com>
-TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
+TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
@@ -305,22 +312,16 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
-Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
-Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
-Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
 Will Deacon <will@kernel.org> <will.deacon@arm.com>
-Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
 Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
+Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
 Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
-Gustavo Padovan <gustavo@las.ic.unicamp.br>
-Gustavo Padovan <padovan@profusion.mobi>
-Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
-Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
-Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
-Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
index d46429b..7df2465 100644 (file)
@@ -36,6 +36,12 @@ Two sets of Questions and Answers (Q&A) are maintained.
    bpf_devel_QA
 
 
+Helper functions
+================
+
+* `bpf-helpers(7)`_ maintains a list of helpers available to eBPF programs.
+
+
 Program types
 =============
 
@@ -79,4 +85,5 @@ Other
 .. _networking-filter: ../networking/filter.rst
 .. _man-pages: https://www.kernel.org/doc/man-pages/
 .. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html
+.. _bpf-helpers(7): https://man7.org/linux/man-pages/man7/bpf-helpers.7.html
 .. _BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/
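
As a quick illustration of the "Helper functions" pointer added above, the
following is a minimal, hypothetical eBPF program (not part of this series)
that calls two of the helpers documented in bpf-helpers(7), using the libbpf
conventions from tools/lib/bpf:

// SPDX-License-Identifier: GPL-2.0
/* Illustration only: a tracepoint program using bpf-helpers(7) helpers. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	/* bpf_get_current_pid_tgid() packs the tgid in the upper 32 bits. */
	__u64 pid_tgid = bpf_get_current_pid_tgid();

	bpf_printk("execve from tgid %u\n", (__u32)(pid_tgid >> 32));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
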
index 24168b0..adc3146 100644 (file)
@@ -2860,17 +2860,6 @@ version of the linux kernel, found on http://kernel.org
 The latest version of this document can be found in the latest kernel
 source (named Documentation/networking/bonding.rst).
 
-Discussions regarding the usage of the bonding driver take place on the
-bonding-devel mailing list, hosted at sourceforge.net. If you have questions or
-problems, post them to the list.  The list address is:
-
-bonding-devel@lists.sourceforge.net
-
-The administrative interface (to subscribe or unsubscribe) can
-be found at:
-
-https://lists.sourceforge.net/lists/listinfo/bonding-devel
-
 Discussions regarding the development of the bonding driver take place
 on the main Linux network mailing list, hosted at vger.kernel.org. The list
 address is:
@@ -2881,10 +2870,3 @@ The administrative interface (to subscribe or unsubscribe) can
 be found at:
 
 http://vger.kernel.org/vger-lists.html#netdev
-
-Donald Becker's Ethernet Drivers and diag programs may be found at :
-
- - http://web.archive.org/web/%2E/http://www.scyld.com/network/
-
-You will also find a lot of information regarding Ethernet, NWay, MII,
-etc. at www.scyld.com.
index 31e43a2..cddaa43 100644 (file)
@@ -130,7 +130,7 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
 
 /**
  * __get_first_agg - get the first aggregator in the bond
- * @bond: the bond we're looking at
+ * @port: the port we're looking at
  *
  * Return the aggregator of the first slave in @bond, or %NULL if it can't be
  * found.
@@ -1626,7 +1626,7 @@ static int agg_device_up(const struct aggregator *agg)
 
 /**
  * ad_agg_selection_logic - select an aggregation group for a team
- * @aggregator: the aggregator we're looking at
+ * @agg: the aggregator we're looking at
  * @update_slave_arr: Does slave array need update?
  *
  * It is assumed that only one aggregator may be selected for a team.
@@ -1810,7 +1810,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
 
 /**
  * ad_initialize_port - initialize a given port's parameters
- * @aggregator: the aggregator we're looking at
+ * @port: the port we're looking at
  * @lacp_fast: boolean. whether fast periodic should be used
  */
 static void ad_initialize_port(struct port *port, int lacp_fast)
@@ -1967,6 +1967,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
 /**
  * bond_3ad_initiate_agg_selection - initate aggregator selection
  * @bond: bonding struct
+ * @timeout: timeout value to set
  *
  * Set the aggregation selection timer, to initiate an agg selection in
  * the very near future.  Called during first initialization, and during
@@ -2259,7 +2260,7 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
 
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
- * @bond: bonding struct to work on
+ * @work: work context to fetch bonding struct to work on from
  *
  * The state machine handling concept in this module is to check every tick
  * which state machine should operate any function. The execution order is
@@ -2500,7 +2501,7 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
 /**
  * bond_3ad_handle_link_change - handle a slave's link status change indication
  * @slave: slave struct to work on
- * @status: whether the link is now up or down
+ * @link: whether the link is now up or down
  *
  * Handle reselection of aggregator (if needed) for this port.
  */
@@ -2551,7 +2552,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
 /**
  * bond_3ad_set_carrier - set link state for bonding master
- * @bond - bonding structure
+ * @bond: bonding structure
  *
  * if we have an active aggregator, we're up, if not, we're down.
  * Presumes that we cannot have an active aggregator if there are
@@ -2664,7 +2665,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 
 /**
  * bond_3ad_update_lacp_rate - change the lacp rate
- * @bond - bonding struct
+ * @bond: bonding struct
  *
  * When modify lacp_rate parameter via sysfs,
  * update actor_oper_port_state of each port.
index 095ea51..4e1b7de 100644 (file)
@@ -1206,8 +1206,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 
 /**
  * alb_set_mac_address
- * @bond:
- * @addr:
+ * @bond: bonding we're working on
+ * @addr: MAC address to set
  *
  * In TLB mode all slaves are configured to the bond's hw address, but set
  * their dev_addr field to different addresses (based on their permanent hw
index 5ad43aa..415a37e 100644 (file)
@@ -322,6 +322,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 /**
  * bond_vlan_rx_add_vid - Propagates adding an id to slaves
  * @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
  * @vid: vlan id being added
  */
 static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
@@ -355,6 +356,7 @@ unwind:
 /**
  * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
  * @bond_dev: bonding net device that got called
+ * @proto: network protocol ID
  * @vid: vlan id being removed
  */
 static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
@@ -948,7 +950,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
 /**
  * change_active_interface - change the active slave into the specified one
  * @bond: our bonding struct
- * @new: the new slave to make the active one
+ * @new_active: the new slave to make the active one
  *
  * Set the new slave to the bond's settings and unset them on the old
  * curr_active_slave.
@@ -2205,7 +2207,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
        int ret;
 
        ret = __bond_release_one(bond_dev, slave_dev, false, true);
-       if (ret == 0 && !bond_has_slaves(bond)) {
+       if (ret == 0 && !bond_has_slaves(bond) &&
+           bond_dev->reg_state != NETREG_UNREGISTERING) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                netdev_info(bond_dev, "Destroying bond\n");
                bond_remove_proc_entry(bond);
@@ -4552,13 +4555,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
+static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
+{
+       if (speed == 0 || speed == SPEED_UNKNOWN)
+               speed = slave->speed;
+       else
+               speed = min(speed, slave->speed);
+
+       return speed;
+}
+
 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
                                           struct ethtool_link_ksettings *cmd)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       unsigned long speed = 0;
        struct list_head *iter;
        struct slave *slave;
+       u32 speed = 0;
 
        cmd->base.duplex = DUPLEX_UNKNOWN;
        cmd->base.port = PORT_OTHER;
@@ -4570,8 +4583,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
         */
        bond_for_each_slave(bond, slave, iter) {
                if (bond_slave_can_tx(slave)) {
-                       if (slave->speed != SPEED_UNKNOWN)
-                               speed += slave->speed;
+                       if (slave->speed != SPEED_UNKNOWN) {
+                               if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
+                                       speed = bond_mode_bcast_speed(slave,
+                                                                     speed);
+                               else
+                                       speed += slave->speed;
+                       }
                        if (cmd->base.duplex == DUPLEX_UNKNOWN &&
                            slave->duplex != DUPLEX_UNKNOWN)
                                cmd->base.duplex = slave->duplex;
index ef1c315..bd0ada4 100644 (file)
@@ -951,7 +951,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
 static void update_stats(struct net_device *dev)
 {
        unsigned int ioaddr = dev->base_addr;
-       u8 rx, tx, up;
+       u8 up;
 
        pr_debug("%s: updating the statistics.\n", dev->name);
 
@@ -972,8 +972,8 @@ static void update_stats(struct net_device *dev)
        dev->stats.tx_packets                   += (up&0x30) << 4;
        /* Rx packets   */                         inb(ioaddr + 7);
        /* Tx deferrals */                         inb(ioaddr + 8);
-       rx                                       = inw(ioaddr + 10);
-       tx                                       = inw(ioaddr + 12);
+       /* rx */                                   inw(ioaddr + 10);
+       /* tx */                                   inw(ioaddr + 12);
 
        EL3WINDOW(4);
        /* BadSSD */                               inb(ioaddr + 12);
index aeae796..08db4c9 100644 (file)
@@ -898,6 +898,7 @@ static int ax_close(struct net_device *dev)
 /**
  * axnet_tx_timeout - handle transmit time out condition
  * @dev: network device which has apparently fallen asleep
+ * @txqueue: unused
  *
  * Called by kernel when device never acknowledges a transmit has
  * completed (or failed) - i.e. never posted a Tx related interrupt.
index 9934421..fb37816 100644 (file)
@@ -3715,11 +3715,11 @@ failed_mii_init:
 failed_irq:
 failed_init:
        fec_ptp_stop(pdev);
-       if (fep->reg_phy)
-               regulator_disable(fep->reg_phy);
 failed_reset:
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
+       if (fep->reg_phy)
+               regulator_disable(fep->reg_phy);
 failed_regulator:
        clk_disable_unprepare(fep->clk_ahb);
 failed_clk_ahb:
index a62ddd6..c0c8efe 100644 (file)
@@ -981,7 +981,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST     0x04
 #define I40E_AQC_SET_VSI_DEFAULT               0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN          0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX            0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY       0x8000
        __le16  seid;
        __le16  vlan_tag;
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
index afad5e9..6ab52cb 100644 (file)
@@ -1966,6 +1966,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
        return status;
 }
 
+/**
+ * i40e_is_aq_api_ver_ge
+ * @aq: pointer to AdminQ info containing HW API version to compare
+ * @maj: API major value
+ * @min: API minor value
+ *
+ * Assert whether current HW API version is greater/equal than provided.
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+                                 u16 min)
+{
+       return (aq->api_maj_ver > maj ||
+               (aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
 /**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
@@ -2091,18 +2106,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 
        if (set) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-               if (rx_only_promisc &&
-                   (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
-                    (hw->aq.api_maj_ver > 1)))
-                       flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+               if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+                       flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
        }
 
        cmd->promiscuous_flags = cpu_to_le16(flags);
 
        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-       if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
-           (hw->aq.api_maj_ver > 1))
-               cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+       if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+               cmd->valid_flags |=
+                       cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 
        cmd->seid = cpu_to_le16(seid);
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2199,11 +2212,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-       if (enable)
+       if (enable) {
                flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+               if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+                       flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+       }
 
        cmd->promiscuous_flags = cpu_to_le16(flags);
        cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+       if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+               cmd->valid_flags |=
+                       cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
        cmd->seid = cpu_to_le16(seid);
        cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
index b539935..2e433fd 100644 (file)
@@ -15463,6 +15463,9 @@ static void i40e_remove(struct pci_dev *pdev)
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
        i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
+       while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+               usleep_range(1000, 2000);
+
        /* no more scheduling of any task */
        set_bit(__I40E_SUSPENDED, pf->state);
        set_bit(__I40E_DOWN, pf->state);
index 7a6f2a0..9593aa4 100644 (file)
@@ -5142,6 +5142,8 @@ static int igc_probe(struct pci_dev *pdev,
        device_set_wakeup_enable(&adapter->pdev->dev,
                                 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
 
+       igc_ptp_init(adapter);
+
        /* reset the hardware with the new settings */
        igc_reset(adapter);
 
@@ -5158,9 +5160,6 @@ static int igc_probe(struct pci_dev *pdev,
         /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
 
-       /* do hw tstamp init after resetting */
-       igc_ptp_init(adapter);
-
        /* Check if Media Autosense is enabled */
        adapter->ei = *ei;
 
index e67d465..36c9992 100644 (file)
@@ -496,8 +496,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
        adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
        adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
 
-       igc_ptp_reset(adapter);
-
        adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                                                &adapter->pdev->dev);
        if (IS_ERR(adapter->ptp_clock)) {
index 5975521..93c4cf7 100644 (file)
@@ -1226,8 +1226,8 @@ int otx2_config_npa(struct otx2_nic *pfvf)
        if (!hw->pool_cnt)
                return -EINVAL;
 
-       qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
-                                 hw->pool_cnt, GFP_KERNEL);
+       qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
+                                 sizeof(struct otx2_pool), GFP_KERNEL);
        if (!qset->pool)
                return -ENOMEM;
 
index 13ba1a4..012925e 100644 (file)
 #define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH    \
                ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
 
+bool ef100_rx_buf_hash_valid(const u8 *prefix)
+{
+       return PREFIX_FIELD(prefix, RSS_HASH_VALID);
+}
+
 static bool check_fcs(struct efx_channel *channel, u32 *prefix)
 {
        u16 rxclass;
index f2f2668..fe45b36 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "net_driver.h"
 
+bool ef100_rx_buf_hash_valid(const u8 *prefix);
 void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
 void ef100_rx_write(struct efx_rx_queue *rx_queue);
 void __ef100_rx_packet(struct efx_channel *channel);
index a9808e8..daf0c00 100644 (file)
@@ -45,6 +45,14 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
                                __ef100_rx_packet, __efx_rx_packet,
                                channel);
 }
+static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
+{
+       if (efx->type->rx_buf_hash_valid)
+               return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
+                                      ef100_rx_buf_hash_valid,
+                                      prefix);
+       return true;
+}
 
 /* Maximum number of TCP segments we support for soft-TSO */
 #define EFX_TSO_MAX_SEGS       100
index 7bb7ecb..dcb741d 100644 (file)
@@ -1265,6 +1265,7 @@ struct efx_udp_tunnel {
  * @rx_write: Write RX descriptors and doorbell
  * @rx_defer_refill: Generate a refill reminder event
  * @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
  * @ev_probe: Allocate resources for event queue
  * @ev_init: Initialise event queue on the NIC
  * @ev_fini: Deinitialise event queue on the NIC
@@ -1409,6 +1410,7 @@ struct efx_nic_type {
        void (*rx_write)(struct efx_rx_queue *rx_queue);
        void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
        void (*rx_packet)(struct efx_channel *channel);
+       bool (*rx_buf_hash_valid)(const u8 *prefix);
        int (*ev_probe)(struct efx_channel *channel);
        int (*ev_init)(struct efx_channel *channel);
        void (*ev_fini)(struct efx_channel *channel);
index fb77c7b..ef9bca9 100644 (file)
@@ -525,7 +525,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                return;
        }
 
-       if (efx->net_dev->features & NETIF_F_RXHASH)
+       if (efx->net_dev->features & NETIF_F_RXHASH &&
+           efx_rx_buf_hash_valid(efx, eh))
                skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
                             PKT_HASH_TYPE_L3);
        if (csum) {
index e9bf429..4eea340 100644 (file)
 #define KERNEL
 #include "h/smtstate.h"
 
-#ifndef        lint
-static const char ID_sccs[] = "@(#)cfm.c       2.18 98/10/06 (C) SK " ;
-#endif
-
 /*
  * FSM Macros
  */
@@ -208,7 +204,6 @@ void cfm(struct s_smc *smc, int event)
 {
        int     state ;         /* remember last state */
        int     cond ;
-       int     oldstate ;
 
        /* We will do the following: */
        /*  - compute the variable WC_Flag for every port (This is where */
@@ -222,7 +217,6 @@ void cfm(struct s_smc *smc, int event)
        /*  - change the portstates */
        cem_priv_state (smc, event);
 
-       oldstate = smc->mib.fddiSMTCF_State ;
        do {
                DB_CFM("CFM : state %s%s event %s",
                       smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
@@ -250,18 +244,11 @@ void cfm(struct s_smc *smc, int event)
        if (cond != smc->mib.fddiSMTPeerWrapFlag)
                smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
 
-#if    0
        /*
-        * Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired
+        * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
         * to the primary path.
         */
-       /*
-        * path change
-        */
-       if (smc->mib.fddiSMTCF_State != oldstate) {
-               smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
-       }
-#endif
+
 #endif /* no SLIM_SMT */
 
        /*
index 02966d1..4cbb145 100644 (file)
 #include <linux/bitrev.h>
 #include <linux/etherdevice.h>
 
-#ifndef        lint
-static const char ID_sccs[] = "@(#)fplustm.c   1.32 99/02/23 (C) SK " ;
-#endif
-
 #ifndef UNUSED
 #ifdef  lint
 #define UNUSED(x)      (x) = (x)
index 3412e0f..1070390 100644 (file)
  *
  ******************************************************************************/
 
-#ifndef        lint
-static char const ID_sccs[] = "@(#)hwmtm.c     1.40 99/05/31 (C) SK" ;
-#endif
-
 #define        HWMTM
 
 #ifndef FDDI
index b8c59d8..774a6e3 100644 (file)
 #define KERNEL
 #include "h/smtstate.h"
 
-#ifndef        lint
-static const char ID_sccs[] = "@(#)smt.c       2.43 98/11/23 (C) SK " ;
-#endif
-
 /*
  * FC in SMbuf
  */
@@ -1561,7 +1557,7 @@ u_long smt_get_tid(struct s_smc *smc)
        return tid & 0x3fffffffL;
 }
 
-
+#ifdef LITTLE_ENDIAN
 /*
  * table of parameter lengths
  */
@@ -1641,6 +1637,7 @@ static const struct smt_pdef {
 } ;
 
 #define N_SMT_PLEN     ARRAY_SIZE(smt_pdef)
+#endif
 
 int smt_check_para(struct s_smc *smc, struct smt_header        *sm,
                   const u_short list[])
index 15e87c0..5bca94c 100644 (file)
@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
        kfree(port);
 }
 
+#define IPVLAN_ALWAYS_ON_OFLOADS \
+       (NETIF_F_SG | NETIF_F_HW_CSUM | \
+        NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
+
+#define IPVLAN_ALWAYS_ON \
+       (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+
 #define IPVLAN_FEATURES \
-       (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+       (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
         NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
 
+       /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
+
 #define IPVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
        dev->features = phy_dev->features & IPVLAN_FEATURES;
-       dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+       dev->features |= IPVLAN_ALWAYS_ON;
+       dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
+       dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
        dev->hw_enc_features |= dev->features;
        dev->gso_max_size = phy_dev->gso_max_size;
        dev->gso_max_segs = phy_dev->gso_max_segs;
@@ -227,7 +238,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-       return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+       features |= NETIF_F_ALL_FOR_ALL;
+       features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+       features = netdev_increment_features(ipvlan->phy_dev->features,
+                                            features, features);
+       features |= IPVLAN_ALWAYS_ON;
+       features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
+
+       return features;
 }
 
 static void ipvlan_change_rx_flags(struct net_device *dev, int change)
@@ -734,10 +752,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
 
        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
                        ipvlan->dev->gso_max_size = dev->gso_max_size;
                        ipvlan->dev->gso_max_segs = dev->gso_max_segs;
-                       netdev_features_change(ipvlan->dev);
+                       netdev_update_features(ipvlan->dev);
                }
                break;
 
index 7bcee41..3ca4daf 100644 (file)
@@ -295,14 +295,13 @@ static int dlci_close(struct net_device *dev)
 {
        struct dlci_local       *dlp;
        struct frad_local       *flp;
-       int                     err;
 
        netif_stop_queue(dev);
 
        dlp = netdev_priv(dev);
 
        flp = netdev_priv(dlp->slave);
-       err = (*flp->deactivate)(dlp->slave, dev);
+       (*flp->deactivate)(dlp->slave, dev);
 
        return 0;
 }
index dfc1677..386ed2a 100644 (file)
@@ -230,6 +230,7 @@ static void hdlc_setup_dev(struct net_device *dev)
        dev->max_mtu             = HDLC_MAX_MTU;
        dev->type                = ARPHRD_RAWHDLC;
        dev->hard_header_len     = 16;
+       dev->needed_headroom     = 0;
        dev->addr_len            = 0;
        dev->header_ops          = &hdlc_null_ops;
 }
index f70336b..f52b9fe 100644 (file)
@@ -107,8 +107,14 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        int result;
 
+       /* There should be a pseudo header of 1 byte added by upper layers.
+        * Check to make sure it is there before reading it.
+        */
+       if (skb->len < 1) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
 
-       /* X.25 to LAPB */
        switch (skb->data[0]) {
        case X25_IFACE_DATA:    /* Data to be transmitted */
                skb_pull(skb, 1);
@@ -294,6 +300,15 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
                        return result;
 
                memcpy(&state(hdlc)->settings, &new_settings, size);
+
+               /* There's no header_ops so hard_header_len should be 0. */
+               dev->hard_header_len = 0;
+               /* When transmitting data:
+                * first we'll remove a pseudo header of 1 byte,
+                * then we'll prepend an LAPB header of at most 3 bytes.
+                */
+               dev->needed_headroom = 3 - 1;
+
                dev->type = ARPHRD_X25;
                call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
                netif_dormant_off(dev);
index aac42c2..9b67394 100644 (file)
@@ -58,7 +58,6 @@ struct nf_ipv6_ops {
                        int (*output)(struct net *, struct sock *, struct sk_buff *));
        int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
 #if IS_MODULE(CONFIG_IPV6)
-       int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
        int (*br_fragment)(struct net *net, struct sock *sk,
                           struct sk_buff *skb,
                           struct nf_bridge_frag_data *data,
@@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
 
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
-                                   u32 user)
-{
-#if IS_MODULE(CONFIG_IPV6)
-       const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
-       if (!v6_ops)
-               return 1;
-
-       return v6_ops->br_defrag(net, skb, user);
-#elif IS_BUILTIN(CONFIG_IPV6)
-       return nf_ct_frag6_gather(net, skb, user);
-#else
-       return 1;
-#endif
-}
-
 int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                    struct nf_bridge_frag_data *data,
                    int (*output)(struct net *, struct sock *sk,
index a8e8763..c36fb41 100644 (file)
@@ -402,7 +402,8 @@ void pcs_get_state(struct phylink_pcs *pcs,
  * For most 10GBASE-R, there is no advertisement.
  */
 int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
-              phy_interface_t interface, const unsigned long *advertising);
+              phy_interface_t interface, const unsigned long *advertising,
+              bool permit_pause_to_mac);
 
 /**
  * pcs_an_restart() - restart 802.3z BaseX autonegotiation
index 917d88e..a8ec3b6 100644 (file)
@@ -36,6 +36,9 @@ struct user_struct {
     defined(CONFIG_NET) || defined(CONFIG_IO_URING)
        atomic_long_t locked_vm;
 #endif
+#ifdef CONFIG_WATCH_QUEUE
+       atomic_t nr_watches;    /* The number of watches this user currently has */
+#endif
 
        /* Miscellaneous per-user rate limit */
        struct ratelimit_state ratelimit;
index 4fd830a..cfed0ac 100644 (file)
@@ -213,11 +213,13 @@ static int stack_map_get_build_id_32(void *page_addr,
 
        phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
 
-       for (i = 0; i < ehdr->e_phnum; ++i)
-               if (phdr[i].p_type == PT_NOTE)
-                       return stack_map_parse_build_id(page_addr, build_id,
-                                       page_addr + phdr[i].p_offset,
-                                       phdr[i].p_filesz);
+       for (i = 0; i < ehdr->e_phnum; ++i) {
+               if (phdr[i].p_type == PT_NOTE &&
+                   !stack_map_parse_build_id(page_addr, build_id,
+                                             page_addr + phdr[i].p_offset,
+                                             phdr[i].p_filesz))
+                       return 0;
+       }
        return -EINVAL;
 }
 
@@ -236,11 +238,13 @@ static int stack_map_get_build_id_64(void *page_addr,
 
        phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
 
-       for (i = 0; i < ehdr->e_phnum; ++i)
-               if (phdr[i].p_type == PT_NOTE)
-                       return stack_map_parse_build_id(page_addr, build_id,
-                                       page_addr + phdr[i].p_offset,
-                                       phdr[i].p_filesz);
+       for (i = 0; i < ehdr->e_phnum; ++i) {
+               if (phdr[i].p_type == PT_NOTE &&
+                   !stack_map_parse_build_id(page_addr, build_id,
+                                             page_addr + phdr[i].p_offset,
+                                             phdr[i].p_filesz))
+                       return 0;
+       }
        return -EINVAL;
 }
 
index f74020f..0ef8f65 100644 (file)
@@ -393,6 +393,7 @@ static void free_watch(struct rcu_head *rcu)
        struct watch *watch = container_of(rcu, struct watch, rcu);
 
        put_watch_queue(rcu_access_pointer(watch->queue));
+       atomic_dec(&watch->cred->user->nr_watches);
        put_cred(watch->cred);
 }
 
@@ -452,6 +453,13 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
        watch->cred = get_current_cred();
        rcu_assign_pointer(watch->watch_list, wlist);
 
+       if (atomic_inc_return(&watch->cred->user->nr_watches) >
+           task_rlimit(current, RLIMIT_NOFILE)) {
+               atomic_dec(&watch->cred->user->nr_watches);
+               put_cred(watch->cred);
+               return -EAGAIN;
+       }
+
        spin_lock_bh(&wqueue->lock);
        kref_get(&wqueue->usage);
        kref_get(&watch->usage);
index 3a7779d..602f428 100644 (file)
@@ -4247,6 +4247,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
                                vmf->flags & FAULT_FLAG_WRITE)) {
                update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
        } else {
+               /* Skip spurious TLB flush for retried page fault */
+               if (vmf->flags & FAULT_FLAG_TRIED)
+                       goto unlock;
                /*
                 * This is needed only for protection faults but the arch code
                 * is not yet telling us if this is a protection fault or not.
index 1641f41..ebe33b6 100644 (file)
@@ -2238,6 +2238,10 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
        struct ebt_table *t;
        struct net *net = sock_net(sk);
 
+       if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) &&
+           *len != sizeof(struct compat_ebt_replace))
+               return -EINVAL;
+
        if (copy_from_user(&tmp, user, sizeof(tmp)))
                return -EFAULT;
 
index 8096732..8d033a7 100644 (file)
@@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
 static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
                                     const struct nf_hook_state *state)
 {
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
        enum ip_conntrack_info ctinfo;
        struct br_input_skb_cb cb;
@@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
 
        br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
 
-       err = nf_ipv6_br_defrag(state->net, skb,
-                               IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
+       err = nf_ct_frag6_gather(state->net, skb,
+                                IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
        /* queued */
        if (err == -EINPROGRESS)
                return NF_STOLEN;
 
        br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
        return err == 0 ? NF_ACCEPT : NF_DROP;
+#else
+       return NF_ACCEPT;
+#endif
 }
 
 static int nf_ct_br_ip_check(const struct sk_buff *skb)
index 78ff9b3..b93876c 100644 (file)
@@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk)
        spin_lock_init(&jsk->sk_session_queue_lock);
        INIT_LIST_HEAD(&jsk->sk_session_queue);
        sk->sk_destruct = j1939_sk_sock_destruct;
+       sk->sk_protocol = CAN_J1939;
 
        return 0;
 }
@@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
                        goto out_release_sock;
                }
 
+               if (!ndev->ml_priv) {
+                       netdev_warn_once(ndev,
+                                        "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
+                       dev_put(ndev);
+                       ret = -ENODEV;
+                       goto out_release_sock;
+               }
+
                priv = j1939_netdev_start(ndev);
                dev_put(ndev);
                if (IS_ERR(priv)) {
@@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
 static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
                                       const struct j1939_sock *jsk, int peer)
 {
+       /* There are two holes (2 bytes and 3 bytes) to clear to avoid
+        * leaking kernel information to user space.
+        */
+       memset(addr, 0, J1939_MIN_NAMELEN);
+
        addr->can_family = AF_CAN;
        addr->can_ifindex = jsk->ifindex;
        addr->can_addr.j1939.pgn = jsk->addr.pgn;
index 9f99af5..a8dd956 100644 (file)
@@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session,
        skb_queue_tail(&session->skb_queue, skb);
 }
 
-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+static struct
+sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
+                                         unsigned int offset_start)
 {
        struct j1939_priv *priv = session->priv;
+       struct j1939_sk_buff_cb *do_skcb;
        struct sk_buff *skb = NULL;
        struct sk_buff *do_skb;
-       struct j1939_sk_buff_cb *do_skcb;
-       unsigned int offset_start;
        unsigned long flags;
 
-       offset_start = session->pkt.dpo * 7;
-
        spin_lock_irqsave(&session->skb_queue.lock, flags);
        skb_queue_walk(&session->skb_queue, do_skb) {
                do_skcb = j1939_skb_to_cb(do_skb);
@@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
        return skb;
 }
 
+static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
+{
+       unsigned int offset_start;
+
+       offset_start = session->pkt.dpo * 7;
+       return j1939_session_skb_find_by_offset(session, offset_start);
+}
+
 /* see if we are receiver
  * returns 0 for broadcasts, although we will receive them
  */
@@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session)
                return ret;
 
        session->last_txcmd = dat[0];
-       if (dat[0] == J1939_TP_CMD_BAM)
+       if (dat[0] == J1939_TP_CMD_BAM) {
                j1939_tp_schedule_txtimer(session, 50);
-
-       j1939_tp_set_rxtimeout(session, 1250);
+               j1939_tp_set_rxtimeout(session, 250);
+       } else {
+               j1939_tp_set_rxtimeout(session, 1250);
+       }
 
        netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
 
@@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
        int ret = 0;
        u8 dat[8];
 
-       se_skb = j1939_session_skb_find(session);
+       se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
        if (!se_skb)
                return -ENOBUFS;
 
@@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session)
                if (len > 7)
                        len = 7;
 
+               if (offset + len > se_skb->len) {
+                       netdev_err_once(priv->ndev,
+                                       "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
+                                       __func__, session, skcb->offset, se_skb->len, session->pkt.tx);
+                       return -EOVERFLOW;
+               }
+
+               if (!len) {
+                       ret = -ENOBUFS;
+                       break;
+               }
+
                memcpy(&dat[1], &tpdat[offset], len);
                ret = j1939_tp_tx_dat(session, dat, len + 1);
                if (ret < 0) {
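
The two checks added above come from the same observation: the chunk length is derived from the session's announced total size, so it must be validated against the skb that is actually queued before memcpy() reads from it. A generic sketch of that guard, assuming 7 payload bytes per frame as in the code above (copy_next_chunk() is a hypothetical helper, not the kernel function):

    #include <errno.h>
    #include <string.h>

    static int copy_next_chunk(unsigned char *dst, const unsigned char *buf,
                               size_t buf_len, size_t total_len, size_t offset)
    {
            size_t len = total_len > offset ? total_len - offset : 0;

            if (len > 7)                    /* at most 7 payload bytes per frame */
                    len = 7;
            if (offset + len > buf_len)     /* would read outside the queued buffer */
                    return -EOVERFLOW;
            if (!len)                       /* nothing left to send */
                    return -ENOBUFS;

            memcpy(dst, buf + offset, len);
            return (int)len;
    }
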
@@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session,
        lockdep_assert_held(&session->priv->active_session_list_lock);
 
        session->err = j1939_xtp_abort_to_errno(priv, err);
+       session->state = J1939_SESSION_WAITING_ABORT;
        /* do not send aborts on incoming broadcasts */
        if (!j1939_cb_is_broadcast(&session->skcb)) {
-               session->state = J1939_SESSION_WAITING_ABORT;
                j1939_xtp_tx_abort(priv, &session->skcb,
                                   !session->transmission,
                                   err, session->skcb.addr.pgn);
@@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
                 * cleanup including propagation of the error to user space.
                 */
                break;
+       case -EOVERFLOW:
+               j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
+               break;
        case 0:
                session->tx_retry = 0;
                break;
@@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
                        return;
                }
                session = j1939_xtp_rx_rts_session_new(priv, skb);
-               if (!session)
+               if (!session) {
+                       if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
+                               netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
+                                           __func__);
                        return;
+               }
        } else {
                if (j1939_xtp_rx_rts_session_active(session, skb)) {
                        j1939_session_put(session);
@@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
        }
        session->last_cmd = cmd;
 
-       j1939_tp_set_rxtimeout(session, 1250);
-
-       if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
-               j1939_session_txtimer_cancel(session);
-               j1939_tp_schedule_txtimer(session, 0);
+       if (cmd == J1939_TP_CMD_BAM) {
+               if (!session->transmission)
+                       j1939_tp_set_rxtimeout(session, 750);
+       } else {
+               if (!session->transmission) {
+                       j1939_session_txtimer_cancel(session);
+                       j1939_tp_schedule_txtimer(session, 0);
+               }
+               j1939_tp_set_rxtimeout(session, 1250);
        }
 
        j1939_session_put(session);
@@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
        int offset;
        int nbytes;
        bool final = false;
+       bool remain = false;
        bool do_cts_eoma = false;
        int packet;
 
@@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
                            __func__, session);
                goto out_session_cancel;
        }
-       se_skb = j1939_session_skb_find(session);
+
+       se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
        if (!se_skb) {
                netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
                            session);
@@ -1769,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
        }
 
        tpdat = se_skb->data;
-       memcpy(&tpdat[offset], &dat[1], nbytes);
+       if (!session->transmission) {
+               memcpy(&tpdat[offset], &dat[1], nbytes);
+       } else {
+               int err;
+
+               err = memcmp(&tpdat[offset], &dat[1], nbytes);
+               if (err)
+                       netdev_err_once(priv->ndev,
+                                       "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
+                                       __func__, session,
+                                       nbytes, &dat[1],
+                                       nbytes, &tpdat[offset]);
+       }
+
        if (packet == session->pkt.rx)
                session->pkt.rx++;
 
@@ -1777,6 +1824,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
            j1939_cb_is_broadcast(&session->skcb)) {
                if (session->pkt.rx >= session->pkt.total)
                        final = true;
+               else
+                       remain = true;
        } else {
                /* never final, an EOMA must follow */
                if (session->pkt.rx >= session->pkt.last)
@@ -1784,7 +1833,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
        }
 
        if (final) {
+               j1939_session_timers_cancel(session);
                j1939_session_completed(session);
+       } else if (remain) {
+               if (!session->transmission)
+                       j1939_tp_set_rxtimeout(session, 750);
        } else if (do_cts_eoma) {
                j1939_tp_set_rxtimeout(session, 1250);
                if (!session->transmission)
@@ -1829,6 +1882,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
                else
                        j1939_xtp_rx_dat_one(session, skb);
        }
+
+       if (j1939_cb_is_broadcast(skcb)) {
+               session = j1939_session_get_by_addr(priv, &skcb->addr, false,
+                                                   false);
+               if (session)
+                       j1939_xtp_rx_dat_one(session, skb);
+       }
 }
 
 /* j1939 main intf */
@@ -1920,7 +1980,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
                if (j1939_tp_im_transmitter(skcb))
                        j1939_xtp_rx_rts(priv, skb, true);
 
-               if (j1939_tp_im_receiver(skcb))
+               if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
                        j1939_xtp_rx_rts(priv, skb, false);
 
                break;
@@ -1984,7 +2044,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
 {
        struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
 
-       if (!j1939_tp_im_involved_anydir(skcb))
+       if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
                return 0;
 
        switch (skcb->addr.pgn) {
@@ -2017,6 +2077,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
        if (!skb->sk)
                return;
 
+       if (skb->sk->sk_family != AF_CAN ||
+           skb->sk->sk_protocol != CAN_J1939)
+               return;
+
        j1939_session_list_lock(priv);
        session = j1939_session_get_simple(priv, skb);
        j1939_session_list_unlock(priv);
index 7df6c96..b5d1129 100644 (file)
@@ -8913,10 +8913,6 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
                NL_SET_ERR_MSG(extack, "Active program does not match expected");
                return -EEXIST;
        }
-       if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
-               NL_SET_ERR_MSG(extack, "XDP program already attached");
-               return -EBUSY;
-       }
 
        /* put effective new program into new_prog */
        if (link)
@@ -8927,6 +8923,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
                enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
                                               ? XDP_MODE_DRV : XDP_MODE_SKB;
 
+               if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
+                       NL_SET_ERR_MSG(extack, "XDP program already attached");
+                       return -EBUSY;
+               }
                if (!offload && dev_xdp_prog(dev, other_mode)) {
                        NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
                        return -EEXIST;
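
The semantics of XDP_FLAGS_UPDATE_IF_NOEXIST are unchanged; the check now sits with the rest of the new-program validation instead of running before the link handling. From user space the flag is simply passed at attach time, roughly like this (sketch using the libbpf call available in this era; error handling trimmed, function name is illustrative):

    #include <bpf/libbpf.h>
    #include <linux/if_link.h>

    /* Attach prog_fd to ifindex only if nothing is attached yet in native
     * (driver) mode; the kernel answers -EBUSY otherwise. */
    static int xdp_attach_if_none(int ifindex, int prog_fd)
    {
            __u32 flags = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_DRV_MODE;

            return bpf_set_link_xdp_fd(ifindex, prog_fd, flags);
    }
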
index e674f0f..e5feb87 100644 (file)
@@ -4063,7 +4063,7 @@ static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
 {
        lockdep_assert_held(&devlink->lock);
 
-       if (WARN_ON(xa_load(&devlink->snapshot_ids, id)))
+       if (xa_load(&devlink->snapshot_ids, id))
                return -EEXIST;
 
        return xa_err(xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
index 7124f0f..b2df520 100644 (file)
@@ -8317,15 +8317,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 /* Helper macro for adding read access to tcp_sock or sock fields. */
 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                        \
        do {                                                                  \
+               int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2;     \
                BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >                   \
                             sizeof_field(struct bpf_sock_ops, BPF_FIELD));   \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,       \
+                                         offsetof(struct bpf_sock_ops_kern,  \
+                                         temp));                             \
+                       fullsock_reg = reg;                                   \
+                       jmp += 2;                                             \
+               }                                                             \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
                                                struct bpf_sock_ops_kern,     \
                                                is_fullsock),                 \
-                                     si->dst_reg, si->src_reg,               \
+                                     fullsock_reg, si->src_reg,              \
                                      offsetof(struct bpf_sock_ops_kern,      \
                                               is_fullsock));                 \
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);            \
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
+               if (si->dst_reg == si->src_reg)                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
                                                struct bpf_sock_ops_kern, sk),\
                                      si->dst_reg, si->src_reg,               \
@@ -8334,6 +8350,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                                                       OBJ_FIELD),            \
                                      si->dst_reg, si->dst_reg,               \
                                      offsetof(OBJ, OBJ_FIELD));              \
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_JMP_A(1);                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
+               }                                                             \
+       } while (0)
+
+#define SOCK_OPS_GET_SK()                                                            \
+       do {                                                                  \
+               int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1;     \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == reg || si->src_reg == reg)                 \
+                       reg--;                                                \
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,       \
+                                         offsetof(struct bpf_sock_ops_kern,  \
+                                         temp));                             \
+                       fullsock_reg = reg;                                   \
+                       jmp += 2;                                             \
+               }                                                             \
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
+                                               struct bpf_sock_ops_kern,     \
+                                               is_fullsock),                 \
+                                     fullsock_reg, si->src_reg,              \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                              is_fullsock));                 \
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);         \
+               if (si->dst_reg == si->src_reg)                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
+                                               struct bpf_sock_ops_kern, sk),\
+                                     si->dst_reg, si->src_reg,               \
+                                     offsetof(struct bpf_sock_ops_kern, sk));\
+               if (si->dst_reg == si->src_reg) {                             \
+                       *insn++ = BPF_JMP_A(1);                               \
+                       *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,       \
+                                     offsetof(struct bpf_sock_ops_kern,      \
+                                     temp));                                 \
+               }                                                             \
        } while (0)
 
 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
@@ -8620,17 +8679,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
                SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
                break;
        case offsetof(struct bpf_sock_ops, sk):
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-                                               struct bpf_sock_ops_kern,
-                                               is_fullsock),
-                                     si->dst_reg, si->src_reg,
-                                     offsetof(struct bpf_sock_ops_kern,
-                                              is_fullsock));
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
-               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
-                                               struct bpf_sock_ops_kern, sk),
-                                     si->dst_reg, si->src_reg,
-                                     offsetof(struct bpf_sock_ops_kern, sk));
+               SOCK_OPS_GET_SK();
                break;
        }
        return insn - insn_buf;
index 7e2e502..5c3b906 100644 (file)
@@ -5418,8 +5418,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                goto err_free;
-
-       if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+       /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
+       if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
                goto err_free;
 
        vhdr = (struct vlan_hdr *)skb->data;
index 409e79b..6d0e942 100644 (file)
@@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = {
        .route_input            = ip6_route_input,
        .fragment               = ip6_fragment,
        .reroute                = nf_ip6_reroute,
-#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
-       .br_defrag              = nf_ct_frag6_gather,
-#endif
 #if IS_MODULE(CONFIG_IPV6)
        .br_fragment            = br_ip6_fragment,
 #endif
index 8c1d1a5..1aad411 100644 (file)
@@ -725,8 +725,10 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                if (!psize)
                        return -EINVAL;
 
-               if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
+               if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
+                       iov_iter_revert(&msg->msg_iter, psize);
                        return -ENOMEM;
+               }
        } else {
                offset = dfrag->offset;
                psize = min_t(size_t, dfrag->data_len, avail_size);
@@ -737,8 +739,11 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
         */
        ret = do_tcp_sendpages(ssk, page, offset, psize,
                               msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
-       if (ret <= 0)
+       if (ret <= 0) {
+               if (!retransmission)
+                       iov_iter_revert(&msg->msg_iter, psize);
                return ret;
+       }
 
        frag_truesize += ret;
        if (!retransmission) {
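
Both iov_iter_revert() calls above give back the bytes already consumed from the caller's msg_iter when the send cannot proceed, so a later retry copies the same data again instead of silently skipping it. A toy model of the pattern in plain C (byte_cursor is not the kernel's struct iov_iter, just an illustration of the consume/revert idea):

    #include <stddef.h>
    #include <string.h>

    struct byte_cursor {
            const char *buf;
            size_t len;
            size_t pos;
    };

    /* Consume up to n bytes from the cursor, like copying from an iterator. */
    static size_t cursor_copy(struct byte_cursor *c, char *dst, size_t n)
    {
            size_t avail = c->len - c->pos;

            if (n > avail)
                    n = avail;
            memcpy(dst, c->buf + c->pos, n);
            c->pos += n;
            return n;
    }

    /* Undo a failed consumption, like iov_iter_revert(). */
    static void cursor_revert(struct byte_cursor *c, size_t n)
    {
            c->pos = n <= c->pos ? c->pos - n : 0;
    }

    /* usage: n = cursor_copy(&cur, page, psize);
     *        if (send_failed)
     *                cursor_revert(&cur, n);  // a retry sees the same bytes */
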
@@ -1388,7 +1393,9 @@ static void mptcp_worker(struct work_struct *work)
        struct mptcp_data_frag *dfrag;
        u64 orig_write_seq;
        size_t copied = 0;
-       struct msghdr msg;
+       struct msghdr msg = {
+               .msg_flags = MSG_DONTWAIT,
+       };
        long timeo = 0;
 
        lock_sock(sk);
@@ -1421,7 +1428,6 @@ static void mptcp_worker(struct work_struct *work)
 
        lock_sock(ssk);
 
-       msg.msg_flags = MSG_DONTWAIT;
        orig_len = dfrag->data_len;
        orig_offset = dfrag->offset;
        orig_write_seq = dfrag->data_seq;
index d878e34..fd814e5 100644 (file)
@@ -2018,8 +2018,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        if (nla[NFTA_CHAIN_NAME]) {
                chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
        } else {
-               if (!(flags & NFT_CHAIN_BINDING))
-                       return -EINVAL;
+               if (!(flags & NFT_CHAIN_BINDING)) {
+                       err = -EINVAL;
+                       goto err1;
+               }
 
                snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
                chain->name = kstrdup(name, GFP_KERNEL);
index 6428856..8e56f35 100644 (file)
@@ -27,8 +27,6 @@ struct nft_xt_match_priv {
        void *info;
 };
 
-static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1);
-
 static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
                                                const char *tablename)
 {
@@ -215,6 +213,17 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
        return 0;
 }
 
+static void nft_compat_wait_for_destructors(void)
+{
+       /* xtables matches or targets can have side effects, e.g.
+        * creation/destruction of /proc files.
+        * The xt ->destroy functions are run asynchronously from a
+        * work queue.  If there are pending invocations we thus
+        * need to wait for those to finish.
+        */
+       nf_tables_trans_destroy_flush_work();
+}
+
 static int
 nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                const struct nlattr * const tb[])
@@ -238,14 +247,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
 
-       /* xtables matches or targets can have side effects, e.g.
-        * creation/destruction of /proc files.
-        * The xt ->destroy functions are run asynchronously from
-        * work queue.  If we have pending invocations we thus
-        * need to wait for those to finish.
-        */
-       if (refcount_read(&nft_compat_pending_destroy) > 1)
-               nf_tables_trans_destroy_flush_work();
+       nft_compat_wait_for_destructors();
 
        ret = xt_check_target(&par, size, proto, inv);
        if (ret < 0)
@@ -260,7 +262,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
 static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr)
 {
-       refcount_dec(&nft_compat_pending_destroy);
        module_put(me);
        kfree(expr->ops);
 }
@@ -468,6 +469,8 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
 
+       nft_compat_wait_for_destructors();
+
        return xt_check_match(&par, size, proto, inv);
 }
 
@@ -716,14 +719,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
 
 static struct nft_expr_type nft_match_type;
 
-static void nft_mt_tg_deactivate(const struct nft_ctx *ctx,
-                                const struct nft_expr *expr,
-                                enum nft_trans_phase phase)
-{
-       if (phase == NFT_TRANS_COMMIT)
-               refcount_inc(&nft_compat_pending_destroy);
-}
-
 static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
                     const struct nlattr * const tb[])
@@ -762,7 +757,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
        ops->type = &nft_match_type;
        ops->eval = nft_match_eval;
        ops->init = nft_match_init;
-       ops->deactivate = nft_mt_tg_deactivate,
        ops->destroy = nft_match_destroy;
        ops->dump = nft_match_dump;
        ops->validate = nft_match_validate;
@@ -853,7 +847,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
        ops->init = nft_target_init;
        ops->destroy = nft_target_destroy;
-       ops->deactivate = nft_mt_tg_deactivate,
        ops->dump = nft_target_dump;
        ops->validate = nft_target_validate;
        ops->data = target;
@@ -917,8 +910,6 @@ static void __exit nft_compat_module_exit(void)
        nfnetlink_subsys_unregister(&nfnl_compat_subsys);
        nft_unregister_expr(&nft_target_type);
        nft_unregister_expr(&nft_match_type);
-
-       WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1);
 }
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
index 0778283..3c48cdc 100644 (file)
@@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
 
        err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
        if (priv->flags & NFT_EXTHDR_F_PRESENT) {
-               *dest = (err >= 0);
+               nft_reg_store8(dest, err >= 0);
                return;
        } else if (err < 0) {
                goto err;
@@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
 
        err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
        if (priv->flags & NFT_EXTHDR_F_PRESENT) {
-               *dest = (err >= 0);
+               nft_reg_store8(dest, err >= 0);
                return;
        } else if (err < 0) {
                goto err;
index b4c0db0..90c558f 100644 (file)
@@ -692,23 +692,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
  */
 static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
 {
+       u32 min_port;
        int rc;
 
        mutex_lock(&qrtr_port_lock);
        if (!*port) {
-               rc = idr_alloc(&qrtr_ports, ipc,
-                              QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
-                              GFP_ATOMIC);
-               if (rc >= 0)
-                       *port = rc;
+               min_port = QRTR_MIN_EPH_SOCKET;
+               rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
+               if (!rc)
+                       *port = min_port;
        } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
                rc = -EACCES;
        } else if (*port == QRTR_PORT_CTRL) {
-               rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
+               min_port = 0;
+               rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
        } else {
-               rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
-               if (rc >= 0)
-                       *port = rc;
+               min_port = *port;
+               rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
+               if (!rc)
+                       *port = min_port;
        }
        mutex_unlock(&qrtr_port_lock);
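
idr_alloc_u32() has a different calling convention from idr_alloc(): it returns 0 on success (negative errno on failure), takes an inclusive maximum, and hands the allocated ID back through the u32 pointer rather than the return value, which is what the min_port handling above is about. Condensed to one case (kernel API, shown out of context; qrtr_ports, ipc and port are the names used in the hunk above):

    u32 min_port = QRTR_MIN_EPH_SOCKET;
    int rc;

    /* allocate any free ID in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET] */
    rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET,
                       GFP_ATOMIC);
    if (!rc)
            *port = min_port;       /* the allocated ID is in min_port, not in rc */
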
 
index 9dd7802..be1c400 100644 (file)
@@ -6,6 +6,7 @@
 menuconfig TIPC
        tristate "The TIPC Protocol"
        depends on INET
+       depends on IPV6 || IPV6=n
        help
          The Transparent Inter Process Communication (TIPC) protocol is
          specially designed for intra cluster communication. This protocol
index 2175163..90e3c70 100644 (file)
@@ -275,8 +275,9 @@ err_out:
 static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                                 struct tipc_nl_compat_msg *msg)
 {
-       int err;
+       struct nlmsghdr *nlh;
        struct sk_buff *arg;
+       int err;
 
        if (msg->req_type && (!msg->req_size ||
                              !TLV_CHECK_TYPE(msg->req, msg->req_type)))
@@ -305,6 +306,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
        }
 
+       nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+       if (!nlh) {
+               kfree_skb(arg);
+               kfree_skb(msg->rep);
+               msg->rep = NULL;
+               return -EMSGSIZE;
+       }
+       nlmsg_end(arg, nlh);
+
        err = __tipc_nl_compat_dumpit(cmd, msg, arg);
        if (err) {
                kfree_skb(msg->rep);
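
The header primed above lets __tipc_nl_compat_dumpit() treat the scratch skb as the start of a multi-part dump; if even the bare header does not fit, the request fails early with -EMSGSIZE instead of producing a malformed reply. The construct in isolation (kernel netlink API, sketch only; family_id stands in for tipc_genl_family.id):

    struct nlmsghdr *nlh;

    /* reserve a header with zero payload, flagged as part of a multi-part dump */
    nlh = nlmsg_put(skb, 0 /* portid */, 0 /* seq */, family_id,
                    0 /* payload */, NLM_F_MULTI);
    if (!nlh)
            return -EMSGSIZE;       /* skb too small even for the header */
    nlmsg_end(skb, nlh);            /* finalize nlmsg_len */
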
index ede162f..0e93107 100644 (file)
@@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
        if (!info->btf_id || !info->nr_func_info ||
            btf__get_from_id(info->btf_id, &prog_btf))
                goto print;
-       finfo = (struct bpf_func_info *)info->func_info;
+       finfo = u64_to_ptr(info->func_info);
        func_type = btf__type_by_id(prog_btf, finfo->type_id);
        if (!func_type || !btf_is_func(func_type))
                goto print;
index 8a4c2b3..f611846 100644 (file)
@@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
                              var_name, align);
                        return -EINVAL;
                }
+               /* Assume a 32-bit architecture when generating the data
+                * section struct memory layout. Since bpftool can't know
+                * which host architecture the skeleton will be compiled for,
+                * be conservative and assume 32-bit so that enough padding
+                * bytes are emitted for pointer and long types. This still
+                * works on 64-bit architectures: in the worst case an
+                * unnecessary padding field is generated where natural
+                * 8-byte alignment would already place the member, but the
+                * layout remains correct because it is based on the offsets
+                * recorded in BTF.
+                */
+               if (align > 4)
+                       align = 4;
 
                align_off = (off + align - 1) / align * align;
                if (align_off != need_off) {
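
Capping align at 4 makes the emitted skeleton layout valid on both 32-bit and 64-bit hosts: padding is computed from the offsets recorded in BTF with at most 4-byte alignment, and any extra 8-byte alignment on 64-bit hosts then falls out naturally. A small illustration of the align_off computation above, with hypothetical values and an align_up() helper of my own:

    #include <stdio.h>

    /* round off up to the next multiple of align (align is a power of two) */
    static int align_up(int off, int align)
    {
            return (off + align - 1) / align * align;
    }

    int main(void)
    {
            int off = 4, align = 8, need_off = 8;

            if (align > 4)          /* conservative 32-bit assumption */
                    align = 4;
            /* align_up(4, 4) == 4 != need_off == 8, so the generator emits an
             * explicit padding member for the gap instead of relying on the
             * host's natural 8-byte alignment. */
            printf("align_off=%d need_off=%d\n", align_up(off, align), need_off);
            return 0;
    }
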
@@ -397,7 +411,7 @@ static int do_skeleton(int argc, char **argv)
                {                                                           \n\
                        struct %1$s *obj;                                   \n\
                                                                            \n\
-                       obj = (typeof(obj))calloc(1, sizeof(*obj));         \n\
+                       obj = (struct %1$s *)calloc(1, sizeof(*obj));       \n\
                        if (!obj)                                           \n\
                                return NULL;                                \n\
                        if (%1$s__create_skeleton(obj))                     \n\
@@ -461,7 +475,7 @@ static int do_skeleton(int argc, char **argv)
                {                                                           \n\
                        struct bpf_object_skeleton *s;                      \n\
                                                                            \n\
-                       s = (typeof(s))calloc(1, sizeof(*s));               \n\
+                       s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
                        if (!s)                                             \n\
                                return -1;                                  \n\
                        obj->skeleton = s;                                  \n\
@@ -479,7 +493,7 @@ static int do_skeleton(int argc, char **argv)
                                /* maps */                                  \n\
                                s->map_cnt = %zu;                           \n\
                                s->map_skel_sz = sizeof(*s->maps);          \n\
-                               s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
+                               s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
                                if (!s->maps)                               \n\
                                        goto err;                           \n\
                        ",
@@ -515,7 +529,7 @@ static int do_skeleton(int argc, char **argv)
                                /* programs */                              \n\
                                s->prog_cnt = %zu;                          \n\
                                s->prog_skel_sz = sizeof(*s->progs);        \n\
-                               s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
+                               s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
                                if (!s->progs)                              \n\
                                        goto err;                           \n\
                        ",
index 1b79375..a89f09e 100644 (file)
@@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
        switch (info->type) {
        case BPF_LINK_TYPE_RAW_TRACEPOINT:
                jsonw_string_field(json_wtr, "tp_name",
-                                  (const char *)info->raw_tracepoint.tp_name);
+                                  u64_to_ptr(info->raw_tracepoint.tp_name));
                break;
        case BPF_LINK_TYPE_TRACING:
                err = get_prog_info(info->prog_id, &prog_info);
@@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
        switch (info->type) {
        case BPF_LINK_TYPE_RAW_TRACEPOINT:
                printf("\n\ttp '%s'  ",
-                      (const char *)info->raw_tracepoint.tp_name);
+                      (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
                break;
        case BPF_LINK_TYPE_TRACING:
                err = get_prog_info(info->prog_id, &prog_info);
index e3a79b5..c46e521 100644 (file)
 /* Make sure we do not use kernel-only integer typedefs */
 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
 
-#define ptr_to_u64(ptr)        ((__u64)(unsigned long)(ptr))
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+       return (__u64)(unsigned long)ptr;
+}
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+       return (void *)(unsigned long)ptr;
+}
 
 #define NEXT_ARG()     ({ argc--; argv++; if (argc < 0) usage(); })
 #define NEXT_ARGP()    ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
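
bpf_prog_info and bpf_link_info carry kernel pointers widened to __u64, so bpftool constantly converts between the two representations; the inline helpers above replace the old ptr_to_u64() macro and ad-hoc casts with type-checked conversions in both directions. A typical round trip using those two helpers (sketch; assumes the helpers above plus libbpf, error handling trimmed):

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int first_func_type_id(int prog_fd)
    {
            struct bpf_func_info finfo[16] = {};
            struct bpf_prog_info info = {};
            __u32 len = sizeof(info);
            struct bpf_func_info *fi;

            /* hand the kernel a user buffer through a __u64 field ... */
            info.func_info = ptr_to_u64(finfo);
            info.nr_func_info = 16;
            info.func_info_rec_size = sizeof(finfo[0]);
            if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
                    return -1;

            /* ... and turn the reported __u64 back into a usable pointer */
            fi = u64_to_ptr(info.func_info);
            return fi[0].type_id;
    }
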
index 158995d..d393eb8 100644 (file)
@@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
                        p_info("no instructions returned");
                        return -1;
                }
-               buf = (unsigned char *)(info->jited_prog_insns);
+               buf = u64_to_ptr(info->jited_prog_insns);
                member_len = info->jited_prog_len;
        } else {        /* DUMP_XLATED */
                if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
                        p_err("error retrieving insn dump: kernel.kptr_restrict set?");
                        return -1;
                }
-               buf = (unsigned char *)info->xlated_prog_insns;
+               buf = u64_to_ptr(info->xlated_prog_insns);
                member_len = info->xlated_prog_len;
        }
 
@@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
                return -1;
        }
 
-       func_info = (void *)info->func_info;
+       func_info = u64_to_ptr(info->func_info);
 
        if (info->nr_line_info) {
                prog_linfo = bpf_prog_linfo__new(info);
@@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 
                n = write(fd, buf, member_len);
                close(fd);
-               if (n != member_len) {
+               if (n != (ssize_t)member_len) {
                        p_err("error writing output file: %s",
                              n < 0 ? strerror(errno) : "short write");
                        return -1;
@@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
                        __u32 i;
                        if (info->nr_jited_ksyms) {
                                kernel_syms_load(&dd);
-                               ksyms = (__u64 *) info->jited_ksyms;
+                               ksyms = u64_to_ptr(info->jited_ksyms);
                        }
 
                        if (json_output)
                                jsonw_start_array(json_wtr);
 
-                       lens = (__u32 *) info->jited_func_lens;
+                       lens = u64_to_ptr(info->jited_func_lens);
                        for (i = 0; i < info->nr_jited_func_lens; i++) {
                                if (ksyms) {
                                        sym = kernel_syms_search(&dd, ksyms[i]);
@@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
        } else {
                kernel_syms_load(&dd);
                dd.nr_jited_ksyms = info->nr_jited_ksyms;
-               dd.jited_ksyms = (__u64 *) info->jited_ksyms;
+               dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
                dd.btf = btf;
                dd.func_info = func_info;
                dd.finfo_rec_size = info->func_info_rec_size;
@@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd)
                goto out;
        }
 
-       func_info = (struct bpf_func_info *)(info_linear->info.func_info);
+       func_info = u64_to_ptr(info_linear->info.func_info);
        t = btf__type_by_id(btf, func_info[0].type_id);
        if (!t) {
                p_err("btf %d doesn't have type %d",
index bc14db7..e9a4ecd 100644 (file)
@@ -40,7 +40,7 @@
  * Helper macro to manipulate data structures
  */
 #ifndef offsetof
-#define offsetof(TYPE, MEMBER)  __builtin_offsetof(TYPE, MEMBER)
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
 #endif
 #ifndef container_of
 #define container_of(ptr, type, member)                                \
index 4843e44..7dfca70 100644 (file)
@@ -41,6 +41,7 @@ struct btf {
        __u32 types_size;
        __u32 data_size;
        int fd;
+       int ptr_sz;
 };
 
 static inline __u64 ptr_to_u64(const void *ptr)
@@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
        return btf->types[type_id];
 }
 
+static int determine_ptr_size(const struct btf *btf)
+{
+       const struct btf_type *t;
+       const char *name;
+       int i;
+
+       for (i = 1; i <= btf->nr_types; i++) {
+               t = btf__type_by_id(btf, i);
+               if (!btf_is_int(t))
+                       continue;
+
+               name = btf__name_by_offset(btf, t->name_off);
+               if (!name)
+                       continue;
+
+               if (strcmp(name, "long int") == 0 ||
+                   strcmp(name, "long unsigned int") == 0) {
+                       if (t->size != 4 && t->size != 8)
+                               continue;
+                       return t->size;
+               }
+       }
+
+       return -1;
+}
+
+static size_t btf_ptr_sz(const struct btf *btf)
+{
+       if (!btf->ptr_sz)
+               ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+       return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
+}
+
+/* Return the pointer size this BTF instance assumes. The size is determined
+ * heuristically by looking for a 'long' or 'unsigned long' integer type and
+ * recording its size in bytes. If the BTF type information has no such type,
+ * this function returns 0; internally the native architecture's pointer size
+ * (4 or 8, depending on what libbpf was compiled for) is then assumed. The
+ * guessed value can be overridden with the btf__set_pointer_size() API.
+ */
+size_t btf__pointer_size(const struct btf *btf)
+{
+       if (!btf->ptr_sz)
+               ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+
+       if (btf->ptr_sz < 0)
+               /* not enough BTF type info to guess */
+               return 0;
+
+       return btf->ptr_sz;
+}
+
+/* Override or set pointer size in bytes. Only values of 4 and 8 are
+ * supported.
+ */
+int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
+{
+       if (ptr_sz != 4 && ptr_sz != 8)
+               return -EINVAL;
+       btf->ptr_sz = ptr_sz;
+       return 0;
+}
+
 static bool btf_type_is_void(const struct btf_type *t)
 {
        return t == &btf_void || btf_is_fwd(t);
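
Since BPF-side BTF always describes a 64-bit target while libbpf itself may be built for a 32-bit host, callers that size or dump types should pin the pointer size explicitly instead of relying on sizeof(void *). A usage sketch of the two new APIs (show_type_size() is illustrative; error handling trimmed):

    #include <bpf/btf.h>
    #include <bpf/libbpf.h>
    #include <stdio.h>

    static void show_type_size(const char *path, __u32 type_id)
    {
            struct btf *btf = btf__parse_elf(path, NULL);

            if (libbpf_get_error(btf))
                    return;

            /* if the 'long' heuristic found nothing, pointer size reads as 0;
             * force 8 bytes for BPF-targeted BTF */
            if (btf__pointer_size(btf) == 0)
                    btf__set_pointer_size(btf, 8);

            printf("type %u -> %lld bytes\n", type_id,
                   (long long)btf__resolve_size(btf, type_id));
            btf__free(btf);
    }
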
@@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
                        size = t->size;
                        goto done;
                case BTF_KIND_PTR:
-                       size = sizeof(void *);
+                       size = btf_ptr_sz(btf);
                        goto done;
                case BTF_KIND_TYPEDEF:
                case BTF_KIND_VOLATILE:
@@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id)
        switch (kind) {
        case BTF_KIND_INT:
        case BTF_KIND_ENUM:
-               return min(sizeof(void *), (size_t)t->size);
+               return min(btf_ptr_sz(btf), (size_t)t->size);
        case BTF_KIND_PTR:
-               return sizeof(void *);
+               return btf_ptr_sz(btf);
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_VOLATILE:
        case BTF_KIND_CONST:
@@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
        if (IS_ERR(btf))
                goto done;
 
+       switch (gelf_getclass(elf)) {
+       case ELFCLASS32:
+               btf__set_pointer_size(btf, 4);
+               break;
+       case ELFCLASS64:
+               btf__set_pointer_size(btf, 8);
+               break;
+       default:
+               pr_warn("failed to get ELF class (bitness) for %s\n", path);
+               break;
+       }
+
        if (btf_ext && btf_ext_data) {
                *btf_ext = btf_ext__new(btf_ext_data->d_buf,
                                        btf_ext_data->d_size);
index f4a1a1d..1ca1444 100644 (file)
@@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
 LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
 LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
                                                  __u32 id);
+LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
+LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
 LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
index cf71116..fe39bd7 100644 (file)
@@ -13,6 +13,7 @@
 #include <errno.h>
 #include <linux/err.h>
 #include <linux/btf.h>
+#include <linux/kernel.h>
 #include "btf.h"
 #include "hashmap.h"
 #include "libbpf.h"
@@ -60,6 +61,7 @@ struct btf_dump {
        const struct btf_ext *btf_ext;
        btf_dump_printf_fn_t printf_fn;
        struct btf_dump_opts opts;
+       int ptr_sz;
        bool strip_mods;
 
        /* per-type auxiliary state */
@@ -138,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
        d->btf_ext = btf_ext;
        d->printf_fn = printf_fn;
        d->opts.ctx = opts ? opts->ctx : NULL;
+       d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
 
        d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
        if (IS_ERR(d->type_names)) {
@@ -549,6 +552,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
        }
 }
 
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+                                         const struct btf_type *t);
+
 static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
                                     const struct btf_type *t);
 static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@@ -671,6 +677,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
 
        switch (kind) {
        case BTF_KIND_INT:
+               /* Emit type alias definitions if necessary */
+               btf_dump_emit_missing_aliases(d, id, t);
+
                tstate->emit_state = EMITTED;
                break;
        case BTF_KIND_ENUM:
@@ -797,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
                                      int align, int lvl)
 {
        int off_diff = m_off - cur_off;
-       int ptr_bits = sizeof(void *) * 8;
+       int ptr_bits = d->ptr_sz * 8;
 
        if (off_diff <= 0)
                /* no gap */
@@ -870,7 +879,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
                        btf_dump_printf(d, ": %d", m_sz);
                        off = m_off + m_sz;
                } else {
-                       m_sz = max(0, btf__resolve_size(d->btf, m->type));
+                       m_sz = max(0LL, btf__resolve_size(d->btf, m->type));
                        off = m_off + m_sz * 8;
                }
                btf_dump_printf(d, ";");
@@ -890,6 +899,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
                btf_dump_printf(d, " __attribute__((packed))");
 }
 
+static const char *missing_base_types[][2] = {
+       /*
+        * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
+        * SIMD intrinsics. Alias them to standard base types.
+        */
+       { "__Poly8_t",          "unsigned char" },
+       { "__Poly16_t",         "unsigned short" },
+       { "__Poly64_t",         "unsigned long long" },
+       { "__Poly128_t",        "unsigned __int128" },
+};
+
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+                                         const struct btf_type *t)
+{
+       const char *name = btf_dump_type_name(d, id);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
+               if (strcmp(name, missing_base_types[i][0]) == 0) {
+                       btf_dump_printf(d, "typedef %s %s;\n\n",
+                                       missing_base_types[i][1], name);
+                       break;
+               }
+       }
+}
+
 static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
                                   const struct btf_type *t)
 {
index 0a06124..5d20b2d 100644 (file)
@@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj,
                                BTF_ELF_SEC, err);
                        goto out;
                }
+               /* enforce 8-byte pointers for BPF-targeted BTFs */
+               btf__set_pointer_size(obj->btf, 8);
                err = 0;
        }
        if (btf_ext_data) {
@@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
                if (IS_ERR(kern_btf))
                        return PTR_ERR(kern_btf);
 
+               /* enforce 8-byte pointers for BPF-targeted BTFs */
+               btf__set_pointer_size(obj->btf, 8);
                bpf_object__sanitize_btf(obj, kern_btf);
        }
 
@@ -3478,10 +3482,11 @@ bpf_object__probe_global_data(struct bpf_object *obj)
 
        map = bpf_create_map_xattr(&map_attr);
        if (map < 0) {
-               cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+               ret = -errno;
+               cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
                pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
-                       __func__, cp, errno);
-               return -errno;
+                       __func__, cp, -ret);
+               return ret;
        }
 
        insns[0].imm = map;
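
The key detail in both libbpf error paths patched in this file is capturing errno into a local before calling libbpf_strerror_r(): any library call made while formatting the warning may clobber errno, and the function would then return the wrong error code. The same pattern in plain user-space C (stat_or_report() is just a sketch):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    static int stat_or_report(const char *path)
    {
            struct stat st;

            if (stat(path, &st) < 0) {
                    int err = -errno;       /* snapshot before any other libc call */

                    /* fprintf()/strerror() are free to change errno from here on */
                    fprintf(stderr, "stat(%s) failed: %s\n", path, strerror(-err));
                    return err;             /* still the original -errno */
            }
            return 0;
    }
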
@@ -5194,7 +5199,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 static int bpf_object__collect_map_relos(struct bpf_object *obj,
                                         GElf_Shdr *shdr, Elf_Data *data)
 {
-       int i, j, nrels, new_sz, ptr_sz = sizeof(void *);
+       const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
+       int i, j, nrels, new_sz;
        const struct btf_var_secinfo *vi = NULL;
        const struct btf_type *sec, *var, *def;
        const struct btf_member *member;
@@ -5243,7 +5249,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
 
                        vi = btf_var_secinfos(sec) + map->btf_var_idx;
                        if (vi->offset <= rel.r_offset &&
-                           rel.r_offset + sizeof(void *) <= vi->offset + vi->size)
+                           rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
                                break;
                }
                if (j == obj->nr_maps) {
@@ -5279,17 +5285,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
                        return -EINVAL;
 
                moff = rel.r_offset - vi->offset - moff;
-               if (moff % ptr_sz)
+               /* here we use the BPF pointer size, which is always 64 bit,
+                * as we are parsing an ELF that was built for the BPF target
+                */
+               if (moff % bpf_ptr_sz)
                        return -EINVAL;
-               moff /= ptr_sz;
+               moff /= bpf_ptr_sz;
                if (moff >= map->init_slots_sz) {
                        new_sz = moff + 1;
-                       tmp = realloc(map->init_slots, new_sz * ptr_sz);
+                       tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
                        if (!tmp)
                                return -ENOMEM;
                        map->init_slots = tmp;
                        memset(map->init_slots + map->init_slots_sz, 0,
-                              (new_sz - map->init_slots_sz) * ptr_sz);
+                              (new_sz - map->init_slots_sz) * host_ptr_sz);
                        map->init_slots_sz = new_sz;
                }
                map->init_slots[moff] = targ_map;
@@ -6012,9 +6021,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
        }
 
        if (bpf_obj_pin(prog->instances.fds[instance], path)) {
-               cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+               err = -errno;
+               cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
                pr_warn("failed to pin program: %s\n", cp);
-               return -errno;
+               return err;
        }
        pr_debug("pinned program '%s'\n", path);
 
index 0c4722b..e35bd6c 100644 (file)
@@ -295,5 +295,7 @@ LIBBPF_0.1.0 {
                bpf_program__set_sk_lookup;
                btf__parse;
                btf__parse_raw;
+               btf__pointer_size;
                btf__set_fd;
+               btf__set_pointer_size;
 } LIBBPF_0.0.9;
index 7afa416..284d592 100644 (file)
@@ -159,15 +159,15 @@ void test_bpf_obj_id(void)
                /* Check getting link info */
                info_len = sizeof(struct bpf_link_info) * 2;
                bzero(&link_infos[i], info_len);
-               link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name;
+               link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name);
                link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name);
                err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]),
                                             &link_infos[i], &info_len);
                if (CHECK(err ||
                          link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT ||
                          link_infos[i].prog_id != prog_infos[i].id ||
-                         link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name ||
-                         strcmp((char *)link_infos[i].raw_tracepoint.tp_name,
+                         link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) ||
+                         strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
                                 "sys_enter") ||
                          info_len != sizeof(struct bpf_link_info),
                          "get-link-info(fd)",
@@ -178,7 +178,7 @@ void test_bpf_obj_id(void)
                          link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT,
                          link_infos[i].id,
                          link_infos[i].prog_id, prog_infos[i].id,
-                         (char *)link_infos[i].raw_tracepoint.tp_name,
+                         (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name),
                          "sys_enter"))
                        goto done;
 
index cb33a7e..39fb81d 100644 (file)
@@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args)
 static struct btf_dump_test_case {
        const char *name;
        const char *file;
+       bool known_ptr_sz;
        struct btf_dump_opts opts;
 } btf_dump_test_cases[] = {
-       {"btf_dump: syntax", "btf_dump_test_case_syntax", {}},
-       {"btf_dump: ordering", "btf_dump_test_case_ordering", {}},
-       {"btf_dump: padding", "btf_dump_test_case_padding", {}},
-       {"btf_dump: packing", "btf_dump_test_case_packing", {}},
-       {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}},
-       {"btf_dump: multidim", "btf_dump_test_case_multidim", {}},
-       {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}},
+       {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
+       {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
+       {"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
+       {"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
+       {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
+       {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
+       {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
 };
 
 static int btf_dump_all_types(const struct btf *btf,
@@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
                goto done;
        }
 
+       /* tests without t->known_ptr_sz set have no "long" or "unsigned long"
+        * type, so the correct pointer size can't be determined from BTF; if
+        * they do have one, it should be 8 regardless of the host
+        * architecture, because the BPF target is always 64-bit
+        */
+       if (!t->known_ptr_sz) {
+               btf__set_pointer_size(btf, 8);
+       } else {
+               CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n",
+                     8, btf__pointer_size(btf));
+       }
+
        snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file);
        fd = mkstemp(out_file);
        if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) {
index b093787..1931a15 100644 (file)
@@ -159,8 +159,8 @@ void test_core_extern(void)
                exp = (uint64_t *)&t->data;
                for (j = 0; j < n; j++) {
                        CHECK(got[j] != exp[j], "check_res",
-                             "result #%d: expected %lx, but got %lx\n",
-                              j, exp[j], got[j]);
+                             "result #%d: expected %llx, but got %llx\n",
+                              j, (__u64)exp[j], (__u64)got[j]);
                }
 cleanup:
                test_core_extern__destroy(skel);
index 084ed26..a54eafc 100644 (file)
                .union_sz = sizeof(((type *)0)->union_field),           \
                .arr_sz = sizeof(((type *)0)->arr_field),               \
                .arr_elem_sz = sizeof(((type *)0)->arr_field[0]),       \
-               .ptr_sz = sizeof(((type *)0)->ptr_field),               \
-               .enum_sz = sizeof(((type *)0)->enum_field),     \
+               .ptr_sz = 8, /* always 8-byte pointer for BPF */        \
+               .enum_sz = sizeof(((type *)0)->enum_field),             \
        }
 
 #define SIZE_CASE(name) {                                              \
@@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = {
                .sb4 = -1,
                .sb20 = -0x17654321,
                .u32 = 0xBEEF,
-               .s32 = -0x3FEDCBA987654321,
+               .s32 = -0x3FEDCBA987654321LL,
        }),
        BITFIELDS_CASE(bitfields___bitfield_vs_int, {
-               .ub1 = 0xFEDCBA9876543210,
+               .ub1 = 0xFEDCBA9876543210LL,
                .ub2 = 0xA6,
-               .ub7 = -0x7EDCBA987654321,
-               .sb4 = -0x6123456789ABCDE,
-               .sb20 = 0xD00D,
+               .ub7 = -0x7EDCBA987654321LL,
+               .sb4 = -0x6123456789ABCDELL,
+               .sb20 = 0xD00DLL,
                .u32 = -0x76543,
-               .s32 = 0x0ADEADBEEFBADB0B,
+               .s32 = 0x0ADEADBEEFBADB0BLL,
        }),
        BITFIELDS_CASE(bitfields___just_big_enough, {
-               .ub1 = 0xF,
-               .ub2 = 0x0812345678FEDCBA,
+               .ub1 = 0xFLL,
+               .ub2 = 0x0812345678FEDCBALL,
        }),
        BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
 
index a895bfe..197d0d2 100644 (file)
@@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
        __u32 duration = 0, retval;
        struct bpf_map *data_map;
        const int zero = 0;
-       u64 *result = NULL;
+       __u64 *result = NULL;
 
        err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
                            &pkt_obj, &pkt_fd);
@@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
 
        link = calloc(sizeof(struct bpf_link *), prog_cnt);
        prog = calloc(sizeof(struct bpf_program *), prog_cnt);
-       result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64));
+       result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
        if (CHECK(!link || !prog || !result, "alloc_memory",
                  "failed to alloc memory"))
                goto close_prog;
@@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
                goto close_prog;
 
        for (i = 0; i < prog_cnt; i++)
-               if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n",
+               if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
                          result[i]))
                        goto close_prog;
 
index f11f187..cd6dc80 100644 (file)
@@ -591,7 +591,7 @@ void test_flow_dissector(void)
                CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
                           err || tattr.retval != 1,
                           tests[i].name,
-                          "err %d errno %d retval %d duration %d size %u/%lu\n",
+                          "err %d errno %d retval %d duration %d size %u/%zu\n",
                           err, errno, tattr.retval, tattr.duration,
                           tattr.data_size_out, sizeof(flow_keys));
                CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
index e3cb62b..9efa7e5 100644 (file)
@@ -5,7 +5,7 @@
 static void test_global_data_number(struct bpf_object *obj, __u32 duration)
 {
        int i, err, map_fd;
-       uint64_t num;
+       __u64 num;
 
        map_fd = bpf_find_map(__func__, obj, "result_number");
        if (CHECK_FAIL(map_fd < 0))
@@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
        struct {
                char *name;
                uint32_t key;
-               uint64_t num;
+               __u64 num;
        } tests[] = {
                { "relocate .bss reference",     0, 0 },
                { "relocate .data reference",    1, 42 },
@@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration)
        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
                CHECK(err || num != tests[i].num, tests[i].name,
-                     "err %d result %lx expected %lx\n",
+                     "err %d result %llx expected %llx\n",
                      err, num, tests[i].num);
        }
 }
index 43d0b55..9c3c5c0 100644 (file)
@@ -21,7 +21,7 @@ void test_mmap(void)
        const long page_size = sysconf(_SC_PAGE_SIZE);
        int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
        struct bpf_map *data_map, *bss_map;
-       void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
+       void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
        struct test_mmap__bss *bss_data;
        struct bpf_map_info map_info;
        __u32 map_info_sz = sizeof(map_info);
@@ -183,16 +183,23 @@ void test_mmap(void)
 
        /* check some more advanced mmap() manipulations */
 
+       tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
+                         -1, 0);
+       if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
+               goto cleanup;
+
        /* map all but last page: pages 1-3 mapped */
-       tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED,
+       tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
                          data_map_fd, 0);
-       if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno))
+       if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
+               munmap(tmp0, 4 * page_size);
                goto cleanup;
+       }
 
        /* unmap second page: pages 1, 3 mapped */
        err = munmap(tmp1 + page_size, page_size);
        if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
-               munmap(tmp1, map_sz);
+               munmap(tmp1, 4 * page_size);
                goto cleanup;
        }
 
@@ -201,7 +208,7 @@ void test_mmap(void)
                    MAP_SHARED | MAP_FIXED, data_map_fd, 0);
        if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
                munmap(tmp1, page_size);
-               munmap(tmp1 + 2*page_size, page_size);
+               munmap(tmp1 + 2*page_size, 2 * page_size);
                goto cleanup;
        }
        CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
@@ -211,7 +218,7 @@ void test_mmap(void)
        tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
                    data_map_fd, 0);
        if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
-               munmap(tmp1, 3 * page_size); /* unmap page 1 */
+               munmap(tmp1, 4 * page_size); /* unmap all 4 pages */
                goto cleanup;
        }
        CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
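
A standalone sketch of the reserve-then-overlay mmap() pattern the hunk above switches to; the file path and sizes are illustrative only, not from the patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long psz = sysconf(_SC_PAGE_SIZE);
            int fd = open("/tmp/mmap_demo", O_RDWR | O_CREAT, 0600);
            void *resv, *fixed;

            if (fd < 0 || ftruncate(fd, 4 * psz))
                    return 1;
            /* 1) reserve 4 pages of address space; contents are never used */
            resv = mmap(NULL, 4 * psz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (resv == MAP_FAILED)
                    return 1;
            /* 2) MAP_FIXED the file over the first 3 pages of the reservation,
             * so the fixed mapping cannot clobber an unrelated VMA
             */
            fixed = mmap(resv, 3 * psz, PROT_READ, MAP_SHARED | MAP_FIXED, fd, 0);
            if (fixed == MAP_FAILED)
                    return 1;
            printf("reserved %p, file mapped at %p\n", resv, fixed);
            munmap(resv, 4 * psz);
            close(fd);
            return 0;
    }
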
index dde2b7a..935a294 100644 (file)
@@ -28,7 +28,7 @@ void test_prog_run_xattr(void)
              "err %d errno %d retval %d\n", err, errno, tattr.retval);
 
        CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
-             "incorrect output size, want %lu have %u\n",
+             "incorrect output size, want %zu have %u\n",
              sizeof(pkt_v4), tattr.data_size_out);
 
        CHECK_ATTR(buf[5] != 0, "overflow",
index c571584..9ff0412 100644 (file)
@@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss)
        v6->sin6_addr.s6_addr[10] = 0xff;
        v6->sin6_addr.s6_addr[11] = 0xff;
        memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
+       memset(&v6->sin6_addr.s6_addr[0], 0, 10);
 }
 
 static int udp_recv_send(int server_fd)
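
The added memset() completes an IPv4-mapped IPv6 address (::ffff:a.b.c.d per RFC 4291): ten zero bytes, 0xff 0xff, then the four IPv4 address bytes. A self-contained sketch of that construction; the example address is not from the patch:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct in_addr v4;
            struct in6_addr v6;
            char buf[INET6_ADDRSTRLEN];

            inet_pton(AF_INET, "192.0.2.1", &v4);
            memset(&v6.s6_addr[0], 0, 10);        /* without this, stale bytes leak in */
            v6.s6_addr[10] = 0xff;
            v6.s6_addr[11] = 0xff;
            memcpy(&v6.s6_addr[12], &v4.s_addr, 4);
            inet_ntop(AF_INET6, &v6, buf, sizeof(buf));
            printf("%s\n", buf);                  /* prints ::ffff:192.0.2.1 */
            return 0;
    }
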
index 25de86a..fafedda 100644 (file)
@@ -81,7 +81,7 @@ void test_skb_ctx(void)
 
        CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
                   "ctx_size_out",
-                  "incorrect output size, want %lu have %u\n",
+                  "incorrect output size, want %zu have %u\n",
                   sizeof(skb), tattr.ctx_size_out);
 
        for (i = 0; i < 5; i++)
index c75525e..dd324b4 100644 (file)
@@ -44,25 +44,25 @@ void test_varlen(void)
        CHECK_VAL(bss->payload1_len2, size2);
        CHECK_VAL(bss->total1, size1 + size2);
        CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 
        CHECK_VAL(data->payload2_len1, size1);
        CHECK_VAL(data->payload2_len2, size2);
        CHECK_VAL(data->total2, size1 + size2);
        CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 
        CHECK_VAL(data->payload3_len1, size1);
        CHECK_VAL(data->payload3_len2, size2);
        CHECK_VAL(data->total3, size1 + size2);
        CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 
        CHECK_VAL(data->payload4_len1, size1);
        CHECK_VAL(data->payload4_len2, size2);
        CHECK_VAL(data->total4, size1 + size2);
        CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
-             "doesn't match!");
+             "doesn't match!\n");
 cleanup:
        test_varlen__destroy(skel);
 }
index 34d8471..69139ed 100644 (file)
@@ -1,5 +1,10 @@
 #include <stdint.h>
 #include <stdbool.h>
+
+void preserce_ptr_sz_fn(long x) {}
+
+#define __bpf_aligned __attribute__((aligned(8)))
+
 /*
  * KERNEL
  */
@@ -444,51 +449,51 @@ struct core_reloc_primitives {
        char a;
        int b;
        enum core_reloc_primitives_enum c;
-       void *d;
-       int (*f)(const char *);
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___diff_enum_def {
        char a;
        int b;
-       void *d;
-       int (*f)(const char *);
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
        enum {
                X = 100,
                Y = 200,
-       } c; /* inline enum def with differing set of values */
+       } c __bpf_aligned; /* inline enum def with differing set of values */
 };
 
 struct core_reloc_primitives___diff_func_proto {
-       void (*f)(int); /* incompatible function prototype */
-       void *d;
-       enum core_reloc_primitives_enum c;
+       void (*f)(int) __bpf_aligned; /* incompatible function prototype */
+       void *d __bpf_aligned;
+       enum core_reloc_primitives_enum c __bpf_aligned;
        int b;
        char a;
 };
 
 struct core_reloc_primitives___diff_ptr_type {
-       const char * const d; /* different pointee type + modifiers */
-       char a;
+       const char * const d __bpf_aligned; /* different pointee type + modifiers */
+       char a __bpf_aligned;
        int b;
        enum core_reloc_primitives_enum c;
-       int (*f)(const char *);
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___err_non_enum {
        char a[1];
        int b;
        int c; /* int instead of enum */
-       void *d;
-       int (*f)(const char *);
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___err_non_int {
        char a[1];
-       int *b; /* ptr instead of int */
-       enum core_reloc_primitives_enum c;
-       void *d;
-       int (*f)(const char *);
+       int *b __bpf_aligned; /* ptr instead of int */
+       enum core_reloc_primitives_enum c __bpf_aligned;
+       void *d __bpf_aligned;
+       int (*f)(const char *) __bpf_aligned;
 };
 
 struct core_reloc_primitives___err_non_ptr {
@@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr {
        int b;
        enum core_reloc_primitives_enum c;
        int d; /* int instead of ptr */
-       int (*f)(const char *);
+       int (*f)(const char *) __bpf_aligned;
 };
 
 /*
@@ -507,7 +512,7 @@ struct core_reloc_mods_output {
 };
 
 typedef const int int_t;
-typedef const char *char_ptr_t;
+typedef const char *char_ptr_t __bpf_aligned;
 typedef const int arr_t[7];
 
 struct core_reloc_mods_substruct {
@@ -523,9 +528,9 @@ typedef struct {
 struct core_reloc_mods {
        int a;
        int_t b;
-       char *c;
+       char *c __bpf_aligned;
        char_ptr_t d;
-       int e[3];
+       int e[3] __bpf_aligned;
        arr_t f;
        struct core_reloc_mods_substruct g;
        core_reloc_mods_substruct_t h;
@@ -535,9 +540,9 @@ struct core_reloc_mods {
 struct core_reloc_mods___mod_swap {
        int b;
        int_t a;
-       char *d;
+       char *d __bpf_aligned;
        char_ptr_t c;
-       int f[3];
+       int f[3] __bpf_aligned;
        arr_t e;
        struct {
                int y;
@@ -555,7 +560,7 @@ typedef arr1_t arr2_t;
 typedef arr2_t arr3_t;
 typedef arr3_t arr4_t;
 
-typedef const char * const volatile fancy_char_ptr_t;
+typedef const char * const volatile fancy_char_ptr_t __bpf_aligned;
 
 typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt;
 
@@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs {
        arr4_t e;
        fancy_char_ptr_t d;
        fancy_char_ptr_t c;
-       int3_t b;
+       int3_t b __bpf_aligned;
        int3_t a;
 };
 
@@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change {
        int8_t          sb4: 1;         /*  4 ->  1 */
        int32_t         sb20: 30;       /* 20 -> 30 */
        /* non-bitfields */
-       uint16_t        u32;            /* 32 -> 16 */
-       int64_t         s32;            /* 32 -> 64 */
+       uint16_t        u32;                    /* 32 -> 16 */
+       int64_t         s32 __bpf_aligned;      /* 32 -> 64 */
 };
 
 /* turn bitfield into non-bitfield and vice versa */
 struct core_reloc_bitfields___bitfield_vs_int {
        uint64_t        ub1;            /*  3 -> 64 non-bitfield */
        uint8_t         ub2;            /* 20 ->  8 non-bitfield */
-       int64_t         ub7;            /*  7 -> 64 non-bitfield signed */
-       int64_t         sb4;            /*  4 -> 64 non-bitfield signed */
-       uint64_t        sb20;           /* 20 -> 16 non-bitfield unsigned */
-       int32_t         u32: 20;        /* 32 non-bitfield -> 20 bitfield */
-       uint64_t        s32: 60;        /* 32 non-bitfield -> 60 bitfield */
+       int64_t         ub7 __bpf_aligned;      /*  7 -> 64 non-bitfield signed */
+       int64_t         sb4 __bpf_aligned;      /*  4 -> 64 non-bitfield signed */
+       uint64_t        sb20 __bpf_aligned;     /* 20 -> 16 non-bitfield unsigned */
+       int32_t         u32: 20;                /* 32 non-bitfield -> 20 bitfield */
+       uint64_t        s32: 60 __bpf_aligned;  /* 32 non-bitfield -> 60 bitfield */
 };
 
 struct core_reloc_bitfields___just_big_enough {
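
A hypothetical struct (not from the patch) showing why the __bpf_aligned annotations above are needed: the expected-output types are compiled for the host, and on a 32-bit host a bare pointer member lands at offset 4, diverging from the 64-bit BPF target's view of the same type.

    #include <stddef.h>
    #include <stdio.h>

    #define __bpf_aligned __attribute__((aligned(8)))

    struct layout_demo {
            char a;
            void *d __bpf_aligned;  /* offset 8 on 32- and 64-bit hosts alike */
    };

    int main(void)
    {
            printf("offsetof(d) = %zu, sizeof = %zu\n",
                   offsetof(struct layout_demo, d), sizeof(struct layout_demo));
            return 0;
    }
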
index 1f1966e..3e6912e 100644 (file)
@@ -54,6 +54,7 @@ SEC("sockops")
 int bpf_testcb(struct bpf_sock_ops *skops)
 {
        char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)];
+       struct bpf_sock_ops *reuse = skops;
        struct tcphdr *thdr;
        int good_call_rv = 0;
        int bad_call_rv = 0;
@@ -62,6 +63,46 @@ int bpf_testcb(struct bpf_sock_ops *skops)
        int v = 0;
        int op;
 
+       /* Test reading fields in bpf_sock_ops using single register */
+       asm volatile (
+               "%[reuse] = *(u32 *)(%[reuse] +96)"
+               : [reuse] "+r"(reuse)
+               :);
+
+       asm volatile (
+               "%[op] = *(u32 *)(%[skops] +96)"
+               : [op] "+r"(op)
+               : [skops] "r"(skops)
+               :);
+
+       asm volatile (
+               "r9 = %[skops];\n"
+               "r8 = *(u32 *)(r9 +164);\n"
+               "*(u32 *)(r9 +164) = r8;\n"
+               :: [skops] "r"(skops)
+               : "r9", "r8");
+
+       asm volatile (
+               "r1 = %[skops];\n"
+               "r1 = *(u64 *)(r1 +184);\n"
+               "if r1 == 0 goto +1;\n"
+               "r1 = *(u32 *)(r1 +4);\n"
+               :: [skops] "r"(skops):"r1");
+
+       asm volatile (
+               "r9 = %[skops];\n"
+               "r9 = *(u64 *)(r9 +184);\n"
+               "if r9 == 0 goto +1;\n"
+               "r9 = *(u32 *)(r9 +4);\n"
+               :: [skops] "r"(skops):"r9");
+
+       asm volatile (
+               "r1 = %[skops];\n"
+               "r2 = *(u64 *)(r1 +184);\n"
+               "if r2 == 0 goto +1;\n"
+               "r2 = *(u32 *)(r2 +4);\n"
+               :: [skops] "r"(skops):"r1", "r2");
+
        op = (int) skops->op;
 
        update_event_map(op);
index cd4b72c..913acdf 100644 (file)
@@ -15,9 +15,9 @@ int test_pid = 0;
 bool capture = false;
 
 /* .bss */
-long payload1_len1 = 0;
-long payload1_len2 = 0;
-long total1 = 0;
+__u64 payload1_len1 = 0;
+__u64 payload1_len2 = 0;
+__u64 total1 = 0;
 char payload1[MAX_LEN + MAX_LEN] = {};
 
 /* .data */
index 305fae8..c75fc64 100644 (file)
@@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num)
        info_garbage.garbage = 0;
        err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len);
        if (CHECK(err || info_len != sizeof(*info),
-                 "err:%d errno:%d info_len:%u sizeof(*info):%lu",
+                 "err:%d errno:%d info_len:%u sizeof(*info):%zu",
                  err, errno, info_len, sizeof(*info))) {
                err = -1;
                goto done;
@@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num)
        if (CHECK(err || !info.id || info_len != sizeof(info) ||
                  info.btf_size != raw_btf_size ||
                  (ret = memcmp(raw_btf, user_btf, expected_nbytes)),
-                 "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
+                 "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d",
                  err, errno, info.id, info_len, sizeof(info),
                  raw_btf_size, info.btf_size, expected_nbytes, ret)) {
                err = -1;
@@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
 
                nexpected_line = snprintf(expected_line, line_size,
                                          "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
-                                         "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+                                         "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
                                          "%u,0x%x,[[%d,%d],[%d,%d]]}\n",
                                          percpu_map ? "\tcpu" : "",
                                          percpu_map ? cpu : next_key,
@@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
                                          v->unused_bits2a,
                                          v->bits28,
                                          v->unused_bits2b,
-                                         v->ui64,
+                                         (__u64)v->ui64,
                                          v->ui8a[0], v->ui8a[1],
                                          v->ui8a[2], v->ui8a[3],
                                          v->ui8a[4], v->ui8a[5],
index 6e09bf7..dbb820d 100644 (file)
@@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr)
        return (__u64) (unsigned long) ptr;
 }
 
+static inline void *u64_to_ptr(__u64 ptr)
+{
+       return (void *) (unsigned long) ptr;
+}
+
 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
 int compare_map_keys(int map1_fd, int map2_fd);
 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
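
A hypothetical call site (not in the patch) showing the round trip the new u64_to_ptr() helper enables, assuming ptr_to_u64()/u64_to_ptr() from test_progs.h above: kernel ABI fields such as bpf_prog_info.xlated_prog_insns carry addresses as __u64 on every host, and open-coded casts back to a pointer trigger pointer-size warnings on 32-bit builds.

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    static struct bpf_insn *fetch_xlated(int prog_fd, struct bpf_insn *buf, __u32 cnt)
    {
            struct bpf_prog_info info = {};
            __u32 len = sizeof(info);

            info.xlated_prog_insns = ptr_to_u64(buf);   /* hand the buffer to the kernel */
            info.xlated_prog_len = cnt * sizeof(*buf);
            if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
                    return NULL;
            return u64_to_ptr(info.xlated_prog_insns);  /* and read the address back */
    }
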
index 18c5de5..bf361f3 100755 (executable)
@@ -180,6 +180,8 @@ setup()
                        ;;
                r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
                       ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
+                      ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
+                      ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0
 
                       ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
                       ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10
index d3e0809..a47d1d8 100755 (executable)
@@ -2,13 +2,18 @@
 # SPDX-License-Identifier: GPL-2.0
 #
 # This tests basic flowtable functionality.
-# Creates following topology:
+# Creates the following default topology:
 #
 # Originator (MTU 9000) <-Router1-> MTU 1500 <-Router2-> Responder (MTU 2000)
 # Router1 is the one doing flow offloading, Router2 has no special
 # purpose other than having a link that is smaller than both the Originator's
 # and the Responder's, i.e. the announced TCPMSS values are too large and will still
 # result in fragmentation and/or PMTU discovery.
+#
+# You can check with different Originator/Link/Responder MTUs, e.g.:
+# sh nft_flowtable.sh -o1000 -l500 -r100
+#
+
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -21,29 +26,18 @@ ns2out=""
 
 log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
 
-nft --version > /dev/null 2>&1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not run test without nft tool"
-       exit $ksft_skip
-fi
-
-ip -Version > /dev/null 2>&1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not run test without ip tool"
-       exit $ksft_skip
-fi
-
-which nc > /dev/null 2>&1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not run test without nc (netcat)"
-       exit $ksft_skip
-fi
+checktool() {
+       $1 > /dev/null 2>&1
+       if [ $? -ne 0 ];then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
 
-ip netns add nsr1
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not create net namespace"
-       exit $ksft_skip
-fi
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "which nc" "run test without nc (netcat)"
+checktool "ip netns add nsr1" "create net namespace"
 
 ip netns add ns1
 ip netns add ns2
@@ -89,11 +83,24 @@ ip -net nsr2 addr add dead:2::1/64 dev veth1
 # ns2 is going via nsr2 with a smaller mtu, so that TCPMSS announced by both peers
 # is NOT the lowest link mtu.
 
-ip -net nsr1 link set veth0 mtu 9000
-ip -net ns1 link set eth0 mtu 9000
+omtu=9000
+lmtu=1500
+rmtu=2000
+
+while getopts "o:l:r:" o
+do
+       case $o in
+               o) omtu=$OPTARG;;
+               l) lmtu=$OPTARG;;
+               r) rmtu=$OPTARG;;
+       esac
+done
+
+ip -net nsr1 link set veth0 mtu $omtu
+ip -net ns1 link set eth0 mtu $omtu
 
-ip -net nsr2 link set veth1 mtu 2000
-ip -net ns2 link set eth0 mtu 2000
+ip -net nsr2 link set veth1 mtu $rmtu
+ip -net ns2 link set eth0 mtu $rmtu
 
 # transfer-net between nsr1 and nsr2.
 # these addresses are not used for connections.
@@ -147,7 +154,7 @@ table inet filter {
       # as PMTUd is off.
       # This rule is deleted for the last test, when we expect PMTUd
       # to kick in and ensure all packets meet mtu requirements.
-      meta length gt 1500 accept comment something-to-grep-for
+      meta length gt $lmtu accept comment something-to-grep-for
 
       # next line blocks connection w.o. working offload.
       # we only do this for reverse dir, because we expect packets to
@@ -243,8 +250,14 @@ test_tcp_forwarding_ip()
 
        sleep 3
 
-       kill $lpid
-       kill $cpid
+       if ps -p $lpid > /dev/null;then
+               kill $lpid
+       fi
+
+       if ps -p $cpid > /dev/null;then
+               kill $cpid
+       fi
+
        wait
 
        check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"