YNL_INDEX:=$(srctree)/Documentation/networking/netlink_spec/index.rst
YNL_RST_DIR:=$(srctree)/Documentation/networking/netlink_spec
YNL_YAML_DIR:=$(srctree)/Documentation/netlink/specs
-YNL_TOOL:=$(srctree)/tools/net/ynl/ynl-gen-rst.py
+YNL_TOOL:=$(srctree)/tools/net/ynl/pyynl/ynl_gen_rst.py
YNL_RST_FILES_TMP := $(patsubst %.yaml,%.rst,$(wildcard $(YNL_YAML_DIR)/*.yaml))
YNL_RST_FILES := $(patsubst $(YNL_YAML_DIR)%,$(YNL_RST_DIR)%, $(YNL_RST_FILES_TMP))
=============
The relation between PF, irq, napi, and queue can be observed via netlink spec::
- $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump queue-get --json='{"ifindex": 13}'
+ $ ./tools/net/ynl/pyynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump queue-get --json='{"ifindex": 13}'
[{'id': 0, 'ifindex': 13, 'napi-id': 539, 'type': 'rx'},
{'id': 1, 'ifindex': 13, 'napi-id': 540, 'type': 'rx'},
{'id': 2, 'ifindex': 13, 'napi-id': 541, 'type': 'rx'},
{'id': 3, 'ifindex': 13, 'napi-id': 542, 'type': 'tx'},
{'id': 4, 'ifindex': 13, 'napi-id': 543, 'type': 'tx'}]
- $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump napi-get --json='{"ifindex": 13}'
+ $ ./tools/net/ynl/pyynl/cli.py --spec Documentation/netlink/specs/netdev.yaml --dump napi-get --json='{"ifindex": 13}'
[{'id': 543, 'ifindex': 13, 'irq': 42},
{'id': 542, 'ifindex': 13, 'irq': 41},
{'id': 541, 'ifindex': 13, 'irq': 40},
Per-NAPI configuration can be done programmatically in a user application
or by using a script included in the kernel source tree:
-``tools/net/ynl/cli.py``.
+``tools/net/ynl/pyynl/cli.py``.
For example, using the script:
.. code-block:: bash
- $ kernel-source/tools/net/ynl/cli.py \
+ $ kernel-source/tools/net/ynl/pyynl/cli.py \
--spec Documentation/netlink/specs/netdev.yaml \
--do napi-set \
--json='{"id": 345,
SPDX-License-Identifier: GPL-2.0
This file is populated during the build of the documentation (htmldocs) by the
-tools/net/ynl/ynl-gen-rst.py script.
+tools/net/ynl/pyynl/ynl_gen_rst.py script.
and can use a YAML specification to issue Netlink requests
to the kernel. Only Generic Netlink is supported.
-The tool is located at ``tools/net/ynl/cli.py``. It accepts
+The tool is located at ``tools/net/ynl/pyynl/cli.py``. It accepts
a handful of arguments; the most important ones are:
- ``--spec`` - point to the spec file
Example use::
- $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
+ $ ./tools/net/ynl/pyynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
--do rings-get \
--json '{"header":{"dev-index": 18}}'
{'header': {'dev-index': 18, 'dev-name': 'eni1np1'},
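The same request can also be issued from Python with the library the CLI is
built on; a minimal sketch, assuming the script's directory is on ``sys.path``
the way ``cli.py`` sets it up::

  from lib import YnlFamily

  ynl = YnlFamily('Documentation/netlink/specs/ethtool.yaml')
  rings = ynl.do('rings-get', {'header': {'dev-index': 18}})
  print(rings)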
and run the regeneration tool. Grep the tree for ``YNL-GEN``
to see other examples.
-The code generation itself is performed by ``tools/net/ynl/ynl-gen-c.py``
+The code generation itself is performed by ``tools/net/ynl/pyynl/ynl_gen_c.py``
but it takes a few arguments so calling it directly for each file
quickly becomes tedious.
``tools/net/ynl/lib/`` contains an implementation of a C library
(based on libmnl) which integrates with code generated by
-``tools/net/ynl/ynl-gen-c.py`` to create easy to use netlink wrappers.
+``tools/net/ynl/pyynl/ynl_gen_c.py`` to create easy to use netlink wrappers.
YNL basics
----------
fi \
done
rm -f libynl.a
+ rm -rf pyynl/__pycache__
+ rm -rf pyynl/lib/__pycache__
.PHONY: all clean distclean $(SUBDIRS)
+++ /dev/null
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-
-import argparse
-import json
-import pathlib
-import pprint
-import sys
-
-sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
-from lib import YnlFamily, Netlink, NlError
-
-
-class YnlEncoder(json.JSONEncoder):
- def default(self, obj):
- if isinstance(obj, bytes):
- return bytes.hex(obj)
- if isinstance(obj, set):
- return list(obj)
- return json.JSONEncoder.default(self, obj)
-
-
-def main():
- description = """
- YNL CLI utility - a general purpose netlink utility that uses YAML
- specs to drive protocol encoding and decoding.
- """
- epilog = """
- The --multi option can be repeated to include several do operations
- in the same netlink payload.
- """
-
- parser = argparse.ArgumentParser(description=description,
- epilog=epilog)
- parser.add_argument('--spec', dest='spec', type=str, required=True)
- parser.add_argument('--schema', dest='schema', type=str)
- parser.add_argument('--no-schema', action='store_true')
- parser.add_argument('--json', dest='json_text', type=str)
-
- group = parser.add_mutually_exclusive_group()
- group.add_argument('--do', dest='do', metavar='DO-OPERATION', type=str)
- group.add_argument('--multi', dest='multi', nargs=2, action='append',
- metavar=('DO-OPERATION', 'JSON_TEXT'), type=str)
- group.add_argument('--dump', dest='dump', metavar='DUMP-OPERATION', type=str)
- group.add_argument('--list-ops', action='store_true')
- group.add_argument('--list-msgs', action='store_true')
-
- parser.add_argument('--duration', dest='duration', type=int,
- help='when subscribed, watch for DURATION seconds')
- parser.add_argument('--sleep', dest='duration', type=int,
- help='alias for duration')
- parser.add_argument('--subscribe', dest='ntf', type=str)
- parser.add_argument('--replace', dest='flags', action='append_const',
- const=Netlink.NLM_F_REPLACE)
- parser.add_argument('--excl', dest='flags', action='append_const',
- const=Netlink.NLM_F_EXCL)
- parser.add_argument('--create', dest='flags', action='append_const',
- const=Netlink.NLM_F_CREATE)
- parser.add_argument('--append', dest='flags', action='append_const',
- const=Netlink.NLM_F_APPEND)
- parser.add_argument('--process-unknown', action=argparse.BooleanOptionalAction)
- parser.add_argument('--output-json', action='store_true')
- parser.add_argument('--dbg-small-recv', default=0, const=4000,
- action='store', nargs='?', type=int)
- args = parser.parse_args()
-
- def output(msg):
- if args.output_json:
- print(json.dumps(msg, cls=YnlEncoder))
- else:
- pprint.PrettyPrinter().pprint(msg)
-
- if args.no_schema:
- args.schema = ''
-
- attrs = {}
- if args.json_text:
- attrs = json.loads(args.json_text)
-
- ynl = YnlFamily(args.spec, args.schema, args.process_unknown,
- recv_size=args.dbg_small_recv)
- if args.dbg_small_recv:
- ynl.set_recv_dbg(True)
-
- if args.ntf:
- ynl.ntf_subscribe(args.ntf)
-
- if args.list_ops:
- for op_name, op in ynl.ops.items():
- print(op_name, " [", ", ".join(op.modes), "]")
- if args.list_msgs:
- for op_name, op in ynl.msgs.items():
- print(op_name, " [", ", ".join(op.modes), "]")
-
- try:
- if args.do:
- reply = ynl.do(args.do, attrs, args.flags)
- output(reply)
- if args.dump:
- reply = ynl.dump(args.dump, attrs)
- output(reply)
- if args.multi:
- ops = [ (item[0], json.loads(item[1]), args.flags or []) for item in args.multi ]
- reply = ynl.do_multi(ops)
- output(reply)
- except NlError as e:
- print(e)
- exit(1)
-
- if args.ntf:
- try:
- for msg in ynl.poll_ntf(duration=args.duration):
- output(msg)
- except KeyboardInterrupt:
- pass
-
-
-if __name__ == "__main__":
- main()
+++ /dev/null
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-
-import argparse
-import json
-import pathlib
-import pprint
-import sys
-import re
-import os
-
-sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
-from lib import YnlFamily
-
-def args_to_req(ynl, op_name, args, req):
- """
- Verify and convert command-line arguments to the ynl-compatible request.
- """
- valid_attrs = ynl.operation_do_attributes(op_name)
- valid_attrs.remove('header') # not user-provided
-
- if len(args) == 0:
- print(f'no attributes, expected: {valid_attrs}')
- sys.exit(1)
-
- i = 0
- while i < len(args):
- attr = args[i]
- if i + 1 >= len(args):
- print(f'expected value for \'{attr}\'')
- sys.exit(1)
-
- if attr not in valid_attrs:
- print(f'invalid attribute \'{attr}\', expected: {valid_attrs}')
- sys.exit(1)
-
- val = args[i+1]
- i += 2
-
- req[attr] = val
-
-def print_field(reply, *desc):
- """
- Pretty-print a set of fields from the reply. desc specifies the
- fields and the optional type (bool/yn).
- """
- if len(desc) == 0:
- return print_field(reply, *zip(reply.keys(), reply.keys()))
-
- for spec in desc:
- try:
- field, name, tp = spec
- except:
- field, name = spec
- tp = 'int'
-
- value = reply.get(field, None)
- if tp == 'yn':
- value = 'yes' if value else 'no'
- elif tp == 'bool' or isinstance(value, bool):
- value = 'on' if value else 'off'
- else:
- value = 'n/a' if value is None else value
-
- print(f'{name}: {value}')
-
-def print_speed(name, value):
- """
- Print out the speed-like strings from the value dict.
- """
- speed_re = re.compile(r'[0-9]+base[^/]+/.+')
- speed = [ k for k, v in value.items() if v and speed_re.match(k) ]
- print(f'{name}: {" ".join(speed)}')
-
-def doit(ynl, args, op_name):
- """
- Prepare request header, parse arguments and doit.
- """
- req = {
- 'header': {
- 'dev-name': args.device,
- },
- }
-
- args_to_req(ynl, op_name, args.args, req)
- ynl.do(op_name, req)
-
-def dumpit(ynl, args, op_name, extra = {}):
- """
- Prepare request header, parse arguments and dumpit (filtering out the
- devices we're not interested in).
- """
- reply = ynl.dump(op_name, { 'header': {} } | extra)
- if not reply:
- return {}
-
- for msg in reply:
- if msg['header']['dev-name'] == args.device:
- if args.json:
- pprint.PrettyPrinter().pprint(msg)
- sys.exit(0)
- msg.pop('header', None)
- return msg
-
- print(f"Not supported for device {args.device}")
- sys.exit(1)
-
-def bits_to_dict(attr):
- """
- Convert ynl-formatted bitmask to a dict of bit=value.
- """
- ret = {}
- if 'bits' not in attr:
- return dict()
- if 'bit' not in attr['bits']:
- return dict()
- for bit in attr['bits']['bit']:
- if bit['name'] == '':
- continue
- name = bit['name']
- value = bit.get('value', False)
- ret[name] = value
- return ret
-
-def main():
- parser = argparse.ArgumentParser(description='ethtool wannabe')
- parser.add_argument('--json', action=argparse.BooleanOptionalAction)
- parser.add_argument('--show-priv-flags', action=argparse.BooleanOptionalAction)
- parser.add_argument('--set-priv-flags', action=argparse.BooleanOptionalAction)
- parser.add_argument('--show-eee', action=argparse.BooleanOptionalAction)
- parser.add_argument('--set-eee', action=argparse.BooleanOptionalAction)
- parser.add_argument('-a', '--show-pause', action=argparse.BooleanOptionalAction)
- parser.add_argument('-A', '--set-pause', action=argparse.BooleanOptionalAction)
- parser.add_argument('-c', '--show-coalesce', action=argparse.BooleanOptionalAction)
- parser.add_argument('-C', '--set-coalesce', action=argparse.BooleanOptionalAction)
- parser.add_argument('-g', '--show-ring', action=argparse.BooleanOptionalAction)
- parser.add_argument('-G', '--set-ring', action=argparse.BooleanOptionalAction)
- parser.add_argument('-k', '--show-features', action=argparse.BooleanOptionalAction)
- parser.add_argument('-K', '--set-features', action=argparse.BooleanOptionalAction)
- parser.add_argument('-l', '--show-channels', action=argparse.BooleanOptionalAction)
- parser.add_argument('-L', '--set-channels', action=argparse.BooleanOptionalAction)
- parser.add_argument('-T', '--show-time-stamping', action=argparse.BooleanOptionalAction)
- parser.add_argument('-S', '--statistics', action=argparse.BooleanOptionalAction)
- # TODO: --show-tunnels tunnel-info-get
- # TODO: --show-module module-get
- # TODO: --get-plca-cfg plca-get
- # TODO: --get-plca-status plca-get-status
- # TODO: --show-mm mm-get
- # TODO: --show-fec fec-get
- # TODO: --dump-module-eerpom module-eeprom-get
- # TODO: pse-get
- # TODO: rss-get
- parser.add_argument('device', metavar='device', type=str)
- parser.add_argument('args', metavar='args', type=str, nargs='*')
- global args
- args = parser.parse_args()
-
- script_abs_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
- spec = os.path.join(script_abs_dir,
- '../../../Documentation/netlink/specs/ethtool.yaml')
- schema = os.path.join(script_abs_dir,
- '../../../Documentation/netlink/genetlink-legacy.yaml')
-
- ynl = YnlFamily(spec, schema)
-
- if args.set_priv_flags:
- # TODO: parse the bitmask
- print("not implemented")
- return
-
- if args.set_eee:
- return doit(ynl, args, 'eee-set')
-
- if args.set_pause:
- return doit(ynl, args, 'pause-set')
-
- if args.set_coalesce:
- return doit(ynl, args, 'coalesce-set')
-
- if args.set_features:
- # TODO: parse the bitmask
- print("not implemented")
- return
-
- if args.set_channels:
- return doit(ynl, args, 'channels-set')
-
- if args.set_ring:
- return doit(ynl, args, 'rings-set')
-
- if args.show_priv_flags:
- flags = bits_to_dict(dumpit(ynl, args, 'privflags-get')['flags'])
- print_field(flags)
- return
-
- if args.show_eee:
- eee = dumpit(ynl, args, 'eee-get')
- ours = bits_to_dict(eee['modes-ours'])
- peer = bits_to_dict(eee['modes-peer'])
-
- if 'enabled' in eee:
- status = 'enabled' if eee['enabled'] else 'disabled'
- if 'active' in eee and eee['active']:
- status = status + ' - active'
- else:
- status = status + ' - inactive'
- else:
- status = 'not supported'
-
- print(f'EEE status: {status}')
- print_field(eee, ('tx-lpi-timer', 'Tx LPI'))
- print_speed('Advertised EEE link modes', ours)
- print_speed('Link partner advertised EEE link modes', peer)
-
- return
-
- if args.show_pause:
- print_field(dumpit(ynl, args, 'pause-get'),
- ('autoneg', 'Autonegotiate', 'bool'),
- ('rx', 'RX', 'bool'),
- ('tx', 'TX', 'bool'))
- return
-
- if args.show_coalesce:
- print_field(dumpit(ynl, args, 'coalesce-get'))
- return
-
- if args.show_features:
- reply = dumpit(ynl, args, 'features-get')
- available = bits_to_dict(reply['hw'])
- requested = bits_to_dict(reply['wanted']).keys()
- active = bits_to_dict(reply['active']).keys()
- never_changed = bits_to_dict(reply['nochange']).keys()
-
- for f in sorted(available):
- value = "off"
- if f in active:
- value = "on"
-
- fixed = ""
- if f not in available or f in never_changed:
- fixed = " [fixed]"
-
- req = ""
- if f in requested:
- if f in active:
- req = " [requested on]"
- else:
- req = " [requested off]"
-
- print(f'{f}: {value}{fixed}{req}')
-
- return
-
- if args.show_channels:
- reply = dumpit(ynl, args, 'channels-get')
- print(f'Channel parameters for {args.device}:')
-
- print(f'Pre-set maximums:')
- print_field(reply,
- ('rx-max', 'RX'),
- ('tx-max', 'TX'),
- ('other-max', 'Other'),
- ('combined-max', 'Combined'))
-
- print(f'Current hardware settings:')
- print_field(reply,
- ('rx-count', 'RX'),
- ('tx-count', 'TX'),
- ('other-count', 'Other'),
- ('combined-count', 'Combined'))
-
- return
-
- if args.show_ring:
- reply = dumpit(ynl, args, 'channels-get')
-
- print(f'Ring parameters for {args.device}:')
-
- print(f'Pre-set maximums:')
- print_field(reply,
- ('rx-max', 'RX'),
- ('rx-mini-max', 'RX Mini'),
- ('rx-jumbo-max', 'RX Jumbo'),
- ('tx-max', 'TX'))
-
- print(f'Current hardware settings:')
- print_field(reply,
- ('rx', 'RX'),
- ('rx-mini', 'RX Mini'),
- ('rx-jumbo', 'RX Jumbo'),
- ('tx', 'TX'))
-
- print_field(reply,
- ('rx-buf-len', 'RX Buf Len'),
- ('cqe-size', 'CQE Size'),
- ('tx-push', 'TX Push', 'bool'))
-
- return
-
- if args.statistics:
- print(f'NIC statistics:')
-
- # TODO: pass id?
- strset = dumpit(ynl, args, 'strset-get')
- pprint.PrettyPrinter().pprint(strset)
-
- req = {
- 'groups': {
- 'size': 1,
- 'bits': {
- 'bit':
- # TODO: support passing the bitmask
- #[
- #{ 'name': 'eth-phy', 'value': True },
- { 'name': 'eth-mac', 'value': True },
- #{ 'name': 'eth-ctrl', 'value': True },
- #{ 'name': 'rmon', 'value': True },
- #],
- },
- },
- }
-
- rsp = dumpit(ynl, args, 'stats-get', req)
- pprint.PrettyPrinter().pprint(rsp)
- return
-
- if args.show_time_stamping:
- req = {
- 'header': {
- 'flags': 'stats',
- },
- }
-
- tsinfo = dumpit(ynl, args, 'tsinfo-get', req)
-
- print(f'Time stamping parameters for {args.device}:')
-
- print('Capabilities:')
- [print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
-
- print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
-
- print('Hardware Transmit Timestamp Modes:')
- [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
-
- print('Hardware Receive Filter Modes:')
- [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
-
- print('Statistics:')
- [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
- return
-
- print(f'Settings for {args.device}:')
- linkmodes = dumpit(ynl, args, 'linkmodes-get')
- ours = bits_to_dict(linkmodes['ours'])
-
- supported_ports = ('TP', 'AUI', 'BNC', 'MII', 'FIBRE', 'Backplane')
- ports = [ p for p in supported_ports if ours.get(p, False)]
- print(f'Supported ports: [ {" ".join(ports)} ]')
-
- print_speed('Supported link modes', ours)
-
- print_field(ours, ('Pause', 'Supported pause frame use', 'yn'))
- print_field(ours, ('Autoneg', 'Supports auto-negotiation', 'yn'))
-
- supported_fec = ('None', 'PS', 'BASER', 'LLRS')
- fec = [ p for p in supported_fec if ours.get(p, False)]
- fec_str = " ".join(fec)
- if len(fec) == 0:
- fec_str = "Not reported"
-
- print(f'Supported FEC modes: {fec_str}')
-
- speed = 'Unknown!'
- if linkmodes['speed'] > 0 and linkmodes['speed'] < 0xffffffff:
- speed = f'{linkmodes["speed"]}Mb/s'
- print(f'Speed: {speed}')
-
- duplex_modes = {
- 0: 'Half',
- 1: 'Full',
- }
- duplex = duplex_modes.get(linkmodes["duplex"], None)
- if not duplex:
- duplex = f'Unknown! ({linkmodes["duplex"]})'
- print(f'Duplex: {duplex}')
-
- autoneg = "off"
- if linkmodes.get("autoneg", 0) != 0:
- autoneg = "on"
- print(f'Auto-negotiation: {autoneg}')
-
- ports = {
- 0: 'Twisted Pair',
- 1: 'AUI',
- 2: 'MII',
- 3: 'FIBRE',
- 4: 'BNC',
- 5: 'Directly Attached Copper',
- 0xef: 'None',
- }
- linkinfo = dumpit(ynl, args, 'linkinfo-get')
- print(f'Port: {ports.get(linkinfo["port"], "Other")}')
-
- print_field(linkinfo, ('phyaddr', 'PHYAD'))
-
- transceiver = {
- 0: 'Internal',
- 1: 'External',
- }
- print(f'Transceiver: {transceiver.get(linkinfo["transceiver"], "Unknown")}')
-
- mdix_ctrl = {
- 1: 'off',
- 2: 'on',
- }
- mdix = mdix_ctrl.get(linkinfo['tp-mdix-ctrl'], None)
- if mdix:
- mdix = mdix + ' (forced)'
- else:
- mdix = mdix_ctrl.get(linkinfo['tp-mdix'], 'Unknown (auto)')
- print(f'MDI-X: {mdix}')
-
- debug = dumpit(ynl, args, 'debug-get')
- msgmask = bits_to_dict(debug.get("msgmask", [])).keys()
- print(f'Current message level: {" ".join(msgmask)}')
-
- linkstate = dumpit(ynl, args, 'linkstate-get')
- detected_states = {
- 0: 'no',
- 1: 'yes',
- }
- # TODO: wol-get
- detected = detected_states.get(linkstate['link'], 'unknown')
- print(f'Link detected: {detected}')
-
-if __name__ == '__main__':
- main()
YNL_GEN_ARG_ethtool:=--user-header linux/ethtool_netlink.h \
--exclude-op stats-get
-TOOL:=../ynl-gen-c.py
+TOOL:=../pyynl/ynl_gen_c.py
GENS_PATHS=$(shell grep -nrI --files-without-match \
'protocol: netlink' \
clean:
rm -f *.o *.d *~
- rm -rf __pycache__
distclean: clean
rm -f *.a
+++ /dev/null
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-
-from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
- SpecFamily, SpecOperation
-from .ynl import YnlFamily, Netlink, NlError
-
-__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
- "SpecFamily", "SpecOperation", "YnlFamily", "Netlink", "NlError"]
+++ /dev/null
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-
-import collections
-import importlib
-import os
-import yaml
-
-
-# To be loaded dynamically as needed
-jsonschema = None
-
-
-class SpecElement:
- """Netlink spec element.
-
- Abstract element of the Netlink spec. Implements the dictionary interface
- for access to the raw spec. Supports iterative resolution of dependencies
- across elements and class inheritance levels. The elements of the spec
- may refer to each other, and although loops should be very rare, having
- to maintain correct ordering of instantiation is painful, so the resolve()
- method should be used to perform parts of init which require access to
- other parts of the spec.
-
- Attributes:
- yaml raw spec as loaded from the spec file
- family back reference to the full family
-
- name name of the entity as listed in the spec (optional)
- ident_name name which can be safely used as identifier in code (optional)
- """
- def __init__(self, family, yaml):
- self.yaml = yaml
- self.family = family
-
- if 'name' in self.yaml:
- self.name = self.yaml['name']
- self.ident_name = self.name.replace('-', '_')
-
- self._super_resolved = False
- family.add_unresolved(self)
-
- def __getitem__(self, key):
- return self.yaml[key]
-
- def __contains__(self, key):
- return key in self.yaml
-
- def get(self, key, default=None):
- return self.yaml.get(key, default)
-
- def resolve_up(self, up):
- if not self._super_resolved:
- up.resolve()
- self._super_resolved = True
-
- def resolve(self):
- pass
-
-
-class SpecEnumEntry(SpecElement):
- """ Entry within an enum declared in the Netlink spec.
-
- Attributes:
- doc documentation string
- enum_set back reference to the enum
- value numerical value of this enum (use accessors in most situations!)
-
- Methods:
- raw_value raw value, i.e. the id in the enum, unlike user value which is a mask for flags
- user_value user value, same as raw value for enums, for flags it's the mask
- """
- def __init__(self, enum_set, yaml, prev, value_start):
- if isinstance(yaml, str):
- yaml = {'name': yaml}
- super().__init__(enum_set.family, yaml)
-
- self.doc = yaml.get('doc', '')
- self.enum_set = enum_set
-
- if 'value' in yaml:
- self.value = yaml['value']
- elif prev:
- self.value = prev.value + 1
- else:
- self.value = value_start
-
- def has_doc(self):
- return bool(self.doc)
-
- def raw_value(self):
- return self.value
-
- def user_value(self, as_flags=None):
- if self.enum_set['type'] == 'flags' or as_flags:
- return 1 << self.value
- else:
- return self.value
-
-
-class SpecEnumSet(SpecElement):
- """ Enum type
-
- Represents an enumeration (list of numerical constants)
- as declared in the "definitions" section of the spec.
-
- Attributes:
- type enum or flags
- entries entries by name
- entries_by_val entries by value
- Methods:
- get_mask for flags compute the mask of all defined values
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
-
- self.type = yaml['type']
-
- prev_entry = None
- value_start = self.yaml.get('value-start', 0)
- self.entries = dict()
- self.entries_by_val = dict()
- for entry in self.yaml['entries']:
- e = self.new_entry(entry, prev_entry, value_start)
- self.entries[e.name] = e
- self.entries_by_val[e.raw_value()] = e
- prev_entry = e
-
- def new_entry(self, entry, prev_entry, value_start):
- return SpecEnumEntry(self, entry, prev_entry, value_start)
-
- def has_doc(self):
- if 'doc' in self.yaml:
- return True
- return self.has_entry_doc()
-
- def has_entry_doc(self):
- for entry in self.entries.values():
- if entry.has_doc():
- return True
- return False
-
- def get_mask(self, as_flags=None):
- mask = 0
- for e in self.entries.values():
- mask += e.user_value(as_flags)
- return mask
-
-
-class SpecAttr(SpecElement):
- """ Single Netlink attribute type
-
- Represents a single attribute type within an attr space.
-
- Attributes:
- type string, attribute type
- value numerical ID when serialized
- attr_set Attribute Set containing this attr
- is_multi bool, attr may repeat multiple times
- struct_name string, name of struct definition
- sub_type string, name of sub type
- len integer, optional byte length of binary types
- display_hint string, hint to help choose format specifier
- when displaying the value
- sub_message string, name of sub message type
- selector string, name of attribute used to select
- sub-message type
-
- is_auto_scalar bool, attr is a variable-size scalar
- """
- def __init__(self, family, attr_set, yaml, value):
- super().__init__(family, yaml)
-
- self.type = yaml['type']
- self.value = value
- self.attr_set = attr_set
- self.is_multi = yaml.get('multi-attr', False)
- self.struct_name = yaml.get('struct')
- self.sub_type = yaml.get('sub-type')
- self.byte_order = yaml.get('byte-order')
- self.len = yaml.get('len')
- self.display_hint = yaml.get('display-hint')
- self.sub_message = yaml.get('sub-message')
- self.selector = yaml.get('selector')
-
- self.is_auto_scalar = self.type == "sint" or self.type == "uint"
-
-
-class SpecAttrSet(SpecElement):
- """ Netlink Attribute Set class.
-
- Represents a ID space of attributes within Netlink.
-
- Note that unlike other elements, which expose contents of the raw spec
- via the dictionary interface Attribute Set exposes attributes by name.
-
- Attributes:
- attrs ordered dict of all attributes (indexed by name)
- attrs_by_val ordered dict of all attributes (indexed by value)
- subset_of parent set if this is a subset, otherwise None
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
-
- self.subset_of = self.yaml.get('subset-of', None)
-
- self.attrs = collections.OrderedDict()
- self.attrs_by_val = collections.OrderedDict()
-
- if self.subset_of is None:
- val = 1
- for elem in self.yaml['attributes']:
- if 'value' in elem:
- val = elem['value']
-
- attr = self.new_attr(elem, val)
- self.attrs[attr.name] = attr
- self.attrs_by_val[attr.value] = attr
- val += 1
- else:
- real_set = family.attr_sets[self.subset_of]
- for elem in self.yaml['attributes']:
- real_attr = real_set[elem['name']]
- combined_elem = real_attr.yaml | elem
- attr = self.new_attr(combined_elem, real_attr.value)
-
- self.attrs[attr.name] = attr
- self.attrs_by_val[attr.value] = attr
-
- def new_attr(self, elem, value):
- return SpecAttr(self.family, self, elem, value)
-
- def __getitem__(self, key):
- return self.attrs[key]
-
- def __contains__(self, key):
- return key in self.attrs
-
- def __iter__(self):
- yield from self.attrs
-
- def items(self):
- return self.attrs.items()
-
-
-class SpecStructMember(SpecElement):
- """Struct member attribute
-
- Represents a single struct member attribute.
-
- Attributes:
- type string, type of the member attribute
- byte_order string or None for native byte order
- enum string, name of the enum definition
- len integer, optional byte length of binary types
- display_hint string, hint to help choose format specifier
- when displaying the value
- struct string, name of nested struct type
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
- self.type = yaml['type']
- self.byte_order = yaml.get('byte-order')
- self.enum = yaml.get('enum')
- self.len = yaml.get('len')
- self.display_hint = yaml.get('display-hint')
- self.struct = yaml.get('struct')
-
-
-class SpecStruct(SpecElement):
- """Netlink struct type
-
- Represents a C struct definition.
-
- Attributes:
- members ordered list of struct members
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
-
- self.members = []
- for member in yaml.get('members', []):
- self.members.append(self.new_member(family, member))
-
- def new_member(self, family, elem):
- return SpecStructMember(family, elem)
-
- def __iter__(self):
- yield from self.members
-
- def items(self):
- return self.members.items()
-
-
-class SpecSubMessage(SpecElement):
- """ Netlink sub-message definition
-
- Represents a set of sub-message formats for polymorphic nlattrs
- that contain type-specific sub messages.
-
- Attributes:
- name string, name of sub-message definition
- formats dict of sub-message formats indexed by match value
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
-
- self.formats = collections.OrderedDict()
- for elem in self.yaml['formats']:
- format = self.new_format(family, elem)
- self.formats[format.value] = format
-
- def new_format(self, family, format):
- return SpecSubMessageFormat(family, format)
-
-
-class SpecSubMessageFormat(SpecElement):
- """ Netlink sub-message format definition
-
- Represents a single format for a sub-message.
-
- Attributes:
- value attribute value to match against type selector
- fixed_header string, name of fixed header, or None
- attr_set string, name of attribute set, or None
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
-
- self.value = yaml.get('value')
- self.fixed_header = yaml.get('fixed-header')
- self.attr_set = yaml.get('attribute-set')
-
-
-class SpecOperation(SpecElement):
- """Netlink Operation
-
- Information about a single Netlink operation.
-
- Attributes:
- value numerical ID when serialized, None if req/rsp values differ
-
- req_value numerical ID when serialized, user -> kernel
- rsp_value numerical ID when serialized, user <- kernel
- modes supported operation modes (do, dump, event etc.)
- is_call bool, whether the operation is a call
- is_async bool, whether the operation is a notification
- is_resv bool, whether the operation does not exist (it's just a reserved ID)
- attr_set attribute set name
- fixed_header string, optional name of fixed header struct
-
- yaml raw spec as loaded from the spec file
- """
- def __init__(self, family, yaml, req_value, rsp_value):
- super().__init__(family, yaml)
-
- self.value = req_value if req_value == rsp_value else None
- self.req_value = req_value
- self.rsp_value = rsp_value
-
- self.modes = yaml.keys() & {'do', 'dump', 'event', 'notify'}
- self.is_call = 'do' in yaml or 'dump' in yaml
- self.is_async = 'notify' in yaml or 'event' in yaml
- self.is_resv = not self.is_async and not self.is_call
- self.fixed_header = self.yaml.get('fixed-header', family.fixed_header)
-
- # Added by resolve:
- self.attr_set = None
- delattr(self, "attr_set")
-
- def resolve(self):
- self.resolve_up(super())
-
- if 'attribute-set' in self.yaml:
- attr_set_name = self.yaml['attribute-set']
- elif 'notify' in self.yaml:
- msg = self.family.msgs[self.yaml['notify']]
- attr_set_name = msg['attribute-set']
- elif self.is_resv:
- attr_set_name = ''
- else:
- raise Exception(f"Can't resolve attribute set for op '{self.name}'")
- if attr_set_name:
- self.attr_set = self.family.attr_sets[attr_set_name]
-
-
-class SpecMcastGroup(SpecElement):
- """Netlink Multicast Group
-
- Information about a multicast group.
-
- Value is only used for classic netlink families that use the
- netlink-raw schema. Genetlink families use dynamic ID allocation
- where the ids of multicast groups get resolved at runtime. Value
- will be None for genetlink families.
-
- Attributes:
- name name of the mulitcast group
- value integer id of this multicast group for netlink-raw or None
- yaml raw spec as loaded from the spec file
- """
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
- self.value = self.yaml.get('value')
-
-
-class SpecFamily(SpecElement):
- """ Netlink Family Spec class.
-
- Netlink family information loaded from a spec (e.g. in YAML).
- Takes care of unfolding implicit information which can be skipped
- in the spec itself for brevity.
-
- The class can be used like a dictionary to access the raw spec
- elements but that's usually a bad idea.
-
- Attributes:
- proto protocol type (e.g. genetlink)
- msg_id_model enum-model for operations (unified, directional etc.)
- license spec license (loaded from an SPDX tag on the spec)
-
- attr_sets dict of attribute sets
- msgs dict of all messages (index by name)
- sub_msgs dict of all sub messages (index by name)
- ops dict of all valid requests / responses
- ntfs dict of all async events
- consts dict of all constants/enums
- fixed_header string, optional name of family default fixed header struct
- mcast_groups dict of all multicast groups (index by name)
- kernel_family dict of kernel family attributes
- """
- def __init__(self, spec_path, schema_path=None, exclude_ops=None):
- with open(spec_path, "r") as stream:
- prefix = '# SPDX-License-Identifier: '
- first = stream.readline().strip()
- if not first.startswith(prefix):
- raise Exception('SPDX license tag required in the spec')
- self.license = first[len(prefix):]
-
- stream.seek(0)
- spec = yaml.safe_load(stream)
-
- self._resolution_list = []
-
- super().__init__(self, spec)
-
- self._exclude_ops = exclude_ops if exclude_ops else []
-
- self.proto = self.yaml.get('protocol', 'genetlink')
- self.msg_id_model = self.yaml['operations'].get('enum-model', 'unified')
-
- if schema_path is None:
- schema_path = os.path.dirname(os.path.dirname(spec_path)) + f'/{self.proto}.yaml'
- if schema_path:
- global jsonschema
-
- with open(schema_path, "r") as stream:
- schema = yaml.safe_load(stream)
-
- if jsonschema is None:
- jsonschema = importlib.import_module("jsonschema")
-
- jsonschema.validate(self.yaml, schema)
-
- self.attr_sets = collections.OrderedDict()
- self.sub_msgs = collections.OrderedDict()
- self.msgs = collections.OrderedDict()
- self.req_by_value = collections.OrderedDict()
- self.rsp_by_value = collections.OrderedDict()
- self.ops = collections.OrderedDict()
- self.ntfs = collections.OrderedDict()
- self.consts = collections.OrderedDict()
- self.mcast_groups = collections.OrderedDict()
- self.kernel_family = collections.OrderedDict(self.yaml.get('kernel-family', {}))
-
- last_exception = None
- while len(self._resolution_list) > 0:
- resolved = []
- unresolved = self._resolution_list
- self._resolution_list = []
-
- for elem in unresolved:
- try:
- elem.resolve()
- except (KeyError, AttributeError) as e:
- self._resolution_list.append(elem)
- last_exception = e
- continue
-
- resolved.append(elem)
-
- if len(resolved) == 0:
- raise last_exception
-
- def new_enum(self, elem):
- return SpecEnumSet(self, elem)
-
- def new_attr_set(self, elem):
- return SpecAttrSet(self, elem)
-
- def new_struct(self, elem):
- return SpecStruct(self, elem)
-
- def new_sub_message(self, elem):
- return SpecSubMessage(self, elem);
-
- def new_operation(self, elem, req_val, rsp_val):
- return SpecOperation(self, elem, req_val, rsp_val)
-
- def new_mcast_group(self, elem):
- return SpecMcastGroup(self, elem)
-
- def add_unresolved(self, elem):
- self._resolution_list.append(elem)
-
- def _dictify_ops_unified(self):
- self.fixed_header = self.yaml['operations'].get('fixed-header')
- val = 1
- for elem in self.yaml['operations']['list']:
- if 'value' in elem:
- val = elem['value']
-
- op = self.new_operation(elem, val, val)
- val += 1
-
- self.msgs[op.name] = op
-
- def _dictify_ops_directional(self):
- self.fixed_header = self.yaml['operations'].get('fixed-header')
- req_val = rsp_val = 1
- for elem in self.yaml['operations']['list']:
- if 'notify' in elem or 'event' in elem:
- if 'value' in elem:
- rsp_val = elem['value']
- req_val_next = req_val
- rsp_val_next = rsp_val + 1
- req_val = None
- elif 'do' in elem or 'dump' in elem:
- mode = elem['do'] if 'do' in elem else elem['dump']
-
- v = mode.get('request', {}).get('value', None)
- if v:
- req_val = v
- v = mode.get('reply', {}).get('value', None)
- if v:
- rsp_val = v
-
- rsp_inc = 1 if 'reply' in mode else 0
- req_val_next = req_val + 1
- rsp_val_next = rsp_val + rsp_inc
- else:
- raise Exception("Can't parse directional ops")
-
- if req_val == req_val_next:
- req_val = None
- if rsp_val == rsp_val_next:
- rsp_val = None
-
- skip = False
- for exclude in self._exclude_ops:
- skip |= bool(exclude.match(elem['name']))
- if not skip:
- op = self.new_operation(elem, req_val, rsp_val)
-
- req_val = req_val_next
- rsp_val = rsp_val_next
-
- self.msgs[op.name] = op
-
- def find_operation(self, name):
- """
- For a given operation name, find and return operation spec.
- """
- for op in self.yaml['operations']['list']:
- if name == op['name']:
- return op
- return None
-
- def resolve(self):
- self.resolve_up(super())
-
- definitions = self.yaml.get('definitions', [])
- for elem in definitions:
- if elem['type'] == 'enum' or elem['type'] == 'flags':
- self.consts[elem['name']] = self.new_enum(elem)
- elif elem['type'] == 'struct':
- self.consts[elem['name']] = self.new_struct(elem)
- else:
- self.consts[elem['name']] = elem
-
- for elem in self.yaml['attribute-sets']:
- attr_set = self.new_attr_set(elem)
- self.attr_sets[elem['name']] = attr_set
-
- for elem in self.yaml.get('sub-messages', []):
- sub_message = self.new_sub_message(elem)
- self.sub_msgs[sub_message.name] = sub_message
-
- if self.msg_id_model == 'unified':
- self._dictify_ops_unified()
- elif self.msg_id_model == 'directional':
- self._dictify_ops_directional()
-
- for op in self.msgs.values():
- if op.req_value is not None:
- self.req_by_value[op.req_value] = op
- if op.rsp_value is not None:
- self.rsp_by_value[op.rsp_value] = op
- if not op.is_async and 'attribute-set' in op:
- self.ops[op.name] = op
- elif op.is_async:
- self.ntfs[op.name] = op
-
- mcgs = self.yaml.get('mcast-groups')
- if mcgs:
- for elem in mcgs['list']:
- mcg = self.new_mcast_group(elem)
- self.mcast_groups[elem['name']] = mcg
+++ /dev/null
-# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-
-from collections import namedtuple
-from enum import Enum
-import functools
-import os
-import random
-import socket
-import struct
-from struct import Struct
-import sys
-import yaml
-import ipaddress
-import uuid
-import queue
-import selectors
-import time
-
-from .nlspec import SpecFamily
-
-#
-# Generic Netlink code which should really be in some library, but I can't quickly find one.
-#
-
-
-class Netlink:
- # Netlink socket
- SOL_NETLINK = 270
-
- NETLINK_ADD_MEMBERSHIP = 1
- NETLINK_CAP_ACK = 10
- NETLINK_EXT_ACK = 11
- NETLINK_GET_STRICT_CHK = 12
-
- # Netlink message
- NLMSG_ERROR = 2
- NLMSG_DONE = 3
-
- NLM_F_REQUEST = 1
- NLM_F_ACK = 4
- NLM_F_ROOT = 0x100
- NLM_F_MATCH = 0x200
-
- NLM_F_REPLACE = 0x100
- NLM_F_EXCL = 0x200
- NLM_F_CREATE = 0x400
- NLM_F_APPEND = 0x800
-
- NLM_F_CAPPED = 0x100
- NLM_F_ACK_TLVS = 0x200
-
- NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
-
- NLA_F_NESTED = 0x8000
- NLA_F_NET_BYTEORDER = 0x4000
-
- NLA_TYPE_MASK = NLA_F_NESTED | NLA_F_NET_BYTEORDER
-
- # Genetlink defines
- NETLINK_GENERIC = 16
-
- GENL_ID_CTRL = 0x10
-
- # nlctrl
- CTRL_CMD_GETFAMILY = 3
-
- CTRL_ATTR_FAMILY_ID = 1
- CTRL_ATTR_FAMILY_NAME = 2
- CTRL_ATTR_MAXATTR = 5
- CTRL_ATTR_MCAST_GROUPS = 7
-
- CTRL_ATTR_MCAST_GRP_NAME = 1
- CTRL_ATTR_MCAST_GRP_ID = 2
-
- # Extack types
- NLMSGERR_ATTR_MSG = 1
- NLMSGERR_ATTR_OFFS = 2
- NLMSGERR_ATTR_COOKIE = 3
- NLMSGERR_ATTR_POLICY = 4
- NLMSGERR_ATTR_MISS_TYPE = 5
- NLMSGERR_ATTR_MISS_NEST = 6
-
- # Policy types
- NL_POLICY_TYPE_ATTR_TYPE = 1
- NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2
- NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3
- NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4
- NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5
- NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6
- NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7
- NL_POLICY_TYPE_ATTR_POLICY_IDX = 8
- NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9
- NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10
- NL_POLICY_TYPE_ATTR_PAD = 11
- NL_POLICY_TYPE_ATTR_MASK = 12
-
- AttrType = Enum('AttrType', ['flag', 'u8', 'u16', 'u32', 'u64',
- 's8', 's16', 's32', 's64',
- 'binary', 'string', 'nul-string',
- 'nested', 'nested-array',
- 'bitfield32', 'sint', 'uint'])
-
-class NlError(Exception):
- def __init__(self, nl_msg):
- self.nl_msg = nl_msg
- self.error = -nl_msg.error
-
- def __str__(self):
- return f"Netlink error: {os.strerror(self.error)}\n{self.nl_msg}"
-
-
-class ConfigError(Exception):
- pass
-
-
-class NlAttr:
- ScalarFormat = namedtuple('ScalarFormat', ['native', 'big', 'little'])
- type_formats = {
- 'u8' : ScalarFormat(Struct('B'), Struct("B"), Struct("B")),
- 's8' : ScalarFormat(Struct('b'), Struct("b"), Struct("b")),
- 'u16': ScalarFormat(Struct('H'), Struct(">H"), Struct("<H")),
- 's16': ScalarFormat(Struct('h'), Struct(">h"), Struct("<h")),
- 'u32': ScalarFormat(Struct('I'), Struct(">I"), Struct("<I")),
- 's32': ScalarFormat(Struct('i'), Struct(">i"), Struct("<i")),
- 'u64': ScalarFormat(Struct('Q'), Struct(">Q"), Struct("<Q")),
- 's64': ScalarFormat(Struct('q'), Struct(">q"), Struct("<q"))
- }
-
- def __init__(self, raw, offset):
- self._len, self._type = struct.unpack("HH", raw[offset : offset + 4])
- self.type = self._type & ~Netlink.NLA_TYPE_MASK
- self.is_nest = self._type & Netlink.NLA_F_NESTED
- self.payload_len = self._len
- self.full_len = (self.payload_len + 3) & ~3
- self.raw = raw[offset + 4 : offset + self.payload_len]
-
- @classmethod
- def get_format(cls, attr_type, byte_order=None):
- format = cls.type_formats[attr_type]
- if byte_order:
- return format.big if byte_order == "big-endian" \
- else format.little
- return format.native
-
- def as_scalar(self, attr_type, byte_order=None):
- format = self.get_format(attr_type, byte_order)
- return format.unpack(self.raw)[0]
-
- def as_auto_scalar(self, attr_type, byte_order=None):
- if len(self.raw) != 4 and len(self.raw) != 8:
- raise Exception(f"Auto-scalar len payload be 4 or 8 bytes, got {len(self.raw)}")
- real_type = attr_type[0] + str(len(self.raw) * 8)
- format = self.get_format(real_type, byte_order)
- return format.unpack(self.raw)[0]
-
- def as_strz(self):
- return self.raw.decode('ascii')[:-1]
-
- def as_bin(self):
- return self.raw
-
- def as_c_array(self, type):
- format = self.get_format(type)
- return [ x[0] for x in format.iter_unpack(self.raw) ]
-
- def __repr__(self):
- return f"[type:{self.type} len:{self._len}] {self.raw}"
-
-
-class NlAttrs:
- def __init__(self, msg, offset=0):
- self.attrs = []
-
- while offset < len(msg):
- attr = NlAttr(msg, offset)
- offset += attr.full_len
- self.attrs.append(attr)
-
- def __iter__(self):
- yield from self.attrs
-
- def __repr__(self):
- msg = ''
- for a in self.attrs:
- if msg:
- msg += '\n'
- msg += repr(a)
- return msg
-
-
-class NlMsg:
- def __init__(self, msg, offset, attr_space=None):
- self.hdr = msg[offset : offset + 16]
-
- self.nl_len, self.nl_type, self.nl_flags, self.nl_seq, self.nl_portid = \
- struct.unpack("IHHII", self.hdr)
-
- self.raw = msg[offset + 16 : offset + self.nl_len]
-
- self.error = 0
- self.done = 0
-
- extack_off = None
- if self.nl_type == Netlink.NLMSG_ERROR:
- self.error = struct.unpack("i", self.raw[0:4])[0]
- self.done = 1
- extack_off = 20
- elif self.nl_type == Netlink.NLMSG_DONE:
- self.error = struct.unpack("i", self.raw[0:4])[0]
- self.done = 1
- extack_off = 4
-
- self.extack = None
- if self.nl_flags & Netlink.NLM_F_ACK_TLVS and extack_off:
- self.extack = dict()
- extack_attrs = NlAttrs(self.raw[extack_off:])
- for extack in extack_attrs:
- if extack.type == Netlink.NLMSGERR_ATTR_MSG:
- self.extack['msg'] = extack.as_strz()
- elif extack.type == Netlink.NLMSGERR_ATTR_MISS_TYPE:
- self.extack['miss-type'] = extack.as_scalar('u32')
- elif extack.type == Netlink.NLMSGERR_ATTR_MISS_NEST:
- self.extack['miss-nest'] = extack.as_scalar('u32')
- elif extack.type == Netlink.NLMSGERR_ATTR_OFFS:
- self.extack['bad-attr-offs'] = extack.as_scalar('u32')
- elif extack.type == Netlink.NLMSGERR_ATTR_POLICY:
- self.extack['policy'] = self._decode_policy(extack.raw)
- else:
- if 'unknown' not in self.extack:
- self.extack['unknown'] = []
- self.extack['unknown'].append(extack)
-
- if attr_space:
- # We don't have the ability to parse nests yet, so only do global
- if 'miss-type' in self.extack and 'miss-nest' not in self.extack:
- miss_type = self.extack['miss-type']
- if miss_type in attr_space.attrs_by_val:
- spec = attr_space.attrs_by_val[miss_type]
- self.extack['miss-type'] = spec['name']
- if 'doc' in spec:
- self.extack['miss-type-doc'] = spec['doc']
-
- def _decode_policy(self, raw):
- policy = {}
- for attr in NlAttrs(raw):
- if attr.type == Netlink.NL_POLICY_TYPE_ATTR_TYPE:
- type = attr.as_scalar('u32')
- policy['type'] = Netlink.AttrType(type).name
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MIN_VALUE_S:
- policy['min-value'] = attr.as_scalar('s64')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MAX_VALUE_S:
- policy['max-value'] = attr.as_scalar('s64')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MIN_VALUE_U:
- policy['min-value'] = attr.as_scalar('u64')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MAX_VALUE_U:
- policy['max-value'] = attr.as_scalar('u64')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MIN_LENGTH:
- policy['min-length'] = attr.as_scalar('u32')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MAX_LENGTH:
- policy['max-length'] = attr.as_scalar('u32')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_BITFIELD32_MASK:
- policy['bitfield32-mask'] = attr.as_scalar('u32')
- elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MASK:
- policy['mask'] = attr.as_scalar('u64')
- return policy
-
- def cmd(self):
- return self.nl_type
-
- def __repr__(self):
- msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}"
- if self.error:
- msg += '\n\terror: ' + str(self.error)
- if self.extack:
- msg += '\n\textack: ' + repr(self.extack)
- return msg
-
-
-class NlMsgs:
- def __init__(self, data, attr_space=None):
- self.msgs = []
-
- offset = 0
- while offset < len(data):
- msg = NlMsg(data, offset, attr_space=attr_space)
- offset += msg.nl_len
- self.msgs.append(msg)
-
- def __iter__(self):
- yield from self.msgs
-
-
-genl_family_name_to_id = None
-
-
-def _genl_msg(nl_type, nl_flags, genl_cmd, genl_version, seq=None):
- # we prepend length in _genl_msg_finalize()
- if seq is None:
- seq = random.randint(1, 1024)
- nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
- genlmsg = struct.pack("BBH", genl_cmd, genl_version, 0)
- return nlmsg + genlmsg
-
-
-def _genl_msg_finalize(msg):
- return struct.pack("I", len(msg) + 4) + msg
-
-
-def _genl_load_families():
- with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC) as sock:
- sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
-
- msg = _genl_msg(Netlink.GENL_ID_CTRL,
- Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK | Netlink.NLM_F_DUMP,
- Netlink.CTRL_CMD_GETFAMILY, 1)
- msg = _genl_msg_finalize(msg)
-
- sock.send(msg, 0)
-
- global genl_family_name_to_id
- genl_family_name_to_id = dict()
-
- while True:
- reply = sock.recv(128 * 1024)
- nms = NlMsgs(reply)
- for nl_msg in nms:
- if nl_msg.error:
- print("Netlink error:", nl_msg.error)
- return
- if nl_msg.done:
- return
-
- gm = GenlMsg(nl_msg)
- fam = dict()
- for attr in NlAttrs(gm.raw):
- if attr.type == Netlink.CTRL_ATTR_FAMILY_ID:
- fam['id'] = attr.as_scalar('u16')
- elif attr.type == Netlink.CTRL_ATTR_FAMILY_NAME:
- fam['name'] = attr.as_strz()
- elif attr.type == Netlink.CTRL_ATTR_MAXATTR:
- fam['maxattr'] = attr.as_scalar('u32')
- elif attr.type == Netlink.CTRL_ATTR_MCAST_GROUPS:
- fam['mcast'] = dict()
- for entry in NlAttrs(attr.raw):
- mcast_name = None
- mcast_id = None
- for entry_attr in NlAttrs(entry.raw):
- if entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_NAME:
- mcast_name = entry_attr.as_strz()
- elif entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_ID:
- mcast_id = entry_attr.as_scalar('u32')
- if mcast_name and mcast_id is not None:
- fam['mcast'][mcast_name] = mcast_id
- if 'name' in fam and 'id' in fam:
- genl_family_name_to_id[fam['name']] = fam
-
-
-class GenlMsg:
- def __init__(self, nl_msg):
- self.nl = nl_msg
- self.genl_cmd, self.genl_version, _ = struct.unpack_from("BBH", nl_msg.raw, 0)
- self.raw = nl_msg.raw[4:]
-
- def cmd(self):
- return self.genl_cmd
-
- def __repr__(self):
- msg = repr(self.nl)
- msg += f"\tgenl_cmd = {self.genl_cmd} genl_ver = {self.genl_version}\n"
- for a in self.raw_attrs:
- msg += '\t\t' + repr(a) + '\n'
- return msg
-
-
-class NetlinkProtocol:
- def __init__(self, family_name, proto_num):
- self.family_name = family_name
- self.proto_num = proto_num
-
- def _message(self, nl_type, nl_flags, seq=None):
- if seq is None:
- seq = random.randint(1, 1024)
- nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
- return nlmsg
-
- def message(self, flags, command, version, seq=None):
- return self._message(command, flags, seq)
-
- def _decode(self, nl_msg):
- return nl_msg
-
- def decode(self, ynl, nl_msg, op):
- msg = self._decode(nl_msg)
- if op is None:
- op = ynl.rsp_by_value[msg.cmd()]
- fixed_header_size = ynl._struct_size(op.fixed_header)
- msg.raw_attrs = NlAttrs(msg.raw, fixed_header_size)
- return msg
-
- def get_mcast_id(self, mcast_name, mcast_groups):
- if mcast_name not in mcast_groups:
- raise Exception(f'Multicast group "{mcast_name}" not present in the spec')
- return mcast_groups[mcast_name].value
-
- def msghdr_size(self):
- return 16
-
-
-class GenlProtocol(NetlinkProtocol):
- def __init__(self, family_name):
- super().__init__(family_name, Netlink.NETLINK_GENERIC)
-
- global genl_family_name_to_id
- if genl_family_name_to_id is None:
- _genl_load_families()
-
- self.genl_family = genl_family_name_to_id[family_name]
- self.family_id = genl_family_name_to_id[family_name]['id']
-
- def message(self, flags, command, version, seq=None):
- nlmsg = self._message(self.family_id, flags, seq)
- genlmsg = struct.pack("BBH", command, version, 0)
- return nlmsg + genlmsg
-
- def _decode(self, nl_msg):
- return GenlMsg(nl_msg)
-
- def get_mcast_id(self, mcast_name, mcast_groups):
- if mcast_name not in self.genl_family['mcast']:
- raise Exception(f'Multicast group "{mcast_name}" not present in the family')
- return self.genl_family['mcast'][mcast_name]
-
- def msghdr_size(self):
- return super().msghdr_size() + 4
-
-
-class SpaceAttrs:
- SpecValuesPair = namedtuple('SpecValuesPair', ['spec', 'values'])
-
- def __init__(self, attr_space, attrs, outer = None):
- outer_scopes = outer.scopes if outer else []
- inner_scope = self.SpecValuesPair(attr_space, attrs)
- self.scopes = [inner_scope] + outer_scopes
-
- def lookup(self, name):
- for scope in self.scopes:
- if name in scope.spec:
- if name in scope.values:
- return scope.values[name]
- spec_name = scope.spec.yaml['name']
- raise Exception(
- f"No value for '{name}' in attribute space '{spec_name}'")
- raise Exception(f"Attribute '{name}' not defined in any attribute-set")
-
-
-#
-# YNL implementation details.
-#
-
-
-class YnlFamily(SpecFamily):
- def __init__(self, def_path, schema=None, process_unknown=False,
- recv_size=0):
- super().__init__(def_path, schema)
-
- self.include_raw = False
- self.process_unknown = process_unknown
-
- try:
- if self.proto == "netlink-raw":
- self.nlproto = NetlinkProtocol(self.yaml['name'],
- self.yaml['protonum'])
- else:
- self.nlproto = GenlProtocol(self.yaml['name'])
- except KeyError:
- raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
-
- self._recv_dbg = False
- # Note that netlink will use conservative (min) message size for
- # the first dump recv() on the socket, our setting will only matter
- # from the second recv() on.
- self._recv_size = recv_size if recv_size else 131072
- # Netlink will always allocate at least PAGE_SIZE - sizeof(skb_shinfo)
- # for a message, so smaller receive sizes will lead to truncation.
- # Note that the min size for other families may be larger than 4k!
- if self._recv_size < 4000:
- raise ConfigError()
-
- self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, self.nlproto.proto_num)
- self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
- self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
- self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_GET_STRICT_CHK, 1)
-
- self.async_msg_ids = set()
- self.async_msg_queue = queue.Queue()
-
- for msg in self.msgs.values():
- if msg.is_async:
- self.async_msg_ids.add(msg.rsp_value)
-
- for op_name, op in self.ops.items():
- bound_f = functools.partial(self._op, op_name)
- setattr(self, op.ident_name, bound_f)
-
-
- def ntf_subscribe(self, mcast_name):
- mcast_id = self.nlproto.get_mcast_id(mcast_name, self.mcast_groups)
- self.sock.bind((0, 0))
- self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
- mcast_id)
-
- def set_recv_dbg(self, enabled):
- self._recv_dbg = enabled
-
- def _recv_dbg_print(self, reply, nl_msgs):
- if not self._recv_dbg:
- return
- print("Recv: read", len(reply), "bytes,",
- len(nl_msgs.msgs), "messages", file=sys.stderr)
- for nl_msg in nl_msgs:
- print(" ", nl_msg, file=sys.stderr)
-
- def _encode_enum(self, attr_spec, value):
- enum = self.consts[attr_spec['enum']]
- if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
- scalar = 0
- if isinstance(value, str):
- value = [value]
- for single_value in value:
- scalar += enum.entries[single_value].user_value(as_flags = True)
- return scalar
- else:
- return enum.entries[value].user_value()
-
- def _get_scalar(self, attr_spec, value):
- try:
- return int(value)
- except (ValueError, TypeError) as e:
- if 'enum' not in attr_spec:
- raise e
- return self._encode_enum(attr_spec, value)
-
- def _add_attr(self, space, name, value, search_attrs):
- try:
- attr = self.attr_sets[space][name]
- except KeyError:
- raise Exception(f"Space '{space}' has no attribute '{name}'")
- nl_type = attr.value
-
- if attr.is_multi and isinstance(value, list):
- attr_payload = b''
- for subvalue in value:
- attr_payload += self._add_attr(space, name, subvalue, search_attrs)
- return attr_payload
-
- if attr["type"] == 'nest':
- nl_type |= Netlink.NLA_F_NESTED
- attr_payload = b''
- sub_space = attr['nested-attributes']
- sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
- for subname, subvalue in value.items():
- attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
- elif attr["type"] == 'flag':
- if not value:
- # If value is absent or false then skip attribute creation.
- return b''
- attr_payload = b''
- elif attr["type"] == 'string':
- attr_payload = str(value).encode('ascii') + b'\x00'
- elif attr["type"] == 'binary':
- if isinstance(value, bytes):
- attr_payload = value
- elif isinstance(value, str):
- attr_payload = bytes.fromhex(value)
- elif isinstance(value, dict) and attr.struct_name:
- attr_payload = self._encode_struct(attr.struct_name, value)
- else:
- raise Exception(f'Unknown type for binary attribute, value: {value}')
- elif attr['type'] in NlAttr.type_formats or attr.is_auto_scalar:
- scalar = self._get_scalar(attr, value)
- if attr.is_auto_scalar:
- attr_type = attr["type"][0] + ('32' if scalar.bit_length() <= 32 else '64')
- else:
- attr_type = attr["type"]
- format = NlAttr.get_format(attr_type, attr.byte_order)
- attr_payload = format.pack(scalar)
- elif attr['type'] in "bitfield32":
- scalar_value = self._get_scalar(attr, value["value"])
- scalar_selector = self._get_scalar(attr, value["selector"])
- attr_payload = struct.pack("II", scalar_value, scalar_selector)
- elif attr['type'] == 'sub-message':
- msg_format = self._resolve_selector(attr, search_attrs)
- attr_payload = b''
- if msg_format.fixed_header:
- attr_payload += self._encode_struct(msg_format.fixed_header, value)
- if msg_format.attr_set:
- if msg_format.attr_set in self.attr_sets:
- nl_type |= Netlink.NLA_F_NESTED
- sub_attrs = SpaceAttrs(msg_format.attr_set, value, search_attrs)
- for subname, subvalue in value.items():
- attr_payload += self._add_attr(msg_format.attr_set,
- subname, subvalue, sub_attrs)
- else:
- raise Exception(f"Unknown attribute-set '{msg_format.attr_set}'")
- else:
- raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
-
- pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
- return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
-
- def _decode_enum(self, raw, attr_spec):
- enum = self.consts[attr_spec['enum']]
- if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
- i = 0
- value = set()
- while raw:
- if raw & 1:
- value.add(enum.entries_by_val[i].name)
- raw >>= 1
- i += 1
- else:
- value = enum.entries_by_val[raw].name
- return value
-
- def _decode_binary(self, attr, attr_spec):
- if attr_spec.struct_name:
- decoded = self._decode_struct(attr.raw, attr_spec.struct_name)
- elif attr_spec.sub_type:
- decoded = attr.as_c_array(attr_spec.sub_type)
- else:
- decoded = attr.as_bin()
- if attr_spec.display_hint:
- decoded = self._formatted_string(decoded, attr_spec.display_hint)
- return decoded
-
- def _decode_array_attr(self, attr, attr_spec):
- decoded = []
- offset = 0
- while offset < len(attr.raw):
- item = NlAttr(attr.raw, offset)
- offset += item.full_len
-
- if attr_spec["sub-type"] == 'nest':
- subattrs = self._decode(NlAttrs(item.raw), attr_spec['nested-attributes'])
- decoded.append({ item.type: subattrs })
- elif attr_spec["sub-type"] == 'binary':
- subattrs = item.as_bin()
- if attr_spec.display_hint:
- subattrs = self._formatted_string(subattrs, attr_spec.display_hint)
- decoded.append(subattrs)
- elif attr_spec["sub-type"] in NlAttr.type_formats:
- subattrs = item.as_scalar(attr_spec['sub-type'], attr_spec.byte_order)
- if attr_spec.display_hint:
- subattrs = self._formatted_string(subattrs, attr_spec.display_hint)
- decoded.append(subattrs)
- else:
- raise Exception(f'Unknown {attr_spec["sub-type"]} with name {attr_spec["name"]}')
- return decoded
-
- def _decode_nest_type_value(self, attr, attr_spec):
- decoded = {}
- value = attr
- for name in attr_spec['type-value']:
- value = NlAttr(value.raw, 0)
- decoded[name] = value.type
- subattrs = self._decode(NlAttrs(value.raw), attr_spec['nested-attributes'])
- decoded.update(subattrs)
- return decoded
-
- def _decode_unknown(self, attr):
- if attr.is_nest:
- return self._decode(NlAttrs(attr.raw), None)
- else:
- return attr.as_bin()
-
- def _rsp_add(self, rsp, name, is_multi, decoded):
- if is_multi == None:
- if name in rsp and type(rsp[name]) is not list:
- rsp[name] = [rsp[name]]
- is_multi = True
- else:
- is_multi = False
-
- if not is_multi:
- rsp[name] = decoded
- elif name in rsp:
- rsp[name].append(decoded)
- else:
- rsp[name] = [decoded]
-
- def _resolve_selector(self, attr_spec, search_attrs):
- sub_msg = attr_spec.sub_message
- if sub_msg not in self.sub_msgs:
- raise Exception(f"No sub-message spec named {sub_msg} for {attr_spec.name}")
- sub_msg_spec = self.sub_msgs[sub_msg]
-
- selector = attr_spec.selector
- value = search_attrs.lookup(selector)
- if value not in sub_msg_spec.formats:
- raise Exception(f"No message format for '{value}' in sub-message spec '{sub_msg}'")
-
- spec = sub_msg_spec.formats[value]
- return spec
-
- def _decode_sub_msg(self, attr, attr_spec, search_attrs):
- msg_format = self._resolve_selector(attr_spec, search_attrs)
- decoded = {}
- offset = 0
- if msg_format.fixed_header:
- decoded.update(self._decode_struct(attr.raw, msg_format.fixed_header));
- offset = self._struct_size(msg_format.fixed_header)
- if msg_format.attr_set:
- if msg_format.attr_set in self.attr_sets:
- subdict = self._decode(NlAttrs(attr.raw, offset), msg_format.attr_set)
- decoded.update(subdict)
- else:
- raise Exception(f"Unknown attribute-set '{attr_space}' when decoding '{attr_spec.name}'")
- return decoded
-
- def _decode(self, attrs, space, outer_attrs = None):
- rsp = dict()
- if space:
- attr_space = self.attr_sets[space]
- search_attrs = SpaceAttrs(attr_space, rsp, outer_attrs)
-
- for attr in attrs:
- try:
- attr_spec = attr_space.attrs_by_val[attr.type]
- except (KeyError, UnboundLocalError):
- if not self.process_unknown:
- raise Exception(f"Space '{space}' has no attribute with value '{attr.type}'")
- attr_name = f"UnknownAttr({attr.type})"
- self._rsp_add(rsp, attr_name, None, self._decode_unknown(attr))
- continue
-
- try:
- if attr_spec["type"] == 'nest':
- subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'], search_attrs)
- decoded = subdict
- elif attr_spec["type"] == 'string':
- decoded = attr.as_strz()
- elif attr_spec["type"] == 'binary':
- decoded = self._decode_binary(attr, attr_spec)
- elif attr_spec["type"] == 'flag':
- decoded = True
- elif attr_spec.is_auto_scalar:
- decoded = attr.as_auto_scalar(attr_spec['type'], attr_spec.byte_order)
- elif attr_spec["type"] in NlAttr.type_formats:
- decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
- if 'enum' in attr_spec:
- decoded = self._decode_enum(decoded, attr_spec)
- elif attr_spec.display_hint:
- decoded = self._formatted_string(decoded, attr_spec.display_hint)
- elif attr_spec["type"] == 'indexed-array':
- decoded = self._decode_array_attr(attr, attr_spec)
- elif attr_spec["type"] == 'bitfield32':
- value, selector = struct.unpack("II", attr.raw)
- if 'enum' in attr_spec:
- value = self._decode_enum(value, attr_spec)
- selector = self._decode_enum(selector, attr_spec)
- decoded = {"value": value, "selector": selector}
- elif attr_spec["type"] == 'sub-message':
- decoded = self._decode_sub_msg(attr, attr_spec, search_attrs)
- elif attr_spec["type"] == 'nest-type-value':
- decoded = self._decode_nest_type_value(attr, attr_spec)
- else:
- if not self.process_unknown:
- raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
- decoded = self._decode_unknown(attr)
-
- self._rsp_add(rsp, attr_spec["name"], attr_spec.is_multi, decoded)
- except:
- print(f"Error decoding '{attr_spec.name}' from '{space}'")
- raise
-
- return rsp
-
- def _decode_extack_path(self, attrs, attr_set, offset, target):
- for attr in attrs:
- try:
- attr_spec = attr_set.attrs_by_val[attr.type]
- except KeyError:
- raise Exception(f"Space '{attr_set.name}' has no attribute with value '{attr.type}'")
- if offset > target:
- break
- if offset == target:
- return '.' + attr_spec.name
-
- if offset + attr.full_len <= target:
- offset += attr.full_len
- continue
- if attr_spec['type'] != 'nest':
- raise Exception(f"Can't dive into {attr.type} ({attr_spec['name']}) for extack")
- offset += 4
- subpath = self._decode_extack_path(NlAttrs(attr.raw),
- self.attr_sets[attr_spec['nested-attributes']],
- offset, target)
- if subpath is None:
- return None
- return '.' + attr_spec.name + subpath
-
- return None
-
- def _decode_extack(self, request, op, extack):
- if 'bad-attr-offs' not in extack:
- return
-
- msg = self.nlproto.decode(self, NlMsg(request, 0, op.attr_set), op)
- offset = self.nlproto.msghdr_size() + self._struct_size(op.fixed_header)
- path = self._decode_extack_path(msg.raw_attrs, op.attr_set, offset,
- extack['bad-attr-offs'])
- if path:
- del extack['bad-attr-offs']
- extack['bad-attr'] = path
-
- def _struct_size(self, name):
- if name:
- members = self.consts[name].members
- size = 0
- for m in members:
- if m.type in ['pad', 'binary']:
- if m.struct:
- size += self._struct_size(m.struct)
- else:
- size += m.len
- else:
- format = NlAttr.get_format(m.type, m.byte_order)
- size += format.size
- return size
- else:
- return 0
-
- def _decode_struct(self, data, name):
- members = self.consts[name].members
- attrs = dict()
- offset = 0
- for m in members:
- value = None
- if m.type == 'pad':
- offset += m.len
- elif m.type == 'binary':
- if m.struct:
- len = self._struct_size(m.struct)
- value = self._decode_struct(data[offset : offset + len],
- m.struct)
- offset += len
- else:
- value = data[offset : offset + m.len]
- offset += m.len
- else:
- format = NlAttr.get_format(m.type, m.byte_order)
- [ value ] = format.unpack_from(data, offset)
- offset += format.size
- if value is not None:
- if m.enum:
- value = self._decode_enum(value, m)
- elif m.display_hint:
- value = self._formatted_string(value, m.display_hint)
- attrs[m.name] = value
- return attrs
-
- def _encode_struct(self, name, vals):
- members = self.consts[name].members
- attr_payload = b''
- for m in members:
- value = vals.pop(m.name) if m.name in vals else None
- if m.type == 'pad':
- attr_payload += bytearray(m.len)
- elif m.type == 'binary':
- if m.struct:
- if value is None:
- value = dict()
- attr_payload += self._encode_struct(m.struct, value)
- else:
- if value is None:
- attr_payload += bytearray(m.len)
- else:
- attr_payload += bytes.fromhex(value)
- else:
- if value is None:
- value = 0
- format = NlAttr.get_format(m.type, m.byte_order)
- attr_payload += format.pack(value)
- return attr_payload
-
- def _formatted_string(self, raw, display_hint):
- if display_hint == 'mac':
- formatted = ':'.join('%02x' % b for b in raw)
- elif display_hint == 'hex':
- if isinstance(raw, int):
- formatted = hex(raw)
- else:
- formatted = bytes.hex(raw, ' ')
- elif display_hint in [ 'ipv4', 'ipv6' ]:
- formatted = format(ipaddress.ip_address(raw))
- elif display_hint == 'uuid':
- formatted = str(uuid.UUID(bytes=raw))
- else:
- formatted = raw
- return formatted
-
- def handle_ntf(self, decoded):
- msg = dict()
- if self.include_raw:
- msg['raw'] = decoded
- op = self.rsp_by_value[decoded.cmd()]
- attrs = self._decode(decoded.raw_attrs, op.attr_set.name)
- if op.fixed_header:
- attrs.update(self._decode_struct(decoded.raw, op.fixed_header))
-
- msg['name'] = op['name']
- msg['msg'] = attrs
- self.async_msg_queue.put(msg)
-
- def check_ntf(self):
- while True:
- try:
- reply = self.sock.recv(self._recv_size, socket.MSG_DONTWAIT)
- except BlockingIOError:
- return
-
- nms = NlMsgs(reply)
- self._recv_dbg_print(reply, nms)
- for nl_msg in nms:
- if nl_msg.error:
- print("Netlink error in ntf!?", os.strerror(-nl_msg.error))
- print(nl_msg)
- continue
- if nl_msg.done:
- print("Netlink done while checking for ntf!?")
- continue
-
- decoded = self.nlproto.decode(self, nl_msg, None)
- if decoded.cmd() not in self.async_msg_ids:
- print("Unexpected msg id while checking for ntf", decoded)
- continue
-
- self.handle_ntf(decoded)
-
- def poll_ntf(self, duration=None):
- start_time = time.time()
- selector = selectors.DefaultSelector()
- selector.register(self.sock, selectors.EVENT_READ)
-
- while True:
- try:
- yield self.async_msg_queue.get_nowait()
- except queue.Empty:
- if duration is not None:
- timeout = start_time + duration - time.time()
- if timeout <= 0:
- return
- else:
- timeout = None
- events = selector.select(timeout)
- if events:
- self.check_ntf()
-
- def operation_do_attributes(self, name):
- """
- For a given operation name, find and return a supported
- set of attributes (as a dict).
- """
- op = self.find_operation(name)
- if not op:
- return None
-
- return op['do']['request']['attributes'].copy()
-
- def _encode_message(self, op, vals, flags, req_seq):
- nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK
- for flag in flags or []:
- nl_flags |= flag
-
- msg = self.nlproto.message(nl_flags, op.req_value, 1, req_seq)
- if op.fixed_header:
- msg += self._encode_struct(op.fixed_header, vals)
- search_attrs = SpaceAttrs(op.attr_set, vals)
- for name, value in vals.items():
- msg += self._add_attr(op.attr_set.name, name, value, search_attrs)
- msg = _genl_msg_finalize(msg)
- return msg
-
- def _ops(self, ops):
- reqs_by_seq = {}
- req_seq = random.randint(1024, 65535)
- payload = b''
- for (method, vals, flags) in ops:
- op = self.ops[method]
- msg = self._encode_message(op, vals, flags, req_seq)
- reqs_by_seq[req_seq] = (op, msg, flags)
- payload += msg
- req_seq += 1
-
- self.sock.send(payload, 0)
-
- done = False
- rsp = []
- op_rsp = []
- while not done:
- reply = self.sock.recv(self._recv_size)
- nms = NlMsgs(reply, attr_space=op.attr_set)
- self._recv_dbg_print(reply, nms)
- for nl_msg in nms:
- if nl_msg.nl_seq in reqs_by_seq:
- (op, req_msg, req_flags) = reqs_by_seq[nl_msg.nl_seq]
- if nl_msg.extack:
- self._decode_extack(req_msg, op, nl_msg.extack)
- else:
- op = None
- req_flags = []
-
- if nl_msg.error:
- raise NlError(nl_msg)
- if nl_msg.done:
- if nl_msg.extack:
- print("Netlink warning:")
- print(nl_msg)
-
- if Netlink.NLM_F_DUMP in req_flags:
- rsp.append(op_rsp)
- elif not op_rsp:
- rsp.append(None)
- elif len(op_rsp) == 1:
- rsp.append(op_rsp[0])
- else:
- rsp.append(op_rsp)
- op_rsp = []
-
- del reqs_by_seq[nl_msg.nl_seq]
- done = len(reqs_by_seq) == 0
- break
-
- decoded = self.nlproto.decode(self, nl_msg, op)
-
- # Check if this is a reply to our request
- if nl_msg.nl_seq not in reqs_by_seq or decoded.cmd() != op.rsp_value:
- if decoded.cmd() in self.async_msg_ids:
- self.handle_ntf(decoded)
- continue
- else:
- print('Unexpected message: ' + repr(decoded))
- continue
-
- rsp_msg = self._decode(decoded.raw_attrs, op.attr_set.name)
- if op.fixed_header:
- rsp_msg.update(self._decode_struct(decoded.raw, op.fixed_header))
- op_rsp.append(rsp_msg)
-
- return rsp
-
- def _op(self, method, vals, flags=None, dump=False):
- req_flags = flags or []
- if dump:
- req_flags.append(Netlink.NLM_F_DUMP)
-
- ops = [(method, vals, req_flags)]
- return self._ops(ops)[0]
-
- def do(self, method, vals, flags=None):
- return self._op(method, vals, flags)
-
- def dump(self, method, vals):
- return self._op(method, vals, dump=True)
-
- def do_multi(self, ops):
- return self._ops(ops)
--- /dev/null
+__pycache__/
+lib/__pycache__/
--- /dev/null
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import argparse
+import json
+import pathlib
+import pprint
+import sys
+
+sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
+from lib import YnlFamily, Netlink, NlError
+
+
+class YnlEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, bytes):
+ return bytes.hex(obj)
+ if isinstance(obj, set):
+ return list(obj)
+ return json.JSONEncoder.default(self, obj)
+
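+# YnlEncoder keeps raw netlink payloads JSON-serializable: for example,
+# json.dumps({'addr': b'\x01\x02', 'caps': {'rx'}}, cls=YnlEncoder) yields
+# '{"addr": "0102", "caps": ["rx"]}' (bytes as hex, sets as lists).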
+
+def main():
+ description = """
+ YNL CLI utility - a general purpose netlink utility that uses YAML
+ specs to drive protocol encoding and decoding.
+ """
+ epilog = """
+ The --multi option can be repeated to include several do operations
+ in the same netlink payload.
+ """
+
+ parser = argparse.ArgumentParser(description=description,
+ epilog=epilog)
+ parser.add_argument('--spec', dest='spec', type=str, required=True)
+ parser.add_argument('--schema', dest='schema', type=str)
+ parser.add_argument('--no-schema', action='store_true')
+ parser.add_argument('--json', dest='json_text', type=str)
+
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--do', dest='do', metavar='DO-OPERATION', type=str)
+ group.add_argument('--multi', dest='multi', nargs=2, action='append',
+ metavar=('DO-OPERATION', 'JSON_TEXT'), type=str)
+ group.add_argument('--dump', dest='dump', metavar='DUMP-OPERATION', type=str)
+ group.add_argument('--list-ops', action='store_true')
+ group.add_argument('--list-msgs', action='store_true')
+
+ parser.add_argument('--duration', dest='duration', type=int,
+ help='when subscribed, watch for DURATION seconds')
+ parser.add_argument('--sleep', dest='duration', type=int,
+ help='alias for duration')
+ parser.add_argument('--subscribe', dest='ntf', type=str)
+ parser.add_argument('--replace', dest='flags', action='append_const',
+ const=Netlink.NLM_F_REPLACE)
+ parser.add_argument('--excl', dest='flags', action='append_const',
+ const=Netlink.NLM_F_EXCL)
+ parser.add_argument('--create', dest='flags', action='append_const',
+ const=Netlink.NLM_F_CREATE)
+ parser.add_argument('--append', dest='flags', action='append_const',
+ const=Netlink.NLM_F_APPEND)
+ parser.add_argument('--process-unknown', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--output-json', action='store_true')
+ parser.add_argument('--dbg-small-recv', default=0, const=4000,
+ action='store', nargs='?', type=int)
+ args = parser.parse_args()
+
+ def output(msg):
+ if args.output_json:
+ print(json.dumps(msg, cls=YnlEncoder))
+ else:
+ pprint.PrettyPrinter().pprint(msg)
+
+ if args.no_schema:
+ args.schema = ''
+
+ attrs = {}
+ if args.json_text:
+ attrs = json.loads(args.json_text)
+
+ ynl = YnlFamily(args.spec, args.schema, args.process_unknown,
+ recv_size=args.dbg_small_recv)
+ if args.dbg_small_recv:
+ ynl.set_recv_dbg(True)
+
+ if args.ntf:
+ ynl.ntf_subscribe(args.ntf)
+
+ if args.list_ops:
+ for op_name, op in ynl.ops.items():
+ print(op_name, " [", ", ".join(op.modes), "]")
+ if args.list_msgs:
+ for op_name, op in ynl.msgs.items():
+ print(op_name, " [", ", ".join(op.modes), "]")
+
+ try:
+ if args.do:
+ reply = ynl.do(args.do, attrs, args.flags)
+ output(reply)
+ if args.dump:
+ reply = ynl.dump(args.dump, attrs)
+ output(reply)
+ if args.multi:
+ ops = [ (item[0], json.loads(item[1]), args.flags or []) for item in args.multi ]
+ reply = ynl.do_multi(ops)
+ output(reply)
+ except NlError as e:
+ print(e)
+ exit(1)
+
+ if args.ntf:
+ try:
+ for msg in ynl.poll_ntf(duration=args.duration):
+ output(msg)
+ except KeyboardInterrupt:
+ pass
+
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import argparse
+import json
+import pathlib
+import pprint
+import sys
+import re
+import os
+
+sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
+from lib import YnlFamily
+
+def args_to_req(ynl, op_name, args, req):
+ """
+ Verify and convert command-line arguments to the ynl-compatible request.
+ """
+ valid_attrs = ynl.operation_do_attributes(op_name)
+ valid_attrs.remove('header') # not user-provided
+
+ if len(args) == 0:
+ print(f'no attributes, expected: {valid_attrs}')
+ sys.exit(1)
+
+ i = 0
+ while i < len(args):
+ attr = args[i]
+ if i + 1 >= len(args):
+ print(f'expected value for \'{attr}\'')
+ sys.exit(1)
+
+ if attr not in valid_attrs:
+ print(f'invalid attribute \'{attr}\', expected: {valid_attrs}')
+ sys.exit(1)
+
+ val = args[i+1]
+ i += 2
+
+ req[attr] = val
+
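+# For example, args ['rx', '4096', 'tx', '4096'] become req['rx'] = '4096'
+# and req['tx'] = '4096' (values stay strings), provided both names are
+# valid attributes of the requested operation.
+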
+def print_field(reply, *desc):
+ """
+ Pretty-print a set of fields from the reply. desc specifies the
+ fields and the optional type (bool/yn).
+ """
+ if len(desc) == 0:
+ return print_field(reply, *zip(reply.keys(), reply.keys()))
+
+ for spec in desc:
+ try:
+ field, name, tp = spec
+        except ValueError:

+ field, name = spec
+ tp = 'int'
+
+ value = reply.get(field, None)
+ if tp == 'yn':
+ value = 'yes' if value else 'no'
+ elif tp == 'bool' or isinstance(value, bool):
+ value = 'on' if value else 'off'
+ else:
+ value = 'n/a' if value is None else value
+
+ print(f'{name}: {value}')
+
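+# For example, print_field(reply, ('autoneg', 'Autonegotiate', 'bool'))
+# prints "Autonegotiate: on" or "Autonegotiate: off"; called with no desc
+# it prints every key of the reply under its own name.
+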
+def print_speed(name, value):
+ """
+ Print out the speed-like strings from the value dict.
+ """
+ speed_re = re.compile(r'[0-9]+base[^/]+/.+')
+ speed = [ k for k, v in value.items() if v and speed_re.match(k) ]
+ print(f'{name}: {" ".join(speed)}')
+
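+# The regex keeps only link-mode names such as '1000baseT/Full', so
+# print_speed('Supported link modes', ours) lists the set modes and skips
+# flag bits like 'Autoneg' or 'Pause'.
+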
+def doit(ynl, args, op_name):
+ """
+ Prepare request header, parse arguments and doit.
+ """
+ req = {
+ 'header': {
+ 'dev-name': args.device,
+ },
+ }
+
+ args_to_req(ynl, op_name, args.args, req)
+ ynl.do(op_name, req)
+
+def dumpit(ynl, args, op_name, extra = {}):
+ """
+ Prepare request header, parse arguments and dumpit (filtering out the
+ devices we're not interested in).
+ """
+ reply = ynl.dump(op_name, { 'header': {} } | extra)
+ if not reply:
+ return {}
+
+ for msg in reply:
+ if msg['header']['dev-name'] == args.device:
+ if args.json:
+ pprint.PrettyPrinter().pprint(msg)
+ sys.exit(0)
+ msg.pop('header', None)
+ return msg
+
+ print(f"Not supported for device {args.device}")
+ sys.exit(1)
+
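+# dumpit() returns the first reply whose header matches args.device, with
+# the header stripped; e.g. dumpit(ynl, args, 'linkinfo-get') yields a plain
+# dict of link-info attributes. With --json it pretty-prints the raw message.
+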
+def bits_to_dict(attr):
+ """
+ Convert ynl-formatted bitmask to a dict of bit=value.
+ """
+ ret = {}
+ if 'bits' not in attr:
+ return dict()
+ if 'bit' not in attr['bits']:
+ return dict()
+ for bit in attr['bits']['bit']:
+ if bit['name'] == '':
+ continue
+ name = bit['name']
+ value = bit.get('value', False)
+ ret[name] = value
+ return ret
+
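+# For example, {'bits': {'bit': [{'name': 'tso', 'value': True},
+# {'name': 'gro'}]}} becomes {'tso': True, 'gro': False}.
+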
+def main():
+ parser = argparse.ArgumentParser(description='ethtool wannabe')
+ parser.add_argument('--json', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--show-priv-flags', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--set-priv-flags', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--show-eee', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--set-eee', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-a', '--show-pause', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-A', '--set-pause', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-c', '--show-coalesce', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-C', '--set-coalesce', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-g', '--show-ring', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-G', '--set-ring', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-k', '--show-features', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-K', '--set-features', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-l', '--show-channels', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-L', '--set-channels', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-T', '--show-time-stamping', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-S', '--statistics', action=argparse.BooleanOptionalAction)
+ # TODO: --show-tunnels tunnel-info-get
+ # TODO: --show-module module-get
+ # TODO: --get-plca-cfg plca-get
+ # TODO: --get-plca-status plca-get-status
+ # TODO: --show-mm mm-get
+ # TODO: --show-fec fec-get
+    # TODO: --dump-module-eeprom module-eeprom-get
+ # TODO: pse-get
+ # TODO: rss-get
+ parser.add_argument('device', metavar='device', type=str)
+ parser.add_argument('args', metavar='args', type=str, nargs='*')
+ global args
+ args = parser.parse_args()
+
+ script_abs_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+ spec = os.path.join(script_abs_dir,
+ '../../../Documentation/netlink/specs/ethtool.yaml')
+ schema = os.path.join(script_abs_dir,
+ '../../../Documentation/netlink/genetlink-legacy.yaml')
+
+ ynl = YnlFamily(spec, schema)
+
+ if args.set_priv_flags:
+ # TODO: parse the bitmask
+ print("not implemented")
+ return
+
+ if args.set_eee:
+ return doit(ynl, args, 'eee-set')
+
+ if args.set_pause:
+ return doit(ynl, args, 'pause-set')
+
+ if args.set_coalesce:
+ return doit(ynl, args, 'coalesce-set')
+
+ if args.set_features:
+ # TODO: parse the bitmask
+ print("not implemented")
+ return
+
+ if args.set_channels:
+ return doit(ynl, args, 'channels-set')
+
+ if args.set_ring:
+ return doit(ynl, args, 'rings-set')
+
+ if args.show_priv_flags:
+ flags = bits_to_dict(dumpit(ynl, args, 'privflags-get')['flags'])
+ print_field(flags)
+ return
+
+ if args.show_eee:
+ eee = dumpit(ynl, args, 'eee-get')
+ ours = bits_to_dict(eee['modes-ours'])
+ peer = bits_to_dict(eee['modes-peer'])
+
+ if 'enabled' in eee:
+ status = 'enabled' if eee['enabled'] else 'disabled'
+ if 'active' in eee and eee['active']:
+ status = status + ' - active'
+ else:
+ status = status + ' - inactive'
+ else:
+ status = 'not supported'
+
+ print(f'EEE status: {status}')
+ print_field(eee, ('tx-lpi-timer', 'Tx LPI'))
+ print_speed('Advertised EEE link modes', ours)
+ print_speed('Link partner advertised EEE link modes', peer)
+
+ return
+
+ if args.show_pause:
+ print_field(dumpit(ynl, args, 'pause-get'),
+ ('autoneg', 'Autonegotiate', 'bool'),
+ ('rx', 'RX', 'bool'),
+ ('tx', 'TX', 'bool'))
+ return
+
+ if args.show_coalesce:
+ print_field(dumpit(ynl, args, 'coalesce-get'))
+ return
+
+ if args.show_features:
+ reply = dumpit(ynl, args, 'features-get')
+ available = bits_to_dict(reply['hw'])
+ requested = bits_to_dict(reply['wanted']).keys()
+ active = bits_to_dict(reply['active']).keys()
+ never_changed = bits_to_dict(reply['nochange']).keys()
+
+ for f in sorted(available):
+ value = "off"
+ if f in active:
+ value = "on"
+
+ fixed = ""
+ if f not in available or f in never_changed:
+ fixed = " [fixed]"
+
+ req = ""
+ if f in requested:
+ if f in active:
+ req = " [requested on]"
+ else:
+ req = " [requested off]"
+
+ print(f'{f}: {value}{fixed}{req}')
+
+ return
+
+ if args.show_channels:
+ reply = dumpit(ynl, args, 'channels-get')
+ print(f'Channel parameters for {args.device}:')
+
+ print(f'Pre-set maximums:')
+ print_field(reply,
+ ('rx-max', 'RX'),
+ ('tx-max', 'TX'),
+ ('other-max', 'Other'),
+ ('combined-max', 'Combined'))
+
+ print(f'Current hardware settings:')
+ print_field(reply,
+ ('rx-count', 'RX'),
+ ('tx-count', 'TX'),
+ ('other-count', 'Other'),
+ ('combined-count', 'Combined'))
+
+ return
+
+ if args.show_ring:
+ reply = dumpit(ynl, args, 'channels-get')
+
+ print(f'Ring parameters for {args.device}:')
+
+ print(f'Pre-set maximums:')
+ print_field(reply,
+ ('rx-max', 'RX'),
+ ('rx-mini-max', 'RX Mini'),
+ ('rx-jumbo-max', 'RX Jumbo'),
+ ('tx-max', 'TX'))
+
+ print(f'Current hardware settings:')
+ print_field(reply,
+ ('rx', 'RX'),
+ ('rx-mini', 'RX Mini'),
+ ('rx-jumbo', 'RX Jumbo'),
+ ('tx', 'TX'))
+
+ print_field(reply,
+ ('rx-buf-len', 'RX Buf Len'),
+ ('cqe-size', 'CQE Size'),
+ ('tx-push', 'TX Push', 'bool'))
+
+ return
+
+ if args.statistics:
+ print(f'NIC statistics:')
+
+ # TODO: pass id?
+ strset = dumpit(ynl, args, 'strset-get')
+ pprint.PrettyPrinter().pprint(strset)
+
+ req = {
+ 'groups': {
+ 'size': 1,
+ 'bits': {
+ 'bit':
+ # TODO: support passing the bitmask
+ #[
+ #{ 'name': 'eth-phy', 'value': True },
+ { 'name': 'eth-mac', 'value': True },
+ #{ 'name': 'eth-ctrl', 'value': True },
+ #{ 'name': 'rmon', 'value': True },
+ #],
+ },
+ },
+ }
+
+ rsp = dumpit(ynl, args, 'stats-get', req)
+ pprint.PrettyPrinter().pprint(rsp)
+ return
+
+ if args.show_time_stamping:
+ req = {
+ 'header': {
+ 'flags': 'stats',
+ },
+ }
+
+ tsinfo = dumpit(ynl, args, 'tsinfo-get', req)
+
+ print(f'Time stamping parameters for {args.device}:')
+
+ print('Capabilities:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
+
+ print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
+
+ print('Hardware Transmit Timestamp Modes:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
+
+ print('Hardware Receive Filter Modes:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
+
+ print('Statistics:')
+ [print(f'\t{k}: {v}') for k, v in tsinfo['stats'].items()]
+ return
+
+ print(f'Settings for {args.device}:')
+ linkmodes = dumpit(ynl, args, 'linkmodes-get')
+ ours = bits_to_dict(linkmodes['ours'])
+
+ supported_ports = ('TP', 'AUI', 'BNC', 'MII', 'FIBRE', 'Backplane')
+ ports = [ p for p in supported_ports if ours.get(p, False)]
+ print(f'Supported ports: [ {" ".join(ports)} ]')
+
+ print_speed('Supported link modes', ours)
+
+ print_field(ours, ('Pause', 'Supported pause frame use', 'yn'))
+ print_field(ours, ('Autoneg', 'Supports auto-negotiation', 'yn'))
+
+ supported_fec = ('None', 'PS', 'BASER', 'LLRS')
+ fec = [ p for p in supported_fec if ours.get(p, False)]
+ fec_str = " ".join(fec)
+ if len(fec) == 0:
+ fec_str = "Not reported"
+
+ print(f'Supported FEC modes: {fec_str}')
+
+ speed = 'Unknown!'
+ if linkmodes['speed'] > 0 and linkmodes['speed'] < 0xffffffff:
+ speed = f'{linkmodes["speed"]}Mb/s'
+ print(f'Speed: {speed}')
+
+ duplex_modes = {
+ 0: 'Half',
+ 1: 'Full',
+ }
+ duplex = duplex_modes.get(linkmodes["duplex"], None)
+ if not duplex:
+ duplex = f'Unknown! ({linkmodes["duplex"]})'
+ print(f'Duplex: {duplex}')
+
+ autoneg = "off"
+ if linkmodes.get("autoneg", 0) != 0:
+ autoneg = "on"
+ print(f'Auto-negotiation: {autoneg}')
+
+ ports = {
+ 0: 'Twisted Pair',
+ 1: 'AUI',
+ 2: 'MII',
+ 3: 'FIBRE',
+ 4: 'BNC',
+ 5: 'Directly Attached Copper',
+ 0xef: 'None',
+ }
+ linkinfo = dumpit(ynl, args, 'linkinfo-get')
+ print(f'Port: {ports.get(linkinfo["port"], "Other")}')
+
+ print_field(linkinfo, ('phyaddr', 'PHYAD'))
+
+ transceiver = {
+ 0: 'Internal',
+ 1: 'External',
+ }
+ print(f'Transceiver: {transceiver.get(linkinfo["transceiver"], "Unknown")}')
+
+ mdix_ctrl = {
+ 1: 'off',
+ 2: 'on',
+ }
+ mdix = mdix_ctrl.get(linkinfo['tp-mdix-ctrl'], None)
+ if mdix:
+ mdix = mdix + ' (forced)'
+ else:
+ mdix = mdix_ctrl.get(linkinfo['tp-mdix'], 'Unknown (auto)')
+ print(f'MDI-X: {mdix}')
+
+ debug = dumpit(ynl, args, 'debug-get')
+ msgmask = bits_to_dict(debug.get("msgmask", [])).keys()
+ print(f'Current message level: {" ".join(msgmask)}')
+
+ linkstate = dumpit(ynl, args, 'linkstate-get')
+ detected_states = {
+ 0: 'no',
+ 1: 'yes',
+ }
+ # TODO: wol-get
+ detected = detected_states.get(linkstate['link'], 'unknown')
+ print(f'Link detected: {detected}')
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
+ SpecFamily, SpecOperation
+from .ynl import YnlFamily, Netlink, NlError
+
+__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
+ "SpecFamily", "SpecOperation", "YnlFamily", "Netlink", "NlError"]
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import collections
+import importlib
+import os
+import yaml
+
+
+# To be loaded dynamically as needed
+jsonschema = None
+
+
+class SpecElement:
+ """Netlink spec element.
+
+ Abstract element of the Netlink spec. Implements the dictionary interface
+ for access to the raw spec. Supports iterative resolution of dependencies
+ across elements and class inheritance levels. The elements of the spec
+ may refer to each other, and although loops should be very rare, having
+ to maintain correct ordering of instantiation is painful, so the resolve()
+ method should be used to perform parts of init which require access to
+ other parts of the spec.
+
+ Attributes:
+ yaml raw spec as loaded from the spec file
+ family back reference to the full family
+
+ name name of the entity as listed in the spec (optional)
+ ident_name name which can be safely used as identifier in code (optional)
+ """
+ def __init__(self, family, yaml):
+ self.yaml = yaml
+ self.family = family
+
+ if 'name' in self.yaml:
+ self.name = self.yaml['name']
+ self.ident_name = self.name.replace('-', '_')
+
+ self._super_resolved = False
+ family.add_unresolved(self)
+
+ def __getitem__(self, key):
+ return self.yaml[key]
+
+ def __contains__(self, key):
+ return key in self.yaml
+
+ def get(self, key, default=None):
+ return self.yaml.get(key, default)
+
+ def resolve_up(self, up):
+ if not self._super_resolved:
+ up.resolve()
+ self._super_resolved = True
+
+ def resolve(self):
+ pass
+
+
+class SpecEnumEntry(SpecElement):
+ """ Entry within an enum declared in the Netlink spec.
+
+ Attributes:
+ doc documentation string
+ enum_set back reference to the enum
+ value numerical value of this enum (use accessors in most situations!)
+
+ Methods:
+ raw_value raw value, i.e. the id in the enum, unlike user value which is a mask for flags
+ user_value user value, same as raw value for enums, for flags it's the mask
+ """
+ def __init__(self, enum_set, yaml, prev, value_start):
+ if isinstance(yaml, str):
+ yaml = {'name': yaml}
+ super().__init__(enum_set.family, yaml)
+
+ self.doc = yaml.get('doc', '')
+ self.enum_set = enum_set
+
+ if 'value' in yaml:
+ self.value = yaml['value']
+ elif prev:
+ self.value = prev.value + 1
+ else:
+ self.value = value_start
+
+ def has_doc(self):
+ return bool(self.doc)
+
+ def raw_value(self):
+ return self.value
+
+ def user_value(self, as_flags=None):
+ if self.enum_set['type'] == 'flags' or as_flags:
+ return 1 << self.value
+ else:
+ return self.value
+
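+# For a 'flags' enum an entry's user_value() is its bit mask (1 << value),
+# e.g. the entry at position 3 yields 8, while raw_value() stays 3; for a
+# plain enum both return the same number.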
+
+class SpecEnumSet(SpecElement):
+ """ Enum type
+
+ Represents an enumeration (list of numerical constants)
+ as declared in the "definitions" section of the spec.
+
+ Attributes:
+ type enum or flags
+ entries entries by name
+ entries_by_val entries by value
+ Methods:
+ get_mask for flags compute the mask of all defined values
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.type = yaml['type']
+
+ prev_entry = None
+ value_start = self.yaml.get('value-start', 0)
+ self.entries = dict()
+ self.entries_by_val = dict()
+ for entry in self.yaml['entries']:
+ e = self.new_entry(entry, prev_entry, value_start)
+ self.entries[e.name] = e
+ self.entries_by_val[e.raw_value()] = e
+ prev_entry = e
+
+ def new_entry(self, entry, prev_entry, value_start):
+ return SpecEnumEntry(self, entry, prev_entry, value_start)
+
+ def has_doc(self):
+ if 'doc' in self.yaml:
+ return True
+ return self.has_entry_doc()
+
+ def has_entry_doc(self):
+ for entry in self.entries.values():
+ if entry.has_doc():
+ return True
+ return False
+
+ def get_mask(self, as_flags=None):
+ mask = 0
+ for e in self.entries.values():
+ mask += e.user_value(as_flags)
+ return mask
+
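+# get_mask() adds up every defined flag bit, e.g. entries at positions 0
+# and 2 give a mask of 0b101 (5).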
+
+class SpecAttr(SpecElement):
+ """ Single Netlink attribute type
+
+ Represents a single attribute type within an attr space.
+
+ Attributes:
+ type string, attribute type
+ value numerical ID when serialized
+ attr_set Attribute Set containing this attr
+ is_multi bool, attr may repeat multiple times
+ struct_name string, name of struct definition
+ sub_type string, name of sub type
+ len integer, optional byte length of binary types
+ display_hint string, hint to help choose format specifier
+ when displaying the value
+ sub_message string, name of sub message type
+ selector string, name of attribute used to select
+ sub-message type
+
+ is_auto_scalar bool, attr is a variable-size scalar
+ """
+ def __init__(self, family, attr_set, yaml, value):
+ super().__init__(family, yaml)
+
+ self.type = yaml['type']
+ self.value = value
+ self.attr_set = attr_set
+ self.is_multi = yaml.get('multi-attr', False)
+ self.struct_name = yaml.get('struct')
+ self.sub_type = yaml.get('sub-type')
+ self.byte_order = yaml.get('byte-order')
+ self.len = yaml.get('len')
+ self.display_hint = yaml.get('display-hint')
+ self.sub_message = yaml.get('sub-message')
+ self.selector = yaml.get('selector')
+
+ self.is_auto_scalar = self.type == "sint" or self.type == "uint"
+
+
+class SpecAttrSet(SpecElement):
+ """ Netlink Attribute Set class.
+
+    Represents an ID space of attributes within Netlink.
+
+ Note that unlike other elements, which expose contents of the raw spec
+    via the dictionary interface, the Attribute Set exposes attributes by name.
+
+ Attributes:
+ attrs ordered dict of all attributes (indexed by name)
+ attrs_by_val ordered dict of all attributes (indexed by value)
+ subset_of parent set if this is a subset, otherwise None
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.subset_of = self.yaml.get('subset-of', None)
+
+ self.attrs = collections.OrderedDict()
+ self.attrs_by_val = collections.OrderedDict()
+
+ if self.subset_of is None:
+ val = 1
+ for elem in self.yaml['attributes']:
+ if 'value' in elem:
+ val = elem['value']
+
+ attr = self.new_attr(elem, val)
+ self.attrs[attr.name] = attr
+ self.attrs_by_val[attr.value] = attr
+ val += 1
+ else:
+ real_set = family.attr_sets[self.subset_of]
+ for elem in self.yaml['attributes']:
+ real_attr = real_set[elem['name']]
+ combined_elem = real_attr.yaml | elem
+ attr = self.new_attr(combined_elem, real_attr.value)
+
+ self.attrs[attr.name] = attr
+ self.attrs_by_val[attr.value] = attr
+
+ def new_attr(self, elem, value):
+ return SpecAttr(self.family, self, elem, value)
+
+ def __getitem__(self, key):
+ return self.attrs[key]
+
+ def __contains__(self, key):
+ return key in self.attrs
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def items(self):
+ return self.attrs.items()
+
+
+class SpecStructMember(SpecElement):
+ """Struct member attribute
+
+ Represents a single struct member attribute.
+
+ Attributes:
+ type string, type of the member attribute
+ byte_order string or None for native byte order
+ enum string, name of the enum definition
+ len integer, optional byte length of binary types
+ display_hint string, hint to help choose format specifier
+ when displaying the value
+ struct string, name of nested struct type
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+ self.type = yaml['type']
+ self.byte_order = yaml.get('byte-order')
+ self.enum = yaml.get('enum')
+ self.len = yaml.get('len')
+ self.display_hint = yaml.get('display-hint')
+ self.struct = yaml.get('struct')
+
+
+class SpecStruct(SpecElement):
+ """Netlink struct type
+
+ Represents a C struct definition.
+
+ Attributes:
+ members ordered list of struct members
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.members = []
+ for member in yaml.get('members', []):
+ self.members.append(self.new_member(family, member))
+
+ def new_member(self, family, elem):
+ return SpecStructMember(family, elem)
+
+ def __iter__(self):
+ yield from self.members
+
+ def items(self):
+ return self.members.items()
+
+
+class SpecSubMessage(SpecElement):
+ """ Netlink sub-message definition
+
+ Represents a set of sub-message formats for polymorphic nlattrs
+ that contain type-specific sub messages.
+
+ Attributes:
+ name string, name of sub-message definition
+ formats dict of sub-message formats indexed by match value
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.formats = collections.OrderedDict()
+ for elem in self.yaml['formats']:
+ format = self.new_format(family, elem)
+ self.formats[format.value] = format
+
+ def new_format(self, family, format):
+ return SpecSubMessageFormat(family, format)
+
+
+class SpecSubMessageFormat(SpecElement):
+ """ Netlink sub-message format definition
+
+ Represents a single format for a sub-message.
+
+ Attributes:
+ value attribute value to match against type selector
+ fixed_header string, name of fixed header, or None
+ attr_set string, name of attribute set, or None
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.value = yaml.get('value')
+ self.fixed_header = yaml.get('fixed-header')
+ self.attr_set = yaml.get('attribute-set')
+
+
+class SpecOperation(SpecElement):
+ """Netlink Operation
+
+ Information about a single Netlink operation.
+
+ Attributes:
+ value numerical ID when serialized, None if req/rsp values differ
+
+ req_value numerical ID when serialized, user -> kernel
+ rsp_value numerical ID when serialized, user <- kernel
+ modes supported operation modes (do, dump, event etc.)
+ is_call bool, whether the operation is a call
+ is_async bool, whether the operation is a notification
+ is_resv bool, whether the operation does not exist (it's just a reserved ID)
+ attr_set attribute set name
+ fixed_header string, optional name of fixed header struct
+
+ yaml raw spec as loaded from the spec file
+ """
+ def __init__(self, family, yaml, req_value, rsp_value):
+ super().__init__(family, yaml)
+
+ self.value = req_value if req_value == rsp_value else None
+ self.req_value = req_value
+ self.rsp_value = rsp_value
+
+ self.modes = yaml.keys() & {'do', 'dump', 'event', 'notify'}
+ self.is_call = 'do' in yaml or 'dump' in yaml
+ self.is_async = 'notify' in yaml or 'event' in yaml
+ self.is_resv = not self.is_async and not self.is_call
+ self.fixed_header = self.yaml.get('fixed-header', family.fixed_header)
+
+ # Added by resolve:
+ self.attr_set = None
+ delattr(self, "attr_set")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if 'attribute-set' in self.yaml:
+ attr_set_name = self.yaml['attribute-set']
+ elif 'notify' in self.yaml:
+ msg = self.family.msgs[self.yaml['notify']]
+ attr_set_name = msg['attribute-set']
+ elif self.is_resv:
+ attr_set_name = ''
+ else:
+ raise Exception(f"Can't resolve attribute set for op '{self.name}'")
+ if attr_set_name:
+ self.attr_set = self.family.attr_sets[attr_set_name]
+
+
+class SpecMcastGroup(SpecElement):
+ """Netlink Multicast Group
+
+ Information about a multicast group.
+
+ Value is only used for classic netlink families that use the
+ netlink-raw schema. Genetlink families use dynamic ID allocation
+ where the ids of multicast groups get resolved at runtime. Value
+ will be None for genetlink families.
+
+ Attributes:
+        name      name of the multicast group
+ value integer id of this multicast group for netlink-raw or None
+ yaml raw spec as loaded from the spec file
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+ self.value = self.yaml.get('value')
+
+
+class SpecFamily(SpecElement):
+ """ Netlink Family Spec class.
+
+ Netlink family information loaded from a spec (e.g. in YAML).
+ Takes care of unfolding implicit information which can be skipped
+ in the spec itself for brevity.
+
+ The class can be used like a dictionary to access the raw spec
+ elements but that's usually a bad idea.
+
+ Attributes:
+ proto protocol type (e.g. genetlink)
+ msg_id_model enum-model for operations (unified, directional etc.)
+ license spec license (loaded from an SPDX tag on the spec)
+
+ attr_sets dict of attribute sets
+ msgs dict of all messages (index by name)
+ sub_msgs dict of all sub messages (index by name)
+ ops dict of all valid requests / responses
+ ntfs dict of all async events
+ consts dict of all constants/enums
+ fixed_header string, optional name of family default fixed header struct
+ mcast_groups dict of all multicast groups (index by name)
+ kernel_family dict of kernel family attributes
+ """
+ def __init__(self, spec_path, schema_path=None, exclude_ops=None):
+ with open(spec_path, "r") as stream:
+ prefix = '# SPDX-License-Identifier: '
+ first = stream.readline().strip()
+ if not first.startswith(prefix):
+ raise Exception('SPDX license tag required in the spec')
+ self.license = first[len(prefix):]
+
+ stream.seek(0)
+ spec = yaml.safe_load(stream)
+
+ self._resolution_list = []
+
+ super().__init__(self, spec)
+
+ self._exclude_ops = exclude_ops if exclude_ops else []
+
+ self.proto = self.yaml.get('protocol', 'genetlink')
+ self.msg_id_model = self.yaml['operations'].get('enum-model', 'unified')
+
+ if schema_path is None:
+ schema_path = os.path.dirname(os.path.dirname(spec_path)) + f'/{self.proto}.yaml'
+ if schema_path:
+ global jsonschema
+
+ with open(schema_path, "r") as stream:
+ schema = yaml.safe_load(stream)
+
+ if jsonschema is None:
+ jsonschema = importlib.import_module("jsonschema")
+
+ jsonschema.validate(self.yaml, schema)
+
+ self.attr_sets = collections.OrderedDict()
+ self.sub_msgs = collections.OrderedDict()
+ self.msgs = collections.OrderedDict()
+ self.req_by_value = collections.OrderedDict()
+ self.rsp_by_value = collections.OrderedDict()
+ self.ops = collections.OrderedDict()
+ self.ntfs = collections.OrderedDict()
+ self.consts = collections.OrderedDict()
+ self.mcast_groups = collections.OrderedDict()
+ self.kernel_family = collections.OrderedDict(self.yaml.get('kernel-family', {}))
+
+ last_exception = None
+ while len(self._resolution_list) > 0:
+ resolved = []
+ unresolved = self._resolution_list
+ self._resolution_list = []
+
+ for elem in unresolved:
+ try:
+ elem.resolve()
+ except (KeyError, AttributeError) as e:
+ self._resolution_list.append(elem)
+ last_exception = e
+ continue
+
+ resolved.append(elem)
+
+ if len(resolved) == 0:
+ raise last_exception
+
+ def new_enum(self, elem):
+ return SpecEnumSet(self, elem)
+
+ def new_attr_set(self, elem):
+ return SpecAttrSet(self, elem)
+
+ def new_struct(self, elem):
+ return SpecStruct(self, elem)
+
+ def new_sub_message(self, elem):
+        return SpecSubMessage(self, elem)
+
+ def new_operation(self, elem, req_val, rsp_val):
+ return SpecOperation(self, elem, req_val, rsp_val)
+
+ def new_mcast_group(self, elem):
+ return SpecMcastGroup(self, elem)
+
+ def add_unresolved(self, elem):
+ self._resolution_list.append(elem)
+
+ def _dictify_ops_unified(self):
+ self.fixed_header = self.yaml['operations'].get('fixed-header')
+ val = 1
+ for elem in self.yaml['operations']['list']:
+ if 'value' in elem:
+ val = elem['value']
+
+ op = self.new_operation(elem, val, val)
+ val += 1
+
+ self.msgs[op.name] = op
+
+ def _dictify_ops_directional(self):
+ self.fixed_header = self.yaml['operations'].get('fixed-header')
+ req_val = rsp_val = 1
+ for elem in self.yaml['operations']['list']:
+ if 'notify' in elem or 'event' in elem:
+ if 'value' in elem:
+ rsp_val = elem['value']
+ req_val_next = req_val
+ rsp_val_next = rsp_val + 1
+ req_val = None
+ elif 'do' in elem or 'dump' in elem:
+ mode = elem['do'] if 'do' in elem else elem['dump']
+
+ v = mode.get('request', {}).get('value', None)
+ if v:
+ req_val = v
+ v = mode.get('reply', {}).get('value', None)
+ if v:
+ rsp_val = v
+
+ rsp_inc = 1 if 'reply' in mode else 0
+ req_val_next = req_val + 1
+ rsp_val_next = rsp_val + rsp_inc
+ else:
+ raise Exception("Can't parse directional ops")
+
+ if req_val == req_val_next:
+ req_val = None
+ if rsp_val == rsp_val_next:
+ rsp_val = None
+
+ skip = False
+ for exclude in self._exclude_ops:
+ skip |= bool(exclude.match(elem['name']))
+ if not skip:
+ op = self.new_operation(elem, req_val, rsp_val)
+
+ req_val = req_val_next
+ rsp_val = rsp_val_next
+
+ self.msgs[op.name] = op
+
+ def find_operation(self, name):
+ """
+ For a given operation name, find and return operation spec.
+ """
+ for op in self.yaml['operations']['list']:
+ if name == op['name']:
+ return op
+ return None
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ definitions = self.yaml.get('definitions', [])
+ for elem in definitions:
+ if elem['type'] == 'enum' or elem['type'] == 'flags':
+ self.consts[elem['name']] = self.new_enum(elem)
+ elif elem['type'] == 'struct':
+ self.consts[elem['name']] = self.new_struct(elem)
+ else:
+ self.consts[elem['name']] = elem
+
+ for elem in self.yaml['attribute-sets']:
+ attr_set = self.new_attr_set(elem)
+ self.attr_sets[elem['name']] = attr_set
+
+ for elem in self.yaml.get('sub-messages', []):
+ sub_message = self.new_sub_message(elem)
+ self.sub_msgs[sub_message.name] = sub_message
+
+ if self.msg_id_model == 'unified':
+ self._dictify_ops_unified()
+ elif self.msg_id_model == 'directional':
+ self._dictify_ops_directional()
+
+ for op in self.msgs.values():
+ if op.req_value is not None:
+ self.req_by_value[op.req_value] = op
+ if op.rsp_value is not None:
+ self.rsp_by_value[op.rsp_value] = op
+ if not op.is_async and 'attribute-set' in op:
+ self.ops[op.name] = op
+ elif op.is_async:
+ self.ntfs[op.name] = op
+
+ mcgs = self.yaml.get('mcast-groups')
+ if mcgs:
+ for elem in mcgs['list']:
+ mcg = self.new_mcast_group(elem)
+ self.mcast_groups[elem['name']] = mcg
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+from collections import namedtuple
+from enum import Enum
+import functools
+import os
+import random
+import socket
+import struct
+from struct import Struct
+import sys
+import yaml
+import ipaddress
+import uuid
+import queue
+import selectors
+import time
+
+from .nlspec import SpecFamily
+
+#
+# Generic Netlink code which should really be in some library, but I can't quickly find one.
+#
+
+
+class Netlink:
+ # Netlink socket
+ SOL_NETLINK = 270
+
+ NETLINK_ADD_MEMBERSHIP = 1
+ NETLINK_CAP_ACK = 10
+ NETLINK_EXT_ACK = 11
+ NETLINK_GET_STRICT_CHK = 12
+
+ # Netlink message
+ NLMSG_ERROR = 2
+ NLMSG_DONE = 3
+
+ NLM_F_REQUEST = 1
+ NLM_F_ACK = 4
+ NLM_F_ROOT = 0x100
+ NLM_F_MATCH = 0x200
+
+ NLM_F_REPLACE = 0x100
+ NLM_F_EXCL = 0x200
+ NLM_F_CREATE = 0x400
+ NLM_F_APPEND = 0x800
+
+ NLM_F_CAPPED = 0x100
+ NLM_F_ACK_TLVS = 0x200
+
+ NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
+
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+
+ NLA_TYPE_MASK = NLA_F_NESTED | NLA_F_NET_BYTEORDER
+
+ # Genetlink defines
+ NETLINK_GENERIC = 16
+
+ GENL_ID_CTRL = 0x10
+
+ # nlctrl
+ CTRL_CMD_GETFAMILY = 3
+
+ CTRL_ATTR_FAMILY_ID = 1
+ CTRL_ATTR_FAMILY_NAME = 2
+ CTRL_ATTR_MAXATTR = 5
+ CTRL_ATTR_MCAST_GROUPS = 7
+
+ CTRL_ATTR_MCAST_GRP_NAME = 1
+ CTRL_ATTR_MCAST_GRP_ID = 2
+
+ # Extack types
+ NLMSGERR_ATTR_MSG = 1
+ NLMSGERR_ATTR_OFFS = 2
+ NLMSGERR_ATTR_COOKIE = 3
+ NLMSGERR_ATTR_POLICY = 4
+ NLMSGERR_ATTR_MISS_TYPE = 5
+ NLMSGERR_ATTR_MISS_NEST = 6
+
+ # Policy types
+ NL_POLICY_TYPE_ATTR_TYPE = 1
+ NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2
+ NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3
+ NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4
+ NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5
+ NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6
+ NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7
+ NL_POLICY_TYPE_ATTR_POLICY_IDX = 8
+ NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9
+ NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10
+ NL_POLICY_TYPE_ATTR_PAD = 11
+ NL_POLICY_TYPE_ATTR_MASK = 12
+
+ AttrType = Enum('AttrType', ['flag', 'u8', 'u16', 'u32', 'u64',
+ 's8', 's16', 's32', 's64',
+ 'binary', 'string', 'nul-string',
+ 'nested', 'nested-array',
+ 'bitfield32', 'sint', 'uint'])
+
+class NlError(Exception):
+ def __init__(self, nl_msg):
+ self.nl_msg = nl_msg
+ self.error = -nl_msg.error
+
+ def __str__(self):
+ return f"Netlink error: {os.strerror(self.error)}\n{self.nl_msg}"
+
+
+class ConfigError(Exception):
+ pass
+
+
+class NlAttr:
+ ScalarFormat = namedtuple('ScalarFormat', ['native', 'big', 'little'])
+ type_formats = {
+ 'u8' : ScalarFormat(Struct('B'), Struct("B"), Struct("B")),
+ 's8' : ScalarFormat(Struct('b'), Struct("b"), Struct("b")),
+ 'u16': ScalarFormat(Struct('H'), Struct(">H"), Struct("<H")),
+ 's16': ScalarFormat(Struct('h'), Struct(">h"), Struct("<h")),
+ 'u32': ScalarFormat(Struct('I'), Struct(">I"), Struct("<I")),
+ 's32': ScalarFormat(Struct('i'), Struct(">i"), Struct("<i")),
+ 'u64': ScalarFormat(Struct('Q'), Struct(">Q"), Struct("<Q")),
+ 's64': ScalarFormat(Struct('q'), Struct(">q"), Struct("<q"))
+ }
+
+ def __init__(self, raw, offset):
+ self._len, self._type = struct.unpack("HH", raw[offset : offset + 4])
+ self.type = self._type & ~Netlink.NLA_TYPE_MASK
+ self.is_nest = self._type & Netlink.NLA_F_NESTED
+ self.payload_len = self._len
+ self.full_len = (self.payload_len + 3) & ~3
+ self.raw = raw[offset + 4 : offset + self.payload_len]
+
+ @classmethod
+ def get_format(cls, attr_type, byte_order=None):
+ format = cls.type_formats[attr_type]
+ if byte_order:
+ return format.big if byte_order == "big-endian" \
+ else format.little
+ return format.native
+
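+    # e.g. get_format('u16') returns the native-endian Struct('H'), while
+    # get_format('u16', 'big-endian') returns Struct('>H')
+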
+ def as_scalar(self, attr_type, byte_order=None):
+ format = self.get_format(attr_type, byte_order)
+ return format.unpack(self.raw)[0]
+
+ def as_auto_scalar(self, attr_type, byte_order=None):
+ if len(self.raw) != 4 and len(self.raw) != 8:
+ raise Exception(f"Auto-scalar len payload be 4 or 8 bytes, got {len(self.raw)}")
+ real_type = attr_type[0] + str(len(self.raw) * 8)
+ format = self.get_format(real_type, byte_order)
+ return format.unpack(self.raw)[0]
+
+ def as_strz(self):
+ return self.raw.decode('ascii')[:-1]
+
+ def as_bin(self):
+ return self.raw
+
+ def as_c_array(self, type):
+ format = self.get_format(type)
+ return [ x[0] for x in format.iter_unpack(self.raw) ]
+
+ def __repr__(self):
+ return f"[type:{self.type} len:{self._len}] {self.raw}"
+
+
+class NlAttrs:
+ def __init__(self, msg, offset=0):
+ self.attrs = []
+
+ while offset < len(msg):
+ attr = NlAttr(msg, offset)
+ offset += attr.full_len
+ self.attrs.append(attr)
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def __repr__(self):
+ msg = ''
+ for a in self.attrs:
+ if msg:
+ msg += '\n'
+ msg += repr(a)
+ return msg
+
+
+class NlMsg:
+ def __init__(self, msg, offset, attr_space=None):
+ self.hdr = msg[offset : offset + 16]
+
+ self.nl_len, self.nl_type, self.nl_flags, self.nl_seq, self.nl_portid = \
+ struct.unpack("IHHII", self.hdr)
+
+ self.raw = msg[offset + 16 : offset + self.nl_len]
+
+ self.error = 0
+ self.done = 0
+
+ extack_off = None
+ if self.nl_type == Netlink.NLMSG_ERROR:
+ self.error = struct.unpack("i", self.raw[0:4])[0]
+ self.done = 1
+ extack_off = 20
+ elif self.nl_type == Netlink.NLMSG_DONE:
+ self.error = struct.unpack("i", self.raw[0:4])[0]
+ self.done = 1
+ extack_off = 4
+
+ self.extack = None
+ if self.nl_flags & Netlink.NLM_F_ACK_TLVS and extack_off:
+ self.extack = dict()
+ extack_attrs = NlAttrs(self.raw[extack_off:])
+ for extack in extack_attrs:
+ if extack.type == Netlink.NLMSGERR_ATTR_MSG:
+ self.extack['msg'] = extack.as_strz()
+ elif extack.type == Netlink.NLMSGERR_ATTR_MISS_TYPE:
+ self.extack['miss-type'] = extack.as_scalar('u32')
+ elif extack.type == Netlink.NLMSGERR_ATTR_MISS_NEST:
+ self.extack['miss-nest'] = extack.as_scalar('u32')
+ elif extack.type == Netlink.NLMSGERR_ATTR_OFFS:
+ self.extack['bad-attr-offs'] = extack.as_scalar('u32')
+ elif extack.type == Netlink.NLMSGERR_ATTR_POLICY:
+ self.extack['policy'] = self._decode_policy(extack.raw)
+ else:
+ if 'unknown' not in self.extack:
+ self.extack['unknown'] = []
+ self.extack['unknown'].append(extack)
+
+ if attr_space:
+ # We don't have the ability to parse nests yet, so only do global
+ if 'miss-type' in self.extack and 'miss-nest' not in self.extack:
+ miss_type = self.extack['miss-type']
+ if miss_type in attr_space.attrs_by_val:
+ spec = attr_space.attrs_by_val[miss_type]
+ self.extack['miss-type'] = spec['name']
+ if 'doc' in spec:
+ self.extack['miss-type-doc'] = spec['doc']
+
+ def _decode_policy(self, raw):
+ policy = {}
+ for attr in NlAttrs(raw):
+ if attr.type == Netlink.NL_POLICY_TYPE_ATTR_TYPE:
+ type = attr.as_scalar('u32')
+ policy['type'] = Netlink.AttrType(type).name
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MIN_VALUE_S:
+ policy['min-value'] = attr.as_scalar('s64')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MAX_VALUE_S:
+ policy['max-value'] = attr.as_scalar('s64')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MIN_VALUE_U:
+ policy['min-value'] = attr.as_scalar('u64')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MAX_VALUE_U:
+ policy['max-value'] = attr.as_scalar('u64')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MIN_LENGTH:
+ policy['min-length'] = attr.as_scalar('u32')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MAX_LENGTH:
+ policy['max-length'] = attr.as_scalar('u32')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_BITFIELD32_MASK:
+ policy['bitfield32-mask'] = attr.as_scalar('u32')
+ elif attr.type == Netlink.NL_POLICY_TYPE_ATTR_MASK:
+ policy['mask'] = attr.as_scalar('u64')
+ return policy
+
+ def cmd(self):
+ return self.nl_type
+
+ def __repr__(self):
+ msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}"
+ if self.error:
+ msg += '\n\terror: ' + str(self.error)
+ if self.extack:
+ msg += '\n\textack: ' + repr(self.extack)
+ return msg
+
+
+class NlMsgs:
+ def __init__(self, data, attr_space=None):
+ self.msgs = []
+
+ offset = 0
+ while offset < len(data):
+ msg = NlMsg(data, offset, attr_space=attr_space)
+ offset += msg.nl_len
+ self.msgs.append(msg)
+
+ def __iter__(self):
+ yield from self.msgs
+
+
+genl_family_name_to_id = None
+
+
+def _genl_msg(nl_type, nl_flags, genl_cmd, genl_version, seq=None):
+ # we prepend length in _genl_msg_finalize()
+ if seq is None:
+ seq = random.randint(1, 1024)
+ nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
+ genlmsg = struct.pack("BBH", genl_cmd, genl_version, 0)
+ return nlmsg + genlmsg
+
+
+def _genl_msg_finalize(msg):
+ return struct.pack("I", len(msg) + 4) + msg
+
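+# _genl_msg() omits the leading 4-byte length; _genl_msg_finalize() prepends
+# it, so a bare genetlink header (12 remaining nlmsghdr bytes + 4 genlmsghdr
+# bytes) finalizes to a 20-byte message with nl_len == 20.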
+
+def _genl_load_families():
+ with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC) as sock:
+ sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
+
+ msg = _genl_msg(Netlink.GENL_ID_CTRL,
+ Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK | Netlink.NLM_F_DUMP,
+ Netlink.CTRL_CMD_GETFAMILY, 1)
+ msg = _genl_msg_finalize(msg)
+
+ sock.send(msg, 0)
+
+ global genl_family_name_to_id
+ genl_family_name_to_id = dict()
+
+ while True:
+ reply = sock.recv(128 * 1024)
+ nms = NlMsgs(reply)
+ for nl_msg in nms:
+ if nl_msg.error:
+ print("Netlink error:", nl_msg.error)
+ return
+ if nl_msg.done:
+ return
+
+ gm = GenlMsg(nl_msg)
+ fam = dict()
+ for attr in NlAttrs(gm.raw):
+ if attr.type == Netlink.CTRL_ATTR_FAMILY_ID:
+ fam['id'] = attr.as_scalar('u16')
+ elif attr.type == Netlink.CTRL_ATTR_FAMILY_NAME:
+ fam['name'] = attr.as_strz()
+ elif attr.type == Netlink.CTRL_ATTR_MAXATTR:
+ fam['maxattr'] = attr.as_scalar('u32')
+ elif attr.type == Netlink.CTRL_ATTR_MCAST_GROUPS:
+ fam['mcast'] = dict()
+ for entry in NlAttrs(attr.raw):
+ mcast_name = None
+ mcast_id = None
+ for entry_attr in NlAttrs(entry.raw):
+ if entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_NAME:
+ mcast_name = entry_attr.as_strz()
+ elif entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_ID:
+ mcast_id = entry_attr.as_scalar('u32')
+ if mcast_name and mcast_id is not None:
+ fam['mcast'][mcast_name] = mcast_id
+ if 'name' in fam and 'id' in fam:
+ genl_family_name_to_id[fam['name']] = fam
+
+
+class GenlMsg:
+ def __init__(self, nl_msg):
+ self.nl = nl_msg
+ self.genl_cmd, self.genl_version, _ = struct.unpack_from("BBH", nl_msg.raw, 0)
+ self.raw = nl_msg.raw[4:]
+
+ def cmd(self):
+ return self.genl_cmd
+
+ def __repr__(self):
+ msg = repr(self.nl)
+ msg += f"\tgenl_cmd = {self.genl_cmd} genl_ver = {self.genl_version}\n"
+ for a in self.raw_attrs:
+ msg += '\t\t' + repr(a) + '\n'
+ return msg
+
+
+class NetlinkProtocol:
+ def __init__(self, family_name, proto_num):
+ self.family_name = family_name
+ self.proto_num = proto_num
+
+ def _message(self, nl_type, nl_flags, seq=None):
+ if seq is None:
+ seq = random.randint(1, 1024)
+ nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
+ return nlmsg
+
+ def message(self, flags, command, version, seq=None):
+ return self._message(command, flags, seq)
+
+ def _decode(self, nl_msg):
+ return nl_msg
+
+ def decode(self, ynl, nl_msg, op):
+ msg = self._decode(nl_msg)
+ if op is None:
+ op = ynl.rsp_by_value[msg.cmd()]
+ fixed_header_size = ynl._struct_size(op.fixed_header)
+ msg.raw_attrs = NlAttrs(msg.raw, fixed_header_size)
+ return msg
+
+ def get_mcast_id(self, mcast_name, mcast_groups):
+ if mcast_name not in mcast_groups:
+ raise Exception(f'Multicast group "{mcast_name}" not present in the spec')
+ return mcast_groups[mcast_name].value
+
+ def msghdr_size(self):
+ return 16
+
+
+class GenlProtocol(NetlinkProtocol):
+ def __init__(self, family_name):
+ super().__init__(family_name, Netlink.NETLINK_GENERIC)
+
+ global genl_family_name_to_id
+ if genl_family_name_to_id is None:
+ _genl_load_families()
+
+ self.genl_family = genl_family_name_to_id[family_name]
+ self.family_id = genl_family_name_to_id[family_name]['id']
+
+ def message(self, flags, command, version, seq=None):
+ nlmsg = self._message(self.family_id, flags, seq)
+ genlmsg = struct.pack("BBH", command, version, 0)
+ return nlmsg + genlmsg
+
+ def _decode(self, nl_msg):
+ return GenlMsg(nl_msg)
+
+ def get_mcast_id(self, mcast_name, mcast_groups):
+ if mcast_name not in self.genl_family['mcast']:
+ raise Exception(f'Multicast group "{mcast_name}" not present in the family')
+ return self.genl_family['mcast'][mcast_name]
+
+ def msghdr_size(self):
+ return super().msghdr_size() + 4
+
+
+class SpaceAttrs:
+ SpecValuesPair = namedtuple('SpecValuesPair', ['spec', 'values'])
+
+ def __init__(self, attr_space, attrs, outer = None):
+ outer_scopes = outer.scopes if outer else []
+ inner_scope = self.SpecValuesPair(attr_space, attrs)
+ self.scopes = [inner_scope] + outer_scopes
+
+ def lookup(self, name):
+ for scope in self.scopes:
+ if name in scope.spec:
+ if name in scope.values:
+ return scope.values[name]
+ spec_name = scope.spec.yaml['name']
+ raise Exception(
+ f"No value for '{name}' in attribute space '{spec_name}'")
+ raise Exception(f"Attribute '{name}' not defined in any attribute-set")
+
+
+#
+# YNL implementation details.
+#
+
+
+class YnlFamily(SpecFamily):
+ def __init__(self, def_path, schema=None, process_unknown=False,
+ recv_size=0):
+ super().__init__(def_path, schema)
+
+ self.include_raw = False
+ self.process_unknown = process_unknown
+
+ try:
+ if self.proto == "netlink-raw":
+ self.nlproto = NetlinkProtocol(self.yaml['name'],
+ self.yaml['protonum'])
+ else:
+ self.nlproto = GenlProtocol(self.yaml['name'])
+ except KeyError:
+ raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
+
+ self._recv_dbg = False
+ # Note that netlink will use a conservative (min) message size for
+ # the first dump recv() on the socket; our setting only matters
+ # from the second recv() onwards.
+ self._recv_size = recv_size if recv_size else 131072
+ # Netlink will always allocate at least PAGE_SIZE - sizeof(skb_shinfo)
+ # for a message, so smaller receive sizes will lead to truncation.
+ # Note that the min size for other families may be larger than 4k!
+ if self._recv_size < 4000:
+ raise ConfigError()
+
+ self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, self.nlproto.proto_num)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_GET_STRICT_CHK, 1)
+
+ self.async_msg_ids = set()
+ self.async_msg_queue = queue.Queue()
+
+ for msg in self.msgs.values():
+ if msg.is_async:
+ self.async_msg_ids.add(msg.rsp_value)
+
+ for op_name, op in self.ops.items():
+ bound_f = functools.partial(self._op, op_name)
+ setattr(self, op.ident_name, bound_f)
+
+
+ def ntf_subscribe(self, mcast_name):
+ mcast_id = self.nlproto.get_mcast_id(mcast_name, self.mcast_groups)
+ self.sock.bind((0, 0))
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
+ mcast_id)
+
+ def set_recv_dbg(self, enabled):
+ self._recv_dbg = enabled
+
+ def _recv_dbg_print(self, reply, nl_msgs):
+ if not self._recv_dbg:
+ return
+ print("Recv: read", len(reply), "bytes,",
+ len(nl_msgs.msgs), "messages", file=sys.stderr)
+ for nl_msg in nl_msgs:
+ print(" ", nl_msg, file=sys.stderr)
+
+ def _encode_enum(self, attr_spec, value):
+ enum = self.consts[attr_spec['enum']]
+ if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
+ scalar = 0
+ if isinstance(value, str):
+ value = [value]
+ for single_value in value:
+ scalar += enum.entries[single_value].user_value(as_flags = True)
+ return scalar
+ else:
+ return enum.entries[value].user_value()
+
+ def _get_scalar(self, attr_spec, value):
+ try:
+ return int(value)
+ except (ValueError, TypeError) as e:
+ if 'enum' not in attr_spec:
+ raise e
+ return self._encode_enum(attr_spec, value)
+
+ def _add_attr(self, space, name, value, search_attrs):
+ try:
+ attr = self.attr_sets[space][name]
+ except KeyError:
+ raise Exception(f"Space '{space}' has no attribute '{name}'")
+ nl_type = attr.value
+
+ if attr.is_multi and isinstance(value, list):
+ attr_payload = b''
+ for subvalue in value:
+ attr_payload += self._add_attr(space, name, subvalue, search_attrs)
+ return attr_payload
+
+ if attr["type"] == 'nest':
+ nl_type |= Netlink.NLA_F_NESTED
+ attr_payload = b''
+ sub_space = attr['nested-attributes']
+ sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
+ elif attr["type"] == 'flag':
+ if not value:
+ # If value is absent or false then skip attribute creation.
+ return b''
+ attr_payload = b''
+ elif attr["type"] == 'string':
+ attr_payload = str(value).encode('ascii') + b'\x00'
+ elif attr["type"] == 'binary':
+ if isinstance(value, bytes):
+ attr_payload = value
+ elif isinstance(value, str):
+ attr_payload = bytes.fromhex(value)
+ elif isinstance(value, dict) and attr.struct_name:
+ attr_payload = self._encode_struct(attr.struct_name, value)
+ else:
+ raise Exception(f'Unknown type for binary attribute, value: {value}')
+ elif attr['type'] in NlAttr.type_formats or attr.is_auto_scalar:
+ scalar = self._get_scalar(attr, value)
+ if attr.is_auto_scalar:
+ attr_type = attr["type"][0] + ('32' if scalar.bit_length() <= 32 else '64')
+ else:
+ attr_type = attr["type"]
+ format = NlAttr.get_format(attr_type, attr.byte_order)
+ attr_payload = format.pack(scalar)
+ elif attr['type'] == 'bitfield32':
+ scalar_value = self._get_scalar(attr, value["value"])
+ scalar_selector = self._get_scalar(attr, value["selector"])
+ attr_payload = struct.pack("II", scalar_value, scalar_selector)
+ elif attr['type'] == 'sub-message':
+ msg_format = self._resolve_selector(attr, search_attrs)
+ attr_payload = b''
+ if msg_format.fixed_header:
+ attr_payload += self._encode_struct(msg_format.fixed_header, value)
+ if msg_format.attr_set:
+ if msg_format.attr_set in self.attr_sets:
+ nl_type |= Netlink.NLA_F_NESTED
+ sub_attrs = SpaceAttrs(msg_format.attr_set, value, search_attrs)
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(msg_format.attr_set,
+ subname, subvalue, sub_attrs)
+ else:
+ raise Exception(f"Unknown attribute-set '{msg_format.attr_set}'")
+ else:
+ raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
+
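+ # The nlattr header is 4 bytes (nla_len, nla_type); nla_len covers the
+ # header plus payload, and the payload is padded to a 4-byte boundary.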
+ pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
+ return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
+
+ def _decode_enum(self, raw, attr_spec):
+ enum = self.consts[attr_spec['enum']]
+ if enum.type == 'flags' or attr_spec.get('enum-as-flags', False):
+ i = 0
+ value = set()
+ while raw:
+ if raw & 1:
+ value.add(enum.entries_by_val[i].name)
+ raw >>= 1
+ i += 1
+ else:
+ value = enum.entries_by_val[raw].name
+ return value
+
+ def _decode_binary(self, attr, attr_spec):
+ if attr_spec.struct_name:
+ decoded = self._decode_struct(attr.raw, attr_spec.struct_name)
+ elif attr_spec.sub_type:
+ decoded = attr.as_c_array(attr_spec.sub_type)
+ else:
+ decoded = attr.as_bin()
+ if attr_spec.display_hint:
+ decoded = self._formatted_string(decoded, attr_spec.display_hint)
+ return decoded
+
+ def _decode_array_attr(self, attr, attr_spec):
+ decoded = []
+ offset = 0
+ while offset < len(attr.raw):
+ item = NlAttr(attr.raw, offset)
+ offset += item.full_len
+
+ if attr_spec["sub-type"] == 'nest':
+ subattrs = self._decode(NlAttrs(item.raw), attr_spec['nested-attributes'])
+ decoded.append({ item.type: subattrs })
+ elif attr_spec["sub-type"] == 'binary':
+ subattrs = item.as_bin()
+ if attr_spec.display_hint:
+ subattrs = self._formatted_string(subattrs, attr_spec.display_hint)
+ decoded.append(subattrs)
+ elif attr_spec["sub-type"] in NlAttr.type_formats:
+ subattrs = item.as_scalar(attr_spec['sub-type'], attr_spec.byte_order)
+ if attr_spec.display_hint:
+ subattrs = self._formatted_string(subattrs, attr_spec.display_hint)
+ decoded.append(subattrs)
+ else:
+ raise Exception(f'Unknown {attr_spec["sub-type"]} with name {attr_spec["name"]}')
+ return decoded
+
+ def _decode_nest_type_value(self, attr, attr_spec):
+ decoded = {}
+ value = attr
+ for name in attr_spec['type-value']:
+ value = NlAttr(value.raw, 0)
+ decoded[name] = value.type
+ subattrs = self._decode(NlAttrs(value.raw), attr_spec['nested-attributes'])
+ decoded.update(subattrs)
+ return decoded
+
+ def _decode_unknown(self, attr):
+ if attr.is_nest:
+ return self._decode(NlAttrs(attr.raw), None)
+ else:
+ return attr.as_bin()
+
+ def _rsp_add(self, rsp, name, is_multi, decoded):
+ if is_multi is None:
+ if name in rsp and type(rsp[name]) is not list:
+ rsp[name] = [rsp[name]]
+ is_multi = True
+ else:
+ is_multi = False
+
+ if not is_multi:
+ rsp[name] = decoded
+ elif name in rsp:
+ rsp[name].append(decoded)
+ else:
+ rsp[name] = [decoded]
+
+ def _resolve_selector(self, attr_spec, search_attrs):
+ sub_msg = attr_spec.sub_message
+ if sub_msg not in self.sub_msgs:
+ raise Exception(f"No sub-message spec named {sub_msg} for {attr_spec.name}")
+ sub_msg_spec = self.sub_msgs[sub_msg]
+
+ selector = attr_spec.selector
+ value = search_attrs.lookup(selector)
+ if value not in sub_msg_spec.formats:
+ raise Exception(f"No message format for '{value}' in sub-message spec '{sub_msg}'")
+
+ spec = sub_msg_spec.formats[value]
+ return spec
+
+ def _decode_sub_msg(self, attr, attr_spec, search_attrs):
+ msg_format = self._resolve_selector(attr_spec, search_attrs)
+ decoded = {}
+ offset = 0
+ if msg_format.fixed_header:
+ decoded.update(self._decode_struct(attr.raw, msg_format.fixed_header))
+ offset = self._struct_size(msg_format.fixed_header)
+ if msg_format.attr_set:
+ if msg_format.attr_set in self.attr_sets:
+ subdict = self._decode(NlAttrs(attr.raw, offset), msg_format.attr_set)
+ decoded.update(subdict)
+ else:
+ raise Exception(f"Unknown attribute-set '{msg_format.attr_set}' when decoding '{attr_spec.name}'")
+ return decoded
+
+ def _decode(self, attrs, space, outer_attrs = None):
+ rsp = dict()
+ if space:
+ attr_space = self.attr_sets[space]
+ search_attrs = SpaceAttrs(attr_space, rsp, outer_attrs)
+
+ for attr in attrs:
+ try:
+ attr_spec = attr_space.attrs_by_val[attr.type]
+ except (KeyError, UnboundLocalError):
+ if not self.process_unknown:
+ raise Exception(f"Space '{space}' has no attribute with value '{attr.type}'")
+ attr_name = f"UnknownAttr({attr.type})"
+ self._rsp_add(rsp, attr_name, None, self._decode_unknown(attr))
+ continue
+
+ try:
+ if attr_spec["type"] == 'nest':
+ subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'], search_attrs)
+ decoded = subdict
+ elif attr_spec["type"] == 'string':
+ decoded = attr.as_strz()
+ elif attr_spec["type"] == 'binary':
+ decoded = self._decode_binary(attr, attr_spec)
+ elif attr_spec["type"] == 'flag':
+ decoded = True
+ elif attr_spec.is_auto_scalar:
+ decoded = attr.as_auto_scalar(attr_spec['type'], attr_spec.byte_order)
+ elif attr_spec["type"] in NlAttr.type_formats:
+ decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
+ if 'enum' in attr_spec:
+ decoded = self._decode_enum(decoded, attr_spec)
+ elif attr_spec.display_hint:
+ decoded = self._formatted_string(decoded, attr_spec.display_hint)
+ elif attr_spec["type"] == 'indexed-array':
+ decoded = self._decode_array_attr(attr, attr_spec)
+ elif attr_spec["type"] == 'bitfield32':
+ value, selector = struct.unpack("II", attr.raw)
+ if 'enum' in attr_spec:
+ value = self._decode_enum(value, attr_spec)
+ selector = self._decode_enum(selector, attr_spec)
+ decoded = {"value": value, "selector": selector}
+ elif attr_spec["type"] == 'sub-message':
+ decoded = self._decode_sub_msg(attr, attr_spec, search_attrs)
+ elif attr_spec["type"] == 'nest-type-value':
+ decoded = self._decode_nest_type_value(attr, attr_spec)
+ else:
+ if not self.process_unknown:
+ raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
+ decoded = self._decode_unknown(attr)
+
+ self._rsp_add(rsp, attr_spec["name"], attr_spec.is_multi, decoded)
+ except:
+ print(f"Error decoding '{attr_spec.name}' from '{space}'")
+ raise
+
+ return rsp
+
+ def _decode_extack_path(self, attrs, attr_set, offset, target):
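+ # Walk the encoded request to translate the extack byte offset of the
+ # offending attribute into a dotted attribute path (e.g. ".outer.inner").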
+ for attr in attrs:
+ try:
+ attr_spec = attr_set.attrs_by_val[attr.type]
+ except KeyError:
+ raise Exception(f"Space '{attr_set.name}' has no attribute with value '{attr.type}'")
+ if offset > target:
+ break
+ if offset == target:
+ return '.' + attr_spec.name
+
+ if offset + attr.full_len <= target:
+ offset += attr.full_len
+ continue
+ if attr_spec['type'] != 'nest':
+ raise Exception(f"Can't dive into {attr.type} ({attr_spec['name']}) for extack")
+ offset += 4
+ subpath = self._decode_extack_path(NlAttrs(attr.raw),
+ self.attr_sets[attr_spec['nested-attributes']],
+ offset, target)
+ if subpath is None:
+ return None
+ return '.' + attr_spec.name + subpath
+
+ return None
+
+ def _decode_extack(self, request, op, extack):
+ if 'bad-attr-offs' not in extack:
+ return
+
+ msg = self.nlproto.decode(self, NlMsg(request, 0, op.attr_set), op)
+ offset = self.nlproto.msghdr_size() + self._struct_size(op.fixed_header)
+ path = self._decode_extack_path(msg.raw_attrs, op.attr_set, offset,
+ extack['bad-attr-offs'])
+ if path:
+ del extack['bad-attr-offs']
+ extack['bad-attr'] = path
+
+ def _struct_size(self, name):
+ if name:
+ members = self.consts[name].members
+ size = 0
+ for m in members:
+ if m.type in ['pad', 'binary']:
+ if m.struct:
+ size += self._struct_size(m.struct)
+ else:
+ size += m.len
+ else:
+ format = NlAttr.get_format(m.type, m.byte_order)
+ size += format.size
+ return size
+ else:
+ return 0
+
+ def _decode_struct(self, data, name):
+ members = self.consts[name].members
+ attrs = dict()
+ offset = 0
+ for m in members:
+ value = None
+ if m.type == 'pad':
+ offset += m.len
+ elif m.type == 'binary':
+ if m.struct:
+ len = self._struct_size(m.struct)
+ value = self._decode_struct(data[offset : offset + len],
+ m.struct)
+ offset += len
+ else:
+ value = data[offset : offset + m.len]
+ offset += m.len
+ else:
+ format = NlAttr.get_format(m.type, m.byte_order)
+ [ value ] = format.unpack_from(data, offset)
+ offset += format.size
+ if value is not None:
+ if m.enum:
+ value = self._decode_enum(value, m)
+ elif m.display_hint:
+ value = self._formatted_string(value, m.display_hint)
+ attrs[m.name] = value
+ return attrs
+
+ def _encode_struct(self, name, vals):
+ members = self.consts[name].members
+ attr_payload = b''
+ for m in members:
+ value = vals.pop(m.name) if m.name in vals else None
+ if m.type == 'pad':
+ attr_payload += bytearray(m.len)
+ elif m.type == 'binary':
+ if m.struct:
+ if value is None:
+ value = dict()
+ attr_payload += self._encode_struct(m.struct, value)
+ else:
+ if value is None:
+ attr_payload += bytearray(m.len)
+ else:
+ attr_payload += bytes.fromhex(value)
+ else:
+ if value is None:
+ value = 0
+ format = NlAttr.get_format(m.type, m.byte_order)
+ attr_payload += format.pack(value)
+ return attr_payload
+
+ def _formatted_string(self, raw, display_hint):
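+ # e.g. display_hint 'mac' renders b'\x00\x11\x22\x33\x44\x55' as '00:11:22:33:44:55'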
+ if display_hint == 'mac':
+ formatted = ':'.join('%02x' % b for b in raw)
+ elif display_hint == 'hex':
+ if isinstance(raw, int):
+ formatted = hex(raw)
+ else:
+ formatted = bytes.hex(raw, ' ')
+ elif display_hint in [ 'ipv4', 'ipv6' ]:
+ formatted = format(ipaddress.ip_address(raw))
+ elif display_hint == 'uuid':
+ formatted = str(uuid.UUID(bytes=raw))
+ else:
+ formatted = raw
+ return formatted
+
+ def handle_ntf(self, decoded):
+ msg = dict()
+ if self.include_raw:
+ msg['raw'] = decoded
+ op = self.rsp_by_value[decoded.cmd()]
+ attrs = self._decode(decoded.raw_attrs, op.attr_set.name)
+ if op.fixed_header:
+ attrs.update(self._decode_struct(decoded.raw, op.fixed_header))
+
+ msg['name'] = op['name']
+ msg['msg'] = attrs
+ self.async_msg_queue.put(msg)
+
+ def check_ntf(self):
+ while True:
+ try:
+ reply = self.sock.recv(self._recv_size, socket.MSG_DONTWAIT)
+ except BlockingIOError:
+ return
+
+ nms = NlMsgs(reply)
+ self._recv_dbg_print(reply, nms)
+ for nl_msg in nms:
+ if nl_msg.error:
+ print("Netlink error in ntf!?", os.strerror(-nl_msg.error))
+ print(nl_msg)
+ continue
+ if nl_msg.done:
+ print("Netlink done while checking for ntf!?")
+ continue
+
+ decoded = self.nlproto.decode(self, nl_msg, None)
+ if decoded.cmd() not in self.async_msg_ids:
+ print("Unexpected msg id while checking for ntf", decoded)
+ continue
+
+ self.handle_ntf(decoded)
+
+ def poll_ntf(self, duration=None):
+ start_time = time.time()
+ selector = selectors.DefaultSelector()
+ selector.register(self.sock, selectors.EVENT_READ)
+
+ while True:
+ try:
+ yield self.async_msg_queue.get_nowait()
+ except queue.Empty:
+ if duration is not None:
+ timeout = start_time + duration - time.time()
+ if timeout <= 0:
+ return
+ else:
+ timeout = None
+ events = selector.select(timeout)
+ if events:
+ self.check_ntf()
+
+ def operation_do_attributes(self, name):
+ """
+ For a given operation name, find and return a supported
+ set of attributes (as a dict).
+ """
+ op = self.find_operation(name)
+ if not op:
+ return None
+
+ return op['do']['request']['attributes'].copy()
+
+ def _encode_message(self, op, vals, flags, req_seq):
+ nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK
+ for flag in flags or []:
+ nl_flags |= flag
+
+ msg = self.nlproto.message(nl_flags, op.req_value, 1, req_seq)
+ if op.fixed_header:
+ msg += self._encode_struct(op.fixed_header, vals)
+ search_attrs = SpaceAttrs(op.attr_set, vals)
+ for name, value in vals.items():
+ msg += self._add_attr(op.attr_set.name, name, value, search_attrs)
+ msg = _genl_msg_finalize(msg)
+ return msg
+
+ def _ops(self, ops):
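+ # Encode each (method, vals, flags) op with its own sequence number, send
+ # them in a single payload and match the replies back via reqs_by_seq.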
+ reqs_by_seq = {}
+ req_seq = random.randint(1024, 65535)
+ payload = b''
+ for (method, vals, flags) in ops:
+ op = self.ops[method]
+ msg = self._encode_message(op, vals, flags, req_seq)
+ reqs_by_seq[req_seq] = (op, msg, flags)
+ payload += msg
+ req_seq += 1
+
+ self.sock.send(payload, 0)
+
+ done = False
+ rsp = []
+ op_rsp = []
+ while not done:
+ reply = self.sock.recv(self._recv_size)
+ nms = NlMsgs(reply, attr_space=op.attr_set)
+ self._recv_dbg_print(reply, nms)
+ for nl_msg in nms:
+ if nl_msg.nl_seq in reqs_by_seq:
+ (op, req_msg, req_flags) = reqs_by_seq[nl_msg.nl_seq]
+ if nl_msg.extack:
+ self._decode_extack(req_msg, op, nl_msg.extack)
+ else:
+ op = None
+ req_flags = []
+
+ if nl_msg.error:
+ raise NlError(nl_msg)
+ if nl_msg.done:
+ if nl_msg.extack:
+ print("Netlink warning:")
+ print(nl_msg)
+
+ if Netlink.NLM_F_DUMP in req_flags:
+ rsp.append(op_rsp)
+ elif not op_rsp:
+ rsp.append(None)
+ elif len(op_rsp) == 1:
+ rsp.append(op_rsp[0])
+ else:
+ rsp.append(op_rsp)
+ op_rsp = []
+
+ del reqs_by_seq[nl_msg.nl_seq]
+ done = len(reqs_by_seq) == 0
+ break
+
+ decoded = self.nlproto.decode(self, nl_msg, op)
+
+ # Check if this is a reply to our request
+ if nl_msg.nl_seq not in reqs_by_seq or decoded.cmd() != op.rsp_value:
+ if decoded.cmd() in self.async_msg_ids:
+ self.handle_ntf(decoded)
+ continue
+ else:
+ print('Unexpected message: ' + repr(decoded))
+ continue
+
+ rsp_msg = self._decode(decoded.raw_attrs, op.attr_set.name)
+ if op.fixed_header:
+ rsp_msg.update(self._decode_struct(decoded.raw, op.fixed_header))
+ op_rsp.append(rsp_msg)
+
+ return rsp
+
+ def _op(self, method, vals, flags=None, dump=False):
+ req_flags = flags or []
+ if dump:
+ req_flags.append(Netlink.NLM_F_DUMP)
+
+ ops = [(method, vals, req_flags)]
+ return self._ops(ops)[0]
+
+ def do(self, method, vals, flags=None):
+ return self._op(method, vals, flags)
+
+ def dump(self, method, vals):
+ return self._op(method, vals, dump=True)
+
+ def do_multi(self, ops):
+ return self._ops(ops)
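+
+# A minimal usage sketch (spec path and op names are hypothetical; this is
+# roughly what pyynl/cli.py does under the hood):
+#   ynl = YnlFamily("path/to/family.yaml")
+#   reply = ynl.do("some-op", {"some-attr": 1})
+#   entries = ynl.dump("some-dump-op", {})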
--- /dev/null
+#!/usr/bin/env python3
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+import argparse
+import collections
+import filecmp
+import pathlib
+import os
+import re
+import shutil
+import sys
+import tempfile
+import yaml
+
+sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
+from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry
+
+
+def c_upper(name):
+ return name.upper().replace('-', '_')
+
+
+def c_lower(name):
+ return name.lower().replace('-', '_')
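+# e.g. c_upper('page-pool') -> 'PAGE_POOL'; c_lower('TX-PUSH') -> 'tx_push'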
+
+
+def limit_to_number(name):
+ """
+ Turn a string limit like u32-max or s64-min into its numerical value
+ """
+ if name[0] == 'u' and name.endswith('-min'):
+ return 0
+ width = int(name[1:-4])
+ if name[0] == 's':
+ width -= 1
+ value = (1 << width) - 1
+ if name[0] == 's' and name.endswith('-min'):
+ value = -value - 1
+ return value
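+# e.g. limit_to_number('u8-max') == 255; limit_to_number('s8-min') == -128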
+
+
+class BaseNlLib:
+ def get_family_id(self):
+ return 'ys->family_id'
+
+
+class Type(SpecAttr):
+ def __init__(self, family, attr_set, attr, value):
+ super().__init__(family, attr_set, attr, value)
+
+ self.attr = attr
+ self.attr_set = attr_set
+ self.type = attr['type']
+ self.checks = attr.get('checks', {})
+
+ self.request = False
+ self.reply = False
+
+ if 'len' in attr:
+ self.len = attr['len']
+
+ if 'nested-attributes' in attr:
+ self.nested_attrs = attr['nested-attributes']
+ if self.nested_attrs == family.name:
+ self.nested_render_name = c_lower(f"{family.ident_name}")
+ else:
+ self.nested_render_name = c_lower(f"{family.ident_name}_{self.nested_attrs}")
+
+ if self.nested_attrs in self.family.consts:
+ self.nested_struct_type = 'struct ' + self.nested_render_name + '_'
+ else:
+ self.nested_struct_type = 'struct ' + self.nested_render_name
+
+ self.c_name = c_lower(self.name)
+ if self.c_name in _C_KW:
+ self.c_name += '_'
+
+ # Added by resolve():
+ self.enum_name = None
+ delattr(self, "enum_name")
+
+ def _get_real_attr(self):
+ # If the attr is for a subset, return the "real" attr (just one level down, does not recurse)
+ return self.family.attr_sets[self.attr_set.subset_of][self.name]
+
+ def set_request(self):
+ self.request = True
+ if self.attr_set.subset_of:
+ self._get_real_attr().set_request()
+
+ def set_reply(self):
+ self.reply = True
+ if self.attr_set.subset_of:
+ self._get_real_attr().set_reply()
+
+ def get_limit(self, limit, default=None):
+ value = self.checks.get(limit, default)
+ if value is None:
+ return value
+ if isinstance(value, int):
+ return value
+ if value in self.family.consts:
+ raise Exception("Resolving family constants not implemented, yet")
+ return limit_to_number(value)
+
+ def get_limit_str(self, limit, default=None, suffix=''):
+ value = self.checks.get(limit, default)
+ if value is None:
+ return ''
+ if isinstance(value, int):
+ return str(value) + suffix
+ if value in self.family.consts:
+ return c_upper(f"{self.family['name']}-{value}")
+ return c_upper(value)
+
+ def resolve(self):
+ if 'name-prefix' in self.attr:
+ enum_name = f"{self.attr['name-prefix']}{self.name}"
+ else:
+ enum_name = f"{self.attr_set.name_prefix}{self.name}"
+ self.enum_name = c_upper(enum_name)
+
+ if self.attr_set.subset_of:
+ if self.checks != self._get_real_attr().checks:
+ raise Exception("Overriding checks not supported by codegen, yet")
+
+ def is_multi_val(self):
+ return None
+
+ def is_scalar(self):
+ return self.type in {'u8', 'u16', 'u32', 'u64', 's32', 's64'}
+
+ def is_recursive(self):
+ return False
+
+ def is_recursive_for_op(self, ri):
+ return self.is_recursive() and not ri.op
+
+ def presence_type(self):
+ return 'bit'
+
+ def presence_member(self, space, type_filter):
+ if self.presence_type() != type_filter:
+ return
+
+ if self.presence_type() == 'bit':
+ pfx = '__' if space == 'user' else ''
+ return f"{pfx}u32 {self.c_name}:1;"
+
+ if self.presence_type() == 'len':
+ pfx = '__' if space == 'user' else ''
+ return f"{pfx}u32 {self.c_name}_len;"
+
+ def _complex_member_type(self, ri):
+ return None
+
+ def free_needs_iter(self):
+ return False
+
+ def free(self, ri, var, ref):
+ if self.is_multi_val() or self.presence_type() == 'len':
+ ri.cw.p(f'free({var}->{ref}{self.c_name});')
+
+ def arg_member(self, ri):
+ member = self._complex_member_type(ri)
+ if member:
+ arg = [member + ' *' + self.c_name]
+ if self.presence_type() == 'count':
+ arg += ['unsigned int n_' + self.c_name]
+ return arg
+ raise Exception(f"Struct member not implemented for class type {self.type}")
+
+ def struct_member(self, ri):
+ if self.is_multi_val():
+ ri.cw.p(f"unsigned int n_{self.c_name};")
+ member = self._complex_member_type(ri)
+ if member:
+ ptr = '*' if self.is_multi_val() else ''
+ if self.is_recursive_for_op(ri):
+ ptr = '*'
+ ri.cw.p(f"{member} {ptr}{self.c_name};")
+ return
+ members = self.arg_member(ri)
+ for one in members:
+ ri.cw.p(one + ';')
+
+ def _attr_policy(self, policy):
+ return '{ .type = ' + policy + ', }'
+
+ def attr_policy(self, cw):
+ policy = f'NLA_{c_upper(self.type)}'
+ if self.attr.get('byte-order') == 'big-endian':
+ if self.type in {'u16', 'u32'}:
+ policy = f'NLA_BE{self.type[1:]}'
+
+ spec = self._attr_policy(policy)
+ cw.p(f"\t[{self.enum_name}] = {spec},")
+
+ def _attr_typol(self):
+ raise Exception(f"Type policy not implemented for class type {self.type}")
+
+ def attr_typol(self, cw):
+ typol = self._attr_typol()
+ cw.p(f'[{self.enum_name}] = {"{"} .name = "{self.name}", {typol}{"}"},')
+
+ def _attr_put_line(self, ri, var, line):
+ if self.presence_type() == 'bit':
+ ri.cw.p(f"if ({var}->_present.{self.c_name})")
+ elif self.presence_type() == 'len':
+ ri.cw.p(f"if ({var}->_present.{self.c_name}_len)")
+ ri.cw.p(f"{line};")
+
+ def _attr_put_simple(self, ri, var, put_type):
+ line = f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
+ self._attr_put_line(ri, var, line)
+
+ def attr_put(self, ri, var):
+ raise Exception(f"Put not implemented for class type {self.type}")
+
+ def _attr_get(self, ri, var):
+ raise Exception(f"Attr get not implemented for class type {self.type}")
+
+ def attr_get(self, ri, var, first):
+ lines, init_lines, local_vars = self._attr_get(ri, var)
+ if type(lines) is str:
+ lines = [lines]
+ if type(init_lines) is str:
+ init_lines = [init_lines]
+
+ kw = 'if' if first else 'else if'
+ ri.cw.block_start(line=f"{kw} (type == {self.enum_name})")
+ if local_vars:
+ for local in local_vars:
+ ri.cw.p(local)
+ ri.cw.nl()
+
+ if not self.is_multi_val():
+ ri.cw.p("if (ynl_attr_validate(yarg, attr))")
+ ri.cw.p("return YNL_PARSE_CB_ERROR;")
+ if self.presence_type() == 'bit':
+ ri.cw.p(f"{var}->_present.{self.c_name} = 1;")
+
+ if init_lines:
+ ri.cw.nl()
+ for line in init_lines:
+ ri.cw.p(line)
+
+ for line in lines:
+ ri.cw.p(line)
+ ri.cw.block_end()
+ return True
+
+ def _setter_lines(self, ri, member, presence):
+ raise Exception(f"Setter not implemented for class type {self.type}")
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ ref = (ref if ref else []) + [self.c_name]
+ var = "req"
+ member = f"{var}->{'.'.join(ref)}"
+
+ code = []
+ presence = ''
+ for i in range(0, len(ref)):
+ presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"
+ # Every layer below the last is a nest, so we know it uses bit presence;
+ # the last layer is "self" and may be a complex type
+ if i == len(ref) - 1 and self.presence_type() != 'bit':
+ continue
+ code.append(presence + ' = 1;')
+ code += self._setter_lines(ri, member, presence)
+
+ func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
+ free = bool([x for x in code if 'free(' in x])
+ alloc = bool([x for x in code if 'alloc(' in x])
+ if free and not alloc:
+ func_name = '__' + func_name
+ ri.cw.write_func('static inline void', func_name, body=code,
+ args=[f'{type_name(ri, direction, deref=deref)} *{var}'] + self.arg_member(ri))
+
+
+class TypeUnused(Type):
+ def presence_type(self):
+ return ''
+
+ def arg_member(self, ri):
+ return []
+
+ def _attr_get(self, ri, var):
+ return ['return YNL_PARSE_CB_ERROR;'], None, None
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_REJECT, '
+
+ def attr_policy(self, cw):
+ pass
+
+ def attr_put(self, ri, var):
+ pass
+
+ def attr_get(self, ri, var, first):
+ pass
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ pass
+
+
+class TypePad(Type):
+ def presence_type(self):
+ return ''
+
+ def arg_member(self, ri):
+ return []
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_IGNORE, '
+
+ def attr_put(self, ri, var):
+ pass
+
+ def attr_get(self, ri, var, first):
+ pass
+
+ def attr_policy(self, cw):
+ pass
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ pass
+
+
+class TypeScalar(Type):
+ def __init__(self, family, attr_set, attr, value):
+ super().__init__(family, attr_set, attr, value)
+
+ self.byte_order_comment = ''
+ if 'byte-order' in attr:
+ self.byte_order_comment = f" /* {attr['byte-order']} */"
+
+ if 'enum' in self.attr:
+ enum = self.family.consts[self.attr['enum']]
+ low, high = enum.value_range()
+ if 'min' not in self.checks:
+ if low != 0 or self.type[0] == 's':
+ self.checks['min'] = low
+ if 'max' not in self.checks:
+ self.checks['max'] = high
+
+ if 'min' in self.checks and 'max' in self.checks:
+ if self.get_limit('min') > self.get_limit('max'):
+ raise Exception(f'Invalid limit for "{self.name}" min: {self.get_limit("min")} max: {self.get_limit("max")}')
+ self.checks['range'] = True
+
+ low = min(self.get_limit('min', 0), self.get_limit('max', 0))
+ high = max(self.get_limit('min', 0), self.get_limit('max', 0))
+ if low < 0 and self.type[0] == 'u':
+ raise Exception(f'Invalid limit for "{self.name}" negative limit for unsigned type')
+ if low < -32768 or high > 32767:
+ self.checks['full-range'] = True
+
+ # Added by resolve():
+ self.is_bitfield = None
+ delattr(self, "is_bitfield")
+ self.type_name = None
+ delattr(self, "type_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if 'enum-as-flags' in self.attr and self.attr['enum-as-flags']:
+ self.is_bitfield = True
+ elif 'enum' in self.attr:
+ self.is_bitfield = self.family.consts[self.attr['enum']]['type'] == 'flags'
+ else:
+ self.is_bitfield = False
+
+ if not self.is_bitfield and 'enum' in self.attr:
+ self.type_name = self.family.consts[self.attr['enum']].user_type
+ elif self.is_auto_scalar:
+ self.type_name = '__' + self.type[0] + '64'
+ else:
+ self.type_name = '__' + self.type
+
+ def _attr_policy(self, policy):
+ if 'flags-mask' in self.checks or self.is_bitfield:
+ if self.is_bitfield:
+ enum = self.family.consts[self.attr['enum']]
+ mask = enum.get_mask(as_flags=True)
+ else:
+ flags = self.family.consts[self.checks['flags-mask']]
+ flag_cnt = len(flags['entries'])
+ mask = (1 << flag_cnt) - 1
+ return f"NLA_POLICY_MASK({policy}, 0x{mask:x})"
+ elif 'full-range' in self.checks:
+ return f"NLA_POLICY_FULL_RANGE({policy}, &{c_lower(self.enum_name)}_range)"
+ elif 'range' in self.checks:
+ return f"NLA_POLICY_RANGE({policy}, {self.get_limit_str('min')}, {self.get_limit_str('max')})"
+ elif 'min' in self.checks:
+ return f"NLA_POLICY_MIN({policy}, {self.get_limit_str('min')})"
+ elif 'max' in self.checks:
+ return f"NLA_POLICY_MAX({policy}, {self.get_limit_str('max')})"
+ return super()._attr_policy(policy)
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_U{c_upper(self.type[1:])}, '
+
+ def arg_member(self, ri):
+ return [f'{self.type_name} {self.c_name}{self.byte_order_comment}']
+
+ def attr_put(self, ri, var):
+ self._attr_put_simple(ri, var, self.type)
+
+ def _attr_get(self, ri, var):
+ return f"{var}->{self.c_name} = ynl_attr_get_{self.type}(attr);", None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"{member} = {self.c_name};"]
+
+
+class TypeFlag(Type):
+ def arg_member(self, ri):
+ return []
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_FLAG, '
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, NULL, 0)")
+
+ def _attr_get(self, ri, var):
+ return [], None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return []
+
+
+class TypeString(Type):
+ def arg_member(self, ri):
+ return [f"const char *{self.c_name}"]
+
+ def presence_type(self):
+ return 'len'
+
+ def struct_member(self, ri):
+ ri.cw.p(f"char *{self.c_name};")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NUL_STR, '
+
+ def _attr_policy(self, policy):
+ if 'exact-len' in self.checks:
+ mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')'
+ else:
+ mem = '{ .type = ' + policy
+ if 'max-len' in self.checks:
+ mem += ', .len = ' + self.get_limit_str('max-len')
+ mem += ', }'
+ return mem
+
+ def attr_policy(self, cw):
+ if self.checks.get('unterminated-ok', False):
+ policy = 'NLA_STRING'
+ else:
+ policy = 'NLA_NUL_STRING'
+
+ spec = self._attr_policy(policy)
+ cw.p(f"\t[{self.enum_name}] = {spec},")
+
+ def attr_put(self, ri, var):
+ self._attr_put_simple(ri, var, 'str')
+
+ def _attr_get(self, ri, var):
+ len_mem = var + '->_present.' + self.c_name + '_len'
+ return [f"{len_mem} = len;",
+ f"{var}->{self.c_name} = malloc(len + 1);",
+ f"memcpy({var}->{self.c_name}, ynl_attr_get_str(attr), len);",
+ f"{var}->{self.c_name}[len] = 0;"], \
+ ['len = strnlen(ynl_attr_get_str(attr), ynl_attr_data_len(attr));'], \
+ ['unsigned int len;']
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"free({member});",
+ f"{presence}_len = strlen({self.c_name});",
+ f"{member} = malloc({presence}_len + 1);",
+ f'memcpy({member}, {self.c_name}, {presence}_len);',
+ f'{member}[{presence}_len] = 0;']
+
+
+class TypeBinary(Type):
+ def arg_member(self, ri):
+ return [f"const void *{self.c_name}", 'size_t len']
+
+ def presence_type(self):
+ return 'len'
+
+ def struct_member(self, ri):
+ ri.cw.p(f"void *{self.c_name};")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_BINARY,'
+
+ def _attr_policy(self, policy):
+ if len(self.checks) == 0:
+ pass
+ elif len(self.checks) == 1:
+ check_name = list(self.checks)[0]
+ if check_name not in {'exact-len', 'min-len', 'max-len'}:
+ raise Exception('Unsupported check for binary type: ' + check_name)
+ else:
+ raise Exception('More than one check for binary type not implemented, yet')
+
+ if len(self.checks) == 0:
+ mem = '{ .type = NLA_BINARY, }'
+ elif 'exact-len' in self.checks:
+ mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')'
+ elif 'min-len' in self.checks:
+ mem = '{ .len = ' + self.get_limit_str('min-len') + ', }'
+ elif 'max-len' in self.checks:
+ mem = 'NLA_POLICY_MAX_LEN(' + self.get_limit_str('max-len') + ')'
+
+ return mem
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, " +
+ f"{var}->{self.c_name}, {var}->_present.{self.c_name}_len)")
+
+ def _attr_get(self, ri, var):
+ len_mem = var + '->_present.' + self.c_name + '_len'
+ return [f"{len_mem} = len;",
+ f"{var}->{self.c_name} = malloc(len);",
+ f"memcpy({var}->{self.c_name}, ynl_attr_data(attr), len);"], \
+ ['len = ynl_attr_data_len(attr);'], \
+ ['unsigned int len;']
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"free({member});",
+ f"{presence}_len = len;",
+ f"{member} = malloc({presence}_len);",
+ f'memcpy({member}, {self.c_name}, {presence}_len);']
+
+
+class TypeBitfield32(Type):
+ def _complex_member_type(self, ri):
+ return "struct nla_bitfield32"
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_BITFIELD32, '
+
+ def _attr_policy(self, policy):
+ if 'enum' not in self.attr:
+ raise Exception('Enum required for bitfield32 attr')
+ enum = self.family.consts[self.attr['enum']]
+ mask = enum.get_mask(as_flags=True)
+ return f"NLA_POLICY_BITFIELD32({mask})"
+
+ def attr_put(self, ri, var):
+ line = f"ynl_attr_put(nlh, {self.enum_name}, &{var}->{self.c_name}, sizeof(struct nla_bitfield32))"
+ self._attr_put_line(ri, var, line)
+
+ def _attr_get(self, ri, var):
+ return f"memcpy(&{var}->{self.c_name}, ynl_attr_data(attr), sizeof(struct nla_bitfield32));", None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"memcpy(&{member}, {self.c_name}, sizeof(struct nla_bitfield32));"]
+
+
+class TypeNest(Type):
+ def is_recursive(self):
+ return self.family.pure_nested_structs[self.nested_attrs].recursive
+
+ def _complex_member_type(self, ri):
+ return self.nested_struct_type
+
+ def free(self, ri, var, ref):
+ at = '&'
+ if self.is_recursive_for_op(ri):
+ at = ''
+ ri.cw.p(f'if ({var}->{ref}{self.c_name})')
+ ri.cw.p(f'{self.nested_render_name}_free({at}{var}->{ref}{self.c_name});')
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_policy(self, policy):
+ return 'NLA_POLICY_NESTED(' + self.nested_render_name + '_nl_policy)'
+
+ def attr_put(self, ri, var):
+ at = '' if self.is_recursive_for_op(ri) else '&'
+ self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
+ f"{self.enum_name}, {at}{var}->{self.c_name})")
+
+ def _attr_get(self, ri, var):
+ get_lines = [f"if ({self.nested_render_name}_parse(&parg, attr))",
+ "return YNL_PARSE_CB_ERROR;"]
+ init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
+ f"parg.data = &{var}->{self.c_name};"]
+ return get_lines, init_lines, None
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ ref = (ref if ref else []) + [self.c_name]
+
+ for _, attr in ri.family.pure_nested_structs[self.nested_attrs].member_list():
+ if attr.is_recursive():
+ continue
+ attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref)
+
+
+class TypeMultiAttr(Type):
+ def __init__(self, family, attr_set, attr, value, base_type):
+ super().__init__(family, attr_set, attr, value)
+
+ self.base_type = base_type
+
+ def is_multi_val(self):
+ return True
+
+ def presence_type(self):
+ return 'count'
+
+ def _complex_member_type(self, ri):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ return self.nested_struct_type
+ elif self.attr['type'] in scalars:
+ scalar_pfx = '__' if ri.ku_space == 'user' else ''
+ return scalar_pfx + self.attr['type']
+ else:
+ raise Exception(f"Sub-type {self.attr['type']} not supported yet")
+
+ def free_needs_iter(self):
+ return 'type' not in self.attr or self.attr['type'] == 'nest'
+
+ def free(self, ri, var, ref):
+ if self.attr['type'] in scalars:
+ ri.cw.p(f"free({var}->{ref}{self.c_name});")
+ elif 'type' not in self.attr or self.attr['type'] == 'nest':
+ ri.cw.p(f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)")
+ ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);')
+ ri.cw.p(f"free({var}->{ref}{self.c_name});")
+ else:
+ raise Exception(f"Free of MultiAttr sub-type {self.attr['type']} not supported yet")
+
+ def _attr_policy(self, policy):
+ return self.base_type._attr_policy(policy)
+
+ def _attr_typol(self):
+ return self.base_type._attr_typol()
+
+ def _attr_get(self, ri, var):
+ return f'n_{self.c_name}++;', None, None
+
+ def attr_put(self, ri, var):
+ if self.attr['type'] in scalars:
+ put_type = self.type
+ ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
+ ri.cw.p(f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
+ elif 'type' not in self.attr or self.attr['type'] == 'nest':
+ ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
+ self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
+ f"{self.enum_name}, &{var}->{self.c_name}[i])")
+ else:
+ raise Exception(f"Put of MultiAttr sub-type {self.attr['type']} not supported yet")
+
+ def _setter_lines(self, ri, member, presence):
+ # For multi-attr we have a count rather than a presence bit, so hack up the presence name
+ presence = presence[:-(len('_present.') + len(self.c_name))] + "n_" + self.c_name
+ return [f"free({member});",
+ f"{member} = {self.c_name};",
+ f"{presence} = n_{self.c_name};"]
+
+
+class TypeArrayNest(Type):
+ def is_multi_val(self):
+ return True
+
+ def presence_type(self):
+ return 'count'
+
+ def _complex_member_type(self, ri):
+ if 'sub-type' not in self.attr or self.attr['sub-type'] == 'nest':
+ return self.nested_struct_type
+ elif self.attr['sub-type'] in scalars:
+ scalar_pfx = '__' if ri.ku_space == 'user' else ''
+ return scalar_pfx + self.attr['sub-type']
+ else:
+ raise Exception(f"Sub-type {self.attr['sub-type']} not supported yet")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_get(self, ri, var):
+ local_vars = ['const struct nlattr *attr2;']
+ get_lines = [f'attr_{self.c_name} = attr;',
+ 'ynl_attr_for_each_nested(attr2, attr)',
+ f'\t{var}->n_{self.c_name}++;']
+ return get_lines, None, local_vars
+
+
+class TypeNestTypeValue(Type):
+ def _complex_member_type(self, ri):
+ return self.nested_struct_type
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_get(self, ri, var):
+ prev = 'attr'
+ tv_args = ''
+ get_lines = []
+ local_vars = []
+ init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
+ f"parg.data = &{var}->{self.c_name};"]
+ if 'type-value' in self.attr:
+ tv_names = [c_lower(x) for x in self.attr["type-value"]]
+ local_vars += [f'const struct nlattr *attr_{", *attr_".join(tv_names)};']
+ local_vars += [f'__u32 {", ".join(tv_names)};']
+ for level in self.attr["type-value"]:
+ level = c_lower(level)
+ get_lines += [f'attr_{level} = ynl_attr_data({prev});']
+ get_lines += [f'{level} = ynl_attr_type(attr_{level});']
+ prev = 'attr_' + level
+
+ tv_args = f", {', '.join(tv_names)}"
+
+ get_lines += [f"{self.nested_render_name}_parse(&parg, {prev}{tv_args});"]
+ return get_lines, init_lines, local_vars
+
+
+class Struct:
+ def __init__(self, family, space_name, type_list=None, inherited=None):
+ self.family = family
+ self.space_name = space_name
+ self.attr_set = family.attr_sets[space_name]
+ # Use list to catch comparisons with empty sets
+ self._inherited = inherited if inherited is not None else []
+ self.inherited = []
+
+ self.nested = type_list is None
+ if family.name == c_lower(space_name):
+ self.render_name = c_lower(family.ident_name)
+ else:
+ self.render_name = c_lower(family.ident_name + '-' + space_name)
+ self.struct_name = 'struct ' + self.render_name
+ if self.nested and space_name in family.consts:
+ self.struct_name += '_'
+ self.ptr_name = self.struct_name + ' *'
+ # All attr sets this one contains, directly or multiple levels down
+ self.child_nests = set()
+
+ self.request = False
+ self.reply = False
+ self.recursive = False
+
+ self.attr_list = []
+ self.attrs = dict()
+ if type_list is not None:
+ for t in type_list:
+ self.attr_list.append((t, self.attr_set[t]),)
+ else:
+ for t in self.attr_set:
+ self.attr_list.append((t, self.attr_set[t]),)
+
+ max_val = 0
+ self.attr_max_val = None
+ for name, attr in self.attr_list:
+ if attr.value >= max_val:
+ max_val = attr.value
+ self.attr_max_val = attr
+ self.attrs[name] = attr
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def __getitem__(self, key):
+ return self.attrs[key]
+
+ def member_list(self):
+ return self.attr_list
+
+ def set_inherited(self, new_inherited):
+ if self._inherited != new_inherited:
+ raise Exception("Inheriting different members not supported")
+ self.inherited = [c_lower(x) for x in sorted(self._inherited)]
+
+
+class EnumEntry(SpecEnumEntry):
+ def __init__(self, enum_set, yaml, prev, value_start):
+ super().__init__(enum_set, yaml, prev, value_start)
+
+ if prev:
+ self.value_change = (self.value != prev.value + 1)
+ else:
+ self.value_change = (self.value != 0)
+ self.value_change = self.value_change or self.enum_set['type'] == 'flags'
+
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ self.c_name = c_upper(self.enum_set.value_pfx + self.name)
+
+
+class EnumSet(SpecEnumSet):
+ def __init__(self, family, yaml):
+ self.render_name = c_lower(family.ident_name + '-' + yaml['name'])
+
+ if 'enum-name' in yaml:
+ if yaml['enum-name']:
+ self.enum_name = 'enum ' + c_lower(yaml['enum-name'])
+ self.user_type = self.enum_name
+ else:
+ self.enum_name = None
+ else:
+ self.enum_name = 'enum ' + self.render_name
+
+ if self.enum_name:
+ self.user_type = self.enum_name
+ else:
+ self.user_type = 'int'
+
+ self.value_pfx = yaml.get('name-prefix', f"{family.ident_name}-{yaml['name']}-")
+ self.header = yaml.get('header', None)
+ self.enum_cnt_name = yaml.get('enum-cnt-name', None)
+
+ super().__init__(family, yaml)
+
+ def new_entry(self, entry, prev_entry, value_start):
+ return EnumEntry(self, entry, prev_entry, value_start)
+
+ def value_range(self):
+ low = min([x.value for x in self.entries.values()])
+ high = max([x.value for x in self.entries.values()])
+
+ if high - low + 1 != len(self.entries):
+ raise Exception("Can't get value range for a noncontiguous enum")
+
+ return low, high
+
+
+class AttrSet(SpecAttrSet):
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ if self.subset_of is None:
+ if 'name-prefix' in yaml:
+ pfx = yaml['name-prefix']
+ elif self.name == family.name:
+ pfx = family.ident_name + '-a-'
+ else:
+ pfx = f"{family.ident_name}-a-{self.name}-"
+ self.name_prefix = c_upper(pfx)
+ self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max"))
+ self.cnt_name = c_upper(self.yaml.get('attr-cnt-name', f"__{self.name_prefix}max"))
+ else:
+ self.name_prefix = family.attr_sets[self.subset_of].name_prefix
+ self.max_name = family.attr_sets[self.subset_of].max_name
+ self.cnt_name = family.attr_sets[self.subset_of].cnt_name
+
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+
+ def resolve(self):
+ self.c_name = c_lower(self.name)
+ if self.c_name in _C_KW:
+ self.c_name += '_'
+ if self.c_name == self.family.c_name:
+ self.c_name = ''
+
+ def new_attr(self, elem, value):
+ if elem['type'] in scalars:
+ t = TypeScalar(self.family, self, elem, value)
+ elif elem['type'] == 'unused':
+ t = TypeUnused(self.family, self, elem, value)
+ elif elem['type'] == 'pad':
+ t = TypePad(self.family, self, elem, value)
+ elif elem['type'] == 'flag':
+ t = TypeFlag(self.family, self, elem, value)
+ elif elem['type'] == 'string':
+ t = TypeString(self.family, self, elem, value)
+ elif elem['type'] == 'binary':
+ t = TypeBinary(self.family, self, elem, value)
+ elif elem['type'] == 'bitfield32':
+ t = TypeBitfield32(self.family, self, elem, value)
+ elif elem['type'] == 'nest':
+ t = TypeNest(self.family, self, elem, value)
+ elif elem['type'] == 'indexed-array' and 'sub-type' in elem:
+ if elem["sub-type"] == 'nest':
+ t = TypeArrayNest(self.family, self, elem, value)
+ else:
+ raise Exception(f'new_attr: unsupported sub-type {elem["sub-type"]}')
+ elif elem['type'] == 'nest-type-value':
+ t = TypeNestTypeValue(self.family, self, elem, value)
+ else:
+ raise Exception(f"No typed class for type {elem['type']}")
+
+ if 'multi-attr' in elem and elem['multi-attr']:
+ t = TypeMultiAttr(self.family, self, elem, value, t)
+
+ return t
+
+
+class Operation(SpecOperation):
+ def __init__(self, family, yaml, req_value, rsp_value):
+ super().__init__(family, yaml, req_value, rsp_value)
+
+ self.render_name = c_lower(family.ident_name + '_' + self.name)
+
+ self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
+ ('dump' in yaml and 'request' in yaml['dump'])
+
+ self.has_ntf = False
+
+ # Added by resolve:
+ self.enum_name = None
+ delattr(self, "enum_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if not self.is_async:
+ self.enum_name = self.family.op_prefix + c_upper(self.name)
+ else:
+ self.enum_name = self.family.async_op_prefix + c_upper(self.name)
+
+ def mark_has_ntf(self):
+ self.has_ntf = True
+
+
+class Family(SpecFamily):
+ def __init__(self, file_name, exclude_ops):
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+ self.op_prefix = None
+ delattr(self, "op_prefix")
+ self.async_op_prefix = None
+ delattr(self, "async_op_prefix")
+ self.mcgrps = None
+ delattr(self, "mcgrps")
+ self.consts = None
+ delattr(self, "consts")
+ self.hooks = None
+ delattr(self, "hooks")
+
+ super().__init__(file_name, exclude_ops=exclude_ops)
+
+ self.fam_key = c_upper(self.yaml.get('c-family-name', self.yaml["name"] + '_FAMILY_NAME'))
+ self.ver_key = c_upper(self.yaml.get('c-version-name', self.yaml["name"] + '_FAMILY_VERSION'))
+
+ if 'definitions' not in self.yaml:
+ self.yaml['definitions'] = []
+
+ if 'uapi-header' in self.yaml:
+ self.uapi_header = self.yaml['uapi-header']
+ else:
+ self.uapi_header = f"linux/{self.ident_name}.h"
+ if self.uapi_header.startswith("linux/") and self.uapi_header.endswith('.h'):
+ self.uapi_header_name = self.uapi_header[6:-2]
+ else:
+ self.uapi_header_name = self.ident_name
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}:
+ raise Exception("Codegen only supported for genetlink")
+
+ self.c_name = c_lower(self.ident_name)
+ if 'name-prefix' in self.yaml['operations']:
+ self.op_prefix = c_upper(self.yaml['operations']['name-prefix'])
+ else:
+ self.op_prefix = c_upper(self.yaml['name'] + '-cmd-')
+ if 'async-prefix' in self.yaml['operations']:
+ self.async_op_prefix = c_upper(self.yaml['operations']['async-prefix'])
+ else:
+ self.async_op_prefix = self.op_prefix
+
+ self.mcgrps = self.yaml.get('mcast-groups', {'list': []})
+
+ self.hooks = dict()
+ for when in ['pre', 'post']:
+ self.hooks[when] = dict()
+ for op_mode in ['do', 'dump']:
+ self.hooks[when][op_mode] = dict()
+ self.hooks[when][op_mode]['set'] = set()
+ self.hooks[when][op_mode]['list'] = []
+
+ # dict space-name -> 'request': set(attrs), 'reply': set(attrs)
+ self.root_sets = dict()
+ # dict space-name -> set('request', 'reply')
+ self.pure_nested_structs = dict()
+
+ self._mark_notify()
+ self._mock_up_events()
+
+ self._load_root_sets()
+ self._load_nested_sets()
+ self._load_attr_use()
+ self._load_hooks()
+
+ self.kernel_policy = self.yaml.get('kernel-policy', 'split')
+ if self.kernel_policy == 'global':
+ self._load_global_policy()
+
+ def new_enum(self, elem):
+ return EnumSet(self, elem)
+
+ def new_attr_set(self, elem):
+ return AttrSet(self, elem)
+
+ def new_operation(self, elem, req_value, rsp_value):
+ return Operation(self, elem, req_value, rsp_value)
+
+ def _mark_notify(self):
+ for op in self.msgs.values():
+ if 'notify' in op:
+ self.ops[op['notify']].mark_has_ntf()
+
+ # Fake a 'do' equivalent of all events, so that we can render their response parsing
+ def _mock_up_events(self):
+ for op in self.yaml['operations']['list']:
+ if 'event' in op:
+ op['do'] = {
+ 'reply': {
+ 'attributes': op['event']['attributes']
+ }
+ }
+
+ def _load_root_sets(self):
+ for op_name, op in self.msgs.items():
+ if 'attribute-set' not in op:
+ continue
+
+ req_attrs = set()
+ rsp_attrs = set()
+ for op_mode in ['do', 'dump']:
+ if op_mode in op and 'request' in op[op_mode]:
+ req_attrs.update(set(op[op_mode]['request']['attributes']))
+ if op_mode in op and 'reply' in op[op_mode]:
+ rsp_attrs.update(set(op[op_mode]['reply']['attributes']))
+ if 'event' in op:
+ rsp_attrs.update(set(op['event']['attributes']))
+
+ if op['attribute-set'] not in self.root_sets:
+ self.root_sets[op['attribute-set']] = {'request': req_attrs, 'reply': rsp_attrs}
+ else:
+ self.root_sets[op['attribute-set']]['request'].update(req_attrs)
+ self.root_sets[op['attribute-set']]['reply'].update(rsp_attrs)
+
+ def _sort_pure_types(self):
+ # Try to reorder according to dependencies
+ pns_key_list = list(self.pure_nested_structs.keys())
+ pns_key_seen = set()
+ rounds = len(pns_key_list) ** 2 # it's basically bubble sort
+ for _ in range(rounds):
+ if len(pns_key_list) == 0:
+ break
+ name = pns_key_list.pop(0)
+ finished = True
+ for _, spec in self.attr_sets[name].items():
+ if 'nested-attributes' in spec:
+ nested = spec['nested-attributes']
+ # If the unknown nest we hit is recursive it's fine, it'll be a pointer
+ if self.pure_nested_structs[nested].recursive:
+ continue
+ if nested not in pns_key_seen:
+ # Dicts preserve insertion order, so this moves the struct to the end
+ struct = self.pure_nested_structs.pop(name)
+ self.pure_nested_structs[name] = struct
+ finished = False
+ break
+ if finished:
+ pns_key_seen.add(name)
+ else:
+ pns_key_list.append(name)
+
+ def _load_nested_sets(self):
+ attr_set_queue = list(self.root_sets.keys())
+ attr_set_seen = set(self.root_sets.keys())
+
+ while len(attr_set_queue):
+ a_set = attr_set_queue.pop(0)
+ for attr, spec in self.attr_sets[a_set].items():
+ if 'nested-attributes' not in spec:
+ continue
+
+ nested = spec['nested-attributes']
+ if nested not in attr_set_seen:
+ attr_set_queue.append(nested)
+ attr_set_seen.add(nested)
+
+ inherit = set()
+ if nested not in self.root_sets:
+ if nested not in self.pure_nested_structs:
+ self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit)
+ else:
+ raise Exception(f'Using attr set as root and nested not supported - {nested}')
+
+ if 'type-value' in spec:
+ if nested in self.root_sets:
+ raise Exception("Inheriting members to a space used as root not supported")
+ inherit.update(set(spec['type-value']))
+ elif spec['type'] == 'indexed-array':
+ inherit.add('idx')
+ self.pure_nested_structs[nested].set_inherited(inherit)
+
+ for root_set, rs_members in self.root_sets.items():
+ for attr, spec in self.attr_sets[root_set].items():
+ if 'nested-attributes' in spec:
+ nested = spec['nested-attributes']
+ if attr in rs_members['request']:
+ self.pure_nested_structs[nested].request = True
+ if attr in rs_members['reply']:
+ self.pure_nested_structs[nested].reply = True
+
+ self._sort_pure_types()
+
+ # Propagate the request / reply / recursive
+ for attr_set, struct in reversed(self.pure_nested_structs.items()):
+ for _, spec in self.attr_sets[attr_set].items():
+ if 'nested-attributes' in spec:
+ child_name = spec['nested-attributes']
+ struct.child_nests.add(child_name)
+ child = self.pure_nested_structs.get(child_name)
+ if child:
+ if not child.recursive:
+ struct.child_nests.update(child.child_nests)
+ child.request |= struct.request
+ child.reply |= struct.reply
+ if attr_set in struct.child_nests:
+ struct.recursive = True
+
+ self._sort_pure_types()
+
+ def _load_attr_use(self):
+ for _, struct in self.pure_nested_structs.items():
+ if struct.request:
+ for _, arg in struct.member_list():
+ arg.set_request()
+ if struct.reply:
+ for _, arg in struct.member_list():
+ arg.set_reply()
+
+ for root_set, rs_members in self.root_sets.items():
+ for attr, spec in self.attr_sets[root_set].items():
+ if attr in rs_members['request']:
+ spec.set_request()
+ if attr in rs_members['reply']:
+ spec.set_reply()
+
+ def _load_global_policy(self):
+ global_set = set()
+ attr_set_name = None
+ for op_name, op in self.ops.items():
+ if not op:
+ continue
+ if 'attribute-set' not in op:
+ continue
+
+ if attr_set_name is None:
+ attr_set_name = op['attribute-set']
+ if attr_set_name != op['attribute-set']:
+ raise Exception('For a global policy all ops must use the same set')
+
+ for op_mode in ['do', 'dump']:
+ if op_mode in op:
+ req = op[op_mode].get('request')
+ if req:
+ global_set.update(req.get('attributes', []))
+
+ self.global_policy = []
+ self.global_policy_set = attr_set_name
+ for attr in self.attr_sets[attr_set_name]:
+ if attr in global_set:
+ self.global_policy.append(attr)
+
+ def _load_hooks(self):
+ for op in self.ops.values():
+ for op_mode in ['do', 'dump']:
+ if op_mode not in op:
+ continue
+ for when in ['pre', 'post']:
+ if when not in op[op_mode]:
+ continue
+ name = op[op_mode][when]
+ if name in self.hooks[when][op_mode]['set']:
+ continue
+ self.hooks[when][op_mode]['set'].add(name)
+ self.hooks[when][op_mode]['list'].append(name)
+
+
+class RenderInfo:
+ def __init__(self, cw, family, ku_space, op, op_mode, attr_set=None):
+ self.family = family
+ self.nl = cw.nlib
+ self.ku_space = ku_space
+ self.op_mode = op_mode
+ self.op = op
+
+ self.fixed_hdr = None
+ if op and op.fixed_header:
+ self.fixed_hdr = 'struct ' + c_lower(op.fixed_header)
+
+ # 'do' and 'dump' response parsing is identical
+ self.type_consistent = True
+ if op_mode != 'do' and 'dump' in op:
+ if 'do' in op:
+ if ('reply' in op['do']) != ('reply' in op["dump"]):
+ self.type_consistent = False
+ elif 'reply' in op['do'] and op["do"]["reply"] != op["dump"]["reply"]:
+ self.type_consistent = False
+ else:
+ self.type_consistent = False
+
+ self.attr_set = attr_set
+ if not self.attr_set:
+ self.attr_set = op['attribute-set']
+
+ self.type_name_conflict = False
+ if op:
+ self.type_name = c_lower(op.name)
+ else:
+ self.type_name = c_lower(attr_set)
+ if attr_set in family.consts:
+ self.type_name_conflict = True
+
+ self.cw = cw
+
+ self.struct = dict()
+ if op_mode == 'notify':
+ op_mode = 'do'
+ for op_dir in ['request', 'reply']:
+ if op:
+ type_list = []
+ if op_dir in op[op_mode]:
+ type_list = op[op_mode][op_dir]['attributes']
+ self.struct[op_dir] = Struct(family, self.attr_set, type_list=type_list)
+ if op_mode == 'event':
+ self.struct['reply'] = Struct(family, self.attr_set, type_list=op['event']['attributes'])
+
+
+class CodeWriter:
+ def __init__(self, nlib, out_file=None, overwrite=True):
+ self.nlib = nlib
+ self._overwrite = overwrite
+
+ self._nl = False
+ self._block_end = False
+ self._silent_block = False
+ self._ind = 0
+ self._ifdef_block = None
+ if out_file is None:
+ self._out = os.sys.stdout
+ else:
+ self._out = tempfile.NamedTemporaryFile('w+')
+ self._out_file = out_file
+
+ def __del__(self):
+ self.close_out_file()
+
+ def close_out_file(self):
+ if self._out == os.sys.stdout:
+ return
+ # Avoid modifying the file if contents didn't change
+ self._out.flush()
+ if not self._overwrite and os.path.isfile(self._out_file):
+ if filecmp.cmp(self._out.name, self._out_file, shallow=False):
+ return
+ with open(self._out_file, 'w+') as out_file:
+ self._out.seek(0)
+ shutil.copyfileobj(self._out, out_file)
+ self._out.close()
+ self._out = os.sys.stdout
+
+ @classmethod
+ def _is_cond(cls, line):
+ return line.startswith('if') or line.startswith('while') or line.startswith('for')
+
+ def p(self, line, add_ind=0):
+ if self._block_end:
+ self._block_end = False
+ if line.startswith('else'):
+ line = '} ' + line
+ else:
+ self._out.write('\t' * self._ind + '}\n')
+
+ if self._nl:
+ self._out.write('\n')
+ self._nl = False
+
+ ind = self._ind
+ if line[-1] == ':':
+ ind -= 1
+ if self._silent_block:
+ ind += 1
+ self._silent_block = line.endswith(')') and CodeWriter._is_cond(line)
+ if line[0] == '#':
+ ind = 0
+ if add_ind:
+ ind += add_ind
+ self._out.write('\t' * ind + line + '\n')
+
+ def nl(self):
+ self._nl = True
+
+ def block_start(self, line=''):
+ if line:
+ line = line + ' '
+ self.p(line + '{')
+ self._ind += 1
+
+ def block_end(self, line=''):
+ if line and line[0] not in {';', ','}:
+ line = ' ' + line
+ self._ind -= 1
+ self._nl = False
+ if not line:
+ # Delay printing closing bracket in case "else" comes next
+ if self._block_end:
+ self._out.write('\t' * (self._ind + 1) + '}\n')
+ self._block_end = True
+ else:
+ self.p('}' + line)
+
+ def write_doc_line(self, doc, indent=True):
+ words = doc.split()
+ line = ' *'
+ for word in words:
+ if len(line) + len(word) >= 79:
+ self.p(line)
+ line = ' *'
+ if indent:
+ line += ' '
+ line += ' ' + word
+ self.p(line)
+
+ def write_func_prot(self, qual_ret, name, args=None, doc=None, suffix=''):
+ if not args:
+ args = ['void']
+
+ if doc:
+ self.p('/*')
+ self.p(' * ' + doc)
+ self.p(' */')
+
+ oneline = qual_ret
+ if qual_ret[-1] != '*':
+ oneline += ' '
+ oneline += f"{name}({', '.join(args)}){suffix}"
+
+ if len(oneline) < 80:
+ self.p(oneline)
+ return
+
+ v = qual_ret
+ if len(v) > 3:
+ self.p(v)
+ v = ''
+ elif qual_ret[-1] != '*':
+ v += ' '
+ v += name + '('
+ ind = '\t' * (len(v) // 8) + ' ' * (len(v) % 8)
+ delta_ind = len(v) - len(ind)
+ v += args[0]
+ i = 1
+ while i < len(args):
+ next_len = len(v) + len(args[i])
+ if v[0] == '\t':
+ next_len += delta_ind
+ if next_len > 76:
+ self.p(v + ',')
+ v = ind
+ else:
+ v += ', '
+ v += args[i]
+ i += 1
+ self.p(v + ')' + suffix)
+
+ def write_func_lvar(self, local_vars):
+ if not local_vars:
+ return
+
+ if type(local_vars) is str:
+ local_vars = [local_vars]
+
+ local_vars.sort(key=len, reverse=True)
+ for var in local_vars:
+ self.p(var)
+ self.nl()
+
+ def write_func(self, qual_ret, name, body, args=None, local_vars=None):
+ self.write_func_prot(qual_ret=qual_ret, name=name, args=args)
+ self.write_func_lvar(local_vars=local_vars)
+
+ self.block_start()
+ for line in body:
+ self.p(line)
+ self.block_end()
+
+ def writes_defines(self, defines):
+ longest = 0
+ for define in defines:
+ if len(define[0]) > longest:
+ longest = len(define[0])
+ longest = ((longest + 8) // 8) * 8
+ for define in defines:
+ line = '#define ' + define[0]
+ line += '\t' * ((longest - len(define[0]) + 7) // 8)
+ if type(define[1]) is int:
+ line += str(define[1])
+ elif type(define[1]) is str:
+ line += '"' + define[1] + '"'
+ self.p(line)
+
+ def write_struct_init(self, members):
+ longest = max([len(x[0]) for x in members])
+ longest += 1 # because we prepend a .
+ longest = ((longest + 8) // 8) * 8
+ for one in members:
+ line = '.' + one[0]
+ line += '\t' * ((longest - len(one[0]) - 1 + 7) // 8)
+ line += '= ' + str(one[1]) + ','
+ self.p(line)
+
+ def ifdef_block(self, config):
+ config_option = None
+ if config:
+ config_option = 'CONFIG_' + c_upper(config)
+ if self._ifdef_block == config_option:
+ return
+
+ if self._ifdef_block:
+ self.p('#endif /* ' + self._ifdef_block + ' */')
+ if config_option:
+ self.p('#ifdef ' + config_option)
+ self._ifdef_block = config_option
+
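+# Illustrative use of CodeWriter (not part of the generator itself):
+#   cw.block_start('struct foo x =')   emits  "struct foo x = {"
+#   cw.p('.a = 1,')                    emits  "\t.a = 1,"
+#   cw.block_end(';')                  emits  "};"
+# i.e. braces and tab indentation are tracked automatically.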
+
+scalars = {'u8', 'u16', 'u32', 'u64', 's32', 's64', 'uint', 'sint'}
+
+direction_to_suffix = {
+ 'reply': '_rsp',
+ 'request': '_req',
+ '': ''
+}
+
+op_mode_to_wrapper = {
+ 'do': '',
+ 'dump': '_list',
+ 'notify': '_ntf',
+ 'event': '',
+}
+
+_C_KW = {
+ 'auto',
+ 'bool',
+ 'break',
+ 'case',
+ 'char',
+ 'const',
+ 'continue',
+ 'default',
+ 'do',
+ 'double',
+ 'else',
+ 'enum',
+ 'extern',
+ 'float',
+ 'for',
+ 'goto',
+ 'if',
+ 'inline',
+ 'int',
+ 'long',
+ 'register',
+ 'return',
+ 'short',
+ 'signed',
+ 'sizeof',
+ 'static',
+ 'struct',
+ 'switch',
+ 'typedef',
+ 'union',
+ 'unsigned',
+ 'void',
+ 'volatile',
+ 'while'
+}
+
+
+def rdir(direction):
+ if direction == 'reply':
+ return 'request'
+ if direction == 'request':
+ return 'reply'
+ return direction
+
+
+def op_prefix(ri, direction, deref=False):
+ suffix = f"_{ri.type_name}"
+
+ if not ri.op_mode or ri.op_mode == 'do':
+ suffix += f"{direction_to_suffix[direction]}"
+ else:
+ if direction == 'request':
+ suffix += '_req_dump'
+ else:
+ if ri.type_consistent:
+ if deref:
+ suffix += f"{direction_to_suffix[direction]}"
+ else:
+ suffix += op_mode_to_wrapper[ri.op_mode]
+ else:
+ suffix += '_rsp'
+ suffix += '_dump' if deref else '_list'
+
+ return f"{ri.family.c_name}{suffix}"
+
+
+def type_name(ri, direction, deref=False):
+ return f"struct {op_prefix(ri, direction, deref=deref)}"
+
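+# Naming example (illustrative): for the ethtool "rings-get" op the 'do'
+# request type renders as "struct ethtool_rings_get_req" and the 'dump'
+# reply wrapper as "struct ethtool_rings_get_list".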
+
+def print_prototype(ri, direction, terminate=True, doc=None):
+ suffix = ';' if terminate else ''
+
+ fname = ri.op.render_name
+ if ri.op_mode == 'dump':
+ fname += '_dump'
+
+ args = ['struct ynl_sock *ys']
+ if 'request' in ri.op[ri.op_mode]:
+ args.append(f"{type_name(ri, direction)} *" + f"{direction_to_suffix[direction][1:]}")
+
+ ret = 'int'
+ if 'reply' in ri.op[ri.op_mode]:
+ ret = f"{type_name(ri, rdir(direction))} *"
+
+ ri.cw.write_func_prot(ret, fname, args, doc=doc, suffix=suffix)
+
+
+def print_req_prototype(ri):
+ print_prototype(ri, "request", doc=ri.op['doc'])
+
+
+def print_dump_prototype(ri):
+ print_prototype(ri, "request")
+
+
+def put_typol_fwd(cw, struct):
+ cw.p(f'extern const struct ynl_policy_nest {struct.render_name}_nest;')
+
+
+def put_typol(cw, struct):
+ type_max = struct.attr_set.max_name
+ cw.block_start(line=f'const struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
+
+ for _, arg in struct.member_list():
+ arg.attr_typol(cw)
+
+ cw.block_end(line=';')
+ cw.nl()
+
+ cw.block_start(line=f'const struct ynl_policy_nest {struct.render_name}_nest =')
+ cw.p(f'.max_attr = {type_max},')
+ cw.p(f'.table = {struct.render_name}_policy,')
+ cw.block_end(line=';')
+ cw.nl()
+
+
+def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
+ args = [f'int {arg_name}']
+ if enum:
+ args = [enum.user_type + ' ' + arg_name]
+ cw.write_func_prot('const char *', f'{render_name}_str', args)
+ cw.block_start()
+ if enum and enum.type == 'flags':
+ cw.p(f'{arg_name} = ffs({arg_name}) - 1;')
+ cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)YNL_ARRAY_SIZE({map_name}))')
+ cw.p('return NULL;')
+ cw.p(f'return {map_name}[{arg_name}];')
+ cw.block_end()
+ cw.nl()
+
+
+def put_op_name_fwd(family, cw):
+ cw.write_func_prot('const char *', f'{family.c_name}_op_str', ['int op'], suffix=';')
+
+
+def put_op_name(family, cw):
+ map_name = f'{family.c_name}_op_strmap'
+ cw.block_start(line=f"static const char * const {map_name}[] =")
+ for op_name, op in family.msgs.items():
+ if op.rsp_value:
+            # Make sure we don't add duplicate entries when multiple commands
+            # produce the same response in legacy families.
+ if family.rsp_by_value[op.rsp_value] != op:
+ cw.p(f'// skip "{op_name}", duplicate reply value')
+ continue
+
+ if op.req_value == op.rsp_value:
+ cw.p(f'[{op.enum_name}] = "{op_name}",')
+ else:
+ cw.p(f'[{op.rsp_value}] = "{op_name}",')
+ cw.block_end(line=';')
+ cw.nl()
+
+ _put_enum_to_str_helper(cw, family.c_name + '_op', map_name, 'op')
+
+
+def put_enum_to_str_fwd(family, cw, enum):
+ args = [enum.user_type + ' value']
+ cw.write_func_prot('const char *', f'{enum.render_name}_str', args, suffix=';')
+
+
+def put_enum_to_str(family, cw, enum):
+ map_name = f'{enum.render_name}_strmap'
+ cw.block_start(line=f"static const char * const {map_name}[] =")
+ for entry in enum.entries.values():
+ cw.p(f'[{entry.value}] = "{entry.name}",')
+ cw.block_end(line=';')
+ cw.nl()
+
+ _put_enum_to_str_helper(cw, enum.render_name, map_name, 'value', enum=enum)
+
+
+def put_req_nested_prototype(ri, struct, suffix=';'):
+ func_args = ['struct nlmsghdr *nlh',
+ 'unsigned int attr_type',
+ f'{struct.ptr_name}obj']
+
+ ri.cw.write_func_prot('int', f'{struct.render_name}_put', func_args,
+ suffix=suffix)
+
+
+def put_req_nested(ri, struct):
+ put_req_nested_prototype(ri, struct, suffix='')
+ ri.cw.block_start()
+ ri.cw.write_func_lvar('struct nlattr *nest;')
+
+ ri.cw.p("nest = ynl_attr_nest_start(nlh, attr_type);")
+
+ for _, arg in struct.member_list():
+ arg.attr_put(ri, "obj")
+
+ ri.cw.p("ynl_attr_nest_end(nlh, nest);")
+
+ ri.cw.nl()
+ ri.cw.p('return 0;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def _multi_parse(ri, struct, init_lines, local_vars):
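+    # Emit the body of an attribute parsing function: declare the locals,
+    # walk the attributes with ynl_attr_for_each*() dispatching per-member
+    # attr_get() handlers, then re-walk array nests and multi-attrs into
+    # calloc()'ed arrays.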
+ if struct.nested:
+ iter_line = "ynl_attr_for_each_nested(attr, nested)"
+ else:
+ if ri.fixed_hdr:
+ local_vars += ['void *hdr;']
+ iter_line = "ynl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)"
+
+ array_nests = set()
+ multi_attrs = set()
+ needs_parg = False
+ for arg, aspec in struct.member_list():
+ if aspec['type'] == 'indexed-array' and 'sub-type' in aspec:
+ if aspec["sub-type"] == 'nest':
+ local_vars.append(f'const struct nlattr *attr_{aspec.c_name};')
+ array_nests.add(arg)
+ else:
+ raise Exception(f'Not supported sub-type {aspec["sub-type"]}')
+ if 'multi-attr' in aspec:
+ multi_attrs.add(arg)
+ needs_parg |= 'nested-attributes' in aspec
+ if array_nests or multi_attrs:
+ local_vars.append('int i;')
+ if needs_parg:
+ local_vars.append('struct ynl_parse_arg parg;')
+ init_lines.append('parg.ys = yarg->ys;')
+
+ all_multi = array_nests | multi_attrs
+
+ for anest in sorted(all_multi):
+ local_vars.append(f"unsigned int n_{struct[anest].c_name} = 0;")
+
+ ri.cw.block_start()
+ ri.cw.write_func_lvar(local_vars)
+
+ for line in init_lines:
+ ri.cw.p(line)
+ ri.cw.nl()
+
+ for arg in struct.inherited:
+ ri.cw.p(f'dst->{arg} = {arg};')
+
+ if ri.fixed_hdr:
+ ri.cw.p('hdr = ynl_nlmsg_data_offset(nlh, sizeof(struct genlmsghdr));')
+ ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({ri.fixed_hdr}));")
+ for anest in sorted(all_multi):
+ aspec = struct[anest]
+ ri.cw.p(f"if (dst->{aspec.c_name})")
+ ri.cw.p(f'return ynl_error_parse(yarg, "attribute already present ({struct.attr_set.name}.{aspec.name})");')
+
+ ri.cw.nl()
+ ri.cw.block_start(line=iter_line)
+ ri.cw.p('unsigned int type = ynl_attr_type(attr);')
+ ri.cw.nl()
+
+ first = True
+ for _, arg in struct.member_list():
+ good = arg.attr_get(ri, 'dst', first=first)
+ # First may be 'unused' or 'pad', ignore those
+ first &= not good
+
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ for anest in sorted(array_nests):
+ aspec = struct[anest]
+
+ ri.cw.block_start(line=f"if (n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
+ ri.cw.p('i = 0;')
+ ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
+ ri.cw.block_start(line=f"ynl_attr_for_each_nested(attr, attr_{aspec.c_name})")
+ ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, ynl_attr_type(attr)))")
+ ri.cw.p('return YNL_PARSE_CB_ERROR;')
+ ri.cw.p('i++;')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ for anest in sorted(multi_attrs):
+ aspec = struct[anest]
+ ri.cw.block_start(line=f"if (n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
+ ri.cw.p('i = 0;')
+ if 'nested-attributes' in aspec:
+ ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
+ ri.cw.block_start(line=iter_line)
+ ri.cw.block_start(line=f"if (ynl_attr_type(attr) == {aspec.enum_name})")
+ if 'nested-attributes' in aspec:
+ ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr))")
+ ri.cw.p('return YNL_PARSE_CB_ERROR;')
+ elif aspec.type in scalars:
+ ri.cw.p(f"dst->{aspec.c_name}[i] = ynl_attr_get_{aspec.type}(attr);")
+ else:
+ raise Exception('Nest parsing type not supported yet')
+ ri.cw.p('i++;')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ if struct.nested:
+ ri.cw.p('return 0;')
+ else:
+ ri.cw.p('return YNL_PARSE_CB_OK;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def parse_rsp_nested_prototype(ri, struct, suffix=';'):
+ func_args = ['struct ynl_parse_arg *yarg',
+ 'const struct nlattr *nested']
+ for arg in struct.inherited:
+ func_args.append('__u32 ' + arg)
+
+ ri.cw.write_func_prot('int', f'{struct.render_name}_parse', func_args,
+ suffix=suffix)
+
+
+def parse_rsp_nested(ri, struct):
+ parse_rsp_nested_prototype(ri, struct, suffix='')
+
+ local_vars = ['const struct nlattr *attr;',
+ f'{struct.ptr_name}dst = yarg->data;']
+ init_lines = []
+
+ if struct.member_list():
+ _multi_parse(ri, struct, init_lines, local_vars)
+ else:
+ # Empty nest
+ ri.cw.block_start()
+ ri.cw.p('return 0;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def parse_rsp_msg(ri, deref=False):
+ if 'reply' not in ri.op[ri.op_mode] and ri.op_mode != 'event':
+ return
+
+ func_args = ['const struct nlmsghdr *nlh',
+ 'struct ynl_parse_arg *yarg']
+
+ local_vars = [f'{type_name(ri, "reply", deref=deref)} *dst;',
+ 'const struct nlattr *attr;']
+ init_lines = ['dst = yarg->data;']
+
+ ri.cw.write_func_prot('int', f'{op_prefix(ri, "reply", deref=deref)}_parse', func_args)
+
+ if ri.struct["reply"].member_list():
+ _multi_parse(ri, ri.struct["reply"], init_lines, local_vars)
+ else:
+ # Empty reply
+ ri.cw.block_start()
+ ri.cw.p('return YNL_PARSE_CB_OK;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_req(ri):
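+    # Emit the user-space request function: start the genl message, put the
+    # request attributes, call ynl_exec() and, if the op has a reply, hand it
+    # to the generated _parse callback.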
+ ret_ok = '0'
+ ret_err = '-1'
+ direction = "request"
+ local_vars = ['struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };',
+ 'struct nlmsghdr *nlh;',
+ 'int err;']
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ret_ok = 'rsp'
+ ret_err = 'NULL'
+ local_vars += [f'{type_name(ri, rdir(direction))} *rsp;']
+
+ if ri.fixed_hdr:
+ local_vars += ['size_t hdr_len;',
+ 'void *hdr;']
+
+ print_prototype(ri, direction, terminate=False)
+ ri.cw.block_start()
+ ri.cw.write_func_lvar(local_vars)
+
+ ri.cw.p(f"nlh = ynl_gemsg_start_req(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
+
+ ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p(f"yrs.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.nl()
+
+ if ri.fixed_hdr:
+ ri.cw.p("hdr_len = sizeof(req->_hdr);")
+ ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
+ ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
+ ri.cw.nl()
+
+ for _, attr in ri.struct["request"].member_list():
+ attr.attr_put(ri, "req")
+ ri.cw.nl()
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p('rsp = calloc(1, sizeof(*rsp));')
+ ri.cw.p('yrs.yarg.data = rsp;')
+ ri.cw.p(f"yrs.cb = {op_prefix(ri, 'reply')}_parse;")
+ if ri.op.value is not None:
+ ri.cw.p(f'yrs.rsp_cmd = {ri.op.enum_name};')
+ else:
+ ri.cw.p(f'yrs.rsp_cmd = {ri.op.rsp_value};')
+ ri.cw.nl()
+ ri.cw.p("err = ynl_exec(ys, nlh, &yrs);")
+ ri.cw.p('if (err < 0)')
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p('goto err_free;')
+ else:
+ ri.cw.p('return -1;')
+ ri.cw.nl()
+
+ ri.cw.p(f"return {ret_ok};")
+ ri.cw.nl()
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p('err_free:')
+ ri.cw.p(f"{call_free(ri, rdir(direction), 'rsp')}")
+ ri.cw.p(f"return {ret_err};")
+
+ ri.cw.block_end()
+
+
+def print_dump(ri):
+ direction = "request"
+ print_prototype(ri, direction, terminate=False)
+ ri.cw.block_start()
+ local_vars = ['struct ynl_dump_state yds = {};',
+ 'struct nlmsghdr *nlh;',
+ 'int err;']
+
+ if ri.fixed_hdr:
+ local_vars += ['size_t hdr_len;',
+ 'void *hdr;']
+
+ ri.cw.write_func_lvar(local_vars)
+
+ ri.cw.p('yds.yarg.ys = ys;')
+ ri.cw.p(f"yds.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.p("yds.yarg.data = NULL;")
+ ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
+ ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ if ri.op.value is not None:
+ ri.cw.p(f'yds.rsp_cmd = {ri.op.enum_name};')
+ else:
+ ri.cw.p(f'yds.rsp_cmd = {ri.op.rsp_value};')
+ ri.cw.nl()
+ ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
+
+ if ri.fixed_hdr:
+ ri.cw.p("hdr_len = sizeof(req->_hdr);")
+ ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
+ ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
+ ri.cw.nl()
+
+ if "request" in ri.op[ri.op_mode]:
+ ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
+ ri.cw.nl()
+ for _, attr in ri.struct["request"].member_list():
+ attr.attr_put(ri, "req")
+ ri.cw.nl()
+
+ ri.cw.p('err = ynl_exec_dump(ys, nlh, &yds);')
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('goto free_list;')
+ ri.cw.nl()
+
+ ri.cw.p('return yds.first;')
+ ri.cw.nl()
+ ri.cw.p('free_list:')
+ ri.cw.p(call_free(ri, rdir(direction), 'yds.first'))
+ ri.cw.p('return NULL;')
+ ri.cw.block_end()
+
+
+def call_free(ri, direction, var):
+ return f"{op_prefix(ri, direction)}_free({var});"
+
+
+def free_arg_name(direction):
+ if direction:
+ return direction_to_suffix[direction][1:]
+ return 'obj'
+
+
+def print_alloc_wrapper(ri, direction):
+ name = op_prefix(ri, direction)
+ ri.cw.write_func_prot(f'static inline struct {name} *', f"{name}_alloc", [f"void"])
+ ri.cw.block_start()
+ ri.cw.p(f'return calloc(1, sizeof(struct {name}));')
+ ri.cw.block_end()
+
+
+def print_free_prototype(ri, direction, suffix=';'):
+ name = op_prefix(ri, direction)
+ struct_name = name
+ if ri.type_name_conflict:
+ struct_name += '_'
+ arg = free_arg_name(direction)
+ ri.cw.write_func_prot('void', f"{name}_free", [f"struct {struct_name} *{arg}"], suffix=suffix)
+
+
+def _print_type(ri, direction, struct):
+ suffix = f'_{ri.type_name}{direction_to_suffix[direction]}'
+ if not direction and ri.type_name_conflict:
+ suffix += '_'
+
+ if ri.op_mode == 'dump':
+ suffix += '_dump'
+
+ ri.cw.block_start(line=f"struct {ri.family.c_name}{suffix}")
+
+ if ri.fixed_hdr:
+ ri.cw.p(ri.fixed_hdr + ' _hdr;')
+ ri.cw.nl()
+
+ meta_started = False
+ for _, attr in struct.member_list():
+ for type_filter in ['len', 'bit']:
+ line = attr.presence_member(ri.ku_space, type_filter)
+ if line:
+ if not meta_started:
+ ri.cw.block_start(line=f"struct")
+ meta_started = True
+ ri.cw.p(line)
+ if meta_started:
+ ri.cw.block_end(line='_present;')
+ ri.cw.nl()
+
+ for arg in struct.inherited:
+ ri.cw.p(f"__u32 {arg};")
+
+ for _, attr in struct.member_list():
+ attr.struct_member(ri)
+
+ ri.cw.block_end(line=';')
+ ri.cw.nl()
+
+
+def print_type(ri, direction):
+ _print_type(ri, direction, ri.struct[direction])
+
+
+def print_type_full(ri, struct):
+ _print_type(ri, "", struct)
+
+
+def print_type_helpers(ri, direction, deref=False):
+ print_free_prototype(ri, direction)
+ ri.cw.nl()
+
+ if ri.ku_space == 'user' and direction == 'request':
+ for _, attr in ri.struct[direction].member_list():
+ attr.setter(ri, ri.attr_set, direction, deref=deref)
+ ri.cw.nl()
+
+
+def print_req_type_helpers(ri):
+ if len(ri.struct["request"].attr_list) == 0:
+ return
+ print_alloc_wrapper(ri, "request")
+ print_type_helpers(ri, "request")
+
+
+def print_rsp_type_helpers(ri):
+ if 'reply' not in ri.op[ri.op_mode]:
+ return
+ print_type_helpers(ri, "reply")
+
+
+def print_parse_prototype(ri, direction, terminate=True):
+ suffix = "_rsp" if direction == "reply" else "_req"
+ term = ';' if terminate else ''
+
+ ri.cw.write_func_prot('void', f"{ri.op.render_name}{suffix}_parse",
+ ['const struct nlattr **tb',
+ f"struct {ri.op.render_name}{suffix} *req"],
+ suffix=term)
+
+
+def print_req_type(ri):
+ if len(ri.struct["request"].attr_list) == 0:
+ return
+ print_type(ri, "request")
+
+
+def print_req_free(ri):
+ if 'request' not in ri.op[ri.op_mode]:
+ return
+ _free_type(ri, 'request', ri.struct['request'])
+
+
+def print_rsp_type(ri):
+ if (ri.op_mode == 'do' or ri.op_mode == 'dump') and 'reply' in ri.op[ri.op_mode]:
+ direction = 'reply'
+ elif ri.op_mode == 'event':
+ direction = 'reply'
+ else:
+ return
+ print_type(ri, direction)
+
+
+def print_wrapped_type(ri):
+ ri.cw.block_start(line=f"{type_name(ri, 'reply')}")
+ if ri.op_mode == 'dump':
+ ri.cw.p(f"{type_name(ri, 'reply')} *next;")
+ elif ri.op_mode == 'notify' or ri.op_mode == 'event':
+ ri.cw.p('__u16 family;')
+ ri.cw.p('__u8 cmd;')
+ ri.cw.p('struct ynl_ntf_base_type *next;')
+ ri.cw.p(f"void (*free)({type_name(ri, 'reply')} *ntf);")
+ ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__((aligned(8)));")
+ ri.cw.block_end(line=';')
+ ri.cw.nl()
+ print_free_prototype(ri, 'reply')
+ ri.cw.nl()
+
+
+def _free_type_members_iter(ri, struct):
+ for _, attr in struct.member_list():
+ if attr.free_needs_iter():
+ ri.cw.p('unsigned int i;')
+ ri.cw.nl()
+ break
+
+
+def _free_type_members(ri, var, struct, ref=''):
+ for _, attr in struct.member_list():
+ attr.free(ri, var, ref)
+
+
+def _free_type(ri, direction, struct):
+ var = free_arg_name(direction)
+
+ print_free_prototype(ri, direction, suffix='')
+ ri.cw.block_start()
+ _free_type_members_iter(ri, struct)
+ _free_type_members(ri, var, struct)
+ if direction:
+ ri.cw.p(f'free({var});')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def free_rsp_nested_prototype(ri):
+ print_free_prototype(ri, "")
+
+
+def free_rsp_nested(ri, struct):
+ _free_type(ri, "", struct)
+
+
+def print_rsp_free(ri):
+ if 'reply' not in ri.op[ri.op_mode]:
+ return
+ _free_type(ri, 'reply', ri.struct['reply'])
+
+
+def print_dump_type_free(ri):
+ sub_type = type_name(ri, 'reply')
+
+ print_free_prototype(ri, 'reply', suffix='')
+ ri.cw.block_start()
+ ri.cw.p(f"{sub_type} *next = rsp;")
+ ri.cw.nl()
+ ri.cw.block_start(line='while ((void *)next != YNL_LIST_END)')
+ _free_type_members_iter(ri, ri.struct['reply'])
+ ri.cw.p('rsp = next;')
+ ri.cw.p('next = rsp->next;')
+ ri.cw.nl()
+
+ _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
+ ri.cw.p(f'free(rsp);')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_ntf_type_free(ri):
+ print_free_prototype(ri, 'reply', suffix='')
+ ri.cw.block_start()
+ _free_type_members_iter(ri, ri.struct['reply'])
+ _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
+ ri.cw.p(f'free(rsp);')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_req_policy_fwd(cw, struct, ri=None, terminate=True):
+ if terminate and ri and policy_should_be_static(struct.family):
+ return
+
+ if terminate:
+ prefix = 'extern '
+ else:
+ if ri and policy_should_be_static(struct.family):
+ prefix = 'static '
+ else:
+ prefix = ''
+
+ suffix = ';' if terminate else ' = {'
+
+ max_attr = struct.attr_max_val
+ if ri:
+ name = ri.op.render_name
+ if ri.op.dual_policy:
+ name += '_' + ri.op_mode
+ else:
+ name = struct.render_name
+ cw.p(f"{prefix}const struct nla_policy {name}_nl_policy[{max_attr.enum_name} + 1]{suffix}")
+
+
+def print_req_policy(cw, struct, ri=None):
+ if ri and ri.op:
+ cw.ifdef_block(ri.op.get('config-cond', None))
+ print_req_policy_fwd(cw, struct, ri=ri, terminate=False)
+ for _, arg in struct.member_list():
+ arg.attr_policy(cw)
+ cw.p("};")
+ cw.ifdef_block(None)
+ cw.nl()
+
+
+def kernel_can_gen_family_struct(family):
+ return family.proto == 'genetlink'
+
+
+def policy_should_be_static(family):
+ return family.kernel_policy == 'split' or kernel_can_gen_family_struct(family)
+
+
+def print_kernel_policy_ranges(family, cw):
+ first = True
+ for _, attr_set in family.attr_sets.items():
+ if attr_set.subset_of:
+ continue
+
+ for _, attr in attr_set.items():
+ if not attr.request:
+ continue
+ if 'full-range' not in attr.checks:
+ continue
+
+ if first:
+ cw.p('/* Integer value ranges */')
+ first = False
+
+ sign = '' if attr.type[0] == 'u' else '_signed'
+ suffix = 'ULL' if attr.type[0] == 'u' else 'LL'
+ cw.block_start(line=f'static const struct netlink_range_validation{sign} {c_lower(attr.enum_name)}_range =')
+ members = []
+ if 'min' in attr.checks:
+ members.append(('min', attr.get_limit_str('min', suffix=suffix)))
+ if 'max' in attr.checks:
+ members.append(('max', attr.get_limit_str('max', suffix=suffix)))
+ cw.write_struct_init(members)
+ cw.block_end(line=';')
+ cw.nl()
+
+
+def print_kernel_op_table_fwd(family, cw, terminate):
+ exported = not kernel_can_gen_family_struct(family)
+
+ if not terminate or exported:
+ cw.p(f"/* Ops table for {family.ident_name} */")
+
+ pol_to_struct = {'global': 'genl_small_ops',
+ 'per-op': 'genl_ops',
+ 'split': 'genl_split_ops'}
+ struct_type = pol_to_struct[family.kernel_policy]
+
+ if not exported:
+ cnt = ""
+ elif family.kernel_policy == 'split':
+ cnt = 0
+ for op in family.ops.values():
+ if 'do' in op:
+ cnt += 1
+ if 'dump' in op:
+ cnt += 1
+ else:
+ cnt = len(family.ops)
+
+ qual = 'static const' if not exported else 'const'
+ line = f"{qual} struct {struct_type} {family.c_name}_nl_ops[{cnt}]"
+ if terminate:
+ cw.p(f"extern {line};")
+ else:
+ cw.block_start(line=line + ' =')
+
+ if not terminate:
+ return
+
+ cw.nl()
+ for name in family.hooks['pre']['do']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['const struct genl_split_ops *ops',
+ 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+ for name in family.hooks['post']['do']['list']:
+ cw.write_func_prot('void', c_lower(name),
+ ['const struct genl_split_ops *ops',
+ 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+ for name in family.hooks['pre']['dump']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['struct netlink_callback *cb'], suffix=';')
+ for name in family.hooks['post']['dump']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['struct netlink_callback *cb'], suffix=';')
+
+ cw.nl()
+
+ for op_name, op in family.ops.items():
+ if op.is_async:
+ continue
+
+ if 'do' in op:
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-doit")
+ cw.write_func_prot('int', name,
+ ['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+
+ if 'dump' in op:
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit")
+ cw.write_func_prot('int', name,
+ ['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
+ cw.nl()
+
+
+def print_kernel_op_table_hdr(family, cw):
+ print_kernel_op_table_fwd(family, cw, terminate=True)
+
+
+def print_kernel_op_table(family, cw):
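+    # Emit the kernel-side ops array (genl_small_ops/genl_ops/genl_split_ops
+    # depending on the kernel policy), wiring each op to its doit/dumpit
+    # handler, policy, maxattr and flags.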
+ print_kernel_op_table_fwd(family, cw, terminate=False)
+ if family.kernel_policy == 'global' or family.kernel_policy == 'per-op':
+ for op_name, op in family.ops.items():
+ if op.is_async:
+ continue
+
+ cw.ifdef_block(op.get('config-cond', None))
+ cw.block_start()
+ members = [('cmd', op.enum_name)]
+ if 'dont-validate' in op:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in op['dont-validate']])), )
+ for op_mode in ['do', 'dump']:
+ if op_mode in op:
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
+ members.append((op_mode + 'it', name))
+ if family.kernel_policy == 'per-op':
+ struct = Struct(family, op['attribute-set'],
+ type_list=op['do']['request']['attributes'])
+
+ name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
+ members.append(('policy', name))
+ members.append(('maxattr', struct.attr_max_val.enum_name))
+ if 'flags' in op:
+ members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in op['flags']])))
+ cw.write_struct_init(members)
+ cw.block_end(line=',')
+ elif family.kernel_policy == 'split':
+ cb_names = {'do': {'pre': 'pre_doit', 'post': 'post_doit'},
+ 'dump': {'pre': 'start', 'post': 'done'}}
+
+ for op_name, op in family.ops.items():
+ for op_mode in ['do', 'dump']:
+ if op.is_async or op_mode not in op:
+ continue
+
+ cw.ifdef_block(op.get('config-cond', None))
+ cw.block_start()
+ members = [('cmd', op.enum_name)]
+ if 'dont-validate' in op:
+ dont_validate = []
+ for x in op['dont-validate']:
+ if op_mode == 'do' and x in ['dump', 'dump-strict']:
+ continue
+ if op_mode == "dump" and x == 'strict':
+ continue
+ dont_validate.append(x)
+
+ if dont_validate:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in dont_validate])), )
+ name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
+ if 'pre' in op[op_mode]:
+ members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
+ members.append((op_mode + 'it', name))
+ if 'post' in op[op_mode]:
+ members.append((cb_names[op_mode]['post'], c_lower(op[op_mode]['post'])))
+ if 'request' in op[op_mode]:
+ struct = Struct(family, op['attribute-set'],
+ type_list=op[op_mode]['request']['attributes'])
+
+ if op.dual_policy:
+ name = c_lower(f"{family.ident_name}-{op_name}-{op_mode}-nl-policy")
+ else:
+ name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
+ members.append(('policy', name))
+ members.append(('maxattr', struct.attr_max_val.enum_name))
+ flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode]
+ members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in flags])))
+ cw.write_struct_init(members)
+ cw.block_end(line=',')
+ cw.ifdef_block(None)
+
+ cw.block_end(line=';')
+ cw.nl()
+
+
+def print_kernel_mcgrp_hdr(family, cw):
+ if not family.mcgrps['list']:
+ return
+
+ cw.block_start('enum')
+ for grp in family.mcgrps['list']:
+ grp_id = c_upper(f"{family.ident_name}-nlgrp-{grp['name']},")
+ cw.p(grp_id)
+ cw.block_end(';')
+ cw.nl()
+
+
+def print_kernel_mcgrp_src(family, cw):
+ if not family.mcgrps['list']:
+ return
+
+ cw.block_start('static const struct genl_multicast_group ' + family.c_name + '_nl_mcgrps[] =')
+ for grp in family.mcgrps['list']:
+ name = grp['name']
+ grp_id = c_upper(f"{family.ident_name}-nlgrp-{name}")
+ cw.p('[' + grp_id + '] = { "' + name + '", },')
+ cw.block_end(';')
+ cw.nl()
+
+
+def print_kernel_family_struct_hdr(family, cw):
+ if not kernel_can_gen_family_struct(family):
+ return
+
+ cw.p(f"extern struct genl_family {family.c_name}_nl_family;")
+ cw.nl()
+ if 'sock-priv' in family.kernel_family:
+ cw.p(f'void {family.c_name}_nl_sock_priv_init({family.kernel_family["sock-priv"]} *priv);')
+ cw.p(f'void {family.c_name}_nl_sock_priv_destroy({family.kernel_family["sock-priv"]} *priv);')
+ cw.nl()
+
+
+def print_kernel_family_struct_src(family, cw):
+ if not kernel_can_gen_family_struct(family):
+ return
+
+ cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
+ cw.p('.name\t\t= ' + family.fam_key + ',')
+ cw.p('.version\t= ' + family.ver_key + ',')
+ cw.p('.netnsok\t= true,')
+ cw.p('.parallel_ops\t= true,')
+ cw.p('.module\t\t= THIS_MODULE,')
+ if family.kernel_policy == 'per-op':
+ cw.p(f'.ops\t\t= {family.c_name}_nl_ops,')
+ cw.p(f'.n_ops\t\t= ARRAY_SIZE({family.c_name}_nl_ops),')
+ elif family.kernel_policy == 'split':
+ cw.p(f'.split_ops\t= {family.c_name}_nl_ops,')
+ cw.p(f'.n_split_ops\t= ARRAY_SIZE({family.c_name}_nl_ops),')
+ if family.mcgrps['list']:
+ cw.p(f'.mcgrps\t\t= {family.c_name}_nl_mcgrps,')
+ cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
+ if 'sock-priv' in family.kernel_family:
+ cw.p(f'.sock_priv_size\t= sizeof({family.kernel_family["sock-priv"]}),')
+ # Force cast here, actual helpers take pointer to the real type.
+ cw.p(f'.sock_priv_init\t= (void *){family.c_name}_nl_sock_priv_init,')
+ cw.p(f'.sock_priv_destroy = (void *){family.c_name}_nl_sock_priv_destroy,')
+ cw.block_end(';')
+
+
+def uapi_enum_start(family, cw, obj, ckey='', enum_name='enum-name'):
+ start_line = 'enum'
+ if enum_name in obj:
+ if obj[enum_name]:
+ start_line = 'enum ' + c_lower(obj[enum_name])
+ elif ckey and ckey in obj:
+ start_line = 'enum ' + family.c_name + '_' + c_lower(obj[ckey])
+ cw.block_start(line=start_line)
+
+
+def render_uapi_unified(family, cw, max_by_define, separate_ntf):
+ max_name = c_upper(family.get('cmd-max-name', f"{family.op_prefix}MAX"))
+ cnt_name = c_upper(family.get('cmd-cnt-name', f"__{family.op_prefix}MAX"))
+ max_value = f"({cnt_name} - 1)"
+
+ uapi_enum_start(family, cw, family['operations'], 'enum-name')
+ val = 0
+ for op in family.msgs.values():
+ if separate_ntf and ('notify' in op or 'event' in op):
+ continue
+
+ suffix = ','
+ if op.value != val:
+ suffix = f" = {op.value},"
+ val = op.value
+ cw.p(op.enum_name + suffix)
+ val += 1
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {max_name} {max_value}")
+ cw.nl()
+
+
+def render_uapi_directional(family, cw, max_by_define):
+ max_name = f"{family.op_prefix}USER_MAX"
+ cnt_name = f"__{family.op_prefix}USER_CNT"
+ max_value = f"({cnt_name} - 1)"
+
+ cw.block_start(line='enum')
+ cw.p(c_upper(f'{family.name}_MSG_USER_NONE = 0,'))
+ val = 0
+ for op in family.msgs.values():
+ if 'do' in op and 'event' not in op:
+ suffix = ','
+ if op.value and op.value != val:
+ suffix = f" = {op.value},"
+ val = op.value
+ cw.p(op.enum_name + suffix)
+ val += 1
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {max_name} {max_value}")
+ cw.nl()
+
+ max_name = f"{family.op_prefix}KERNEL_MAX"
+ cnt_name = f"__{family.op_prefix}KERNEL_CNT"
+ max_value = f"({cnt_name} - 1)"
+
+ cw.block_start(line='enum')
+ cw.p(c_upper(f'{family.name}_MSG_KERNEL_NONE = 0,'))
+ val = 0
+ for op in family.msgs.values():
+ if ('do' in op and 'reply' in op['do']) or 'notify' in op or 'event' in op:
+ enum_name = op.enum_name
+ if 'event' not in op and 'notify' not in op:
+ enum_name = f'{enum_name}_REPLY'
+
+ suffix = ','
+ if op.value and op.value != val:
+ suffix = f" = {op.value},"
+ val = op.value
+ cw.p(enum_name + suffix)
+ val += 1
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {max_name} {max_value}")
+ cw.nl()
+
+
+def render_uapi(family, cw):
+ hdr_prot = f"_UAPI_LINUX_{c_upper(family.uapi_header_name)}_H"
+ hdr_prot = hdr_prot.replace('/', '_')
+ cw.p('#ifndef ' + hdr_prot)
+ cw.p('#define ' + hdr_prot)
+ cw.nl()
+
+ defines = [(family.fam_key, family["name"]),
+ (family.ver_key, family.get('version', 1))]
+ cw.writes_defines(defines)
+ cw.nl()
+
+ defines = []
+ for const in family['definitions']:
+ if const['type'] != 'const':
+ cw.writes_defines(defines)
+ defines = []
+ cw.nl()
+
+ # Write kdoc for enum and flags (one day maybe also structs)
+ if const['type'] == 'enum' or const['type'] == 'flags':
+ enum = family.consts[const['name']]
+
+ if enum.header:
+ continue
+
+ if enum.has_doc():
+ if enum.has_entry_doc():
+ cw.p('/**')
+ doc = ''
+ if 'doc' in enum:
+ doc = ' - ' + enum['doc']
+ cw.write_doc_line(enum.enum_name + doc)
+ else:
+ cw.p('/*')
+ cw.write_doc_line(enum['doc'], indent=False)
+ for entry in enum.entries.values():
+ if entry.has_doc():
+ doc = '@' + entry.c_name + ': ' + entry['doc']
+ cw.write_doc_line(doc)
+ cw.p(' */')
+
+ uapi_enum_start(family, cw, const, 'name')
+ name_pfx = const.get('name-prefix', f"{family.ident_name}-{const['name']}-")
+ for entry in enum.entries.values():
+ suffix = ','
+ if entry.value_change:
+ suffix = f" = {entry.user_value()}" + suffix
+ cw.p(entry.c_name + suffix)
+
+ if const.get('render-max', False):
+ cw.nl()
+ cw.p('/* private: */')
+ if const['type'] == 'flags':
+ max_name = c_upper(name_pfx + 'mask')
+ max_val = f' = {enum.get_mask()},'
+ cw.p(max_name + max_val)
+ else:
+ cnt_name = enum.enum_cnt_name
+ max_name = c_upper(name_pfx + 'max')
+ if not cnt_name:
+ cnt_name = '__' + name_pfx + 'max'
+ cw.p(c_upper(cnt_name) + ',')
+ cw.p(max_name + ' = (' + c_upper(cnt_name) + ' - 1)')
+ cw.block_end(line=';')
+ cw.nl()
+ elif const['type'] == 'const':
+ defines.append([c_upper(family.get('c-define-name',
+ f"{family.ident_name}-{const['name']}")),
+ const['value']])
+
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+ max_by_define = family.get('max-by-define', False)
+
+ for _, attr_set in family.attr_sets.items():
+ if attr_set.subset_of:
+ continue
+
+ max_value = f"({attr_set.cnt_name} - 1)"
+
+ val = 0
+ uapi_enum_start(family, cw, attr_set.yaml, 'enum-name')
+ for _, attr in attr_set.items():
+ suffix = ','
+ if attr.value != val:
+ suffix = f" = {attr.value},"
+ val = attr.value
+ val += 1
+ cw.p(attr.enum_name + suffix)
+ if attr_set.items():
+ cw.nl()
+ cw.p(attr_set.cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{attr_set.max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {attr_set.max_name} {max_value}")
+ cw.nl()
+
+ # Commands
+ separate_ntf = 'async-prefix' in family['operations']
+
+ if family.msg_id_model == 'unified':
+ render_uapi_unified(family, cw, max_by_define, separate_ntf)
+ elif family.msg_id_model == 'directional':
+ render_uapi_directional(family, cw, max_by_define)
+ else:
+ raise Exception(f'Unsupported message enum-model {family.msg_id_model}')
+
+ if separate_ntf:
+ uapi_enum_start(family, cw, family['operations'], enum_name='async-enum')
+ for op in family.msgs.values():
+ if separate_ntf and not ('notify' in op or 'event' in op):
+ continue
+
+ suffix = ','
+ if 'value' in op:
+ suffix = f" = {op['value']},"
+ cw.p(op.enum_name + suffix)
+ cw.block_end(line=';')
+ cw.nl()
+
+ # Multicast
+ defines = []
+ for grp in family.mcgrps['list']:
+ name = grp['name']
+ defines.append([c_upper(grp.get('c-define-name', f"{family.ident_name}-mcgrp-{name}")),
+ f'{name}'])
+ cw.nl()
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+ cw.p(f'#endif /* {hdr_prot} */')
+
+
+def _render_user_ntf_entry(ri, op):
+ ri.cw.block_start(line=f"[{op.enum_name}] = ")
+ ri.cw.p(f".alloc_sz\t= sizeof({type_name(ri, 'event')}),")
+ ri.cw.p(f".cb\t\t= {op_prefix(ri, 'reply', deref=True)}_parse,")
+ ri.cw.p(f".policy\t\t= &{ri.struct['reply'].render_name}_nest,")
+ ri.cw.p(f".free\t\t= (void *){op_prefix(ri, 'notify')}_free,")
+ ri.cw.block_end(line=',')
+
+
+def render_user_family(family, cw, prototype):
+ symbol = f'const struct ynl_family ynl_{family.c_name}_family'
+ if prototype:
+ cw.p(f'extern {symbol};')
+ return
+
+ if family.ntfs:
+ cw.block_start(line=f"static const struct ynl_ntf_info {family['name']}_ntf_info[] = ")
+ for ntf_op_name, ntf_op in family.ntfs.items():
+ if 'notify' in ntf_op:
+ op = family.ops[ntf_op['notify']]
+ ri = RenderInfo(cw, family, "user", op, "notify")
+ elif 'event' in ntf_op:
+ ri = RenderInfo(cw, family, "user", ntf_op, "event")
+ else:
+ raise Exception('Invalid notification ' + ntf_op_name)
+ _render_user_ntf_entry(ri, ntf_op)
+ for op_name, op in family.ops.items():
+ if 'event' not in op:
+ continue
+ ri = RenderInfo(cw, family, "user", op, "event")
+ _render_user_ntf_entry(ri, op)
+ cw.block_end(line=";")
+ cw.nl()
+
+ cw.block_start(f'{symbol} = ')
+ cw.p(f'.name\t\t= "{family.c_name}",')
+ if family.fixed_header:
+ cw.p(f'.hdr_len\t= sizeof(struct genlmsghdr) + sizeof(struct {c_lower(family.fixed_header)}),')
+ else:
+ cw.p('.hdr_len\t= sizeof(struct genlmsghdr),')
+ if family.ntfs:
+ cw.p(f".ntf_info\t= {family['name']}_ntf_info,")
+ cw.p(f".ntf_info_size\t= YNL_ARRAY_SIZE({family['name']}_ntf_info),")
+ cw.block_end(line=';')
+
+
+def family_contains_bitfield32(family):
+ for _, attr_set in family.attr_sets.items():
+ if attr_set.subset_of:
+ continue
+ for _, attr in attr_set.items():
+ if attr.type == "bitfield32":
+ return True
+ return False
+
+
+def find_kernel_root(full_path):
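+    # Walk up from the spec path until a MAINTAINERS file marks the kernel
+    # root; return (root, path of the spec relative to the root).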
+ sub_path = ''
+ while True:
+ sub_path = os.path.join(os.path.basename(full_path), sub_path)
+ full_path = os.path.dirname(full_path)
+ maintainers = os.path.join(full_path, "MAINTAINERS")
+ if os.path.exists(maintainers):
+ return full_path, sub_path[:-1]
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Netlink simple parsing generator')
+ parser.add_argument('--mode', dest='mode', type=str, required=True,
+ choices=('user', 'kernel', 'uapi'))
+ parser.add_argument('--spec', dest='spec', type=str, required=True)
+ parser.add_argument('--header', dest='header', action='store_true', default=None)
+ parser.add_argument('--source', dest='header', action='store_false')
+ parser.add_argument('--user-header', nargs='+', default=[])
+ parser.add_argument('--cmp-out', action='store_true', default=None,
+ help='Do not overwrite the output file if the new output is identical to the old')
+ parser.add_argument('--exclude-op', action='append', default=[])
+ parser.add_argument('-o', dest='out_file', type=str, default=None)
+ args = parser.parse_args()
+
+ if args.header is None:
+ parser.error("--header or --source is required")
+
+ exclude_ops = [re.compile(expr) for expr in args.exclude_op]
+
+ try:
+ parsed = Family(args.spec, exclude_ops)
+ if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
+ print('Spec license:', parsed.license)
+ print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
+ os.sys.exit(1)
+ except yaml.YAMLError as exc:
+ print(exc)
+ os.sys.exit(1)
+ return
+
+ cw = CodeWriter(BaseNlLib(), args.out_file, overwrite=(not args.cmp_out))
+
+ _, spec_kernel = find_kernel_root(args.spec)
+ if args.mode == 'uapi' or args.header:
+ cw.p(f'/* SPDX-License-Identifier: {parsed.license} */')
+ else:
+ cw.p(f'// SPDX-License-Identifier: {parsed.license}')
+ cw.p("/* Do not edit directly, auto-generated from: */")
+ cw.p(f"/*\t{spec_kernel} */")
+ cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
+ if args.exclude_op or args.user_header:
+ line = ''
+ line += ' --user-header '.join([''] + args.user_header)
+ line += ' --exclude-op '.join([''] + args.exclude_op)
+ cw.p(f'/* YNL-ARG{line} */')
+ cw.nl()
+
+ if args.mode == 'uapi':
+ render_uapi(parsed, cw)
+ return
+
+ hdr_prot = f"_LINUX_{parsed.c_name.upper()}_GEN_H"
+ if args.header:
+ cw.p('#ifndef ' + hdr_prot)
+ cw.p('#define ' + hdr_prot)
+ cw.nl()
+
+ if args.out_file:
+ hdr_file = os.path.basename(args.out_file[:-2]) + ".h"
+ else:
+ hdr_file = "generated_header_file.h"
+
+ if args.mode == 'kernel':
+ cw.p('#include <net/netlink.h>')
+ cw.p('#include <net/genetlink.h>')
+ cw.nl()
+ if not args.header:
+ if args.out_file:
+ cw.p(f'#include "{hdr_file}"')
+ cw.nl()
+ headers = ['uapi/' + parsed.uapi_header]
+ headers += parsed.kernel_family.get('headers', [])
+ else:
+ cw.p('#include <stdlib.h>')
+ cw.p('#include <string.h>')
+ if args.header:
+ cw.p('#include <linux/types.h>')
+ if family_contains_bitfield32(parsed):
+ cw.p('#include <linux/netlink.h>')
+ else:
+ cw.p(f'#include "{hdr_file}"')
+ cw.p('#include "ynl.h"')
+ headers = []
+ for definition in parsed['definitions']:
+ if 'header' in definition:
+ headers.append(definition['header'])
+ if args.mode == 'user':
+ headers.append(parsed.uapi_header)
+ seen_header = []
+ for one in headers:
+ if one not in seen_header:
+ cw.p(f"#include <{one}>")
+ seen_header.append(one)
+ cw.nl()
+
+ if args.mode == "user":
+ if not args.header:
+ cw.p("#include <linux/genetlink.h>")
+ cw.nl()
+ for one in args.user_header:
+ cw.p(f'#include "{one}"')
+ else:
+ cw.p('struct ynl_sock;')
+ cw.nl()
+ render_user_family(parsed, cw, True)
+ cw.nl()
+
+ if args.mode == "kernel":
+ if args.header:
+ for _, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ cw.p('/* Common nested types */')
+ break
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ print_req_policy_fwd(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy == 'global':
+ cw.p(f"/* Global operation policy for {parsed.name} */")
+
+ struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
+ print_req_policy_fwd(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy in {'per-op', 'split'}:
+ for op_name, op in parsed.ops.items():
+ if 'do' in op and 'event' not in op:
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
+ print_req_policy_fwd(cw, ri.struct['request'], ri=ri)
+ cw.nl()
+
+ print_kernel_op_table_hdr(parsed, cw)
+ print_kernel_mcgrp_hdr(parsed, cw)
+ print_kernel_family_struct_hdr(parsed, cw)
+ else:
+ print_kernel_policy_ranges(parsed, cw)
+
+ for _, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ cw.p('/* Common nested types */')
+ break
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ print_req_policy(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy == 'global':
+ cw.p(f"/* Global operation policy for {parsed.name} */")
+
+ struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
+ print_req_policy(cw, struct)
+ cw.nl()
+
+ for op_name, op in parsed.ops.items():
+ if parsed.kernel_policy in {'per-op', 'split'}:
+ for op_mode in ['do', 'dump']:
+ if op_mode in op and 'request' in op[op_mode]:
+ cw.p(f"/* {op.enum_name} - {op_mode} */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_mode)
+ print_req_policy(cw, ri.struct['request'], ri=ri)
+ cw.nl()
+
+ print_kernel_op_table(parsed, cw)
+ print_kernel_mcgrp_src(parsed, cw)
+ print_kernel_family_struct_src(parsed, cw)
+
+ if args.mode == "user":
+ if args.header:
+ cw.p('/* Enums */')
+ put_op_name_fwd(parsed, cw)
+
+ for name, const in parsed.consts.items():
+ if isinstance(const, EnumSet):
+ put_enum_to_str_fwd(parsed, cw, const)
+ cw.nl()
+
+ cw.p('/* Common nested types */')
+ for attr_set, struct in parsed.pure_nested_structs.items():
+ ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
+ print_type_full(ri, struct)
+
+ for op_name, op in parsed.ops.items():
+ cw.p(f"/* ============== {op.enum_name} ============== */")
+
+ if 'do' in op and 'event' not in op:
+ cw.p(f"/* {op.enum_name} - do */")
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
+ print_req_type(ri)
+ print_req_type_helpers(ri)
+ cw.nl()
+ print_rsp_type(ri)
+ print_rsp_type_helpers(ri)
+ cw.nl()
+ print_req_prototype(ri)
+ cw.nl()
+
+ if 'dump' in op:
+ cw.p(f"/* {op.enum_name} - dump */")
+ ri = RenderInfo(cw, parsed, args.mode, op, 'dump')
+ print_req_type(ri)
+ print_req_type_helpers(ri)
+ if not ri.type_consistent:
+ print_rsp_type(ri)
+ print_wrapped_type(ri)
+ print_dump_prototype(ri)
+ cw.nl()
+
+ if op.has_ntf:
+ cw.p(f"/* {op.enum_name} - notify */")
+ ri = RenderInfo(cw, parsed, args.mode, op, 'notify')
+ if not ri.type_consistent:
+ raise Exception(f'Only notifications with consistent types supported ({op.name})')
+ print_wrapped_type(ri)
+
+ for op_name, op in parsed.ntfs.items():
+ if 'event' in op:
+ ri = RenderInfo(cw, parsed, args.mode, op, 'event')
+ cw.p(f"/* {op.enum_name} - event */")
+ print_rsp_type(ri)
+ cw.nl()
+ print_wrapped_type(ri)
+ cw.nl()
+ else:
+ cw.p('/* Enums */')
+ put_op_name(parsed, cw)
+
+ for name, const in parsed.consts.items():
+ if isinstance(const, EnumSet):
+ put_enum_to_str(parsed, cw, const)
+ cw.nl()
+
+ has_recursive_nests = False
+ cw.p('/* Policies */')
+ for struct in parsed.pure_nested_structs.values():
+ if struct.recursive:
+ put_typol_fwd(cw, struct)
+ has_recursive_nests = True
+ if has_recursive_nests:
+ cw.nl()
+ for name in parsed.pure_nested_structs:
+ struct = Struct(parsed, name)
+ put_typol(cw, struct)
+ for name in parsed.root_sets:
+ struct = Struct(parsed, name)
+ put_typol(cw, struct)
+
+ cw.p('/* Common nested types */')
+ if has_recursive_nests:
+ for attr_set, struct in parsed.pure_nested_structs.items():
+ ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
+ free_rsp_nested_prototype(ri)
+ if struct.request:
+ put_req_nested_prototype(ri, struct)
+ if struct.reply:
+ parse_rsp_nested_prototype(ri, struct)
+ cw.nl()
+ for attr_set, struct in parsed.pure_nested_structs.items():
+ ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
+
+ free_rsp_nested(ri, struct)
+ if struct.request:
+ put_req_nested(ri, struct)
+ if struct.reply:
+ parse_rsp_nested(ri, struct)
+
+ for op_name, op in parsed.ops.items():
+ cw.p(f"/* ============== {op.enum_name} ============== */")
+ if 'do' in op and 'event' not in op:
+ cw.p(f"/* {op.enum_name} - do */")
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
+ print_req_free(ri)
+ print_rsp_free(ri)
+ parse_rsp_msg(ri)
+ print_req(ri)
+ cw.nl()
+
+ if 'dump' in op:
+ cw.p(f"/* {op.enum_name} - dump */")
+ ri = RenderInfo(cw, parsed, args.mode, op, "dump")
+ if not ri.type_consistent:
+ parse_rsp_msg(ri, deref=True)
+ print_req_free(ri)
+ print_dump_type_free(ri)
+ print_dump(ri)
+ cw.nl()
+
+ if op.has_ntf:
+ cw.p(f"/* {op.enum_name} - notify */")
+ ri = RenderInfo(cw, parsed, args.mode, op, 'notify')
+ if not ri.type_consistent:
+ raise Exception(f'Only notifications with consistent types supported ({op.name})')
+ print_ntf_type_free(ri)
+
+ for op_name, op in parsed.ntfs.items():
+ if 'event' in op:
+ cw.p(f"/* {op.enum_name} - event */")
+
+ ri = RenderInfo(cw, parsed, args.mode, op, "do")
+ parse_rsp_msg(ri)
+
+ ri = RenderInfo(cw, parsed, args.mode, op, "event")
+ print_ntf_type_free(ri)
+ cw.nl()
+ render_user_family(parsed, cw, False)
+
+ if args.header:
+ cw.p(f'#endif /* {hdr_prot} */')
+
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+
+"""
+ Script to auto generate the documentation for Netlink specifications.
+
+ :copyright: Copyright (C) 2023 Breno Leitao <leitao@debian.org>
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+    This script performs extensive parsing of the Linux kernel's netlink YAML
+    spec files, in an effort to avoid needing to heavily mark up the original
+    YAML file.
+
+ This code is split in three big parts:
+    1) RST formatters: Used to convert a string to RST output
+ 2) Parser helpers: Functions to parse the YAML data structure
+ 3) Main function and small helpers
+"""
+
+from typing import Any, Dict, List
+import os.path
+import sys
+import argparse
+import logging
+import yaml
+
+
+SPACE_PER_LEVEL = 4
+
+
+# RST Formatters
+# ==============
+def headroom(level: int) -> str:
+ """Return space to format"""
+ return " " * (level * SPACE_PER_LEVEL)
+
+
+def bold(text: str) -> str:
+ """Format bold text"""
+ return f"**{text}**"
+
+
+def inline(text: str) -> str:
+ """Format inline text"""
+ return f"``{text}``"
+
+
+def sanitize(text: str) -> str:
+ """Remove newlines and multiple spaces"""
+ # This is useful for some fields that are spread across multiple lines
+ return str(text).replace("\n", " ").strip()
+
+
+def rst_fields(key: str, value: str, level: int = 0) -> str:
+ """Return a RST formatted field"""
+ return headroom(level) + f":{key}: {value}"
+
+
+def rst_definition(key: str, value: Any, level: int = 0) -> str:
+ """Format a single rst definition"""
+ return headroom(level) + key + "\n" + headroom(level + 1) + str(value)
+
+
+def rst_paragraph(paragraph: str, level: int = 0) -> str:
+ """Return a formatted paragraph"""
+ return headroom(level) + paragraph
+
+
+def rst_bullet(item: str, level: int = 0) -> str:
+ """Return a formatted a bullet"""
+ return headroom(level) + f"- {item}"
+
+
+def rst_subsection(title: str) -> str:
+ """Add a sub-section to the document"""
+ return f"{title}\n" + "-" * len(title)
+
+
+def rst_subsubsection(title: str) -> str:
+ """Add a sub-sub-section to the document"""
+ return f"{title}\n" + "~" * len(title)
+
+
+def rst_section(namespace: str, prefix: str, title: str) -> str:
+ """Add a section to the document"""
+ return f".. _{namespace}-{prefix}-{title}:\n\n{title}\n" + "=" * len(title)
+
+
+def rst_subtitle(title: str) -> str:
+ """Add a subtitle to the document"""
+ return "\n" + "-" * len(title) + f"\n{title}\n" + "-" * len(title) + "\n\n"
+
+
+def rst_title(title: str) -> str:
+ """Add a title to the document"""
+ return "=" * len(title) + f"\n{title}\n" + "=" * len(title) + "\n\n"
+
+
+def rst_list_inline(list_: List[str], level: int = 0) -> str:
+ """Format a list using inlines"""
+ return headroom(level) + "[" + ", ".join(inline(i) for i in list_) + "]"
+
+
+def rst_ref(namespace: str, prefix: str, name: str) -> str:
+ """Add a hyperlink to the document"""
+ mappings = {'enum': 'definition',
+ 'fixed-header': 'definition',
+ 'nested-attributes': 'attribute-set',
+ 'struct': 'definition'}
+ if prefix in mappings:
+ prefix = mappings[prefix]
+ return f":ref:`{namespace}-{prefix}-{name}`"
+
+
+def rst_header() -> str:
+ """The headers for all the auto generated RST files"""
+ lines = []
+
+ lines.append(rst_paragraph(".. SPDX-License-Identifier: GPL-2.0"))
+ lines.append(rst_paragraph(".. NOTE: This document was auto-generated.\n\n"))
+
+ return "\n".join(lines)
+
+
+def rst_toctree(maxdepth: int = 2) -> str:
+ """Generate a toctree RST primitive"""
+ lines = []
+
+ lines.append(".. toctree::")
+ lines.append(f" :maxdepth: {maxdepth}\n\n")
+
+ return "\n".join(lines)
+
+
+def rst_label(title: str) -> str:
+ """Return a formatted label"""
+ return f".. _{title}:\n\n"
+
+
+# Parsers
+# =======
+
+
+def parse_mcast_group(mcast_group: List[Dict[str, Any]]) -> str:
+ """Parse 'multicast' group list and return a formatted string"""
+ lines = []
+ for group in mcast_group:
+ lines.append(rst_bullet(group["name"]))
+
+ return "\n".join(lines)
+
+
+def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str:
+ """Parse 'do' section and return a formatted string"""
+ lines = []
+ for key in do_dict.keys():
+ lines.append(rst_paragraph(bold(key), level + 1))
+ if key in ['request', 'reply']:
+ lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
+ else:
+ lines.append(headroom(level + 2) + do_dict[key] + "\n")
+
+ return "\n".join(lines)
+
+
+def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str:
+ """Parse 'attributes' section"""
+ if "attributes" not in attrs:
+ return ""
+ lines = [rst_fields("attributes", rst_list_inline(attrs["attributes"]), level + 1)]
+
+ return "\n".join(lines)
+
+
+def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
+ """Parse operations block"""
+ preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
+ linkable = ["fixed-header", "attribute-set"]
+ lines = []
+
+ for operation in operations:
+ lines.append(rst_section(namespace, 'operation', operation["name"]))
+ lines.append(rst_paragraph(operation["doc"]) + "\n")
+
+ for key in operation.keys():
+ if key in preprocessed:
+ # Skip the special fields
+ continue
+ value = operation[key]
+ if key in linkable:
+ value = rst_ref(namespace, key, value)
+ lines.append(rst_fields(key, value, 0))
+ if 'flags' in operation:
+ lines.append(rst_fields('flags', rst_list_inline(operation['flags'])))
+
+ if "do" in operation:
+ lines.append(rst_paragraph(":do:", 0))
+ lines.append(parse_do(operation["do"], 0))
+ if "dump" in operation:
+ lines.append(rst_paragraph(":dump:", 0))
+ lines.append(parse_do(operation["dump"], 0))
+
+ # New line after fields
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+
+def parse_entries(entries: List[Dict[str, Any]], level: int) -> str:
+ """Parse a list of entries"""
+ ignored = ["pad"]
+ lines = []
+ for entry in entries:
+ if isinstance(entry, dict):
+ # entries could be a list or a dictionary
+ field_name = entry.get("name", "")
+ if field_name in ignored:
+ continue
+ type_ = entry.get("type")
+ if type_:
+ field_name += f" ({inline(type_)})"
+ lines.append(
+ rst_fields(field_name, sanitize(entry.get("doc", "")), level)
+ )
+ elif isinstance(entry, list):
+ lines.append(rst_list_inline(entry, level))
+ else:
+ lines.append(rst_bullet(inline(sanitize(entry)), level))
+
+ lines.append("\n")
+ return "\n".join(lines)
+
+
+def parse_definitions(defs: Dict[str, Any], namespace: str) -> str:
+ """Parse definitions section"""
+ preprocessed = ["name", "entries", "members"]
+ ignored = ["render-max"] # This is not printed
+ lines = []
+
+ for definition in defs:
+ lines.append(rst_section(namespace, 'definition', definition["name"]))
+ for k in definition.keys():
+ if k in preprocessed + ignored:
+ continue
+ lines.append(rst_fields(k, sanitize(definition[k]), 0))
+
+ # Field list needs to finish with a new line
+ lines.append("\n")
+ if "entries" in definition:
+ lines.append(rst_paragraph(":entries:", 0))
+ lines.append(parse_entries(definition["entries"], 1))
+ if "members" in definition:
+ lines.append(rst_paragraph(":members:", 0))
+ lines.append(parse_entries(definition["members"], 1))
+
+ return "\n".join(lines)
+
+
+def parse_attr_sets(entries: List[Dict[str, Any]], namespace: str) -> str:
+ """Parse attribute from attribute-set"""
+ preprocessed = ["name", "type"]
+ linkable = ["enum", "nested-attributes", "struct", "sub-message"]
+ ignored = ["checks"]
+ lines = []
+
+ for entry in entries:
+ lines.append(rst_section(namespace, 'attribute-set', entry["name"]))
+ for attr in entry["attributes"]:
+ type_ = attr.get("type")
+ attr_line = attr["name"]
+ if type_:
+ # Add the attribute type in the same line
+ attr_line += f" ({inline(type_)})"
+
+ lines.append(rst_subsubsection(attr_line))
+
+ for k in attr.keys():
+ if k in preprocessed + ignored:
+ continue
+ if k in linkable:
+ value = rst_ref(namespace, k, attr[k])
+ else:
+ value = sanitize(attr[k])
+ lines.append(rst_fields(k, value, 0))
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+
+def parse_sub_messages(entries: List[Dict[str, Any]], namespace: str) -> str:
+ """Parse sub-message definitions"""
+ lines = []
+
+ for entry in entries:
+ lines.append(rst_section(namespace, 'sub-message', entry["name"]))
+ for fmt in entry["formats"]:
+ value = fmt["value"]
+
+ lines.append(rst_bullet(bold(value)))
+ for attr in ['fixed-header', 'attribute-set']:
+ if attr in fmt:
+ lines.append(rst_fields(attr,
+ rst_ref(namespace, attr, fmt[attr]),
+ 1))
+ lines.append("\n")
+
+ return "\n".join(lines)
+
+
+def parse_yaml(obj: Dict[str, Any]) -> str:
+ """Format the whole YAML into a RST string"""
+ lines = []
+
+ # Main header
+
+ lines.append(rst_header())
+
+ family = obj['name']
+
+ title = f"Family ``{family}`` netlink specification"
+ lines.append(rst_title(title))
+ lines.append(rst_paragraph(".. contents:: :depth: 3\n"))
+
+ if "doc" in obj:
+ lines.append(rst_subtitle("Summary"))
+ lines.append(rst_paragraph(obj["doc"], 0))
+
+ # Operations
+ if "operations" in obj:
+ lines.append(rst_subtitle("Operations"))
+ lines.append(parse_operations(obj["operations"]["list"], family))
+
+ # Multicast groups
+ if "mcast-groups" in obj:
+ lines.append(rst_subtitle("Multicast groups"))
+ lines.append(parse_mcast_group(obj["mcast-groups"]["list"]))
+
+ # Definitions
+ if "definitions" in obj:
+ lines.append(rst_subtitle("Definitions"))
+ lines.append(parse_definitions(obj["definitions"], family))
+
+ # Attributes set
+ if "attribute-sets" in obj:
+ lines.append(rst_subtitle("Attribute sets"))
+ lines.append(parse_attr_sets(obj["attribute-sets"], family))
+
+ # Sub-messages
+ if "sub-messages" in obj:
+ lines.append(rst_subtitle("Sub-messages"))
+ lines.append(parse_sub_messages(obj["sub-messages"], family))
+
+ return "\n".join(lines)
+
+
+# Main functions
+# ==============
+
+
+def parse_arguments() -> argparse.Namespace:
+ """Parse arguments from user"""
+ parser = argparse.ArgumentParser(description="Netlink RST generator")
+
+ parser.add_argument("-v", "--verbose", action="store_true")
+ parser.add_argument("-o", "--output", help="Output file name")
+
+ # Index and input are mutually exclusive
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument(
+ "-x", "--index", action="store_true", help="Generate the index page"
+ )
+ group.add_argument("-i", "--input", help="YAML file name")
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+
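+    # Sanity-check the input and output paths before doing any work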
+ if args.input and not os.path.isfile(args.input):
+ logging.warning("%s is not a valid file.", args.input)
+ sys.exit(-1)
+
+ if not args.output:
+ logging.error("No output file specified.")
+ sys.exit(-1)
+
+ if os.path.isfile(args.output):
+ logging.debug("%s already exists. Overwriting it.", args.output)
+
+ return args
+
+
+def parse_yaml_file(filename: str) -> str:
+ """Transform the YAML specified by filename into a rst-formmated string"""
+ with open(filename, "r", encoding="utf-8") as spec_file:
+ yaml_data = yaml.safe_load(spec_file)
+ content = parse_yaml(yaml_data)
+
+ return content
+
+
+def write_to_rstfile(content: str, filename: str) -> None:
+ """Write the generated content into an RST file"""
+ logging.debug("Saving RST file to %s", filename)
+
+ with open(filename, "w", encoding="utf-8") as rst_file:
+ rst_file.write(content)
+
+
+def generate_main_index_rst(output: str) -> None:
+ """Generate the `networking_spec/index` content and write to the file"""
+ lines = []
+
+ lines.append(rst_header())
+ lines.append(rst_label("specs"))
+ lines.append(rst_title("Netlink Family Specifications"))
+ lines.append(rst_toctree(1))
+
+ index_dir = os.path.dirname(output)
+ logging.debug("Looking for .rst files in %s", index_dir)
+ for filename in sorted(os.listdir(index_dir)):
+ if not filename.endswith(".rst") or filename == "index.rst":
+ continue
+ lines.append(f" {filename.replace('.rst', '')}\n")
+
+ logging.debug("Writing an index file at %s", output)
+ write_to_rstfile("".join(lines), output)
+
+
+def main() -> None:
+ """Main function that reads the YAML files and generates the RST files"""
+
+ args = parse_arguments()
+
+ if args.input:
+ logging.debug("Parsing %s", args.input)
+ try:
+            content = parse_yaml_file(args.input)
+ except Exception as exception:
+ logging.warning("Failed to parse %s.", args.input)
+ logging.warning(exception)
+ sys.exit(-1)
+
+ write_to_rstfile(content, args.output)
+
+ if args.index:
+ # Generate the index RST file
+ generate_main_index_rst(args.output)
+
+
+if __name__ == "__main__":
+ main()
+++ /dev/null
-#!/usr/bin/env python3
-# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
-import argparse
-import collections
-import filecmp
-import pathlib
-import os
-import re
-import shutil
-import sys
-import tempfile
-import yaml
-
-sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix())
-from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry
-
-
-def c_upper(name):
- return name.upper().replace('-', '_')
-
-
-def c_lower(name):
- return name.lower().replace('-', '_')
-
-
-def limit_to_number(name):
- """
- Turn a string limit like u32-max or s64-min into its numerical value
- """
- if name[0] == 'u' and name.endswith('-min'):
- return 0
- width = int(name[1:-4])
- if name[0] == 's':
- width -= 1
- value = (1 << width) - 1
- if name[0] == 's' and name.endswith('-min'):
- value = -value - 1
- return value
-
-
-class BaseNlLib:
- def get_family_id(self):
- return 'ys->family_id'
-
-
-class Type(SpecAttr):
- def __init__(self, family, attr_set, attr, value):
- super().__init__(family, attr_set, attr, value)
-
- self.attr = attr
- self.attr_set = attr_set
- self.type = attr['type']
- self.checks = attr.get('checks', {})
-
- self.request = False
- self.reply = False
-
- if 'len' in attr:
- self.len = attr['len']
-
- if 'nested-attributes' in attr:
- self.nested_attrs = attr['nested-attributes']
- if self.nested_attrs == family.name:
- self.nested_render_name = c_lower(f"{family.ident_name}")
- else:
- self.nested_render_name = c_lower(f"{family.ident_name}_{self.nested_attrs}")
-
- if self.nested_attrs in self.family.consts:
- self.nested_struct_type = 'struct ' + self.nested_render_name + '_'
- else:
- self.nested_struct_type = 'struct ' + self.nested_render_name
-
- self.c_name = c_lower(self.name)
- if self.c_name in _C_KW:
- self.c_name += '_'
-
- # Added by resolve():
- self.enum_name = None
- delattr(self, "enum_name")
-
- def _get_real_attr(self):
- # if the attr is for a subset return the "real" attr (just one down, does not recurse)
- return self.family.attr_sets[self.attr_set.subset_of][self.name]
-
- def set_request(self):
- self.request = True
- if self.attr_set.subset_of:
- self._get_real_attr().set_request()
-
- def set_reply(self):
- self.reply = True
- if self.attr_set.subset_of:
- self._get_real_attr().set_reply()
-
- def get_limit(self, limit, default=None):
- value = self.checks.get(limit, default)
- if value is None:
- return value
- if isinstance(value, int):
- return value
- if value in self.family.consts:
- raise Exception("Resolving family constants not implemented, yet")
- return limit_to_number(value)
-
- def get_limit_str(self, limit, default=None, suffix=''):
- value = self.checks.get(limit, default)
- if value is None:
- return ''
- if isinstance(value, int):
- return str(value) + suffix
- if value in self.family.consts:
- return c_upper(f"{self.family['name']}-{value}")
- return c_upper(value)
-
- def resolve(self):
- if 'name-prefix' in self.attr:
- enum_name = f"{self.attr['name-prefix']}{self.name}"
- else:
- enum_name = f"{self.attr_set.name_prefix}{self.name}"
- self.enum_name = c_upper(enum_name)
-
- if self.attr_set.subset_of:
- if self.checks != self._get_real_attr().checks:
- raise Exception("Overriding checks not supported by codegen, yet")
-
- def is_multi_val(self):
- return None
-
- def is_scalar(self):
- return self.type in {'u8', 'u16', 'u32', 'u64', 's32', 's64'}
-
- def is_recursive(self):
- return False
-
- def is_recursive_for_op(self, ri):
- return self.is_recursive() and not ri.op
-
- def presence_type(self):
- return 'bit'
-
- def presence_member(self, space, type_filter):
- if self.presence_type() != type_filter:
- return
-
- if self.presence_type() == 'bit':
- pfx = '__' if space == 'user' else ''
- return f"{pfx}u32 {self.c_name}:1;"
-
- if self.presence_type() == 'len':
- pfx = '__' if space == 'user' else ''
- return f"{pfx}u32 {self.c_name}_len;"
-
- def _complex_member_type(self, ri):
- return None
-
- def free_needs_iter(self):
- return False
-
- def free(self, ri, var, ref):
- if self.is_multi_val() or self.presence_type() == 'len':
- ri.cw.p(f'free({var}->{ref}{self.c_name});')
-
- def arg_member(self, ri):
- member = self._complex_member_type(ri)
- if member:
- arg = [member + ' *' + self.c_name]
- if self.presence_type() == 'count':
- arg += ['unsigned int n_' + self.c_name]
- return arg
- raise Exception(f"Struct member not implemented for class type {self.type}")
-
- def struct_member(self, ri):
- if self.is_multi_val():
- ri.cw.p(f"unsigned int n_{self.c_name};")
- member = self._complex_member_type(ri)
- if member:
- ptr = '*' if self.is_multi_val() else ''
- if self.is_recursive_for_op(ri):
- ptr = '*'
- ri.cw.p(f"{member} {ptr}{self.c_name};")
- return
- members = self.arg_member(ri)
- for one in members:
- ri.cw.p(one + ';')
-
- def _attr_policy(self, policy):
- return '{ .type = ' + policy + ', }'
-
- def attr_policy(self, cw):
- policy = f'NLA_{c_upper(self.type)}'
- if self.attr.get('byte-order') == 'big-endian':
- if self.type in {'u16', 'u32'}:
- policy = f'NLA_BE{self.type[1:]}'
-
- spec = self._attr_policy(policy)
- cw.p(f"\t[{self.enum_name}] = {spec},")
-
- def _attr_typol(self):
- raise Exception(f"Type policy not implemented for class type {self.type}")
-
- def attr_typol(self, cw):
- typol = self._attr_typol()
- cw.p(f'[{self.enum_name}] = {"{"} .name = "{self.name}", {typol}{"}"},')
-
- def _attr_put_line(self, ri, var, line):
- if self.presence_type() == 'bit':
- ri.cw.p(f"if ({var}->_present.{self.c_name})")
- elif self.presence_type() == 'len':
- ri.cw.p(f"if ({var}->_present.{self.c_name}_len)")
- ri.cw.p(f"{line};")
-
- def _attr_put_simple(self, ri, var, put_type):
- line = f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
- self._attr_put_line(ri, var, line)
-
- def attr_put(self, ri, var):
- raise Exception(f"Put not implemented for class type {self.type}")
-
- def _attr_get(self, ri, var):
- raise Exception(f"Attr get not implemented for class type {self.type}")
-
- def attr_get(self, ri, var, first):
- lines, init_lines, local_vars = self._attr_get(ri, var)
- if type(lines) is str:
- lines = [lines]
- if type(init_lines) is str:
- init_lines = [init_lines]
-
- kw = 'if' if first else 'else if'
- ri.cw.block_start(line=f"{kw} (type == {self.enum_name})")
- if local_vars:
- for local in local_vars:
- ri.cw.p(local)
- ri.cw.nl()
-
- if not self.is_multi_val():
- ri.cw.p("if (ynl_attr_validate(yarg, attr))")
- ri.cw.p("return YNL_PARSE_CB_ERROR;")
- if self.presence_type() == 'bit':
- ri.cw.p(f"{var}->_present.{self.c_name} = 1;")
-
- if init_lines:
- ri.cw.nl()
- for line in init_lines:
- ri.cw.p(line)
-
- for line in lines:
- ri.cw.p(line)
- ri.cw.block_end()
- return True
-
- def _setter_lines(self, ri, member, presence):
- raise Exception(f"Setter not implemented for class type {self.type}")
-
- def setter(self, ri, space, direction, deref=False, ref=None):
- ref = (ref if ref else []) + [self.c_name]
- var = "req"
- member = f"{var}->{'.'.join(ref)}"
-
- code = []
- presence = ''
- for i in range(0, len(ref)):
- presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"
- # Every layer below last is a nest, so we know it uses bit presence
- # last layer is "self" and may be a complex type
- if i == len(ref) - 1 and self.presence_type() != 'bit':
- continue
- code.append(presence + ' = 1;')
- code += self._setter_lines(ri, member, presence)
-
- func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
- free = bool([x for x in code if 'free(' in x])
- alloc = bool([x for x in code if 'alloc(' in x])
- if free and not alloc:
- func_name = '__' + func_name
- ri.cw.write_func('static inline void', func_name, body=code,
- args=[f'{type_name(ri, direction, deref=deref)} *{var}'] + self.arg_member(ri))
-
-
-class TypeUnused(Type):
- def presence_type(self):
- return ''
-
- def arg_member(self, ri):
- return []
-
- def _attr_get(self, ri, var):
- return ['return YNL_PARSE_CB_ERROR;'], None, None
-
- def _attr_typol(self):
- return '.type = YNL_PT_REJECT, '
-
- def attr_policy(self, cw):
- pass
-
- def attr_put(self, ri, var):
- pass
-
- def attr_get(self, ri, var, first):
- pass
-
- def setter(self, ri, space, direction, deref=False, ref=None):
- pass
-
-
-class TypePad(Type):
- def presence_type(self):
- return ''
-
- def arg_member(self, ri):
- return []
-
- def _attr_typol(self):
- return '.type = YNL_PT_IGNORE, '
-
- def attr_put(self, ri, var):
- pass
-
- def attr_get(self, ri, var, first):
- pass
-
- def attr_policy(self, cw):
- pass
-
- def setter(self, ri, space, direction, deref=False, ref=None):
- pass
-
-
-class TypeScalar(Type):
- def __init__(self, family, attr_set, attr, value):
- super().__init__(family, attr_set, attr, value)
-
- self.byte_order_comment = ''
- if 'byte-order' in attr:
- self.byte_order_comment = f" /* {attr['byte-order']} */"
-
- if 'enum' in self.attr:
- enum = self.family.consts[self.attr['enum']]
- low, high = enum.value_range()
- if 'min' not in self.checks:
- if low != 0 or self.type[0] == 's':
- self.checks['min'] = low
- if 'max' not in self.checks:
- self.checks['max'] = high
-
- if 'min' in self.checks and 'max' in self.checks:
- if self.get_limit('min') > self.get_limit('max'):
- raise Exception(f'Invalid limit for "{self.name}" min: {self.get_limit("min")} max: {self.get_limit("max")}')
- self.checks['range'] = True
-
- low = min(self.get_limit('min', 0), self.get_limit('max', 0))
- high = max(self.get_limit('min', 0), self.get_limit('max', 0))
- if low < 0 and self.type[0] == 'u':
- raise Exception(f'Invalid limit for "{self.name}" negative limit for unsigned type')
- if low < -32768 or high > 32767:
- self.checks['full-range'] = True
-
- # Added by resolve():
- self.is_bitfield = None
- delattr(self, "is_bitfield")
- self.type_name = None
- delattr(self, "type_name")
-
- def resolve(self):
- self.resolve_up(super())
-
- if 'enum-as-flags' in self.attr and self.attr['enum-as-flags']:
- self.is_bitfield = True
- elif 'enum' in self.attr:
- self.is_bitfield = self.family.consts[self.attr['enum']]['type'] == 'flags'
- else:
- self.is_bitfield = False
-
- if not self.is_bitfield and 'enum' in self.attr:
- self.type_name = self.family.consts[self.attr['enum']].user_type
- elif self.is_auto_scalar:
- self.type_name = '__' + self.type[0] + '64'
- else:
- self.type_name = '__' + self.type
-
- def _attr_policy(self, policy):
- if 'flags-mask' in self.checks or self.is_bitfield:
- if self.is_bitfield:
- enum = self.family.consts[self.attr['enum']]
- mask = enum.get_mask(as_flags=True)
- else:
- flags = self.family.consts[self.checks['flags-mask']]
- flag_cnt = len(flags['entries'])
- mask = (1 << flag_cnt) - 1
- return f"NLA_POLICY_MASK({policy}, 0x{mask:x})"
- elif 'full-range' in self.checks:
- return f"NLA_POLICY_FULL_RANGE({policy}, &{c_lower(self.enum_name)}_range)"
- elif 'range' in self.checks:
- return f"NLA_POLICY_RANGE({policy}, {self.get_limit_str('min')}, {self.get_limit_str('max')})"
- elif 'min' in self.checks:
- return f"NLA_POLICY_MIN({policy}, {self.get_limit_str('min')})"
- elif 'max' in self.checks:
- return f"NLA_POLICY_MAX({policy}, {self.get_limit_str('max')})"
- return super()._attr_policy(policy)
-
- def _attr_typol(self):
- return f'.type = YNL_PT_U{c_upper(self.type[1:])}, '
-
- def arg_member(self, ri):
- return [f'{self.type_name} {self.c_name}{self.byte_order_comment}']
-
- def attr_put(self, ri, var):
- self._attr_put_simple(ri, var, self.type)
-
- def _attr_get(self, ri, var):
- return f"{var}->{self.c_name} = ynl_attr_get_{self.type}(attr);", None, None
-
- def _setter_lines(self, ri, member, presence):
- return [f"{member} = {self.c_name};"]
-
-
-class TypeFlag(Type):
- def arg_member(self, ri):
- return []
-
- def _attr_typol(self):
- return '.type = YNL_PT_FLAG, '
-
- def attr_put(self, ri, var):
- self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, NULL, 0)")
-
- def _attr_get(self, ri, var):
- return [], None, None
-
- def _setter_lines(self, ri, member, presence):
- return []
-
-
-class TypeString(Type):
- def arg_member(self, ri):
- return [f"const char *{self.c_name}"]
-
- def presence_type(self):
- return 'len'
-
- def struct_member(self, ri):
- ri.cw.p(f"char *{self.c_name};")
-
- def _attr_typol(self):
- return f'.type = YNL_PT_NUL_STR, '
-
- def _attr_policy(self, policy):
- if 'exact-len' in self.checks:
- mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')'
- else:
- mem = '{ .type = ' + policy
- if 'max-len' in self.checks:
- mem += ', .len = ' + self.get_limit_str('max-len')
- mem += ', }'
- return mem
-
- def attr_policy(self, cw):
- if self.checks.get('unterminated-ok', False):
- policy = 'NLA_STRING'
- else:
- policy = 'NLA_NUL_STRING'
-
- spec = self._attr_policy(policy)
- cw.p(f"\t[{self.enum_name}] = {spec},")
-
- def attr_put(self, ri, var):
- self._attr_put_simple(ri, var, 'str')
-
- def _attr_get(self, ri, var):
- len_mem = var + '->_present.' + self.c_name + '_len'
- return [f"{len_mem} = len;",
- f"{var}->{self.c_name} = malloc(len + 1);",
- f"memcpy({var}->{self.c_name}, ynl_attr_get_str(attr), len);",
- f"{var}->{self.c_name}[len] = 0;"], \
- ['len = strnlen(ynl_attr_get_str(attr), ynl_attr_data_len(attr));'], \
- ['unsigned int len;']
-
- def _setter_lines(self, ri, member, presence):
- return [f"free({member});",
- f"{presence}_len = strlen({self.c_name});",
- f"{member} = malloc({presence}_len + 1);",
- f'memcpy({member}, {self.c_name}, {presence}_len);',
- f'{member}[{presence}_len] = 0;']
-
-
-class TypeBinary(Type):
- def arg_member(self, ri):
- return [f"const void *{self.c_name}", 'size_t len']
-
- def presence_type(self):
- return 'len'
-
- def struct_member(self, ri):
- ri.cw.p(f"void *{self.c_name};")
-
- def _attr_typol(self):
- return f'.type = YNL_PT_BINARY,'
-
- def _attr_policy(self, policy):
- if len(self.checks) == 0:
- pass
- elif len(self.checks) == 1:
- check_name = list(self.checks)[0]
- if check_name not in {'exact-len', 'min-len', 'max-len'}:
- raise Exception('Unsupported check for binary type: ' + check_name)
- else:
- raise Exception('More than one check for binary type not implemented, yet')
-
- if len(self.checks) == 0:
- mem = '{ .type = NLA_BINARY, }'
- elif 'exact-len' in self.checks:
- mem = 'NLA_POLICY_EXACT_LEN(' + self.get_limit_str('exact-len') + ')'
- elif 'min-len' in self.checks:
- mem = '{ .len = ' + self.get_limit_str('min-len') + ', }'
- elif 'max-len' in self.checks:
- mem = 'NLA_POLICY_MAX_LEN(' + self.get_limit_str('max-len') + ')'
-
- return mem
-
- def attr_put(self, ri, var):
- self._attr_put_line(ri, var, f"ynl_attr_put(nlh, {self.enum_name}, " +
- f"{var}->{self.c_name}, {var}->_present.{self.c_name}_len)")
-
- def _attr_get(self, ri, var):
- len_mem = var + '->_present.' + self.c_name + '_len'
- return [f"{len_mem} = len;",
- f"{var}->{self.c_name} = malloc(len);",
- f"memcpy({var}->{self.c_name}, ynl_attr_data(attr), len);"], \
- ['len = ynl_attr_data_len(attr);'], \
- ['unsigned int len;']
-
- def _setter_lines(self, ri, member, presence):
- return [f"free({member});",
- f"{presence}_len = len;",
- f"{member} = malloc({presence}_len);",
- f'memcpy({member}, {self.c_name}, {presence}_len);']
-
-
-class TypeBitfield32(Type):
- def _complex_member_type(self, ri):
- return "struct nla_bitfield32"
-
- def _attr_typol(self):
- return f'.type = YNL_PT_BITFIELD32, '
-
- def _attr_policy(self, policy):
- if not 'enum' in self.attr:
- raise Exception('Enum required for bitfield32 attr')
- enum = self.family.consts[self.attr['enum']]
- mask = enum.get_mask(as_flags=True)
- return f"NLA_POLICY_BITFIELD32({mask})"
-
- def attr_put(self, ri, var):
- line = f"ynl_attr_put(nlh, {self.enum_name}, &{var}->{self.c_name}, sizeof(struct nla_bitfield32))"
- self._attr_put_line(ri, var, line)
-
- def _attr_get(self, ri, var):
- return f"memcpy(&{var}->{self.c_name}, ynl_attr_data(attr), sizeof(struct nla_bitfield32));", None, None
-
- def _setter_lines(self, ri, member, presence):
- return [f"memcpy(&{member}, {self.c_name}, sizeof(struct nla_bitfield32));"]
-
-
-class TypeNest(Type):
- def is_recursive(self):
- return self.family.pure_nested_structs[self.nested_attrs].recursive
-
- def _complex_member_type(self, ri):
- return self.nested_struct_type
-
- def free(self, ri, var, ref):
- at = '&'
- if self.is_recursive_for_op(ri):
- at = ''
- ri.cw.p(f'if ({var}->{ref}{self.c_name})')
- ri.cw.p(f'{self.nested_render_name}_free({at}{var}->{ref}{self.c_name});')
-
- def _attr_typol(self):
- return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
-
- def _attr_policy(self, policy):
- return 'NLA_POLICY_NESTED(' + self.nested_render_name + '_nl_policy)'
-
- def attr_put(self, ri, var):
- at = '' if self.is_recursive_for_op(ri) else '&'
- self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
- f"{self.enum_name}, {at}{var}->{self.c_name})")
-
- def _attr_get(self, ri, var):
- get_lines = [f"if ({self.nested_render_name}_parse(&parg, attr))",
- "return YNL_PARSE_CB_ERROR;"]
- init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
- f"parg.data = &{var}->{self.c_name};"]
- return get_lines, init_lines, None
-
- def setter(self, ri, space, direction, deref=False, ref=None):
- ref = (ref if ref else []) + [self.c_name]
-
- for _, attr in ri.family.pure_nested_structs[self.nested_attrs].member_list():
- if attr.is_recursive():
- continue
- attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref)
-
-
-class TypeMultiAttr(Type):
- def __init__(self, family, attr_set, attr, value, base_type):
- super().__init__(family, attr_set, attr, value)
-
- self.base_type = base_type
-
- def is_multi_val(self):
- return True
-
- def presence_type(self):
- return 'count'
-
- def _complex_member_type(self, ri):
- if 'type' not in self.attr or self.attr['type'] == 'nest':
- return self.nested_struct_type
- elif self.attr['type'] in scalars:
- scalar_pfx = '__' if ri.ku_space == 'user' else ''
- return scalar_pfx + self.attr['type']
- else:
- raise Exception(f"Sub-type {self.attr['type']} not supported yet")
-
- def free_needs_iter(self):
- return 'type' not in self.attr or self.attr['type'] == 'nest'
-
- def free(self, ri, var, ref):
- if self.attr['type'] in scalars:
- ri.cw.p(f"free({var}->{ref}{self.c_name});")
- elif 'type' not in self.attr or self.attr['type'] == 'nest':
- ri.cw.p(f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)")
- ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);')
- ri.cw.p(f"free({var}->{ref}{self.c_name});")
- else:
- raise Exception(f"Free of MultiAttr sub-type {self.attr['type']} not supported yet")
-
- def _attr_policy(self, policy):
- return self.base_type._attr_policy(policy)
-
- def _attr_typol(self):
- return self.base_type._attr_typol()
-
- def _attr_get(self, ri, var):
- return f'n_{self.c_name}++;', None, None
-
- def attr_put(self, ri, var):
- if self.attr['type'] in scalars:
- put_type = self.type
- ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
- ri.cw.p(f"ynl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name}[i]);")
- elif 'type' not in self.attr or self.attr['type'] == 'nest':
- ri.cw.p(f"for (unsigned int i = 0; i < {var}->n_{self.c_name}; i++)")
- self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
- f"{self.enum_name}, &{var}->{self.c_name}[i])")
- else:
- raise Exception(f"Put of MultiAttr sub-type {self.attr['type']} not supported yet")
-
- def _setter_lines(self, ri, member, presence):
- # For multi-attr we have a count, not presence, hack up the presence
- presence = presence[:-(len('_present.') + len(self.c_name))] + "n_" + self.c_name
- return [f"free({member});",
- f"{member} = {self.c_name};",
- f"{presence} = n_{self.c_name};"]
-
-
-class TypeArrayNest(Type):
- def is_multi_val(self):
- return True
-
- def presence_type(self):
- return 'count'
-
- def _complex_member_type(self, ri):
- if 'sub-type' not in self.attr or self.attr['sub-type'] == 'nest':
- return self.nested_struct_type
- elif self.attr['sub-type'] in scalars:
- scalar_pfx = '__' if ri.ku_space == 'user' else ''
- return scalar_pfx + self.attr['sub-type']
- else:
- raise Exception(f"Sub-type {self.attr['sub-type']} not supported yet")
-
- def _attr_typol(self):
- return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
-
- def _attr_get(self, ri, var):
- local_vars = ['const struct nlattr *attr2;']
- get_lines = [f'attr_{self.c_name} = attr;',
- 'ynl_attr_for_each_nested(attr2, attr)',
- f'\t{var}->n_{self.c_name}++;']
- return get_lines, None, local_vars
-
-
-class TypeNestTypeValue(Type):
- def _complex_member_type(self, ri):
- return self.nested_struct_type
-
- def _attr_typol(self):
- return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
-
- def _attr_get(self, ri, var):
- prev = 'attr'
- tv_args = ''
- get_lines = []
- local_vars = []
- init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
- f"parg.data = &{var}->{self.c_name};"]
- if 'type-value' in self.attr:
- tv_names = [c_lower(x) for x in self.attr["type-value"]]
- local_vars += [f'const struct nlattr *attr_{", *attr_".join(tv_names)};']
- local_vars += [f'__u32 {", ".join(tv_names)};']
- for level in self.attr["type-value"]:
- level = c_lower(level)
- get_lines += [f'attr_{level} = ynl_attr_data({prev});']
- get_lines += [f'{level} = ynl_attr_type(attr_{level});']
- prev = 'attr_' + level
-
- tv_args = f", {', '.join(tv_names)}"
-
- get_lines += [f"{self.nested_render_name}_parse(&parg, {prev}{tv_args});"]
- return get_lines, init_lines, local_vars
-
-
-class Struct:
- def __init__(self, family, space_name, type_list=None, inherited=None):
- self.family = family
- self.space_name = space_name
- self.attr_set = family.attr_sets[space_name]
- # Use list to catch comparisons with empty sets
- self._inherited = inherited if inherited is not None else []
- self.inherited = []
-
- self.nested = type_list is None
- if family.name == c_lower(space_name):
- self.render_name = c_lower(family.ident_name)
- else:
- self.render_name = c_lower(family.ident_name + '-' + space_name)
- self.struct_name = 'struct ' + self.render_name
- if self.nested and space_name in family.consts:
- self.struct_name += '_'
- self.ptr_name = self.struct_name + ' *'
- # All attr sets this one contains, directly or multiple levels down
- self.child_nests = set()
-
- self.request = False
- self.reply = False
- self.recursive = False
-
- self.attr_list = []
- self.attrs = dict()
- if type_list is not None:
- for t in type_list:
- self.attr_list.append((t, self.attr_set[t]),)
- else:
- for t in self.attr_set:
- self.attr_list.append((t, self.attr_set[t]),)
-
- max_val = 0
- self.attr_max_val = None
- for name, attr in self.attr_list:
- if attr.value >= max_val:
- max_val = attr.value
- self.attr_max_val = attr
- self.attrs[name] = attr
-
- def __iter__(self):
- yield from self.attrs
-
- def __getitem__(self, key):
- return self.attrs[key]
-
- def member_list(self):
- return self.attr_list
-
- def set_inherited(self, new_inherited):
- if self._inherited != new_inherited:
- raise Exception("Inheriting different members not supported")
- self.inherited = [c_lower(x) for x in sorted(self._inherited)]
-
-
-class EnumEntry(SpecEnumEntry):
- def __init__(self, enum_set, yaml, prev, value_start):
- super().__init__(enum_set, yaml, prev, value_start)
-
- if prev:
- self.value_change = (self.value != prev.value + 1)
- else:
- self.value_change = (self.value != 0)
- self.value_change = self.value_change or self.enum_set['type'] == 'flags'
-
- # Added by resolve:
- self.c_name = None
- delattr(self, "c_name")
-
- def resolve(self):
- self.resolve_up(super())
-
- self.c_name = c_upper(self.enum_set.value_pfx + self.name)
-
-
-class EnumSet(SpecEnumSet):
- def __init__(self, family, yaml):
- self.render_name = c_lower(family.ident_name + '-' + yaml['name'])
-
- if 'enum-name' in yaml:
- if yaml['enum-name']:
- self.enum_name = 'enum ' + c_lower(yaml['enum-name'])
- self.user_type = self.enum_name
- else:
- self.enum_name = None
- else:
- self.enum_name = 'enum ' + self.render_name
-
- if self.enum_name:
- self.user_type = self.enum_name
- else:
- self.user_type = 'int'
-
- self.value_pfx = yaml.get('name-prefix', f"{family.ident_name}-{yaml['name']}-")
- self.header = yaml.get('header', None)
- self.enum_cnt_name = yaml.get('enum-cnt-name', None)
-
- super().__init__(family, yaml)
-
- def new_entry(self, entry, prev_entry, value_start):
- return EnumEntry(self, entry, prev_entry, value_start)
-
- def value_range(self):
- low = min([x.value for x in self.entries.values()])
- high = max([x.value for x in self.entries.values()])
-
- if high - low + 1 != len(self.entries):
- raise Exception("Can't get value range for a noncontiguous enum")
-
- return low, high
-
-
-class AttrSet(SpecAttrSet):
- def __init__(self, family, yaml):
- super().__init__(family, yaml)
-
- if self.subset_of is None:
- if 'name-prefix' in yaml:
- pfx = yaml['name-prefix']
- elif self.name == family.name:
- pfx = family.ident_name + '-a-'
- else:
- pfx = f"{family.ident_name}-a-{self.name}-"
- self.name_prefix = c_upper(pfx)
- self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max"))
- self.cnt_name = c_upper(self.yaml.get('attr-cnt-name', f"__{self.name_prefix}max"))
- else:
- self.name_prefix = family.attr_sets[self.subset_of].name_prefix
- self.max_name = family.attr_sets[self.subset_of].max_name
- self.cnt_name = family.attr_sets[self.subset_of].cnt_name
-
- # Added by resolve:
- self.c_name = None
- delattr(self, "c_name")
-
- def resolve(self):
- self.c_name = c_lower(self.name)
- if self.c_name in _C_KW:
- self.c_name += '_'
- if self.c_name == self.family.c_name:
- self.c_name = ''
-
- def new_attr(self, elem, value):
- if elem['type'] in scalars:
- t = TypeScalar(self.family, self, elem, value)
- elif elem['type'] == 'unused':
- t = TypeUnused(self.family, self, elem, value)
- elif elem['type'] == 'pad':
- t = TypePad(self.family, self, elem, value)
- elif elem['type'] == 'flag':
- t = TypeFlag(self.family, self, elem, value)
- elif elem['type'] == 'string':
- t = TypeString(self.family, self, elem, value)
- elif elem['type'] == 'binary':
- t = TypeBinary(self.family, self, elem, value)
- elif elem['type'] == 'bitfield32':
- t = TypeBitfield32(self.family, self, elem, value)
- elif elem['type'] == 'nest':
- t = TypeNest(self.family, self, elem, value)
- elif elem['type'] == 'indexed-array' and 'sub-type' in elem:
- if elem["sub-type"] == 'nest':
- t = TypeArrayNest(self.family, self, elem, value)
- else:
- raise Exception(f'new_attr: unsupported sub-type {elem["sub-type"]}')
- elif elem['type'] == 'nest-type-value':
- t = TypeNestTypeValue(self.family, self, elem, value)
- else:
- raise Exception(f"No typed class for type {elem['type']}")
-
- if 'multi-attr' in elem and elem['multi-attr']:
- t = TypeMultiAttr(self.family, self, elem, value, t)
-
- return t
-
-
-class Operation(SpecOperation):
- def __init__(self, family, yaml, req_value, rsp_value):
- super().__init__(family, yaml, req_value, rsp_value)
-
- self.render_name = c_lower(family.ident_name + '_' + self.name)
-
- self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
- ('dump' in yaml and 'request' in yaml['dump'])
-
- self.has_ntf = False
-
- # Added by resolve:
- self.enum_name = None
- delattr(self, "enum_name")
-
- def resolve(self):
- self.resolve_up(super())
-
- if not self.is_async:
- self.enum_name = self.family.op_prefix + c_upper(self.name)
- else:
- self.enum_name = self.family.async_op_prefix + c_upper(self.name)
-
- def mark_has_ntf(self):
- self.has_ntf = True
-
-
-class Family(SpecFamily):
- def __init__(self, file_name, exclude_ops):
- # Added by resolve:
- self.c_name = None
- delattr(self, "c_name")
- self.op_prefix = None
- delattr(self, "op_prefix")
- self.async_op_prefix = None
- delattr(self, "async_op_prefix")
- self.mcgrps = None
- delattr(self, "mcgrps")
- self.consts = None
- delattr(self, "consts")
- self.hooks = None
- delattr(self, "hooks")
-
- super().__init__(file_name, exclude_ops=exclude_ops)
-
- self.fam_key = c_upper(self.yaml.get('c-family-name', self.yaml["name"] + '_FAMILY_NAME'))
- self.ver_key = c_upper(self.yaml.get('c-version-name', self.yaml["name"] + '_FAMILY_VERSION'))
-
- if 'definitions' not in self.yaml:
- self.yaml['definitions'] = []
-
- if 'uapi-header' in self.yaml:
- self.uapi_header = self.yaml['uapi-header']
- else:
- self.uapi_header = f"linux/{self.ident_name}.h"
- if self.uapi_header.startswith("linux/") and self.uapi_header.endswith('.h'):
- self.uapi_header_name = self.uapi_header[6:-2]
- else:
- self.uapi_header_name = self.ident_name
-
- def resolve(self):
- self.resolve_up(super())
-
- if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}:
- raise Exception("Codegen only supported for genetlink")
-
- self.c_name = c_lower(self.ident_name)
- if 'name-prefix' in self.yaml['operations']:
- self.op_prefix = c_upper(self.yaml['operations']['name-prefix'])
- else:
- self.op_prefix = c_upper(self.yaml['name'] + '-cmd-')
- if 'async-prefix' in self.yaml['operations']:
- self.async_op_prefix = c_upper(self.yaml['operations']['async-prefix'])
- else:
- self.async_op_prefix = self.op_prefix
-
- self.mcgrps = self.yaml.get('mcast-groups', {'list': []})
-
- self.hooks = dict()
- for when in ['pre', 'post']:
- self.hooks[when] = dict()
- for op_mode in ['do', 'dump']:
- self.hooks[when][op_mode] = dict()
- self.hooks[when][op_mode]['set'] = set()
- self.hooks[when][op_mode]['list'] = []
-
- # dict space-name -> 'request': set(attrs), 'reply': set(attrs)
- self.root_sets = dict()
- # dict space-name -> set('request', 'reply')
- self.pure_nested_structs = dict()
-
- self._mark_notify()
- self._mock_up_events()
-
- self._load_root_sets()
- self._load_nested_sets()
- self._load_attr_use()
- self._load_hooks()
-
- self.kernel_policy = self.yaml.get('kernel-policy', 'split')
- if self.kernel_policy == 'global':
- self._load_global_policy()
-
- def new_enum(self, elem):
- return EnumSet(self, elem)
-
- def new_attr_set(self, elem):
- return AttrSet(self, elem)
-
- def new_operation(self, elem, req_value, rsp_value):
- return Operation(self, elem, req_value, rsp_value)
-
- def _mark_notify(self):
- for op in self.msgs.values():
- if 'notify' in op:
- self.ops[op['notify']].mark_has_ntf()
-
- # Fake a 'do' equivalent of all events, so that we can render their response parsing
- def _mock_up_events(self):
- for op in self.yaml['operations']['list']:
- if 'event' in op:
- op['do'] = {
- 'reply': {
- 'attributes': op['event']['attributes']
- }
- }
-
- def _load_root_sets(self):
- for op_name, op in self.msgs.items():
- if 'attribute-set' not in op:
- continue
-
- req_attrs = set()
- rsp_attrs = set()
- for op_mode in ['do', 'dump']:
- if op_mode in op and 'request' in op[op_mode]:
- req_attrs.update(set(op[op_mode]['request']['attributes']))
- if op_mode in op and 'reply' in op[op_mode]:
- rsp_attrs.update(set(op[op_mode]['reply']['attributes']))
- if 'event' in op:
- rsp_attrs.update(set(op['event']['attributes']))
-
- if op['attribute-set'] not in self.root_sets:
- self.root_sets[op['attribute-set']] = {'request': req_attrs, 'reply': rsp_attrs}
- else:
- self.root_sets[op['attribute-set']]['request'].update(req_attrs)
- self.root_sets[op['attribute-set']]['reply'].update(rsp_attrs)
-
- def _sort_pure_types(self):
- # Try to reorder according to dependencies
- pns_key_list = list(self.pure_nested_structs.keys())
- pns_key_seen = set()
- rounds = len(pns_key_list) ** 2 # it's basically bubble sort
- for _ in range(rounds):
- if len(pns_key_list) == 0:
- break
- name = pns_key_list.pop(0)
- finished = True
- for _, spec in self.attr_sets[name].items():
- if 'nested-attributes' in spec:
- nested = spec['nested-attributes']
- # If the unknown nest we hit is recursive it's fine, it'll be a pointer
- if self.pure_nested_structs[nested].recursive:
- continue
- if nested not in pns_key_seen:
- # Dicts are sorted, this will make struct last
- struct = self.pure_nested_structs.pop(name)
- self.pure_nested_structs[name] = struct
- finished = False
- break
- if finished:
- pns_key_seen.add(name)
- else:
- pns_key_list.append(name)
-
- def _load_nested_sets(self):
- attr_set_queue = list(self.root_sets.keys())
- attr_set_seen = set(self.root_sets.keys())
-
- while len(attr_set_queue):
- a_set = attr_set_queue.pop(0)
- for attr, spec in self.attr_sets[a_set].items():
- if 'nested-attributes' not in spec:
- continue
-
- nested = spec['nested-attributes']
- if nested not in attr_set_seen:
- attr_set_queue.append(nested)
- attr_set_seen.add(nested)
-
- inherit = set()
- if nested not in self.root_sets:
- if nested not in self.pure_nested_structs:
- self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit)
- else:
- raise Exception(f'Using attr set as root and nested not supported - {nested}')
-
- if 'type-value' in spec:
- if nested in self.root_sets:
- raise Exception("Inheriting members to a space used as root not supported")
- inherit.update(set(spec['type-value']))
- elif spec['type'] == 'indexed-array':
- inherit.add('idx')
- self.pure_nested_structs[nested].set_inherited(inherit)
-
- for root_set, rs_members in self.root_sets.items():
- for attr, spec in self.attr_sets[root_set].items():
- if 'nested-attributes' in spec:
- nested = spec['nested-attributes']
- if attr in rs_members['request']:
- self.pure_nested_structs[nested].request = True
- if attr in rs_members['reply']:
- self.pure_nested_structs[nested].reply = True
-
- self._sort_pure_types()
-
- # Propagate the request / reply / recursive
- for attr_set, struct in reversed(self.pure_nested_structs.items()):
- for _, spec in self.attr_sets[attr_set].items():
- if 'nested-attributes' in spec:
- child_name = spec['nested-attributes']
- struct.child_nests.add(child_name)
- child = self.pure_nested_structs.get(child_name)
- if child:
- if not child.recursive:
- struct.child_nests.update(child.child_nests)
- child.request |= struct.request
- child.reply |= struct.reply
- if attr_set in struct.child_nests:
- struct.recursive = True
-
- self._sort_pure_types()
-
- def _load_attr_use(self):
- for _, struct in self.pure_nested_structs.items():
- if struct.request:
- for _, arg in struct.member_list():
- arg.set_request()
- if struct.reply:
- for _, arg in struct.member_list():
- arg.set_reply()
-
- for root_set, rs_members in self.root_sets.items():
- for attr, spec in self.attr_sets[root_set].items():
- if attr in rs_members['request']:
- spec.set_request()
- if attr in rs_members['reply']:
- spec.set_reply()
-
- def _load_global_policy(self):
- global_set = set()
- attr_set_name = None
- for op_name, op in self.ops.items():
- if not op:
- continue
- if 'attribute-set' not in op:
- continue
-
- if attr_set_name is None:
- attr_set_name = op['attribute-set']
- if attr_set_name != op['attribute-set']:
- raise Exception('For a global policy all ops must use the same set')
-
- for op_mode in ['do', 'dump']:
- if op_mode in op:
- req = op[op_mode].get('request')
- if req:
- global_set.update(req.get('attributes', []))
-
- self.global_policy = []
- self.global_policy_set = attr_set_name
- for attr in self.attr_sets[attr_set_name]:
- if attr in global_set:
- self.global_policy.append(attr)
-
- def _load_hooks(self):
- for op in self.ops.values():
- for op_mode in ['do', 'dump']:
- if op_mode not in op:
- continue
- for when in ['pre', 'post']:
- if when not in op[op_mode]:
- continue
- name = op[op_mode][when]
- if name in self.hooks[when][op_mode]['set']:
- continue
- self.hooks[when][op_mode]['set'].add(name)
- self.hooks[when][op_mode]['list'].append(name)
-
-
-class RenderInfo:
- def __init__(self, cw, family, ku_space, op, op_mode, attr_set=None):
- self.family = family
- self.nl = cw.nlib
- self.ku_space = ku_space
- self.op_mode = op_mode
- self.op = op
-
- self.fixed_hdr = None
- if op and op.fixed_header:
- self.fixed_hdr = 'struct ' + c_lower(op.fixed_header)
-
- # 'do' and 'dump' response parsing is identical
- self.type_consistent = True
- if op_mode != 'do' and 'dump' in op:
- if 'do' in op:
- if ('reply' in op['do']) != ('reply' in op["dump"]):
- self.type_consistent = False
- elif 'reply' in op['do'] and op["do"]["reply"] != op["dump"]["reply"]:
- self.type_consistent = False
- else:
- self.type_consistent = False
-
- self.attr_set = attr_set
- if not self.attr_set:
- self.attr_set = op['attribute-set']
-
- self.type_name_conflict = False
- if op:
- self.type_name = c_lower(op.name)
- else:
- self.type_name = c_lower(attr_set)
- if attr_set in family.consts:
- self.type_name_conflict = True
-
- self.cw = cw
-
- self.struct = dict()
- if op_mode == 'notify':
- op_mode = 'do'
- for op_dir in ['request', 'reply']:
- if op:
- type_list = []
- if op_dir in op[op_mode]:
- type_list = op[op_mode][op_dir]['attributes']
- self.struct[op_dir] = Struct(family, self.attr_set, type_list=type_list)
- if op_mode == 'event':
- self.struct['reply'] = Struct(family, self.attr_set, type_list=op['event']['attributes'])
-
-
-class CodeWriter:
- def __init__(self, nlib, out_file=None, overwrite=True):
- self.nlib = nlib
- self._overwrite = overwrite
-
- self._nl = False
- self._block_end = False
- self._silent_block = False
- self._ind = 0
- self._ifdef_block = None
- if out_file is None:
- self._out = os.sys.stdout
- else:
- self._out = tempfile.NamedTemporaryFile('w+')
- self._out_file = out_file
-
- def __del__(self):
- self.close_out_file()
-
- def close_out_file(self):
- if self._out == os.sys.stdout:
- return
- # Avoid modifying the file if contents didn't change
- self._out.flush()
- if not self._overwrite and os.path.isfile(self._out_file):
- if filecmp.cmp(self._out.name, self._out_file, shallow=False):
- return
- with open(self._out_file, 'w+') as out_file:
- self._out.seek(0)
- shutil.copyfileobj(self._out, out_file)
- self._out.close()
- self._out = os.sys.stdout
-
- @classmethod
- def _is_cond(cls, line):
- return line.startswith('if') or line.startswith('while') or line.startswith('for')
-
- def p(self, line, add_ind=0):
- if self._block_end:
- self._block_end = False
- if line.startswith('else'):
- line = '} ' + line
- else:
- self._out.write('\t' * self._ind + '}\n')
-
- if self._nl:
- self._out.write('\n')
- self._nl = False
-
- ind = self._ind
- if line[-1] == ':':
- ind -= 1
- if self._silent_block:
- ind += 1
- self._silent_block = line.endswith(')') and CodeWriter._is_cond(line)
- if line[0] == '#':
- ind = 0
- if add_ind:
- ind += add_ind
- self._out.write('\t' * ind + line + '\n')
-
- def nl(self):
- self._nl = True
-
- def block_start(self, line=''):
- if line:
- line = line + ' '
- self.p(line + '{')
- self._ind += 1
-
- def block_end(self, line=''):
- if line and line[0] not in {';', ','}:
- line = ' ' + line
- self._ind -= 1
- self._nl = False
- if not line:
- # Delay printing closing bracket in case "else" comes next
- if self._block_end:
- self._out.write('\t' * (self._ind + 1) + '}\n')
- self._block_end = True
- else:
- self.p('}' + line)
-
- def write_doc_line(self, doc, indent=True):
- words = doc.split()
- line = ' *'
- for word in words:
- if len(line) + len(word) >= 79:
- self.p(line)
- line = ' *'
- if indent:
- line += ' '
- line += ' ' + word
- self.p(line)
-
- def write_func_prot(self, qual_ret, name, args=None, doc=None, suffix=''):
- if not args:
- args = ['void']
-
- if doc:
- self.p('/*')
- self.p(' * ' + doc)
- self.p(' */')
-
- oneline = qual_ret
- if qual_ret[-1] != '*':
- oneline += ' '
- oneline += f"{name}({', '.join(args)}){suffix}"
-
- if len(oneline) < 80:
- self.p(oneline)
- return
-
- v = qual_ret
- if len(v) > 3:
- self.p(v)
- v = ''
- elif qual_ret[-1] != '*':
- v += ' '
- v += name + '('
- ind = '\t' * (len(v) // 8) + ' ' * (len(v) % 8)
- delta_ind = len(v) - len(ind)
- v += args[0]
- i = 1
- while i < len(args):
- next_len = len(v) + len(args[i])
- if v[0] == '\t':
- next_len += delta_ind
- if next_len > 76:
- self.p(v + ',')
- v = ind
- else:
- v += ', '
- v += args[i]
- i += 1
- self.p(v + ')' + suffix)
-
- def write_func_lvar(self, local_vars):
- if not local_vars:
- return
-
- if type(local_vars) is str:
- local_vars = [local_vars]
-
- local_vars.sort(key=len, reverse=True)
- for var in local_vars:
- self.p(var)
- self.nl()
-
- def write_func(self, qual_ret, name, body, args=None, local_vars=None):
- self.write_func_prot(qual_ret=qual_ret, name=name, args=args)
- self.write_func_lvar(local_vars=local_vars)
-
- self.block_start()
- for line in body:
- self.p(line)
- self.block_end()
-
- def writes_defines(self, defines):
- longest = 0
- for define in defines:
- if len(define[0]) > longest:
- longest = len(define[0])
- longest = ((longest + 8) // 8) * 8
- for define in defines:
- line = '#define ' + define[0]
- line += '\t' * ((longest - len(define[0]) + 7) // 8)
- if type(define[1]) is int:
- line += str(define[1])
- elif type(define[1]) is str:
- line += '"' + define[1] + '"'
- self.p(line)
-
- def write_struct_init(self, members):
- longest = max([len(x[0]) for x in members])
- longest += 1 # because we prepend a .
- longest = ((longest + 8) // 8) * 8
- for one in members:
- line = '.' + one[0]
- line += '\t' * ((longest - len(one[0]) - 1 + 7) // 8)
- line += '= ' + str(one[1]) + ','
- self.p(line)
-
- def ifdef_block(self, config):
- config_option = None
- if config:
- config_option = 'CONFIG_' + c_upper(config)
- if self._ifdef_block == config_option:
- return
-
- if self._ifdef_block:
- self.p('#endif /* ' + self._ifdef_block + ' */')
- if config_option:
- self.p('#ifdef ' + config_option)
- self._ifdef_block = config_option
-
-
-scalars = {'u8', 'u16', 'u32', 'u64', 's32', 's64', 'uint', 'sint'}
-
-direction_to_suffix = {
- 'reply': '_rsp',
- 'request': '_req',
- '': ''
-}
-
-op_mode_to_wrapper = {
- 'do': '',
- 'dump': '_list',
- 'notify': '_ntf',
- 'event': '',
-}
-
-_C_KW = {
- 'auto',
- 'bool',
- 'break',
- 'case',
- 'char',
- 'const',
- 'continue',
- 'default',
- 'do',
- 'double',
- 'else',
- 'enum',
- 'extern',
- 'float',
- 'for',
- 'goto',
- 'if',
- 'inline',
- 'int',
- 'long',
- 'register',
- 'return',
- 'short',
- 'signed',
- 'sizeof',
- 'static',
- 'struct',
- 'switch',
- 'typedef',
- 'union',
- 'unsigned',
- 'void',
- 'volatile',
- 'while'
-}
-
-
-def rdir(direction):
- if direction == 'reply':
- return 'request'
- if direction == 'request':
- return 'reply'
- return direction
-
-
-def op_prefix(ri, direction, deref=False):
- suffix = f"_{ri.type_name}"
-
- if not ri.op_mode or ri.op_mode == 'do':
- suffix += f"{direction_to_suffix[direction]}"
- else:
- if direction == 'request':
- suffix += '_req_dump'
- else:
- if ri.type_consistent:
- if deref:
- suffix += f"{direction_to_suffix[direction]}"
- else:
- suffix += op_mode_to_wrapper[ri.op_mode]
- else:
- suffix += '_rsp'
- suffix += '_dump' if deref else '_list'
-
- return f"{ri.family.c_name}{suffix}"
-
-
-def type_name(ri, direction, deref=False):
- return f"struct {op_prefix(ri, direction, deref=deref)}"
-
-
-def print_prototype(ri, direction, terminate=True, doc=None):
- suffix = ';' if terminate else ''
-
- fname = ri.op.render_name
- if ri.op_mode == 'dump':
- fname += '_dump'
-
- args = ['struct ynl_sock *ys']
- if 'request' in ri.op[ri.op_mode]:
- args.append(f"{type_name(ri, direction)} *" + f"{direction_to_suffix[direction][1:]}")
-
- ret = 'int'
- if 'reply' in ri.op[ri.op_mode]:
- ret = f"{type_name(ri, rdir(direction))} *"
-
- ri.cw.write_func_prot(ret, fname, args, doc=doc, suffix=suffix)
-
-
-def print_req_prototype(ri):
- print_prototype(ri, "request", doc=ri.op['doc'])
-
-
-def print_dump_prototype(ri):
- print_prototype(ri, "request")
-
-
-def put_typol_fwd(cw, struct):
- cw.p(f'extern const struct ynl_policy_nest {struct.render_name}_nest;')
-
-
-def put_typol(cw, struct):
- type_max = struct.attr_set.max_name
- cw.block_start(line=f'const struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
-
- for _, arg in struct.member_list():
- arg.attr_typol(cw)
-
- cw.block_end(line=';')
- cw.nl()
-
- cw.block_start(line=f'const struct ynl_policy_nest {struct.render_name}_nest =')
- cw.p(f'.max_attr = {type_max},')
- cw.p(f'.table = {struct.render_name}_policy,')
- cw.block_end(line=';')
- cw.nl()
-
-
-def _put_enum_to_str_helper(cw, render_name, map_name, arg_name, enum=None):
- args = [f'int {arg_name}']
- if enum:
- args = [enum.user_type + ' ' + arg_name]
- cw.write_func_prot('const char *', f'{render_name}_str', args)
- cw.block_start()
- if enum and enum.type == 'flags':
- cw.p(f'{arg_name} = ffs({arg_name}) - 1;')
- cw.p(f'if ({arg_name} < 0 || {arg_name} >= (int)YNL_ARRAY_SIZE({map_name}))')
- cw.p('return NULL;')
- cw.p(f'return {map_name}[{arg_name}];')
- cw.block_end()
- cw.nl()
-
-
-def put_op_name_fwd(family, cw):
- cw.write_func_prot('const char *', f'{family.c_name}_op_str', ['int op'], suffix=';')
-
-
-def put_op_name(family, cw):
- map_name = f'{family.c_name}_op_strmap'
- cw.block_start(line=f"static const char * const {map_name}[] =")
- for op_name, op in family.msgs.items():
- if op.rsp_value:
- # Make sure we don't add duplicated entries, if multiple commands
- # produce the same response in legacy families.
- if family.rsp_by_value[op.rsp_value] != op:
- cw.p(f'// skip "{op_name}", duplicate reply value')
- continue
-
- if op.req_value == op.rsp_value:
- cw.p(f'[{op.enum_name}] = "{op_name}",')
- else:
- cw.p(f'[{op.rsp_value}] = "{op_name}",')
- cw.block_end(line=';')
- cw.nl()
-
- _put_enum_to_str_helper(cw, family.c_name + '_op', map_name, 'op')
-
-
-def put_enum_to_str_fwd(family, cw, enum):
- args = [enum.user_type + ' value']
- cw.write_func_prot('const char *', f'{enum.render_name}_str', args, suffix=';')
-
-
-def put_enum_to_str(family, cw, enum):
- map_name = f'{enum.render_name}_strmap'
- cw.block_start(line=f"static const char * const {map_name}[] =")
- for entry in enum.entries.values():
- cw.p(f'[{entry.value}] = "{entry.name}",')
- cw.block_end(line=';')
- cw.nl()
-
- _put_enum_to_str_helper(cw, enum.render_name, map_name, 'value', enum=enum)
-
-
-def put_req_nested_prototype(ri, struct, suffix=';'):
- func_args = ['struct nlmsghdr *nlh',
- 'unsigned int attr_type',
- f'{struct.ptr_name}obj']
-
- ri.cw.write_func_prot('int', f'{struct.render_name}_put', func_args,
- suffix=suffix)
-
-
-def put_req_nested(ri, struct):
- put_req_nested_prototype(ri, struct, suffix='')
- ri.cw.block_start()
- ri.cw.write_func_lvar('struct nlattr *nest;')
-
- ri.cw.p("nest = ynl_attr_nest_start(nlh, attr_type);")
-
- for _, arg in struct.member_list():
- arg.attr_put(ri, "obj")
-
- ri.cw.p("ynl_attr_nest_end(nlh, nest);")
-
- ri.cw.nl()
- ri.cw.p('return 0;')
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def _multi_parse(ri, struct, init_lines, local_vars):
- if struct.nested:
- iter_line = "ynl_attr_for_each_nested(attr, nested)"
- else:
- if ri.fixed_hdr:
- local_vars += ['void *hdr;']
- iter_line = "ynl_attr_for_each(attr, nlh, yarg->ys->family->hdr_len)"
-
- array_nests = set()
- multi_attrs = set()
- needs_parg = False
- for arg, aspec in struct.member_list():
- if aspec['type'] == 'indexed-array' and 'sub-type' in aspec:
- if aspec["sub-type"] == 'nest':
- local_vars.append(f'const struct nlattr *attr_{aspec.c_name};')
- array_nests.add(arg)
- else:
- raise Exception(f'Not supported sub-type {aspec["sub-type"]}')
- if 'multi-attr' in aspec:
- multi_attrs.add(arg)
- needs_parg |= 'nested-attributes' in aspec
- if array_nests or multi_attrs:
- local_vars.append('int i;')
- if needs_parg:
- local_vars.append('struct ynl_parse_arg parg;')
- init_lines.append('parg.ys = yarg->ys;')
-
- all_multi = array_nests | multi_attrs
-
- for anest in sorted(all_multi):
- local_vars.append(f"unsigned int n_{struct[anest].c_name} = 0;")
-
- ri.cw.block_start()
- ri.cw.write_func_lvar(local_vars)
-
- for line in init_lines:
- ri.cw.p(line)
- ri.cw.nl()
-
- for arg in struct.inherited:
- ri.cw.p(f'dst->{arg} = {arg};')
-
- if ri.fixed_hdr:
- ri.cw.p('hdr = ynl_nlmsg_data_offset(nlh, sizeof(struct genlmsghdr));')
- ri.cw.p(f"memcpy(&dst->_hdr, hdr, sizeof({ri.fixed_hdr}));")
- for anest in sorted(all_multi):
- aspec = struct[anest]
- ri.cw.p(f"if (dst->{aspec.c_name})")
- ri.cw.p(f'return ynl_error_parse(yarg, "attribute already present ({struct.attr_set.name}.{aspec.name})");')
-
- ri.cw.nl()
- ri.cw.block_start(line=iter_line)
- ri.cw.p('unsigned int type = ynl_attr_type(attr);')
- ri.cw.nl()
-
- first = True
- for _, arg in struct.member_list():
- good = arg.attr_get(ri, 'dst', first=first)
- # First may be 'unused' or 'pad', ignore those
- first &= not good
-
- ri.cw.block_end()
- ri.cw.nl()
-
- for anest in sorted(array_nests):
- aspec = struct[anest]
-
- ri.cw.block_start(line=f"if (n_{aspec.c_name})")
- ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
- ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
- ri.cw.p('i = 0;')
- ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
- ri.cw.block_start(line=f"ynl_attr_for_each_nested(attr, attr_{aspec.c_name})")
- ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
- ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, ynl_attr_type(attr)))")
- ri.cw.p('return YNL_PARSE_CB_ERROR;')
- ri.cw.p('i++;')
- ri.cw.block_end()
- ri.cw.block_end()
- ri.cw.nl()
-
- for anest in sorted(multi_attrs):
- aspec = struct[anest]
- ri.cw.block_start(line=f"if (n_{aspec.c_name})")
- ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
- ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};")
- ri.cw.p('i = 0;')
- if 'nested-attributes' in aspec:
- ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
- ri.cw.block_start(line=iter_line)
- ri.cw.block_start(line=f"if (ynl_attr_type(attr) == {aspec.enum_name})")
- if 'nested-attributes' in aspec:
- ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
- ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr))")
- ri.cw.p('return YNL_PARSE_CB_ERROR;')
- elif aspec.type in scalars:
- ri.cw.p(f"dst->{aspec.c_name}[i] = ynl_attr_get_{aspec.type}(attr);")
- else:
- raise Exception('Nest parsing type not supported yet')
- ri.cw.p('i++;')
- ri.cw.block_end()
- ri.cw.block_end()
- ri.cw.block_end()
- ri.cw.nl()
-
- if struct.nested:
- ri.cw.p('return 0;')
- else:
- ri.cw.p('return YNL_PARSE_CB_OK;')
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def parse_rsp_nested_prototype(ri, struct, suffix=';'):
- func_args = ['struct ynl_parse_arg *yarg',
- 'const struct nlattr *nested']
- for arg in struct.inherited:
- func_args.append('__u32 ' + arg)
-
- ri.cw.write_func_prot('int', f'{struct.render_name}_parse', func_args,
- suffix=suffix)
-
-
-def parse_rsp_nested(ri, struct):
- parse_rsp_nested_prototype(ri, struct, suffix='')
-
- local_vars = ['const struct nlattr *attr;',
- f'{struct.ptr_name}dst = yarg->data;']
- init_lines = []
-
- if struct.member_list():
- _multi_parse(ri, struct, init_lines, local_vars)
- else:
- # Empty nest
- ri.cw.block_start()
- ri.cw.p('return 0;')
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def parse_rsp_msg(ri, deref=False):
- if 'reply' not in ri.op[ri.op_mode] and ri.op_mode != 'event':
- return
-
- func_args = ['const struct nlmsghdr *nlh',
- 'struct ynl_parse_arg *yarg']
-
- local_vars = [f'{type_name(ri, "reply", deref=deref)} *dst;',
- 'const struct nlattr *attr;']
- init_lines = ['dst = yarg->data;']
-
- ri.cw.write_func_prot('int', f'{op_prefix(ri, "reply", deref=deref)}_parse', func_args)
-
- if ri.struct["reply"].member_list():
- _multi_parse(ri, ri.struct["reply"], init_lines, local_vars)
- else:
- # Empty reply
- ri.cw.block_start()
- ri.cw.p('return YNL_PARSE_CB_OK;')
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def print_req(ri):
- ret_ok = '0'
- ret_err = '-1'
- direction = "request"
- local_vars = ['struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };',
- 'struct nlmsghdr *nlh;',
- 'int err;']
-
- if 'reply' in ri.op[ri.op_mode]:
- ret_ok = 'rsp'
- ret_err = 'NULL'
- local_vars += [f'{type_name(ri, rdir(direction))} *rsp;']
-
- if ri.fixed_hdr:
- local_vars += ['size_t hdr_len;',
- 'void *hdr;']
-
- print_prototype(ri, direction, terminate=False)
- ri.cw.block_start()
- ri.cw.write_func_lvar(local_vars)
-
- ri.cw.p(f"nlh = ynl_gemsg_start_req(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
-
- ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
- if 'reply' in ri.op[ri.op_mode]:
- ri.cw.p(f"yrs.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
- ri.cw.nl()
-
- if ri.fixed_hdr:
- ri.cw.p("hdr_len = sizeof(req->_hdr);")
- ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
- ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
- ri.cw.nl()
-
- for _, attr in ri.struct["request"].member_list():
- attr.attr_put(ri, "req")
- ri.cw.nl()
-
- if 'reply' in ri.op[ri.op_mode]:
- ri.cw.p('rsp = calloc(1, sizeof(*rsp));')
- ri.cw.p('yrs.yarg.data = rsp;')
- ri.cw.p(f"yrs.cb = {op_prefix(ri, 'reply')}_parse;")
- if ri.op.value is not None:
- ri.cw.p(f'yrs.rsp_cmd = {ri.op.enum_name};')
- else:
- ri.cw.p(f'yrs.rsp_cmd = {ri.op.rsp_value};')
- ri.cw.nl()
- ri.cw.p("err = ynl_exec(ys, nlh, &yrs);")
- ri.cw.p('if (err < 0)')
- if 'reply' in ri.op[ri.op_mode]:
- ri.cw.p('goto err_free;')
- else:
- ri.cw.p('return -1;')
- ri.cw.nl()
-
- ri.cw.p(f"return {ret_ok};")
- ri.cw.nl()
-
- if 'reply' in ri.op[ri.op_mode]:
- ri.cw.p('err_free:')
- ri.cw.p(f"{call_free(ri, rdir(direction), 'rsp')}")
- ri.cw.p(f"return {ret_err};")
-
- ri.cw.block_end()
-
-
-def print_dump(ri):
- direction = "request"
- print_prototype(ri, direction, terminate=False)
- ri.cw.block_start()
- local_vars = ['struct ynl_dump_state yds = {};',
- 'struct nlmsghdr *nlh;',
- 'int err;']
-
- if ri.fixed_hdr:
- local_vars += ['size_t hdr_len;',
- 'void *hdr;']
-
- ri.cw.write_func_lvar(local_vars)
-
- ri.cw.p('yds.yarg.ys = ys;')
- ri.cw.p(f"yds.yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
- ri.cw.p("yds.yarg.data = NULL;")
- ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
- ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
- if ri.op.value is not None:
- ri.cw.p(f'yds.rsp_cmd = {ri.op.enum_name};')
- else:
- ri.cw.p(f'yds.rsp_cmd = {ri.op.rsp_value};')
- ri.cw.nl()
- ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
-
- if ri.fixed_hdr:
- ri.cw.p("hdr_len = sizeof(req->_hdr);")
- ri.cw.p("hdr = ynl_nlmsg_put_extra_header(nlh, hdr_len);")
- ri.cw.p("memcpy(hdr, &req->_hdr, hdr_len);")
- ri.cw.nl()
-
- if "request" in ri.op[ri.op_mode]:
- ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
- ri.cw.nl()
- for _, attr in ri.struct["request"].member_list():
- attr.attr_put(ri, "req")
- ri.cw.nl()
-
- ri.cw.p('err = ynl_exec_dump(ys, nlh, &yds);')
- ri.cw.p('if (err < 0)')
- ri.cw.p('goto free_list;')
- ri.cw.nl()
-
- ri.cw.p('return yds.first;')
- ri.cw.nl()
- ri.cw.p('free_list:')
- ri.cw.p(call_free(ri, rdir(direction), 'yds.first'))
- ri.cw.p('return NULL;')
- ri.cw.block_end()
-
-
-def call_free(ri, direction, var):
- return f"{op_prefix(ri, direction)}_free({var});"
-
-
-def free_arg_name(direction):
- if direction:
- return direction_to_suffix[direction][1:]
- return 'obj'
-
-
-def print_alloc_wrapper(ri, direction):
- name = op_prefix(ri, direction)
- ri.cw.write_func_prot(f'static inline struct {name} *', f"{name}_alloc", [f"void"])
- ri.cw.block_start()
- ri.cw.p(f'return calloc(1, sizeof(struct {name}));')
- ri.cw.block_end()
-
-
-def print_free_prototype(ri, direction, suffix=';'):
- name = op_prefix(ri, direction)
- struct_name = name
- if ri.type_name_conflict:
- struct_name += '_'
- arg = free_arg_name(direction)
- ri.cw.write_func_prot('void', f"{name}_free", [f"struct {struct_name} *{arg}"], suffix=suffix)
-
-
-def _print_type(ri, direction, struct):
- suffix = f'_{ri.type_name}{direction_to_suffix[direction]}'
- if not direction and ri.type_name_conflict:
- suffix += '_'
-
- if ri.op_mode == 'dump':
- suffix += '_dump'
-
- ri.cw.block_start(line=f"struct {ri.family.c_name}{suffix}")
-
- if ri.fixed_hdr:
- ri.cw.p(ri.fixed_hdr + ' _hdr;')
- ri.cw.nl()
-
- meta_started = False
- for _, attr in struct.member_list():
- for type_filter in ['len', 'bit']:
- line = attr.presence_member(ri.ku_space, type_filter)
- if line:
- if not meta_started:
- ri.cw.block_start(line=f"struct")
- meta_started = True
- ri.cw.p(line)
- if meta_started:
- ri.cw.block_end(line='_present;')
- ri.cw.nl()
-
- for arg in struct.inherited:
- ri.cw.p(f"__u32 {arg};")
-
- for _, attr in struct.member_list():
- attr.struct_member(ri)
-
- ri.cw.block_end(line=';')
- ri.cw.nl()
-
-
-def print_type(ri, direction):
- _print_type(ri, direction, ri.struct[direction])
-
-
-def print_type_full(ri, struct):
- _print_type(ri, "", struct)
-
-
-def print_type_helpers(ri, direction, deref=False):
- print_free_prototype(ri, direction)
- ri.cw.nl()
-
- if ri.ku_space == 'user' and direction == 'request':
- for _, attr in ri.struct[direction].member_list():
- attr.setter(ri, ri.attr_set, direction, deref=deref)
- ri.cw.nl()
-
-
-def print_req_type_helpers(ri):
- if len(ri.struct["request"].attr_list) == 0:
- return
- print_alloc_wrapper(ri, "request")
- print_type_helpers(ri, "request")
-
-
-def print_rsp_type_helpers(ri):
- if 'reply' not in ri.op[ri.op_mode]:
- return
- print_type_helpers(ri, "reply")
-
-
-def print_parse_prototype(ri, direction, terminate=True):
- suffix = "_rsp" if direction == "reply" else "_req"
- term = ';' if terminate else ''
-
- ri.cw.write_func_prot('void', f"{ri.op.render_name}{suffix}_parse",
- ['const struct nlattr **tb',
- f"struct {ri.op.render_name}{suffix} *req"],
- suffix=term)
-
-
-def print_req_type(ri):
- if len(ri.struct["request"].attr_list) == 0:
- return
- print_type(ri, "request")
-
-
-def print_req_free(ri):
- if 'request' not in ri.op[ri.op_mode]:
- return
- _free_type(ri, 'request', ri.struct['request'])
-
-
-def print_rsp_type(ri):
- if (ri.op_mode == 'do' or ri.op_mode == 'dump') and 'reply' in ri.op[ri.op_mode]:
- direction = 'reply'
- elif ri.op_mode == 'event':
- direction = 'reply'
- else:
- return
- print_type(ri, direction)
-
-
-def print_wrapped_type(ri):
- ri.cw.block_start(line=f"{type_name(ri, 'reply')}")
- if ri.op_mode == 'dump':
- ri.cw.p(f"{type_name(ri, 'reply')} *next;")
- elif ri.op_mode == 'notify' or ri.op_mode == 'event':
- ri.cw.p('__u16 family;')
- ri.cw.p('__u8 cmd;')
- ri.cw.p('struct ynl_ntf_base_type *next;')
- ri.cw.p(f"void (*free)({type_name(ri, 'reply')} *ntf);")
- ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__((aligned(8)));")
- ri.cw.block_end(line=';')
- ri.cw.nl()
- print_free_prototype(ri, 'reply')
- ri.cw.nl()
-
-
-def _free_type_members_iter(ri, struct):
- for _, attr in struct.member_list():
- if attr.free_needs_iter():
- ri.cw.p('unsigned int i;')
- ri.cw.nl()
- break
-
-
-def _free_type_members(ri, var, struct, ref=''):
- for _, attr in struct.member_list():
- attr.free(ri, var, ref)
-
-
-def _free_type(ri, direction, struct):
- var = free_arg_name(direction)
-
- print_free_prototype(ri, direction, suffix='')
- ri.cw.block_start()
- _free_type_members_iter(ri, struct)
- _free_type_members(ri, var, struct)
- if direction:
- ri.cw.p(f'free({var});')
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def free_rsp_nested_prototype(ri):
- print_free_prototype(ri, "")
-
-
-def free_rsp_nested(ri, struct):
- _free_type(ri, "", struct)
-
-
-def print_rsp_free(ri):
- if 'reply' not in ri.op[ri.op_mode]:
- return
- _free_type(ri, 'reply', ri.struct['reply'])
-
-
-def print_dump_type_free(ri):
- sub_type = type_name(ri, 'reply')
-
- print_free_prototype(ri, 'reply', suffix='')
- ri.cw.block_start()
- ri.cw.p(f"{sub_type} *next = rsp;")
- ri.cw.nl()
- ri.cw.block_start(line='while ((void *)next != YNL_LIST_END)')
- _free_type_members_iter(ri, ri.struct['reply'])
- ri.cw.p('rsp = next;')
- ri.cw.p('next = rsp->next;')
- ri.cw.nl()
-
- _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
- ri.cw.p(f'free(rsp);')
- ri.cw.block_end()
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def print_ntf_type_free(ri):
- print_free_prototype(ri, 'reply', suffix='')
- ri.cw.block_start()
- _free_type_members_iter(ri, ri.struct['reply'])
- _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
- ri.cw.p(f'free(rsp);')
- ri.cw.block_end()
- ri.cw.nl()
-
-
-def print_req_policy_fwd(cw, struct, ri=None, terminate=True):
- if terminate and ri and policy_should_be_static(struct.family):
- return
-
- if terminate:
- prefix = 'extern '
- else:
- if ri and policy_should_be_static(struct.family):
- prefix = 'static '
- else:
- prefix = ''
-
- suffix = ';' if terminate else ' = {'
-
- max_attr = struct.attr_max_val
- if ri:
- name = ri.op.render_name
- if ri.op.dual_policy:
- name += '_' + ri.op_mode
- else:
- name = struct.render_name
- cw.p(f"{prefix}const struct nla_policy {name}_nl_policy[{max_attr.enum_name} + 1]{suffix}")
-
-
-def print_req_policy(cw, struct, ri=None):
- if ri and ri.op:
- cw.ifdef_block(ri.op.get('config-cond', None))
- print_req_policy_fwd(cw, struct, ri=ri, terminate=False)
- for _, arg in struct.member_list():
- arg.attr_policy(cw)
- cw.p("};")
- cw.ifdef_block(None)
- cw.nl()
-
-
-def kernel_can_gen_family_struct(family):
- return family.proto == 'genetlink'
-
-
-def policy_should_be_static(family):
- return family.kernel_policy == 'split' or kernel_can_gen_family_struct(family)
-
-
-def print_kernel_policy_ranges(family, cw):
- first = True
- for _, attr_set in family.attr_sets.items():
- if attr_set.subset_of:
- continue
-
- for _, attr in attr_set.items():
- if not attr.request:
- continue
- if 'full-range' not in attr.checks:
- continue
-
- if first:
- cw.p('/* Integer value ranges */')
- first = False
-
- sign = '' if attr.type[0] == 'u' else '_signed'
- suffix = 'ULL' if attr.type[0] == 'u' else 'LL'
- cw.block_start(line=f'static const struct netlink_range_validation{sign} {c_lower(attr.enum_name)}_range =')
- members = []
- if 'min' in attr.checks:
- members.append(('min', attr.get_limit_str('min', suffix=suffix)))
- if 'max' in attr.checks:
- members.append(('max', attr.get_limit_str('max', suffix=suffix)))
- cw.write_struct_init(members)
- cw.block_end(line=';')
- cw.nl()
-
-
-def print_kernel_op_table_fwd(family, cw, terminate):
- exported = not kernel_can_gen_family_struct(family)
-
- if not terminate or exported:
- cw.p(f"/* Ops table for {family.ident_name} */")
-
- pol_to_struct = {'global': 'genl_small_ops',
- 'per-op': 'genl_ops',
- 'split': 'genl_split_ops'}
- struct_type = pol_to_struct[family.kernel_policy]
-
- if not exported:
- cnt = ""
- elif family.kernel_policy == 'split':
- cnt = 0
- for op in family.ops.values():
- if 'do' in op:
- cnt += 1
- if 'dump' in op:
- cnt += 1
- else:
- cnt = len(family.ops)
-
- qual = 'static const' if not exported else 'const'
- line = f"{qual} struct {struct_type} {family.c_name}_nl_ops[{cnt}]"
- if terminate:
- cw.p(f"extern {line};")
- else:
- cw.block_start(line=line + ' =')
-
- if not terminate:
- return
-
- cw.nl()
- for name in family.hooks['pre']['do']['list']:
- cw.write_func_prot('int', c_lower(name),
- ['const struct genl_split_ops *ops',
- 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
- for name in family.hooks['post']['do']['list']:
- cw.write_func_prot('void', c_lower(name),
- ['const struct genl_split_ops *ops',
- 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
- for name in family.hooks['pre']['dump']['list']:
- cw.write_func_prot('int', c_lower(name),
- ['struct netlink_callback *cb'], suffix=';')
- for name in family.hooks['post']['dump']['list']:
- cw.write_func_prot('int', c_lower(name),
- ['struct netlink_callback *cb'], suffix=';')
-
- cw.nl()
-
- for op_name, op in family.ops.items():
- if op.is_async:
- continue
-
- if 'do' in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-doit")
- cw.write_func_prot('int', name,
- ['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
-
- if 'dump' in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit")
- cw.write_func_prot('int', name,
- ['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
- cw.nl()
-
-
-def print_kernel_op_table_hdr(family, cw):
- print_kernel_op_table_fwd(family, cw, terminate=True)
-
-
-def print_kernel_op_table(family, cw):
- print_kernel_op_table_fwd(family, cw, terminate=False)
- if family.kernel_policy == 'global' or family.kernel_policy == 'per-op':
- for op_name, op in family.ops.items():
- if op.is_async:
- continue
-
- cw.ifdef_block(op.get('config-cond', None))
- cw.block_start()
- members = [('cmd', op.enum_name)]
- if 'dont-validate' in op:
- members.append(('validate',
- ' | '.join([c_upper('genl-dont-validate-' + x)
- for x in op['dont-validate']])), )
- for op_mode in ['do', 'dump']:
- if op_mode in op:
- name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
- members.append((op_mode + 'it', name))
- if family.kernel_policy == 'per-op':
- struct = Struct(family, op['attribute-set'],
- type_list=op['do']['request']['attributes'])
-
- name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
- members.append(('policy', name))
- members.append(('maxattr', struct.attr_max_val.enum_name))
- if 'flags' in op:
- members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in op['flags']])))
- cw.write_struct_init(members)
- cw.block_end(line=',')
- elif family.kernel_policy == 'split':
- cb_names = {'do': {'pre': 'pre_doit', 'post': 'post_doit'},
- 'dump': {'pre': 'start', 'post': 'done'}}
-
- for op_name, op in family.ops.items():
- for op_mode in ['do', 'dump']:
- if op.is_async or op_mode not in op:
- continue
-
- cw.ifdef_block(op.get('config-cond', None))
- cw.block_start()
- members = [('cmd', op.enum_name)]
- if 'dont-validate' in op:
- dont_validate = []
- for x in op['dont-validate']:
- if op_mode == 'do' and x in ['dump', 'dump-strict']:
- continue
- if op_mode == "dump" and x == 'strict':
- continue
- dont_validate.append(x)
-
- if dont_validate:
- members.append(('validate',
- ' | '.join([c_upper('genl-dont-validate-' + x)
- for x in dont_validate])), )
- name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it")
- if 'pre' in op[op_mode]:
- members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
- members.append((op_mode + 'it', name))
- if 'post' in op[op_mode]:
- members.append((cb_names[op_mode]['post'], c_lower(op[op_mode]['post'])))
- if 'request' in op[op_mode]:
- struct = Struct(family, op['attribute-set'],
- type_list=op[op_mode]['request']['attributes'])
-
- if op.dual_policy:
- name = c_lower(f"{family.ident_name}-{op_name}-{op_mode}-nl-policy")
- else:
- name = c_lower(f"{family.ident_name}-{op_name}-nl-policy")
- members.append(('policy', name))
- members.append(('maxattr', struct.attr_max_val.enum_name))
- flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode]
- members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in flags])))
- cw.write_struct_init(members)
- cw.block_end(line=',')
- cw.ifdef_block(None)
-
- cw.block_end(line=';')
- cw.nl()
-
-
-def print_kernel_mcgrp_hdr(family, cw):
- if not family.mcgrps['list']:
- return
-
- cw.block_start('enum')
- for grp in family.mcgrps['list']:
- grp_id = c_upper(f"{family.ident_name}-nlgrp-{grp['name']},")
- cw.p(grp_id)
- cw.block_end(';')
- cw.nl()
-
-
-def print_kernel_mcgrp_src(family, cw):
- if not family.mcgrps['list']:
- return
-
- cw.block_start('static const struct genl_multicast_group ' + family.c_name + '_nl_mcgrps[] =')
- for grp in family.mcgrps['list']:
- name = grp['name']
- grp_id = c_upper(f"{family.ident_name}-nlgrp-{name}")
- cw.p('[' + grp_id + '] = { "' + name + '", },')
- cw.block_end(';')
- cw.nl()
-
-
-def print_kernel_family_struct_hdr(family, cw):
- if not kernel_can_gen_family_struct(family):
- return
-
- cw.p(f"extern struct genl_family {family.c_name}_nl_family;")
- cw.nl()
- if 'sock-priv' in family.kernel_family:
- cw.p(f'void {family.c_name}_nl_sock_priv_init({family.kernel_family["sock-priv"]} *priv);')
- cw.p(f'void {family.c_name}_nl_sock_priv_destroy({family.kernel_family["sock-priv"]} *priv);')
- cw.nl()
-
-
-def print_kernel_family_struct_src(family, cw):
- if not kernel_can_gen_family_struct(family):
- return
-
- cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
- cw.p('.name\t\t= ' + family.fam_key + ',')
- cw.p('.version\t= ' + family.ver_key + ',')
- cw.p('.netnsok\t= true,')
- cw.p('.parallel_ops\t= true,')
- cw.p('.module\t\t= THIS_MODULE,')
- if family.kernel_policy == 'per-op':
- cw.p(f'.ops\t\t= {family.c_name}_nl_ops,')
- cw.p(f'.n_ops\t\t= ARRAY_SIZE({family.c_name}_nl_ops),')
- elif family.kernel_policy == 'split':
- cw.p(f'.split_ops\t= {family.c_name}_nl_ops,')
- cw.p(f'.n_split_ops\t= ARRAY_SIZE({family.c_name}_nl_ops),')
- if family.mcgrps['list']:
- cw.p(f'.mcgrps\t\t= {family.c_name}_nl_mcgrps,')
- cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
- if 'sock-priv' in family.kernel_family:
- cw.p(f'.sock_priv_size\t= sizeof({family.kernel_family["sock-priv"]}),')
- # Force cast here, actual helpers take pointer to the real type.
- cw.p(f'.sock_priv_init\t= (void *){family.c_name}_nl_sock_priv_init,')
- cw.p(f'.sock_priv_destroy = (void *){family.c_name}_nl_sock_priv_destroy,')
- cw.block_end(';')
-
-
-def uapi_enum_start(family, cw, obj, ckey='', enum_name='enum-name'):
- start_line = 'enum'
- if enum_name in obj:
- if obj[enum_name]:
- start_line = 'enum ' + c_lower(obj[enum_name])
- elif ckey and ckey in obj:
- start_line = 'enum ' + family.c_name + '_' + c_lower(obj[ckey])
- cw.block_start(line=start_line)
-
-
-def render_uapi_unified(family, cw, max_by_define, separate_ntf):
- max_name = c_upper(family.get('cmd-max-name', f"{family.op_prefix}MAX"))
- cnt_name = c_upper(family.get('cmd-cnt-name', f"__{family.op_prefix}MAX"))
- max_value = f"({cnt_name} - 1)"
-
- uapi_enum_start(family, cw, family['operations'], 'enum-name')
- val = 0
- for op in family.msgs.values():
- if separate_ntf and ('notify' in op or 'event' in op):
- continue
-
- suffix = ','
- if op.value != val:
- suffix = f" = {op.value},"
- val = op.value
- cw.p(op.enum_name + suffix)
- val += 1
- cw.nl()
- cw.p(cnt_name + ('' if max_by_define else ','))
- if not max_by_define:
- cw.p(f"{max_name} = {max_value}")
- cw.block_end(line=';')
- if max_by_define:
- cw.p(f"#define {max_name} {max_value}")
- cw.nl()
-
-
-def render_uapi_directional(family, cw, max_by_define):
- max_name = f"{family.op_prefix}USER_MAX"
- cnt_name = f"__{family.op_prefix}USER_CNT"
- max_value = f"({cnt_name} - 1)"
-
- cw.block_start(line='enum')
- cw.p(c_upper(f'{family.name}_MSG_USER_NONE = 0,'))
- val = 0
- for op in family.msgs.values():
- if 'do' in op and 'event' not in op:
- suffix = ','
- if op.value and op.value != val:
- suffix = f" = {op.value},"
- val = op.value
- cw.p(op.enum_name + suffix)
- val += 1
- cw.nl()
- cw.p(cnt_name + ('' if max_by_define else ','))
- if not max_by_define:
- cw.p(f"{max_name} = {max_value}")
- cw.block_end(line=';')
- if max_by_define:
- cw.p(f"#define {max_name} {max_value}")
- cw.nl()
-
- max_name = f"{family.op_prefix}KERNEL_MAX"
- cnt_name = f"__{family.op_prefix}KERNEL_CNT"
- max_value = f"({cnt_name} - 1)"
-
- cw.block_start(line='enum')
- cw.p(c_upper(f'{family.name}_MSG_KERNEL_NONE = 0,'))
- val = 0
- for op in family.msgs.values():
- if ('do' in op and 'reply' in op['do']) or 'notify' in op or 'event' in op:
- enum_name = op.enum_name
- if 'event' not in op and 'notify' not in op:
- enum_name = f'{enum_name}_REPLY'
-
- suffix = ','
- if op.value and op.value != val:
- suffix = f" = {op.value},"
- val = op.value
- cw.p(enum_name + suffix)
- val += 1
- cw.nl()
- cw.p(cnt_name + ('' if max_by_define else ','))
- if not max_by_define:
- cw.p(f"{max_name} = {max_value}")
- cw.block_end(line=';')
- if max_by_define:
- cw.p(f"#define {max_name} {max_value}")
- cw.nl()
-
-
-def render_uapi(family, cw):
- hdr_prot = f"_UAPI_LINUX_{c_upper(family.uapi_header_name)}_H"
- hdr_prot = hdr_prot.replace('/', '_')
- cw.p('#ifndef ' + hdr_prot)
- cw.p('#define ' + hdr_prot)
- cw.nl()
-
- defines = [(family.fam_key, family["name"]),
- (family.ver_key, family.get('version', 1))]
- cw.writes_defines(defines)
- cw.nl()
-
- defines = []
- for const in family['definitions']:
- if const['type'] != 'const':
- cw.writes_defines(defines)
- defines = []
- cw.nl()
-
- # Write kdoc for enum and flags (one day maybe also structs)
- if const['type'] == 'enum' or const['type'] == 'flags':
- enum = family.consts[const['name']]
-
- if enum.header:
- continue
-
- if enum.has_doc():
- if enum.has_entry_doc():
- cw.p('/**')
- doc = ''
- if 'doc' in enum:
- doc = ' - ' + enum['doc']
- cw.write_doc_line(enum.enum_name + doc)
- else:
- cw.p('/*')
- cw.write_doc_line(enum['doc'], indent=False)
- for entry in enum.entries.values():
- if entry.has_doc():
- doc = '@' + entry.c_name + ': ' + entry['doc']
- cw.write_doc_line(doc)
- cw.p(' */')
-
- uapi_enum_start(family, cw, const, 'name')
- name_pfx = const.get('name-prefix', f"{family.ident_name}-{const['name']}-")
- for entry in enum.entries.values():
- suffix = ','
- if entry.value_change:
- suffix = f" = {entry.user_value()}" + suffix
- cw.p(entry.c_name + suffix)
-
- if const.get('render-max', False):
- cw.nl()
- cw.p('/* private: */')
- if const['type'] == 'flags':
- max_name = c_upper(name_pfx + 'mask')
- max_val = f' = {enum.get_mask()},'
- cw.p(max_name + max_val)
- else:
- cnt_name = enum.enum_cnt_name
- max_name = c_upper(name_pfx + 'max')
- if not cnt_name:
- cnt_name = '__' + name_pfx + 'max'
- cw.p(c_upper(cnt_name) + ',')
- cw.p(max_name + ' = (' + c_upper(cnt_name) + ' - 1)')
- cw.block_end(line=';')
- cw.nl()
- elif const['type'] == 'const':
- defines.append([c_upper(family.get('c-define-name',
- f"{family.ident_name}-{const['name']}")),
- const['value']])
-
- if defines:
- cw.writes_defines(defines)
- cw.nl()
-
- max_by_define = family.get('max-by-define', False)
-
- for _, attr_set in family.attr_sets.items():
- if attr_set.subset_of:
- continue
-
- max_value = f"({attr_set.cnt_name} - 1)"
-
- val = 0
- uapi_enum_start(family, cw, attr_set.yaml, 'enum-name')
- for _, attr in attr_set.items():
- suffix = ','
- if attr.value != val:
- suffix = f" = {attr.value},"
- val = attr.value
- val += 1
- cw.p(attr.enum_name + suffix)
- if attr_set.items():
- cw.nl()
- cw.p(attr_set.cnt_name + ('' if max_by_define else ','))
- if not max_by_define:
- cw.p(f"{attr_set.max_name} = {max_value}")
- cw.block_end(line=';')
- if max_by_define:
- cw.p(f"#define {attr_set.max_name} {max_value}")
- cw.nl()
-
- # Commands
- separate_ntf = 'async-prefix' in family['operations']
-
- if family.msg_id_model == 'unified':
- render_uapi_unified(family, cw, max_by_define, separate_ntf)
- elif family.msg_id_model == 'directional':
- render_uapi_directional(family, cw, max_by_define)
- else:
- raise Exception(f'Unsupported message enum-model {family.msg_id_model}')
-
- if separate_ntf:
- uapi_enum_start(family, cw, family['operations'], enum_name='async-enum')
- for op in family.msgs.values():
- if separate_ntf and not ('notify' in op or 'event' in op):
- continue
-
- suffix = ','
- if 'value' in op:
- suffix = f" = {op['value']},"
- cw.p(op.enum_name + suffix)
- cw.block_end(line=';')
- cw.nl()
-
- # Multicast
- defines = []
- for grp in family.mcgrps['list']:
- name = grp['name']
- defines.append([c_upper(grp.get('c-define-name', f"{family.ident_name}-mcgrp-{name}")),
- f'{name}'])
- cw.nl()
- if defines:
- cw.writes_defines(defines)
- cw.nl()
-
- cw.p(f'#endif /* {hdr_prot} */')
-
-
-def _render_user_ntf_entry(ri, op):
- ri.cw.block_start(line=f"[{op.enum_name}] = ")
- ri.cw.p(f".alloc_sz\t= sizeof({type_name(ri, 'event')}),")
- ri.cw.p(f".cb\t\t= {op_prefix(ri, 'reply', deref=True)}_parse,")
- ri.cw.p(f".policy\t\t= &{ri.struct['reply'].render_name}_nest,")
- ri.cw.p(f".free\t\t= (void *){op_prefix(ri, 'notify')}_free,")
- ri.cw.block_end(line=',')
-
-
-def render_user_family(family, cw, prototype):
- symbol = f'const struct ynl_family ynl_{family.c_name}_family'
- if prototype:
- cw.p(f'extern {symbol};')
- return
-
- if family.ntfs:
- cw.block_start(line=f"static const struct ynl_ntf_info {family['name']}_ntf_info[] = ")
- for ntf_op_name, ntf_op in family.ntfs.items():
- if 'notify' in ntf_op:
- op = family.ops[ntf_op['notify']]
- ri = RenderInfo(cw, family, "user", op, "notify")
- elif 'event' in ntf_op:
- ri = RenderInfo(cw, family, "user", ntf_op, "event")
- else:
- raise Exception('Invalid notification ' + ntf_op_name)
- _render_user_ntf_entry(ri, ntf_op)
- for op_name, op in family.ops.items():
- if 'event' not in op:
- continue
- ri = RenderInfo(cw, family, "user", op, "event")
- _render_user_ntf_entry(ri, op)
- cw.block_end(line=";")
- cw.nl()
-
- cw.block_start(f'{symbol} = ')
- cw.p(f'.name\t\t= "{family.c_name}",')
- if family.fixed_header:
- cw.p(f'.hdr_len\t= sizeof(struct genlmsghdr) + sizeof(struct {c_lower(family.fixed_header)}),')
- else:
- cw.p('.hdr_len\t= sizeof(struct genlmsghdr),')
- if family.ntfs:
- cw.p(f".ntf_info\t= {family['name']}_ntf_info,")
- cw.p(f".ntf_info_size\t= YNL_ARRAY_SIZE({family['name']}_ntf_info),")
- cw.block_end(line=';')
-
-
-def family_contains_bitfield32(family):
- for _, attr_set in family.attr_sets.items():
- if attr_set.subset_of:
- continue
- for _, attr in attr_set.items():
- if attr.type == "bitfield32":
- return True
- return False
-
-
-def find_kernel_root(full_path):
- sub_path = ''
- while True:
- sub_path = os.path.join(os.path.basename(full_path), sub_path)
- full_path = os.path.dirname(full_path)
- maintainers = os.path.join(full_path, "MAINTAINERS")
- if os.path.exists(maintainers):
- return full_path, sub_path[:-1]
-
-
-def main():
- parser = argparse.ArgumentParser(description='Netlink simple parsing generator')
- parser.add_argument('--mode', dest='mode', type=str, required=True,
- choices=('user', 'kernel', 'uapi'))
- parser.add_argument('--spec', dest='spec', type=str, required=True)
- parser.add_argument('--header', dest='header', action='store_true', default=None)
- parser.add_argument('--source', dest='header', action='store_false')
- parser.add_argument('--user-header', nargs='+', default=[])
- parser.add_argument('--cmp-out', action='store_true', default=None,
- help='Do not overwrite the output file if the new output is identical to the old')
- parser.add_argument('--exclude-op', action='append', default=[])
- parser.add_argument('-o', dest='out_file', type=str, default=None)
- args = parser.parse_args()
-
- if args.header is None:
- parser.error("--header or --source is required")
-
- exclude_ops = [re.compile(expr) for expr in args.exclude_op]
-
- try:
- parsed = Family(args.spec, exclude_ops)
- if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
- print('Spec license:', parsed.license)
- print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
- os.sys.exit(1)
- except yaml.YAMLError as exc:
- print(exc)
- os.sys.exit(1)
- return
-
- cw = CodeWriter(BaseNlLib(), args.out_file, overwrite=(not args.cmp_out))
-
- _, spec_kernel = find_kernel_root(args.spec)
- if args.mode == 'uapi' or args.header:
- cw.p(f'/* SPDX-License-Identifier: {parsed.license} */')
- else:
- cw.p(f'// SPDX-License-Identifier: {parsed.license}')
- cw.p("/* Do not edit directly, auto-generated from: */")
- cw.p(f"/*\t{spec_kernel} */")
- cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
- if args.exclude_op or args.user_header:
- line = ''
- line += ' --user-header '.join([''] + args.user_header)
- line += ' --exclude-op '.join([''] + args.exclude_op)
- cw.p(f'/* YNL-ARG{line} */')
- cw.nl()
-
- if args.mode == 'uapi':
- render_uapi(parsed, cw)
- return
-
- hdr_prot = f"_LINUX_{parsed.c_name.upper()}_GEN_H"
- if args.header:
- cw.p('#ifndef ' + hdr_prot)
- cw.p('#define ' + hdr_prot)
- cw.nl()
-
- if args.out_file:
- hdr_file = os.path.basename(args.out_file[:-2]) + ".h"
- else:
- hdr_file = "generated_header_file.h"
-
- if args.mode == 'kernel':
- cw.p('#include <net/netlink.h>')
- cw.p('#include <net/genetlink.h>')
- cw.nl()
- if not args.header:
- if args.out_file:
- cw.p(f'#include "{hdr_file}"')
- cw.nl()
- headers = ['uapi/' + parsed.uapi_header]
- headers += parsed.kernel_family.get('headers', [])
- else:
- cw.p('#include <stdlib.h>')
- cw.p('#include <string.h>')
- if args.header:
- cw.p('#include <linux/types.h>')
- if family_contains_bitfield32(parsed):
- cw.p('#include <linux/netlink.h>')
- else:
- cw.p(f'#include "{hdr_file}"')
- cw.p('#include "ynl.h"')
- headers = []
- for definition in parsed['definitions']:
- if 'header' in definition:
- headers.append(definition['header'])
- if args.mode == 'user':
- headers.append(parsed.uapi_header)
- seen_header = []
- for one in headers:
- if one not in seen_header:
- cw.p(f"#include <{one}>")
- seen_header.append(one)
- cw.nl()
-
- if args.mode == "user":
- if not args.header:
- cw.p("#include <linux/genetlink.h>")
- cw.nl()
- for one in args.user_header:
- cw.p(f'#include "{one}"')
- else:
- cw.p('struct ynl_sock;')
- cw.nl()
- render_user_family(parsed, cw, True)
- cw.nl()
-
- if args.mode == "kernel":
- if args.header:
- for _, struct in sorted(parsed.pure_nested_structs.items()):
- if struct.request:
- cw.p('/* Common nested types */')
- break
- for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
- if struct.request:
- print_req_policy_fwd(cw, struct)
- cw.nl()
-
- if parsed.kernel_policy == 'global':
- cw.p(f"/* Global operation policy for {parsed.name} */")
-
- struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
- print_req_policy_fwd(cw, struct)
- cw.nl()
-
- if parsed.kernel_policy in {'per-op', 'split'}:
- for op_name, op in parsed.ops.items():
- if 'do' in op and 'event' not in op:
- ri = RenderInfo(cw, parsed, args.mode, op, "do")
- print_req_policy_fwd(cw, ri.struct['request'], ri=ri)
- cw.nl()
-
- print_kernel_op_table_hdr(parsed, cw)
- print_kernel_mcgrp_hdr(parsed, cw)
- print_kernel_family_struct_hdr(parsed, cw)
- else:
- print_kernel_policy_ranges(parsed, cw)
-
- for _, struct in sorted(parsed.pure_nested_structs.items()):
- if struct.request:
- cw.p('/* Common nested types */')
- break
- for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
- if struct.request:
- print_req_policy(cw, struct)
- cw.nl()
-
- if parsed.kernel_policy == 'global':
- cw.p(f"/* Global operation policy for {parsed.name} */")
-
- struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
- print_req_policy(cw, struct)
- cw.nl()
-
- for op_name, op in parsed.ops.items():
- if parsed.kernel_policy in {'per-op', 'split'}:
- for op_mode in ['do', 'dump']:
- if op_mode in op and 'request' in op[op_mode]:
- cw.p(f"/* {op.enum_name} - {op_mode} */")
- ri = RenderInfo(cw, parsed, args.mode, op, op_mode)
- print_req_policy(cw, ri.struct['request'], ri=ri)
- cw.nl()
-
- print_kernel_op_table(parsed, cw)
- print_kernel_mcgrp_src(parsed, cw)
- print_kernel_family_struct_src(parsed, cw)
-
- if args.mode == "user":
- if args.header:
- cw.p('/* Enums */')
- put_op_name_fwd(parsed, cw)
-
- for name, const in parsed.consts.items():
- if isinstance(const, EnumSet):
- put_enum_to_str_fwd(parsed, cw, const)
- cw.nl()
-
- cw.p('/* Common nested types */')
- for attr_set, struct in parsed.pure_nested_structs.items():
- ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
- print_type_full(ri, struct)
-
- for op_name, op in parsed.ops.items():
- cw.p(f"/* ============== {op.enum_name} ============== */")
-
- if 'do' in op and 'event' not in op:
- cw.p(f"/* {op.enum_name} - do */")
- ri = RenderInfo(cw, parsed, args.mode, op, "do")
- print_req_type(ri)
- print_req_type_helpers(ri)
- cw.nl()
- print_rsp_type(ri)
- print_rsp_type_helpers(ri)
- cw.nl()
- print_req_prototype(ri)
- cw.nl()
-
- if 'dump' in op:
- cw.p(f"/* {op.enum_name} - dump */")
- ri = RenderInfo(cw, parsed, args.mode, op, 'dump')
- print_req_type(ri)
- print_req_type_helpers(ri)
- if not ri.type_consistent:
- print_rsp_type(ri)
- print_wrapped_type(ri)
- print_dump_prototype(ri)
- cw.nl()
-
- if op.has_ntf:
- cw.p(f"/* {op.enum_name} - notify */")
- ri = RenderInfo(cw, parsed, args.mode, op, 'notify')
- if not ri.type_consistent:
- raise Exception(f'Only notifications with consistent types supported ({op.name})')
- print_wrapped_type(ri)
-
- for op_name, op in parsed.ntfs.items():
- if 'event' in op:
- ri = RenderInfo(cw, parsed, args.mode, op, 'event')
- cw.p(f"/* {op.enum_name} - event */")
- print_rsp_type(ri)
- cw.nl()
- print_wrapped_type(ri)
- cw.nl()
- else:
- cw.p('/* Enums */')
- put_op_name(parsed, cw)
-
- for name, const in parsed.consts.items():
- if isinstance(const, EnumSet):
- put_enum_to_str(parsed, cw, const)
- cw.nl()
-
- has_recursive_nests = False
- cw.p('/* Policies */')
- for struct in parsed.pure_nested_structs.values():
- if struct.recursive:
- put_typol_fwd(cw, struct)
- has_recursive_nests = True
- if has_recursive_nests:
- cw.nl()
- for name in parsed.pure_nested_structs:
- struct = Struct(parsed, name)
- put_typol(cw, struct)
- for name in parsed.root_sets:
- struct = Struct(parsed, name)
- put_typol(cw, struct)
-
- cw.p('/* Common nested types */')
- if has_recursive_nests:
- for attr_set, struct in parsed.pure_nested_structs.items():
- ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
- free_rsp_nested_prototype(ri)
- if struct.request:
- put_req_nested_prototype(ri, struct)
- if struct.reply:
- parse_rsp_nested_prototype(ri, struct)
- cw.nl()
- for attr_set, struct in parsed.pure_nested_structs.items():
- ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
-
- free_rsp_nested(ri, struct)
- if struct.request:
- put_req_nested(ri, struct)
- if struct.reply:
- parse_rsp_nested(ri, struct)
-
- for op_name, op in parsed.ops.items():
- cw.p(f"/* ============== {op.enum_name} ============== */")
- if 'do' in op and 'event' not in op:
- cw.p(f"/* {op.enum_name} - do */")
- ri = RenderInfo(cw, parsed, args.mode, op, "do")
- print_req_free(ri)
- print_rsp_free(ri)
- parse_rsp_msg(ri)
- print_req(ri)
- cw.nl()
-
- if 'dump' in op:
- cw.p(f"/* {op.enum_name} - dump */")
- ri = RenderInfo(cw, parsed, args.mode, op, "dump")
- if not ri.type_consistent:
- parse_rsp_msg(ri, deref=True)
- print_req_free(ri)
- print_dump_type_free(ri)
- print_dump(ri)
- cw.nl()
-
- if op.has_ntf:
- cw.p(f"/* {op.enum_name} - notify */")
- ri = RenderInfo(cw, parsed, args.mode, op, 'notify')
- if not ri.type_consistent:
- raise Exception(f'Only notifications with consistent types supported ({op.name})')
- print_ntf_type_free(ri)
-
- for op_name, op in parsed.ntfs.items():
- if 'event' in op:
- cw.p(f"/* {op.enum_name} - event */")
-
- ri = RenderInfo(cw, parsed, args.mode, op, "do")
- parse_rsp_msg(ri)
-
- ri = RenderInfo(cw, parsed, args.mode, op, "event")
- print_ntf_type_free(ri)
- cw.nl()
- render_user_family(parsed, cw, False)
-
- if args.header:
- cw.p(f'#endif /* {hdr_prot} */')
-
-
-if __name__ == "__main__":
- main()
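
The generator keeps the same command line after the move; only its path changes. As a rough sketch (not part of this change), it can be driven the same way the regen script drives it, using the flags defined in ``main()`` above; the spec and output paths below are examples only:

.. code-block:: python

    # Illustrative invocation of the relocated C code generator, using the
    # argparse flags shown above; spec and output paths are examples, and the
    # command assumes it is run from the kernel source root.
    import subprocess

    subprocess.run(
        ["tools/net/ynl/pyynl/ynl_gen_c.py",
         "--mode", "kernel",                    # one of: user, kernel, uapi
         "--spec", "Documentation/netlink/specs/netdev.yaml",
         "--header",                            # or --source
         "--cmp-out",                           # keep old output if unchanged
         "-o", "netdev-gen.h"],                 # hypothetical output file
        check=True,
    )
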
+++ /dev/null
-#!/usr/bin/env python3
-# SPDX-License-Identifier: GPL-2.0
-# -*- coding: utf-8; mode: python -*-
-
-"""
- Script to auto generate the documentation for Netlink specifications.
-
- :copyright: Copyright (C) 2023 Breno Leitao <leitao@debian.org>
- :license: GPL Version 2, June 1991 see linux/COPYING for details.
-
-    This script performs extensive parsing of the Linux kernel's netlink YAML
-    spec files, in an effort to avoid needing to heavily mark up the original
-    YAML file.
-
-    This code is split into three big parts:
-        1) RST formatters: Used to convert a string to RST output
- 2) Parser helpers: Functions to parse the YAML data structure
- 3) Main function and small helpers
-"""
-
-from typing import Any, Dict, List
-import os.path
-import sys
-import argparse
-import logging
-import yaml
-
-
-SPACE_PER_LEVEL = 4
-
-
-# RST Formatters
-# ==============
-def headroom(level: int) -> str:
- """Return space to format"""
- return " " * (level * SPACE_PER_LEVEL)
-
-
-def bold(text: str) -> str:
- """Format bold text"""
- return f"**{text}**"
-
-
-def inline(text: str) -> str:
- """Format inline text"""
- return f"``{text}``"
-
-
-def sanitize(text: str) -> str:
- """Remove newlines and multiple spaces"""
- # This is useful for some fields that are spread across multiple lines
- return str(text).replace("\n", " ").strip()
-
-
-def rst_fields(key: str, value: str, level: int = 0) -> str:
- """Return a RST formatted field"""
- return headroom(level) + f":{key}: {value}"
-
-
-def rst_definition(key: str, value: Any, level: int = 0) -> str:
- """Format a single rst definition"""
- return headroom(level) + key + "\n" + headroom(level + 1) + str(value)
-
-
-def rst_paragraph(paragraph: str, level: int = 0) -> str:
- """Return a formatted paragraph"""
- return headroom(level) + paragraph
-
-
-def rst_bullet(item: str, level: int = 0) -> str:
- """Return a formatted a bullet"""
- return headroom(level) + f"- {item}"
-
-
-def rst_subsection(title: str) -> str:
- """Add a sub-section to the document"""
- return f"{title}\n" + "-" * len(title)
-
-
-def rst_subsubsection(title: str) -> str:
- """Add a sub-sub-section to the document"""
- return f"{title}\n" + "~" * len(title)
-
-
-def rst_section(namespace: str, prefix: str, title: str) -> str:
- """Add a section to the document"""
- return f".. _{namespace}-{prefix}-{title}:\n\n{title}\n" + "=" * len(title)
-
-
-def rst_subtitle(title: str) -> str:
- """Add a subtitle to the document"""
- return "\n" + "-" * len(title) + f"\n{title}\n" + "-" * len(title) + "\n\n"
-
-
-def rst_title(title: str) -> str:
- """Add a title to the document"""
- return "=" * len(title) + f"\n{title}\n" + "=" * len(title) + "\n\n"
-
-
-def rst_list_inline(list_: List[str], level: int = 0) -> str:
- """Format a list using inlines"""
- return headroom(level) + "[" + ", ".join(inline(i) for i in list_) + "]"
-
-
-def rst_ref(namespace: str, prefix: str, name: str) -> str:
- """Add a hyperlink to the document"""
- mappings = {'enum': 'definition',
- 'fixed-header': 'definition',
- 'nested-attributes': 'attribute-set',
- 'struct': 'definition'}
- if prefix in mappings:
- prefix = mappings[prefix]
- return f":ref:`{namespace}-{prefix}-{name}`"
-
-
-def rst_header() -> str:
- """The headers for all the auto generated RST files"""
- lines = []
-
- lines.append(rst_paragraph(".. SPDX-License-Identifier: GPL-2.0"))
- lines.append(rst_paragraph(".. NOTE: This document was auto-generated.\n\n"))
-
- return "\n".join(lines)
-
-
-def rst_toctree(maxdepth: int = 2) -> str:
- """Generate a toctree RST primitive"""
- lines = []
-
- lines.append(".. toctree::")
- lines.append(f" :maxdepth: {maxdepth}\n\n")
-
- return "\n".join(lines)
-
-
-def rst_label(title: str) -> str:
- """Return a formatted label"""
- return f".. _{title}:\n\n"
-
-
-# Parsers
-# =======
-
-
-def parse_mcast_group(mcast_group: List[Dict[str, Any]]) -> str:
- """Parse 'multicast' group list and return a formatted string"""
- lines = []
- for group in mcast_group:
- lines.append(rst_bullet(group["name"]))
-
- return "\n".join(lines)
-
-
-def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str:
- """Parse 'do' section and return a formatted string"""
- lines = []
- for key in do_dict.keys():
- lines.append(rst_paragraph(bold(key), level + 1))
- if key in ['request', 'reply']:
- lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n")
- else:
- lines.append(headroom(level + 2) + do_dict[key] + "\n")
-
- return "\n".join(lines)
-
-
-def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str:
- """Parse 'attributes' section"""
- if "attributes" not in attrs:
- return ""
- lines = [rst_fields("attributes", rst_list_inline(attrs["attributes"]), level + 1)]
-
- return "\n".join(lines)
-
-
-def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str:
- """Parse operations block"""
- preprocessed = ["name", "doc", "title", "do", "dump", "flags"]
- linkable = ["fixed-header", "attribute-set"]
- lines = []
-
- for operation in operations:
- lines.append(rst_section(namespace, 'operation', operation["name"]))
- lines.append(rst_paragraph(operation["doc"]) + "\n")
-
- for key in operation.keys():
- if key in preprocessed:
- # Skip the special fields
- continue
- value = operation[key]
- if key in linkable:
- value = rst_ref(namespace, key, value)
- lines.append(rst_fields(key, value, 0))
- if 'flags' in operation:
- lines.append(rst_fields('flags', rst_list_inline(operation['flags'])))
-
- if "do" in operation:
- lines.append(rst_paragraph(":do:", 0))
- lines.append(parse_do(operation["do"], 0))
- if "dump" in operation:
- lines.append(rst_paragraph(":dump:", 0))
- lines.append(parse_do(operation["dump"], 0))
-
- # New line after fields
- lines.append("\n")
-
- return "\n".join(lines)
-
-
-def parse_entries(entries: List[Dict[str, Any]], level: int) -> str:
- """Parse a list of entries"""
- ignored = ["pad"]
- lines = []
- for entry in entries:
- if isinstance(entry, dict):
- # entries could be a list or a dictionary
- field_name = entry.get("name", "")
- if field_name in ignored:
- continue
- type_ = entry.get("type")
- if type_:
- field_name += f" ({inline(type_)})"
- lines.append(
- rst_fields(field_name, sanitize(entry.get("doc", "")), level)
- )
- elif isinstance(entry, list):
- lines.append(rst_list_inline(entry, level))
- else:
- lines.append(rst_bullet(inline(sanitize(entry)), level))
-
- lines.append("\n")
- return "\n".join(lines)
-
-
-def parse_definitions(defs: Dict[str, Any], namespace: str) -> str:
- """Parse definitions section"""
- preprocessed = ["name", "entries", "members"]
- ignored = ["render-max"] # This is not printed
- lines = []
-
- for definition in defs:
- lines.append(rst_section(namespace, 'definition', definition["name"]))
- for k in definition.keys():
- if k in preprocessed + ignored:
- continue
- lines.append(rst_fields(k, sanitize(definition[k]), 0))
-
- # Field list needs to finish with a new line
- lines.append("\n")
- if "entries" in definition:
- lines.append(rst_paragraph(":entries:", 0))
- lines.append(parse_entries(definition["entries"], 1))
- if "members" in definition:
- lines.append(rst_paragraph(":members:", 0))
- lines.append(parse_entries(definition["members"], 1))
-
- return "\n".join(lines)
-
-
-def parse_attr_sets(entries: List[Dict[str, Any]], namespace: str) -> str:
- """Parse attribute from attribute-set"""
- preprocessed = ["name", "type"]
- linkable = ["enum", "nested-attributes", "struct", "sub-message"]
- ignored = ["checks"]
- lines = []
-
- for entry in entries:
- lines.append(rst_section(namespace, 'attribute-set', entry["name"]))
- for attr in entry["attributes"]:
- type_ = attr.get("type")
- attr_line = attr["name"]
- if type_:
- # Add the attribute type in the same line
- attr_line += f" ({inline(type_)})"
-
- lines.append(rst_subsubsection(attr_line))
-
- for k in attr.keys():
- if k in preprocessed + ignored:
- continue
- if k in linkable:
- value = rst_ref(namespace, k, attr[k])
- else:
- value = sanitize(attr[k])
- lines.append(rst_fields(k, value, 0))
- lines.append("\n")
-
- return "\n".join(lines)
-
-
-def parse_sub_messages(entries: List[Dict[str, Any]], namespace: str) -> str:
- """Parse sub-message definitions"""
- lines = []
-
- for entry in entries:
- lines.append(rst_section(namespace, 'sub-message', entry["name"]))
- for fmt in entry["formats"]:
- value = fmt["value"]
-
- lines.append(rst_bullet(bold(value)))
- for attr in ['fixed-header', 'attribute-set']:
- if attr in fmt:
- lines.append(rst_fields(attr,
- rst_ref(namespace, attr, fmt[attr]),
- 1))
- lines.append("\n")
-
- return "\n".join(lines)
-
-
-def parse_yaml(obj: Dict[str, Any]) -> str:
- """Format the whole YAML into a RST string"""
- lines = []
-
- # Main header
-
- lines.append(rst_header())
-
- family = obj['name']
-
- title = f"Family ``{family}`` netlink specification"
- lines.append(rst_title(title))
- lines.append(rst_paragraph(".. contents:: :depth: 3\n"))
-
- if "doc" in obj:
- lines.append(rst_subtitle("Summary"))
- lines.append(rst_paragraph(obj["doc"], 0))
-
- # Operations
- if "operations" in obj:
- lines.append(rst_subtitle("Operations"))
- lines.append(parse_operations(obj["operations"]["list"], family))
-
- # Multicast groups
- if "mcast-groups" in obj:
- lines.append(rst_subtitle("Multicast groups"))
- lines.append(parse_mcast_group(obj["mcast-groups"]["list"]))
-
- # Definitions
- if "definitions" in obj:
- lines.append(rst_subtitle("Definitions"))
- lines.append(parse_definitions(obj["definitions"], family))
-
- # Attributes set
- if "attribute-sets" in obj:
- lines.append(rst_subtitle("Attribute sets"))
- lines.append(parse_attr_sets(obj["attribute-sets"], family))
-
- # Sub-messages
- if "sub-messages" in obj:
- lines.append(rst_subtitle("Sub-messages"))
- lines.append(parse_sub_messages(obj["sub-messages"], family))
-
- return "\n".join(lines)
-
-
-# Main functions
-# ==============
-
-
-def parse_arguments() -> argparse.Namespace:
- """Parse arguments from user"""
- parser = argparse.ArgumentParser(description="Netlink RST generator")
-
- parser.add_argument("-v", "--verbose", action="store_true")
- parser.add_argument("-o", "--output", help="Output file name")
-
- # Index and input are mutually exclusive
- group = parser.add_mutually_exclusive_group()
- group.add_argument(
- "-x", "--index", action="store_true", help="Generate the index page"
- )
- group.add_argument("-i", "--input", help="YAML file name")
-
- args = parser.parse_args()
-
- if args.verbose:
- logging.basicConfig(level=logging.DEBUG)
-
- if args.input and not os.path.isfile(args.input):
- logging.warning("%s is not a valid file.", args.input)
- sys.exit(-1)
-
- if not args.output:
- logging.error("No output file specified.")
- sys.exit(-1)
-
- if os.path.isfile(args.output):
- logging.debug("%s already exists. Overwriting it.", args.output)
-
- return args
-
-
-def parse_yaml_file(filename: str) -> str:
- """Transform the YAML specified by filename into a rst-formmated string"""
- with open(filename, "r", encoding="utf-8") as spec_file:
- yaml_data = yaml.safe_load(spec_file)
- content = parse_yaml(yaml_data)
-
- return content
-
-
-def write_to_rstfile(content: str, filename: str) -> None:
- """Write the generated content into an RST file"""
- logging.debug("Saving RST file to %s", filename)
-
- with open(filename, "w", encoding="utf-8") as rst_file:
- rst_file.write(content)
-
-
-def generate_main_index_rst(output: str) -> None:
- """Generate the `networking_spec/index` content and write to the file"""
- lines = []
-
- lines.append(rst_header())
- lines.append(rst_label("specs"))
- lines.append(rst_title("Netlink Family Specifications"))
- lines.append(rst_toctree(1))
-
- index_dir = os.path.dirname(output)
- logging.debug("Looking for .rst files in %s", index_dir)
- for filename in sorted(os.listdir(index_dir)):
- if not filename.endswith(".rst") or filename == "index.rst":
- continue
- lines.append(f" {filename.replace('.rst', '')}\n")
-
- logging.debug("Writing an index file at %s", output)
- write_to_rstfile("".join(lines), output)
-
-
-def main() -> None:
- """Main function that reads the YAML files and generates the RST files"""
-
- args = parse_arguments()
-
- if args.input:
- logging.debug("Parsing %s", args.input)
- try:
- content = parse_yaml_file(os.path.join(args.input))
- except Exception as exception:
- logging.warning("Failed to parse %s.", args.input)
- logging.warning(exception)
- sys.exit(-1)
-
- write_to_rstfile(content, args.output)
-
- if args.index:
- # Generate the index RST file
- generate_main_index_rst(args.output)
-
-
-if __name__ == "__main__":
- main()
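
The RST helpers above are plain string builders. Below is a standalone sketch of the output style they produce; the two helper bodies are copied from the script being moved, while the family name and field are invented for the example:

.. code-block:: python

    # Standalone sketch of the RST-building style used by ynl_gen_rst.py;
    # helper bodies copied from the script above, example data made up.
    def rst_title(title: str) -> str:
        return "=" * len(title) + f"\n{title}\n" + "=" * len(title) + "\n\n"

    def rst_fields(key: str, value: str, level: int = 0) -> str:
        return " " * (level * 4) + f":{key}: {value}"

    family = "netdev"  # example family name
    print(rst_title(f"Family ``{family}`` netlink specification"))
    print(rst_fields("protocol", "genetlink"))
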
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-TOOL=$(dirname $(realpath $0))/ynl-gen-c.py
+TOOL=$(dirname $(realpath $0))/pyynl/ynl_gen_c.py
force=
search=
SPEC_PATH = KSFT_DIR / "net/lib/specs"
sys.path.append(tools_full_path.as_posix())
- from net.lib.ynl.lib import YnlFamily, NlError
+ from net.lib.ynl.pyynl.lib import YnlFamily, NlError
else:
# Running in tree
tools_full_path = KSRC / "tools"
SPEC_PATH = KSRC / "Documentation/netlink/specs"
sys.path.append(tools_full_path.as_posix())
- from net.ynl.lib import YnlFamily, NlError
+ from net.ynl.pyynl.lib import YnlFamily, NlError
except ModuleNotFoundError as e:
ksft_pr("Failed importing `ynl` library from kernel sources")
ksft_pr(str(e))
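
For context on the import change above, a minimal sketch of how the relocated library is used once imported; the spec path, operation and ifindex value are illustrative assumptions, not taken from this change:

.. code-block:: python

    # Illustrative use of YnlFamily from its new pyynl location; assumes the
    # current directory is the kernel source root. The ifindex is arbitrary.
    import sys

    sys.path.append("tools/net/ynl")
    from pyynl.lib import YnlFamily, NlError

    ynl = YnlFamily("Documentation/netlink/specs/netdev.yaml")
    try:
        print(ynl.dump("queue-get", {"ifindex": 1}))
    except NlError as e:
        print("netlink error:", e)
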
$(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a
EXTRA_CLEAN += \
- $(top_srcdir)/tools/net/ynl/lib/__pycache__ \
+ $(top_srcdir)/tools/net/ynl/pyynl/__pycache__ \
+ $(top_srcdir)/tools/net/ynl/pyynl/lib/__pycache__ \
$(top_srcdir)/tools/net/ynl/lib/*.[ado] \
$(OUTPUT)/.libynl-*.sig \
$(OUTPUT)/libynl.a