diff --git a/.gitmodules b/.gitmodules index f23a7a3e78..f7387686e5 100644 --- a/.gitmodules +++ b/.gitmodules @@ -64,3 +64,6 @@ path = src/lib/crypto/libtommath url = https://github.com/PX4/libtommath.git branch = px4 +[submodule "platforms/nuttx/src/px4/stm/hal_stm32"] + path = platforms/nuttx/src/px4/stm/hal_stm32 + url = git@github.com:zephyrproject-rtos/hal_stm32.git diff --git a/CMakeLists.txt b/CMakeLists.txt index bca9b38eaa..81c8610c31 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -413,6 +413,8 @@ add_subdirectory(src/lib EXCLUDE_FROM_ALL) add_subdirectory(platforms/${PX4_PLATFORM}/src/px4) add_subdirectory(platforms EXCLUDE_FROM_ALL) +include(dts) + if(EXISTS "${PX4_BOARD_DIR}/CMakeLists.txt") add_subdirectory(${PX4_BOARD_DIR}) endif() diff --git a/Tools/astyle/files_to_check_code_style.sh b/Tools/astyle/files_to_check_code_style.sh index 963c8a51cf..b71ff94869 100755 --- a/Tools/astyle/files_to_check_code_style.sh +++ b/Tools/astyle/files_to_check_code_style.sh @@ -10,6 +10,7 @@ fi exec find boards msg src platforms test \ -path msg/templates/urtps -prune -o \ -path platforms/nuttx/NuttX -prune -o \ + -path platforms/nuttx/src/px4/stm/hal_stm32 -prune -o \ -path platforms/qurt/dspal -prune -o \ -path src/drivers/uavcan/libuavcan -prune -o \ -path src/drivers/uavcan/uavcan_drivers/kinetis/driver/include/uavcan_kinetis -prune -o \ diff --git a/Tools/dts/README.txt b/Tools/dts/README.txt new file mode 100644 index 0000000000..f6705bcf78 --- /dev/null +++ b/Tools/dts/README.txt @@ -0,0 +1,32 @@ +This directory used to contain the edtlib.py and dtlib.py libraries +and tests, alongside the gen_defines.py script that uses them for +converting DTS to the C macros used by Zephyr. + +The libraries and tests have now been moved to the 'python-devicetree' +subdirectory. + +We are now in the process of extracting edtlib and dtlib into a +standalone source code library that we intend to share with other +projects. 
+ +Links related to the work making this standalone: + + https://pypi.org/project/devicetree/ + https://python-devicetree.readthedocs.io/en/latest/ + https://github.com/zephyrproject-rtos/python-devicetree + +The 'python-devicetree' subdirectory you find here next to this +README.txt matches the standalone python-devicetree repository linked +above. + +For now, the 'main' copy will continue to be hosted here in the zephyr +repository. We will mirror changes into the standalone repository as +needed; you can just ignore it for now. + +Code in the zephyr repository which needs these libraries will import +devicetree.edtlib from now on, but the code will continue to be found +by manipulating sys.path for now. + +Eventually, as APIs stabilize, the python-devicetree code in this +repository will disappear, and a standalone repository will be the +'main' one. diff --git a/Tools/dts/gen_defines.py b/Tools/dts/gen_defines.py new file mode 100755 index 0000000000..7cff885edf --- /dev/null +++ b/Tools/dts/gen_defines.py @@ -0,0 +1,1032 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2019 - 2020 Nordic Semiconductor ASA +# Copyright (c) 2019 Linaro Limited +# SPDX-License-Identifier: BSD-3-Clause + +# This script uses edtlib to generate a header file from a devicetree +# (.dts) file. Information from binding files in YAML format is used +# as well. +# +# Bindings are files that describe devicetree nodes. Devicetree nodes are +# usually mapped to bindings via their 'compatible = "..."' property. +# +# See Zephyr's Devicetree user guide for details. +# +# Note: Do not access private (_-prefixed) identifiers from edtlib here (and +# also note that edtlib is not meant to expose the dtlib API directly). +# Instead, think of what API you need, and add it as a public documented API in +# edtlib. This will keep this script simple. 
+ +import argparse +from collections import defaultdict +import logging +import os +import pathlib +import pickle +import re +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), 'python-devicetree', + 'src')) + +from devicetree import edtlib + +# The set of binding types whose values can be iterated over with +# DT_FOREACH_PROP_ELEM(). If you change this, make sure to update the +# doxygen string for that macro. +FOREACH_PROP_ELEM_TYPES = set(['string', 'array', 'uint8-array', 'string-array', + 'phandles', 'phandle-array']) + +class LogFormatter(logging.Formatter): + '''A log formatter that prints the level name in lower case, + for compatibility with earlier versions of edtlib.''' + + def __init__(self): + super().__init__(fmt='%(levelnamelower)s: %(message)s') + + def format(self, record): + record.levelnamelower = record.levelname.lower() + return super().format(record) + +def main(): + global header_file + global flash_area_num + + args = parse_args() + + setup_edtlib_logging() + + vendor_prefixes = {} + for prefixes_file in args.vendor_prefixes: + vendor_prefixes.update(edtlib.load_vendor_prefixes_txt(prefixes_file)) + + try: + edt = edtlib.EDT(args.dts, args.bindings_dirs, + # Suppress this warning if it's suppressed in dtc + warn_reg_unit_address_mismatch= + "-Wno-simple_bus_reg" not in args.dtc_flags, + default_prop_types=True, + infer_binding_for_paths=["/zephyr,user"], + werror=args.edtlib_Werror, + vendor_prefixes=vendor_prefixes) + except edtlib.EDTError as e: + sys.exit(f"devicetree error: {e}") + + flash_area_num = 0 + + # Save merged DTS source, as a debugging aid + with open(args.dts_out, "w", encoding="utf-8") as f: + print(edt.dts_source, file=f) + + # The raw index into edt.compat2nodes[compat] is used for node + # instance numbering within a compatible. + # + # As a way to satisfy people's intuitions about instance numbers, + # though, we sort this list so enabled instances come first. 
+ # + # This might look like a hack, but it keeps drivers and + # applications which don't use instance numbers carefully working + # as expected, since e.g. instance number 0 is always the + # singleton instance if there's just one enabled node of a + # particular compatible. + # + # This doesn't violate any devicetree.h API guarantees about + # instance ordering, since we make no promises that instance + # numbers are stable across builds. + for compat, nodes in edt.compat2nodes.items(): + edt.compat2nodes[compat] = sorted( + nodes, key=lambda node: 0 if node.status == "okay" else 1) + + # Create the generated header. + with open(args.header_out, "w", encoding="utf-8") as header_file: + write_top_comment(edt) + + # populate all z_path_id first so any children references will + # work correctly. + for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal): + node.z_path_id = node_z_path_id(node) + + # Check to see if we have duplicate "zephyr,memory-region" property values. + regions = dict() + for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal): + if 'zephyr,memory-region' in node.props: + region = node.props['zephyr,memory-region'].val + if region in regions: + sys.exit(f"ERROR: Duplicate 'zephyr,memory-region' ({region}) properties " + f"between {regions[region].path} and {node.path}") + regions[region] = node + + for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal): + write_node_comment(node) + + out_comment("Node's full path:") + out_dt_define(f"{node.z_path_id}_PATH", f'"{escape(node.path)}"') + + out_comment("Node's name with unit-address:") + out_dt_define(f"{node.z_path_id}_FULL_NAME", + f'"{escape(node.name)}"') + + if node.parent is not None: + out_comment(f"Node parent ({node.parent.path}) identifier:") + out_dt_define(f"{node.z_path_id}_PARENT", + f"DT_{node.parent.z_path_id}") + + write_child_functions(node) + write_child_functions_status_okay(node) + write_dep_info(node) + write_idents_and_existence(node) + 
write_bus(node) + write_special_props(node) + write_vanilla_props(node) + + write_chosen(edt) + write_global_compat_info(edt) + + write_device_extern_header(args.device_header_out, edt) + + if args.edt_pickle_out: + write_pickled_edt(edt, args.edt_pickle_out) + + +def write_device_extern_header(device_header_out, edt): + # Generate header that will extern devicetree struct device's + + with open(device_header_out, "w", encoding="utf-8") as dev_header_file: + print("#ifndef DEVICE_EXTERN_GEN_H", file=dev_header_file) + print("#define DEVICE_EXTERN_GEN_H", file=dev_header_file) + print("", file=dev_header_file) + print("#ifdef __cplusplus", file=dev_header_file) + print('extern "C" {', file=dev_header_file) + print("#endif", file=dev_header_file) + print("", file=dev_header_file) + + for node in sorted(edt.nodes, key=lambda node: node.dep_ordinal): + print(f"extern const struct device DEVICE_DT_NAME_GET(DT_{node.z_path_id}); /* dts_ord_{node.dep_ordinal} */", + file=dev_header_file) + + print("", file=dev_header_file) + print("#ifdef __cplusplus", file=dev_header_file) + print("}", file=dev_header_file) + print("#endif", file=dev_header_file) + print("", file=dev_header_file) + print("#endif /* DEVICE_EXTERN_GEN_H */", file=dev_header_file) + + +def setup_edtlib_logging(): + # The edtlib module emits logs using the standard 'logging' module. + # Configure it so that warnings and above are printed to stderr, + # using the LogFormatter class defined above to format each message. 
+ + handler = logging.StreamHandler(sys.stderr) + handler.setFormatter(LogFormatter()) + + logger = logging.getLogger('edtlib') + logger.setLevel(logging.WARNING) + logger.addHandler(handler) + +def node_z_path_id(node): + # Return the node specific bit of the node's path identifier: + # + # - the root node's path "/" has path identifier "N" + # - "/foo" has "N_S_foo" + # - "/foo/bar" has "N_S_foo_S_bar" + # - "/foo/bar@123" has "N_S_foo_S_bar_123" + # + # This is used throughout this file to generate macros related to + # the node. + + components = ["N"] + if node.parent is not None: + components.extend(f"S_{str2ident(component)}" for component in + node.path.split("/")[1:]) + + return "_".join(components) + +def parse_args(): + # Returns parsed command-line arguments + + parser = argparse.ArgumentParser() + parser.add_argument("--dts", required=True, help="DTS file") + parser.add_argument("--dtc-flags", + help="'dtc' devicetree compiler flags, some of which " + "might be respected here") + parser.add_argument("--bindings-dirs", nargs='+', required=True, + help="directory with bindings in YAML format, " + "we allow multiple") + parser.add_argument("--header-out", required=True, + help="path to write header to") + parser.add_argument("--dts-out", required=True, + help="path to write merged DTS source code to (e.g. " + "as a debugging aid)") + parser.add_argument("--device-header-out", required=True, + help="path to write device struct extern header to") + parser.add_argument("--edt-pickle-out", + help="path to write pickled edtlib.EDT object to") + parser.add_argument("--vendor-prefixes", action='append', default=[], + help="vendor-prefixes.txt path; used for validation; " + "may be given multiple times") + parser.add_argument("--edtlib-Werror", action="store_true", + help="if set, edtlib-specific warnings become errors. 
" + "(this does not apply to warnings shared " + "with dtc.)") + + return parser.parse_args() + + +def write_top_comment(edt): + # Writes an overview comment with misc. info at the top of the header and + # configuration file + + s = f"""\ +Generated by gen_defines.py + +DTS input file: + {edt.dts_path} + +Directories with bindings: + {", ".join(map(relativize, edt.bindings_dirs))} + +Node dependency ordering (ordinal and path): +""" + + for scc in edt.scc_order: + if len(scc) > 1: + err("cycle in devicetree involving " + + ", ".join(node.path for node in scc)) + s += f" {scc[0].dep_ordinal:<3} {scc[0].path}\n" + + s += """ +Definitions derived from these nodes in dependency order are next, +followed by /chosen nodes. +""" + + out_comment(s, blank_before=False) + + +def write_node_comment(node): + # Writes a comment describing 'node' to the header and configuration file + + s = f"""\ +Devicetree node: {node.path} + +Node identifier: DT_{node.z_path_id} +""" + + if node.matching_compat: + if node.binding_path: + s += f""" +Binding (compatible = {node.matching_compat}): + {relativize(node.binding_path)} +""" + else: + s += f""" +Binding (compatible = {node.matching_compat}): + No yaml (bindings inferred from properties) +""" + + if node.description: + # We used to put descriptions in the generated file, but + # devicetree bindings now have pages in the HTML + # documentation. Let users who are accustomed to digging + # around in the generated file where to find the descriptions + # now. + # + # Keeping them here would mean that the descriptions + # themselves couldn't contain C multi-line comments, which is + # inconvenient when we want to do things like quote snippets + # of .dtsi files within the descriptions, or otherwise + # include the string "*/". 
+ s += ("\n(Descriptions have moved to the Devicetree Bindings Index\n" + "in the documentation.)\n") + + out_comment(s) + + +def relativize(path): + # If 'path' is within $ZEPHYR_BASE, returns it relative to $ZEPHYR_BASE, + # with a "$ZEPHYR_BASE/..." hint at the start of the string. Otherwise, + # returns 'path' unchanged. + + zbase = os.getenv("ZEPHYR_BASE") + if zbase is None: + return path + + try: + return str("$ZEPHYR_BASE" / pathlib.Path(path).relative_to(zbase)) + except ValueError: + # Not within ZEPHYR_BASE + return path + + +def write_idents_and_existence(node): + # Writes macros related to the node's aliases, labels, etc., + # as well as existence flags. + + # Aliases + idents = [f"N_ALIAS_{str2ident(alias)}" for alias in node.aliases] + # Instances + for compat in node.compats: + instance_no = node.edt.compat2nodes[compat].index(node) + idents.append(f"N_INST_{instance_no}_{str2ident(compat)}") + # Node labels + idents.extend(f"N_NODELABEL_{str2ident(label)}" for label in node.labels) + + out_comment("Existence and alternate IDs:") + out_dt_define(node.z_path_id + "_EXISTS", 1) + + # Only determine maxlen if we have any idents + if idents: + maxlen = max(len("DT_" + ident) for ident in idents) + for ident in idents: + out_dt_define(ident, "DT_" + node.z_path_id, width=maxlen) + + +def write_bus(node): + # Macros about the node's bus controller, if there is one + + bus = node.bus_node + if not bus: + return + + if not bus.label: + err(f"missing 'label' property on bus node {bus!r}") + + out_comment(f"Bus info (controller: '{bus.path}', type: '{node.on_bus}')") + out_dt_define(f"{node.z_path_id}_BUS_{str2ident(node.on_bus)}", 1) + out_dt_define(f"{node.z_path_id}_BUS", f"DT_{bus.z_path_id}") + + +def write_special_props(node): + # Writes required macros for special case properties, when the + # data cannot otherwise be obtained from write_vanilla_props() + # results + + # Macros that are special to the devicetree specification + out_comment("Macros for 
properties that are special in the specification:") + write_regs(node) + write_ranges(node) + write_interrupts(node) + write_compatibles(node) + write_status(node) + + # Macros that are special to bindings inherited from Linux, which + # we can't capture with the current bindings language. + write_pinctrls(node) + write_fixed_partitions(node) + +def write_ranges(node): + # ranges property: edtlib knows the right #address-cells and + # #size-cells of parent and child, and can therefore pack the + # child & parent addresses and sizes correctly + + idx_vals = [] + path_id = node.z_path_id + + if node.ranges is not None: + idx_vals.append((f"{path_id}_RANGES_NUM", len(node.ranges))) + + for i,range in enumerate(node.ranges): + idx_vals.append((f"{path_id}_RANGES_IDX_{i}_EXISTS", 1)) + + if node.bus == "pcie": + idx_vals.append((f"{path_id}_RANGES_IDX_{i}_VAL_CHILD_BUS_FLAGS_EXISTS", 1)) + idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_CHILD_BUS_FLAGS" + idx_value = range.child_bus_addr >> ((range.child_bus_cells - 1) * 32) + idx_vals.append((idx_macro, + f"{idx_value} /* {hex(idx_value)} */")) + if range.child_bus_addr is not None: + idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_CHILD_BUS_ADDRESS" + if node.bus == "pcie": + idx_value = range.child_bus_addr & ((1 << (range.child_bus_cells - 1) * 32) - 1) + else: + idx_value = range.child_bus_addr + idx_vals.append((idx_macro, + f"{idx_value} /* {hex(idx_value)} */")) + if range.parent_bus_addr is not None: + idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_PARENT_BUS_ADDRESS" + idx_vals.append((idx_macro, + f"{range.parent_bus_addr} /* {hex(range.parent_bus_addr)} */")) + if range.length is not None: + idx_macro = f"{path_id}_RANGES_IDX_{i}_VAL_LENGTH" + idx_vals.append((idx_macro, + f"{range.length} /* {hex(range.length)} */")) + + for macro, val in idx_vals: + out_dt_define(macro, val) + + out_dt_define(f"{path_id}_FOREACH_RANGE(fn)", + " ".join(f"fn(DT_{path_id}, {i})" for i,range in enumerate(node.ranges))) + +def 
write_regs(node): + # reg property: edtlib knows the right #address-cells and + # #size-cells, and can therefore pack the register base addresses + # and sizes correctly + + idx_vals = [] + name_vals = [] + path_id = node.z_path_id + + if node.regs is not None: + idx_vals.append((f"{path_id}_REG_NUM", len(node.regs))) + + for i, reg in enumerate(node.regs): + idx_vals.append((f"{path_id}_REG_IDX_{i}_EXISTS", 1)) + if reg.addr is not None: + idx_macro = f"{path_id}_REG_IDX_{i}_VAL_ADDRESS" + idx_vals.append((idx_macro, + f"{reg.addr} /* {hex(reg.addr)} */")) + if reg.name: + name_macro = f"{path_id}_REG_NAME_{reg.name}_VAL_ADDRESS" + name_vals.append((name_macro, f"DT_{idx_macro}")) + + if reg.size is not None: + idx_macro = f"{path_id}_REG_IDX_{i}_VAL_SIZE" + idx_vals.append((idx_macro, + f"{reg.size} /* {hex(reg.size)} */")) + if reg.name: + name_macro = f"{path_id}_REG_NAME_{reg.name}_VAL_SIZE" + name_vals.append((name_macro, f"DT_{idx_macro}")) + + for macro, val in idx_vals: + out_dt_define(macro, val) + for macro, val in name_vals: + out_dt_define(macro, val) + +def write_interrupts(node): + # interrupts property: we have some hard-coded logic for interrupt + # mapping here. + # + # TODO: can we push map_arm_gic_irq_type() and + # encode_zephyr_multi_level_irq() out of Python and into C with + # macro magic in devicetree.h? 
+ + def map_arm_gic_irq_type(irq, irq_num): + # Maps ARM GIC IRQ (type)+(index) combo to linear IRQ number + if "type" not in irq.data: + err(f"Expected binding for {irq.controller!r} to have 'type' in " + "interrupt-cells") + irq_type = irq.data["type"] + + if irq_type == 0: # GIC_SPI + return irq_num + 32 + if irq_type == 1: # GIC_PPI + return irq_num + 16 + err(f"Invalid interrupt type specified for {irq!r}") + + def encode_zephyr_multi_level_irq(irq, irq_num): + # See doc/reference/kernel/other/interrupts.rst for details + # on how this encoding works + + irq_ctrl = irq.controller + # Look for interrupt controller parent until we have none + while irq_ctrl.interrupts: + irq_num = (irq_num + 1) << 8 + if "irq" not in irq_ctrl.interrupts[0].data: + err(f"Expected binding for {irq_ctrl!r} to have 'irq' in " + "interrupt-cells") + irq_num |= irq_ctrl.interrupts[0].data["irq"] + irq_ctrl = irq_ctrl.interrupts[0].controller + return irq_num + + idx_vals = [] + name_vals = [] + path_id = node.z_path_id + + if node.interrupts is not None: + idx_vals.append((f"{path_id}_IRQ_NUM", len(node.interrupts))) + + for i, irq in enumerate(node.interrupts): + for cell_name, cell_value in irq.data.items(): + name = str2ident(cell_name) + + if cell_name == "irq": + if "arm,gic" in irq.controller.compats: + cell_value = map_arm_gic_irq_type(irq, cell_value) + cell_value = encode_zephyr_multi_level_irq(irq, cell_value) + + idx_vals.append((f"{path_id}_IRQ_IDX_{i}_EXISTS", 1)) + idx_macro = f"{path_id}_IRQ_IDX_{i}_VAL_{name}" + idx_vals.append((idx_macro, cell_value)) + idx_vals.append((idx_macro + "_EXISTS", 1)) + if irq.name: + name_macro = \ + f"{path_id}_IRQ_NAME_{str2ident(irq.name)}_VAL_{name}" + name_vals.append((name_macro, f"DT_{idx_macro}")) + name_vals.append((name_macro + "_EXISTS", 1)) + + for macro, val in idx_vals: + out_dt_define(macro, val) + for macro, val in name_vals: + out_dt_define(macro, val) + + +def write_compatibles(node): + # Writes a macro for each of the 
node's compatibles. We don't care + # about whether edtlib / Zephyr's binding language recognizes + # them. The compatibles the node provides are what is important. + + for compat in node.compats: + out_dt_define( + f"{node.z_path_id}_COMPAT_MATCHES_{str2ident(compat)}", 1) + + +def write_child_functions(node): + # Writes macro that are helpers that will call a macro/function + # for each child node. + + out_dt_define(f"{node.z_path_id}_FOREACH_CHILD(fn)", + " ".join(f"fn(DT_{child.z_path_id})" for child in + node.children.values())) + + out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_VARGS(fn, ...)", + " ".join(f"fn(DT_{child.z_path_id}, __VA_ARGS__)" for child in + node.children.values())) + +def write_child_functions_status_okay(node): + # Writes macros that are helpers that will call a macro/function + # for each child node with status "okay". + + functions = '' + functions_args = '' + for child in node.children.values(): + if child.status == "okay": + functions = functions + f"fn(DT_{child.z_path_id}) " + functions_args = functions_args + f"fn(DT_{child.z_path_id}, " \ + "__VA_ARGS__) " + + out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_STATUS_OKAY(fn)", functions) + out_dt_define(f"{node.z_path_id}_FOREACH_CHILD_STATUS_OKAY_VARGS(fn, ...)", + functions_args) + + +def write_status(node): + out_dt_define(f"{node.z_path_id}_STATUS_{str2ident(node.status)}", 1) + + +def write_pinctrls(node): + # Write special macros for pinctrl- and pinctrl-names properties. + + out_comment("Pin control (pinctrl-, pinctrl-names) properties:") + + out_dt_define(f"{node.z_path_id}_PINCTRL_NUM", len(node.pinctrls)) + + if not node.pinctrls: + return + + for pc_idx, pinctrl in enumerate(node.pinctrls): + out_dt_define(f"{node.z_path_id}_PINCTRL_IDX_{pc_idx}_EXISTS", 1) + + if not pinctrl.name: + continue + + name = pinctrl.name_as_token + + # Below we rely on the fact that edtlib ensures the + # pinctrl- properties are contiguous, start from 0, + # and contain only phandles. 
+ out_dt_define(f"{node.z_path_id}_PINCTRL_IDX_{pc_idx}_TOKEN", name) + out_dt_define(f"{node.z_path_id}_PINCTRL_IDX_{pc_idx}_UPPER_TOKEN", name.upper()) + out_dt_define(f"{node.z_path_id}_PINCTRL_NAME_{name}_EXISTS", 1) + out_dt_define(f"{node.z_path_id}_PINCTRL_NAME_{name}_IDX", pc_idx) + for idx, ph in enumerate(pinctrl.conf_nodes): + out_dt_define(f"{node.z_path_id}_PINCTRL_NAME_{name}_IDX_{idx}_PH", + f"DT_{ph.z_path_id}") + + +def write_fixed_partitions(node): + # Macros for child nodes of each fixed-partitions node. + + if not (node.parent and "fixed-partitions" in node.parent.compats): + return + + global flash_area_num + out_comment("fixed-partitions identifier:") + out_dt_define(f"{node.z_path_id}_PARTITION_ID", flash_area_num) + flash_area_num += 1 + + +def write_vanilla_props(node): + # Writes macros for any and all properties defined in the + # "properties" section of the binding for the node. + # + # This does generate macros for special properties as well, like + # regs, etc. Just let that be rather than bothering to add + # never-ending amounts of special case code here to skip special + # properties. This function's macros can't conflict with + # write_special_props() macros, because they're in different + # namespaces. Special cases aren't special enough to break the rules. 
+ + macro2val = {} + for prop_name, prop in node.props.items(): + prop_id = str2ident(prop_name) + macro = f"{node.z_path_id}_P_{prop_id}" + val = prop2value(prop) + if val is not None: + # DT_N__P_ + macro2val[macro] = val + + if prop.spec.type == 'string': + macro2val[macro + "_STRING_TOKEN"] = prop.val_as_token + macro2val[macro + "_STRING_UPPER_TOKEN"] = prop.val_as_token.upper() + + if prop.enum_index is not None: + # DT_N__P__ENUM_IDX + macro2val[macro + "_ENUM_IDX"] = prop.enum_index + spec = prop.spec + + if spec.enum_tokenizable: + as_token = prop.val_as_token + + # DT_N__P__ENUM_TOKEN + macro2val[macro + "_ENUM_TOKEN"] = as_token + + if spec.enum_upper_tokenizable: + # DT_N__P__ENUM_UPPER_TOKEN + macro2val[macro + "_ENUM_UPPER_TOKEN"] = as_token.upper() + + if "phandle" in prop.type: + macro2val.update(phandle_macros(prop, macro)) + elif "array" in prop.type: + # DT_N__P__IDX_ + # DT_N__P__IDX__EXISTS + for i, subval in enumerate(prop.val): + if isinstance(subval, str): + macro2val[macro + f"_IDX_{i}"] = quote_str(subval) + else: + macro2val[macro + f"_IDX_{i}"] = subval + macro2val[macro + f"_IDX_{i}_EXISTS"] = 1 + + if prop.type in FOREACH_PROP_ELEM_TYPES: + # DT_N__P__FOREACH_PROP_ELEM + macro2val[f"{macro}_FOREACH_PROP_ELEM(fn)"] = \ + ' \\\n\t'.join(f'fn(DT_{node.z_path_id}, {prop_id}, {i})' + for i in range(len(prop.val))) + + macro2val[f"{macro}_FOREACH_PROP_ELEM_VARGS(fn, ...)"] = \ + ' \\\n\t'.join(f'fn(DT_{node.z_path_id}, {prop_id}, {i},' + ' __VA_ARGS__)' + for i in range(len(prop.val))) + + plen = prop_len(prop) + if plen is not None: + # DT_N__P__LEN + macro2val[macro + "_LEN"] = plen + + macro2val[f"{macro}_EXISTS"] = 1 + + if macro2val: + out_comment("Generic property macros:") + for macro, val in macro2val.items(): + out_dt_define(macro, val) + else: + out_comment("(No generic property macros)") + + +def write_dep_info(node): + # Write dependency-related information about the node. 
+ + def fmt_dep_list(dep_list): + if dep_list: + # Sort the list by dependency ordinal for predictability. + sorted_list = sorted(dep_list, key=lambda node: node.dep_ordinal) + return "\\\n\t" + \ + " \\\n\t".join(f"{n.dep_ordinal}, /* {n.path} */" + for n in sorted_list) + else: + return "/* nothing */" + + out_comment("Node's dependency ordinal:") + out_dt_define(f"{node.z_path_id}_ORD", node.dep_ordinal) + + out_comment("Ordinals for what this node depends on directly:") + out_dt_define(f"{node.z_path_id}_REQUIRES_ORDS", + fmt_dep_list(node.depends_on)) + + out_comment("Ordinals for what depends directly on this node:") + out_dt_define(f"{node.z_path_id}_SUPPORTS_ORDS", + fmt_dep_list(node.required_by)) + + +def prop2value(prop): + # Gets the macro value for property 'prop', if there is + # a single well-defined C rvalue that it can be represented as. + # Returns None if there isn't one. + + if prop.type == "string": + return quote_str(prop.val) + + if prop.type == "int": + return prop.val + + if prop.type == "boolean": + return 1 if prop.val else 0 + + if prop.type in ["array", "uint8-array"]: + return list2init(f"{val} /* {hex(val)} */" for val in prop.val) + + if prop.type == "string-array": + return list2init(quote_str(val) for val in prop.val) + + # phandle, phandles, phandle-array, path, compound: nothing + return None + + +def prop_len(prop): + # Returns the property's length if and only if we should generate + # a _LEN macro for the property. Otherwise, returns None. + # + # This deliberately excludes ranges, dma-ranges, reg and interrupts. + # While they have array type, their lengths as arrays are + # basically nonsense semantically due to #address-cells and + # #size-cells for "reg", #interrupt-cells for "interrupts" + # and #address-cells, #size-cells and the #address-cells from the + # parent node for "ranges" and "dma-ranges". + # + # We have special purpose macros for the number of register blocks + # / interrupt specifiers. 
Excluding them from this list means + # DT_PROP_LEN(node_id, ...) fails fast at the devicetree.h layer + # with a build error. This forces users to switch to the right + # macros. + + if prop.type == "phandle": + return 1 + + if (prop.type in ["array", "uint8-array", "string-array", + "phandles", "phandle-array"] and + prop.name not in ["ranges", "dma-ranges", "reg", "interrupts"]): + return len(prop.val) + + return None + + +def phandle_macros(prop, macro): + # Returns a dict of macros for phandle or phandles property 'prop'. + # + # The 'macro' argument is the N__P_ bit. + # + # These are currently special because we can't serialize their + # values without using label properties, which we're trying to get + # away from needing in Zephyr. (Label properties are great for + # humans, but have drawbacks for code size and boot time.) + # + # The names look a bit weird to make it easier for devicetree.h + # to use the same macros for phandle, phandles, and phandle-array. + + ret = {} + + if prop.type == "phandle": + # A phandle is treated as a phandles with fixed length 1. + ret[f"{macro}"] = f"DT_{prop.val.z_path_id}" + ret[f"{macro}_IDX_0"] = f"DT_{prop.val.z_path_id}" + ret[f"{macro}_IDX_0_PH"] = f"DT_{prop.val.z_path_id}" + ret[f"{macro}_IDX_0_EXISTS"] = 1 + elif prop.type == "phandles": + for i, node in enumerate(prop.val): + ret[f"{macro}_IDX_{i}"] = f"DT_{node.z_path_id}" + ret[f"{macro}_IDX_{i}_PH"] = f"DT_{node.z_path_id}" + ret[f"{macro}_IDX_{i}_EXISTS"] = 1 + elif prop.type == "phandle-array": + for i, entry in enumerate(prop.val): + if entry is None: + # Unspecified element. The phandle-array at this index + # does not point at a ControllerAndData value, but + # subsequent indices in the array may. + ret[f"{macro}_IDX_{i}_EXISTS"] = 0 + continue + + ret.update(controller_and_data_macros(entry, i, macro)) + + return ret + + +def controller_and_data_macros(entry, i, macro): + # Helper procedure used by phandle_macros(). 
+ # + # Its purpose is to write the "controller" (i.e. label property of + # the phandle's node) and associated data macros for a + # ControllerAndData. + + ret = {} + data = entry.data + + # DT_N__P__IDX__EXISTS + ret[f"{macro}_IDX_{i}_EXISTS"] = 1 + # DT_N__P__IDX__PH + ret[f"{macro}_IDX_{i}_PH"] = f"DT_{entry.controller.z_path_id}" + # DT_N__P__IDX__VAL_ + for cell, val in data.items(): + ret[f"{macro}_IDX_{i}_VAL_{str2ident(cell)}"] = val + ret[f"{macro}_IDX_{i}_VAL_{str2ident(cell)}_EXISTS"] = 1 + + if not entry.name: + return ret + + name = str2ident(entry.name) + # DT_N__P__IDX__EXISTS + ret[f"{macro}_IDX_{i}_EXISTS"] = 1 + # DT_N__P__IDX__NAME + ret[f"{macro}_IDX_{i}_NAME"] = quote_str(entry.name) + # DT_N__P__NAME__PH + ret[f"{macro}_NAME_{name}_PH"] = f"DT_{entry.controller.z_path_id}" + # DT_N__P__NAME__EXISTS + ret[f"{macro}_NAME_{name}_EXISTS"] = 1 + # DT_N__P__NAME__VAL_ + for cell, val in data.items(): + cell_ident = str2ident(cell) + ret[f"{macro}_NAME_{name}_VAL_{cell_ident}"] = \ + f"DT_{macro}_IDX_{i}_VAL_{cell_ident}" + ret[f"{macro}_NAME_{name}_VAL_{cell_ident}_EXISTS"] = 1 + + return ret + + +def write_chosen(edt): + # Tree-wide information such as chosen nodes is printed here. + + out_comment("Chosen nodes\n") + chosen = {} + for name, node in edt.chosen_nodes.items(): + chosen[f"DT_CHOSEN_{str2ident(name)}"] = f"DT_{node.z_path_id}" + chosen[f"DT_CHOSEN_{str2ident(name)}_EXISTS"] = 1 + max_len = max(map(len, chosen), default=0) + for macro, value in chosen.items(): + out_define(macro, value, width=max_len) + + +def write_global_compat_info(edt): + # Tree-wide information related to each compatible, such as number + # of instances with status "okay", is printed here. 
+ + n_okay_macros = {} + for_each_macros = {} + compat2buses = defaultdict(list) # just for "okay" nodes + for compat, okay_nodes in edt.compat2okay.items(): + for node in okay_nodes: + bus = node.on_bus + if bus is not None and bus not in compat2buses[compat]: + compat2buses[compat].append(bus) + + ident = str2ident(compat) + n_okay_macros[f"DT_N_INST_{ident}_NUM_OKAY"] = len(okay_nodes) + + # Helpers for non-INST for-each macros that take node + # identifiers as arguments. + for_each_macros[f"DT_FOREACH_OKAY_{ident}(fn)"] = \ + " ".join(f"fn(DT_{node.z_path_id})" + for node in okay_nodes) + for_each_macros[f"DT_FOREACH_OKAY_VARGS_{ident}(fn, ...)"] = \ + " ".join(f"fn(DT_{node.z_path_id}, __VA_ARGS__)" + for node in okay_nodes) + + # Helpers for INST versions of for-each macros, which take + # instance numbers. We emit separate helpers for these because + # avoiding an intermediate node_id --> instance number + # conversion in the preprocessor helps to keep the macro + # expansions simpler. That hopefully eases debugging. 
+ for_each_macros[f"DT_FOREACH_OKAY_INST_{ident}(fn)"] = \ + " ".join(f"fn({edt.compat2nodes[compat].index(node)})" + for node in okay_nodes) + for_each_macros[f"DT_FOREACH_OKAY_INST_VARGS_{ident}(fn, ...)"] = \ + " ".join(f"fn({edt.compat2nodes[compat].index(node)}, __VA_ARGS__)" + for node in okay_nodes) + + for compat, nodes in edt.compat2nodes.items(): + for node in nodes: + if compat == "fixed-partitions": + for child in node.children.values(): + if "label" in child.props: + label = child.props["label"].val + macro = f"COMPAT_{str2ident(compat)}_LABEL_{str2ident(label)}" + val = f"DT_{child.z_path_id}" + + out_dt_define(macro, val) + out_dt_define(macro + "_EXISTS", 1) + + out_comment('Macros for compatibles with status "okay" nodes\n') + for compat, okay_nodes in edt.compat2okay.items(): + if okay_nodes: + out_define(f"DT_COMPAT_HAS_OKAY_{str2ident(compat)}", 1) + + out_comment('Macros for status "okay" instances of each compatible\n') + for macro, value in n_okay_macros.items(): + out_define(macro, value) + for macro, value in for_each_macros.items(): + out_define(macro, value) + + out_comment('Bus information for status "okay" nodes of each compatible\n') + for compat, buses in compat2buses.items(): + for bus in buses: + out_define( + f"DT_COMPAT_{str2ident(compat)}_BUS_{str2ident(bus)}", 1) + +def str2ident(s): + # Converts 's' to a form suitable for (part of) an identifier + + return re.sub('[-,.@/+]', '_', s.lower()) + + +def list2init(l): + # Converts 'l', a Python list (or iterable), to a C array initializer + + return "{" + ", ".join(l) + "}" + + +def out_dt_define(macro, val, width=None, deprecation_msg=None): + # Writes "#define DT_ " to the header file + # + # The macro will be left-justified to 'width' characters if that + # is specified, and the value will follow immediately after in + # that case. Otherwise, this function decides how to add + # whitespace between 'macro' and 'val'. 
+ # + # If a 'deprecation_msg' string is passed, the generated identifiers will + # generate a warning if used, via __WARN()). + # + # Returns the full generated macro for 'macro', with leading "DT_". + ret = "DT_" + macro + out_define(ret, val, width=width, deprecation_msg=deprecation_msg) + return ret + + +def out_define(macro, val, width=None, deprecation_msg=None): + # Helper for out_dt_define(). Outputs "#define ", + # adds a deprecation message if given, and allocates whitespace + # unless told not to. + + warn = fr' __WARN("{deprecation_msg}")' if deprecation_msg else "" + + if width: + s = f"#define {macro.ljust(width)}{warn} {val}" + else: + s = f"#define {macro}{warn} {val}" + + print(s, file=header_file) + + +def out_comment(s, blank_before=True): + # Writes 's' as a comment to the header and configuration file. 's' is + # allowed to have multiple lines. blank_before=True adds a blank line + # before the comment. + + if blank_before: + print(file=header_file) + + if "\n" in s: + # Format multi-line comments like + # + # /* + # * first line + # * second line + # * + # * empty line before this line + # */ + res = ["/*"] + for line in s.splitlines(): + # Avoid an extra space after '*' for empty lines. They turn red in + # Vim if space error checking is on, which is annoying. + res.append(" *" if not line.strip() else " * " + line) + res.append(" */") + print("\n".join(res), file=header_file) + else: + # Format single-line comments like + # + # /* foo bar */ + print("/* " + s + " */", file=header_file) + + +def escape(s): + # Backslash-escapes any double quotes and backslashes in 's' + + # \ must be escaped before " to avoid double escaping + return s.replace("\\", "\\\\").replace('"', '\\"') + + +def quote_str(s): + # Puts quotes around 's' and escapes any double quotes and + # backslashes within it + + return f'"{escape(s)}"' + + +def write_pickled_edt(edt, out_file): + # Writes the edt object in pickle format to out_file. 
+ + with open(out_file, 'wb') as f: + # Pickle protocol version 4 is the default as of Python 3.8 + # and was introduced in 3.4, so it is both available and + # recommended on all versions of Python that Zephyr supports + # (at time of writing, Python 3.6 was Zephyr's minimum + # version, and 3.8 the most recent CPython release). + # + # Using a common protocol version here will hopefully avoid + # reproducibility issues in different Python installations. + pickle.dump(edt, f, protocol=4) + + +def err(s): + raise Exception(s) + + +if __name__ == "__main__": + main() diff --git a/Tools/dts/gen_dts_cmake.py b/Tools/dts/gen_dts_cmake.py new file mode 100755 index 0000000000..f86d2a875e --- /dev/null +++ b/Tools/dts/gen_dts_cmake.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2021 Nordic Semiconductor ASA +# SPDX-License-Identifier: Apache-2.0 + +''' +This script uses edtlib and the devicetree data in the build directory +to generate a CMake file which contains devicetree data. + +That data can then be used in the rest of the build system. + +The generated CMake file looks like this: + + add_custom_target(devicetree_target) + set_target_properties(devicetree_target PROPERTIES + "DT_PROP|/soc|compatible" "vnd,soc;") + ... + +It defines a special CMake target, and saves various values in the +devicetree as CMake target properties. + +Be careful: + + "Property" here can refer to a CMake target property or a + DTS property. DTS property values are stored inside + CMake target properties, along with other devicetree data. + +The build system includes this generated file early on, so +devicetree values can be used at CMake processing time. 
+Access is not done directly, but with Zephyr CMake extension APIs,
+ for node in edt.nodes: + path = node.path + for alias in node.aliases: + cmake_props.append(f'"DT_ALIAS|{alias}" "{path}"') + + for node in edt.nodes: + cmake_props.append(f'"DT_NODE|{node.path}" TRUE') + + for label in node.labels: + cmake_props.append(f'"DT_NODELABEL|{label}" "{node.path}"') + + for item in node.props: + # We currently do not support phandles for edt -> cmake conversion. + if "phandle" not in node.props[item].type: + if "array" in node.props[item].type: + # Convert array to CMake list + cmake_value = '' + for val in node.props[item].val: + cmake_value = f'{cmake_value}{val};' + else: + cmake_value = node.props[item].val + + # Encode node's property 'item' as a CMake target property + # with a name like 'DT_PROP||'. + cmake_prop = f'DT_PROP|{node.path}|{item}' + cmake_props.append(f'"{cmake_prop}" "{cmake_value}"') + + if node.regs is not None: + cmake_props.append(f'"DT_REG|{node.path}|NUM" "{len(node.regs)}"') + cmake_addr = '' + cmake_size = '' + + for reg in node.regs: + if reg.addr is None: + cmake_addr = f'{cmake_addr}NONE;' + else: + cmake_addr = f'{cmake_addr}{hex(reg.addr)};' + + if reg.size is None: + cmake_size = f'{cmake_size}NONE;' + else: + cmake_size = f'{cmake_size}{hex(reg.size)};' + + cmake_props.append(f'"DT_REG|{node.path}|ADDR" "{cmake_addr}"') + cmake_props.append(f'"DT_REG|{node.path}|SIZE" "{cmake_size}"') + + with open(args.cmake_out, "w", encoding="utf-8") as cmake_file: + print('add_custom_target(devicetree_target)', file=cmake_file) + print(file=cmake_file) + + for prop in cmake_props: + print( + f'set_target_properties(devicetree_target PROPERTIES {prop})', + file=cmake_file + ) + + +if __name__ == "__main__": + main() diff --git a/Tools/dts/python-devicetree/.gitignore b/Tools/dts/python-devicetree/.gitignore new file mode 100644 index 0000000000..20574a3456 --- /dev/null +++ b/Tools/dts/python-devicetree/.gitignore @@ -0,0 +1,7 @@ +dist/ +src/devicetree.egg-info/ +build/ +devicetree.egg-info/ +__pycache__/ +.tox/ 
+doc/build/ diff --git a/Tools/dts/python-devicetree/requirements.txt b/Tools/dts/python-devicetree/requirements.txt new file mode 100644 index 0000000000..f827ad8d1d --- /dev/null +++ b/Tools/dts/python-devicetree/requirements.txt @@ -0,0 +1 @@ +sphinx_rtd_theme # docs diff --git a/Tools/dts/python-devicetree/setup.py b/Tools/dts/python-devicetree/setup.py new file mode 100644 index 0000000000..590ba31510 --- /dev/null +++ b/Tools/dts/python-devicetree/setup.py @@ -0,0 +1,42 @@ +# Copyright (c) 2021, Nordic Semiconductor ASA +# +# SPDX-License-Identifier: Apache-2.0 + +import setuptools + +long_description = ''' +Placeholder +=========== + +This is just a placeholder for moving Zephyr's devicetree libraries +to PyPI. +''' + +version = '0.0.1' + +setuptools.setup( + # TBD, just use these for now. + author='Zephyr Project', + author_email='devel@lists.zephyrproject.org', + + name='devicetree', + version=version, + description='Python libraries for devicetree', + long_description=long_description, + # http://docutils.sourceforge.net/FAQ.html#what-s-the-official-mime-type-for-restructuredtext-data + long_description_content_type="text/x-rst", + url='https://github.com/zephyrproject-rtos/python-devicetree', + packages=setuptools.find_packages(where='src'), + package_dir={'': 'src'}, + classifiers=[ + 'Programming Language :: Python :: 3 :: Only', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: POSIX :: Linux', + 'Operating System :: MacOS :: MacOS X', + 'Operating System :: Microsoft :: Windows', + ], + install_requires=[ + 'PyYAML>=5.1', + ], + python_requires='>=3.6', +) diff --git a/Tools/dts/python-devicetree/src/devicetree/__init__.py b/Tools/dts/python-devicetree/src/devicetree/__init__.py new file mode 100644 index 0000000000..e9a568330b --- /dev/null +++ b/Tools/dts/python-devicetree/src/devicetree/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2021 Nordic Semiconductor ASA +# SPDX-License-Identifier: Apache-2.0 + +__all__ = 
['edtlib', 'dtlib'] diff --git a/Tools/dts/python-devicetree/src/devicetree/dtlib.py b/Tools/dts/python-devicetree/src/devicetree/dtlib.py new file mode 100644 index 0000000000..dc337a671f --- /dev/null +++ b/Tools/dts/python-devicetree/src/devicetree/dtlib.py @@ -0,0 +1,2022 @@ +# Copyright (c) 2019, Nordic Semiconductor +# SPDX-License-Identifier: BSD-3-Clause + +# Tip: You can view just the documentation with 'pydoc3 devicetree.dtlib' + +""" +A library for extracting information from .dts (devicetree) files. See the +documentation for the DT and Node classes for more information. + +The top-level entry point of the library is the DT class. DT.__init__() takes a +.dts file to parse and a list of directories to search for any /include/d +files. +""" + +import collections +import enum +import errno +import os +import re +import string +import sys +import textwrap +from typing import Any, Dict, Iterable, List, \ + NamedTuple, NoReturn, Optional, Tuple, Union + +# NOTE: tests/test_dtlib.py is the test suite for this library. + +class DTError(Exception): + "Exception raised for devicetree-related errors" + +class Node: + r""" + Represents a node in the devicetree ('node-name { ... };'). + + These attributes are available on Node instances: + + name: + The name of the node (a string). + + unit_addr: + The portion after the '@' in the node's name, or the empty string if the + name has no '@' in it. + + Note that this is a string. Run int(node.unit_addr, 16) to get an + integer. + + props: + A collections.OrderedDict that maps the properties defined on the node to + their values. 'props' is indexed by property name (a string), and values + are Property objects. + + To convert property values to Python numbers or strings, use + dtlib.to_num(), dtlib.to_nums(), or dtlib.to_string(). 
+ + Property values are represented as 'bytes' arrays to support the full + generality of DTS, which allows assignments like + + x = "foo", < 0x12345678 >, [ 9A ]; + + This gives x the value b"foo\0\x12\x34\x56\x78\x9A". Numbers in DTS are + stored in big-endian format. + + nodes: + A collections.OrderedDict containing the subnodes of the node, indexed by + name. + + labels: + A list with all labels pointing to the node, in the same order as the + labels appear, but with duplicates removed. + + 'label_1: label_2: node { ... };' gives 'labels' the value + ["label_1", "label_2"]. + + parent: + The parent Node of the node. 'None' for the root node. + + path: + The path to the node as a string, e.g. "/foo/bar". + + dt: + The DT instance this node belongs to. + """ + + # + # Public interface + # + + def __init__(self, name: str, parent: Optional['Node'], dt: 'DT'): + """ + Node constructor. Not meant to be called directly by clients. + """ + self.name = name + self.parent = parent + self.dt = dt + + if name.count("@") > 1: + dt._parse_error("multiple '@' in node name") + if not name == "/": + for char in name: + if char not in _nodename_chars: + dt._parse_error(f"{self.path}: bad character '{char}' " + "in node name") + + self.props: Dict[str, 'Property'] = collections.OrderedDict() + self.nodes: Dict[str, 'Node'] = collections.OrderedDict() + self.labels: List[str] = [] + self._omit_if_no_ref = False + self._is_referenced = False + + @property + def unit_addr(self) -> str: + """ + See the class documentation. + """ + return self.name.partition("@")[2] + + @property + def path(self) -> str: + """ + See the class documentation. + """ + node_names = [] + + cur = self + while cur.parent: + node_names.append(cur.name) + cur = cur.parent + + return "/" + "/".join(reversed(node_names)) + + def node_iter(self) -> Iterable['Node']: + """ + Returns a generator for iterating over the node and its children, + recursively. 
+ + For example, this will iterate over all nodes in the tree (like + dt.node_iter()). + + for node in dt.root.node_iter(): + ... + """ + yield self + for node in self.nodes.values(): + yield from node.node_iter() + + def _get_prop(self, name: str) -> 'Property': + # Returns the property named 'name' on the node, creating it if it + # doesn't already exist + + prop = self.props.get(name) + if not prop: + prop = Property(self, name) + self.props[name] = prop + return prop + + def _del(self) -> None: + # Removes the node from the tree + self.parent.nodes.pop(self.name) # type: ignore + + def __str__(self): + """ + Returns a DTS representation of the node. Called automatically if the + node is print()ed. + """ + s = "".join(label + ": " for label in self.labels) + + s += f"{self.name} {{\n" + + for prop in self.props.values(): + s += "\t" + str(prop) + "\n" + + for child in self.nodes.values(): + s += textwrap.indent(child.__str__(), "\t") + "\n" + + s += "};" + + return s + + def __repr__(self): + """ + Returns some information about the Node instance. Called automatically + if the Node instance is evaluated. + """ + return f"" + +# See Property.type +class Type(enum.IntEnum): + EMPTY = 0 + BYTES = 1 + NUM = 2 + NUMS = 3 + STRING = 4 + STRINGS = 5 + PATH = 6 + PHANDLE = 7 + PHANDLES = 8 + PHANDLES_AND_NUMS = 9 + COMPOUND = 10 + +class _MarkerType(enum.IntEnum): + # Types of markers in property values + + # References + PATH = 0 # &foo + PHANDLE = 1 # <&foo> + LABEL = 2 # foo: <1 2 3> + + # Start of data blocks of specific type + UINT8 = 3 # [00 01 02] (and also used for /incbin/) + UINT16 = 4 # /bits/ 16 <1 2 3> + UINT32 = 5 # <1 2 3> + UINT64 = 6 # /bits/ 64 <1 2 3> + STRING = 7 # "foo" + +class Property: + """ + Represents a property ('x = ...'). + + These attributes are available on Property instances: + + name: + The name of the property (a string). + + value: + The value of the property, as a 'bytes' string. 
Numbers are stored in + big-endian format, and strings are null-terminated. Putting multiple + comma-separated values in an assignment (e.g., 'x = < 1 >, "foo"') will + concatenate the values. + + See the to_*() methods for converting the value to other types. + + type: + The type of the property, inferred from the syntax used in the + assignment. This is one of the following constants (with example + assignments): + + Assignment | Property.type + ----------------------------+------------------------ + foo; | dtlib.Type.EMPTY + foo = []; | dtlib.Type.BYTES + foo = [01 02]; | dtlib.Type.BYTES + foo = /bits/ 8 <1>; | dtlib.Type.BYTES + foo = <1>; | dtlib.Type.NUM + foo = <>; | dtlib.Type.NUMS + foo = <1 2 3>; | dtlib.Type.NUMS + foo = <1 2>, <3>; | dtlib.Type.NUMS + foo = "foo"; | dtlib.Type.STRING + foo = "foo", "bar"; | dtlib.Type.STRINGS + foo = <&l>; | dtlib.Type.PHANDLE + foo = <&l1 &l2 &l3>; | dtlib.Type.PHANDLES + foo = <&l1 &l2>, <&l3>; | dtlib.Type.PHANDLES + foo = <&l1 1 2 &l2 3 4>; | dtlib.Type.PHANDLES_AND_NUMS + foo = <&l1 1 2>, <&l2 3 4>; | dtlib.Type.PHANDLES_AND_NUMS + foo = &l; | dtlib.Type.PATH + *Anything else* | dtlib.Type.COMPOUND + + *Anything else* includes properties mixing phandle (<&label>) and node + path (&label) references with other data. + + Data labels in the property value do not influence the type. + + labels: + A list with all labels pointing to the property, in the same order as the + labels appear, but with duplicates removed. + + 'label_1: label2: x = ...' gives 'labels' the value + {"label_1", "label_2"}. + + offset_labels: + A dictionary that maps any labels within the property's value to their + offset, in bytes. For example, 'x = < 0 label_1: 1 label_2: >' gives + 'offset_labels' the value {"label_1": 4, "label_2": 8}. + + Iteration order will match the order of the labels on Python versions + that preserve dict insertion order. + + node: + The Node the property is on. 
+ """ + + # + # Public interface + # + + def __init__(self, node: Node, name: str): + if "@" in name: + node.dt._parse_error("'@' is only allowed in node names") + + self.name = name + self.node = node + self.value = b"" + self.labels: List[str] = [] + self._label_offset_lst: List[Tuple[str, int]] = [] + # We have to wait to set this until later, when we've got + # the entire tree. + self.offset_labels: Dict[str, int] = {} + + # A list of [offset, label, type] lists (sorted by offset), + # giving the locations of references within the value. 'type' + # is either _MarkerType.PATH, for a node path reference, + # _MarkerType.PHANDLE, for a phandle reference, or + # _MarkerType.LABEL, for a label on/within data. Node paths + # and phandles need to be patched in after parsing. + self._markers: List[List] = [] + + def to_num(self, signed=False) -> int: + """ + Returns the value of the property as a number. + + Raises DTError if the property was not assigned with this syntax (has + Property.type Type.NUM): + + foo = < 1 >; + + signed (default: False): + If True, the value will be interpreted as signed rather than + unsigned. + """ + if self.type is not Type.NUM: + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = < (number) >;', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + return int.from_bytes(self.value, "big", signed=signed) + + def to_nums(self, signed=False) -> List[int]: + """ + Returns the value of the property as a list of numbers. + + Raises DTError if the property was not assigned with this syntax (has + Property.type Type.NUM or Type.NUMS): + + foo = < 1 2 ... >; + + signed (default: False): + If True, the values will be interpreted as signed rather than + unsigned. + """ + if self.type not in (Type.NUM, Type.NUMS): + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = < (number) (number) ... 
>;', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + return [int.from_bytes(self.value[i:i + 4], "big", signed=signed) + for i in range(0, len(self.value), 4)] + + def to_bytes(self) -> bytes: + """ + Returns the value of the property as a raw 'bytes', like + Property.value, except with added type checking. + + Raises DTError if the property was not assigned with this syntax (has + Property.type Type.BYTES): + + foo = [ 01 ... ]; + """ + if self.type is not Type.BYTES: + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = [ (byte) (byte) ... ];', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + return self.value + + def to_string(self) -> str: + """ + Returns the value of the property as a string. + + Raises DTError if the property was not assigned with this syntax (has + Property.type Type.STRING): + + foo = "string"; + + This function might also raise UnicodeDecodeError if the string is + not valid UTF-8. + """ + if self.type is not Type.STRING: + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = \"string\";', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + try: + ret = self.value.decode("utf-8")[:-1] # Strip null + except UnicodeDecodeError: + _err(f"value of property '{self.name}' ({self.value!r}) " + f"on {self.node.path} in {self.node.dt.filename} " + "is not valid UTF-8") + + return ret # The separate 'return' appeases the type checker. + + def to_strings(self) -> List[str]: + """ + Returns the value of the property as a list of strings. + + Raises DTError if the property was not assigned with this syntax (has + Property.type Type.STRING or Type.STRINGS): + + foo = "string", "string", ... ; + + Also raises DTError if any of the strings are not valid UTF-8. 
+ """ + if self.type not in (Type.STRING, Type.STRINGS): + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = \"string\", \"string\", ... ;', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + try: + ret = self.value.decode("utf-8").split("\0")[:-1] + except UnicodeDecodeError: + _err(f"value of property '{self.name}' ({self.value!r}) " + f"on {self.node.path} in {self.node.dt.filename} " + "is not valid UTF-8") + + return ret # The separate 'return' appeases the type checker. + + def to_node(self) -> Node: + """ + Returns the Node the phandle in the property points to. + + Raises DTError if the property was not assigned with this syntax (has + Property.type Type.PHANDLE). + + foo = < &bar >; + """ + if self.type is not Type.PHANDLE: + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = < &foo >;', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + return self.node.dt.phandle2node[int.from_bytes(self.value, "big")] + + def to_nodes(self) -> List[Node]: + """ + Returns a list with the Nodes the phandles in the property point to. + + Raises DTError if the property value contains anything other than + phandles. All of the following are accepted: + + foo = < > + foo = < &bar >; + foo = < &bar &baz ... >; + foo = < &bar ... >, < &baz ... >; + """ + def type_ok(): + if self.type in (Type.PHANDLE, Type.PHANDLES): + return True + # Also accept 'foo = < >;' + return self.type is Type.NUMS and not self.value + + if not type_ok(): + _err("expected property '{0}' on {1} in {2} to be assigned with " + "'{0} = < &foo &bar ... >;', not '{3}'" + .format(self.name, self.node.path, + self.node.dt.filename, self)) + + return [self.node.dt.phandle2node[int.from_bytes(self.value[i:i + 4], + "big")] + for i in range(0, len(self.value), 4)] + + def to_path(self) -> Node: + """ + Returns the Node referenced by the path stored in the property. 
+ + Raises DTError if the property was not assigned with either of these + syntaxes (has Property.type Type.PATH or Type.STRING): + + foo = &bar; + foo = "/bar"; + + For the second case, DTError is raised if the path does not exist. + """ + if self.type not in (Type.PATH, Type.STRING): + _err("expected property '{0}' on {1} in {2} to be assigned with " + "either '{0} = &foo' or '{0} = \"/path/to/node\"', not '{3}'" + .format(self.name, self.node.path, self.node.dt.filename, + self)) + + try: + path = self.value.decode("utf-8")[:-1] + except UnicodeDecodeError: + _err(f"value of property '{self.name}' ({self.value!r}) " + f"on {self.node.path} in {self.node.dt.filename} " + "is not valid UTF-8") + + try: + ret = self.node.dt.get_node(path) + except DTError: + _err(f"property '{self.name}' on {self.node.path} in " + f"{self.node.dt.filename} points to the non-existent node " + f'"{path}"') + + return ret # The separate 'return' appeases the type checker. + + @property + def type(self) -> int: + """ + See the class docstring. + """ + # Data labels (e.g. 'foo = label: <3>') are irrelevant, so filter them + # out + types = [marker[1] for marker in self._markers + if marker[1] != _MarkerType.LABEL] + + if not types: + return Type.EMPTY + + if types == [_MarkerType.UINT8]: + return Type.BYTES + + if types == [_MarkerType.UINT32]: + return Type.NUM if len(self.value) == 4 else Type.NUMS + + # Treat 'foo = <1 2 3>, <4 5>, ...' 
as Type.NUMS too + if set(types) == {_MarkerType.UINT32}: + return Type.NUMS + + if set(types) == {_MarkerType.STRING}: + return Type.STRING if len(types) == 1 else Type.STRINGS + + if types == [_MarkerType.PATH]: + return Type.PATH + + if types == [_MarkerType.UINT32, _MarkerType.PHANDLE] and \ + len(self.value) == 4: + return Type.PHANDLE + + if set(types) == {_MarkerType.UINT32, _MarkerType.PHANDLE}: + if len(self.value) == 4*types.count(_MarkerType.PHANDLE): + # Array with just phandles in it + return Type.PHANDLES + # Array with both phandles and numbers + return Type.PHANDLES_AND_NUMS + + return Type.COMPOUND + + def __str__(self): + s = "".join(label + ": " for label in self.labels) + self.name + if not self.value: + return s + ";" + + s += " =" + + for i, (pos, marker_type, ref) in enumerate(self._markers): + if i < len(self._markers) - 1: + next_marker = self._markers[i + 1] + else: + next_marker = None + + # End of current marker + end = next_marker[0] if next_marker else len(self.value) + + if marker_type is _MarkerType.STRING: + # end - 1 to strip off the null terminator + s += f' "{_decode_and_escape(self.value[pos:end - 1])}"' + if end != len(self.value): + s += "," + elif marker_type is _MarkerType.PATH: + s += " &" + ref + if end != len(self.value): + s += "," + else: + # <> or [] + + if marker_type is _MarkerType.LABEL: + s += f" {ref}:" + elif marker_type is _MarkerType.PHANDLE: + s += " &" + ref + pos += 4 + # Subtle: There might be more data between the phandle and + # the next marker, so we can't 'continue' here + else: # marker_type is _MarkerType.UINT* + elm_size = _TYPE_TO_N_BYTES[marker_type] + s += _N_BYTES_TO_START_STR[elm_size] + + while pos != end: + num = int.from_bytes(self.value[pos:pos + elm_size], + "big") + if elm_size == 1: + s += f" {num:02X}" + else: + s += f" {hex(num)}" + + pos += elm_size + + if pos != 0 and \ + (not next_marker or + next_marker[1] not in (_MarkerType.PHANDLE, _MarkerType.LABEL)): + + s += 
_N_BYTES_TO_END_STR[elm_size] + if pos != len(self.value): + s += "," + + return s + ";" + + + def __repr__(self): + return f"" + + # + # Internal functions + # + + def _add_marker(self, marker_type: _MarkerType, data: Any = None): + # Helper for registering markers in the value that are processed after + # parsing. See _fixup_props(). 'marker_type' identifies the type of + # marker, and 'data' has any optional data associated with the marker. + + # len(self.value) gives the current offset. This function is called + # while the value is built. We use a list instead of a tuple to be able + # to fix up offsets later (they might increase if the value includes + # path references, e.g. 'foo = &bar, <3>;', which are expanded later). + self._markers.append([len(self.value), marker_type, data]) + + # For phandle references, add a dummy value with the same length as a + # phandle. This is handy for the length check in _register_phandles(). + if marker_type is _MarkerType.PHANDLE: + self.value += b"\0\0\0\0" + +class _T(enum.IntEnum): + # Token IDs used by the DT lexer. + + # These values must be contiguous and start from 1. + INCLUDE = 1 + LINE = 2 + STRING = 3 + DTS_V1 = 4 + PLUGIN = 5 + MEMRESERVE = 6 + BITS = 7 + DEL_PROP = 8 + DEL_NODE = 9 + OMIT_IF_NO_REF = 10 + LABEL = 11 + CHAR_LITERAL = 12 + REF = 13 + INCBIN = 14 + SKIP = 15 + EOF = 16 + + # These values must be larger than the above contiguous range. + NUM = 17 + PROPNODENAME = 18 + MISC = 19 + BYTE = 20 + BAD = 21 + +class _FileStackElt(NamedTuple): + # Used for maintaining the /include/ stack. + + filename: str + lineno: int + contents: str + pos: int + +_TokVal = Union[int, str] + +class _Token(NamedTuple): + id: int + val: _TokVal + + def __repr__(self): + id_repr = _T(self.id).name + return f'Token(id=_T.{id_repr}, val={repr(self.val)})' + +class DT: + """ + Represents a devicetree parsed from a .dts file (or from many files, if the + .dts file /include/s other files). 
Creating many instances of this class is + fine. The library has no global state. + + These attributes are available on DT instances: + + root: + A Node instance representing the root (/) node. + + alias2node: + A dictionary that maps maps alias strings (from /aliases) to Node + instances + + label2node: + A dictionary that maps each node label (a string) to the Node instance + for the node. + + label2prop: + A dictionary that maps each property label (a string) to a Property + instance. + + label2prop_offset: + A dictionary that maps each label (a string) within a property value + (e.g., 'x = label_1: < 1 label2: 2 >;') to a (prop, offset) tuple, where + 'prop' is a Property instance and 'offset' the byte offset (0 for label_1 + and 4 for label_2 in the example). + + phandle2node: + A dictionary that maps each phandle (a number) to a Node instance. + + memreserves: + A list of (labels, address, length) tuples for the /memreserve/s in the + .dts file, in the same order as they appear in the file. + + 'labels' is a possibly empty set with all labels preceding the memreserve + (e.g., 'label1: label2: /memreserve/ ...'). 'address' and 'length' are + numbers. + + filename: + The filename passed to the DT constructor. + """ + + # + # Public interface + # + + def __init__(self, filename: str, include_path: Iterable[str] = (), + force: bool = False): + """ + Parses a DTS file to create a DT instance. Raises OSError if 'filename' + can't be opened, and DTError for any parse errors. + + filename: + Path to the .dts file to parse. + + include_path: + An iterable (e.g. list or tuple) containing paths to search for + /include/d and /incbin/'d files. By default, files are only looked up + relative to the .dts file that contains the /include/ or /incbin/. + + force: + Try not to raise DTError even if the input tree has errors. + For experimental use; results not guaranteed. 
+ """ + self.filename = filename + self._include_path = list(include_path) + self._force = force + + with open(filename, encoding="utf-8") as f: + self._file_contents = f.read() + + self._tok_i = self._tok_end_i = 0 + self._filestack: List[_FileStackElt] = [] + + self.alias2node: Dict[str, Node] = {} + + self._lexer_state: int = _DEFAULT + self._saved_token: Optional[_Token] = None + + self._lineno: int = 1 + + self._root: Optional[Node] = None + + self._parse_dt() + + self._register_phandles() + self._fixup_props() + self._register_aliases() + self._remove_unreferenced() + self._register_labels() + + @property + def root(self) -> Node: + """ + See the class documentation. + """ + # This is necessary because mypy can't tell that we never + # treat self._root as a non-None value until it's initialized + # properly in _parse_dt(). + return self._root # type: ignore + + def get_node(self, path: str) -> Node: + """ + Returns the Node instance for the node with path or alias 'path' (a + string). Raises DTError if the path or alias doesn't exist. + + For example, both dt.get_node("/foo/bar") and dt.get_node("bar-alias") + will return the 'bar' node below: + + /dts-v1/; + + / { + foo { + bar_label: bar { + baz { + }; + }; + }; + + aliases { + bar-alias = &bar-label; + }; + }; + + Fetching subnodes via aliases is supported: + dt.get_node("bar-alias/baz") returns the 'baz' node. + """ + if path.startswith("/"): + return _root_and_path_to_node(self.root, path, path) + + # Path does not start with '/'. First component must be an alias. + alias, _, rest = path.partition("/") + if alias not in self.alias2node: + _err(f"no alias '{alias}' found -- did you forget the leading " + "'/' in the node path?") + + return _root_and_path_to_node(self.alias2node[alias], rest, path) + + def has_node(self, path: str) -> bool: + """ + Returns True if the path or alias 'path' exists. See Node.get_node(). 
+ """ + try: + self.get_node(path) + return True + except DTError: + return False + + def node_iter(self) -> Iterable[Node]: + """ + Returns a generator for iterating over all nodes in the devicetree. + + For example, this will print the name of each node that has a property + called 'foo': + + for node in dt.node_iter(): + if "foo" in node.props: + print(node.name) + """ + yield from self.root.node_iter() + + def __str__(self): + """ + Returns a DTS representation of the devicetree. Called automatically if + the DT instance is print()ed. + """ + s = "/dts-v1/;\n\n" + + if self.memreserves: + for labels, address, offset in self.memreserves: + # List the labels in a consistent order to help with testing + for label in labels: + s += f"{label}: " + s += f"/memreserve/ {address:#018x} {offset:#018x};\n" + s += "\n" + + return s + str(self.root) + + def __repr__(self): + """ + Returns some information about the DT instance. Called automatically if + the DT instance is evaluated. + """ + return f"DT(filename='{self.filename}', " \ + f"include_path={self._include_path})" + + # + # Parsing + # + + def _parse_dt(self): + # Top-level parsing loop + + self._parse_header() + self._parse_memreserves() + + while True: + tok = self._next_token() + + if tok.val == "/": + # '/ { ... };', the root node + if not self._root: + self._root = Node(name="/", parent=None, dt=self) + self._parse_node(self.root) + + elif tok.id in (_T.LABEL, _T.REF): + # '&foo { ... };' or 'label: &foo { ... };'. The C tools only + # support a single label here too. 

                if tok.id == _T.LABEL:
                    label = tok.val
                    tok = self._next_token()
                    if tok.id != _T.REF:
                        self._parse_error("expected label reference (&foo)")
                else:
                    label = None

                try:
                    node = self._ref2node(tok.val)
                except DTError as e:
                    self._parse_error(e)
                node = self._parse_node(node)

                if label:
                    _append_no_dup(node.labels, label)

            elif tok.id == _T.DEL_NODE:
                # '/delete-node/ &foo;'
                self._next_ref2node()._del()
                self._expect_token(";")

            elif tok.id == _T.OMIT_IF_NO_REF:
                # '/omit-if-no-ref/ &foo;' -- node is dropped later if nothing
                # references it
                self._next_ref2node()._omit_if_no_ref = True
                self._expect_token(";")

            elif tok.id == _T.EOF:
                if not self._root:
                    self._parse_error("no root node defined")
                return

            else:
                self._parse_error("expected '/' or label reference (&foo)")

    def _parse_header(self):
        # Parses /dts-v1/ (expected) and /plugin/ (unsupported) at the start of
        # files. There may be multiple /dts-v1/ at the start of a file.

        has_dts_v1 = False

        while self._peek_token().id == _T.DTS_V1:
            has_dts_v1 = True
            self._next_token()
            self._expect_token(";")
            # /plugin/ always comes after /dts-v1/
            if self._peek_token().id == _T.PLUGIN:
                self._parse_error("/plugin/ is not supported")

        if not has_dts_v1:
            self._parse_error("expected '/dts-v1/;' at start of file")

    def _parse_memreserves(self):
        # Parses /memreserve/, which appears after /dts-v1/

        self.memreserves = []
        while True:
            # Labels before /memreserve/
            labels = []
            while self._peek_token().id == _T.LABEL:
                _append_no_dup(labels, self._next_token().val)

            if self._peek_token().id == _T.MEMRESERVE:
                self._next_token()
                # /memreserve/ <address> <length>;
                self.memreserves.append(
                    (labels, self._eval_prim(), self._eval_prim()))
                self._expect_token(";")
            elif labels:
                self._parse_error("expected /memreserve/ after labels at "
                                  "beginning of file")
            else:
                return

    def _parse_node(self, node):
        # Parses the '{ ... };' part of 'node-name { ... };'. Returns the new
        # Node.

        self._expect_token("{")
        while True:
            labels, omit_if_no_ref = self._parse_propnode_labels()
            tok = self._next_token()

            if tok.id == _T.PROPNODENAME:
                if self._peek_token().val == "{":
                    # ' { ...', expect node

                    # Fetch the existing node if it already exists. This
                    # happens when overriding nodes.
                    child = node.nodes.get(tok.val) or \
                        Node(name=tok.val, parent=node, dt=self)

                    for label in labels:
                        _append_no_dup(child.labels, label)

                    if omit_if_no_ref:
                        child._omit_if_no_ref = True

                    node.nodes[child.name] = child
                    self._parse_node(child)

                else:
                    # Not ' { ...', expect property assignment

                    if omit_if_no_ref:
                        self._parse_error(
                            "/omit-if-no-ref/ can only be used on nodes")

                    prop = node._get_prop(tok.val)

                    if self._check_token("="):
                        self._parse_assignment(prop)
                    elif not self._check_token(";"):
                        # ';' is for an empty property, like 'foo;'
                        self._parse_error("expected '{', '=', or ';'")

                    for label in labels:
                        _append_no_dup(prop.labels, label)

            elif tok.id == _T.DEL_NODE:
                # '/delete-node/ name;' inside a node body
                tok2 = self._next_token()
                if tok2.id != _T.PROPNODENAME:
                    self._parse_error("expected node name")
                if tok2.val in node.nodes:
                    node.nodes[tok2.val]._del()
                self._expect_token(";")

            elif tok.id == _T.DEL_PROP:
                # '/delete-property/ name;' -- removing a nonexistent property
                # is a no-op (pop with default)
                tok2 = self._next_token()
                if tok2.id != _T.PROPNODENAME:
                    self._parse_error("expected property name")
                node.props.pop(tok2.val, None)
                self._expect_token(";")

            elif tok.val == "}":
                self._expect_token(";")
                return node

            else:
                self._parse_error("expected node name, property name, or '}'")

    def _parse_propnode_labels(self):
        # _parse_node() helpers for parsing labels and /omit-if-no-ref/s before
        # nodes and properties. Returns a (