diff --git a/Documentation/core-api/packing.rst b/Documentation/core-api/packing.rst index 821691f23c541..5f729a9d4e87b 100644 --- a/Documentation/core-api/packing.rst +++ b/Documentation/core-api/packing.rst @@ -235,3 +235,61 @@ programmer against incorrect API use. The errors are not expected to occur during runtime, therefore it is reasonable for xxx_packing() to return void and simply swallow those errors. Optionally it can dump stack or print the error description. + +Packed Fields +------------- + +Drivers are encouraged to use the ``pack_fields()`` and ``unpack_fields()`` +APIs over using ``pack()``, ``unpack()``, or ``packing()``. + +These APIs use field definitions in arrays of ``struct packed_field_s`` or +``struct packed_field_m``, stored as ``.rodata``. This significantly reduces +the code footprint required to pack or unpack many fields. In addition, +sanity checks on the field definitions are handled at compile time with +``BUILD_BUG_ON`` rather than only when the offending code is executed. + +The ``pack_fields()`` and ``unpack_fields()`` macros automatically select the +appropriate function at compile time based on the type of the fields array +passed in. + +It is recommended, but not required, that you wrap your packed buffer into a +structured type with a fixed size. This generally makes it easier for the +compiler to enforce that the correct size buffer is used. + +Here is an example of how to use the fields APIs: + +.. code-block:: c + + struct data { + u64 field3; + u32 field4; + u16 field1; + u8 field2; + }; + + #define SIZE 13 + + typedef struct __packed { u8 buf[SIZE]; } packed_buf_t; + + static const struct packed_field_s fields[] = { + PACKED_FIELD(100, 90, struct data, field1), + PACKED_FIELD(90, 87, struct data, field2), + PACKED_FIELD(86, 30, struct data, field3), + PACKED_FIELD(29, 0, struct data, field4), + }; + + void unpack_your_data(const packed_buf_t *buf, struct data *unpacked) + { + BUILD_BUG_ON(sizeof(*buf) != SIZE); + + unpack_fields(buf, sizeof(*buf), unpacked, fields, + QUIRK_LITTLE_ENDIAN); + } + + void pack_your_data(const struct data *unpacked, packed_buf_t *buf) + { + BUILD_BUG_ON(sizeof(*buf) != SIZE); + + pack_fields(buf, sizeof(*buf), unpacked, fields, + QUIRK_LITTLE_ENDIAN); + } diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index b9d04ee6dac14..621cb9575c8f7 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -6,16 +6,17 @@ Bare UDP Tunnelling Module Documentation There are various L3 encapsulation standards using UDP being discussed to leverage the UDP based load balancing capability of different networks. -MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them. +MPLSoUDP (https://tools.ietf.org/html/rfc7510) is one among them. The Bareudp tunnel module provides a generic L3 encapsulation support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel. Special Handling ---------------- + The bareudp device supports special handling for MPLS & IP as they can have multiple ethertypes. -MPLS procotcol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast). +The MPLS protocol can have ethertypes ETH_P_MPLS_UC (unicast) & ETH_P_MPLS_MC (multicast). IP protocol can have ethertypes ETH_P_IP (v4) & ETH_P_IPV6 (v6). This special handling can be enabled only for ethertypes ETH_P_IP & ETH_P_MPLS_UC with a flag called multiproto mode. @@ -52,7 +53,7 @@ be enabled explicitly with the "multiproto" flag. 
3) Device Usage The bareudp device could be used along with OVS or flower filter in TC. -The OVS or TC flower layer must set the tunnel information in SKB dst field before -sending packet buffer to the bareudp device for transmission. On reception the -bareudp device extracts and stores the tunnel information in SKB dst field before +The OVS or TC flower layer must set the tunnel information in the SKB dst field before +sending the packet buffer to the bareudp device for transmission. On reception, the +bareudp device extracts and stores the tunnel information in the SKB dst field before passing the packet buffer to the network stack. diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst index d55c2a22ec7af..94c4680fdf3e7 100644 --- a/Documentation/networking/netconsole.rst +++ b/Documentation/networking/netconsole.rst @@ -124,7 +124,7 @@ To remove a target:: The interface exposes these parameters of a netconsole target to userspace: - ============== ================================= ============ + =============== ================================= ============ enabled Is this target currently enabled? (read-write) extended Extended mode enabled (read-write) release Prepend kernel release to message (read-write) @@ -135,7 +135,8 @@ The interface exposes these parameters of a netconsole target to userspace: remote_ip Remote agent's IP address (read-write) local_mac Local interface's MAC address (read-only) remote_mac Remote agent's MAC address (read-write) - ============== ================================= ============ + transmit_errors Number of packet send errors (read-only) + =============== ================================= ============ The "enabled" attribute is also used to control whether the parameters of a target can be updated or not -- you can modify the parameters of only diff --git a/MAINTAINERS b/MAINTAINERS index 1e930c7a58b13..d911c9b42956e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -16267,6 +16267,7 @@ F: Documentation/devicetree/bindings/net/ F: Documentation/networking/net_cachelines/net_device.rst F: drivers/connector/ F: drivers/net/ +F: drivers/ptp/ F: include/dt-bindings/net/ F: include/linux/cn_proc.h F: include/linux/etherdevice.h @@ -17633,8 +17634,10 @@ L: netdev@vger.kernel.org S: Supported F: Documentation/core-api/packing.rst F: include/linux/packing.h +F: include/linux/packing_types.h F: lib/packing.c F: lib/packing_test.c +F: scripts/gen_packed_field_checks.c PADATA PARALLEL EXECUTION MECHANISM M: Steffen Klassert diff --git a/Makefile b/Makefile index 93ab62cef244f..9a9fd5504ae8f 100644 --- a/Makefile +++ b/Makefile @@ -1367,6 +1367,10 @@ PHONY += scripts_unifdef scripts_unifdef: scripts_basic $(Q)$(MAKE) $(build)=scripts scripts/unifdef +PHONY += scripts_gen_packed_field_checks +scripts_gen_packed_field_checks: scripts_basic + $(Q)$(MAKE) $(build)=scripts scripts/gen_packed_field_checks + # --------------------------------------------------------------------------- # Install diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index 511615dc33419..cc371d0c9f3c7 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -1014,49 +1014,57 @@ static int c_can_handle_bus_err(struct net_device *dev, /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - cf->can_id |= CAN_ERR_PROT | 
CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); - cf->data[2] |= CAN_ERR_PROT_STUFF; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; stats->rx_errors++; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); - cf->data[2] |= CAN_ERR_PROT_FORM; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; stats->rx_errors++; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; stats->tx_errors++; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT1; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; stats->tx_errors++; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT0; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; stats->tx_errors++; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; stats->rx_errors++; break; default: break; } + if (unlikely(!skb)) + return 0; + netif_receive_skb(skb); return 1; } diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 6792c14fd7eb0..681643ab37804 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -468,7 +468,7 @@ static int can_set_termination(struct net_device *ndev, u16 term) else set = 0; - gpiod_set_value(priv->termination_gpio, set); + gpiod_set_value_cansleep(priv->termination_gpio, set); return 0; } diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index d32b10900d2f6..c86b57d47085f 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -390,36 +390,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) return 0; priv->can.can_stats.bus_error++; - stats->rx_errors++; /* Propagate the error condition to the CAN stack. */ skb = alloc_can_err_skb(ndev, &cf); - if (unlikely(!skb)) - return 0; /* Read the error counter register and check for new errors. 
*/ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) - cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } - if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } - if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_BIT0; + if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; + } - if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_BIT1; + if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; + } - if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_STUFF; + if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } - if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } - if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_FORM; + if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; + } /* Reset the error counter, ack the IRQ and re-enable the counter. 
*/ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR); @@ -427,6 +446,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) priv->base + IFI_CANFD_INTERRUPT); writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); + if (unlikely(!skb)) + return 0; + netif_receive_skb(skb); return 1; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 16e9e7d7527d9..533bcb77c9f93 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -695,47 +695,60 @@ static int m_can_handle_lec_err(struct net_device *dev, u32 timestamp = 0; cdev->can.can_stats.bus_error++; - stats->rx_errors++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); - cf->data[2] |= CAN_ERR_PROT_STUFF; + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); - cf->data[2] |= CAN_ERR_PROT_FORM; + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + stats->tx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT1; + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT0; + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + stats->rx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; } + if (unlikely(!skb)) + return 0; + if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index ddb3247948ad2..4d245857ef1ce 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -416,8 +416,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) int ret = 0; skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) - return -ENOMEM; txerr = priv->read_reg(priv, SJA1000_TXERR); rxerr = priv->read_reg(priv, SJA1000_RXERR); @@ -425,8 +423,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + stats->rx_over_errors++; stats->rx_errors++; sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ @@ -452,7 +453,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) else state = CAN_STATE_ERROR_ACTIVE; } - if (state != CAN_STATE_BUS_OFF) { + if (state != CAN_STATE_BUS_OFF && skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; @@ -460,33 +461,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) if (isrc & IRQ_BEI) { /* 
bus error interrupt */ priv->can.can_stats.bus_error++; - stats->rx_errors++; ecc = priv->read_reg(priv, SJA1000_ECC); + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - - /* set error type */ - switch (ecc & ECC_MASK) { - case ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case ECC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - break; - } + /* set error type */ + switch (ecc & ECC_MASK) { + case ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + break; + } - /* set error location */ - cf->data[3] = ecc & ECC_SEG; + /* set error location */ + cf->data[3] = ecc & ECC_SEG; + } /* Error occurred during transmission? */ - if ((ecc & ECC_DIR) == 0) - cf->data[2] |= CAN_ERR_PROT_TX; + if ((ecc & ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } } if (isrc & IRQ_EPI) { /* error passive interrupt */ @@ -502,8 +508,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) netdev_dbg(dev, "arbitration lost interrupt\n"); alc = priv->read_reg(priv, SJA1000_ALC); priv->can.can_stats.arbitration_lost++; - cf->can_id |= CAN_ERR_LOSTARB; - cf->data[0] = alc & 0x1f; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = alc & 0x1f; + } } if (state != priv->can.state) { @@ -516,6 +524,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } + if (!skb) + return -ENOMEM; + netif_rx(skb); return ret; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 148d974ebb210..09ae218315d73 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -663,27 +663,27 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) u8 rxerr, txerr; skb = alloc_can_err_skb(net, &cf); - if (!skb) - break; txerr = hi3110_read(spi, HI3110_READ_TEC); rxerr = hi3110_read(spi, HI3110_READ_REC); tx_state = txerr >= rxerr ? new_state : 0; rx_state = txerr <= rxerr ? 
new_state : 0; can_change_state(net, cf, tx_state, rx_state); - netif_rx(skb); if (new_state == CAN_STATE_BUS_OFF) { + if (skb) + netif_rx(skb); can_bus_off(net); if (priv->can.restart_ms == 0) { priv->force_quit = 1; hi3110_hw_sleep(spi); break; } - } else { + } else if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; + netif_rx(skb); } } @@ -696,27 +696,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) /* Check for protocol errors */ if (eflag & HI3110_ERR_PROTOCOL_MASK) { skb = alloc_can_err_skb(net, &cf); - if (!skb) - break; + if (skb) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; priv->can.can_stats.bus_error++; - priv->net->stats.rx_errors++; - if (eflag & HI3110_ERR_BITERR) - cf->data[2] |= CAN_ERR_PROT_BIT; - else if (eflag & HI3110_ERR_FRMERR) - cf->data[2] |= CAN_ERR_PROT_FORM; - else if (eflag & HI3110_ERR_STUFERR) - cf->data[2] |= CAN_ERR_PROT_STUFF; - else if (eflag & HI3110_ERR_CRCERR) - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; - else if (eflag & HI3110_ERR_ACKERR) - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; - - cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); - cf->data[7] = hi3110_read(spi, HI3110_READ_REC); + if (eflag & HI3110_ERR_BITERR) { + priv->net->stats.tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT; + } else if (eflag & HI3110_ERR_FRMERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_FORM; + } else if (eflag & HI3110_ERR_STUFERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } else if (eflag & HI3110_ERR_CRCERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } else if (eflag & HI3110_ERR_ACKERR) { + priv->net->stats.tx_errors++; + if (skb) + cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + } + netdev_dbg(priv->net, "Bus Error\n"); - netif_rx(skb); + if (skb) { + cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); + cf->data[7] = hi3110_read(spi, HI3110_READ_REC); + netif_rx(skb); + } } } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index d3ac865933fdf..e94321849fd7e 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -21,6 +21,11 @@ static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta) return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; } +static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF; +} + static inline int mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, u8 *tef_tail) @@ -147,7 +152,29 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len)); len = (chip_tx_tail << shift) - (tail << shift); - *len_p = len >> shift; + len >>= shift; + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the TX-FIFO tail index + * might be corrupted. + * + * However here it seems the bit indicating that the TX-FIFO + * is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct + * while the TX-FIFO tail index is. + * + * We assume the TX-FIFO is empty, i.e. all pending CAN frames + * have been sent, if: + * - Chip's head and tail index are equal (len == 0). + * - The TX-FIFO is less than half full. + * (The TX-FIFO empty case has already been checked at the + * beginning of this function.) + * - No free buffers in the TX ring. 
+ */ + if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) && + mcp251xfd_get_tx_free(tx_ring) == 0) + len = tx_ring->obj_num; + + *len_p = len; return 0; } diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 360158c295d34..4311c1f0eafd8 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -579,11 +579,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) /* bus error interrupt */ netdev_dbg(dev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; - stats->rx_errors++; + ecc = readl(priv->base + SUN4I_REG_STA_ADDR); if (likely(skb)) { - ecc = readl(priv->base + SUN4I_REG_STA_ADDR); - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SUN4I_STA_MASK_ERR) { @@ -601,9 +599,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) >> 16; break; } - /* error occurred during transmission? */ - if ((ecc & SUN4I_STA_ERR_DIR) == 0) + } + + /* error occurred during transmission? */ + if ((ecc & SUN4I_STA_ERR_DIR) == 0) { + if (likely(skb)) cf->data[2] |= CAN_ERR_PROT_TX; + stats->tx_errors++; + } else { + stats->rx_errors++; } } if (isrc & SUN4I_INT_ERR_PASSIVE) { @@ -629,10 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? state : 0; - if (likely(skb)) - can_change_state(dev, cf, tx_state, rx_state); - else - priv->can.state = state; + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. + */ + can_change_state(dev, cf, tx_state, rx_state); if (state == CAN_STATE_BUS_OFF) can_bus_off(dev); } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 050c0b49938a4..5355bac4dccbe 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_err_skb(dev->netdev, &cf); - if (skb == NULL) - return; if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; if (state & SJA1000_SR_BS) { dev->can.state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; dev->can.can_stats.bus_off++; can_bus_off(dev->netdev); @@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) /* bus error interrupt */ dev->can.can_stats.bus_error++; - stats->rx_errors++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - switch (ecc & SJA1000_ECC_MASK) { - case SJA1000_ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case SJA1000_ECC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case SJA1000_ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - cf->data[3] = ecc & SJA1000_ECC_SEG; - break; + switch (ecc & SJA1000_ECC_MASK) { + case SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + cf->data[3] = ecc & SJA1000_ECC_SEG; + break; + } } /* Error occurred during transmission? 
*/ - if ((ecc & SJA1000_ECC_DIR) == 0) - cf->data[2] |= CAN_ERR_PROT_TX; + if ((ecc & SJA1000_ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } - if (dev->can.state == CAN_STATE_ERROR_WARNING || - dev->can.state == CAN_STATE_ERROR_PASSIVE) { + if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING || + dev->can.state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } stats->rx_over_errors++; stats->rx_errors++; } - netif_rx(skb); + if (skb) + netif_rx(skb); } /* diff --git a/drivers/net/can/usb/f81604.c b/drivers/net/can/usb/f81604.c index bc0c8903fe779..e0cfa1460b0b8 100644 --- a/drivers/net/can/usb/f81604.c +++ b/drivers/net/can/usb/f81604.c @@ -526,7 +526,6 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv, netdev_dbg(netdev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; - stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; @@ -548,10 +547,15 @@ static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv, /* set error location */ cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG; + } - /* Error occurred during transmission? */ - if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) + /* Error occurred during transmission? */ + if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; } set_bit(F81604_CLEAR_ECC, &priv->clear_flags); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index bc86e9b329fd1..b6f4de375df75 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -43,9 +43,6 @@ #define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0 #define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30 -#define GS_USB_ENDPOINT_IN 1 -#define GS_USB_ENDPOINT_OUT 2 - /* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts * for timer overflow (will be after ~71 minutes) */ @@ -336,6 +333,9 @@ struct gs_usb { unsigned int hf_size_rx; u8 active_channels; + + unsigned int pipe_in; + unsigned int pipe_out; }; /* 'allocate' a tx context. 
@@ -687,7 +687,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) resubmit_urb: usb_fill_bulk_urb(urb, parent->udev, - usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN), + parent->pipe_in, hf, dev->parent->hf_size_rx, gs_usb_receive_bulk_callback, parent); @@ -819,7 +819,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, } usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT), + dev->parent->pipe_out, hf, dev->hf_size_tx, gs_usb_xmit_callback, txc); @@ -925,8 +925,7 @@ static int gs_can_open(struct net_device *netdev) /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - GS_USB_ENDPOINT_IN), + dev->parent->pipe_in, buf, dev->parent->hf_size_rx, gs_usb_receive_bulk_callback, parent); @@ -1413,6 +1412,7 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; struct gs_host_frame *hf; struct gs_usb *parent; struct gs_host_config hconf = { @@ -1422,6 +1422,13 @@ static int gs_usb_probe(struct usb_interface *intf, unsigned int icount, i; int rc; + rc = usb_find_common_endpoints(intf->cur_altsetting, + &ep_in, &ep_out, NULL, NULL); + if (rc) { + dev_err(&intf->dev, "Required endpoints not found\n"); + return rc; + } + /* send host config */ rc = usb_control_msg_send(udev, 0, GS_USB_BREQ_HOST_FORMAT, @@ -1466,6 +1473,10 @@ static int gs_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, parent); parent->udev = udev; + /* store the detected endpoints */ + parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress); + parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress); + for (i = 0; i < icount; i++) { unsigned int hf_size_rx = 0; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index da7c72105fb6e..6d03a5314034a 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -204,6 +204,9 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, } peer_net = rtnl_link_get_net(net, tbp); + if (IS_ERR(peer_net)) + return PTR_ERR(peer_net); + peer = rtnl_create_link(peer_net, ifname, name_assign_type, &vxcan_link_ops, tbp, extack); if (IS_ERR(peer)) { diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index baba204ad62f6..3d790f8c6f4da 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len) pr_err("Start bit (%d) expected to be larger than end (%d)\n", start, end); } else if (rc == -ERANGE) { - if ((start - end + 1) > 64) - pr_err("Field %d-%d too large for 64 bits!\n", - start, end); - else - pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n", - *val, start, end); + pr_err("Field %d-%d too large for 64 bits!\n", + start, end); } dump_stack(); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f1f6bb328a55b..d87681d711067 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1187,10 +1187,14 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) } } - if (fltr->base.flags & BNXT_ACT_DROP) + if (fltr->base.flags & BNXT_ACT_DROP) { fs->ring_cookie = RX_CLS_FLOW_DISC; - else + } else if (fltr->base.flags & 
BNXT_ACT_RSS_CTX) { + fs->flow_type |= FLOW_RSS; + cmd->rss_context = fltr->base.fw_vnic_id; + } else { fs->ring_cookie = fltr->base.rxq; + } rc = 0; fltr_err: diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 35634c516e266..535969fa0fdbe 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -29,6 +29,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr); static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv, u8 preemptible_tcs) { + if (!(priv->si->hw_features & ENETC_SI_F_QBU)) + return; + priv->preemptible_tcs = preemptible_tcs; enetc_mm_commit_preemptible_tcs(priv); } @@ -1756,15 +1759,6 @@ void enetc_get_si_caps(struct enetc_si *si) rss = enetc_rd(hw, ENETC_SIRSSCAPR); si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); } - - if (val & ENETC_SIPCAPR0_QBV) - si->hw_features |= ENETC_SI_F_QBV; - - if (val & ENETC_SIPCAPR0_QBU) - si->hw_features |= ENETC_SI_F_QBU; - - if (val & ENETC_SIPCAPR0_PSFP) - si->hw_features |= ENETC_SI_F_PSFP; } EXPORT_SYMBOL_GPL(enetc_get_si_caps); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 7c3285584f8a5..55ba949230ffd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -23,10 +23,7 @@ #define ENETC_SICTR0 0x18 #define ENETC_SICTR1 0x1c #define ENETC_SIPCAPR0 0x20 -#define ENETC_SIPCAPR0_PSFP BIT(9) #define ENETC_SIPCAPR0_RSS BIT(8) -#define ENETC_SIPCAPR0_QBV BIT(4) -#define ENETC_SIPCAPR0_QBU BIT(3) #define ENETC_SIPCAPR0_RFS BIT(2) #define ENETC_SIPCAPR1 0x24 #define ENETC_SITGTGR 0x30 @@ -194,6 +191,9 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PCAPR0 0x0900 #define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) #define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff) +#define ENETC_PCAPR0_PSFP BIT(9) +#define ENETC_PCAPR0_QBV BIT(4) +#define ENETC_PCAPR0_QBU BIT(3) #define ENETC_PCAPR1 0x0904 #define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ #define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index c47b4a743d93b..203862ec11142 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -409,6 +409,23 @@ static void enetc_port_assign_rfs_entries(struct enetc_si *si) enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE); } +static void enetc_port_get_caps(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + val = enetc_port_rd(hw, ENETC_PCAPR0); + + if (val & ENETC_PCAPR0_QBV) + si->hw_features |= ENETC_SI_F_QBV; + + if (val & ENETC_PCAPR0_QBU) + si->hw_features |= ENETC_SI_F_QBU; + + if (val & ENETC_PCAPR0_PSFP) + si->hw_features |= ENETC_SI_F_PSFP; +} + static void enetc_port_si_configure(struct enetc_si *si) { struct enetc_pf *pf = enetc_si_priv(si); @@ -416,6 +433,8 @@ static void enetc_port_si_configure(struct enetc_si *si) int num_rings, i; u32 val; + enetc_port_get_caps(si); + val = enetc_port_rd(hw, ENETC_PCAPR0); num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val)); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 56d2f79fb7e32..12b6c11d9cf94 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -409,20 +409,20 @@ static void set_tbipa(const u32 tbipa_val, struct 
platform_device *pdev, static int fsl_pq_mdio_probe(struct platform_device *pdev) { const struct fsl_pq_mdio_data *data; - struct device_node *np = pdev->dev.of_node; - struct resource res; - struct device_node *tbi; + struct device *dev = &pdev->dev; struct fsl_pq_mdio_priv *priv; + struct device_node *tbi; struct mii_bus *new_bus; - int err; + struct device_node *np; + struct resource *res; - data = device_get_match_data(&pdev->dev); + data = device_get_match_data(dev); if (!data) { - dev_err(&pdev->dev, "Failed to match device\n"); + dev_err(dev, "Failed to match device\n"); return -ENODEV; } - new_bus = mdiobus_alloc_size(sizeof(*priv)); + new_bus = devm_mdiobus_alloc_size(dev, sizeof(*priv)); if (!new_bus) return -ENOMEM; @@ -432,20 +432,24 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) new_bus->write = &fsl_pq_mdio_write; new_bus->reset = &fsl_pq_mdio_reset; - err = of_address_to_resource(np, 0, &res); - if (err < 0) { - dev_err(&pdev->dev, "could not obtain address information\n"); - goto error; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "could not obtain address information\n"); + return -ENOMEM; } + np = dev->of_node; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np, - (unsigned long long)res.start); + (unsigned long long)res->start); - priv->map = of_iomap(np, 0); - if (!priv->map) { - err = -ENOMEM; - goto error; - } + /* + * While tempting, this cannot be converted to + * devm_platform_get_and_ioremap_resource as some platforms overlap the + * memory regions with the ethernet node. + */ + priv->map = devm_of_iomap(dev, np, 0, NULL); + if (!priv->map) + return -ENOMEM; /* * Some device tree nodes represent only the MII registers, and @@ -453,21 +457,18 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) * contains the offset of the MII registers inside the mapped register * space. 
*/ - if (data->mii_offset > resource_size(&res)) { - dev_err(&pdev->dev, "invalid register map\n"); - err = -EINVAL; - goto error; + if (data->mii_offset > resource_size(res)) { + dev_err(dev, "invalid register map\n"); + return -EINVAL; } priv->regs = priv->map + data->mii_offset; - new_bus->parent = &pdev->dev; - platform_set_drvdata(pdev, new_bus); + new_bus->parent = dev; if (data->get_tbipa) { for_each_child_of_node(np, tbi) { if (of_node_is_type(tbi, "tbi-phy")) { - dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n", - tbi); + dev_dbg(dev, "found TBI PHY node %pOFP\n", tbi); break; } } @@ -475,49 +476,19 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (tbi) { const u32 *prop = of_get_property(tbi, "reg", NULL); if (!prop) { - dev_err(&pdev->dev, + dev_err(dev, "missing 'reg' property in node %pOF\n", tbi); - err = -EBUSY; - goto error; + return -EBUSY; } - set_tbipa(*prop, pdev, - data->get_tbipa, priv->map, &res); + set_tbipa(*prop, pdev, data->get_tbipa, priv->map, res); } } if (data->ucc_configure) - data->ucc_configure(res.start, res.end); - - err = of_mdiobus_register(new_bus, np); - if (err) { - dev_err(&pdev->dev, "cannot register %s as MDIO bus\n", - new_bus->name); - goto error; - } - - return 0; - -error: - if (priv->map) - iounmap(priv->map); - - kfree(new_bus); - - return err; -} - - -static void fsl_pq_mdio_remove(struct platform_device *pdev) -{ - struct device *device = &pdev->dev; - struct mii_bus *bus = dev_get_drvdata(device); - struct fsl_pq_mdio_priv *priv = bus->priv; - - mdiobus_unregister(bus); + data->ucc_configure(res->start, res->end); - iounmap(priv->map); - mdiobus_free(bus); + return devm_mdiobus_register(dev, new_bus); } static struct platform_driver fsl_pq_mdio_driver = { @@ -526,7 +497,6 @@ static struct platform_driver fsl_pq_mdio_driver = { .of_match_table = fsl_pq_mdio_match, }, .probe = fsl_pq_mdio_probe, - .remove = fsl_pq_mdio_remove, }; module_platform_driver(fsl_pq_mdio_driver); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 435138f4699d2..d610adc485f71 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -412,8 +412,8 @@ static int gfar_alloc_tx_queues(struct gfar_private *priv) int i; for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), - GFP_KERNEL); + priv->tx_queue[i] = devm_kzalloc( + priv->dev, sizeof(struct gfar_priv_tx_q), GFP_KERNEL); if (!priv->tx_queue[i]) return -ENOMEM; @@ -430,8 +430,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv) int i; for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), - GFP_KERNEL); + priv->rx_queue[i] = devm_kzalloc( + priv->dev, sizeof(struct gfar_priv_rx_q), GFP_KERNEL); if (!priv->rx_queue[i]) return -ENOMEM; @@ -441,44 +441,6 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv) return 0; } -static void gfar_free_tx_queues(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_tx_queues; i++) - kfree(priv->tx_queue[i]); -} - -static void gfar_free_rx_queues(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_rx_queues; i++) - kfree(priv->rx_queue[i]); -} - -static void unmap_group_regs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < MAXGROUPS; i++) - if (priv->gfargrp[i].regs) - iounmap(priv->gfargrp[i].regs); -} - -static void free_gfar_dev(struct gfar_private *priv) -{ - int i, j; - - for (i = 0; i < 
priv->num_grps; i++) - for (j = 0; j < GFAR_NUM_IRQS; j++) { - kfree(priv->gfargrp[i].irqinfo[j]); - priv->gfargrp[i].irqinfo[j] = NULL; - } - - free_netdev(priv->ndev); -} - static void disable_napi(struct gfar_private *priv) { int i; @@ -506,13 +468,13 @@ static int gfar_parse_group(struct device_node *np, int i; for (i = 0; i < GFAR_NUM_IRQS; i++) { - grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), - GFP_KERNEL); + grp->irqinfo[i] = devm_kzalloc( + priv->dev, sizeof(struct gfar_irqinfo), GFP_KERNEL); if (!grp->irqinfo[i]) return -ENOMEM; } - grp->regs = of_iomap(np, 0); + grp->regs = devm_of_iomap(priv->dev, np, 0, NULL); if (!grp->regs) return -ENOMEM; @@ -681,13 +643,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) return -EINVAL; } - *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); + *pdev = devm_alloc_etherdev_mqs(&ofdev->dev, sizeof(*priv), num_tx_qs, + num_rx_qs); dev = *pdev; if (NULL == dev) return -ENOMEM; priv = netdev_priv(dev); priv->ndev = dev; + priv->ofdev = ofdev; + priv->dev = &ofdev->dev; priv->mode = mode; @@ -697,16 +662,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) err = gfar_alloc_tx_queues(priv); if (err) - goto tx_alloc_failed; + return err; err = gfar_alloc_rx_queues(priv); if (err) - goto rx_alloc_failed; + return err; err = of_property_read_string(np, "model", &model); if (err) { pr_err("Device model property missing, aborting\n"); - goto rx_alloc_failed; + return err; } /* Init Rx queue filer rule set linked list */ @@ -726,13 +691,13 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) err = gfar_parse_group(child, priv, model); if (err) { of_node_put(child); - goto err_grp_init; + return err; } } } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); if (err) - goto err_grp_init; + return err; } if (of_property_read_bool(np, "bd-stash")) { @@ -755,7 +720,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) err = of_get_ethdev_address(np, dev); if (err == -EPROBE_DEFER) - goto err_grp_init; + return err; if (err) { eth_hw_addr_random(dev); dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); @@ -803,7 +768,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (!priv->phy_node && of_phy_is_fixed_link(np)) { err = of_phy_register_fixed_link(np); if (err) - goto err_grp_init; + return err; priv->phy_node = of_node_get(np); } @@ -812,15 +777,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); return 0; - -err_grp_init: - unmap_group_regs(priv); -rx_alloc_failed: - gfar_free_rx_queues(priv); -tx_alloc_failed: - gfar_free_tx_queues(priv); - free_gfar_dev(priv); - return err; } static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, @@ -3306,7 +3262,7 @@ static int gfar_probe(struct platform_device *ofdev) /* Carrier starts down, phylib will bring it up */ netif_carrier_off(dev); - err = register_netdev(dev); + err = devm_register_netdev(&ofdev->dev, dev); if (err) { pr_err("%s: Cannot register net device, aborting\n", dev->name); @@ -3358,12 +3314,8 @@ static int gfar_probe(struct platform_device *ofdev) register_fail: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); - unmap_group_regs(priv); - gfar_free_rx_queues(priv); - gfar_free_tx_queues(priv); of_node_put(priv->phy_node); of_node_put(priv->tbi_node); - 
free_gfar_dev(priv); return err; } @@ -3375,15 +3327,8 @@ static void gfar_remove(struct platform_device *ofdev) of_node_put(priv->phy_node); of_node_put(priv->tbi_node); - unregister_netdev(priv->ndev); - if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); - - unmap_group_regs(priv); - gfar_free_rx_queues(priv); - gfar_free_tx_queues(priv); - free_gfar_dev(priv); } #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 20bc40eec487a..24ec9a4f1ffa8 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -292,6 +292,7 @@ config ICE select DIMLIB select LIBIE select NET_DEVLINK + select PACKING select PLDMFW select DPLL help diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1489a8ceec51d..3bf05b135b355 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -12,6 +12,13 @@ #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TXQ_CTX_SZ 22 + +typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t; +typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t; + struct ice_aqc_generic { __le32 param0; __le32 param1; @@ -2084,10 +2091,10 @@ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; - u8 txq_ctx[22]; + ice_txq_ctx_buf_t txq_ctx; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; -}; +} __packed; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 82a9cd4ec7aec..b2af8e3586f76 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* Enable descriptor prefetch */ + rlan_ctx.prefena = 1; + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi * metadata and flags to allow redirecting to PR netdev */ @@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); + ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx); /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. 
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index b22e71dc59d4e..6c6862beab6a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6,6 +6,7 @@ #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_ptp_hw.h" +#include <linux/packing.h> #define ICE_PF_RESET_WAIT_COUNT 300 #define ICE_MAX_NETLIST_SIZE 10 @@ -1357,39 +1358,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req) } /** - * ice_copy_rxq_ctx_to_hw + * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers * @hw: pointer to the hardware structure - * @ice_rxq_ctx: pointer to the rxq context + * @rxq_ctx: pointer to the packed Rx queue context * @rxq_index: the index of the Rx queue - * - * Copies rxq context from dense structure to HW register space */ -static int -ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) +static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, + const ice_rxq_ctx_buf_t *rxq_ctx, + u32 rxq_index) { - u8 i; - - if (!ice_rxq_ctx) - return -EINVAL; - - if (rxq_index > QRX_CTRL_MAX_INDEX) - return -EINVAL; - /* Copy each dword separately to HW */ - for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { - wr32(hw, QRX_CONTEXT(i, rxq_index), - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { + u32 ctx = ((const u32 *)rxq_ctx)[i]; - ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); - } + wr32(hw, QRX_CONTEXT(i, rxq_index), ctx); - return 0; + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx); + } } +#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \ + PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field) + /* LAN Rx Queue Context */ -static const struct ice_ctx_ele ice_rlan_ctx_info[] = { - /* Field Width LSB */ +static const struct packed_field_s ice_rlan_ctx_fields[] = { + /* Field Width LSB */ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), @@ -1410,35 +1403,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), - { 0 } }; /** - * ice_write_rxq_ctx + * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer + * @ctx: the Rx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Rx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. + */ +static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, + ice_rxq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + +/** + * ice_write_rxq_ctx - Write Rx Queue context to hardware * @hw: pointer to the hardware structure - * @rlan_ctx: pointer to the rxq context + * @rlan_ctx: pointer to the unpacked Rx queue context * @rxq_index: the index of the Rx queue * - * Converts rxq context from sparse to dense structure and then writes - * it to HW register space and enables the hardware to prefetch descriptors - * instead of only fetching them on demand + * Pack the sparse Rx Queue context into dense hardware format and write it + * into the HW register space. + * + * Return: 0 on success, or -EINVAL if the Rx queue index is invalid. 
*/ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { - u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + ice_rxq_ctx_buf_t buf = {}; - if (!rlan_ctx) + if (rxq_index > QRX_CTRL_MAX_INDEX) return -EINVAL; - rlan_ctx->prefena = 1; + ice_pack_rxq_ctx(rlan_ctx, &buf); + ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index); - ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); - return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); + return 0; } /* LAN Tx Queue Context */ -const struct ice_ctx_ele ice_tlan_ctx_info[] = { +static const struct packed_field_s ice_tlan_ctx_fields[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), @@ -1467,10 +1475,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), - { 0 } }; +/** + * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer + * @ctx: the Tx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Tx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. + */ +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + /* Sideband Queue command wrappers */ /** @@ -4548,205 +4568,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, /* End of FW Admin Queue command wrappers */ -/** - * ice_pack_ctx_byte - write a byte to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u8 src_byte, dest_byte, mask; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - src_byte = *from; - src_byte <<= shift_width; - src_byte &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_byte, dest, sizeof(dest_byte)); - - dest_byte &= ~mask; /* get the bits not changing */ - dest_byte |= src_byte; /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_byte, sizeof(dest_byte)); -} - -/** - * ice_pack_ctx_word - write a word to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u16 src_word, mask; - __le16 dest_word; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_word = *(u16 *)from; - src_word <<= shift_width; - src_word &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - 
- memcpy(&dest_word, dest, sizeof(dest_word)); - - dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ - dest_word |= cpu_to_le16(src_word); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_word, sizeof(dest_word)); -} - -/** - * ice_pack_ctx_dword - write a dword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u32 src_dword, mask; - __le32 dest_dword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_dword = *(u32 *)from; - src_dword <<= shift_width; - src_dword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_dword, dest, sizeof(dest_dword)); - - dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ - dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_dword, sizeof(dest_dword)); -} - -/** - * ice_pack_ctx_qword - write a qword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u64 src_qword, mask; - __le64 dest_qword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_qword = *(u64 *)from; - src_qword <<= shift_width; - src_qword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_qword, dest, sizeof(dest_qword)); - - dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ - dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_qword, sizeof(dest_qword)); -} - -/** - * ice_set_ctx - set context bits in packed structure - * @hw: pointer to the hardware structure - * @src_ctx: pointer to a generic non-packed context structure - * @dest_ctx: pointer to memory for the packed structure - * @ce_info: List of Rx context elements - */ -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - int f; - - for (f = 0; ce_info[f].width; f++) { - /* We have to deal with each element of the FW response - * using the correct size so that we are correct regardless - * of the endianness of the machine. - */ - if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", - f, ce_info[f].width, ce_info[f].size_of); - continue; - } - switch (ce_info[f].size_of) { - case sizeof(u8): - ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u16): - ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u32): - ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u64): - ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); - break; - default: - return -EINVAL; - } - } - - return 0; -} - /** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 27208a60cece5..a68bea3934e35 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); -extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info); + +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf); extern struct mutex ice_global_cfg_lock_sw; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 611577ebc29d8..1479b45738af1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits { ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ }; -#define ICE_RXQ_CTX_SIZE_DWORDS 8 -#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 #define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) -/* RLAN Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. 
- */ +/* RLAN Rx queue context data */ struct ice_rlan_ctx { u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; #define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 - u16 dbuf; /* bigger than needed, see above for reason */ + u8 dbuf; #define ICE_RLAN_CTX_HBUF_S 6 - u16 hbuf; /* bigger than needed, see above for reason */ + u8 hbuf; u8 dtype; u8 dsize; u8 crcstrip; @@ -401,29 +393,15 @@ struct ice_rlan_ctx { u8 hsplit_0; u8 hsplit_1; u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ + u16 rxmax; u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 lrxqthresh; u8 prefena; /* NOTE: normally must be set to 1 at init */ }; -struct ice_ctx_ele { - u16 offset; - u16 size_of; - u16 width; - u16 lsb; -}; - -#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ - .offset = offsetof(struct _struct, _ele), \ - .size_of = sizeof_field(struct _struct, _ele), \ - .width = _width, \ - .lsb = _lsb, \ -} - /* for hsplit_0 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_0 { ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, @@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload { #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 -/* Tx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* Tx queue context data */ struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ u8 port_num; - u16 cgd_num; /* bigger than needed, see above for reason */ + u8 cgd_num; u8 pf_num; u16 vmvf_num; u8 vmvf_type; @@ -573,7 +545,7 @@ struct ice_tlan_ctx { u8 tsyn_ena; u8 internal_usage_flag; u8 alt_vlan; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; u8 wb_mode; u8 tphrd_desc; u8 tphrd; @@ -582,7 +554,7 @@ struct ice_tlan_ctx { u16 qnum_in_func; u8 itr_notification_mode; u8 adjust_prof_id; - u32 qlen; /* bigger than needed, see above for reason */ + u16 qlen; u8 quanta_prof_idx; u8 tso_ena; u16 tso_qnum; @@ -590,7 +562,6 @@ struct ice_tlan_ctx { u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! 
*/ }; #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 549436efc2048..226a44823401c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1137,6 +1137,45 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features return err; } +static int octep_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi) +{ + struct octep_device *oct = netdev_priv(dev); + + ivi->vf = vf; + ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr); + ivi->spoofchk = true; + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + ivi->trusted = oct->vf_info[vf].trusted; + ivi->max_tx_rate = 10000; + ivi->min_tx_rate = 0; + + return 0; +} + +static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct octep_device *oct = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(mac)) { + dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac); + return -EADDRNOTAVAIL; + } + + dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac); + ether_addr_copy(oct->vf_info[vf].mac_addr, mac); + oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF; + + err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true); + if (err) + dev_err(&oct->pdev->dev, + "Set VF%d MAC address failed via host control Mbox\n", + vf); + + return err; +} + static const struct net_device_ops octep_netdev_ops = { .ndo_open = octep_open, .ndo_stop = octep_stop, @@ -1146,6 +1185,8 @@ static const struct net_device_ops octep_netdev_ops = { .ndo_set_mac_address = octep_set_mac, .ndo_change_mtu = octep_change_mtu, .ndo_set_features = octep_set_features, + .ndo_get_vf_config = octep_get_vf_config, + .ndo_set_vf_mac = octep_set_vf_mac }; /** @@ -1560,9 +1601,12 @@ static void octep_remove(struct pci_dev *pdev) static int octep_sriov_enable(struct octep_device *oct, int num_vfs) { struct pci_dev *pdev = oct->pdev; - int err; + int i, err; CFG_GET_ACTIVE_VFS(oct->conf) = num_vfs; + for (i = 0; i < num_vfs; i++) + oct->vf_info[i].trusted = false; + err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable SRIOV err=%d\n", err); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index fee59e0e0138f..1c39833ffcc0f 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -220,7 +220,9 @@ struct octep_iface_link_info { /* The Octeon VF device specific info data structure.*/ struct octep_pfvf_info { u8 mac_addr[ETH_ALEN]; + u32 flags; u32 mbox_version; + bool trusted; }; /* The Octeon device specific private data structure. 
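The `trusted` flag added to struct octep_pfvf_info above is read by octep_get_vf_config() and by the PF/VF mailbox MAC checks in the next file, but the callback that sets it is not part of this hunk. Below is a minimal sketch of such a setter, assuming the standard .ndo_set_vf_trust hook; the name octep_set_vf_trust and the bounds check are illustrative, not taken from this patch.

/* Hypothetical sketch: record the administrative "trusted" setting for a VF.
 * Assumes the fields added to struct octep_pfvf_info above; a real
 * implementation would be wired up as .ndo_set_vf_trust in octep_netdev_ops.
 */
static int octep_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct octep_device *oct = netdev_priv(dev);

	if (vf >= CFG_GET_ACTIVE_VFS(oct->conf))
		return -EINVAL;

	/* Consulted by octep_get_vf_config() and by the PF/VF mailbox
	 * handler before allowing a VF to override a PF-assigned MAC.
	 */
	oct->vf_info[vf].trusted = setting;

	return 0;
}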
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c index e6eb98d70f3c4..3024f6428838d 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c @@ -156,12 +156,24 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if ((oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) && + !oct->vf_info[vf_id].trusted) { + dev_err(&oct->pdev->dev, + "VF%d attempted to override administrative set MAC address\n", + vf_id); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; + return; + } + err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n", + vf_id); return; } + + ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr); rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; } @@ -171,10 +183,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id); + ether_addr_copy(rsp->s_set_mac.mac_addr, + oct->vf_info[vf_id].mac_addr); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; + return; + } err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n", + vf_id); return; } rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h index 0dc6eead292a3..386a095a99bc4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h @@ -8,8 +8,6 @@ #ifndef _OCTEP_PFVF_MBOX_H_ #define _OCTEP_PFVF_MBOX_H_ -/* VF flags */ -#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */ #define OCTEON_SDP_16K_HW_FRS 16380UL #define OCTEON_SDP_64K_HW_FRS 65531UL @@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version { #define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2 +/* VF flags */ +/* PF has set VF MAC address */ +#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0) + enum octep_pfvf_mbox_opcode { OCTEP_PFVF_MBOX_CMD_VERSION, OCTEP_PFVF_MBOX_CMD_SET_MTU, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 5d84386ed22da..406c59100a35a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -159,6 +159,7 @@ enum nix_scheduler { #define SDP_HW_MIN_FRS 16 #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ +#define SDP_LINK_CREDIT 0x320202 /* NIX RX action operation*/ #define NIX_RX_ACTIONOP_DROP (0x0ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 5d5a01dbbca11..a5d1e2bddd58d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 
@@ -4672,6 +4672,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); + /* Set SDP link credit */ + rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT); + /* Set default min/max packet lengths allowed on NIX Rx links. * * With HW reset minlen value of 60byte, HW will treat ARP pkts diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index da69e454662a7..8a2444a8b7d33 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1457,14 +1457,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, target = req->vf; /* PF installing for its VF */ - if (!from_vf && req->vf && !from_rep_dev) { + else if (!from_vf && req->vf && !from_rep_dev) { target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); } /* Representor device installing for a representee */ - if (from_rep_dev && req->vf) + else if (from_rep_dev && req->vf) target = req->vf; else /* msg received from PF/VF */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c index 3d74109f82300..a379e8358f827 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c @@ -297,6 +297,8 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) if (ret) { mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); kvfree(vport_caps); + if (ret != -EBUSY) + return NULL; return ERR_PTR(ret); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 26a714bfad4ec..b45efc272fdbb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3301,7 +3301,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, if (rc) return rc; - if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED)) + rc = -EOPNOTSUPP; + else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) rc = -EINVAL; return rc; diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index ef9c02b000e48..a79fd2d66534d 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -909,17 +909,15 @@ qca_spi_probe(struct spi_device *spi) legacy_mode = of_property_read_bool(spi->dev.of_node, "qca,legacy-mode"); - if (qcaspi_clkspeed == 0) { - if (spi->max_speed_hz) - qcaspi_clkspeed = spi->max_speed_hz; - else - qcaspi_clkspeed = QCASPI_CLK_SPEED; - } - - if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || - (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { - dev_err(&spi->dev, "Invalid clkspeed: %d\n", - qcaspi_clkspeed); + if (qcaspi_clkspeed) + spi->max_speed_hz = qcaspi_clkspeed; + else if (!spi->max_speed_hz) + spi->max_speed_hz = QCASPI_CLK_SPEED; + + if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN || + spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) { + dev_err(&spi->dev, "Invalid clkspeed: %u\n", + spi->max_speed_hz); return -EINVAL; } @@ -944,14 +942,13 @@ qca_spi_probe(struct spi_device *spi) return -EINVAL; } - dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", + dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, 
pluggable=%d\n", QCASPI_DRV_VERSION, - qcaspi_clkspeed, + spi->max_speed_hz, qcaspi_burst_len, qcaspi_pluggable); spi->mode = SPI_MODE_3; - spi->max_speed_hz = qcaspi_clkspeed; if (spi_setup(spi) < 0) { dev_err(&spi->dev, "Unable to setup SPI device\n"); return -EFAULT; diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index be4c9622618d8..8904aae41acae 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -23,7 +23,7 @@ enum mac_version { RTL_GIGA_MAC_VER_08, RTL_GIGA_MAC_VER_09, RTL_GIGA_MAC_VER_10, - RTL_GIGA_MAC_VER_11, + /* support for RTL_GIGA_MAC_VER_11 has been removed */ /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */ /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */ RTL_GIGA_MAC_VER_14, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 739707a7b40fb..cc14cd540f748 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -104,7 +104,6 @@ static const struct { [RTL_GIGA_MAC_VER_08] = {"RTL8102e" }, [RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" }, [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" }, - [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, @@ -623,7 +622,6 @@ struct rtl8169_tc_offsets { enum rtl_flag { RTL_FLAG_TASK_RESET_PENDING, - RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, RTL_FLAG_TASK_TX_TIMEOUT, RTL_FLAG_MAX }; @@ -2336,7 +2334,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168B family. */ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, - /* This one is very old and rare, let's see if anybody complains. + /* This one is very old and rare, support has been removed. * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, */ @@ -3804,7 +3802,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3, [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2, [RTL_GIGA_MAC_VER_10] = NULL, - [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, @@ -4680,12 +4677,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) if (status & LinkChg) phy_mac_interrupt(tp->phydev); - if (unlikely(status & RxFIFOOver && - tp->mac_version == RTL_GIGA_MAC_VER_11)) { - netif_stop_queue(tp->dev); - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); - } - rtl_irq_disable(tp); napi_schedule(&tp->napi); out: @@ -4723,8 +4714,6 @@ static void rtl_task(struct work_struct *work) reset: rtl_reset_work(tp); netif_wake_queue(tp->dev); - } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) { - rtl_reset_work(tp); } } @@ -5103,9 +5092,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp) if (tp->mac_version <= RTL_GIGA_MAC_VER_06) tp->irq_mask |= SYSErr | RxFIFOOver; - else if (tp->mac_version == RTL_GIGA_MAC_VER_11) - /* special workaround needed */ - tp->irq_mask |= RxFIFOOver; } static int rtl_alloc_irq(struct rtl8169_private *tp) @@ -5300,7 +5286,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_02 ... 
RTL_GIGA_MAC_VER_06: return JUMBO_7K; /* RTL8168b */ - case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_17: return JUMBO_4K; /* RTL8168c */ diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 5307c6ff4e252..b28b30390e84d 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -276,15 +276,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } -static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) -{ - phy_write(phydev, 0x1f, 0x0001); - phy_set_bits(phydev, 0x16, BIT(0)); - phy_write(phydev, 0x10, 0xf41b); - phy_write(phydev, 0x1f, 0x0000); -} - static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1136,7 +1127,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config, [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config, [RTL_GIGA_MAC_VER_10] = NULL, - [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 8d18dae4d8fbf..89b5f9bd9d93a 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1527,7 +1527,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv) { unsigned int i; - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { phy_exit(priv->rdev[i]->serdes); rswitch_ether_port_deinit_one(priv->rdev[i]); } @@ -1681,8 +1681,9 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd if (dma_mapping_error(ndev->dev.parent, dma_addr_orig)) goto err_kfree; - gq->skbs[gq->cur] = skb; - gq->unmap_addrs[gq->cur] = dma_addr_orig; + /* Stored the skb at the last descriptor to avoid skb free before hardware completes send */ + gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb; + gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig; /* DT_FSTART should be set at last. So, this is reverse order. 
*/ for (i = nr_desc; i-- > 0; ) { @@ -1702,6 +1703,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd return ret; err_unmap: + gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL; dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE); err_kfree: @@ -1889,7 +1891,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index rdev->np_port = rswitch_get_port_node(rdev); rdev->disabled = !rdev->np_port; err = of_get_ethdev_address(rdev->np_port, ndev); - of_node_put(rdev->np_port); if (err) { if (is_valid_ether_addr(rdev->etha->mac_addr)) eth_hw_addr_set(ndev, rdev->etha->mac_addr); @@ -1901,9 +1902,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index if (err < 0) goto out_get_params; - if (rdev->priv->gwca.speed < rdev->etha->speed) - rdev->priv->gwca.speed = rdev->etha->speed; - err = rswitch_rxdmac_alloc(ndev); if (err < 0) goto out_rxdmac; @@ -1919,6 +1917,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index out_rxdmac: out_get_params: + of_node_put(rdev->np_port); netif_napi_del(&rdev->napi); free_netdev(ndev); @@ -1932,6 +1931,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index rswitch_txdmac_free(ndev); rswitch_rxdmac_free(ndev); + of_node_put(rdev->np_port); netif_napi_del(&rdev->napi); free_netdev(ndev); } diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 72e3ff596d318..303883369b94a 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -993,7 +993,6 @@ struct rswitch_gwca { DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES); u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS]; u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS]; - int speed; }; #define NUM_QUEUES_PER_NDEV 2 diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index d2b3f5a591418..e3dcdeacc12c5 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -177,8 +177,7 @@ static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client) return mcli; err: if (mcli) { - if (mcli->client) - i2c_unregister_device(mcli->client); + i2c_unregister_device(mcli->client); kfree(mcli); } return ERR_PTR(rc); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 4ea44a2f48f7b..636a56bb04cd5 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -90,6 +91,12 @@ static DEFINE_MUTEX(target_cleanup_list_lock); */ static struct console netconsole_ext; +struct netconsole_target_stats { + u64_stats_t xmit_drop_count; + u64_stats_t enomem_count; + struct u64_stats_sync syncp; +}; + /** * struct netconsole_target - Represents a configured netconsole target. * @list: Links this target into the target_list. @@ -97,6 +104,7 @@ static struct console netconsole_ext; * @userdata_group: Links to the userdata configfs hierarchy * @userdata_complete: Cached, formatted string of append * @userdata_length: String length of userdata_complete + * @stats: Packet send stats for the target. Used for debugging. * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). 
* We maintain a strict 1:1 correspondence between this and @@ -124,6 +132,7 @@ struct netconsole_target { char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS]; size_t userdata_length; #endif + struct netconsole_target_stats stats; bool enabled; bool extended; bool release; @@ -262,6 +271,7 @@ static void netconsole_process_cleanups_core(void) * | remote_ip * | local_mac * | remote_mac + * | transmit_errors * | userdata/ * | / * | value @@ -371,6 +381,21 @@ static ssize_t remote_mac_show(struct config_item *item, char *buf) return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac); } +static ssize_t transmit_errors_show(struct config_item *item, char *buf) +{ + struct netconsole_target *nt = to_target(item); + u64 xmit_drop_count, enomem_count; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&nt->stats.syncp); + xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count); + enomem_count = u64_stats_read(&nt->stats.enomem_count); + } while (u64_stats_fetch_retry(&nt->stats.syncp, start)); + + return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count); +} + /* * This one is special -- targets created through the configfs interface * are not enabled (and the corresponding netpoll activated) by default. @@ -842,6 +867,7 @@ CONFIGFS_ATTR(, remote_ip); CONFIGFS_ATTR_RO(, local_mac); CONFIGFS_ATTR(, remote_mac); CONFIGFS_ATTR(, release); +CONFIGFS_ATTR_RO(, transmit_errors); static struct configfs_attribute *netconsole_target_attrs[] = { &attr_enabled, @@ -854,6 +880,7 @@ static struct configfs_attribute *netconsole_target_attrs[] = { &attr_remote_ip, &attr_local_mac, &attr_remote_mac, + &attr_transmit_errors, NULL, }; @@ -1058,6 +1085,34 @@ static struct notifier_block netconsole_netdev_notifier = { .notifier_call = netconsole_netdev_event, }; +/** + * send_udp - Wrapper for netpoll_send_udp that counts errors + * @nt: target to send message to + * @msg: message to send + * @len: length of message + * + * Calls netpoll_send_udp and classifies the return value. If an error + * occurred it increments statistics in nt->stats accordingly. + * Only calls netpoll_send_udp if CONFIG_NETCONSOLE_DYNAMIC is disabled. 
+ */ +static void send_udp(struct netconsole_target *nt, + const char *msg, int len) +{ + int result = netpoll_send_udp(&nt->np, msg, len); + + if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) { + if (result == NET_XMIT_DROP) { + u64_stats_update_begin(&nt->stats.syncp); + u64_stats_inc(&nt->stats.xmit_drop_count); + u64_stats_update_end(&nt->stats.syncp); + } else if (result == -ENOMEM) { + u64_stats_update_begin(&nt->stats.syncp); + u64_stats_inc(&nt->stats.enomem_count); + u64_stats_update_end(&nt->stats.syncp); + } + } +} + static void send_msg_no_fragmentation(struct netconsole_target *nt, const char *msg, int msg_len, @@ -1085,7 +1140,7 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt, MAX_PRINT_CHUNK - msg_len, "%s", userdata); - netpoll_send_udp(&nt->np, buf, msg_len); + send_udp(nt, buf, msg_len); } static void append_release(char *buf) @@ -1178,7 +1233,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf, this_offset += this_chunk; } - netpoll_send_udp(&nt->np, buf, this_header + this_offset); + send_udp(nt, buf, this_header + this_offset); offset += this_offset; } } @@ -1288,7 +1343,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len) tmp = msg; for (left = len; left;) { frag = min(left, MAX_PRINT_CHUNK); - netpoll_send_udp(&nt->np, tmp, frag); + send_udp(nt, tmp, frag); tmp += frag; left -= frag; } diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index bb07725d1c72b..44fe99a82ac39 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -386,6 +386,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, return -EOPNOTSUPP; net = rtnl_link_get_net(src_net, tbp); + if (IS_ERR(net)) + return PTR_ERR(net); + peer = rtnl_create_link(net, ifname, ifname_assign_type, &netkit_link_ops, tbp, extack); if (IS_ERR(peer)) { diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index d3273bc0da4a1..691969a4910f2 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -351,6 +351,22 @@ static int lan88xx_config_aneg(struct phy_device *phydev) static void lan88xx_link_change_notify(struct phy_device *phydev) { int temp; + int ret; + + /* Reset PHY to ensure MII_LPA provides up-to-date information. This + * issue is reproducible only after parallel detection, as described + * in IEEE 802.3-2022, Section 28.2.3.1 ("Parallel detection function"), + * where the link partner does not support auto-negotiation. + */ + if (phydev->state == PHY_NOLINK) { + ret = phy_init_hw(phydev); + if (ret < 0) + goto link_change_notify_failed; + + ret = _phy_start_aneg(phydev); + if (ret < 0) + goto link_change_notify_failed; + } /* At forced 100 F/H mode, chip may fail to set mode correctly * when cable is switched between long(~50+m) and short one. 
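For context on where the workaround above takes effect: lan88xx_link_change_notify() only runs because the driver's struct phy_driver entry registers it as .link_change_notify, which phylib invokes on every PHY state transition. A purely illustrative entry follows; the PHY ID, mask and name are placeholders, not the actual microchip.c table values.

/* Illustrative only: placeholder phy_driver entry showing how the handler
 * above is hooked up. ID, mask and name are not the real microchip.c values.
 */
static struct phy_driver example_lan88xx_driver[] = {
	{
		.phy_id			= 0x0007c130,	/* placeholder */
		.phy_id_mask		= 0xfffffff0,	/* placeholder */
		.name			= "Example LAN88xx PHY",
		.config_aneg		= lan88xx_config_aneg,
		.link_change_notify	= lan88xx_link_change_notify,
	},
};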
@@ -377,6 +393,11 @@ static void lan88xx_link_change_notify(struct phy_device *phydev) temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; phy_write(phydev, LAN88XX_INT_MASK, temp); } + + return; + +link_change_notify_failed: + phydev_err(phydev, "Link change process failed %pe\n", ERR_PTR(ret)); } /** diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 0d6d0d749d440..3a42a982c638a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1801,6 +1801,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, } net = rtnl_link_get_net(src_net, tbp); + if (IS_ERR(net)) + return PTR_ERR(net); + peer = rtnl_create_link(net, ifname, name_assign_type, &veth_link_ops, tbp, extack); if (IS_ERR(peer)) { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 64c87bb48a41c..27d58fb47b075 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3054,7 +3054,6 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) if (err < 0) goto err_xdp_reg_mem_model; - netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index)); virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); @@ -3327,12 +3326,13 @@ static int virtnet_rx_resize(struct virtnet_info *vi, struct receive_queue *rq, u32 ring_num) { int err, qindex; + bool flushed; qindex = rq - vi->rq; virtnet_rx_pause(vi, rq); - err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); + err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, &flushed); if (err) netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); @@ -3390,14 +3390,17 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, u32 ring_num) { int qindex, err; + bool flushed; qindex = sq - vi->sq; virtnet_tx_pause(vi, sq); - err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); + err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf, &flushed); if (err) netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); + if (flushed) + netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qindex)); virtnet_tx_resume(vi, sq); @@ -5692,6 +5695,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu struct xsk_buff_pool *pool) { int err, qindex; + bool flushed; qindex = rq - vi->rq; @@ -5710,7 +5714,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu virtnet_rx_pause(vi, rq); - err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf); + err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, &flushed); if (err) { netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); @@ -5734,16 +5738,19 @@ static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi, struct xsk_buff_pool *pool) { int err, qindex; + bool flushed; qindex = sq - vi->sq; virtnet_tx_pause(vi, sq); - err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf); + err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf, &flushed); if (err) { netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err); pool = NULL; } + if (flushed) + netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qindex)); sq->xsk_pool = pool; @@ -6243,6 +6250,7 @@ static void free_unused_bufs(struct virtnet_info *vi) struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) virtnet_sq_free_unused_buf(vq, buf); + netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, 
i)); cond_resched(); } diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index c56cd0f63909a..77a36e7bddd54 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -150,7 +150,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) if (ppb > ops->max_adj || ppb < -ops->max_adj) return -ERANGE; err = ops->adjfine(ops, tx->freq); - ptp->dialed_frequency = tx->freq; + if (!err) + ptp->dialed_frequency = tx->freq; } else if (tx->modes & ADJ_OFFSET) { if (ops->adjphase) { s32 max_phase_adj = ops->getmaxphase(ops); diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c index 617c8d6706d3d..6cea4fe39bcfe 100644 --- a/drivers/ptp/ptp_kvm_x86.c +++ b/drivers/ptp/ptp_kvm_x86.c @@ -26,7 +26,7 @@ int kvm_arch_ptp_init(void) long ret; if (!kvm_para_available()) - return -ENODEV; + return -EOPNOTSUPP; if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { p = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -46,14 +46,14 @@ int kvm_arch_ptp_init(void) clock_pair_gpa = slow_virt_to_phys(clock_pair); if (!pvclock_get_pvti_cpu0_va()) { - ret = -ENODEV; + ret = -EOPNOTSUPP; goto err; } ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, KVM_CLOCK_PAIRING_WALLCLOCK); if (ret == -KVM_ENOSYS) { - ret = -ENODEV; + ret = -EOPNOTSUPP; goto err; } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 82a7d2cbc7045..b522ef798946e 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2772,6 +2772,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); * @_vq: the struct virtqueue we're talking about. * @num: new ring num * @recycle: callback to recycle unused buffers + * @flushed: whether or not unused buffers are all flushed * * When it is really necessary to create a new vring, it will set the current vq * into the reset state. Then call the passed callback to recycle the buffer @@ -2792,11 +2793,14 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma); * */ int virtqueue_resize(struct virtqueue *_vq, u32 num, - void (*recycle)(struct virtqueue *vq, void *buf)) + void (*recycle)(struct virtqueue *vq, void *buf), + bool *flushed) { struct vring_virtqueue *vq = to_vvq(_vq); int err; + *flushed = false; + if (num > vq->vq.num_max) return -E2BIG; @@ -2809,6 +2813,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num, err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; + *flushed = true; if (vq->packed_ring) err = virtqueue_resize_packed(_vq, num); @@ -2823,6 +2828,7 @@ EXPORT_SYMBOL_GPL(virtqueue_resize); * virtqueue_reset - detach and recycle all unused buffers * @_vq: the struct virtqueue we're talking about. * @recycle: callback to recycle unused buffers + * @flushed: whether or not unused buffers are all flushed * * Caller must ensure we don't call this with other virtqueue operations * at the same time (except where noted). 
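Since virtqueue_resize() and virtqueue_reset() now report through the extra out-parameter whether the unused buffers were actually recycled, callers are expected to act on it the way virtio-net does above. A minimal caller-side sketch follows; the names example_recycle and example_tx_resize are hypothetical and error handling is reduced to the essentials.

/* Sketch only: consume the new "flushed" out-parameter. The recycle callback
 * and function names are hypothetical; the pattern mirrors virtnet_tx_resize()
 * earlier in this series.
 */
static void example_recycle(struct virtqueue *vq, void *buf)
{
	/* driver-specific unmap/free of one unused buffer */
}

static int example_tx_resize(struct net_device *dev, struct virtqueue *vq,
			     u32 ring_num, int qindex)
{
	bool flushed;
	int err;

	err = virtqueue_resize(vq, ring_num, example_recycle, &flushed);

	/* *flushed is true only after the queue was stopped and its unused
	 * buffers recycled, so reset the BQL counters exactly in that case,
	 * even if the resize step itself returned an error.
	 */
	if (flushed)
		netdev_tx_reset_queue(netdev_get_tx_queue(dev, qindex));

	return err;
}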
@@ -2834,14 +2840,17 @@ EXPORT_SYMBOL_GPL(virtqueue_resize); * -EPERM: Operation not permitted */ int virtqueue_reset(struct virtqueue *_vq, - void (*recycle)(struct virtqueue *vq, void *buf)) + void (*recycle)(struct virtqueue *vq, void *buf), + bool *flushed) { struct vring_virtqueue *vq = to_vvq(_vq); int err; + *flushed = false; err = virtqueue_disable_and_recycle(_vq, recycle); if (err) return err; + *flushed = true; if (vq->packed_ring) virtqueue_reinit_packed(vq); diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index e0d34e4e9076e..0967147e03c65 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -74,6 +74,14 @@ static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen) return fid->rdir; } +static void v9fs_free_rdir_buf(struct file *filp) +{ + struct p9_fid *fid = filp->private_data; + + kfree(fid->rdir); + fid->rdir = NULL; +} + /** * v9fs_dir_readdir - iterate through a directory * @file: opened file structure @@ -129,8 +137,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) over = !dir_emit(ctx, st.name, strlen(st.name), QID2INO(&st.qid), dt_type(&st)); p9stat_free(&st); - if (over) + if (over) { + v9fs_free_rdir_buf(file); return 0; + } rdir->head += err; ctx->pos += err; @@ -185,8 +195,10 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx) if (!dir_emit(ctx, curdirent.d_name, strlen(curdirent.d_name), QID2INO(&curdirent.qid), - curdirent.d_type)) + curdirent.d_type)) { + v9fs_free_rdir_buf(file); return 0; + } ctx->pos = curdirent.d_off; rdir->head += err; diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index b34301650c479..f91e50a76efd4 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -57,7 +57,7 @@ static inline void netpoll_poll_disable(struct net_device *dev) { return; } static inline void netpoll_poll_enable(struct net_device *dev) { return; } #endif -void netpoll_send_udp(struct netpoll *np, const char *msg, int len); +int netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); int __netpoll_setup(struct netpoll *np, struct net_device *ndev); diff --git a/include/linux/packing.h b/include/linux/packing.h index 5d36dcd06f604..f9bfb20060300 100644 --- a/include/linux/packing.h +++ b/include/linux/packing.h @@ -7,6 +7,7 @@ #include #include +#include #define QUIRK_MSB_ON_THE_RIGHT BIT(0) #define QUIRK_LITTLE_ENDIAN BIT(1) @@ -26,4 +27,40 @@ int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, size_t pbuflen, u8 quirks); +void pack_fields_s(void *pbuf, size_t pbuflen, const void *ustruct, + const struct packed_field_s *fields, size_t num_fields, + u8 quirks); + +void pack_fields_m(void *pbuf, size_t pbuflen, const void *ustruct, + const struct packed_field_m *fields, size_t num_fields, + u8 quirks); + +void unpack_fields_s(const void *pbuf, size_t pbuflen, void *ustruct, + const struct packed_field_s *fields, size_t num_fields, + u8 quirks); + +void unpack_fields_m(const void *pbuf, size_t pbuflen, void *ustruct, + const struct packed_field_m *fields, size_t num_fields, + u8 quirks); + +#define pack_fields(pbuf, pbuflen, ustruct, fields, quirks) \ + ({ \ + CHECK_PACKED_FIELDS(fields); \ + CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \ + _Generic((fields), \ + const struct packed_field_s * : pack_fields_s, \ + const struct packed_field_m * : pack_fields_m \ + 
)((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \ + }) + +#define unpack_fields(pbuf, pbuflen, ustruct, fields, quirks) \ + ({ \ + CHECK_PACKED_FIELDS(fields); \ + CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \ + _Generic((fields), \ + const struct packed_field_s * : unpack_fields_s, \ + const struct packed_field_m * : unpack_fields_m \ + )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \ + }) + #endif diff --git a/include/linux/packing_types.h b/include/linux/packing_types.h new file mode 100644 index 0000000000000..3bcc46850a596 --- /dev/null +++ b/include/linux/packing_types.h @@ -0,0 +1,2831 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2024, Intel Corporation + * Copyright (c) 2024, Vladimir Oltean + */ +#ifndef _LINUX_PACKING_TYPES_H +#define _LINUX_PACKING_TYPES_H + +#include + +/* If you add another packed field type, please update + * scripts/mod/packed_fields.c to enable compile time sanity checks. + */ + +#define GEN_PACKED_FIELD_MEMBERS(__type) \ + __type startbit; \ + __type endbit; \ + __type offset; \ + __type size + +/* Small packed field. Use with bit offsets < 256, buffers < 32B and + * unpacked structures < 256B. + */ +struct packed_field_s { + GEN_PACKED_FIELD_MEMBERS(u8); +}; + +/* Medium packed field. Use with bit offsets < 65536, buffers < 8KB and + * unpacked structures < 64KB. + */ +struct packed_field_m { + GEN_PACKED_FIELD_MEMBERS(u16); +}; + +#define PACKED_FIELD(start, end, struct_name, struct_field) \ +{ \ + (start), \ + (end), \ + offsetof(struct_name, struct_field), \ + sizeof_field(struct_name, struct_field), \ +} + +#define CHECK_PACKED_FIELD(field) ({ \ + typeof(field) __f = (field); \ + BUILD_BUG_ON(__f.startbit < __f.endbit); \ + BUILD_BUG_ON(__f.startbit - __f.endbit >= BITS_PER_BYTE * __f.size); \ + BUILD_BUG_ON(__f.size != 1 && __f.size != 2 && \ + __f.size != 4 && __f.size != 8); \ +}) + + +#define CHECK_PACKED_FIELD_OVERLAP(ascending, field1, field2) ({ \ + typeof(field1) _f1 = (field1); typeof(field2) _f2 = (field2); \ + const bool _a = (ascending); \ + BUILD_BUG_ON(_a && _f1.startbit >= _f2.startbit); \ + BUILD_BUG_ON(!_a && _f1.startbit <= _f2.startbit); \ + BUILD_BUG_ON(max(_f1.endbit, _f2.endbit) <= \ + min(_f1.startbit, _f2.startbit)); \ +}) + +#define CHECK_PACKED_FIELDS_SIZE(fields, pbuflen) ({ \ + typeof(&(fields)[0]) _f = (fields); \ + typeof(pbuflen) _len = (pbuflen); \ + const size_t num_fields = ARRAY_SIZE(fields); \ + BUILD_BUG_ON(!__builtin_constant_p(_len)); \ + BUILD_BUG_ON(_f[0].startbit >= BITS_PER_BYTE * _len); \ + BUILD_BUG_ON(_f[num_fields - 1].startbit >= BITS_PER_BYTE * _len); \ +}) + +/* Do not hand-edit the following packed field check macros! + * + * They are generated using scripts/gen_packed_field_checks.c, which may be + * built via "make scripts_gen_packed_field_checks". If larger macro sizes are + * needed in the future, please use this program to re-generate the macros and + * insert them here. 
+ */ + +#define CHECK_PACKED_FIELDS_1(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 1); \ + CHECK_PACKED_FIELD(_f[0]); }) + +#define CHECK_PACKED_FIELDS_2(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 2); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); }) + +#define CHECK_PACKED_FIELDS_3(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 3); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); }) + +#define CHECK_PACKED_FIELDS_4(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 4); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); }) + +#define CHECK_PACKED_FIELDS_5(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 5); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); }) + +#define CHECK_PACKED_FIELDS_6(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 6); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); }) + +#define CHECK_PACKED_FIELDS_7(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 7); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); }) + +#define CHECK_PACKED_FIELDS_8(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + 
BUILD_BUG_ON(ARRAY_SIZE(fields) != 8); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); }) + +#define CHECK_PACKED_FIELDS_9(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 9); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); }) + +#define CHECK_PACKED_FIELDS_10(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 10); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); }) + +#define CHECK_PACKED_FIELDS_11(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 11); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit 
< _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); }) + +#define CHECK_PACKED_FIELDS_12(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 12); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); }) + +#define CHECK_PACKED_FIELDS_13(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 13); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); }) + +#define CHECK_PACKED_FIELDS_14(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 14); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); }) + +#define CHECK_PACKED_FIELDS_15(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 15); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); }) + +#define CHECK_PACKED_FIELDS_16(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 16); \ + 
CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); }) + +#define CHECK_PACKED_FIELDS_17(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 17); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], 
_f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); }) + +#define CHECK_PACKED_FIELDS_18(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 18); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); }) + +#define CHECK_PACKED_FIELDS_19(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 19); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit 
< _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); }) + +#define CHECK_PACKED_FIELDS_20(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 20); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); }) + +#define CHECK_PACKED_FIELDS_21(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 21); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + 
CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); }) + +#define CHECK_PACKED_FIELDS_22(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 22); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); }) + +#define CHECK_PACKED_FIELDS_23(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 23); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); }) + +#define CHECK_PACKED_FIELDS_24(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ 
+ BUILD_BUG_ON(ARRAY_SIZE(fields) != 24); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); }) + +#define CHECK_PACKED_FIELDS_25(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 25); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + 
CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); }) + +#define CHECK_PACKED_FIELDS_26(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 26); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); }) + +#define CHECK_PACKED_FIELDS_27(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 27); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ 
+ CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); }) + +#define CHECK_PACKED_FIELDS_28(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 28); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit 
< _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); }) + +#define CHECK_PACKED_FIELDS_29(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 29); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); }) + +#define CHECK_PACKED_FIELDS_30(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 30); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); }) + +#define CHECK_PACKED_FIELDS_31(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 31); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); }) + +#define CHECK_PACKED_FIELDS_32(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 32); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ 
+ CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); }) + +#define CHECK_PACKED_FIELDS_33(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 33); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); }) + +#define CHECK_PACKED_FIELDS_34(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 34); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); }) + +#define CHECK_PACKED_FIELDS_35(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 35); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); }) + +#define CHECK_PACKED_FIELDS_36(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 36); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); }) + +#define CHECK_PACKED_FIELDS_37(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 37); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], 
_f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); }) + +#define CHECK_PACKED_FIELDS_38(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 38); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + 
CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); }) + +#define CHECK_PACKED_FIELDS_39(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 39); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + 
CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit 
< _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); }) + +#define CHECK_PACKED_FIELDS_40(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 40); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); }) + +#define CHECK_PACKED_FIELDS_41(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 41); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); }) + +#define CHECK_PACKED_FIELDS_42(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 42); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); 
\ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); 
\ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); }) + +#define CHECK_PACKED_FIELDS_43(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 43); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); }) + +#define CHECK_PACKED_FIELDS_44(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 44); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); }) + +#define CHECK_PACKED_FIELDS_45(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 45); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + 
CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD(_f[44]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[43], _f[44]); }) + +#define CHECK_PACKED_FIELDS_46(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 46); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD(_f[44]); \ + CHECK_PACKED_FIELD(_f[45]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[43], _f[44]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[44], _f[45]); }) + +#define CHECK_PACKED_FIELDS_47(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 47); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + 
CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD(_f[44]); \ + CHECK_PACKED_FIELD(_f[45]); \ + CHECK_PACKED_FIELD(_f[46]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[43], _f[44]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[44], _f[45]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[45], _f[46]); }) + +#define CHECK_PACKED_FIELDS_48(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 48); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD(_f[44]); \ + CHECK_PACKED_FIELD(_f[45]); \ + CHECK_PACKED_FIELD(_f[46]); \ + CHECK_PACKED_FIELD(_f[47]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + 
CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[43], _f[44]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[44], _f[45]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[45], _f[46]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[46], _f[47]); }) + +#define CHECK_PACKED_FIELDS_49(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 49); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + 
CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD(_f[44]); \ + CHECK_PACKED_FIELD(_f[45]); \ + CHECK_PACKED_FIELD(_f[46]); \ + CHECK_PACKED_FIELD(_f[47]); \ + CHECK_PACKED_FIELD(_f[48]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], 
_f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[43], _f[44]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[44], _f[45]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[45], _f[46]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[46], _f[47]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[47], _f[48]); }) + +#define CHECK_PACKED_FIELDS_50(fields) \ + ({ typeof(&(fields)[0]) _f = (fields); \ + BUILD_BUG_ON(ARRAY_SIZE(fields) != 50); \ + CHECK_PACKED_FIELD(_f[0]); \ + CHECK_PACKED_FIELD(_f[1]); \ + CHECK_PACKED_FIELD(_f[2]); \ + CHECK_PACKED_FIELD(_f[3]); \ + CHECK_PACKED_FIELD(_f[4]); \ + CHECK_PACKED_FIELD(_f[5]); \ + CHECK_PACKED_FIELD(_f[6]); \ + CHECK_PACKED_FIELD(_f[7]); \ + CHECK_PACKED_FIELD(_f[8]); \ + CHECK_PACKED_FIELD(_f[9]); \ + CHECK_PACKED_FIELD(_f[10]); \ + CHECK_PACKED_FIELD(_f[11]); \ + CHECK_PACKED_FIELD(_f[12]); \ + CHECK_PACKED_FIELD(_f[13]); \ + CHECK_PACKED_FIELD(_f[14]); \ + CHECK_PACKED_FIELD(_f[15]); \ + CHECK_PACKED_FIELD(_f[16]); \ + CHECK_PACKED_FIELD(_f[17]); \ + CHECK_PACKED_FIELD(_f[18]); \ + CHECK_PACKED_FIELD(_f[19]); \ + CHECK_PACKED_FIELD(_f[20]); \ + CHECK_PACKED_FIELD(_f[21]); \ + CHECK_PACKED_FIELD(_f[22]); \ + CHECK_PACKED_FIELD(_f[23]); \ + CHECK_PACKED_FIELD(_f[24]); \ + CHECK_PACKED_FIELD(_f[25]); \ + CHECK_PACKED_FIELD(_f[26]); \ + CHECK_PACKED_FIELD(_f[27]); \ + CHECK_PACKED_FIELD(_f[28]); \ + CHECK_PACKED_FIELD(_f[29]); \ + CHECK_PACKED_FIELD(_f[30]); \ + CHECK_PACKED_FIELD(_f[31]); \ + CHECK_PACKED_FIELD(_f[32]); \ + CHECK_PACKED_FIELD(_f[33]); \ + CHECK_PACKED_FIELD(_f[34]); \ + CHECK_PACKED_FIELD(_f[35]); \ + CHECK_PACKED_FIELD(_f[36]); \ + CHECK_PACKED_FIELD(_f[37]); \ + CHECK_PACKED_FIELD(_f[38]); \ + CHECK_PACKED_FIELD(_f[39]); \ + CHECK_PACKED_FIELD(_f[40]); \ + CHECK_PACKED_FIELD(_f[41]); \ + CHECK_PACKED_FIELD(_f[42]); \ + CHECK_PACKED_FIELD(_f[43]); \ + CHECK_PACKED_FIELD(_f[44]); \ + CHECK_PACKED_FIELD(_f[45]); \ + CHECK_PACKED_FIELD(_f[46]); \ + CHECK_PACKED_FIELD(_f[47]); \ + CHECK_PACKED_FIELD(_f[48]); \ + CHECK_PACKED_FIELD(_f[49]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[0], _f[1]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[1], _f[2]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[2], _f[3]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[3], _f[4]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[4], _f[5]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[5], _f[6]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[6], _f[7]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[7], _f[8]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[8], _f[9]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[9], _f[10]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[10], _f[11]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[11], _f[12]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[12], _f[13]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[13], _f[14]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[14], _f[15]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[15], _f[16]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[16], _f[17]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[17], _f[18]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[18], _f[19]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[19], _f[20]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[20], _f[21]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[21], _f[22]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[22], _f[23]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[23], _f[24]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[24], _f[25]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[25], _f[26]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[26], _f[27]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[27], _f[28]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[28], _f[29]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[29], _f[30]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[30], _f[31]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[31], _f[32]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[32], _f[33]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[33], _f[34]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[34], _f[35]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[35], _f[36]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[36], _f[37]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[37], _f[38]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[38], _f[39]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[39], _f[40]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[40], _f[41]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[41], _f[42]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[42], _f[43]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[43], _f[44]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[44], _f[45]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[45], _f[46]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[46], _f[47]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[47], _f[48]); \ + CHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < 
_f[1].startbit, _f[48], _f[49]); }) + +#define CHECK_PACKED_FIELDS(fields) \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 1, CHECK_PACKED_FIELDS_1(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 2, CHECK_PACKED_FIELDS_2(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 3, CHECK_PACKED_FIELDS_3(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 4, CHECK_PACKED_FIELDS_4(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 5, CHECK_PACKED_FIELDS_5(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 6, CHECK_PACKED_FIELDS_6(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 7, CHECK_PACKED_FIELDS_7(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 8, CHECK_PACKED_FIELDS_8(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 9, CHECK_PACKED_FIELDS_9(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 10, CHECK_PACKED_FIELDS_10(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 11, CHECK_PACKED_FIELDS_11(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 12, CHECK_PACKED_FIELDS_12(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 13, CHECK_PACKED_FIELDS_13(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 14, CHECK_PACKED_FIELDS_14(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 15, CHECK_PACKED_FIELDS_15(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 16, CHECK_PACKED_FIELDS_16(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 17, CHECK_PACKED_FIELDS_17(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 18, CHECK_PACKED_FIELDS_18(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 19, CHECK_PACKED_FIELDS_19(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 20, CHECK_PACKED_FIELDS_20(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 21, CHECK_PACKED_FIELDS_21(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 22, CHECK_PACKED_FIELDS_22(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 23, CHECK_PACKED_FIELDS_23(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 24, CHECK_PACKED_FIELDS_24(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 25, CHECK_PACKED_FIELDS_25(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 26, CHECK_PACKED_FIELDS_26(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 27, CHECK_PACKED_FIELDS_27(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 28, CHECK_PACKED_FIELDS_28(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 29, CHECK_PACKED_FIELDS_29(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 30, CHECK_PACKED_FIELDS_30(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 31, CHECK_PACKED_FIELDS_31(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 32, CHECK_PACKED_FIELDS_32(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 33, CHECK_PACKED_FIELDS_33(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 34, CHECK_PACKED_FIELDS_34(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 35, CHECK_PACKED_FIELDS_35(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 36, CHECK_PACKED_FIELDS_36(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 37, CHECK_PACKED_FIELDS_37(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 38, CHECK_PACKED_FIELDS_38(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 39, CHECK_PACKED_FIELDS_39(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 40, CHECK_PACKED_FIELDS_40(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 41, CHECK_PACKED_FIELDS_41(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 42, 
CHECK_PACKED_FIELDS_42(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 43, CHECK_PACKED_FIELDS_43(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 44, CHECK_PACKED_FIELDS_44(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 45, CHECK_PACKED_FIELDS_45(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 46, CHECK_PACKED_FIELDS_46(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 47, CHECK_PACKED_FIELDS_47(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 48, CHECK_PACKED_FIELDS_48(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 49, CHECK_PACKED_FIELDS_49(fields), \ + __builtin_choose_expr(ARRAY_SIZE(fields) == 50, CHECK_PACKED_FIELDS_50(fields), \ + ({ BUILD_BUG_ON_MSG(1, "CHECK_PACKED_FIELDS() must be regenerated to support array sizes larger than 50."); }) \ + )))))))))))))))))))))))))))))))))))))))))))))))))) + +#endif diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 57cc4b07fd175..e5072d64a364c 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -109,9 +109,11 @@ dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq); dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq); int virtqueue_resize(struct virtqueue *vq, u32 num, - void (*recycle)(struct virtqueue *vq, void *buf)); + void (*recycle)(struct virtqueue *vq, void *buf), + bool *flushed); int virtqueue_reset(struct virtqueue *vq, - void (*recycle)(struct virtqueue *vq, void *buf)); + void (*recycle)(struct virtqueue *vq, void *buf), + bool *flushed); struct virtio_admin_cmd { __le16 opcode; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index beb533a0e8809..62c0a7e65d6bd 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -45,6 +45,8 @@ struct inet_timewait_sock { #define tw_node __tw_common.skc_nulls_node #define tw_bind_node __tw_common.skc_bind_node #define tw_refcnt __tw_common.skc_refcnt +#define tw_tx_queue_mapping __tw_common.skc_tx_queue_mapping +#define tw_rx_queue_mapping __tw_common.skc_rx_queue_mapping #define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot #define tw_net __tw_common.skc_net diff --git a/lib/packing.c b/lib/packing.c index 793942745e34f..45164f73fe5bf 100644 --- a/lib/packing.c +++ b/lib/packing.c @@ -5,10 +5,37 @@ #include #include #include +#include #include #include #include +#define __pack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks) \ + ({ \ + for (size_t i = 0; i < (num_fields); i++) { \ + typeof(&(fields)[0]) field = &(fields)[i]; \ + u64 uval; \ + \ + uval = ustruct_field_to_u64(ustruct, field->offset, field->size); \ + \ + __pack(pbuf, uval, field->startbit, field->endbit, \ + pbuflen, quirks); \ + } \ + }) + +#define __unpack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks) \ + ({ \ + for (size_t i = 0; i < (num_fields); i++) { \ + typeof(&(fields)[0]) field = &fields[i]; \ + u64 uval; \ + \ + __unpack(pbuf, &uval, field->startbit, field->endbit, \ + pbuflen, quirks); \ + \ + u64_to_ustruct_field(ustruct, field->offset, field->size, uval); \ + } \ + }) + /** * calculate_box_addr - Determine physical location of byte in buffer * @box: Index of byte within buffer seen as a logical big-endian big number @@ -51,64 +78,29 @@ static size_t calculate_box_addr(size_t box, size_t len, u8 quirks) return offset_of_group + offset_in_group; } -/** - * pack - Pack u64 number into bitfield of buffer. - * - * @pbuf: Pointer to a buffer holding the packed value. 
- * @uval: CPU-readable unpacked value to pack. - * @startbit: The index (in logical notation, compensated for quirks) where - * the packed value starts within pbuf. Must be larger than, or - * equal to, endbit. - * @endbit: The index (in logical notation, compensated for quirks) where - * the packed value ends within pbuf. Must be smaller than, or equal - * to, startbit. - * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. - * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and - * QUIRK_MSB_ON_THE_RIGHT. - * - * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming - * correct usage, return code may be discarded. The @pbuf memory will - * be modified on success. - */ -int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, - u8 quirks) +static void __pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks) { /* Logical byte indices corresponding to the * start and end of the field. */ - int plogical_first_u8, plogical_last_u8, box; - /* width of the field to access in the pbuf */ - u64 value_width; - - /* startbit is expected to be larger than endbit, and both are - * expected to be within the logically addressable range of the buffer. - */ - if (unlikely(startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen)) - /* Invalid function call */ - return -EINVAL; - - value_width = startbit - endbit + 1; - if (unlikely(value_width > 64)) - return -ERANGE; + int plogical_first_u8 = startbit / BITS_PER_BYTE; + int plogical_last_u8 = endbit / BITS_PER_BYTE; + int value_width = startbit - endbit + 1; + int box; /* Check if "uval" fits in "value_width" bits. - * If value_width is 64, the check will fail, but any - * 64-bit uval will surely fit. + * The test only works for value_width < 64, but in the latter case, + * any 64-bit uval will surely fit. */ - if (unlikely(value_width < 64 && uval >= (1ull << value_width))) - /* Cannot store "uval" inside "value_width" bits. - * Truncating "uval" is most certainly not desirable, - * so simply erroring out is appropriate. - */ - return -ERANGE; + WARN(value_width < 64 && uval >= (1ull << value_width), + "Cannot store 0x%llx inside bits %zu-%zu - will truncate\n", + uval, startbit, endbit); /* Iterate through an idealistic view of the pbuf as an u64 with * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low * logical bit significance. "box" denotes the current logical u8. */ - plogical_first_u8 = startbit / BITS_PER_BYTE; - plogical_last_u8 = endbit / BITS_PER_BYTE; - for (box = plogical_first_u8; box >= plogical_last_u8; box--) { /* Bit indices into the currently accessed 8-bit box */ size_t box_start_bit, box_end_bit, box_addr; @@ -163,15 +155,13 @@ int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, ((u8 *)pbuf)[box_addr] &= ~box_mask; ((u8 *)pbuf)[box_addr] |= pval; } - return 0; } -EXPORT_SYMBOL(pack); /** - * unpack - Unpack u64 number from packed buffer. + * pack - Pack u64 number into bitfield of buffer. * * @pbuf: Pointer to a buffer holding the packed value. - * @uval: Pointer to an u64 holding the unpacked value. + * @uval: CPU-readable unpacked value to pack. * @startbit: The index (in logical notation, compensated for quirks) where * the packed value starts within pbuf. Must be larger than, or * equal to, endbit. @@ -183,19 +173,12 @@ EXPORT_SYMBOL(pack); * QUIRK_MSB_ON_THE_RIGHT. * * Return: 0 on success, EINVAL or ERANGE if called incorrectly. 
Assuming - * correct usage, return code may be discarded. The @uval will be - * modified on success. + * correct usage, return code may be discarded. The @pbuf memory will + * be modified on success. */ -int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, - size_t pbuflen, u8 quirks) +int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, + u8 quirks) { - /* Logical byte indices corresponding to the - * start and end of the field. - */ - int plogical_first_u8, plogical_last_u8, box; - /* width of the field to access in the pbuf */ - u64 value_width; - /* startbit is expected to be larger than endbit, and both are * expected to be within the logically addressable range of the buffer. */ @@ -203,10 +186,25 @@ int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, /* Invalid function call */ return -EINVAL; - value_width = startbit - endbit + 1; - if (unlikely(value_width > 64)) + if (unlikely(startbit - endbit >= 64)) return -ERANGE; + __pack(pbuf, uval, startbit, endbit, pbuflen, quirks); + + return 0; +} +EXPORT_SYMBOL(pack); + +static void __unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks) +{ + /* Logical byte indices corresponding to the + * start and end of the field. + */ + int plogical_first_u8 = startbit / BITS_PER_BYTE; + int plogical_last_u8 = endbit / BITS_PER_BYTE; + int box; + /* Initialize parameter */ *uval = 0; @@ -214,9 +212,6 @@ int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low * logical bit significance. "box" denotes the current logical u8. */ - plogical_first_u8 = startbit / BITS_PER_BYTE; - plogical_last_u8 = endbit / BITS_PER_BYTE; - for (box = plogical_first_u8; box >= plogical_last_u8; box--) { /* Bit indices into the currently accessed 8-bit box */ size_t box_start_bit, box_end_bit, box_addr; @@ -271,6 +266,46 @@ int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, *uval &= ~proj_mask; *uval |= pval; } +} + +/** + * unpack - Unpack u64 number from packed buffer. + * + * @pbuf: Pointer to a buffer holding the packed value. + * @uval: Pointer to an u64 holding the unpacked value. + * @startbit: The index (in logical notation, compensated for quirks) where + * the packed value starts within pbuf. Must be larger than, or + * equal to, endbit. + * @endbit: The index (in logical notation, compensated for quirks) where + * the packed value ends within pbuf. Must be smaller than, or equal + * to, startbit. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. + * + * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming + * correct usage, return code may be discarded. The @uval will be + * modified on success. + */ +int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks) +{ + /* width of the field to access in the pbuf */ + u64 value_width; + + /* startbit is expected to be larger than endbit, and both are + * expected to be within the logically addressable range of the buffer. 
+ */ + if (startbit < endbit || startbit >= BITS_PER_BYTE * pbuflen) + /* Invalid function call */ + return -EINVAL; + + value_width = startbit - endbit + 1; + if (value_width > 64) + return -ERANGE; + + __unpack(pbuf, uval, startbit, endbit, pbuflen, quirks); + return 0; } EXPORT_SYMBOL(unpack); @@ -314,4 +349,122 @@ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, } EXPORT_SYMBOL(packing); +static u64 ustruct_field_to_u64(const void *ustruct, size_t field_offset, + size_t field_size) +{ + switch (field_size) { + case 1: + return *((u8 *)(ustruct + field_offset)); + case 2: + return *((u16 *)(ustruct + field_offset)); + case 4: + return *((u32 *)(ustruct + field_offset)); + default: + return *((u64 *)(ustruct + field_offset)); + } +} + +static void u64_to_ustruct_field(void *ustruct, size_t field_offset, + size_t field_size, u64 uval) +{ + switch (field_size) { + case 1: + *((u8 *)(ustruct + field_offset)) = uval; + break; + case 2: + *((u16 *)(ustruct + field_offset)) = uval; + break; + case 4: + *((u32 *)(ustruct + field_offset)) = uval; + break; + default: + *((u64 *)(ustruct + field_offset)) = uval; + break; + } +} + +/** + * pack_fields_s - Pack array of small fields + * + * @pbuf: Pointer to a buffer holding the packed value. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @ustruct: Pointer to CPU-readable structure holding the unpacked value. + * It is expected (but not checked) that this has the same data type + * as all struct packed_field_s definitions. + * @fields: Array of small packed fields definition. They must not overlap. + * @num_fields: Length of @fields array. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. + */ +void pack_fields_s(void *pbuf, size_t pbuflen, const void *ustruct, + const struct packed_field_s *fields, size_t num_fields, + u8 quirks) +{ + __pack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks); +} +EXPORT_SYMBOL(pack_fields_s); + +/** + * pack_fields_m - Pack array of medium fields + * + * @pbuf: Pointer to a buffer holding the packed value. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @ustruct: Pointer to CPU-readable structure holding the unpacked value. + * It is expected (but not checked) that this has the same data type + * as all struct packed_field_s definitions. + * @fields: Array of medium packed fields definition. They must not overlap. + * @num_fields: Length of @fields array. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. + */ +void pack_fields_m(void *pbuf, size_t pbuflen, const void *ustruct, + const struct packed_field_m *fields, size_t num_fields, + u8 quirks) +{ + __pack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks); +} +EXPORT_SYMBOL(pack_fields_m); + +/** + * unpack_fields_s - Unpack array of small fields + * + * @pbuf: Pointer to a buffer holding the packed value. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @ustruct: Pointer to CPU-readable structure holding the unpacked value. + * It is expected (but not checked) that this has the same data type + * as all struct packed_field_s definitions. + * @fields: Array of small packed fields definition. They must not overlap. + * @num_fields: Length of @fields array. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. 
+ */ +void unpack_fields_s(const void *pbuf, size_t pbuflen, void *ustruct, + const struct packed_field_s *fields, size_t num_fields, + u8 quirks) +{ + __unpack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks); +} +EXPORT_SYMBOL(unpack_fields_s); + +/** + * unpack_fields_m - Unpack array of medium fields + * + * @pbuf: Pointer to a buffer holding the packed value. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. + * @ustruct: Pointer to CPU-readable structure holding the unpacked value. + * It is expected (but not checked) that this has the same data type + * as all struct packed_field_s definitions. + * @fields: Array of medium packed fields definition. They must not overlap. + * @num_fields: Length of @fields array. + * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and + * QUIRK_MSB_ON_THE_RIGHT. + */ +void unpack_fields_m(const void *pbuf, size_t pbuflen, void *ustruct, + const struct packed_field_m *fields, size_t num_fields, + u8 quirks) +{ + __unpack_fields(pbuf, pbuflen, ustruct, fields, num_fields, quirks); +} +EXPORT_SYMBOL(unpack_fields_m); + MODULE_DESCRIPTION("Generic bitfield packing and unpacking"); diff --git a/lib/packing_test.c b/lib/packing_test.c index b38ea43c03fd8..3b4167ce56bf6 100644 --- a/lib/packing_test.c +++ b/lib/packing_test.c @@ -396,9 +396,70 @@ static void packing_test_unpack(struct kunit *test) KUNIT_EXPECT_EQ(test, uval, params->uval); } +#define PACKED_BUF_SIZE 8 + +typedef struct __packed { u8 buf[PACKED_BUF_SIZE]; } packed_buf_t; + +struct test_data { + u32 field3; + u16 field2; + u16 field4; + u16 field6; + u8 field1; + u8 field5; +}; + +static const struct packed_field_s test_fields[] = { + PACKED_FIELD(63, 61, struct test_data, field1), + PACKED_FIELD(60, 52, struct test_data, field2), + PACKED_FIELD(51, 28, struct test_data, field3), + PACKED_FIELD(27, 14, struct test_data, field4), + PACKED_FIELD(13, 9, struct test_data, field5), + PACKED_FIELD(8, 0, struct test_data, field6), +}; + +static void packing_test_pack_fields(struct kunit *test) +{ + const struct test_data data = { + .field1 = 0x2, + .field2 = 0x100, + .field3 = 0xF00050, + .field4 = 0x7D3, + .field5 = 0x9, + .field6 = 0x10B, + }; + packed_buf_t expect = { + .buf = { 0x50, 0x0F, 0x00, 0x05, 0x01, 0xF4, 0xD3, 0x0B }, + }; + packed_buf_t buf = {}; + + pack_fields(&buf, sizeof(buf), &data, test_fields, 0); + + KUNIT_EXPECT_MEMEQ(test, &expect, &buf, sizeof(buf)); +} + +static void packing_test_unpack_fields(struct kunit *test) +{ + const packed_buf_t buf = { + .buf = { 0x17, 0x28, 0x10, 0x19, 0x3D, 0xA9, 0x07, 0x9C }, + }; + struct test_data data = {}; + + unpack_fields(&buf, sizeof(buf), &data, test_fields, 0); + + KUNIT_EXPECT_EQ(test, 0, data.field1); + KUNIT_EXPECT_EQ(test, 0x172, data.field2); + KUNIT_EXPECT_EQ(test, 0x810193, data.field3); + KUNIT_EXPECT_EQ(test, 0x36A4, data.field4); + KUNIT_EXPECT_EQ(test, 0x3, data.field5); + KUNIT_EXPECT_EQ(test, 0x19C, data.field6); +} + static struct kunit_case packing_test_cases[] = { KUNIT_CASE_PARAM(packing_test_pack, packing_gen_params), KUNIT_CASE_PARAM(packing_test_unpack, packing_gen_params), + KUNIT_CASE(packing_test_pack_fields), + KUNIT_CASE(packing_test_unpack_fields), {}, }; diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 319f47df33300..95f7a7e65a73f 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -1505,7 +1505,7 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv, session->state = J1939_SESSION_NEW; 
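A minimal sketch of the logical bit numbering preserved by the lib/packing.c hunks above, assuming the default big-endian logical view with no quirks (bit pbuflen * 8 - 1 is the most significant bit of byte 0); the helper name and the expected buffer contents below are illustrative, not taken from the patch:

#include <linux/packing.h>

/* Sketch: pack 0x1234 into logical bits 15..0 of a 4-byte buffer and
 * read it back. With quirks == 0 the buffer is treated as one
 * big-endian number, so logical bits 15..0 land in the last two bytes.
 */
static int packing_layout_sketch(void)
{
	u8 buf[4] = {};
	u64 uval;
	int err;

	err = pack(buf, 0x1234, 15, 0, sizeof(buf), 0);
	if (err)
		return err;
	/* buf is now { 0x00, 0x00, 0x12, 0x34 } */

	err = unpack(buf, &uval, 15, 0, sizeof(buf), 0);
	if (err)
		return err;
	/* uval is 0x1234 again */

	return 0;
}

The pack_fields()/unpack_fields() tests added in lib/packing_test.c above rely on the same numbering when they spell out the expected packed buffer byte by byte.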
skb_queue_head_init(&session->skb_queue); - skb_queue_tail(&session->skb_queue, skb); + skb_queue_tail(&session->skb_queue, skb_get(skb)); skcb = j1939_skb_to_cb(skb); memcpy(&session->skcb, skcb, sizeof(session->skcb)); diff --git a/net/core/dev.c b/net/core/dev.c index 13d00fc10f559..45a8c3dd4a648 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6557,18 +6557,22 @@ static void __napi_hash_add_with_id(struct napi_struct *napi, static void napi_hash_add_with_id(struct napi_struct *napi, unsigned int napi_id) { - spin_lock(&napi_hash_lock); + unsigned long flags; + + spin_lock_irqsave(&napi_hash_lock, flags); WARN_ON_ONCE(napi_by_id(napi_id)); __napi_hash_add_with_id(napi, napi_id); - spin_unlock(&napi_hash_lock); + spin_unlock_irqrestore(&napi_hash_lock, flags); } static void napi_hash_add(struct napi_struct *napi) { + unsigned long flags; + if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) return; - spin_lock(&napi_hash_lock); + spin_lock_irqsave(&napi_hash_lock, flags); /* 0..NR_CPUS range is reserved for sender_cpu use */ do { @@ -6578,7 +6582,7 @@ static void napi_hash_add(struct napi_struct *napi) __napi_hash_add_with_id(napi, napi_gen_id); - spin_unlock(&napi_hash_lock); + spin_unlock_irqrestore(&napi_hash_lock, flags); } /* Warning : caller is responsible to make sure rcu grace period @@ -6586,11 +6590,13 @@ static void napi_hash_add(struct napi_struct *napi) */ static void napi_hash_del(struct napi_struct *napi) { - spin_lock(&napi_hash_lock); + unsigned long flags; + + spin_lock_irqsave(&napi_hash_lock, flags); hlist_del_init_rcu(&napi->napi_hash_node); - spin_unlock(&napi_hash_lock); + spin_unlock_irqrestore(&napi_hash_lock, flags); } static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 2e459b9d88eb5..6f2647b000b8f 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -390,7 +390,7 @@ netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) } EXPORT_SYMBOL(netpoll_send_skb); -void netpoll_send_udp(struct netpoll *np, const char *msg, int len) +int netpoll_send_udp(struct netpoll *np, const char *msg, int len) { int total_len, ip_len, udp_len; struct sk_buff *skb; @@ -414,7 +414,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) skb = find_skb(np, total_len + np->dev->needed_tailroom, total_len - len); if (!skb) - return; + return -ENOMEM; skb_copy_to_linear_data(skb, msg, len); skb_put(skb, len); @@ -490,7 +490,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) skb->dev = np->dev; - netpoll_send_skb(np, skb); + return (int)netpoll_send_skb(np, skb); } EXPORT_SYMBOL(netpoll_send_udp); @@ -634,7 +634,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) goto out; } - if (!rcu_access_pointer(ndev->npinfo)) { + npinfo = rtnl_dereference(ndev->npinfo); + if (!npinfo) { npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); if (!npinfo) { err = -ENOMEM; @@ -654,7 +655,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) goto free_npinfo; } } else { - npinfo = rtnl_dereference(ndev->npinfo); refcount_inc(&npinfo->refcnt); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 58df76fe408a4..08d3a045edf81 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2533,6 +2533,18 @@ static struct net *rtnl_link_get_net_ifla(struct nlattr *tb[]) return net; } +/** + * rtnl_link_get_net - Get the network namespace from the netlink attributes + * or just @src_net. 
+ * + * @src_net: the source network namespace + * @tb: the netlink attributes + * + * Returns: + * The network namespace specified in the netlink attributes, + * in case of error, an error pointer is returned. + * Or, @src_net if no netns attributes were passed. + */ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { struct net *net = rtnl_link_get_net_ifla(tb); diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 54086bb05c42c..f7554dcdaaba9 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1166,8 +1166,12 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt, goto not_valid_or_not_known; } - return dccp_feat_push_confirm(fn, feat, local, &fval); + if (dccp_feat_push_confirm(fn, feat, local, &fval)) { + kfree(fval.sp.vec); + return DCCP_RESET_CODE_TOO_BUSY; + } + return 0; } else if (entry->state == FEAT_UNSTABLE) { /* 6.6.2 */ return 0; } diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c index 0515d6604b3b9..f0883357d12e5 100644 --- a/net/ethtool/bitset.c +++ b/net/ethtool/bitset.c @@ -425,12 +425,32 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits, return 0; } +/** + * ethnl_bitmap32_equal() - Compare two bitmaps + * @map1: first bitmap + * @map2: second bitmap + * @nbits: bit size to compare + * + * Return: true if first @nbits are equal, false if not + */ +static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2, + unsigned int nbits) +{ + if (memcmp(map1, map2, nbits / 32 * sizeof(u32))) + return false; + if (nbits % 32 == 0) + return true; + return !((map1[nbits / 32] ^ map2[nbits / 32]) & + ethnl_lower_bits(nbits % 32)); +} + static int ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, const struct nlattr *attr, struct nlattr **tb, ethnl_string_array_t names, struct netlink_ext_ack *extack, bool *mod) { + u32 *saved_bitmap = NULL; struct nlattr *bit_attr; bool no_mask; int rem; @@ -448,8 +468,20 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, } no_mask = tb[ETHTOOL_A_BITSET_NOMASK]; - if (no_mask) - ethnl_bitmap32_clear(bitmap, 0, nbits, mod); + if (no_mask) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + unsigned int nbytes = nwords * sizeof(u32); + bool dummy; + + /* The bitmap size is only the size of the map part without + * its mask part. 
+ */ + saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL); + if (!saved_bitmap) + return -ENOMEM; + memcpy(saved_bitmap, bitmap, nbytes); + ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy); + } nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) { bool old_val, new_val; @@ -458,22 +490,30 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) { NL_SET_ERR_MSG_ATTR(extack, bit_attr, "only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS"); + kfree(saved_bitmap); return -EINVAL; } ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask, names, extack); - if (ret < 0) + if (ret < 0) { + kfree(saved_bitmap); return ret; + } old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32)); if (new_val != old_val) { if (new_val) bitmap[idx / 32] |= ((u32)1 << (idx % 32)); else bitmap[idx / 32] &= ~((u32)1 << (idx % 32)); - *mod = true; + if (!no_mask) + *mod = true; } } + if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits)) + *mod = true; + + kfree(saved_bitmap); return 0; } diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 61df8ce44379d..7bb94875a7ec8 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -993,7 +993,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, return rc; /* Nonzero ring with RSS only makes sense if NIC adds them together */ - if (info.flow_type & FLOW_RSS && !ops->cap_rss_rxnfc_adds && + if (cmd == ETHTOOL_SRXCLSRLINS && info.flow_type & FLOW_RSS && + !ops->cap_rss_rxnfc_adds && ethtool_get_flow_spec_ring(info.fs.ring_cookie)) return -EINVAL; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 31a416ee21ad9..03eadd6c51fd1 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -246,20 +246,22 @@ static const struct header_ops hsr_header_ops = { .parse = eth_header_parse, }; -static struct sk_buff *hsr_init_skb(struct hsr_port *master) +static struct sk_buff *hsr_init_skb(struct hsr_port *master, int extra) { struct hsr_priv *hsr = master->hsr; struct sk_buff *skb; int hlen, tlen; + int len; hlen = LL_RESERVED_SPACE(master->dev); tlen = master->dev->needed_tailroom; + len = sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload); /* skb size is same for PRP/HSR frames, only difference * being, for PRP it is a trailer and for HSR it is a - * header + * header. + * RedBox might use @extra more bytes. 
*/ - skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) + - sizeof(struct hsr_sup_payload) + hlen + tlen); + skb = dev_alloc_skb(len + extra + hlen + tlen); if (!skb) return skb; @@ -295,6 +297,7 @@ static void send_hsr_supervision_frame(struct hsr_port *port, struct hsr_sup_tlv *hsr_stlv; struct hsr_sup_tag *hsr_stag; struct sk_buff *skb; + int extra = 0; *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); if (hsr->announce_count < 3 && hsr->prot_version == 0) { @@ -303,7 +306,11 @@ static void send_hsr_supervision_frame(struct hsr_port *port, hsr->announce_count++; } - skb = hsr_init_skb(port); + if (hsr->redbox) + extra = sizeof(struct hsr_sup_tlv) + + sizeof(struct hsr_sup_payload); + + skb = hsr_init_skb(port, extra); if (!skb) { netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n"); return; @@ -362,7 +369,7 @@ static void send_prp_supervision_frame(struct hsr_port *master, struct hsr_sup_tag *hsr_stag; struct sk_buff *skb; - skb = hsr_init_skb(master); + skb = hsr_init_skb(master, 0); if (!skb) { netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n"); return; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index aa6acebc7c1ef..87bb3a91598ee 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -700,6 +700,8 @@ static int fill_frame_info(struct hsr_frame_info *frame, frame->is_vlan = true; if (frame->is_vlan) { + if (skb->mac_len < offsetofend(struct hsr_vlan_ethhdr, vlanhdr)) + return -EINVAL; vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr; proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 4f088fa1c2f2c..963a89ae9c26e 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -517,6 +517,9 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, if (!IS_ERR(dst)) { if (rt != rt2) return rt; + if (inet_addr_type_dev_table(net, route_lookup_dev, + fl4->daddr) == RTN_LOCAL) + return rt; } else if (PTR_ERR(dst) == -EPERM) { rt = NULL; } else { diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index f0a4dda246abc..30a5e9460d006 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -314,7 +314,7 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, int tcp_v4_early_demux(struct sk_buff *skb); int udp_v4_early_demux(struct sk_buff *skb); -static int ip_rcv_finish_core(struct net *net, struct sock *sk, +static int ip_rcv_finish_core(struct net *net, struct sk_buff *skb, struct net_device *dev, const struct sk_buff *hint) { @@ -442,7 +442,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) if (!skb) return NET_RX_SUCCESS; - ret = ip_rcv_finish_core(net, sk, skb, dev, NULL); + ret = ip_rcv_finish_core(net, skb, dev, NULL); if (ret != NET_RX_DROP) ret = dst_input(skb); return ret; @@ -589,8 +589,7 @@ static struct sk_buff *ip_extract_route_hint(const struct net *net, return skb; } -static void ip_list_rcv_finish(struct net *net, struct sock *sk, - struct list_head *head) +static void ip_list_rcv_finish(struct net *net, struct list_head *head) { struct sk_buff *skb, *next, *hint = NULL; struct dst_entry *curr_dst = NULL; @@ -607,7 +606,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, skb = l3mdev_ip_rcv(skb); if (!skb) continue; - if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP) + if (ip_rcv_finish_core(net, skb, dev, hint) == NET_RX_DROP) continue; dst = skb_dst(skb); @@ -633,7 +632,7 @@ static void ip_sublist_rcv(struct list_head *head, struct net_device 
*dev, { NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL, head, dev, NULL, ip_rcv_finish); - ip_list_rcv_finish(net, NULL, head); + ip_list_rcv_finish(net, head); } /* Receive a list of IP packets */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index bb1fe1ba867ac..7121d8573928c 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -326,6 +326,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tcptw->tw_last_oow_ack_time = 0; tcptw->tw_tx_delay = tp->tcp_tx_delay; tw->tw_txhash = sk->sk_txhash; + tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping; +#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING + tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping; +#endif #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6a01905d379fd..e8953e88efef9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1674,7 +1674,6 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) struct sk_buff_head *list = &sk->sk_receive_queue; int rmem, err = -ENOMEM; spinlock_t *busy = NULL; - bool becomes_readable; int size, rcvbuf; /* Immediately drop when the receive queue is full. @@ -1715,19 +1714,12 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) */ sock_skb_set_dropcount(sk, skb); - becomes_readable = skb_queue_empty(list); __skb_queue_tail(list, skb); spin_unlock(&list->lock); - if (!sock_flag(sk, SOCK_DEAD)) { - if (becomes_readable || - sk->sk_data_ready != sock_def_readable || - READ_ONCE(sk->sk_peek_off) >= 0) - INDIRECT_CALL_1(sk->sk_data_ready, - sock_def_readable, sk); - else - sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); - } + if (!sock_flag(sk, SOCK_DEAD)) + INDIRECT_CALL_1(sk->sk_data_ready, sock_def_readable, sk); + busylock_release(busy); return 0; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c489a1e6aec9a..0e765466d7f79 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4821,7 +4821,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, ifm->ifa_prefixlen, extack); } -static int modify_prefix_route(struct inet6_ifaddr *ifp, +static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp, unsigned long expires, u32 flags, bool modify_peer) { @@ -4845,7 +4845,9 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, ifp->prefix_len, ifp->rt_priority, ifp->idev->dev, expires, flags, GFP_KERNEL); - } else { + return 0; + } + if (f6i != net->ipv6.fib6_null_entry) { table = f6i->fib6_table; spin_lock_bh(&table->tb6_lock); @@ -4858,9 +4860,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp, } spin_unlock_bh(&table->tb6_lock); - - fib6_info_release(f6i); } + fib6_info_release(f6i); return 0; } @@ -4939,7 +4940,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, int rc = -ENOENT; if (had_prefixroute) - rc = modify_prefix_route(ifp, expires, flags, false); + rc = modify_prefix_route(net, ifp, expires, flags, false); /* prefix route could have been deleted; if so restore it */ if (rc == -ENOENT) { @@ -4949,7 +4950,7 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, } if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr)) - rc = modify_prefix_route(ifp, expires, flags, true); + rc = modify_prefix_route(net, ifp, expires, flags, true); if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) { addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 
63d7681c929fc..67ff16c047180 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2780,10 +2780,10 @@ static void ip6_negative_advice(struct sock *sk, if (rt->rt6i_flags & RTF_CACHE) { rcu_read_lock(); if (rt6_check_expired(rt)) { - /* counteract the dst_release() in sk_dst_reset() */ - dst_hold(dst); + /* rt/dst can not be destroyed yet, + * because of rcu_read_lock() + */ sk_dst_reset(sk); - rt6_remove_exception_rt(rt); } rcu_read_unlock(); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index e280c27cb9f9a..1008ec8a464c9 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1369,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, int err; md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; - memset(md, 0xff, sizeof(*md)); md->version = 1; if (!depth) @@ -1398,9 +1397,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); return -EINVAL; } + memset(&md->u.index, 0xff, sizeof(md->u.index)); if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; - memset(&md->u, 0x00, sizeof(md->u)); md->u.index = nla_get_be32(nla); } } else if (md->version == 2) { @@ -1409,10 +1408,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); return -EINVAL; } + md->u.md2.dir = 1; if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; md->u.md2.dir = nla_get_u8(nla); } + set_hwid(&md->u.md2, 0xff); if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; set_hwid(&md->u.md2, nla_get_u8(nla)); diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index f2f9b75008bb0..8d8b2db4653c0 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1525,7 +1525,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) b->backlogs[idx] -= len; b->tin_backlog -= len; sch->qstats.backlog -= len; - qdisc_tree_reduce_backlog(sch, 1, len); flow->dropped++; b->tin_dropped++; @@ -1536,6 +1535,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) __qdisc_drop(skb, to_free); sch->q.qlen--; + qdisc_tree_reduce_backlog(sch, 1, len); cake_heapify(q, 0); diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 1e940ad0d2fad..59e7bdf5063e8 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx, if (idx == q->tail) choke_zap_tail_holes(q); + --sch->q.qlen; qdisc_qstats_backlog_dec(sch, skb); qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); qdisc_drop(skb, sch, to_free); - --sch->q.qlen; } struct choke_skb_cb { diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index f1d09183ae632..dc26b22d53c73 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, struct tbf_sched_data *q = qdisc_priv(sch); struct sk_buff *segs, *nskb; netdev_features_t features = netif_skb_features(skb); - unsigned int len = 0, prev_len = qdisc_pkt_len(skb); + unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len; int ret, nb; segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); @@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, nb = 0; skb_list_walk_safe(segs, segs, nskb) { 
skb_mark_not_on_list(segs); - qdisc_skb_cb(segs)->pkt_len = segs->len; - len += segs->len; + seg_len = segs->len; + qdisc_skb_cb(segs)->pkt_len = seg_len; ret = qdisc_enqueue(segs, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); } else { nb++; + len += seg_len; } } sch->q.qlen += nb; - if (nb > 1) + sch->qstats.backlog += len; + if (nb > 0) { qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); - consume_skb(skb); - return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; + consume_skb(skb); + return NET_XMIT_SUCCESS; + } + + kfree_skb(skb); + return NET_XMIT_DROP; } static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 9d76e902fd770..de5114095702a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -383,6 +383,7 @@ void smc_sk_init(struct net *net, struct sock *sk, int protocol) smc->limit_smc_hs = net->smc.limit_smc_hs; smc->use_fallback = false; /* assume rdma capability first */ smc->fallback_rsn = 0; + smc_close_init(smc); } static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, @@ -1116,7 +1117,12 @@ static int smc_find_proposal_devices(struct smc_sock *smc, ini->check_smcrv2 = true; ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr; if (!(ini->smcr_version & SMC_V2) || +#if IS_ENABLED(CONFIG_IPV6) + (smc->clcsock->sk->sk_family != AF_INET && + !ipv6_addr_v4mapped(&smc->clcsock->sk->sk_v6_rcv_saddr)) || +#else smc->clcsock->sk->sk_family != AF_INET || +#endif !smc_clc_ueid_count() || smc_find_rdma_device(smc, ini)) ini->smcr_version &= ~SMC_V2; @@ -1299,7 +1305,6 @@ static int smc_connect_rdma(struct smc_sock *smc, goto connect_abort; } - smc_close_init(smc); smc_rx_init(smc); if (ini->first_contact_local) { @@ -1435,7 +1440,6 @@ static int smc_connect_ism(struct smc_sock *smc, goto connect_abort; } } - smc_close_init(smc); smc_rx_init(smc); smc_tx_init(smc); @@ -1901,6 +1905,7 @@ static void smc_listen_out(struct smc_sock *new_smc) if (tcp_sk(new_smc->clcsock->sk)->syn_smc) atomic_dec(&lsmc->queued_smc_hs); + release_sock(newsmcsk); /* lock in smc_listen_work() */ if (lsmc->sk.sk_state == SMC_LISTEN) { lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); smc_accept_enqueue(&lsmc->sk, newsmcsk); @@ -2032,6 +2037,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc, if (pclc->hdr.typev1 == SMC_TYPE_N) return 0; pclc_prfx = smc_clc_proposal_get_prefix(pclc); + if (!pclc_prfx) + return -EPROTO; if (smc_clc_prfx_match(newclcsock, pclc_prfx)) return SMC_CLC_DECL_DIFFPREFIX; @@ -2145,6 +2152,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, pclc_smcd = smc_get_clc_msg_smcd(pclc); smc_v2_ext = smc_get_clc_v2_ext(pclc); smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext); + if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext) + goto not_found; mutex_lock(&smcd_dev_list.mutex); if (pclc_smcd->ism.chid) { @@ -2221,7 +2230,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc, int rc = 0; /* check if ISM V1 is available */ - if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1)) + if (!(ini->smcd_version & SMC_V1) || + !smcd_indicated(ini->smc_type_v1) || + !pclc_smcd) goto not_found; ini->is_smcd = true; /* prepare ISM check */ ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid); @@ -2272,7 +2283,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, goto not_found; smc_v2_ext = smc_get_clc_v2_ext(pclc); - if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL)) + if 
(!smc_v2_ext || + !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL)) goto not_found; /* prepare RDMA check */ @@ -2422,6 +2434,7 @@ static void smc_listen_work(struct work_struct *work) u8 accept_version; int rc = 0; + lock_sock(&new_smc->sk); /* release in smc_listen_out() */ if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) return smc_listen_out_err(new_smc); @@ -2479,7 +2492,6 @@ static void smc_listen_work(struct work_struct *work) goto out_decl; mutex_lock(&smc_server_lgr_pending); - smc_close_init(new_smc); smc_rx_init(new_smc); smc_tx_init(new_smc); diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 33fa787c28ebb..521f5df80e10c 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -352,8 +352,11 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc) struct smc_clc_msg_hdr *hdr = &pclc->hdr; struct smc_clc_v2_extension *v2_ext; - v2_ext = smc_get_clc_v2_ext(pclc); pclc_prfx = smc_clc_proposal_get_prefix(pclc); + if (!pclc_prfx || + pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX) + return false; + if (hdr->version == SMC_V1) { if (hdr->typev1 == SMC_TYPE_N) return false; @@ -365,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc) sizeof(struct smc_clc_msg_trail)) return false; } else { + v2_ext = smc_get_clc_v2_ext(pclc); + if ((hdr->typev2 != SMC_TYPE_N && + (!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) || + (smcd_indicated(hdr->typev2) && + v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES)) + return false; + if (ntohs(hdr->length) != sizeof(*pclc) + sizeof(struct smc_clc_msg_smcd) + @@ -764,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, SMC_CLC_RECV_BUF_LEN : datlen; iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen); len = sock_recvmsg(smc->clcsock, &msg, krflags); + if (len < recvlen) { + smc->sk.sk_err = EPROTO; + reason_code = -EPROTO; + goto out; + } datlen -= len; } if (clcm->type == SMC_CLC_DECLINE) { diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 5fd6f5b8ef034..7672899254108 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -336,8 +336,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */ static inline struct smc_clc_msg_proposal_prefix * smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc) { + u16 offset = ntohs(pclc->iparea_offset); + + if (offset > sizeof(struct smc_clc_msg_smcd)) + return NULL; return (struct smc_clc_msg_proposal_prefix *) - ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset)); + ((u8 *)pclc + sizeof(*pclc) + offset); } static inline bool smcr_indicated(int smc_type) @@ -376,8 +380,14 @@ static inline struct smc_clc_v2_extension * smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop) { struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop); + u16 max_offset; - if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset)) + max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) - + offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) - + offsetofend(struct smc_clc_msg_smcd, v2_ext_offset); + + if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) || + ntohs(prop_smcd->v2_ext_offset) > max_offset) return NULL; return (struct smc_clc_v2_extension *) @@ -390,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop) static inline struct smc_clc_smcd_v2_extension * smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext) { + u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) - + offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) 
- + offsetof(struct smc_clc_v2_extension, hdr) - + offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset); + if (!prop_v2ext) return NULL; - if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset)) + if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) || + ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset) return NULL; return (struct smc_clc_smcd_v2_extension *) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 500952c2e67ba..ccf57b7fe602e 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -795,9 +795,14 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, if (lgr->smc_version == SMC_V2) { lnk->smcibdev = ini->smcrv2.ib_dev_v2; lnk->ibport = ini->smcrv2.ib_port_v2; + lnk->wr_rx_sge_cnt = lnk->smcibdev->ibdev->attrs.max_recv_sge < 2 ? 1 : 2; + lnk->wr_rx_buflen = smc_link_shared_v2_rxbuf(lnk) ? + SMC_WR_BUF_SIZE : SMC_WR_BUF_V2_SIZE; } else { lnk->smcibdev = ini->ib_dev; lnk->ibport = ini->ib_port; + lnk->wr_rx_sge_cnt = 1; + lnk->wr_rx_buflen = SMC_WR_BUF_SIZE; } get_device(&lnk->smcibdev->ibdev->dev); atomic_inc(&lnk->smcibdev->lnk_cnt); @@ -1818,7 +1823,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk) { if (smc_link_downing(&lnk->state)) { trace_smcr_link_down(lnk, __builtin_return_address(0)); - schedule_work(&lnk->link_down_wrk); + smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */ + if (!schedule_work(&lnk->link_down_wrk)) + smcr_link_put(lnk); } } @@ -1850,11 +1857,14 @@ static void smc_link_down_work(struct work_struct *work) struct smc_link_group *lgr = link->lgr; if (list_empty(&lgr->list)) - return; + goto out; wake_up_all(&lgr->llc_msg_waiter); down_write(&lgr->llc_conf_mutex); smcr_link_down(link); up_write(&lgr->llc_conf_mutex); + +out: + smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */ } static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev, diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 69b54ecd65031..48a1b1dcb576e 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -122,10 +122,14 @@ struct smc_link { } ____cacheline_aligned_in_smp; struct completion tx_ref_comp; - struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */ + u8 *wr_rx_bufs; /* WR recv payload buffers */ struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */ struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */ /* above three vectors have wr_rx_cnt elements and use the same index */ + int wr_rx_sge_cnt; /* rx sge, V1 is 1, V2 is either 2 or 1 */ + int wr_rx_buflen; /* buffer len for the first sge, len for the + * second sge is lgr shared if rx sge is 2. 
+ */ dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/ u64 wr_rx_id; /* seq # of last recv WR */ @@ -506,6 +510,11 @@ static inline bool smc_link_active(struct smc_link *lnk) return lnk->state == SMC_LNK_ACTIVE; } +static inline bool smc_link_shared_v2_rxbuf(struct smc_link *lnk) +{ + return lnk->wr_rx_sge_cnt > 1; +} + static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw) { sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 6fdb2d96777ad..8f7bd40f47594 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -71,8 +71,7 @@ static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, - const struct smc_diag_req *req, - struct nlattr *bc) + const struct smc_diag_req *req) { struct smc_sock *smc = smc_sk(sk); struct smc_diag_fallback fallback; @@ -199,7 +198,6 @@ static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb, struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb); struct net *net = sock_net(skb->sk); int snum = cb_ctx->pos[p_type]; - struct nlattr *bc = NULL; struct hlist_head *head; int rc = 0, num = 0; struct sock *sk; @@ -214,7 +212,7 @@ static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb, continue; if (num < snum) goto next; - rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc); + rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh)); if (rc < 0) goto out; next: diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 9c563cdbea908..53828833a3f7f 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -662,7 +662,6 @@ void smc_ib_destroy_queue_pair(struct smc_link *lnk) /* create a queue pair within the protection domain for a link */ int smc_ib_create_queue_pair(struct smc_link *lnk) { - int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1; struct ib_qp_init_attr qp_attr = { .event_handler = smc_ib_qp_event_handler, .qp_context = lnk, @@ -676,7 +675,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) .max_send_wr = SMC_WR_BUF_CNT * 3, .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, - .max_recv_sge = sges_per_buf, + .max_recv_sge = lnk->wr_rx_sge_cnt, .max_inline_data = 0, }, .sq_sig_type = IB_SIGNAL_REQ_WR, diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 018ce8133b026..f865c58c3aa77 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -997,13 +997,14 @@ static int smc_llc_cli_conf_link(struct smc_link *link, } static void smc_llc_save_add_link_rkeys(struct smc_link *link, - struct smc_link *link_new) + struct smc_link *link_new, + u8 *llc_msg) { struct smc_llc_msg_add_link_v2_ext *ext; struct smc_link_group *lgr = link->lgr; int max, i; - ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 + + ext = (struct smc_llc_msg_add_link_v2_ext *)(llc_msg + SMC_WR_TX_SIZE); max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); down_write(&lgr->rmbs_lock); @@ -1098,7 +1099,9 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) if (rc) goto out_clear_lnk; if (lgr->smc_version == SMC_V2) { - smc_llc_save_add_link_rkeys(link, lnk_new); + u8 *llc_msg = smc_link_shared_v2_rxbuf(link) ? 
+ (u8 *)lgr->wr_rx_buf_v2 : (u8 *)llc; + smc_llc_save_add_link_rkeys(link, lnk_new, llc_msg); } else { rc = smc_llc_cli_rkey_exchange(link, lnk_new); if (rc) { @@ -1498,7 +1501,9 @@ int smc_llc_srv_add_link(struct smc_link *link, if (rc) goto out_err; if (lgr->smc_version == SMC_V2) { - smc_llc_save_add_link_rkeys(link, link_new); + u8 *llc_msg = smc_link_shared_v2_rxbuf(link) ? + (u8 *)lgr->wr_rx_buf_v2 : (u8 *)add_llc; + smc_llc_save_add_link_rkeys(link, link_new, llc_msg); } else { rc = smc_llc_srv_rkey_exchange(link, link_new); if (rc) @@ -1807,8 +1812,12 @@ static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr) if (lgr->smc_version == SMC_V2) { struct smc_llc_msg_delete_rkey_v2 *llcv2; - memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc)); - llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2; + if (smc_link_shared_v2_rxbuf(link)) { + memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc)); + llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2; + } else { + llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)llc; + } llcv2->num_inval_rkeys = 0; max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2); diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 214ac3cbcf9ad..60cfec8eb255f 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -222,8 +222,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { - if (send_done) + if (send_done) { + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); return send_done; + } rc = smc_tx_wait(smc, msg->msg_flags); if (rc) goto out_err; diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 994c0cd4fddbf..b04a21b8c5111 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -439,7 +439,7 @@ static inline void smc_wr_rx_demultiplex(struct ib_wc *wc) return; /* short message */ temp_wr_id = wc->wr_id; index = do_div(temp_wr_id, link->wr_rx_cnt); - wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index]; + wr_rx = (struct smc_wr_rx_hdr *)(link->wr_rx_bufs + index * link->wr_rx_buflen); hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) { if (handler->type == wr_rx->type) handler->handler(wc, wr_rx); @@ -555,7 +555,6 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk) static void smc_wr_init_sge(struct smc_link *lnk) { - int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1; bool send_inline = (lnk->qp_attr.cap.max_inline_data > SMC_WR_TX_SIZE); u32 i; @@ -608,13 +607,14 @@ static void smc_wr_init_sge(struct smc_link *lnk) * the larger spillover buffer, allowing easy data mapping. */ for (i = 0; i < lnk->wr_rx_cnt; i++) { - int x = i * sges_per_buf; + int x = i * lnk->wr_rx_sge_cnt; lnk->wr_rx_sges[x].addr = - lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE; - lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE; + lnk->wr_rx_dma_addr + i * lnk->wr_rx_buflen; + lnk->wr_rx_sges[x].length = smc_link_shared_v2_rxbuf(lnk) ? 
+ SMC_WR_TX_SIZE : lnk->wr_rx_buflen; lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey; - if (lnk->lgr->smc_version == SMC_V2) { + if (lnk->lgr->smc_version == SMC_V2 && smc_link_shared_v2_rxbuf(lnk)) { lnk->wr_rx_sges[x + 1].addr = lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE; lnk->wr_rx_sges[x + 1].length = @@ -624,7 +624,7 @@ static void smc_wr_init_sge(struct smc_link *lnk) } lnk->wr_rx_ibs[i].next = NULL; lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x]; - lnk->wr_rx_ibs[i].num_sge = sges_per_buf; + lnk->wr_rx_ibs[i].num_sge = lnk->wr_rx_sge_cnt; } lnk->wr_reg.wr.next = NULL; lnk->wr_reg.wr.num_sge = 0; @@ -655,7 +655,7 @@ void smc_wr_free_link(struct smc_link *lnk) if (lnk->wr_rx_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, - SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, + lnk->wr_rx_buflen * lnk->wr_rx_cnt, DMA_FROM_DEVICE); lnk->wr_rx_dma_addr = 0; } @@ -740,13 +740,11 @@ int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr) int smc_wr_alloc_link_mem(struct smc_link *link) { - int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 2 : 1; - /* allocate link related memory */ link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); if (!link->wr_tx_bufs) goto no_mem; - link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE, + link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, link->wr_rx_buflen, GFP_KERNEL); if (!link->wr_rx_bufs) goto no_mem_wr_tx_bufs; @@ -774,7 +772,7 @@ int smc_wr_alloc_link_mem(struct smc_link *link) if (!link->wr_tx_sges) goto no_mem_wr_tx_rdma_sges; link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, - sizeof(link->wr_rx_sges[0]) * sges_per_buf, + sizeof(link->wr_rx_sges[0]) * link->wr_rx_sge_cnt, GFP_KERNEL); if (!link->wr_rx_sges) goto no_mem_wr_tx_sges; @@ -872,7 +870,7 @@ int smc_wr_create_link(struct smc_link *lnk) smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0); lnk->wr_rx_id = 0; lnk->wr_rx_dma_addr = ib_dma_map_single( - ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, + ibdev, lnk->wr_rx_bufs, lnk->wr_rx_buflen * lnk->wr_rx_cnt, DMA_FROM_DEVICE); if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) { lnk->wr_rx_dma_addr = 0; @@ -880,13 +878,15 @@ int smc_wr_create_link(struct smc_link *lnk) goto out; } if (lnk->lgr->smc_version == SMC_V2) { - lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev, - lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE, - DMA_FROM_DEVICE); - if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) { - lnk->wr_rx_v2_dma_addr = 0; - rc = -EIO; - goto dma_unmap; + if (smc_link_shared_v2_rxbuf(lnk)) { + lnk->wr_rx_v2_dma_addr = + ib_dma_map_single(ibdev, lnk->lgr->wr_rx_buf_v2, + SMC_WR_BUF_V2_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) { + lnk->wr_rx_v2_dma_addr = 0; + rc = -EIO; + goto dma_unmap; + } } lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev, lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE, @@ -935,7 +935,7 @@ int smc_wr_create_link(struct smc_link *lnk) lnk->wr_tx_v2_dma_addr = 0; } ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, - SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, + lnk->wr_rx_buflen * lnk->wr_rx_cnt, DMA_FROM_DEVICE); lnk->wr_rx_dma_addr = 0; out: diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 439f755399772..b7e25e7e9933b 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -814,10 +814,10 @@ static void cleanup_bearer(struct work_struct *work) kfree_rcu(rcast, rcu); } - atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count); dst_cache_destroy(&ub->rcast.dst_cache); udp_tunnel_sock_release(ub->ubsock); synchronize_net(); + 
atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count); kfree(ub); } diff --git a/scripts/Makefile b/scripts/Makefile index 6bcda4b9d0540..546e8175e1c4c 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -47,7 +47,7 @@ HOSTCFLAGS_sorttable.o += -DMCOUNT_SORT_ENABLED endif # The following programs are only built on demand -hostprogs += unifdef +hostprogs += unifdef gen_packed_field_checks # The module linker script is preprocessed on demand targets += module.lds diff --git a/scripts/gen_packed_field_checks.c b/scripts/gen_packed_field_checks.c new file mode 100644 index 0000000000000..09a21afd640bd --- /dev/null +++ b/scripts/gen_packed_field_checks.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2024, Intel Corporation +#include +#include + +#define MAX_PACKED_FIELD_SIZE 50 + +int main(int argc, char **argv) +{ + for (int i = 1; i <= MAX_PACKED_FIELD_SIZE; i++) { + printf("#define CHECK_PACKED_FIELDS_%d(fields) ({ \\\n", i); + printf("\ttypeof(&(fields)[0]) _f = (fields); \\\n"); + printf("\tBUILD_BUG_ON(ARRAY_SIZE(fields) != %d); \\\n", i); + + for (int j = 0; j < i; j++) + printf("\tCHECK_PACKED_FIELD(_f[%d]); \\\n", j); + + for (int j = 1; j < i; j++) + printf("\tCHECK_PACKED_FIELD_OVERLAP(_f[0].startbit < _f[1].startbit, _f[%d], _f[%d]); \\\n", + j - 1, j); + + printf("})\n\n"); + } + + printf("#define CHECK_PACKED_FIELDS(fields) \\\n"); + + for (int i = 1; i <= MAX_PACKED_FIELD_SIZE; i++) + printf("\t__builtin_choose_expr(ARRAY_SIZE(fields) == %d, CHECK_PACKED_FIELDS_%d(fields), \\\n", + i, i); + + printf("\t({ BUILD_BUG_ON_MSG(1, \"CHECK_PACKED_FIELDS() must be regenerated to support array sizes larger than %d.\"); }) \\\n", + MAX_PACKED_FIELD_SIZE); + + for (int i = 1; i <= MAX_PACKED_FIELD_SIZE; i++) + printf(")"); + + printf("\n"); +} diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index f5a08f94e0940..366c87a40bd15 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -5738,7 +5738,7 @@ static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb, /* we do this in the LOCAL_OUT path and not the POST_ROUTING path * because we want to make sure we apply the necessary labeling * before IPsec is applied so we can leverage AH protection */ - sk = skb->sk; + sk = sk_to_full_sk(skb->sk); if (sk) { struct sk_security_struct *sksec; diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py index 0b49ce7ae6784..ca8a7edff3dda 100755 --- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py +++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py @@ -3,7 +3,8 @@ import datetime import random -from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt +import re +from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt, ksft_true from lib.py import NetDrvEpEnv from lib.py import EthtoolFamily, NetdevFamily from lib.py import KsftSkipEx, KsftFailEx @@ -96,6 +97,13 @@ def _send_traffic_check(cfg, port, name, params): f"traffic on inactive queues ({name}): " + str(cnts)) +def _ntuple_rule_check(cfg, rule_id, ctx_id): + """Check that ntuple rule references RSS context ID""" + text = ethtool(f"-n {cfg.ifname} rule {rule_id}").stdout + pattern = f"RSS Context (ID: )?{ctx_id}" + ksft_true(re.search(pattern, text), "RSS context not referenced in ntuple rule") + + def test_rss_key_indir(cfg): """Test basics like updating the main RSS key and indirection table.""" @@ -459,6 +467,8 @@ def test_rss_context(cfg, ctx_cnt=1, 
create_with_cfg=None): ntuple = ethtool_create(cfg, "-N", flow) defer(ethtool, f"-N {cfg.ifname} delete {ntuple}") + _ntuple_rule_check(cfg, ntuple, ctx_id) + for i in range(ctx_cnt): _send_traffic_check(cfg, ports[i], f"context {i}", { 'target': (2+i*2, 3+i*2), diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl index 12a7f4ca2684d..b8e4d697e3b3a 100755 --- a/tools/testing/selftests/kselftest/prefix.pl +++ b/tools/testing/selftests/kselftest/prefix.pl @@ -4,12 +4,15 @@ # to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd". use strict; use IO::Handle; +use Time::HiRes qw( time ); binmode STDIN; binmode STDOUT; STDOUT->autoflush(1); +my $start_time = time(); +my $prev_time = $start_time; my $needed = 1; while (1) { my $char; @@ -17,6 +20,11 @@ exit 0 if ($bytes == 0); if ($needed) { print "# "; + if ($ENV{kselftest_profile}) { + my $now = time(); + printf("%.2f [+%.2f] ", $now - $start_time, $now - $prev_time); + $prev_time = $now; + } $needed = 0; } print $char; diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh index 2c3c58e65a419..e65bffb9981f8 100644 --- a/tools/testing/selftests/kselftest/runner.sh +++ b/tools/testing/selftests/kselftest/runner.sh @@ -89,7 +89,7 @@ run_one() fi field=$(echo "$line" | cut -d= -f1) value=$(echo "$line" | cut -d= -f2-) - eval "kselftest_$field"="$value" + eval "export kselftest_$field"="$value" done < "$settings" fi diff --git a/tools/testing/selftests/net/busy_poller.c b/tools/testing/selftests/net/busy_poller.c index 99b0e8c17fcad..ef62d7145598d 100644 --- a/tools/testing/selftests/net/busy_poller.c +++ b/tools/testing/selftests/net/busy_poller.c @@ -54,16 +54,16 @@ struct epoll_params { #define EPIOCGPARAMS _IOR(EPOLL_IOC_TYPE, 0x02, struct epoll_params) #endif -static uint32_t cfg_port = 8000; +static uint16_t cfg_port = 8000; static struct in_addr cfg_bind_addr = { .s_addr = INADDR_ANY }; static char *cfg_outfile; static int cfg_max_events = 8; -static int cfg_ifindex; +static uint32_t cfg_ifindex; /* busy poll params */ static uint32_t cfg_busy_poll_usecs; -static uint32_t cfg_busy_poll_budget; -static uint32_t cfg_prefer_busy_poll; +static uint16_t cfg_busy_poll_budget; +static uint8_t cfg_prefer_busy_poll; /* IRQ params */ static uint32_t cfg_defer_hard_irqs; @@ -79,6 +79,7 @@ static void usage(const char *filepath) static void parse_opts(int argc, char **argv) { + unsigned long long tmp; int ret; int c; @@ -86,31 +87,40 @@ static void parse_opts(int argc, char **argv) usage(argv[0]); while ((c = getopt(argc, argv, "p:m:b:u:P:g:o:d:r:s:i:")) != -1) { + /* most options take integer values, except o and b, so reduce + * code duplication a bit for the common case by calling + * strtoull here and leave bounds checking and casting per + * option below. 
+ */ + if (c != 'o' && c != 'b') + tmp = strtoull(optarg, NULL, 0); + switch (c) { case 'u': - cfg_busy_poll_usecs = strtoul(optarg, NULL, 0); - if (cfg_busy_poll_usecs == ULONG_MAX || - cfg_busy_poll_usecs > UINT32_MAX) + if (tmp == ULLONG_MAX || tmp > UINT32_MAX) error(1, ERANGE, "busy_poll_usecs too large"); + + cfg_busy_poll_usecs = (uint32_t)tmp; break; case 'P': - cfg_prefer_busy_poll = strtoul(optarg, NULL, 0); - if (cfg_prefer_busy_poll == ULONG_MAX || - cfg_prefer_busy_poll > 1) + if (tmp == ULLONG_MAX || tmp > 1) error(1, ERANGE, "prefer busy poll should be 0 or 1"); + + cfg_prefer_busy_poll = (uint8_t)tmp; break; case 'g': - cfg_busy_poll_budget = strtoul(optarg, NULL, 0); - if (cfg_busy_poll_budget == ULONG_MAX || - cfg_busy_poll_budget > UINT16_MAX) + if (tmp == ULLONG_MAX || tmp > UINT16_MAX) error(1, ERANGE, "busy poll budget must be [0, UINT16_MAX]"); + + cfg_busy_poll_budget = (uint16_t)tmp; break; case 'p': - cfg_port = strtoul(optarg, NULL, 0); - if (cfg_port > UINT16_MAX) + if (tmp == ULLONG_MAX || tmp > UINT16_MAX) error(1, ERANGE, "port must be <= 65535"); + + cfg_port = (uint16_t)tmp; break; case 'b': ret = inet_aton(optarg, &cfg_bind_addr); @@ -124,41 +134,39 @@ static void parse_opts(int argc, char **argv) error(1, 0, "outfile invalid"); break; case 'm': - cfg_max_events = strtol(optarg, NULL, 0); - - if (cfg_max_events == LONG_MIN || - cfg_max_events == LONG_MAX || - cfg_max_events <= 0) + if (tmp == ULLONG_MAX || tmp > INT_MAX) error(1, ERANGE, - "max events must be > 0 and < LONG_MAX"); + "max events must be > 0 and <= INT_MAX"); + + cfg_max_events = (int)tmp; break; case 'd': - cfg_defer_hard_irqs = strtoul(optarg, NULL, 0); - - if (cfg_defer_hard_irqs == ULONG_MAX || - cfg_defer_hard_irqs > INT32_MAX) + if (tmp == ULLONG_MAX || tmp > INT32_MAX) error(1, ERANGE, "defer_hard_irqs must be <= INT32_MAX"); + + cfg_defer_hard_irqs = (uint32_t)tmp; break; case 'r': - cfg_gro_flush_timeout = strtoull(optarg, NULL, 0); - - if (cfg_gro_flush_timeout == ULLONG_MAX) + if (tmp == ULLONG_MAX || tmp > UINT64_MAX) error(1, ERANGE, - "gro_flush_timeout must be < ULLONG_MAX"); + "gro_flush_timeout must be < UINT64_MAX"); + + cfg_gro_flush_timeout = (uint64_t)tmp; break; case 's': - cfg_irq_suspend_timeout = strtoull(optarg, NULL, 0); - - if (cfg_irq_suspend_timeout == ULLONG_MAX) + if (tmp == ULLONG_MAX || tmp > UINT64_MAX) error(1, ERANGE, "irq_suspend_timeout must be < ULLONG_MAX"); + + cfg_irq_suspend_timeout = (uint64_t)tmp; break; case 'i': - cfg_ifindex = strtoul(optarg, NULL, 0); - if (cfg_ifindex == ULONG_MAX) + if (tmp == ULLONG_MAX || tmp > INT_MAX) error(1, ERANGE, - "ifindex must be < ULONG_MAX"); + "ifindex must be <= INT_MAX"); + + cfg_ifindex = (int)tmp; break; } } @@ -277,8 +285,8 @@ static void run_poller(void) * here */ epoll_params.busy_poll_usecs = cfg_busy_poll_usecs; - epoll_params.busy_poll_budget = (uint16_t)cfg_busy_poll_budget; - epoll_params.prefer_busy_poll = (uint8_t)cfg_prefer_busy_poll; + epoll_params.busy_poll_budget = cfg_busy_poll_budget; + epoll_params.prefer_busy_poll = cfg_prefer_busy_poll; epoll_params.__pad = 0; val = 1; @@ -342,5 +350,9 @@ int main(int argc, char *argv[]) parse_opts(argc, argv); setup_queue(); run_poller(); + + if (cfg_outfile) + free(cfg_outfile); + return 0; } diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings index e7b9417537fbc..ff3de4936a009 100644 --- a/tools/testing/selftests/net/forwarding/settings +++ b/tools/testing/selftests/net/forwarding/settings @@ 
-1 +1,2 @@ -timeout=0 +timeout=10800 +profile=1 diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index ea89e558672db..5823d94b19e9b 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -223,19 +223,32 @@ mirred_egress_to_ingress_tcp_test() ip_proto icmp \ action drop - ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & - local rpid=$! - ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 - wait -n $rpid - cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 - check_err $? "server output check failed" + echo P2 $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \ -t icmp "ping,id=42,seq=5" -q + echo P2.1 tc_check_packets "dev $h1 egress" 101 10 check_err $? "didn't mirred redirect ICMP" + echo P2.2 tc_check_packets "dev $h1 ingress" 102 10 check_err $? "didn't drop mirred ICMP" + echo P2.3 + + echo P1 + + ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & + local rpid=$! + sleep 0.2 + echo P1.1 + ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 + echo P1.2 + sleep 0.2 + wait -n $rpid + cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 + check_err $? "server output check failed" + + echo P3 tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower @@ -359,4 +372,5 @@ else tests_run fi +dmesg exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/settings b/tools/testing/selftests/net/settings index ed8418e8217a0..a38764182822e 100644 --- a/tools/testing/selftests/net/settings +++ b/tools/testing/selftests/net/settings @@ -1 +1,2 @@ timeout=3600 +profile=1 diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh index 640bc43452faa..88fa1d53ba2b2 100755 --- a/tools/testing/selftests/net/udpgso_bench.sh +++ b/tools/testing/selftests/net/udpgso_bench.sh @@ -92,6 +92,9 @@ run_udp() { echo "udp" run_in_netns ${args} + echo "udp sendmmsg" + run_in_netns ${args} -m + echo "udp gso" run_in_netns ${args} -S 0 diff --git a/tools/testing/vsock/control.c b/tools/testing/vsock/control.c index d2deb4b15b943..875ef0cfa4150 100644 --- a/tools/testing/vsock/control.c +++ b/tools/testing/vsock/control.c @@ -27,6 +27,7 @@ #include "timeout.h" #include "control.h" +#include "util.h" static int control_fd = -1; @@ -50,7 +51,6 @@ void control_init(const char *control_host, for (ai = result; ai; ai = ai->ai_next) { int fd; - int val = 1; fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); if (fd < 0) @@ -65,11 +65,8 @@ void control_init(const char *control_host, break; } - if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, - &val, sizeof(val)) < 0) { - perror("setsockopt"); - exit(EXIT_FAILURE); - } + setsockopt_int_check(fd, SOL_SOCKET, SO_REUSEADDR, 1, + "setsockopt SO_REUSEADDR"); if (bind(fd, ai->ai_addr, ai->ai_addrlen) < 0) goto next; diff --git a/tools/testing/vsock/msg_zerocopy_common.c b/tools/testing/vsock/msg_zerocopy_common.c index 5a4bdf7b51328..8622e5a0f8b77 100644 --- a/tools/testing/vsock/msg_zerocopy_common.c +++ b/tools/testing/vsock/msg_zerocopy_common.c @@ -14,16 +14,6 @@ #include "msg_zerocopy_common.h" -void enable_so_zerocopy(int fd) -{ - int val = 1; - - if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &val, sizeof(val))) { - perror("setsockopt"); - exit(EXIT_FAILURE); - } -} - void vsock_recv_completion(int fd, const bool *zerocopied) { struct 
sock_extended_err *serr; diff --git a/tools/testing/vsock/msg_zerocopy_common.h b/tools/testing/vsock/msg_zerocopy_common.h index 3763c5ccedb95..ad14139e93ca3 100644 --- a/tools/testing/vsock/msg_zerocopy_common.h +++ b/tools/testing/vsock/msg_zerocopy_common.h @@ -12,7 +12,6 @@ #define VSOCK_RECVERR 1 #endif -void enable_so_zerocopy(int fd); void vsock_recv_completion(int fd, const bool *zerocopied); #endif /* MSG_ZEROCOPY_COMMON_H */ diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c index a3d448a075e3a..e79534b524774 100644 --- a/tools/testing/vsock/util.c +++ b/tools/testing/vsock/util.c @@ -651,3 +651,147 @@ void free_test_iovec(const struct iovec *test_iovec, free(iovec); } + +/* Set "unsigned long long" socket option and check that it's indeed set */ +void setsockopt_ull_check(int fd, int level, int optname, + unsigned long long val, char const *errmsg) +{ + unsigned long long chkval; + socklen_t chklen; + int err; + + err = setsockopt(fd, level, optname, &val, sizeof(val)); + if (err) { + fprintf(stderr, "setsockopt err: %s (%d)\n", + strerror(errno), errno); + goto fail; + } + + chkval = ~val; /* just make storage != val */ + chklen = sizeof(chkval); + + err = getsockopt(fd, level, optname, &chkval, &chklen); + if (err) { + fprintf(stderr, "getsockopt err: %s (%d)\n", + strerror(errno), errno); + goto fail; + } + + if (chklen != sizeof(chkval)) { + fprintf(stderr, "size mismatch: set %zu got %d\n", sizeof(val), + chklen); + goto fail; + } + + if (chkval != val) { + fprintf(stderr, "value mismatch: set %llu got %llu\n", val, + chkval); + goto fail; + } + return; +fail: + fprintf(stderr, "%s val %llu\n", errmsg, val); + exit(EXIT_FAILURE); +; +} + +/* Set "int" socket option and check that it's indeed set */ +void setsockopt_int_check(int fd, int level, int optname, int val, + char const *errmsg) +{ + int chkval; + socklen_t chklen; + int err; + + err = setsockopt(fd, level, optname, &val, sizeof(val)); + if (err) { + fprintf(stderr, "setsockopt err: %s (%d)\n", + strerror(errno), errno); + goto fail; + } + + chkval = ~val; /* just make storage != val */ + chklen = sizeof(chkval); + + err = getsockopt(fd, level, optname, &chkval, &chklen); + if (err) { + fprintf(stderr, "getsockopt err: %s (%d)\n", + strerror(errno), errno); + goto fail; + } + + if (chklen != sizeof(chkval)) { + fprintf(stderr, "size mismatch: set %zu got %d\n", sizeof(val), + chklen); + goto fail; + } + + if (chkval != val) { + fprintf(stderr, "value mismatch: set %d got %d\n", + val, chkval); + goto fail; + } + return; +fail: + fprintf(stderr, "%s val %d\n", errmsg, val); + exit(EXIT_FAILURE); +} + +static void mem_invert(unsigned char *mem, size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + mem[i] = ~mem[i]; +} + +/* Set "timeval" socket option and check that it's indeed set */ +void setsockopt_timeval_check(int fd, int level, int optname, + struct timeval val, char const *errmsg) +{ + struct timeval chkval; + socklen_t chklen; + int err; + + err = setsockopt(fd, level, optname, &val, sizeof(val)); + if (err) { + fprintf(stderr, "setsockopt err: %s (%d)\n", + strerror(errno), errno); + goto fail; + } + + /* just make storage != val */ + chkval = val; + mem_invert((unsigned char *) &chkval, sizeof(chkval)); + chklen = sizeof(chkval); + + err = getsockopt(fd, level, optname, &chkval, &chklen); + if (err) { + fprintf(stderr, "getsockopt err: %s (%d)\n", + strerror(errno), errno); + goto fail; + } + + if (chklen != sizeof(chkval)) { + fprintf(stderr, "size mismatch: set %zu got %d\n", 
sizeof(val), + chklen); + goto fail; + } + + if (memcmp(&chkval, &val, sizeof(val)) != 0) { + fprintf(stderr, "value mismatch: set %ld:%ld got %ld:%ld\n", + val.tv_sec, val.tv_usec, + chkval.tv_sec, chkval.tv_usec); + goto fail; + } + return; +fail: + fprintf(stderr, "%s val %ld:%ld\n", errmsg, val.tv_sec, val.tv_usec); + exit(EXIT_FAILURE); +} + +void enable_so_zerocopy_check(int fd) +{ + setsockopt_int_check(fd, SOL_SOCKET, SO_ZEROCOPY, 1, + "setsockopt SO_ZEROCOPY"); +} diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h index fff22d4a14c0f..f189334591df4 100644 --- a/tools/testing/vsock/util.h +++ b/tools/testing/vsock/util.h @@ -68,4 +68,11 @@ unsigned long iovec_hash_djb2(const struct iovec *iov, size_t iovnum); struct iovec *alloc_test_iovec(const struct iovec *test_iovec, int iovnum); void free_test_iovec(const struct iovec *test_iovec, struct iovec *iovec, int iovnum); +void setsockopt_ull_check(int fd, int level, int optname, + unsigned long long val, char const *errmsg); +void setsockopt_int_check(int fd, int level, int optname, int val, + char const *errmsg); +void setsockopt_timeval_check(int fd, int level, int optname, + struct timeval val, char const *errmsg); +void enable_so_zerocopy_check(int fd); #endif /* UTIL_H */ diff --git a/tools/testing/vsock/vsock_perf.c b/tools/testing/vsock/vsock_perf.c index 4e8578f815e08..75971ac708c9a 100644 --- a/tools/testing/vsock/vsock_perf.c +++ b/tools/testing/vsock/vsock_perf.c @@ -33,7 +33,7 @@ static unsigned int port = DEFAULT_PORT; static unsigned long buf_size_bytes = DEFAULT_BUF_SIZE_BYTES; -static unsigned long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES; +static unsigned long long vsock_buf_bytes = DEFAULT_VSOCK_BUF_BYTES; static bool zerocopy; static void error(const char *s) @@ -133,7 +133,7 @@ static float get_gbps(unsigned long bits, time_t ns_delta) ((float)ns_delta / NSEC_PER_SEC); } -static void run_receiver(unsigned long rcvlowat_bytes) +static void run_receiver(int rcvlowat_bytes) { unsigned int read_cnt; time_t rx_begin_ns; @@ -162,8 +162,8 @@ static void run_receiver(unsigned long rcvlowat_bytes) printf("Run as receiver\n"); printf("Listen port %u\n", port); printf("RX buffer %lu bytes\n", buf_size_bytes); - printf("vsock buffer %lu bytes\n", vsock_buf_bytes); - printf("SO_RCVLOWAT %lu bytes\n", rcvlowat_bytes); + printf("vsock buffer %llu bytes\n", vsock_buf_bytes); + printf("SO_RCVLOWAT %d bytes\n", rcvlowat_bytes); fd = socket(AF_VSOCK, SOCK_STREAM, 0); @@ -251,6 +251,16 @@ static void run_receiver(unsigned long rcvlowat_bytes) close(fd); } +static void enable_so_zerocopy(int fd) +{ + int val = 1; + + if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &val, sizeof(val))) { + perror("setsockopt"); + exit(EXIT_FAILURE); + } +} + static void run_sender(int peer_cid, unsigned long to_send_bytes) { time_t tx_begin_ns; @@ -439,7 +449,7 @@ static long strtolx(const char *arg) int main(int argc, char **argv) { unsigned long to_send_bytes = DEFAULT_TO_SEND_BYTES; - unsigned long rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES; + int rcvlowat_bytes = DEFAULT_RCVLOWAT_BYTES; int peer_cid = -1; bool sender = false; diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c index 8d38dbf8f41f0..0b514643a9a58 100644 --- a/tools/testing/vsock/vsock_test.c +++ b/tools/testing/vsock/vsock_test.c @@ -429,7 +429,7 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts) static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) { - unsigned long sock_buf_size; + unsigned long 
long sock_buf_size; unsigned long remote_hash; unsigned long curr_hash; int fd; @@ -444,17 +444,12 @@ static void test_seqpacket_msg_bounds_server(const struct test_opts *opts) sock_buf_size = SOCK_BUF_SIZE; - if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE, - &sock_buf_size, sizeof(sock_buf_size))) { - perror("setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)"); - exit(EXIT_FAILURE); - } + setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE, + sock_buf_size, + "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)"); - if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, - &sock_buf_size, sizeof(sock_buf_size))) { - perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); - exit(EXIT_FAILURE); - } + setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, + sock_buf_size, "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); /* Ready to receive data. */ control_writeln("SRVREADY"); @@ -586,10 +581,8 @@ static void test_seqpacket_timeout_client(const struct test_opts *opts) tv.tv_sec = RCVTIMEO_TIMEOUT_SEC; tv.tv_usec = 0; - if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv)) == -1) { - perror("setsockopt(SO_RCVTIMEO)"); - exit(EXIT_FAILURE); - } + setsockopt_timeval_check(fd, SOL_SOCKET, SO_RCVTIMEO, tv, + "setsockopt(SO_RCVTIMEO)"); read_enter_ns = current_nsec(); @@ -634,7 +627,8 @@ static void test_seqpacket_timeout_server(const struct test_opts *opts) static void test_seqpacket_bigmsg_client(const struct test_opts *opts) { - unsigned long sock_buf_size; + unsigned long long sock_buf_size; + size_t buf_size; socklen_t len; void *data; int fd; @@ -655,13 +649,20 @@ static void test_seqpacket_bigmsg_client(const struct test_opts *opts) sock_buf_size++; - data = malloc(sock_buf_size); + /* size_t can be < unsigned long long */ + buf_size = (size_t) sock_buf_size; + if (buf_size != sock_buf_size) { + fprintf(stderr, "Returned BUFFER_SIZE too large\n"); + exit(EXIT_FAILURE); + } + + data = malloc(buf_size); if (!data) { perror("malloc"); exit(EXIT_FAILURE); } - send_buf(fd, data, sock_buf_size, 0, -EMSGSIZE); + send_buf(fd, data, buf_size, 0, -EMSGSIZE); control_writeln("CLISENT"); @@ -835,7 +836,7 @@ static void test_stream_poll_rcvlowat_server(const struct test_opts *opts) static void test_stream_poll_rcvlowat_client(const struct test_opts *opts) { - unsigned long lowat_val = RCVLOWAT_BUF_SIZE; + int lowat_val = RCVLOWAT_BUF_SIZE; char buf[RCVLOWAT_BUF_SIZE]; struct pollfd fds; short poll_flags; @@ -847,11 +848,8 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts) exit(EXIT_FAILURE); } - if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, - &lowat_val, sizeof(lowat_val))) { - perror("setsockopt(SO_RCVLOWAT)"); - exit(EXIT_FAILURE); - } + setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, + lowat_val, "setsockopt(SO_RCVLOWAT)"); control_expectln("SRVSENT"); @@ -1357,9 +1355,10 @@ static void test_stream_rcvlowat_def_cred_upd_client(const struct test_opts *opt static void test_stream_credit_update_test(const struct test_opts *opts, bool low_rx_bytes_test) { - size_t recv_buf_size; + int recv_buf_size; struct pollfd fds; size_t buf_size; + unsigned long long sock_buf_size; void *buf; int fd; @@ -1371,11 +1370,11 @@ static void test_stream_credit_update_test(const struct test_opts *opts, buf_size = RCVLOWAT_CREDIT_UPD_BUF_SIZE; - if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, - &buf_size, sizeof(buf_size))) { - perror("setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); - exit(EXIT_FAILURE); - } + /* size_t can be < unsigned long long */ + sock_buf_size = buf_size; + + 
setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, + sock_buf_size, "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)"); if (low_rx_bytes_test) { /* Set new SO_RCVLOWAT here. This enables sending credit @@ -1384,11 +1383,8 @@ static void test_stream_credit_update_test(const struct test_opts *opts, */ recv_buf_size = 1 + VIRTIO_VSOCK_MAX_PKT_BUF_SIZE; - if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, - &recv_buf_size, sizeof(recv_buf_size))) { - perror("setsockopt(SO_RCVLOWAT)"); - exit(EXIT_FAILURE); - } + setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, + recv_buf_size, "setsockopt(SO_RCVLOWAT)"); } /* Send one dummy byte here, because 'setsockopt()' above also @@ -1430,11 +1426,8 @@ static void test_stream_credit_update_test(const struct test_opts *opts, recv_buf_size++; /* Updating SO_RCVLOWAT will send credit update. */ - if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, - &recv_buf_size, sizeof(recv_buf_size))) { - perror("setsockopt(SO_RCVLOWAT)"); - exit(EXIT_FAILURE); - } + setsockopt_int_check(fd, SOL_SOCKET, SO_RCVLOWAT, + recv_buf_size, "setsockopt(SO_RCVLOWAT)"); } fds.fd = fd; diff --git a/tools/testing/vsock/vsock_test_zerocopy.c b/tools/testing/vsock/vsock_test_zerocopy.c index 04c376b6937f5..9d9a6cb9614ad 100644 --- a/tools/testing/vsock/vsock_test_zerocopy.c +++ b/tools/testing/vsock/vsock_test_zerocopy.c @@ -162,7 +162,7 @@ static void test_client(const struct test_opts *opts, } if (test_data->so_zerocopy) - enable_so_zerocopy(fd); + enable_so_zerocopy_check(fd); iovec = alloc_test_iovec(test_data->vecs, test_data->vecs_cnt); diff --git a/tools/testing/vsock/vsock_uring_test.c b/tools/testing/vsock/vsock_uring_test.c index 6c3e6f70c457d..5c3078969659f 100644 --- a/tools/testing/vsock/vsock_uring_test.c +++ b/tools/testing/vsock/vsock_uring_test.c @@ -73,7 +73,7 @@ static void vsock_io_uring_client(const struct test_opts *opts, } if (msg_zerocopy) - enable_so_zerocopy(fd); + enable_so_zerocopy_check(fd); iovec = alloc_test_iovec(test_data->vecs, test_data->vecs_cnt);
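
The setsockopt_ull_check(), setsockopt_int_check() and setsockopt_timeval_check() helpers added to tools/testing/vsock/util.c all follow the same set-then-verify pattern: write the option with setsockopt(), read it back with getsockopt(), and abort the test on any size or value mismatch. A minimal standalone sketch of that pattern follows; the AF_INET socket and SO_REUSEADDR option are illustrative stand-ins only and are not taken from the patch.

/* Sketch of the set-then-verify socket option pattern used by the
 * vsock test helpers above. AF_INET/SO_REUSEADDR are placeholders
 * chosen only so the example is self-contained and runnable.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

static void set_int_option_checked(int fd, int level, int optname, int val)
{
	socklen_t len = sizeof(int);
	int readback = ~val;	/* make storage differ from val */

	if (setsockopt(fd, level, optname, &val, sizeof(val))) {
		fprintf(stderr, "setsockopt: %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	}

	/* Read the option back and insist on the exact size and value. */
	if (getsockopt(fd, level, optname, &readback, &len)) {
		fprintf(stderr, "getsockopt: %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	}

	if (len != sizeof(readback) || readback != val) {
		fprintf(stderr, "option %d not applied: set %d got %d\n",
			optname, val, readback);
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return EXIT_FAILURE;
	}

	set_int_option_checked(fd, SOL_SOCKET, SO_REUSEADDR, 1);
	return 0;
}

Reading the value back catches silent truncation when the caller passes a variable whose width does not match what the kernel expects for that option, which is the class of bug the surrounding vsock selftest changes (SO_VM_SOCKETS_BUFFER_SIZE as unsigned long long, SO_RCVLOWAT as int, SO_RCVTIMEO as struct timeval) are addressing.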