Merge tag 'wireless-drivers-next-2020-05-07' of git://git.kernel.org/pub/scm/linux...
author David S. Miller <davem@davemloft.net>
Thu, 7 May 2020 20:22:35 +0000 (13:22 -0700)
committer David S. Miller <davem@davemloft.net>
Thu, 7 May 2020 20:22:35 +0000 (13:22 -0700)
Kalle Valo says:

====================
wireless-drivers-next patches for v5.8

First set of patches for v5.8. Changes all over; ath10k apparently
sees the most new features this time. rtw88 also had lots of changes
in preparation for new hardware support.

This pull request also adds a new macro to include/linux/iopoll.h:
read_poll_timeout_atomic(). It is needed by rtw88 for polling in
atomic context.
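
read_poll_timeout_atomic() busy-waits (udelay() between reads, so it
is usable in atomic context) until a read satisfies the given
condition or the timeout expires, returning 0 or -ETIMEDOUT. A minimal
usage sketch, assuming a hypothetical MMIO status register REG_STATUS
with a STATUS_READY bit:

        u32 val;
        int ret;

        /* Hypothetical register/bit names; poll every 10 us for up to 1 ms. */
        ret = read_poll_timeout_atomic(readl, val, val & STATUS_READY,
                                       10, 1000, false, base + REG_STATUS);
        if (ret)
                return ret;     /* -ETIMEDOUT if the bit never asserted */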

Major changes:

ath11k

* add debugfs file for testing ADDBA and DELBA

* add support for 802.11 encapsulation offload in hardware

* add htt_peer_stats_reset debugfs file

ath10k

* enable VHT160 and VHT80+80 modes

* enable radar detection in secondary segment

* sdio: disable TX complete indication to improve throughput

* sdio: decrease power consumption

* sdio: add HTT TX bundle support to increase throughput

* sdio: add rx bitrate reporting

ath9k

* improvements to AR9002 calibration logic

carl9170

* remove buggy P2P_GO support

p54usb

* add support for AirVasT USB stick

rtw88

* add support for antenna configuration

ti wlcore

* add support for AES_CMAC cipher

iwlwifi

* support for a few new FW API versions

* new hw configs
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/realtek/rtw88/pci.c

@@@ -579,9 -579,6 +579,6 @@@ static int brcmf_netdev_stop(struct net
  
        brcmf_cfg80211_down(ndev);
  
-       if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
-               brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
        brcmf_net_setcarrier(ifp, false);
  
        return 0;
@@@ -729,18 -726,9 +726,18 @@@ static int brcmf_net_mon_stop(struct ne
        return err;
  }
  
 +static netdev_tx_t brcmf_net_mon_start_xmit(struct sk_buff *skb,
 +                                          struct net_device *ndev)
 +{
 +      dev_kfree_skb_any(skb);
 +
 +      return NETDEV_TX_OK;
 +}
 +
  static const struct net_device_ops brcmf_netdev_ops_mon = {
        .ndo_open = brcmf_net_mon_open,
        .ndo_stop = brcmf_net_mon_stop,
 +      .ndo_start_xmit = brcmf_net_mon_start_xmit,
  };
  
  int brcmf_net_mon_attach(struct brcmf_if *ifp)
@@@ -151,6 -151,82 +151,82 @@@ found
  }
  IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
  
+ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+                    __le32 *black_list_array,
+                    int *black_list_size)
+ {
+       union acpi_object *wifi_pkg, *data;
+       int ret, tbl_rev, i;
+       bool enabled;
+       data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+       wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                        ACPI_WTAS_WIFI_DATA_SIZE,
+                                        &tbl_rev);
+       if (IS_ERR(wifi_pkg)) {
+               ret = PTR_ERR(wifi_pkg);
+               goto out_free;
+       }
+       if (wifi_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+       enabled = !!wifi_pkg->package.elements[0].integer.value;
+       if (!enabled) {
+               *black_list_size = -1;
+               IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+               ret = 0;
+               goto out_free;
+       }
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           wifi_pkg->package.elements[1].integer.value >
+           APCI_WTAS_BLACK_LIST_MAX) {
+               IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
+                               wifi_pkg->package.elements[1].integer.value);
+               ret = -EINVAL;
+               goto out_free;
+       }
+       *black_list_size = wifi_pkg->package.elements[1].integer.value;
+       IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *black_list_size);
+       if (*black_list_size > APCI_WTAS_BLACK_LIST_MAX) {
+               IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
+                               *black_list_size);
+               ret = -EINVAL;
+               goto out_free;
+       }
+       for (i = 0; i < *black_list_size; i++) {
+               u32 country;
+               if (wifi_pkg->package.elements[2 + i].type !=
+                   ACPI_TYPE_INTEGER) {
+                       IWL_DEBUG_RADIO(fwrt,
+                                       "TAS invalid array elem %d\n", 2 + i);
+                       ret = -EINVAL;
+                       goto out_free;
+               }
+               country = wifi_pkg->package.elements[2 + i].integer.value;
+               black_list_array[i] = cpu_to_le32(country);
+               IWL_DEBUG_RADIO(fwrt, "TAS black list country %d\n", country);
+       }
+       ret = 0;
+ out_free:
+       kfree(data);
+       return ret;
+ }
+ IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
  int iwl_acpi_get_mcc(struct device *dev, char *mcc)
  {
        union acpi_object *wifi_pkg, *data;
@@@ -296,14 -372,9 +372,14 @@@ int iwl_sar_select_profile(struct iwl_f
                if (!prof->enabled) {
                        IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
                                        profs[i]);
 -                      /* if one of the profiles is disabled, we fail all */
 -                      return -ENOENT;
 +                      /*
 +                       * if one of the profiles is disabled, we
 +                       * ignore all of them and return 1 to
 +                       * differentiate disabled from other failures.
 +                       */
 +                      return 1;
                }
 +
                IWL_DEBUG_INFO(fwrt,
                               "SAR EWRD: chain %d profile index %d\n",
                               i, profs[i]);
@@@ -1467,7 -1467,7 +1467,7 @@@ static void iwl_req_fw_callback(const s
                                kmemdup(pieces->dbg_conf_tlv[i],
                                        pieces->dbg_conf_tlv_len[i],
                                        GFP_KERNEL);
 -                      if (!pieces->dbg_conf_tlv[i])
 +                      if (!drv->fw.dbg.conf_tlv[i])
                                goto out_free_fw;
                }
        }
@@@ -1872,10 -1872,6 +1872,6 @@@ module_param_named(power_level, iwlwifi
  MODULE_PARM_DESC(power_level,
                 "default power save level (range from 1 - 5, default: 1)");
  
- module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444);
- MODULE_PARM_DESC(fw_monitor,
-                "firmware monitor - to debug FW (default: false - needs lots of memory)");
  module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
  MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
  
@@@ -532,7 -532,8 +532,7 @@@ static struct ieee80211_sband_iftype_da
                                        IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
                                        IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
                                .mac_cap_info[2] =
 -                                      IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
 -                                      IEEE80211_HE_MAC_CAP2_ACK_EN,
 +                                      IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
                                .mac_cap_info[3] =
                                        IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
                                        IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
                                        IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
                                        IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
                                .mac_cap_info[2] =
 -                                      IEEE80211_HE_MAC_CAP2_BSR |
 -                                      IEEE80211_HE_MAC_CAP2_ACK_EN,
 +                                      IEEE80211_HE_MAC_CAP2_BSR,
                                .mac_cap_info[3] =
                                        IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
                                        IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
@@@ -1166,8 -1168,7 +1166,7 @@@ iwl_parse_nvm_mcc_info(struct device *d
  
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
-               band = (ch_idx < NUM_2GHZ_CHANNELS) ?
-                      NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+               band = iwl_nl80211_band_from_channel_idx(ch_idx);
                center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
                                                             band);
                new_rule = false;
@@@ -102,9 -102,23 +102,23 @@@ static int iwl_set_soc_latency(struct i
        if (!mvm->trans->trans_cfg->integrated)
                cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);
  
-       if (iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
-                                  SCAN_REQ_UMAC) >= 2 &&
-           (mvm->trans->trans_cfg->low_latency_xtal))
+       BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
+                    SOC_FLAGS_LTR_APPLY_DELAY_NONE);
+       BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
+                    SOC_FLAGS_LTR_APPLY_DELAY_200);
+       BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
+                    SOC_FLAGS_LTR_APPLY_DELAY_2500);
+       BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
+                    SOC_FLAGS_LTR_APPLY_DELAY_1820);
+       if (mvm->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
+           !WARN_ON(!mvm->trans->trans_cfg->integrated))
+               cmd.flags |= le32_encode_bits(mvm->trans->trans_cfg->ltr_delay,
+                                             SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+       if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+                                 SCAN_REQ_UMAC) >= 2 &&
+           mvm->trans->trans_cfg->low_latency_xtal)
                cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
  
        cmd.latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);
@@@ -550,10 -564,49 +564,49 @@@ error
        return ret;
  }
  
+ #ifdef CONFIG_ACPI
+ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+                                   struct iwl_phy_specific_cfg *phy_filters)
+ {
+       /*
+        * TODO: read specific phy config from BIOS
+        * ACPI table for this feature has not been defined yet,
+        * so for now we use hardcoded values.
+        */
+       if (IWL_MVM_PHY_FILTER_CHAIN_A) {
+               phy_filters->filter_cfg_chain_a =
+                       cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A);
+       }
+       if (IWL_MVM_PHY_FILTER_CHAIN_B) {
+               phy_filters->filter_cfg_chain_b =
+                       cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B);
+       }
+       if (IWL_MVM_PHY_FILTER_CHAIN_C) {
+               phy_filters->filter_cfg_chain_c =
+                       cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C);
+       }
+       if (IWL_MVM_PHY_FILTER_CHAIN_D) {
+               phy_filters->filter_cfg_chain_d =
+                       cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D);
+       }
+ }
+ #else /* CONFIG_ACPI */
+ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
+                                   struct iwl_phy_specific_cfg *phy_filters)
+ {
+ }
+ #endif /* CONFIG_ACPI */
  static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
  {
-       struct iwl_phy_cfg_cmd phy_cfg_cmd;
+       struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
        enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+       struct iwl_phy_specific_cfg phy_filters = {};
+       u8 cmd_ver;
+       size_t cmd_size;
  
        if (iwl_mvm_has_unified_ucode(mvm) &&
            !mvm->trans->cfg->tx_with_siso_diversity)
        phy_cfg_cmd.calib_control.flow_trigger =
                mvm->fw->default_calib[ucode_type].flow_trigger;
  
+       cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+                                       PHY_CONFIGURATION_CMD);
+       if (cmd_ver == 3) {
+               iwl_mvm_phy_filter_init(mvm, &phy_filters);
+               memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
+                      sizeof(struct iwl_phy_specific_cfg));
+       }
        IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
                       phy_cfg_cmd.phy_cfg);
+       cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
+                                   sizeof(struct iwl_phy_cfg_cmd_v1);
        return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
-                                   sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+                                   cmd_size, &phy_cfg_cmd);
  }
  
  int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
@@@ -727,7 -789,6 +789,7 @@@ int iwl_mvm_sar_select_profile(struct i
                struct iwl_dev_tx_power_cmd_v4 v4;
        } cmd;
  
 +      int ret;
        u16 len = 0;
  
        cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);
                len = sizeof(cmd.v4.v3);
  
  
 -      if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
 -                                 prof_a, prof_b))
 -              return -ENOENT;
 +      ret = iwl_sar_select_profile(&mvm->fwrt,
 +                                   cmd.v5.v3.per_chain_restriction,
 +                                   prof_a, prof_b);
 +
 +      /* return on error or if the profile is disabled (positive number) */
 +      if (ret)
 +              return ret;
 +
        IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
        return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
  }
@@@ -937,6 -993,40 +999,40 @@@ static int iwl_mvm_ppag_init(struct iwl
        return iwl_mvm_ppag_send_cmd(mvm);
  }
  
+ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+ {
+       int ret;
+       struct iwl_tas_config_cmd cmd = {};
+       int list_size;
+       BUILD_BUG_ON(ARRAY_SIZE(cmd.black_list_array) <
+                    APCI_WTAS_BLACK_LIST_MAX);
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+               IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
+               return;
+       }
+       ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.black_list_array, &list_size);
+       if (ret < 0) {
+               IWL_DEBUG_RADIO(mvm,
+                               "TAS table invalid or unavailable. (%d)\n",
+                               ret);
+               return;
+       }
+       if (list_size < 0)
+               return;
+       /* list size if TAS enabled can only be non-negative */
+       cmd.black_list_size = cpu_to_le32((u32)list_size);
+       ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                               TAS_CONFIG),
+                                  0, sizeof(cmd), &cmd);
+       if (ret < 0)
+               IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
+ }
  #else /* CONFIG_ACPI */
  
  inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
@@@ -964,6 -1054,10 +1060,10 @@@ static int iwl_mvm_ppag_init(struct iwl
  {
        return 0;
  }
+ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
+ {
+ }
  #endif /* CONFIG_ACPI */
  
  void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
@@@ -1040,7 -1134,16 +1140,7 @@@ static int iwl_mvm_sar_init(struct iwl_
                                "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
                                ret);
  
 -      ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
 -      /*
 -       * If we don't have profile 0 from BIOS, just skip it.  This
 -       * means that SAR Geo will not be enabled either, even if we
 -       * have other valid profiles.
 -       */
 -      if (ret == -ENOENT)
 -              return 1;
 -
 -      return ret;
 +      return iwl_mvm_sar_select_profile(mvm, 1, 1);
  }
  
  static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
@@@ -1269,7 -1372,7 +1369,7 @@@ int iwl_mvm_up(struct iwl_mvm *mvm
        ret = iwl_mvm_sar_init(mvm);
        if (ret == 0) {
                ret = iwl_mvm_sar_geo_init(mvm);
 -      } else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
 +      } else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
                /*
                 * If basic SAR is not available, we check for WGDS,
                 * which should *not* be available either.  If it is
        if (ret < 0)
                goto error;
  
+       iwl_mvm_tas_init(mvm);
        iwl_mvm_leds_sync(mvm);
  
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
@@@ -3665,7 -3665,7 +3665,7 @@@ static void rs_fill_lq_cmd(struct iwl_m
                        cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
  }
  
 -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 +static void *rs_alloc(struct ieee80211_hw *hw)
  {
        return hw->priv;
  }
@@@ -3740,11 -3740,12 +3740,12 @@@ int rs_pretty_print_rate(char *buf, in
        }
  
        return scnprintf(buf, bufsz,
-                        "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s",
+                        "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
                         rate, type, rs_pretty_ant(ant), bw, mcs, nss,
                         (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
                         (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
                         (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+                        (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
                         (rate & RATE_MCS_BF_MSK) ? "BF " : "");
  }
  
@@@ -5,7 -5,7 +5,7 @@@
   *
   * GPL LICENSE SUMMARY
   *
-  * Copyright(c) 2018 - 2019 Intel Corporation
+  * Copyright(c) 2018 - 2020 Intel Corporation
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of version 2 of the GNU General Public License as
@@@ -18,7 -18,7 +18,7 @@@
   *
   * BSD LICENSE
   *
-  * Copyright(c) 2018 - 2019 Intel Corporation
+  * Copyright(c) 2018 - 2020 Intel Corporation
   * All rights reserved.
   *
   * Redistribution and use in source and binary forms, with or without
@@@ -84,32 -84,35 +84,35 @@@ iwl_pcie_ctxt_info_dbg_enable(struct iw
  
        fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
  
-       if (le32_to_cpu(fw_mon_cfg->buf_location) ==
-           IWL_FW_INI_LOCATION_SRAM_PATH) {
+       switch (le32_to_cpu(fw_mon_cfg->buf_location)) {
+       case IWL_FW_INI_LOCATION_SRAM_PATH:
                dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
                IWL_DEBUG_FW(trans,
-                            "WRT: Applying SMEM buffer destination\n");
-               goto out;
-       }
-       if (le32_to_cpu(fw_mon_cfg->buf_location) ==
-           IWL_FW_INI_LOCATION_DRAM_PATH &&
-           trans->dbg.fw_mon_ini[alloc_id].num_frags) {
-               struct iwl_dram_data *frag =
-                       &trans->dbg.fw_mon_ini[alloc_id].frags[0];
-               dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+                               "WRT: Applying SMEM buffer destination\n");
+               break;
  
+       case IWL_FW_INI_LOCATION_NPK_PATH:
+               dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF;
                IWL_DEBUG_FW(trans,
-                            "WRT: Applying DRAM destination (alloc_id=%u)\n",
-                            alloc_id);
+                            "WRT: Applying NPK buffer destination\n");
+               break;
  
-               dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
-               dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+       case IWL_FW_INI_LOCATION_DRAM_PATH:
+               if (trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+                       struct iwl_dram_data *frag =
+                               &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+                       dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+                       dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+                       dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+                       IWL_DEBUG_FW(trans,
+                                    "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
+                                    alloc_id,
+                                    trans->dbg.fw_mon_ini[alloc_id].num_frags);
+               }
+               break;
+       default:
+               IWL_ERR(trans, "WRT: Invalid buffer destination\n");
        }
  out:
        if (dbg_flags)
                *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
@@@ -129,18 -132,6 +132,18 @@@ int iwl_pcie_ctxt_info_gen3_init(struc
        int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
                              trans->cfg->min_txq_size);
  
 +      switch (trans_pcie->rx_buf_size) {
 +      case IWL_AMSDU_DEF:
 +              return -EINVAL;
 +      case IWL_AMSDU_2K:
 +              break;
 +      case IWL_AMSDU_4K:
 +      case IWL_AMSDU_8K:
 +      case IWL_AMSDU_12K:
 +              control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K;
 +              break;
 +      }
 +
        /* Allocate prph scratch */
        prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
                                          &trans_pcie->prph_scratch_dma_addr,
                cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
        prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
  
 -      control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
 -                      IWL_PRPH_SCRATCH_MTR_MODE |
 -                      (IWL_PRPH_MTR_FORMAT_256B &
 -                       IWL_PRPH_SCRATCH_MTR_FORMAT);
 +      control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
 +      control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT;
  
        /* initialize RX default queue */
        prph_sc_ctrl->rbd_cfg.free_rbd_addr =
@@@ -90,9 -90,7 +90,7 @@@ static void iwl_pcie_gen2_update_byte_t
                                          struct iwl_txq *txq, u16 byte_cnt,
                                          int num_tbs)
  {
-       struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
-       struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        u8 filled_tfd_size, num_fetch_chunks;
        u16 len = byte_cnt;
                return;
  
        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
-                                  num_tbs * sizeof(struct iwl_tfh_tb);
+                         num_tbs * sizeof(struct iwl_tfh_tb);
        /*
         * filled_tfd_size contains the number of filled bytes in the TFD.
         * Dividing it by 64 will give the number of chunks to fetch
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
  
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+               struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
                /* Starting from AX210, the HW expects bytes */
                WARN_ON(trans_pcie->bc_table_dword);
                WARN_ON(len > 0x3FFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
                scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
        } else {
+               struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
                /* Before AX210, the HW expects DW */
                WARN_ON(!trans_pcie->bc_table_dword);
                len = DIV_ROUND_UP(len, 4);
@@@ -1418,9 -1420,6 +1420,9 @@@ void iwl_trans_pcie_dyn_txq_free(struc
  
        iwl_pcie_gen2_txq_unmap(trans, queue);
  
 +      iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
 +      trans_pcie->txq[queue] = NULL;
 +
        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
  }
  
@@@ -411,12 -411,14 +411,14 @@@ static void rtw_pci_reset_buf_desc(stru
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
  
-       len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
-       dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
-       rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
-       rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
-       rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
-       rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+       if (!rtw_chip_wcpu_11n(rtwdev)) {
+               len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
+               dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
+               rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
+               rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
+               rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
+               rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+       }
  
        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
  
        /* reset H2C Queue index in a single write */
-       rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
-                       BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
+       if (rtw_chip_wcpu_11ac(rtwdev))
+               rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+                               BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
  }
  
  static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@@ -489,7 -492,9 +492,9 @@@ static void rtw_pci_enable_interrupt(st
  
        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
-       rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+       if (rtw_chip_wcpu_11ac(rtwdev))
+               rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
        rtwpci->irq_enabled = true;
  
        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
@@@ -507,7 -512,9 +512,9 @@@ static void rtw_pci_disable_interrupt(s
  
        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
-       rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+       if (rtw_chip_wcpu_11ac(rtwdev))
+               rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
        rtwpci->irq_enabled = false;
  
  out:
@@@ -1012,13 -1019,17 +1019,17 @@@ static void rtw_pci_irq_recognized(stru
  
        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
-       irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+       if (rtw_chip_wcpu_11ac(rtwdev))
+               irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+       else
+               irq_status[3] = 0;
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
-       rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+       if (rtw_chip_wcpu_11ac(rtwdev))
+               rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
  
        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
  }
@@@ -1091,6 -1102,7 +1102,7 @@@ static int rtw_pci_io_mapping(struct rt
        len = pci_resource_len(pdev, bar_id);
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
+               pci_release_regions(pdev);
                rtw_err(rtwdev, "failed to map pci memory\n");
                return -ENOMEM;
        }
@@@ -1338,17 -1350,22 +1350,17 @@@ static void rtw_pci_phy_cfg(struct rtw_
        rtw_pci_link_cfg(rtwdev);
  }
  
 -#ifdef CONFIG_PM
 -static int rtw_pci_suspend(struct device *dev)
 +static int __maybe_unused rtw_pci_suspend(struct device *dev)
  {
        return 0;
  }
  
 -static int rtw_pci_resume(struct device *dev)
 +static int __maybe_unused rtw_pci_resume(struct device *dev)
  {
        return 0;
  }
  
  static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
 -#define RTW_PM_OPS (&rtw_pm_ops)
 -#else
 -#define RTW_PM_OPS NULL
 -#endif
  
  static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
  {
@@@ -1567,6 -1584,9 +1579,9 @@@ static const struct pci_device_id rtw_p
  #endif
  #ifdef CONFIG_RTW88_8822CE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
+ #endif
+ #ifdef CONFIG_RTW88_8723DE
+       { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xD723, rtw8723d_hw_spec) },
  #endif
        {},
  };
@@@ -1577,7 -1597,7 +1592,7 @@@ static struct pci_driver rtw_pci_drive
        .id_table = rtw_pci_id_table,
        .probe = rtw_pci_probe,
        .remove = rtw_pci_remove,
 -      .driver.pm = RTW_PM_OPS,
 +      .driver.pm = &rtw_pm_ops,
  };
  module_pci_driver(rtw_pci_driver);