RDMA support for DMA handle

From Yishai:

This patch series introduces a new DMA Handle (DMAH) object, along with
corresponding APIs for its allocation and deallocation.

The DMAH object encapsulates attributes relevant for DMA transactions.
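
For concreteness, a hedged sketch of the kind of attribute set a DMAH
can carry (the struct and field names below are illustrative; the exact
layout lives in the IB-layer patches):

        /* Illustrative DMA-handle attribute set; names are assumptions. */
        struct ib_dmah_attrs {
                u32 cpu_id;                  /* target CPU for steering */
                enum tph_mem_type mem_type;  /* volatile vs. persistent */
                u8 ph;                       /* TPH processing hints */
        };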

While initially intended to support TLP Processing Hints (TPH) [1], the
design is extensible to accommodate future features such as PCI
multipath for DMA, PCI UIO configurations, traffic class selection, and
more.

Additionally, we introduce a new ioctl method on the MR object:
UVERBS_METHOD_REG_MR.

This method consolidates multiple reg_mr variants under a single
user-space ioctl interface, supporting ibv_reg_mr(), ibv_reg_mr_iova(),
ibv_reg_mr_iova2(), and ibv_reg_dmabuf_mr(). It also enables passing a
DMA handle as part of the registration process.
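
As a rough illustration of the intended flow (every verbs-level name
below is an assumption for illustration only; this series defines the
kernel uAPI, not the rdma-core side):

        /* Hypothetical user-space flow; the ibv_*dmah* names here are
         * assumed, not an API this series adds.
         */
        struct ibv_dmah_init_attr attr = {
                .cpu_id = 3,    /* steer toward CPU 3's cache hierarchy */
                .ph = 2,        /* 2-bit TPH processing hint */
        };
        struct ibv_dmah *dmah = ibv_alloc_dmah(ctx, &attr);
        struct ibv_mr *mr = ibv_reg_dmah_mr(pd, buf, len,
                                            IBV_ACCESS_LOCAL_WRITE, dmah);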

Throughout the patch series, the following DMAH-related functionality is
also introduced in the IB layer:

- Association with a CPU ID and its memory type, for use with Steering
  Tags [2].

- Inclusion of Processing Hints (PH) data for TPH functionality [3].

- Enforcement of security: only tasks allowed to run on a given CPU may
  request a DMA handle for it (a minimal sketch follows this list).

- Reference counting for DMAH life cycle management and safe usage
  across memory regions.
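
A minimal kernel-side sketch of the affinity check described above (the
function is illustrative; only the cpumask test reflects the stated
policy):

        /* Illustrative: reject a DMA handle request for a CPU that the
         * calling task is not allowed to run on.
         */
        static int dmah_cpu_allowed(unsigned int cpu_id)
        {
                if (cpu_id >= nr_cpu_ids || !cpu_present(cpu_id))
                        return -EINVAL;
                if (!cpumask_test_cpu(cpu_id, current->cpus_ptr))
                        return -EPERM;
                return 0;
        }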

mlx5 driver implementation:
--------------------------
The series includes an implementation of the above functionality in the
mlx5 driver.

In mlx5_core:
- Enables TPH over PCIe when both firmware and OS support it.

- Manages Steering Tags and corresponding indices by writing tag values
  to the PCI configuration space.

- Exposes APIs to upper layers (e.g., mlx5_ib) to enable the PCIe TPH
  functionality (a caller-side sketch follows this list).
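
For orientation, a minimal caller-side sketch of that API (the two
functions are added in lib/st.c below; TPH_MEM_TYPE_VM comes from
<linux/pci-tph.h>, and error handling is trimmed):

        u16 st_index;
        int err;

        /* Resolve a steering tag for 'cpu' and program it into the
         * device's PCI ST table; -EOPNOTSUPP means ST is not supported
         * or not enabled.
         */
        err = mlx5_st_alloc_index(mdev, TPH_MEM_TYPE_VM, cpu, &st_index);
        if (err)
                return err;

        /* ... st_index is later placed into an MKEY context ... */

        mlx5_st_dealloc_index(mdev, st_index);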

In mlx5_ib:
- Adds full support for DMAH operations.

- Utilizes mlx5_core's Steering Tag APIs to derive steering-tag indices
  from user input.

- Stores the resulting index in an mlx5_dmah structure for use during
  MKEY creation with a DMA handle.

- Allows MKEYs to be created in conjunction with DMA handles (a sketch
  of the new mkc fields follows this list).
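
For reference, a hedged sketch of how the new mkc fields (see the
mlx5_ifc diff below) might be programmed during MKEY creation; the
'dmah' structure fields here are illustrative:

        /* Sketch: program the TPH fields this series adds to
         * mlx5_ifc_mkc_bits; 'mkc' points into a CREATE_MKEY inbox.
         */
        MLX5_SET(mkc, mkc, pcie_tph_en, 1);
        MLX5_SET(mkc, mkc, pcie_tph_ph, dmah->ph);
        if (dmah->st_index_valid)       /* illustrative flag */
                MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
                         dmah->st_index);
        else
                MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
                         MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX);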

Additional details are provided in the commit messages.

[1] Background, from PCIe specification 6.2.
TLP Processing Hints (TPH)
--------------------------
TLP Processing Hints is an optional feature that provides hints in
Request TLP headers to facilitate optimized processing of Requests that
target Memory Space. These Processing Hints enable the system hardware
(e.g., the Root Complex and/or Endpoints) to optimize platform
resources such as system and memory interconnect on a per-TLP basis.
Steering Tags are system-specific values used to identify a processing
resource that a Requester explicitly targets. System software discovers
and identifies TPH capabilities to determine the Steering Tag allocation
for each Function that supports TPH.

[2] Steering Tags
Functions that intend to target a TLP towards a specific processing
resource such as a host processor or system cache hierarchy require
topological information of the target cache (e.g., which host cache).
Steering Tags are system-specific values that provide information about
the host or cache structure in the system cache hierarchy. These values
are used to associate processing elements within the platform with the
processing of Requests.

[3] Processing Hints
The Requester provides hints to the Root Complex or other targets about
the intended use of data and data structures by the host and/or device.
The hints are provided by the Requester, which has knowledge of upcoming
Request patterns that the Completer would not be able to deduce
autonomously (with good accuracy).

Yishai

Signed-off-by: Leon Romanovsky <leon@kernel.org>

* mlx5-next:
  net/mlx5: Add support for device steering tag
  net/mlx5: Expose IFC bits for TPH
  PCI/TPH: Expose pcie_tph_get_st_table_size()
  net/mlx5: Expose cable_length field in PFCC register
  net/mlx5: Add IFC bits and enums for buf_ownership
  net/mlx5: Add IFC bits to support RSS for IPSec offload
  net/mlx5: IFC updates for disabled host PF
  net/mlx5: Expose disciplined_fr_counter through HCA capabilities in mlx5_ifc
Leon Romanovsky 2025-07-23 01:38:56 -04:00
commit b272fc8972
9 changed files with 277 additions and 27 deletions


@@ -167,5 +167,10 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_
#
mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
#
# TPH support
#
mlx5_core-$(CONFIG_PCIE_TPH) += lib/st.o
obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o
mlx5_dpll-y := dpll.o


@@ -0,0 +1,164 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include "mlx5_core.h"
#include "lib/mlx5.h"

struct mlx5_st_idx_data {
        refcount_t usecount;
        u16 tag;
};

struct mlx5_st {
        /* serialize access upon alloc/free flows */
        struct mutex lock;
        struct xa_limit index_limit;
        struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
};

struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;
        struct mlx5_st *st;
        u16 num_entries;
        int ret;

        if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
                return NULL;

#ifdef CONFIG_MLX5_SF
        if (mlx5_core_is_sf(dev))
                return dev->priv.parent_mdev->st;
#endif

        /* Checking whether the device is capable */
        if (!pdev->tph_cap)
                return NULL;

        num_entries = pcie_tph_get_st_table_size(pdev);
        /* We need a reserved entry for non TPH cases */
        if (num_entries < 2)
                return NULL;

        /* The OS doesn't support ST */
        ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
        if (ret)
                return NULL;

        st = kzalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto end;

        mutex_init(&st->lock);
        xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
        /* entry 0 is reserved for non TPH cases */
        st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
        st->index_limit.max = num_entries - 1;

        return st;

end:
        pcie_disable_tph(dev->pdev);
        return NULL;
}

void mlx5_st_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_st *st = dev->st;

        if (mlx5_core_is_sf(dev) || !st)
                return;

        pcie_disable_tph(dev->pdev);
        WARN_ON_ONCE(!xa_empty(&st->idx_xa));
        kfree(st);
}

int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
                        unsigned int cpu_uid, u16 *st_index)
{
        struct mlx5_st_idx_data *idx_data;
        struct mlx5_st *st = dev->st;
        unsigned long index;
        u32 xa_id;
        u16 tag;
        int ret;

        if (!st)
                return -EOPNOTSUPP;

        ret = pcie_tph_get_cpu_st(dev->pdev, mem_type, cpu_uid, &tag);
        if (ret)
                return ret;

        mutex_lock(&st->lock);

        /* Reuse an existing index if this tag is already programmed */
        xa_for_each(&st->idx_xa, index, idx_data) {
                if (tag == idx_data->tag) {
                        refcount_inc(&idx_data->usecount);
                        *st_index = index;
                        goto end;
                }
        }

        idx_data = kzalloc(sizeof(*idx_data), GFP_KERNEL);
        if (!idx_data) {
                ret = -ENOMEM;
                goto end;
        }

        refcount_set(&idx_data->usecount, 1);
        idx_data->tag = tag;

        ret = xa_alloc(&st->idx_xa, &xa_id, idx_data, st->index_limit, GFP_KERNEL);
        if (ret)
                goto clean_idx_data;

        ret = pcie_tph_set_st_entry(dev->pdev, xa_id, tag);
        if (ret)
                goto clean_idx_xa;

        *st_index = xa_id;
        goto end;

clean_idx_xa:
        xa_erase(&st->idx_xa, xa_id);
clean_idx_data:
        kfree(idx_data);
end:
        mutex_unlock(&st->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_alloc_index);

int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
{
        struct mlx5_st_idx_data *idx_data;
        struct mlx5_st *st = dev->st;
        int ret = 0;

        if (!st)
                return -EOPNOTSUPP;

        mutex_lock(&st->lock);
        idx_data = xa_load(&st->idx_xa, st_index);
        if (WARN_ON_ONCE(!idx_data)) {
                ret = -EINVAL;
                goto end;
        }

        if (refcount_dec_and_test(&idx_data->usecount)) {
                xa_erase(&st->idx_xa, st_index);
                /* We leave PCI config space as was before, no mkey will refer to it */
                kfree(idx_data);
        }

end:
        mutex_unlock(&st->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_st_dealloc_index);


@@ -1102,6 +1102,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->dm = mlx5_dm_create(dev);
dev->st = mlx5_st_create(dev);
dev->tracer = mlx5_fw_tracer_create(dev);
dev->hv_vhca = mlx5_hv_vhca_create(dev);
dev->rsc_dump = mlx5_rsc_dump_create(dev);
@@ -1150,6 +1151,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_rsc_dump_destroy(dev);
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_st_destroy(dev);
mlx5_dm_cleanup(dev);
mlx5_fs_core_free(dev);
mlx5_sf_table_cleanup(dev);


@@ -300,6 +300,15 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
#ifdef CONFIG_PCIE_TPH
struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev);
void mlx5_st_destroy(struct mlx5_core_dev *dev);
#else
static inline struct mlx5_st *
mlx5_st_create(struct mlx5_core_dev *dev) { return NULL; }
static inline void mlx5_st_destroy(struct mlx5_core_dev *dev) { return; }
#endif
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);


@@ -727,8 +727,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type_ext, 0x4) ||
HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c6, 0xa) ||
HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
return -EINVAL;
}
@@ -903,8 +904,9 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type_ext, 0x4) ||
HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c6, 0xa) ||
HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
return -EINVAL;
}
@@ -1279,7 +1281,8 @@ hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
struct mlx5hws_definer_fc *curr_fc;
if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param,
misc_parameters_2.ipsec_next_header, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {


@@ -168,7 +168,7 @@ static u32 get_st_table_loc(struct pci_dev *pdev)
* Return the size of ST table. If ST table is not in TPH Requester Extended
* Capability space, return 0. Otherwise return the ST Table Size + 1.
*/
static u16 get_st_table_size(struct pci_dev *pdev)
u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
{
u32 reg;
u32 loc;
@@ -185,6 +185,7 @@ static u16 get_st_table_size(struct pci_dev *pdev)
return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
}
EXPORT_SYMBOL(pcie_tph_get_st_table_size);
/* Return device's Root Port completer capability */
static u8 get_rp_completer_type(struct pci_dev *pdev)
@@ -211,7 +212,7 @@ static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
int offset;
/* Check if index is out of bound */
st_table_size = get_st_table_size(pdev);
st_table_size = pcie_tph_get_st_table_size(pdev);
if (index >= st_table_size)
return -ENXIO;
@@ -443,7 +444,7 @@ void pci_restore_tph_state(struct pci_dev *pdev)
pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
st_entry = (u16 *)cap;
offset = PCI_TPH_BASE_SIZEOF;
num_entries = get_st_table_size(pdev);
num_entries = pcie_tph_get_st_table_size(pdev);
for (i = 0; i < num_entries; i++) {
pci_write_config_word(pdev, pdev->tph_cap + offset,
*st_entry++);
@@ -475,7 +476,7 @@ void pci_save_tph_state(struct pci_dev *pdev)
/* Save all ST entries in extended capability structure */
st_entry = (u16 *)cap;
offset = PCI_TPH_BASE_SIZEOF;
num_entries = get_st_table_size(pdev);
num_entries = pcie_tph_get_st_table_size(pdev);
for (i = 0; i < num_entries; i++) {
pci_read_config_word(pdev, pdev->tph_cap + offset,
st_entry++);
@@ -499,7 +500,7 @@ void pci_tph_init(struct pci_dev *pdev)
if (!pdev->tph_cap)
return;
num_entries = get_st_table_size(pdev);
num_entries = pcie_tph_get_st_table_size(pdev);
save_size = sizeof(u32) + num_entries * sizeof(u16);
pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
}


@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/pci-tph.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
@@ -688,6 +689,7 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
struct mlx5_st;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -757,6 +759,7 @@ struct mlx5_core_dev {
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
struct mlx5_st *st;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -1160,6 +1163,23 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
#ifdef CONFIG_PCIE_TPH
int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
unsigned int cpu_uid, u16 *st_index);
int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index);
#else
static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev,
enum tph_mem_type mem_type,
unsigned int cpu_uid, u16 *st_index)
{
return -EOPNOTSUPP;
}
static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
{
return -EOPNOTSUPP;
}
#endif
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);


@@ -420,7 +420,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
/* Table 2170 - Flow Table Fields Supported 2 Format */
struct mlx5_ifc_flow_table_fields_supported_2_bits {
u8 reserved_at_0[0x2];
u8 inner_l4_type_ext[0x1];
u8 outer_l4_type_ext[0x1];
u8 inner_l4_type[0x1];
u8 outer_l4_type[0x1];
u8 reserved_at_4[0xa];
@@ -429,7 +430,11 @@ struct mlx5_ifc_flow_table_fields_supported_2_bits {
u8 tunnel_header_0_1[0x1];
u8 reserved_at_11[0xf];
u8 reserved_at_20[0x60];
u8 reserved_at_20[0xf];
u8 ipsec_next_header[0x1];
u8 reserved_at_30[0x10];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -552,6 +557,13 @@ enum {
MLX5_PACKET_L4_TYPE_UDP,
};
enum {
MLX5_PACKET_L4_TYPE_EXT_NONE,
MLX5_PACKET_L4_TYPE_EXT_TCP,
MLX5_PACKET_L4_TYPE_EXT_UDP,
MLX5_PACKET_L4_TYPE_EXT_ICMP,
};
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -578,10 +590,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_dport[0x10];
u8 l4_type[0x2];
u8 reserved_at_c2[0xe];
u8 l4_type_ext[0x4];
u8 reserved_at_c6[0xa];
u8 ipv4_ihl[0x4];
u8 reserved_at_c4[0x4];
u8 reserved_at_d4[0x4];
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -689,10 +701,9 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
u8 reserved_at_1a0[0x8];
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
u8 reserved_at_1b8[0x8];
u8 ipsec_next_header[0x8];
u8 reserved_at_1c0[0x40];
};
@@ -1846,7 +1857,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
u8 reserved_at_270[0x3];
u8 disciplined_fr_counter[0x1];
u8 reserved_at_271[0x2];
u8 qp_error_syndrome[0x1];
u8 reserved_at_274[0x2];
u8 lag_dct[0x2];
@@ -1859,7 +1871,9 @@
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
u8 reserved_at_2a0[0xb];
u8 reserved_at_2a0[0x7];
u8 mkey_pcie_tph[0x1];
u8 reserved_at_2a8[0x3];
u8 shampo[0x1];
u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
@@ -4406,6 +4420,10 @@ enum {
MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
};
enum {
MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0,
};
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
@@ -4457,7 +4475,11 @@ struct mlx5_ifc_mkc_bits {
u8 relaxed_ordering_read[0x1];
u8 log_page_size[0x6];
u8 reserved_at_1e0[0x20];
u8 reserved_at_1e0[0x5];
u8 pcie_tph_en[0x1];
u8 pcie_tph_ph[0x2];
u8 pcie_tph_steering_tag_index[0x8];
u8 reserved_at_1f0[0x10];
};
struct mlx5_ifc_pkey_bits {
@@ -9982,6 +10004,10 @@ struct mlx5_ifc_pude_reg_bits {
u8 reserved_at_20[0x60];
};
enum {
MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7,
};
struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
@@ -10018,7 +10044,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
u8 reserved_at_160[0x1c];
u8 reserved_at_160[0x8];
u8 lane_rate_oper[0x14];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -10462,10 +10489,19 @@ struct mlx5_ifc_pifr_reg_bits {
u8 port_filter_update_en[8][0x20];
};
enum {
MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
};
struct mlx5_ifc_pfcc_reg_bits {
u8 reserved_at_0[0x8];
u8 reserved_at_0[0x4];
u8 buf_ownership[0x2];
u8 reserved_at_6[0x2];
u8 local_port[0x8];
u8 reserved_at_10[0xb];
u8 reserved_at_10[0xa];
u8 cable_length_mask[0x1];
u8 ppan_mask_n[0x1];
u8 minor_stall_mask[0x1];
u8 critical_stall_mask[0x1];
@@ -10494,7 +10530,10 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 device_stall_minor_watermark[0x10];
u8 device_stall_critical_watermark[0x10];
u8 reserved_at_a0[0x60];
u8 reserved_at_a0[0x18];
u8 cable_length[0x8];
u8 reserved_at_c0[0x40];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -10595,11 +10634,15 @@ struct mlx5_ifc_mtutc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x10];
u8 ppcnt_recovery_counters[0x1];
u8 reserved_at_11[0xc];
u8 reserved_at_11[0x7];
u8 cable_length[0x1];
u8 reserved_at_19[0x4];
u8 fec_200G_per_lane_in_pplm[0x1];
u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
u8 reserved_at_49[0x1f];
u8 reserved_at_49[0xa];
u8 buffer_ownership[0x1];
u8 reserved_at_54[0x14];
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
@@ -12382,7 +12425,9 @@ struct mlx5_ifc_mtrc_ctrl_bits {
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
u8 reserved_at_8[0x7];
u8 reserved_at_8[0x5];
u8 host_pf_not_exist[0x1];
u8 reserved_at_14[0x1];
u8 host_pf_disabled[0x1];
u8 host_num_of_vfs[0x10];


@@ -28,6 +28,7 @@ int pcie_tph_get_cpu_st(struct pci_dev *dev,
unsigned int cpu_uid, u16 *tag);
void pcie_disable_tph(struct pci_dev *pdev);
int pcie_enable_tph(struct pci_dev *pdev, int mode);
u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
#else
static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
unsigned int index, u16 tag)