	for_each_dss((dss), (gt)) \
		for_each_if((xe_gt_mcr_get_dss_steering((gt), (dss), &(group), &(instance)), true))
+/*
+ * Loop over each DSS available for geometry and determine the group and
+ * instance IDs that should be used to steer MCR accesses toward this DSS.
+ * @dss: DSS ID to obtain steering for
+ * @gt: GT structure
+ * @group: steering group ID, data type: u16
+ * @instance: steering instance ID, data type: u16
+ */
+#define for_each_geometry_dss(dss, gt, group, instance) \
+	for_each_dss_steering(dss, gt, group, instance) \
+		if (xe_gt_has_geometry_dss(gt, dss))
+
+/*
+ * Loop over each DSS available for compute and determine the group and
+ * instance IDs that should be used to steer MCR accesses toward this DSS.
+ * @dss: DSS ID to obtain steering for
+ * @gt: GT structure
+ * @group: steering group ID, data type: u16
+ * @instance: steering instance ID, data type: u16
+ */
+#define for_each_compute_dss(dss, gt, group, instance) \
+	for_each_dss_steering(dss, gt, group, instance) \
+		if (xe_gt_has_compute_dss(gt, dss))
+
#endif /* _XE_GT_MCR_H_ */
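A minimal usage sketch of the new iterator, not part of the patch itself: pick_first_geometry_dss() below is a hypothetical caller, and it assumes for_each_geometry_dss() and struct xe_gt are visible through xe_gt_mcr.h and its usual includes.

#include <linux/types.h>

#include "xe_gt_mcr.h"

/*
 * Hypothetical caller (illustration only): return the steering target of
 * the first geometry-capable DSS on a GT.  for_each_geometry_dss() fills
 * in the group/instance pair on every iteration.
 */
static bool pick_first_geometry_dss(struct xe_gt *gt, u16 *group, u16 *instance)
{
	unsigned int dss;
	u16 grp, inst;

	for_each_geometry_dss(dss, gt, grp, inst) {
		*group = grp;
		*instance = inst;
		return true;
	}

	return false;	/* no geometry DSS is fused on for this GT */
}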
	return quad_first < (quad + 1) * dss_per_quad;
}
+
+/* Check whether a DSS is present in the GT's geometry DSS fuse mask. */
+bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss)
+{
+	return test_bit(dss, gt->fuse_topo.g_dss_mask);
+}
+
+/* Check whether a DSS is present in the GT's compute DSS fuse mask. */
+bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss)
+{
+	return test_bit(dss, gt->fuse_topo.c_dss_mask);
+}
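As another illustrative sketch (not from the patch), the new predicates combine naturally with the existing for_each_dss() iterator seen above; count_dss_types() is a hypothetical helper, and the xe_gt_topology.h include is an assumption about where the new prototypes live.

#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"

/*
 * Hypothetical helper (illustration only): tally geometry- and
 * compute-capable DSSes on a GT.  A DSS whose bit is set in both fuse
 * masks is counted in both tallies.
 */
static void count_dss_types(struct xe_gt *gt,
			    unsigned int *geometry, unsigned int *compute)
{
	unsigned int dss;

	*geometry = 0;
	*compute = 0;

	for_each_dss(dss, gt) {
		if (xe_gt_has_geometry_dss(gt, dss))
			(*geometry)++;
		if (xe_gt_has_compute_dss(gt, dss))
			(*compute)++;
	}
}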