#define RDRAND_RETRY_LOOPS 10
-#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
-#define RDSEED_INT ".byte 0x0f,0xc7,0xf8"
-#ifdef CONFIG_X86_64
-# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
-# define RDSEED_LONG ".byte 0x48,0x0f,0xc7,0xf8"
-#else
-# define RDRAND_LONG RDRAND_INT
-# define RDSEED_LONG RDSEED_INT
-#endif
-
/* Unconditional execution of RDRAND and RDSEED */
static inline bool __must_check rdrand_long(unsigned long *v)
{
bool ok;
unsigned int retry = RDRAND_RETRY_LOOPS;
do {
- asm volatile(RDRAND_LONG
+ asm volatile("rdrand %[out]"
CC_SET(c)
- : CC_OUT(c) (ok), "=a" (*v));
+ : CC_OUT(c) (ok), [out] "=r" (*v));
if (ok)
return true;
	} while (--retry);

	return false;
}

static inline bool __must_check rdrand_int(unsigned int *v)
{
bool ok;
unsigned int retry = RDRAND_RETRY_LOOPS;
do {
- asm volatile(RDRAND_INT
+ asm volatile("rdrand %[out]"
CC_SET(c)
- : CC_OUT(c) (ok), "=a" (*v));
+ : CC_OUT(c) (ok), [out] "=r" (*v));
if (ok)
return true;
	} while (--retry);

	return false;
}

static inline bool __must_check rdseed_long(unsigned long *v)
{
bool ok;
- asm volatile(RDSEED_LONG
+ asm volatile("rdseed %[out]"
CC_SET(c)
- : CC_OUT(c) (ok), "=a" (*v));
+ : CC_OUT(c) (ok), [out] "=r" (*v));
return ok;
}
static inline bool __must_check rdseed_int(unsigned int *v)
{
bool ok;
- asm volatile(RDSEED_INT
+ asm volatile("rdseed %[out]"
CC_SET(c)
- : CC_OUT(c) (ok), "=a" (*v));
+ : CC_OUT(c) (ok), [out] "=r" (*v));
return ok;
}
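
Illustration (not part of the patch): a minimal sketch of how a caller might honour the __must_check contract of these helpers; the function name and the zero fallback are invented for the example.

static unsigned long example_get_hw_random(void)
{
	unsigned long val;

	/* rdrand_long() already retries up to RDRAND_RETRY_LOOPS times. */
	if (rdrand_long(&val))
		return val;

	/* CF stayed clear on every attempt: no random number available. */
	return 0;
}
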
#define X86_CENTAUR_FAM6_C7_D 0xd
#define X86_CENTAUR_FAM6_NANO 0xf
+#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
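
Illustration (not part of the patch): X86_STEPPINGS() builds an inclusive bitmask of stepping numbers, for example:

/* X86_STEPPINGS(0x2, 0x5) == GENMASK(0x5, 0x2) == 0x3c: steppings 2, 3, 4 and 5. */
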
/**
- * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Base macro for CPU matching
+ * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@_vendor
* @_family: The family number or X86_FAMILY_ANY
* @_model: The model number, model constant or X86_MODEL_ANY
+ * @_steppings: Bitmask for steppings, stepping constant or X86_STEPPING_ANY
* @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
* @_data: Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is cast to unsigned long internally.
 *
 * Use only if you need all selectors. If you really need to wrap this macro
 * into another macro at the usage site for good reasons, then please
* start this local macro with X86_MATCH to allow easy grepping.
*/
-#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(_vendor, _family, _model, \
- _feature, _data) { \
+#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
+ _steppings, _feature, _data) { \
.vendor = X86_VENDOR_##_vendor, \
.family = _family, \
.model = _model, \
+ .steppings = _steppings, \
.feature = _feature, \
.driver_data = (unsigned long) _data \
}
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching
+ * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ * The name is expanded to X86_VENDOR_@_vendor
+ * @_family: The family number or X86_FAMILY_ANY
+ * @_model: The model number, model constant or X86_MODEL_ANY
+ * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
+ * @_data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * The steppings argument of X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE() is
+ * set to the X86_STEPPING_ANY wildcard.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, feature, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(vendor, family, model, \
+ X86_STEPPING_ANY, feature, data)
+
/**
* X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
+#define X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(model, steppings, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ steppings, X86_FEATURE_ANY, data)
+
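
Illustration (not part of the patch): the first HASWELL_X entry in the deadline_match[] table further below expands, through the macros above, to roughly this initializer.

	{
		.vendor      = X86_VENDOR_INTEL,
		.family      = 6,
		.model       = INTEL_FAM6_HASWELL_X,
		.steppings   = X86_STEPPINGS(0x2, 0x2),	/* BIT(2) == 0x04 */
		.feature     = X86_FEATURE_ANY,
		.driver_data = (unsigned long) 0x3a,
	}
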
/*
* Match specific microcode revisions.
*
* stale TLB entries and, especially if we're flushing global
* mappings, we don't want the compiler to reorder any subsequent
* memory accesses before the TLB flush.
- *
- * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
- * invpcid (%rcx), %rax in long mode.
*/
- asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
- : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+ asm volatile("invpcid %[desc], %[type]"
+ :: [desc] "m" (desc), [type] "r" (type) : "memory");
}
#define INVPCID_TYPE_INDIV_ADDR 0
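
Illustration (not part of the patch): a self-contained sketch of how the descriptor and type operands feeding this asm might be built; the wrapper name is hypothetical, and the two-u64 descriptor layout (PCID in the low word, linear address in the high word) is the one the INVPCID instruction expects.

static inline void example_invpcid_flush_one(unsigned long pcid,
					     unsigned long addr)
{
	/* 128-bit INVPCID memory operand: PCID, then the linear address. */
	struct { u64 d[2]; } desc = { { pcid, addr } };
	unsigned long type = INVPCID_TYPE_INDIV_ADDR;

	asm volatile("invpcid %[desc], %[type]"
		     :: [desc] "m" (desc), [type] "r" (type) : "memory");
}
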
};
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
-static __init u32 hsx_deadline_rev(void)
-{
- switch (boot_cpu_data.x86_stepping) {
- case 0x02: return 0x3a; /* EP */
- case 0x04: return 0x0f; /* EX */
- }
-
- return ~0U;
-}
-
-static __init u32 bdx_deadline_rev(void)
-{
- switch (boot_cpu_data.x86_stepping) {
- case 0x02: return 0x00000011;
- case 0x03: return 0x0700000e;
- case 0x04: return 0x0f00000c;
- case 0x05: return 0x0e000003;
- }
-
- return ~0U;
-}
-
-static __init u32 skx_deadline_rev(void)
-{
- switch (boot_cpu_data.x86_stepping) {
- case 0x03: return 0x01000136;
- case 0x04: return 0x02000014;
- }
+static const struct x86_cpu_id deadline_match[] __initconst = {
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
- if (boot_cpu_data.x86_stepping > 4)
- return 0;
+ X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020),
- return ~0U;
-}
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
-static const struct x86_cpu_id deadline_match[] __initconst = {
- X86_MATCH_INTEL_FAM6_MODEL( HASWELL_X, &hsx_deadline_rev),
- X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020),
- X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_D, &bdx_deadline_rev),
- X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_X, &skx_deadline_rev),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22),
X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 0x20),
if (!m)
return true;
- /*
- * Function pointers will have the MSB set due to address layout,
- * immediate revisions will not.
- */
- if ((long)m->driver_data < 0)
- rev = ((u32 (*)(void))(m->driver_data))();
- else
- rev = (u32)m->driver_data;
+ rev = (u32)m->driver_data;
if (boot_cpu_data.microcode >= rev)
return true;
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
- AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
-
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
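
Illustration (not part of the patch): assuming the usual AMD_MODEL_RANGE() encoding in amd.c (family in bits 31-24, first model/stepping in bits 23-12, last model/stepping in bits 11-0), the range above decodes as follows.

/*
 * AMD_MODEL_RANGE(0x17, 0x0, 0x0, 0x2f, 0xf)
 *	== (0x17 << 24) | (0x0 << 16) | (0x0 << 12) | (0x2f << 4) | 0xf
 *	== 0x170002ff
 * i.e. family 0x17, model 0x00 stepping 0x0 through model 0x2f stepping 0xf.
 * AMD_LEGACY_ERRATUM() carries no OSVW id, so the erratum is matched purely
 * by this range rather than via the OS Visible Workaround MSRs.
 */
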
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
const struct x86_cpu_id *m;
struct cpuinfo_x86 *c = &boot_cpu_data;
- for (m = match; m->vendor | m->family | m->model | m->feature; m++) {
+ for (m = match;
+ m->vendor | m->family | m->model | m->steppings | m->feature;
+ m++) {
if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
continue;
if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
continue;
if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
continue;
+ if (m->steppings != X86_STEPPING_ANY &&
+ !(BIT(c->x86_stepping) & m->steppings))
+ continue;
if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
continue;
return m;
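
Illustration (not part of the patch): a worked example of the new steppings test.

/*
 * A CPU with x86_stepping == 4 checked against an entry that uses
 * X86_STEPPINGS(0x2, 0x5):
 *
 *	m->steppings         == GENMASK(0x5, 0x2) == 0x3c
 *	BIT(c->x86_stepping) == BIT(4)            == 0x10
 *	0x10 & 0x3c          != 0   ->  the entry matches
 *
 * Entries that leave .steppings at 0 (X86_STEPPING_ANY) skip the test and
 * keep behaving as wildcards, so existing match tables are unaffected.
 */
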
__u16 vendor;
__u16 family;
__u16 model;
+ __u16 steppings;
__u16 feature; /* bit index */
kernel_ulong_t driver_data;
};
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
+#define X86_STEPPING_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
/*