x86/events/amd/iommu: Clean up bitwise operations
author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Fri, 24 Feb 2017 08:48:14 +0000 (02:48 -0600)
committer Ingo Molnar <mingo@kernel.org>
Thu, 30 Mar 2017 07:53:51 +0000 (09:53 +0200)
Clean up register initialization and make use of BIT_ULL(x) where
appropriate. This should not affect logic or functionality.
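
For reference, BIT() and BIT_ULL() are thin wrappers around the same shifts; a minimal
sketch mirroring the kernel's definitions of that era (include/linux/bitops.h), shown
here as a stand-alone program:

#include <stdio.h>

#define BIT(nr)     (1UL  << (nr))   /* unsigned long shift, as in (1UL << 31) */
#define BIT_ULL(nr) (1ULL << (nr))   /* 64-bit shift, safe for bit positions >= 32 */

int main(void)
{
	/* BIT_ULL() keeps the shift 64-bit wide even when nr >= 32. */
	printf("BIT(31)     = %#lx\n",  BIT(31));
	printf("BIT_ULL(40) = %#llx\n", BIT_ULL(40));
	return 0;
}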

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Jörg Rödel <joro@8bytes.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/1487926102-13073-3-git-send-email-Suravee.Suthikulpanit@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/events/amd/iommu.c

index 8d8ed40..e112f49 100644
@@ -164,11 +164,11 @@ static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu)
        for (bank = 0, shift = 0; bank < max_banks; bank++) {
                for (cntr = 0; cntr < max_cntrs; cntr++) {
                        shift = bank + (bank*3) + cntr;
-                       if (perf_iommu->cntr_assign_mask & (1ULL<<shift)) {
+                       if (perf_iommu->cntr_assign_mask & BIT_ULL(shift)) {
                                continue;
                        } else {
-                               perf_iommu->cntr_assign_mask |= (1ULL<<shift);
-                               retval = ((u16)((u16)bank<<8) | (u8)(cntr));
+                               perf_iommu->cntr_assign_mask |= BIT_ULL(shift);
+                               retval = ((bank & 0xFF) << 8) | (cntr & 0xFF);
                                goto out;
                        }
                }
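
The shift arithmetic above works out to bank * 4 + cntr, i.e. cntr_assign_mask reserves
four bits per bank, and the return value packs the bank index into bits 15:8 and the
counter index into bits 7:0 (which the driver's _GET_BANK()/_GET_CNTR() accessors decode
again). A small illustrative sketch of that packing; pack_bnk_cntr()/unpack_*() are
hypothetical names, not part of the driver:

#include <stdint.h>

static inline uint16_t pack_bnk_cntr(uint8_t bank, uint8_t cntr)
{
	/* Same encoding as the retval computed above. */
	return ((bank & 0xFF) << 8) | (cntr & 0xFF);
}

static inline uint8_t unpack_bank(uint16_t val)
{
	return val >> 8;	/* bits 15:8 */
}

static inline uint8_t unpack_cntr(uint16_t val)
{
	return val & 0xFF;	/* bits 7:0 */
}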
@@ -265,23 +265,23 @@ static void perf_iommu_enable_event(struct perf_event *ev)
                        _GET_BANK(ev), _GET_CNTR(ev) ,
                         IOMMU_PC_COUNTER_SRC_REG, &reg, true);
 
-       reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32);
+       reg = devid | (_GET_DEVID_MASK(ev) << 32);
        if (reg)
-               reg |= (1UL << 31);
+               reg |= BIT(31);
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev) ,
                         IOMMU_PC_DEVID_MATCH_REG, &reg, true);
 
-       reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
+       reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
        if (reg)
-               reg |= (1UL << 31);
+               reg |= BIT(31);
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev) ,
                         IOMMU_PC_PASID_MATCH_REG, &reg, true);
 
-       reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
+       reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
        if (reg)
-               reg |= (1UL << 31);
+               reg |= BIT(31);
        amd_iommu_pc_get_set_reg_val(devid,
                        _GET_BANK(ev), _GET_CNTR(ev) ,
                         IOMMU_PC_DOMID_MATCH_REG, &reg, true);
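
All three match registers are programmed the same way: the match value sits in the low
bits, the corresponding mask is placed in the upper 32 bits, and bit 31 is set only when
a non-zero value or mask is supplied, which presumably enables the filter in hardware. A
hypothetical helper capturing that pattern (build_match_reg() and MATCH_EN_BIT are
illustrative names only):

#include <stdint.h>

#define MATCH_EN_BIT	(1ULL << 31)	/* assumed filter-enable bit, per the code above */

static inline uint64_t build_match_reg(uint64_t match, uint64_t mask)
{
	uint64_t reg = match | (mask << 32);

	if (reg)		/* enable only when a filter is actually programmed */
		reg |= MATCH_EN_BIT;

	return reg;
}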