[PATCH 00/11] target/arm: Implement ARMv8.1-PAN + ARMv8.2-ATS1E1

Previous Topic Next Topic
 
classic Classic list List threaded Threaded
24 messages Options
12
Reply | Threaded
Open this post in threaded view
|

[PATCH 00/11] target/arm: Implement ARMv8.1-PAN + ARMv8.2-ATS1E1

Richard Henderson-3
Based-on: <[hidden email]>
("target/arm: Implement ARMv8.1-VHE")

At least the PAN portion is supported in the Linux kernel,
and thus easily tested.  The ats1e1 extension is closely
related, reusing the same mmu_idx to implement.


r~


Richard Henderson (11):
  cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN
  target/arm: Add arm_mmu_idx_is_stage1
  target/arm: Add mmu_idx for EL1 and EL2 w/ PAN enabled
  target/arm: Reduce CPSR_RESERVED
  target/arm: Add isar_feature tests for PAN + ATS1E1
  target/arm: Update MSR access for PAN
  target/arm: Update arm_mmu_idx_el for PAN
  target/arm: Enforce PAN semantics in get_S1prot
  target/arm: Set PAN bit as required on exception entry
  target/arm: Implement ATS1E1 system registers
  target/arm: Enable ARMv8.2-ATS1E1 in -cpu max

 target/arm/cpu-param.h     |   2 +-
 target/arm/cpu.h           |  74 +++++++++++---
 target/arm/internals.h     |  33 +++++++
 accel/tcg/cputlb.c         | 167 ++++++++++++++++++++++++-------
 target/arm/cpu.c           |   4 +
 target/arm/cpu64.c         |   5 +
 target/arm/helper.c        | 197 ++++++++++++++++++++++++++++++++-----
 target/arm/translate-a64.c |  16 +++
 target/arm/translate.c     |   6 ++
 9 files changed, 428 insertions(+), 76 deletions(-)

--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 01/11] cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN

Richard Henderson-3
In target/arm we will shortly have "too many" mmu_idx.
The current minimum barrier is caused by the way in which
tlb_flush_page_by_mmuidx is coded.

We can remove this limitation by allocating memory for
consumption by the worker.  Let us assume that this is
the unlikely case, as will be the case for the majority
of targets which have so far satisfied the BUILD_BUG_ON,
and only allocate memory when necessary.

Signed-off-by: Richard Henderson <[hidden email]>
---
 accel/tcg/cputlb.c | 167 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 132 insertions(+), 35 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 98221948d6..0c2adb93ea 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -447,28 +447,29 @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
     }
 }
 
-/* As we are going to hijack the bottom bits of the page address for a
- * mmuidx bit mask we need to fail to build if we can't do that
+/**
+ * tlb_flush_page_by_mmuidx_async_0:
+ * @cpu: cpu on which to flush
+ * @addr: page of virtual address to flush
+ * @idxmap: set of mmu_idx to flush
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
  */
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
-
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
-                                                run_on_cpu_data data)
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
+                                             target_ulong addr,
+                                             uint16_t idxmap)
 {
     CPUArchState *env = cpu->env_ptr;
-    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
-    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
-    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
     int mmu_idx;
 
     assert_cpu_is_self(cpu);
 
-    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
-              addr, mmu_idx_bitmap);
+    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
 
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
+        if ((idxmap >> mmu_idx) & 1) {
             tlb_flush_page_locked(env, mmu_idx, addr);
         }
     }
@@ -477,22 +478,75 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
     tb_flush_jmp_cache(cpu, addr);
 }
 
+/**
+ * tlb_flush_page_by_mmuidx_async_1:
+ * @cpu: cpu on which to flush
+ * @data: encoded addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu.  The idxmap parameter is encoded in the page
+ * offset of the target_ptr field.  This limits the set of mmu_idx
+ * that can be passed via this method.
+ */
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
+                                             run_on_cpu_data data)
+{
+    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
+    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+
+    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+}
+
+typedef struct {
+    target_ulong addr;
+    uint16_t idxmap;
+} TLBFlushPageByMMUIdxData;
+
+/**
+ * tlb_flush_page_by_mmuidx_async_2:
+ * @cpu: cpu on which to flush
+ * @data: allocated addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu.  The addr+idxmap parameters are stored in a
+ * TLBFlushPageByMMUIdxData structure that has been allocated
+ * specifically for this helper.  Free the structure when done.
+ */
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
+                                             run_on_cpu_data data)
+{
+    TLBFlushPageByMMUIdxData *d = data.host_ptr;
+
+    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
+    g_free(d);
+}
+
 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
 {
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
 
     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;
 
-    if (!qemu_cpu_is_self(cpu)) {
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
-                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    if (qemu_cpu_is_self(cpu)) {
+        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+    } else if (idxmap < TARGET_PAGE_SIZE) {
+        /*
+         * Most targets have only a few mmu_idx.  In the case where
+         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
+         * allocating memory for this operation.
+         */
+        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
     } else {
-        tlb_flush_page_by_mmuidx_async_work(
-            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
+
+        /* Otherwise allocate a structure, freed by the worker.  */
+        d->addr = addr;
+        d->idxmap = idxmap;
+        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
+                         RUN_ON_CPU_HOST_PTR(d));
     }
 }
 
@@ -504,17 +558,36 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
 void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                        uint16_t idxmap)
 {
-    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
 
     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;
 
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    /*
+     * Allocate memory to hold addr+idxmap only when needed.
+     * See tlb_flush_page_by_mmuidx for details.
+     */
+    if (idxmap < TARGET_PAGE_SIZE) {
+        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+    } else {
+        CPUState *dst_cpu;
+
+        /* Allocate a separate data block for each destination cpu.  */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                TLBFlushPageByMMUIdxData *d
+                    = g_new(TLBFlushPageByMMUIdxData, 1);
+
+                d->addr = addr;
+                d->idxmap = idxmap;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(d));
+            }
+        }
+    }
+
+    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
 }
 
 void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
@@ -526,17 +599,41 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               uint16_t idxmap)
 {
-    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
-    target_ulong addr_and_mmu_idx;
-
     tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
 
     /* This should already be page aligned */
-    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
-    addr_and_mmu_idx |= idxmap;
+    addr &= TARGET_PAGE_MASK;
 
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+    /*
+     * Allocate memory to hold addr+idxmap only when needed.
+     * See tlb_flush_page_by_mmuidx for details.
+     */
+    if (idxmap < TARGET_PAGE_SIZE) {
+        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+    } else {
+        CPUState *dst_cpu;
+        TLBFlushPageByMMUIdxData *d;
+
+        /* Allocate a separate data block for each destination cpu.  */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                d = g_new(TLBFlushPageByMMUIdxData, 1);
+                d->addr = addr;
+                d->idxmap = idxmap;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(d));
+            }
+        }
+
+        d = g_new(TLBFlushPageByMMUIdxData, 1);
+        d->addr = addr;
+        d->idxmap = idxmap;
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
+                              RUN_ON_CPU_HOST_PTR(d));
+    }
 }
 
 void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 02/11] target/arm: Add arm_mmu_idx_is_stage1

Richard Henderson-3
In reply to this post by Richard Henderson-3
Use a common predicate for querying stage1-ness.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/internals.h | 11 +++++++++++
 target/arm/helper.c    |  8 +++-----
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index 49dac2a677..850f204f14 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1034,6 +1034,17 @@ static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
 #endif
 
+static inline bool arm_mmu_idx_is_stage1(ARMMMUIdx mmu_idx)
+{
+    switch (mmu_idx) {
+    case ARMMMUIdx_Stage1_E0:
+    case ARMMMUIdx_Stage1_E1:
+        return true;
+    default:
+        return false;
+    }
+}
+
 /*
  * Parameters of a given virtual address, as extracted from the
  * translation control register (TCR) for a given regime.
diff --git a/target/arm/helper.c b/target/arm/helper.c
index f3785d5ad6..fdb86ea427 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3212,8 +3212,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         bool take_exc = false;
 
         if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
-            && (mmu_idx == ARMMMUIdx_Stage1_E1
-                || mmu_idx == ARMMMUIdx_Stage1_E0)) {
+            && arm_mmu_idx_is_stage1(mmu_idx)) {
             /*
              * Synchronous stage 2 fault on an access made as part of the
              * translation table walk for AT S1E0* or AT S1E1* insn
@@ -9159,8 +9158,7 @@ static inline bool regime_translation_disabled(CPUARMState *env,
         }
     }
 
-    if ((env->cp15.hcr_el2 & HCR_DC) &&
-        (mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1)) {
+    if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1(mmu_idx)) {
         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
         return true;
     }
@@ -9469,7 +9467,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                                hwaddr addr, MemTxAttrs txattrs,
                                ARMMMUFaultInfo *fi)
 {
-    if ((mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1) &&
+    if (arm_mmu_idx_is_stage1(mmu_idx) &&
         !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
         target_ulong s2size;
         hwaddr s2pa;
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 03/11] target/arm: Add mmu_idx for EL1 and EL2 w/ PAN enabled

Richard Henderson-3
In reply to this post by Richard Henderson-3
To implement PAN, we will want to swap, for short periods
of time, to a different privileged mmu_idx.  In addition,
we cannot do this with flushing alone, because the AT*
instructions have both PAN and PAN-less versions.

Add the ARMMMUIdx*_PAN constants where necessary next to
the corresponding ARMMMUIdx* constant.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/cpu-param.h     |  2 +-
 target/arm/cpu.h           | 35 +++++++++++++--------
 target/arm/internals.h     |  9 ++++++
 target/arm/helper.c        | 63 +++++++++++++++++++++++++++++++-------
 target/arm/translate-a64.c |  2 ++
 target/arm/translate.c     |  2 ++
 6 files changed, 88 insertions(+), 25 deletions(-)

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 18ac562346..d593b60b28 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -29,6 +29,6 @@
 # define TARGET_PAGE_BITS_MIN  10
 #endif
 
-#define NB_MMU_MODES 9
+#define NB_MMU_MODES 12
 
 #endif
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 22935e4433..22c5706835 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2715,20 +2715,22 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
  *  5. we want to be able to use the TLB for accesses done as part of a
  *     stage1 page table walk, rather than having to walk the stage2 page
  *     table over and over.
+ *  6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ *     Never (PAN) bit within PSTATE.
  *
  * This gives us the following list of cases:
  *
  * NS EL0 (aka NS PL0) EL1&0 stage 1+2
- * NS EL1 (aka NS PL1) EL1&0 stage 1+2
+ * NS EL1 (aka NS PL1) EL1&0 stage 1+2 (+PAN)
  * NS EL0 EL2&0
- * NS EL2 EL2&0
+ * NS EL2 EL2&0 (+PAN)
  * NS EL2 (aka NS PL2)
  * S EL0 (aka S PL0)
- * S EL1 (not used if EL3 is 32 bit)
+ * S EL1 (not used if EL3 is 32 bit) (+PAN)
  * S EL3 (aka S PL1)
  * NS EL0&1 stage 2
  *
- * for a total of 9 different mmu_idx.
+ * for a total of 12 different mmu_idx.
  *
  * R profile CPUs have an MPU, but can use the same set of MMU indexes
  * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
@@ -2783,19 +2785,22 @@ typedef enum ARMMMUIdx {
     /*
      * A-profile.
      */
-    ARMMMUIdx_EL10_0 = 0 | ARM_MMU_IDX_A,
-    ARMMMUIdx_EL20_0 = 1 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL10_0 =     0 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL20_0 =     1 | ARM_MMU_IDX_A,
 
-    ARMMMUIdx_EL10_1 = 2 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL10_1 =     2 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL10_1_PAN = 3 | ARM_MMU_IDX_A,
 
-    ARMMMUIdx_E2 =     3 | ARM_MMU_IDX_A,
-    ARMMMUIdx_EL20_2 = 4 | ARM_MMU_IDX_A,
+    ARMMMUIdx_E2 =         4 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL20_2 =     5 | ARM_MMU_IDX_A,
+    ARMMMUIdx_EL20_2_PAN = 6 | ARM_MMU_IDX_A,
 
-    ARMMMUIdx_SE0 =    5 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE1 =    6 | ARM_MMU_IDX_A,
-    ARMMMUIdx_SE3 =    7 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE0 =        7 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE1 =        8 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE1_PAN =    9 | ARM_MMU_IDX_A,
+    ARMMMUIdx_SE3 =       10 | ARM_MMU_IDX_A,
 
-    ARMMMUIdx_Stage2 = 8 | ARM_MMU_IDX_A,
+    ARMMMUIdx_Stage2 =    11 | ARM_MMU_IDX_A,
 
     /*
      * These are not allocated TLBs and are used only for AT system
@@ -2803,6 +2808,7 @@ typedef enum ARMMMUIdx {
      */
     ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
     ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
+    ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
 
     /*
      * M-profile.
@@ -2828,10 +2834,13 @@ typedef enum ARMMMUIdxBit {
     TO_CORE_BIT(EL10_0),
     TO_CORE_BIT(EL20_0),
     TO_CORE_BIT(EL10_1),
+    TO_CORE_BIT(EL10_1_PAN),
     TO_CORE_BIT(E2),
     TO_CORE_BIT(EL20_2),
+    TO_CORE_BIT(EL20_2_PAN),
     TO_CORE_BIT(SE0),
     TO_CORE_BIT(SE1),
+    TO_CORE_BIT(SE1_PAN),
     TO_CORE_BIT(SE3),
     TO_CORE_BIT(Stage2),
 
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 850f204f14..2408953031 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -843,12 +843,16 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_EL10_0:
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
     case ARMMMUIdx_EL20_0:
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
     case ARMMMUIdx_SE0:
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE1_PAN:
         return true;
     default:
         return false;
@@ -861,10 +865,13 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_EL10_0:
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
     case ARMMMUIdx_EL20_0:
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_E2:
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_MPrivNegPri:
@@ -874,6 +881,7 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
         return false;
     case ARMMMUIdx_SE0:
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE1_PAN:
     case ARMMMUIdx_SE3:
     case ARMMMUIdx_MSPrivNegPri:
     case ARMMMUIdx_MSUserNegPri:
@@ -1039,6 +1047,7 @@ static inline bool arm_mmu_idx_is_stage1(ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_Stage1_E1_PAN:
         return true;
     default:
         return false;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index fdb86ea427..4e3fe00316 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -671,6 +671,7 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     tlb_flush_by_mmuidx(cs,
                         ARMMMUIdxBit_EL10_1 |
+                        ARMMMUIdxBit_EL10_1_PAN |
                         ARMMMUIdxBit_EL10_0 |
                         ARMMMUIdxBit_Stage2);
 }
@@ -682,6 +683,7 @@ static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                         ARMMMUIdxBit_EL10_1 |
+                                        ARMMMUIdxBit_EL10_1_PAN |
                                         ARMMMUIdxBit_EL10_0 |
                                         ARMMMUIdxBit_Stage2);
 }
@@ -2660,6 +2662,7 @@ static int gt_phys_redir_timeridx(CPUARMState *env)
     switch (arm_mmu_idx(env)) {
     case ARMMMUIdx_EL20_0:
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
         return GTIMER_HYP;
     default:
         return GTIMER_PHYS;
@@ -2671,6 +2674,7 @@ static int gt_virt_redir_timeridx(CPUARMState *env)
     switch (arm_mmu_idx(env)) {
     case ARMMMUIdx_EL20_0:
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
         return GTIMER_HYPVIRT;
     default:
         return GTIMER_VIRT;
@@ -3288,7 +3292,9 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
 
         if (arm_feature(env, ARM_FEATURE_EL2)) {
-            if (mmu_idx == ARMMMUIdx_EL10_0 || mmu_idx == ARMMMUIdx_EL10_1) {
+            if (mmu_idx == ARMMMUIdx_EL10_0 ||
+                mmu_idx == ARMMMUIdx_EL10_1 ||
+                mmu_idx == ARMMMUIdx_EL10_1_PAN) {
                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
             } else {
                 format64 |= arm_current_el(env) == 2;
@@ -3746,7 +3752,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
     if ((arm_hcr_el2_eff(env) & HCR_E2H) &&
         extract64(raw_read(env, ri) ^ value, 48, 16)) {
         tlb_flush_by_mmuidx(env_cpu(env),
-                            ARMMMUIdxBit_EL20_2 | ARMMMUIdxBit_EL20_0);
+                            ARMMMUIdxBit_EL20_2 |
+                            ARMMMUIdxBit_EL20_2_PAN |
+                            ARMMMUIdxBit_EL20_0);
     }
     raw_write(env, ri, value);
 }
@@ -3764,6 +3772,7 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     if (raw_read(env, ri) != value) {
         tlb_flush_by_mmuidx(cs,
                             ARMMMUIdxBit_EL10_1 |
+                            ARMMMUIdxBit_EL10_1_PAN |
                             ARMMMUIdxBit_EL10_0 |
                             ARMMMUIdxBit_Stage2);
         raw_write(env, ri, value);
@@ -4124,12 +4133,18 @@ static int vae1_tlbmask(CPUARMState *env)
 {
     /* Since we exclude secure first, we may read HCR_EL2 directly. */
     if (arm_is_secure_below_el3(env)) {
-        return ARMMMUIdxBit_SE1 | ARMMMUIdxBit_SE0;
+        return ARMMMUIdxBit_SE1 |
+               ARMMMUIdxBit_SE1_PAN |
+               ARMMMUIdxBit_SE0;
     } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
                == (HCR_E2H | HCR_TGE)) {
-        return ARMMMUIdxBit_EL20_2 | ARMMMUIdxBit_EL20_0;
+        return ARMMMUIdxBit_EL20_2 |
+               ARMMMUIdxBit_EL20_2_PAN |
+               ARMMMUIdxBit_EL20_0;
     } else {
-        return ARMMMUIdxBit_EL10_1 | ARMMMUIdxBit_EL10_0;
+        return ARMMMUIdxBit_EL10_1 |
+               ARMMMUIdxBit_EL10_1_PAN |
+               ARMMMUIdxBit_EL10_0;
     }
 }
 
@@ -4165,14 +4180,23 @@ static int vmalle1_tlbmask(CPUARMState *env)
      * Since we exclude secure first, we may read HCR_EL2 directly.
      */
     if (arm_is_secure_below_el3(env)) {
-        return ARMMMUIdxBit_SE1 | ARMMMUIdxBit_SE0;
+        return ARMMMUIdxBit_SE1 |
+               ARMMMUIdxBit_SE1_PAN |
+               ARMMMUIdxBit_SE0;
     } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
                == (HCR_E2H | HCR_TGE)) {
-        return ARMMMUIdxBit_EL20_2 | ARMMMUIdxBit_EL20_0;
+        return ARMMMUIdxBit_EL20_2 |
+               ARMMMUIdxBit_EL20_2_PAN |
+               ARMMMUIdxBit_EL20_0;
     } else if (arm_feature(env, ARM_FEATURE_EL2)) {
-        return ARMMMUIdxBit_EL10_1 | ARMMMUIdxBit_EL10_0 | ARMMMUIdxBit_Stage2;
+        return ARMMMUIdxBit_EL10_1 |
+               ARMMMUIdxBit_EL10_1_PAN |
+               ARMMMUIdxBit_EL10_0 |
+               ARMMMUIdxBit_Stage2;
     } else {
-        return ARMMMUIdxBit_EL10_1 | ARMMMUIdxBit_EL10_0;
+        return ARMMMUIdxBit_EL10_1 |
+               ARMMMUIdxBit_EL10_1_PAN |
+               ARMMMUIdxBit_EL10_0;
     }
 }
 
@@ -4188,7 +4212,9 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static int vae2_tlbmask(CPUARMState *env)
 {
     if (arm_hcr_el2_eff(env) & HCR_E2H) {
-        return ARMMMUIdxBit_EL20_0 | ARMMMUIdxBit_EL20_2;
+        return ARMMMUIdxBit_EL20_0 |
+               ARMMMUIdxBit_EL20_2 |
+               ARMMMUIdxBit_EL20_2_PAN;
     } else {
         return ARMMMUIdxBit_E2;
     }
@@ -9080,6 +9106,7 @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     switch (mmu_idx) {
     case ARMMMUIdx_EL20_0:
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_E2:
         return 2;
@@ -9088,10 +9115,13 @@ static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     case ARMMMUIdx_SE0:
         return arm_el_is_aa64(env, 3) ? 1 : 3;
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE1_PAN:
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
+    case ARMMMUIdx_Stage1_E1_PAN:
     case ARMMMUIdx_EL10_0:
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
     case ARMMMUIdx_MPrivNegPri:
     case ARMMMUIdx_MUserNegPri:
     case ARMMMUIdx_MPriv:
@@ -9207,6 +9237,8 @@ static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
         return ARMMMUIdx_Stage1_E0;
     case ARMMMUIdx_EL10_1:
         return ARMMMUIdx_Stage1_E1;
+    case ARMMMUIdx_EL10_1_PAN:
+        return ARMMMUIdx_Stage1_E1_PAN;
     default:
         return mmu_idx;
     }
@@ -9253,6 +9285,7 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
         return false;
     case ARMMMUIdx_EL10_0:
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
         g_assert_not_reached();
     }
 }
@@ -11145,7 +11178,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                    target_ulong *page_size,
                    ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
-    if (mmu_idx == ARMMMUIdx_EL10_0 || mmu_idx == ARMMMUIdx_EL10_1) {
+    if (mmu_idx == ARMMMUIdx_EL10_0 ||
+        mmu_idx == ARMMMUIdx_EL10_1 ||
+        mmu_idx == ARMMMUIdx_EL10_1_PAN) {
         /* Call ourselves recursively to do the stage 1 and then stage 2
          * translations.
          */
@@ -11672,10 +11707,13 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
     case ARMMMUIdx_SE0:
         return 0;
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE1_PAN:
         return 1;
     case ARMMMUIdx_E2:
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
         return 2;
     case ARMMMUIdx_SE3:
         return 3;
@@ -11886,11 +11924,14 @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
     /* TODO: ARMv8.2-UAO */
     switch (mmu_idx) {
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE1_PAN:
         /* TODO: ARMv8.3-NV */
         flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
         break;
     case ARMMMUIdx_EL20_2:
+    case ARMMMUIdx_EL20_2_PAN:
         /* TODO: ARMv8.4-SecEL2 */
         /*
          * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fe492bea90..b5c7bc2d76 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -124,9 +124,11 @@ static int get_a64_user_mem_index(DisasContext *s)
          */
         switch (useridx) {
         case ARMMMUIdx_EL10_1:
+        case ARMMMUIdx_EL10_1_PAN:
             useridx = ARMMMUIdx_EL10_0;
             break;
         case ARMMMUIdx_EL20_2:
+        case ARMMMUIdx_EL20_2_PAN:
             useridx = ARMMMUIdx_EL20_0;
             break;
         case ARMMMUIdx_SE1:
diff --git a/target/arm/translate.c b/target/arm/translate.c
index b7f726e733..47a374b53d 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -155,10 +155,12 @@ static inline int get_a32_user_mem_index(DisasContext *s)
     case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
     case ARMMMUIdx_EL10_0:
     case ARMMMUIdx_EL10_1:
+    case ARMMMUIdx_EL10_1_PAN:
         return arm_to_core_mmu_idx(ARMMMUIdx_EL10_0);
     case ARMMMUIdx_SE3:
     case ARMMMUIdx_SE0:
     case ARMMMUIdx_SE1:
+    case ARMMMUIdx_SE1_PAN:
         return arm_to_core_mmu_idx(ARMMMUIdx_SE0);
     case ARMMMUIdx_MUser:
     case ARMMMUIdx_MPriv:
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 04/11] target/arm: Reduce CPSR_RESERVED

Richard Henderson-3
In reply to this post by Richard Henderson-3
Since v8.0, the CPSR_RESERVED bits have been allocated.
We are not yet implementing ARMv8.4-DIT; retain CPSR_RESERVED,
since that overlaps with our current hack for AA32 single step.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/cpu.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 22c5706835..49dc436e5e 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1149,12 +1149,16 @@ void pmu_init(ARMCPU *cpu);
 #define CPSR_IT_2_7 (0xfc00U)
 #define CPSR_GE (0xfU << 16)
 #define CPSR_IL (1U << 20)
-/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
+/*
+ * Note that the RESERVED bits include bit 21, which is PSTATE_SS in
  * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
  * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
  * where it is live state but not accessible to the AArch32 code.
+ *
+ * TODO: With ARMv8.4-DIT, bit 21 is DIT in AArch32 (bit 24 for AArch64).
+ * We will need to move AArch32 SS somewhere else at that point.
  */
-#define CPSR_RESERVED (0x7U << 21)
+#define CPSR_RESERVED (1U << 21)
 #define CPSR_J (1U << 24)
 #define CPSR_IT_0_1 (3U << 25)
 #define CPSR_Q (1U << 27)
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 05/11] target/arm: Add isar_feature tests for PAN + ATS1E1

Richard Henderson-3
In reply to this post by Richard Henderson-3
Include definitions for all of the bits in ID_MMFR3.
We already have a definition for ID_AA64MMFR1.PAN.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/cpu.h | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 49dc436e5e..170dd5b124 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1694,6 +1694,15 @@ FIELD(ID_ISAR6, FHM, 8, 4)
 FIELD(ID_ISAR6, SB, 12, 4)
 FIELD(ID_ISAR6, SPECRES, 16, 4)
 
+FIELD(ID_MMFR3, CMAINTVA, 0, 4)
+FIELD(ID_MMFR3, CMAINTSW, 4, 4)
+FIELD(ID_MMFR3, BPMAINT, 8, 4)
+FIELD(ID_MMFR3, MAINTBCST, 12, 4)
+FIELD(ID_MMFR3, PAN, 16, 4)
+FIELD(ID_MMFR3, COHWALK, 20, 4)
+FIELD(ID_MMFR3, CMEMSZ, 24, 4)
+FIELD(ID_MMFR3, SUPERSEC, 28, 4)
+
 FIELD(ID_MMFR4, SPECSEI, 0, 4)
 FIELD(ID_MMFR4, AC2, 4, 4)
 FIELD(ID_MMFR4, XNX, 8, 4)
@@ -3401,6 +3410,16 @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
     return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 4;
 }
 
+static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_mmfr3, ID_MMFR3, PAN) != 0;
+}
+
+static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
+}
+
 /*
  * 64-bit feature tests via id registers.
  */
@@ -3550,6 +3569,16 @@ static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
 }
 
+static inline bool isar_feature_aa64_pan(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0;
+}
+
+static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
+}
+
 static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 06/11] target/arm: Update MSR access for PAN

Richard Henderson-3
In reply to this post by Richard Henderson-3
For aarch64, there's a dedicated msr (imm, reg) insn.
For aarch32, this is done via msr to cpsr; and writes
from el0 are ignored.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/cpu.h           |  2 ++
 target/arm/helper.c        | 22 ++++++++++++++++++++++
 target/arm/translate-a64.c | 14 ++++++++++++++
 target/arm/translate.c     |  4 ++++
 4 files changed, 42 insertions(+)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 170dd5b124..f0e61bf34f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1159,6 +1159,7 @@ void pmu_init(ARMCPU *cpu);
  * We will need to move AArch32 SS somewhere else at that point.
  */
 #define CPSR_RESERVED (1U << 21)
+#define CPSR_PAN (1U << 22)
 #define CPSR_J (1U << 24)
 #define CPSR_IT_0_1 (3U << 25)
 #define CPSR_Q (1U << 27)
@@ -1225,6 +1226,7 @@ void pmu_init(ARMCPU *cpu);
 #define PSTATE_BTYPE (3U << 10)
 #define PSTATE_IL (1U << 20)
 #define PSTATE_SS (1U << 21)
+#define PSTATE_PAN (1U << 22)
 #define PSTATE_V (1U << 28)
 #define PSTATE_C (1U << 29)
 #define PSTATE_Z (1U << 30)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 4e3fe00316..512be5c644 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -4112,6 +4112,17 @@ static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
     env->daif = value & PSTATE_DAIF;
 }
 
+static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    return env->pstate & PSTATE_PAN;
+}
+
+static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                           uint64_t value)
+{
+    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
+}
+
 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
@@ -7405,6 +7416,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         define_arm_cp_regs(cpu, lor_reginfo);
     }
 
+    if (cpu_isar_feature(aa64_pan, cpu)) {
+        static const ARMCPRegInfo pan_reginfo[] = {
+            { .name = "PAN", .state = ARM_CP_STATE_AA64,
+              .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
+              .type = ARM_CP_NO_RAW, .access = PL1_RW,
+              .readfn = aa64_pan_read, .writefn = aa64_pan_write, },
+            REGINFO_SENTINEL
+        };
+        define_arm_cp_regs(cpu, pan_reginfo);
+    }
+
     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
         static const ARMCPRegInfo vhe_reginfo[] = {
             { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b5c7bc2d76..7f5a68106b 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1601,6 +1601,20 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
         s->base.is_jmp = DISAS_NEXT;
         break;
 
+    case 0x04: /* PAN */
+        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
+            goto do_unallocated;
+        }
+        if (crm & 1) {
+            set_pstate_bits(PSTATE_PAN);
+        } else {
+            clear_pstate_bits(PSTATE_PAN);
+        }
+        t1 = tcg_const_i32(s->current_el);
+        gen_helper_rebuild_hflags_a64(cpu_env, t1);
+        tcg_temp_free_i32(t1);
+        break;
+
     case 0x05: /* SPSel */
         if (s->current_el == 0) {
             goto do_unallocated;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 47a374b53d..98e6072dd4 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -2785,6 +2785,10 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
         tcg_gen_or_i32(tmp, tmp, t0);
         store_cpu_field(tmp, spsr);
     } else {
+        /* Data writes to CPSR.PAN using an MSR insn at EL0 are ignored.  */
+        if (IS_USER(s)) {
+            mask &= ~CPSR_PAN;
+        }
         gen_set_cpsr(t0, mask);
     }
     tcg_temp_free_i32(t0);
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 07/11] target/arm: Update arm_mmu_idx_el for PAN

Richard Henderson-3
In reply to this post by Richard Henderson-3
Examine the PAN bit for EL1, EL2, and Secure EL1 to
determine if it applies.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/helper.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 512be5c644..6c65dd799e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -11770,13 +11770,22 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
         return ARMMMUIdx_EL10_0;
     case 1:
         if (arm_is_secure_below_el3(env)) {
+            if (env->pstate & PSTATE_PAN) {
+                return ARMMMUIdx_SE1_PAN;
+            }
             return ARMMMUIdx_SE1;
         }
+        if (env->pstate & PSTATE_PAN) {
+            return ARMMMUIdx_EL10_1_PAN;
+        }
         return ARMMMUIdx_EL10_1;
     case 2:
         /* TODO: ARMv8.4-SecEL2 */
         /* Note that TGE does not apply at EL2.  */
         if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
+            if (env->pstate & PSTATE_PAN) {
+                return ARMMMUIdx_EL20_2_PAN;
+            }
             return ARMMMUIdx_EL20_2;
         }
         return ARMMMUIdx_E2;
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 08/11] target/arm: Enforce PAN semantics in get_S1prot

Richard Henderson-3
In reply to this post by Richard Henderson-3
If we have a PAN-enforcing mmu_idx, set prot == 0 if user_rw != 0.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/internals.h | 13 +++++++++++++
 target/arm/helper.c    |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index 2408953031..ab3b436379 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -893,6 +893,19 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
     }
 }
 
+static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    switch (mmu_idx) {
+    case ARMMMUIdx_Stage1_E1_PAN:
+    case ARMMMUIdx_EL10_1_PAN:
+    case ARMMMUIdx_EL20_2_PAN:
+    case ARMMMUIdx_SE1_PAN:
+        return true;
+    default:
+        return false;
+    }
+}
+
 /* Return the FSR value for a debug exception (watchpoint, hardware
  * breakpoint or BKPT insn) targeting the specified exception level.
  */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 6c65dd799e..a1dbafb9b2 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -9444,6 +9444,9 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
     if (is_user) {
         prot_rw = user_rw;
     } else {
+        if (user_rw && regime_is_pan(env, mmu_idx)) {
+            return 0;
+        }
         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
     }
 
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 09/11] target/arm: Set PAN bit as required on exception entry

Richard Henderson-3
In reply to this post by Richard Henderson-3
The PAN bit is preserved, or set as per SCTLR_ELx.SPAN,
plus several other conditions listed in the ARM ARM.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/helper.c | 42 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index a1dbafb9b2..043e44d73d 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -8634,8 +8634,12 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                    uint32_t mask, uint32_t offset,
                                    uint32_t newpc)
 {
+    int new_el;
+
     /* Change the CPU state so as to actually take the exception. */
     switch_mode(env, new_mode);
+    new_el = arm_current_el(env);
+
     /*
      * For exceptions taken to AArch32 we must clear the SS bit in both
      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
@@ -8648,7 +8652,7 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
     /* Set new mode endianness */
     env->uncached_cpsr &= ~CPSR_E;
-    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
         env->uncached_cpsr |= CPSR_E;
     }
     /* J and IL must always be cleared for exception entry */
@@ -8659,6 +8663,14 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode,
         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
         env->elr_el[2] = env->regs[15];
     } else {
+        /* CPSR.PAN is preserved unless target is EL1 and SCTLR.SPAN == 0. */
+        if (cpu_isar_feature(aa64_pan, env_archcpu(env))) {
+            env->uncached_cpsr |=
+                (new_el == 1 &&
+                 (env->cp15.sctlr_el[1] & SCTLR_SPAN) == 0
+                 ? CPSR_PAN
+                 : env->spsr & CPSR_PAN);
+        }
         /*
          * this is a lie, as there was no c1_sys on V4T/V5, but who cares
          * and we should just guard the thumb mode on V4
@@ -8921,6 +8933,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     unsigned int new_el = env->exception.target_el;
     target_ulong addr = env->cp15.vbar_el[new_el];
     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+    unsigned int old_mode;
     unsigned int cur_el = arm_current_el(env);
 
     /*
@@ -9006,20 +9019,43 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
     }
 
     if (is_a64(env)) {
-        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+        old_mode = pstate_read(env);
         aarch64_save_sp(env, arm_current_el(env));
         env->elr_el[new_el] = env->pc;
     } else {
-        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
+        old_mode = cpsr_read(env);
         env->elr_el[new_el] = env->regs[15];
 
         aarch64_sync_32_to_64(env);
 
         env->condexec_bits = 0;
     }
+    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
+
     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                   env->elr_el[new_el]);
 
+    if (cpu_isar_feature(aa64_pan, cpu)) {
+        /* The value of PSTATE.PAN is normally preserved, except when ... */
+        new_mode |= old_mode & PSTATE_PAN;
+        switch (new_el) {
+        case 2:
+            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
+            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
+                != (HCR_E2H | HCR_TGE)) {
+                break;
+            }
+            /* fall through */
+        case 1:
+            /* ... the target is EL1 ... */
+            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
+            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
+                new_mode |= PSTATE_PAN;
+            }
+            break;
+        }
+    }
+
     pstate_write(env, PSTATE_DAIF | new_mode);
     env->aarch64 = 1;
     aarch64_restore_sp(env, new_el);
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 10/11] target/arm: Implement ATS1E1 system registers

Richard Henderson-3
In reply to this post by Richard Henderson-3
This is a minor enhancement over ARMv8.1-PAN.
The *_PAN mmu_idx are used with the existing do_ats_write.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/helper.c | 50 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 44 insertions(+), 6 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 043e44d73d..f1eab4fb28 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3360,16 +3360,20 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 
     switch (ri->opc2 & 6) {
     case 0:
-        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
+        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
         switch (el) {
         case 3:
             mmu_idx = ARMMMUIdx_SE3;
             break;
         case 2:
-            mmu_idx = ARMMMUIdx_Stage1_E1;
-            break;
+            g_assert(!secure);  /* TODO: ARMv8.4-SecEL2 */
+            /* fall through */
         case 1:
-            mmu_idx = secure ? ARMMMUIdx_SE1 : ARMMMUIdx_Stage1_E1;
+            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
+                mmu_idx = secure ? ARMMMUIdx_SE1_PAN : ARMMMUIdx_Stage1_E1_PAN;
+            } else {
+                mmu_idx = secure ? ARMMMUIdx_SE1 : ARMMMUIdx_Stage1_E1;
+            }
             break;
         default:
             g_assert_not_reached();
@@ -3438,8 +3442,12 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
     switch (ri->opc2 & 6) {
     case 0:
         switch (ri->opc1) {
-        case 0: /* AT S1E1R, AT S1E1W */
-            mmu_idx = secure ? ARMMMUIdx_SE1 : ARMMMUIdx_Stage1_E1;
+        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
+            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
+                mmu_idx = secure ? ARMMMUIdx_SE1_PAN : ARMMMUIdx_Stage1_E1_PAN;
+            } else {
+                mmu_idx = secure ? ARMMMUIdx_SE1 : ARMMMUIdx_Stage1_E1;
+            }
             break;
         case 4: /* AT S1E2R, AT S1E2W */
             mmu_idx = ARMMMUIdx_E2;
@@ -7426,6 +7434,36 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         };
         define_arm_cp_regs(cpu, pan_reginfo);
     }
+#ifndef CONFIG_USER_ONLY
+    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
+        static const ARMCPRegInfo ats1e1_reginfo[] = {
+            { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+              .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+              .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+              .writefn = ats_write64 },
+            { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+              .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+              .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+              .writefn = ats_write64 },
+            REGINFO_SENTINEL
+        };
+        define_arm_cp_regs(cpu, ats1e1_reginfo);
+    }
+    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
+        static const ARMCPRegInfo ats1cp_reginfo[] = {
+            { .name = "ATS1CPRP",
+              .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+              .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+              .writefn = ats_write },
+            { .name = "ATS1CPWP",
+              .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+              .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+              .writefn = ats_write },
+            REGINFO_SENTINEL
+        };
+        define_arm_cp_regs(cpu, ats1cp_reginfo);
+    }
+#endif
 
     if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
         static const ARMCPRegInfo vhe_reginfo[] = {
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

[PATCH 11/11] target/arm: Enable ARMv8.2-ATS1E1 in -cpu max

Richard Henderson-3
In reply to this post by Richard Henderson-3
This includes enablement of ARMv8.1-PAN.

Signed-off-by: Richard Henderson <[hidden email]>
---
 target/arm/cpu.c   | 4 ++++
 target/arm/cpu64.c | 5 +++++
 2 files changed, 9 insertions(+)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index f3360dbb98..3b0c466137 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2640,6 +2640,10 @@ static void arm_max_initfn(Object *obj)
             t = FIELD_DP32(t, MVFR2, FPMISC, 4);   /* FP MaxNum */
             cpu->isar.mvfr2 = t;
 
+            t = cpu->id_mmfr3;
+            t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */
+            cpu->id_mmfr3 = t;
+
             t = cpu->id_mmfr4;
             t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
             cpu->id_mmfr4 = t;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 009411813f..9399253b4c 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -671,6 +671,7 @@ static void aarch64_max_initfn(Object *obj)
         t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
         t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
         t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1);
+        t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */
         cpu->isar.id_aa64mmfr1 = t;
 
         /* Replicate the same data to the 32-bit id registers.  */
@@ -691,6 +692,10 @@ static void aarch64_max_initfn(Object *obj)
         u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1);
         cpu->isar.id_isar6 = u;
 
+        u = cpu->id_mmfr3;
+        u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */
+        cpu->id_mmfr3 = u;
+
         /*
          * FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet,
          * so do not set MVFR1.FPHP.  Strictly speaking this is not legal,
--
2.17.1


Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 02/11] target/arm: Add arm_mmu_idx_is_stage1

Philippe Mathieu-Daudé-3
In reply to this post by Richard Henderson-3
On 12/3/19 11:53 PM, Richard Henderson wrote:
> Use a common predicate for querying stage1-ness.
>
> Signed-off-by: Richard Henderson <[hidden email]>

Reviewed-by: Philippe Mathieu-Daudé <[hidden email]>

> ---
>   target/arm/internals.h | 11 +++++++++++
>   target/arm/helper.c    |  8 +++-----
>   2 files changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index 49dac2a677..850f204f14 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -1034,6 +1034,17 @@ static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
>   ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
>   #endif
>  
> +static inline bool arm_mmu_idx_is_stage1(ARMMMUIdx mmu_idx)
> +{
> +    switch (mmu_idx) {
> +    case ARMMMUIdx_Stage1_E0:
> +    case ARMMMUIdx_Stage1_E1:
> +        return true;
> +    default:
> +        return false;
> +    }
> +}
> +
>   /*
>    * Parameters of a given virtual address, as extracted from the
>    * translation control register (TCR) for a given regime.
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index f3785d5ad6..fdb86ea427 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -3212,8 +3212,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
>           bool take_exc = false;
>  
>           if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
> -            && (mmu_idx == ARMMMUIdx_Stage1_E1
> -                || mmu_idx == ARMMMUIdx_Stage1_E0)) {
> +            && arm_mmu_idx_is_stage1(mmu_idx)) {
>               /*
>                * Synchronous stage 2 fault on an access made as part of the
>                * translation table walk for AT S1E0* or AT S1E1* insn
> @@ -9159,8 +9158,7 @@ static inline bool regime_translation_disabled(CPUARMState *env,
>           }
>       }
>  
> -    if ((env->cp15.hcr_el2 & HCR_DC) &&
> -        (mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1)) {
> +    if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1(mmu_idx)) {
>           /* HCR.DC means SCTLR_EL1.M behaves as 0 */
>           return true;
>       }
> @@ -9469,7 +9467,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
>                                  hwaddr addr, MemTxAttrs txattrs,
>                                  ARMMMUFaultInfo *fi)
>   {
> -    if ((mmu_idx == ARMMMUIdx_Stage1_E0 || mmu_idx == ARMMMUIdx_Stage1_E1) &&
> +    if (arm_mmu_idx_is_stage1(mmu_idx) &&
>           !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
>           target_ulong s2size;
>           hwaddr s2pa;
>


Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 01/11] cputlb: Handle NB_MMU_MODES > TARGET_PAGE_BITS_MIN

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:

>
> In target/arm we will shortly have "too many" mmu_idx.
> The current minimum barrier is caused by the way in which
> tlb_flush_page_by_mmuidx is coded.
>
> We can remove this limitation by allocating memory for
> consumption by the worker.  Let us assume that this is
> the unlikely case, as will be the case for the majority
> of targets which have so far satisfied the BUILD_BUG_ON,
> and only allocate memory when necessary.
>
> Signed-off-by: Richard Henderson <[hidden email]>

Reviewed-by: Peter Maydell <[hidden email]>

thanks
-- PMM

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 02/11] target/arm: Add arm_mmu_idx_is_stage1

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:

>
> Use a common predicate for querying stage1-ness.
>
> Signed-off-by: Richard Henderson <[hidden email]>
> ---
>  target/arm/internals.h | 11 +++++++++++
>  target/arm/helper.c    |  8 +++-----
>  2 files changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index 49dac2a677..850f204f14 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -1034,6 +1034,17 @@ static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
>  ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
>  #endif
>
> +static inline bool arm_mmu_idx_is_stage1(ARMMMUIdx mmu_idx)
> +{
> +    switch (mmu_idx) {
> +    case ARMMMUIdx_Stage1_E0:
> +    case ARMMMUIdx_Stage1_E1:
> +        return true;
> +    default:
> +        return false;
> +    }
> +}

This definition of 'stage 1' doesn't match the architecture's,
which has a lot more than 2 things that are stage 1; e.g. whatever
your renaming is calling S1E2, S1E3, etc. are all stage 1.
(That's why those names have 'S1' in them: they're stage 1
translation stages.)

thanks
-- PMM

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 04/11] target/arm: Reduce CPSR_RESERVED

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:

>
> Since v8.0, the CPSR_RESERVED bits have been allocated.
> We are not yet implementing ARMv8.4-DIT; retain CPSR_RESERVED,
> since that overlaps with our current hack for AA32 single step.
>
> Signed-off-by: Richard Henderson <[hidden email]>
> ---
>  target/arm/cpu.h | 8 ++++++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index 22c5706835..49dc436e5e 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -1149,12 +1149,16 @@ void pmu_init(ARMCPU *cpu);
>  #define CPSR_IT_2_7 (0xfc00U)
>  #define CPSR_GE (0xfU << 16)
>  #define CPSR_IL (1U << 20)
> -/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
> +/*
> + * Note that the RESERVED bits include bit 21, which is PSTATE_SS in
>   * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
>   * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
>   * where it is live state but not accessible to the AArch32 code.
> + *
> + * TODO: With ARMv8.4-DIT, bit 21 is DIT in AArch32 (bit 24 for AArch64).
> + * We will need to move AArch32 SS somewhere else at that point.
>   */
> -#define CPSR_RESERVED (0x7U << 21)
> +#define CPSR_RESERVED (1U << 21)
>  #define CPSR_J (1U << 24)
>  #define CPSR_IT_0_1 (3U << 25)
>  #define CPSR_Q (1U << 27)

Should we retain the current behaviour of preventing
guest writes to the CPSR (directly, or via exception
return and SPSR->CPSR writes) from modifying the
PAN and SSBS bits on CPUs which don't implement those
features?

thanks
-- PMM

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 05/11] target/arm: Add isar_feature tests for PAN + ATS1E1

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:
>
> Include definitions for all of the bits in ID_MMFR3.
> We already have a definition for ID_AA64MMFR1.PAN.
>
> Signed-off-by: Richard Henderson <[hidden email]>

Reviewed-by: Peter Maydell <[hidden email]>

thanks
-- PMM

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 06/11] target/arm: Update MSR access for PAN

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:

>
> For aarch64, there's a dedicated msr (imm, reg) insn.
> For aarch32, this is done via msr to cpsr; and writes
> from el0 are ignored.
>
> Signed-off-by: Richard Henderson <[hidden email]>
> ---
>  target/arm/cpu.h           |  2 ++
>  target/arm/helper.c        | 22 ++++++++++++++++++++++
>  target/arm/translate-a64.c | 14 ++++++++++++++
>  target/arm/translate.c     |  4 ++++
>  4 files changed, 42 insertions(+)
>
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index 170dd5b124..f0e61bf34f 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -1159,6 +1159,7 @@ void pmu_init(ARMCPU *cpu);
>   * We will need to move AArch32 SS somewhere else at that point.
>   */
>  #define CPSR_RESERVED (1U << 21)
> +#define CPSR_PAN (1U << 22)
>  #define CPSR_J (1U << 24)
>  #define CPSR_IT_0_1 (3U << 25)
>  #define CPSR_Q (1U << 27)
> @@ -1225,6 +1226,7 @@ void pmu_init(ARMCPU *cpu);
>  #define PSTATE_BTYPE (3U << 10)
>  #define PSTATE_IL (1U << 20)
>  #define PSTATE_SS (1U << 21)
> +#define PSTATE_PAN (1U << 22)
>  #define PSTATE_V (1U << 28)
>  #define PSTATE_C (1U << 29)
>  #define PSTATE_Z (1U << 30)
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index 4e3fe00316..512be5c644 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -4112,6 +4112,17 @@ static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
>      env->daif = value & PSTATE_DAIF;
>  }
>
> +static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
> +{
> +    return env->pstate & PSTATE_PAN;
> +}
> +
> +static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
> +                           uint64_t value)
> +{
> +    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
> +}
> +
>  static CPAccessResult aa64_cacheop_access(CPUARMState *env,
>                                            const ARMCPRegInfo *ri,
>                                            bool isread)
> @@ -7405,6 +7416,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
>          define_arm_cp_regs(cpu, lor_reginfo);
>      }
>
> +    if (cpu_isar_feature(aa64_pan, cpu)) {
> +        static const ARMCPRegInfo pan_reginfo[] = {
> +            { .name = "PAN", .state = ARM_CP_STATE_AA64,
> +              .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
> +              .type = ARM_CP_NO_RAW, .access = PL1_RW,
> +              .readfn = aa64_pan_read, .writefn = aa64_pan_write, },
> +            REGINFO_SENTINEL
> +        };

Same remarks about regdef as for UAO.

> +        define_arm_cp_regs(cpu, pan_reginfo);
> +    }
> +
>      if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
>          static const ARMCPRegInfo vhe_reginfo[] = {
>              { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
> diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
> index b5c7bc2d76..7f5a68106b 100644
> --- a/target/arm/translate-a64.c
> +++ b/target/arm/translate-a64.c
> @@ -1601,6 +1601,20 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
>          s->base.is_jmp = DISAS_NEXT;
>          break;
>
> +    case 0x04: /* PAN */
> +        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
> +            goto do_unallocated;
> +        }
> +        if (crm & 1) {
> +            set_pstate_bits(PSTATE_PAN);
> +        } else {
> +            clear_pstate_bits(PSTATE_PAN);
> +        }
> +        t1 = tcg_const_i32(s->current_el);
> +        gen_helper_rebuild_hflags_a64(cpu_env, t1);
> +        tcg_temp_free_i32(t1);
> +        break;

and same question about whether we need to break the TB here.

> +
>      case 0x05: /* SPSel */
>          if (s->current_el == 0) {
>              goto do_unallocated;
> diff --git a/target/arm/translate.c b/target/arm/translate.c
> index 47a374b53d..98e6072dd4 100644
> --- a/target/arm/translate.c
> +++ b/target/arm/translate.c
> @@ -2785,6 +2785,10 @@ static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
>          tcg_gen_or_i32(tmp, tmp, t0);
>          store_cpu_field(tmp, spsr);
>      } else {
> +        /* Data writes to CPSR.PAN using an MSR insn at EL0 are ignored.  */
> +        if (IS_USER(s)) {
> +            mask &= ~CPSR_PAN;
> +        }

I think we should also ignore the write if the PAN feature
isn't present (see remark on earlier patch).

>          gen_set_cpsr(t0, mask);
>      }
>      tcg_temp_free_i32(t0);
> --
> 2.17.1



thanks
-- PMM

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 07/11] target/arm: Update arm_mmu_idx_el for PAN

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:

>
> Examine the PAN bit for EL1, EL2, and Secure EL1 to
> determine if it applies.
>
> Signed-off-by: Richard Henderson <[hidden email]>
> ---
>  target/arm/helper.c | 9 +++++++++
>  1 file changed, 9 insertions(+)
>
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index 512be5c644..6c65dd799e 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -11770,13 +11770,22 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
>          return ARMMMUIdx_EL10_0;
>      case 1:
>          if (arm_is_secure_below_el3(env)) {
> +            if (env->pstate & PSTATE_PAN) {
> +                return ARMMMUIdx_SE1_PAN;
> +            }
>              return ARMMMUIdx_SE1;
>          }
> +        if (env->pstate & PSTATE_PAN) {
> +            return ARMMMUIdx_EL10_1_PAN;
> +        }
>          return ARMMMUIdx_EL10_1;
>      case 2:
>          /* TODO: ARMv8.4-SecEL2 */
>          /* Note that TGE does not apply at EL2.  */
>          if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
> +            if (env->pstate & PSTATE_PAN) {
> +                return ARMMMUIdx_EL20_2_PAN;
> +            }
>              return ARMMMUIdx_EL20_2;
>          }
>          return ARMMMUIdx_E2;
> --

Reviewed-by: Peter Maydell <[hidden email]>

thanks
-- PMM

Reply | Threaded
Open this post in threaded view
|

Re: [PATCH 08/11] target/arm: Enforce PAN semantics in get_S1prot

Peter Maydell-5
In reply to this post by Richard Henderson-3
On Tue, 3 Dec 2019 at 22:53, Richard Henderson
<[hidden email]> wrote:

>
> If we have a PAN-enforcing mmu_idx, set prot == 0 if user_rw != 0.
>
> Signed-off-by: Richard Henderson <[hidden email]>
> ---
>  target/arm/internals.h | 13 +++++++++++++
>  target/arm/helper.c    |  3 +++
>  2 files changed, 16 insertions(+)
>
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index 2408953031..ab3b436379 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -893,6 +893,19 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
>      }
>  }
>
> +static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
> +{
> +    switch (mmu_idx) {
> +    case ARMMMUIdx_Stage1_E1_PAN:
> +    case ARMMMUIdx_EL10_1_PAN:
> +    case ARMMMUIdx_EL20_2_PAN:
> +    case ARMMMUIdx_SE1_PAN:
> +        return true;
> +    default:
> +        return false;
> +    }
> +}
> +
>  /* Return the FSR value for a debug exception (watchpoint, hardware
>   * breakpoint or BKPT insn) targeting the specified exception level.
>   */
> diff --git a/target/arm/helper.c b/target/arm/helper.c
> index 6c65dd799e..a1dbafb9b2 100644
> --- a/target/arm/helper.c
> +++ b/target/arm/helper.c
> @@ -9444,6 +9444,9 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
>      if (is_user) {
>          prot_rw = user_rw;
>      } else {
> +        if (user_rw && regime_is_pan(env, mmu_idx)) {
> +            return 0;
> +        }
>          prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
>      }
>
> --
> 2.17.1
>
Reviewed-by: Peter Maydell <[hidden email]>

thanks
-- PMM

12