diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 3b4e17c23d9a..0b96464ff033 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -341,10 +341,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 	 * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
 	 * are chances where percpu allocation can come from vmalloc area.
 	 */
-#ifdef CONFIG_PPC64
 	if (IS_ENABLED(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && !is_embed_first_chunk)
 		return;
-#endif
 
 	/* Otherwise, it should be safe to call it */
 	nmi_enter();
@@ -361,10 +359,8 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
 		// no nmi_exit for a pseries hash guest taking a real mode exception
 	} else if (IS_ENABLED(CONFIG_KASAN)) {
 		// no nmi_exit for KASAN in real mode
-#ifdef CONFIG_PPC64
 	} else if (IS_ENABLED(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && !is_embed_first_chunk) {
 		// no nmi_exit if percpu first chunk is not embedded
-#endif
 	} else {
 		nmi_exit();
 	}
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index b46fef84f685..d1fdf25897dc 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -12,14 +12,18 @@
 
 #define __my_cpu_offset local_paca->data_offset
 
+#endif /* CONFIG_SMP */
+#endif /* __powerpc64__ */
+
+#ifdef CONFIG_PPC64
 #include <asm/paca.h>
 
 DECLARE_STATIC_KEY_FALSE(__percpu_embed_first_chunk);
 #define is_embed_first_chunk	\
 	(static_key_enabled(&__percpu_embed_first_chunk.key))
-
-#endif /* CONFIG_SMP */
-#endif /* __powerpc64__ */
+#else
+#define is_embed_first_chunk	(true)
+#endif /* CONFIG_PPC64 */
 
 #include <asm-generic/percpu.h>
 