Commit c5538db2 authored by Lorenzo "Palinuro" Faletra

Import Upstream version 5.7.10

parent 675a03b4
......@@ -98,6 +98,7 @@ &watchdog0 {
};
&qspi {
status = "okay";
flash@0 {
#address-cells = <1>;
#size-cells = <1>;
......
......@@ -73,11 +73,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature) \
".popsection\n" \
".pushsection .altinstr_replacement, \"a\"\n" \
".subsection 1\n" \
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
".popsection\n\t" \
".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".endif\n"
......@@ -117,9 +117,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
662: .pushsection .altinstructions, "a"
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
.popsection
.pushsection .altinstr_replacement, "ax"
.subsection 1
663: \insn2
664: .popsection
664: .previous
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endif
......@@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.pushsection .altinstructions, "a"
altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
.popsection
.pushsection .altinstr_replacement, "ax"
.subsection 1
.align 2 /* So GAS knows label 661 is suitably aligned */
661:
.endm
......@@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.macro alternative_else
662:
.if .Lasm_alt_mode==0
.pushsection .altinstr_replacement, "ax"
.subsection 1
.else
.popsection
.previous
.endif
663:
.endm
......@@ -192,7 +192,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
.macro alternative_endif
664:
.if .Lasm_alt_mode==0
.popsection
.previous
.endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
......
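The five alternative.h hunks above are one logical change: replacement sequences move out of the separate .altinstr_replacement section and into subsection 1 of the section being patched, so a replacement and the code it replaces end up in the same section and PC-relative branches between them stay resolvable. A standalone sketch of the .subsection/.previous mechanics these macros now rely on (AArch64 inline asm, not kernel code; the label numbers and the value 42 are arbitrary):

#include <stdio.h>

static int subsection_demo(void)
{
	int ret;

	asm volatile(
	"	b	661f\n"		/* enter at 661 below                */
	"	.subsection 1\n"	/* assembled into the same section,  */
	"660:	mov	%w0, #42\n"	/* but placed after subsection 0     */
	"	b	662f\n"
	"	.previous\n"		/* back to subsection 0              */
	"661:	b	660b\n"		/* plain PC-relative branch into the */
	"662:\n"			/* stashed sequence and back again   */
	: "=r" (ret));

	return ret;
}

int main(void)
{
	printf("%d\n", subsection_demo());	/* prints 42 */
	return 0;
}

Because subsection 1 is still part of the final section, branch_insn_requires_update() further down no longer needs its kernel_text_address() special case.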
......@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
return read_sysreg_s(SYS_ICC_PMR_EL1);
}
static inline void gic_write_pmr(u32 val)
static __always_inline void gic_write_pmr(u32 val)
{
write_sysreg_s(val, SYS_ICC_PMR_EL1);
}
......
......@@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround {
u64 (*read_cntvct_el0)(void);
int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
bool disable_compat_vdso;
};
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
......
......@@ -668,7 +668,7 @@ static inline bool system_supports_generic_auth(void)
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
}
static inline bool system_uses_irq_prio_masking(void)
static __always_inline bool system_uses_irq_prio_masking(void)
{
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
......
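Both of the two preceding hunks upgrade helpers on the interrupt-masking path from inline to __always_inline. Plain inline is only a hint: under some configurations the compiler may still emit an out-of-line copy, which is presumably what these patches guard against for helpers used on the pseudo-NMI path. A trivial userspace sketch of the attribute (the stub body is ours; the kernel version reads ICC_PMR_EL1):

#include <stdio.h>

/* force inlining even when the compiler would decline the hint */
#define __always_inline inline __attribute__((__always_inline__))

static __always_inline unsigned int pmr_stub(void)
{
	return 0xf0;	/* stand-in for read_sysreg_s(SYS_ICC_PMR_EL1) */
}

int main(void)
{
	printf("%#x\n", pmr_stub());
	return 0;
}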
......@@ -86,6 +86,7 @@
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
#define NVIDIA_CPU_PART_DENVER 0x003
......@@ -114,6 +115,7 @@
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
......
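For readers following the Kryo4xx additions, MIDR_CPU_MODEL packs the MIDR_EL1 fields: implementer in bits [31:24], architecture in [19:16] (0xf meaning "see CPUID scheme"), and part number in [15:4]. A standalone sketch composing and re-decoding the new Kryo4xx Gold model (the macro body mirrors the kernel's layout; the decode expressions are ours):

#include <stdio.h>

#define MIDR_CPU_MODEL(imp, partnum) \
	(((imp) << 24) | (0xf << 16) | ((partnum) << 4))

#define ARM_CPU_IMP_QCOM		0x51
#define QCOM_CPU_PART_KRYO_4XX_GOLD	0x804

int main(void)
{
	unsigned int midr = MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM,
					   QCOM_CPU_PART_KRYO_4XX_GOLD);

	printf("midr=%#x implementer=%#x part=%#x\n", midr,
	       (midr >> 24) & 0xff,	/* 0x51: Qualcomm   */
	       (midr >> 4) & 0xfff);	/* 0x804: Kryo Gold */
	return 0;
}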
......@@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el);
void user_rewind_single_step(struct task_struct *task);
void user_fastforward_single_step(struct task_struct *task);
void user_regs_reset_single_step(struct user_pt_regs *regs,
struct task_struct *task);
void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
......
......@@ -56,7 +56,7 @@ extern bool arm64_use_ng_mappings;
#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
#define PAGE_S2_MEMATTR(attr) \
({ \
......
......@@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
unsigned long error = regs->regs[0];
if (is_compat_thread(task_thread_info(task)))
error = sign_extend64(error, 31);
return IS_ERR_VALUE(error) ? error : 0;
}
......@@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
-	regs->regs[0] = (long) error ? error : val;
+	if (error)
+		val = error;
+	if (is_compat_thread(task_thread_info(task)))
+		val = lower_32_bits(val);
+	regs->regs[0] = val;
}
#define SYSCALL_MAX_ARGS 6
......
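The two syscall-helper hunks above deal with compat (AArch32) tasks, whose return value lives in the low 32 bits of x0. Without sign extension, a 32-bit -EFAULT (0xfffffff2) is not inside the 64-bit error window that IS_ERR_VALUE() tests. A userspace sketch of the logic (MAX_ERRNO, IS_ERR_VALUE and sign_extend64 reproduced to match their kernel definitions):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((uint64_t)(x) >= (uint64_t)-MAX_ERRNO)

static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint64_t x0 = 0xfffffff2;	/* -EFAULT as stored by a compat task */

	printf("raw:           error=%d\n", (int)IS_ERR_VALUE(x0));
	printf("sign-extended: error=%d\n",
	       (int)IS_ERR_VALUE(sign_extend64(x0, 31)));
	return 0;
}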
......@@ -89,6 +89,7 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_SVE (1 << TIF_SVE)
......
......@@ -2,7 +2,10 @@
#ifndef __ASM_VDSOCLOCKSOURCE_H
#define __ASM_VDSOCLOCKSOURCE_H
-#define VDSO_ARCH_CLOCKMODES \
-	VDSO_CLOCKMODE_ARCHTIMER
+#define VDSO_ARCH_CLOCKMODES \
+	/* vdso clocksource for both 32 and 64bit tasks */ \
+	VDSO_CLOCKMODE_ARCHTIMER, \
+	/* vdso clocksource for 64bit tasks only */ \
+	VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT
#endif
......@@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
* update. Return something. Core will do another round and then
* see the mode change and fallback to the syscall.
*/
if (clock_mode == VDSO_CLOCKMODE_NONE)
if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
return 0;
/*
......@@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return ret;
}
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
}
#define vdso_clocksource_ok vdso_clocksource_ok
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
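Taken together, the clocksource and compat-gettimeofday hunks introduce a second architected-timer mode that is valid for native tasks only: on CPUs whose workaround sets disable_compat_vdso (see the arch_timer hunk above), the clocksource registers as ARCHTIMER_NOCOMPAT, and the compat vDSO must then treat it as unusable rather than merely "not NONE". A sketch of the tightened check (enum order illustrative; the helper name is ours):

#include <stdio.h>

enum vdso_clock_mode {
	VDSO_CLOCKMODE_NONE,
	VDSO_CLOCKMODE_ARCHTIMER,		/* 32- and 64-bit tasks */
	VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT,	/* 64-bit tasks only    */
};

static int compat_vdso_usable(enum vdso_clock_mode mode)
{
	/* the old test (mode != NONE) would wrongly accept NOCOMPAT here */
	return mode == VDSO_CLOCKMODE_ARCHTIMER;
}

int main(void)
{
	printf("%d\n", compat_vdso_usable(VDSO_CLOCKMODE_ARCHTIMER));
	printf("%d\n", compat_vdso_usable(VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT));
	return 0;
}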
......@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
-	unsigned long replptr;
-	if (kernel_text_address(pc))
-		return true;
-	replptr = (unsigned long)ALT_REPL_PTR(alt);
-	if (pc >= replptr && pc <= (replptr + alt->alt_len))
-		return false;
-	/*
-	 * Branching into *another* alternate sequence is doomed, and
-	 * we're not even trying to fix it up.
-	 */
-	BUG();
+	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
......
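With replacements in subsections, every alternative now lives in kernel text, so the old kernel_text_address()/BUG() dance collapses to a single containment test: a branch needs fixing up exactly when it targets something outside its own replacement sequence. A reduced model of the predicate with the pointer types flattened to integers:

#include <stdio.h>

static int branch_insn_requires_update(unsigned long pc,
				       unsigned long replptr,
				       unsigned long alt_len)
{
	/* branches within the replacement keep their meaning once copied */
	return !(pc >= replptr && pc <= replptr + alt_len);
}

int main(void)
{
	/* hypothetical replacement sequence at 0x200, 0x20 bytes long */
	printf("%d\n", branch_insn_requires_update(0x100, 0x200, 0x20)); /* 1 */
	printf("%d\n", branch_insn_requires_update(0x210, 0x200, 0x20)); /* 0 */
	return 0;
}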
......@@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
......@@ -470,12 +472,7 @@ static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
int scope)
{
-	u32 midr = read_cpuid_id();
-	/* Cortex-A76 r0p0 - r3p1 */
-	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
......@@ -726,6 +723,8 @@ static const struct midr_range erratum_1418040_list[] = {
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
/* Neoverse-N1 r0p0 to r3p1 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
{},
};
#endif
......@@ -766,11 +765,23 @@ static const struct midr_range erratum_speculative_at_vhe_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1530923
/* Cortex A55 r0p0 to r2p0 */
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
/* Kryo4xx Silver (rdpe => r1p0) */
MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
/* Cortex-A76 r0p0 - r3p1 */
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
......@@ -910,6 +921,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_WORKAROUND_1463225,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
.midr_range_list = erratum_1463225,
},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
......
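The "rcpe to rfpf" comments encode Qualcomm's offset revision scheme: going by the equivalences quoted in these erratum tables, MIDR variant 0xc..0xf on Kryo4xx Gold corresponds to Cortex r0..r3, and revision 0xe..0xf to p0..p1. A tiny decode sketch of that assumed mapping:

#include <stdio.h>

int main(void)
{
	/* variant/revision fields of a hypothetical Kryo4xx Gold MIDR */
	unsigned int variant = 0xc, revision = 0xe;	/* "rcpe" */

	/* offsets assumed from the (rcpe..rfpf) => (r0p0..r3p1)
	 * equivalence given in the comments above */
	printf("Cortex-equivalent: r%up%u\n", variant - 0xc, revision - 0xe);
	return 0;
}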
......@@ -1059,6 +1059,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
};
char const *str = "kpti command line option";
......@@ -1175,6 +1177,8 @@ static bool cpu_has_broken_dbm(void)
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
/* Kryo4xx Silver (rdpe => r1p0) */
MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
......
......@@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init);
/*
* Single step API and exception handling.
*/
static void set_regs_spsr_ss(struct pt_regs *regs)
static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
regs->pstate |= DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(set_regs_spsr_ss);
NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
static void clear_regs_spsr_ss(struct pt_regs *regs)
static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
regs->pstate &= ~DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);
NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
......@@ -404,6 +407,15 @@ void user_fastforward_single_step(struct task_struct *task)
clear_regs_spsr_ss(task_pt_regs(task));
}
void user_regs_reset_single_step(struct user_pt_regs *regs,
struct task_struct *task)
{
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
set_user_regs_spsr_ss(regs);
else
clear_user_regs_spsr_ss(regs);
}
/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
......
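The rename to *_user_regs_* lets ptrace flip the single-step state machine on a user_pt_regs that is not embedded in a full pt_regs. The bit being toggled is PSTATE.SS (bit 21), which, together with MDSCR_EL1.SS, arms the hardware to take a step exception after one instruction. A reduced model of the two helpers (struct trimmed to the one field they touch):

#include <stdio.h>
#include <stdint.h>

#define DBG_SPSR_SS	(1u << 21)	/* PSTATE.SS */

struct user_pt_regs {
	uint64_t pstate;
};

static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate |= DBG_SPSR_SS;
}

static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate &= ~(uint64_t)DBG_SPSR_SS;
}

int main(void)
{
	struct user_pt_regs regs = { 0 };

	set_user_regs_spsr_ss(&regs);
	printf("pstate=%#llx\n", (unsigned long long)regs.pstate); /* 0x200000 */
	clear_user_regs_spsr_ss(&regs);
	printf("pstate=%#llx\n", (unsigned long long)regs.pstate); /* 0 */
	return 0;
}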
......@@ -338,7 +338,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
return sve_vl_from_vq(__bit_to_vq(bit));
}
#ifdef CONFIG_SYSCTL
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
static int sve_proc_do_default_vl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
......@@ -384,9 +384,9 @@ static int __init sve_sysctl_init(void)
return 0;
}
#else /* ! CONFIG_SYSCTL */
#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
......
......@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
if (!kgdb_single_step)
return DBG_HOOK_ERROR;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
kgdb_handle_exception(0, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
......
......@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
/*
-	 * Compat (i.e. 32 bit) mode:
-	 * - PC has been set in the pt_regs struct in kernel_entry,
-	 * - Handle SP and LR here.
+	 * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+	 * we're stuck with it for ABI compatibility reasons.
+	 *
+	 * For a 32-bit consumer inspecting a 32-bit task, it will look at
+	 * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+	 * These correspond directly to a prefix of the registers saved in our
+	 * 'struct pt_regs', with the exception of the PC, so we copy that down
+	 * (x15 corresponds to SP_hyp in the architecture).
+	 *
+	 * So far, so good.
+	 *
+	 * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+	 * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+	 * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+	 * PC registers would normally live. The initial idea was to allow a
+	 * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+	 * how well that works in practice, somebody might be relying on it.
+	 *
+	 * At the time we make a sample, we don't know whether the consumer is
+	 * 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
+		if (idx == 15)
+			return regs->pc;
}
if ((u32)idx == PERF_REG_ARM64_SP)
......
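A userspace model of the sampling rules the new comment spells out. Index constants follow the perf uapi (PERF_REG_ARM64_LR = 30, SP = 31, PC = 32); the regs structure is reduced to what the mapping touches, and for a compat task r13/r14 stand in for compat_sp/compat_lr:

#include <stdio.h>
#include <stdint.h>

enum { PERF_REG_ARM64_LR = 30, PERF_REG_ARM64_SP = 31, PERF_REG_ARM64_PC = 32 };

struct regs_model {
	uint64_t regs[31];	/* x0..x30; r13/r14 for a compat task */
	uint64_t sp, pc;
	int compat;
};

static uint64_t perf_reg_value(const struct regs_model *r, int idx)
{
	if (r->compat) {
		if (idx == PERF_REG_ARM64_SP)
			return r->regs[13];	/* compat_sp */
		if (idx == PERF_REG_ARM64_LR)
			return r->regs[14];	/* compat_lr */
		if (idx == 15)
			return r->pc;		/* PC in the 32-bit view */
	}
	if (idx == PERF_REG_ARM64_SP)
		return r->sp;
	if (idx == PERF_REG_ARM64_PC)
		return r->pc;
	return r->regs[idx];
}

int main(void)
{
	struct regs_model r = { .compat = 1, .pc = 0x1234 };

	r.regs[13] = 0xcafe;
	printf("SP=%#llx PC(idx 15)=%#llx\n",
	       (unsigned long long)perf_reg_value(&r, PERF_REG_ARM64_SP),
	       (unsigned long long)perf_reg_value(&r, 15));
	return 0;
}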
......@@ -1819,12 +1819,23 @@ static void tracehook_report_syscall(struct pt_regs *regs,
saved_reg = regs->regs[regno];
regs->regs[regno] = dir;
-	if (dir == PTRACE_SYSCALL_EXIT)
+	if (dir == PTRACE_SYSCALL_ENTER) {
+		if (tracehook_report_syscall_entry(regs))
+			forget_syscall(regs);
+		regs->regs[regno] = saved_reg;
+	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
 		tracehook_report_syscall_exit(regs, 0);
-	else if (tracehook_report_syscall_entry(regs))
-		forget_syscall(regs);
-	regs->regs[regno] = saved_reg;
+		regs->regs[regno] = saved_reg;
+	} else {
+		regs->regs[regno] = saved_reg;
+		/*
+		 * Signal a pseudo-step exception since we are stepping but
+		 * tracer modifications to the registers may have rewound the
+		 * state machine.
+		 */
+		tracehook_report_syscall_exit(regs, 1);
+	}
}
int syscall_trace_enter(struct pt_regs *regs)
......@@ -1852,12 +1863,14 @@ int syscall_trace_enter(struct pt_regs *regs)
void syscall_trace_exit(struct pt_regs *regs)
{
unsigned long flags = READ_ONCE(current_thread_info()->flags);
audit_syscall_exit(regs);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
if (flags & _TIF_SYSCALL_TRACEPOINT)
trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
rseq_syscall(regs);
......@@ -1935,8 +1948,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
*/
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
-	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
-		regs->pstate &= ~DBG_SPSR_SS;
+	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+	user_regs_reset_single_step(regs, task);
if (is_compat_thread(task_thread_info(task)))
return valid_compat_regs(regs);
......
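The ptrace.c changes make single-step reporting consistent: a stepping tracee now gets a pseudo-step trap on syscall exit even when the tracer rewrote the registers, and the exit path decides from one READ_ONCE() snapshot of the thread flags rather than re-testing them. A compact model of the reworked exit decision (flag bit positions are illustrative, not the kernel's):

#include <stdio.h>

#define _TIF_SYSCALL_TRACE	(1u << 0)
#define _TIF_SYSCALL_TRACEPOINT	(1u << 1)
#define _TIF_SINGLESTEP		(1u << 2)

static void syscall_trace_exit_model(unsigned int flags)
{
	if (flags & _TIF_SYSCALL_TRACEPOINT)
		printf("trace_sys_exit\n");

	/* stepping alone is now enough to report, with step = 1 */
	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		printf("report syscall exit, step=%d\n",
		       !!(flags & _TIF_SINGLESTEP));
}

int main(void)
{
	syscall_trace_exit_model(_TIF_SYSCALL_TRACE);	/* step=0 */
	syscall_trace_exit_model(_TIF_SINGLESTEP);	/* step=1 */
	return 0;
}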