Commit 3c9e35af authored by Lorenzo "Palinuro" Faletra

Import Upstream version 4.13.13

parent 238a495f
 VERSION = 4
 PATCHLEVEL = 13
-SUBLEVEL = 10
+SUBLEVEL = 13
 EXTRAVERSION =
 NAME = Fearless Coyote
@@ -178,9 +178,9 @@ L2: cache-controller@8000 {
 			reg = <0x8000 0x1000>;
 			cache-unified;
 			cache-level = <2>;
-			arm,double-linefill-incr = <1>;
+			arm,double-linefill-incr = <0>;
 			arm,double-linefill-wrap = <0>;
-			arm,double-linefill = <1>;
+			arm,double-linefill = <0>;
 			prefetch-data = <1>;
 		};
@@ -143,9 +143,9 @@ L2: cache-controller@8000 {
 			reg = <0x8000 0x1000>;
 			cache-unified;
 			cache-level = <2>;
-			arm,double-linefill-incr = <1>;
+			arm,double-linefill-incr = <0>;
 			arm,double-linefill-wrap = <0>;
-			arm,double-linefill = <1>;
+			arm,double-linefill = <0>;
 			prefetch-data = <1>;
 		};
@@ -111,9 +111,9 @@ L2: cache-controller@8000 {
 			reg = <0x8000 0x1000>;
 			cache-unified;
 			cache-level = <2>;
-			arm,double-linefill-incr = <1>;
+			arm,double-linefill-incr = <0>;
 			arm,double-linefill-wrap = <0>;
-			arm,double-linefill = <1>;
+			arm,double-linefill = <0>;
 			prefetch-data = <1>;
 		};
@@ -20,7 +20,6 @@ generic-y += simd.h
 generic-y += sizes.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += unaligned.h
 generated-y += mach-types.h
 generated-y += unistd-nr.h

#ifndef __ASM_ARM_UNALIGNED_H
#define __ASM_ARM_UNALIGNED_H

/*
 * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
 * but we don't want to use linux/unaligned/access_ok.h since that can lead
 * to traps on unaligned stm/ldm or strd/ldrd.
 */
#include <asm/byteorder.h>

#if defined(__LITTLE_ENDIAN)
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned	__get_unaligned_le
# define put_unaligned	__put_unaligned_le
#elif defined(__BIG_ENDIAN)
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned	__get_unaligned_be
# define put_unaligned	__put_unaligned_be
#else
# error need to define endianess
#endif

#endif /* __ASM_ARM_UNALIGNED_H */
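
A minimal usage sketch of the accessors this new header provides (illustration
only, not part of the commit; the function and buffer names are invented):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Read a 32-bit length field sitting at an odd offset inside a
	 * packet buffer.  A plain u32 load from pkt + 1 may trap on ARM;
	 * get_unaligned() expands to accesses that are safe here. */
	static u32 example_parse_len(const u8 *pkt)
	{
		return get_unaligned((const u32 *)(pkt + 1));
	}

	/* Store it back the same way. */
	static void example_store_len(u8 *pkt, u32 len)
	{
		put_unaligned(len, (u32 *)(pkt + 1));
	}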
@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 	set_fs(fs);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
 	unsigned long addr = instruction_pointer(regs);
 	const int thumb = thumb_mode(regs);
 	const int width = thumb ? 4 : 8;
-	mm_segment_t fs;
 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 	int i;
 
 	/*
-	 * We need to switch to kernel mode so that we can use __get_user
-	 * to safely read from kernel space.  Note that we now dump the
-	 * code first, just in case the backtrace kills us.
+	 * Note that we now dump the code first, just in case the backtrace
+	 * kills us.
 	 */
-	fs = get_fs();
-	set_fs(KERNEL_DS);
-
 	for (i = -4; i < 1 + !!thumb; i++) {
 		unsigned int val, bad;
 
 		if (thumb)
-			bad = __get_user(val, &((u16 *)addr)[i]);
+			bad = get_user(val, &((u16 *)addr)[i]);
 		else
-			bad = __get_user(val, &((u32 *)addr)[i]);
+			bad = get_user(val, &((u32 *)addr)[i]);
 
 		if (!bad)
 			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 		}
 	}
 	printk("%sCode: %s\n", lvl, str);
+}
 
-	set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+	mm_segment_t fs;
+
+	if (!user_mode(regs)) {
+		fs = get_fs();
+		set_fs(KERNEL_DS);
+		__dump_instr(lvl, regs);
+		set_fs(fs);
+	} else {
+		__dump_instr(lvl, regs);
+	}
 }
#ifdef CONFIG_ARM_UNWIND
@@ -227,7 +227,7 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 	u32 return_offset = (is_thumb) ? 2 : 4;
 
 	kvm_update_psr(vcpu, UND_MODE);
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -239,10 +239,8 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
-	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 return_offset = (is_pabt) ? 4 : 8;
 	bool is_lpae;
 
 	kvm_update_psr(vcpu, ABT_MODE);
@@ -2,7 +2,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
@@ -116,7 +116,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
 	for (i = -4; i < 1; i++) {
 		unsigned int val, bad;
 
-		bad = __get_user(val, &((u32 *)addr)[i]);
+		bad = get_user(val, &((u32 *)addr)[i]);
 
 		if (!bad)
 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
@@ -2,7 +2,7 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-ccflags-y += -fno-stack-protector
+ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
@@ -33,12 +33,26 @@
 #define LOWER_EL_AArch64_VECTOR		0x400
 #define LOWER_EL_AArch32_VECTOR		0x600
 
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+	[0] = { 0, 0 },		/* Reset, unused */
+	[1] = { 4, 2 },		/* Undefined */
+	[2] = { 0, 0 },		/* SVC, unused */
+	[3] = { 4, 4 },		/* Prefetch abort */
+	[4] = { 8, 8 },		/* Data abort */
+	[5] = { 0, 0 },		/* HVC, unused */
+	[6] = { 4, 4 },		/* IRQ, unused */
+	[7] = { 4, 4 },		/* FIQ, unused */
+};
+
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 	unsigned long cpsr;
 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
 	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
 	cpsr = mode | COMPAT_PSR_I_BIT;
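
As a worked example of the table lookup introduced above (a hypothetical
helper, not part of the commit): an undefined-instruction exception enters
through vector offset 0x4, so row 0x4 >> 2 == 1 is selected; Thumb state
picks column 1 (offset 2) and ARM state column 0 (offset 4).

	/* Hypothetical helper showing how return_offsets is indexed. */
	static u32 example_return_offset(u32 vect_offset, bool is_thumb)
	{
		/* e.g. Undefined (0x4) from Thumb returns 2, from ARM 4. */
		return return_offsets[vect_offset >> 2][is_thumb];
	}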
@@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void)
 	uart_port.type = PORT_AR7;
 	uart_port.uartclk = clk_get_rate(bus_clk) / 2;
 	uart_port.iotype = UPIO_MEM32;
+	uart_port.flags = UPF_FIXED_TYPE;
 	uart_port.regshift = 2;
 	uart_port.line = 0;
@@ -653,6 +654,10 @@ static int __init ar7_register_devices(void)
 	u32 val;
 	int res;
 
+	res = ar7_gpio_init();
+	if (res)
+		pr_warn("unable to register gpios: %d\n", res);
+
 	res = ar7_register_uarts();
 	if (res)
 		pr_err("unable to setup uart(s): %d\n", res);
@@ -246,8 +246,6 @@ void __init prom_init(void)
 	ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
 	ar7_init_env((struct env_var *)fw_arg2);
 	console_config();
-
-	ar7_gpio_init();
 }
 
 #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
@@ -240,8 +240,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
 #define CM_GCR_BASE_GCRBASE_MSK		(_ULCAST_(0x1ffff) << 15)
 #define CM_GCR_BASE_CMDEFTGT_SHF	0
 #define CM_GCR_BASE_CMDEFTGT_MSK	(_ULCAST_(0x3) << 0)
-#define CM_GCR_BASE_CMDEFTGT_DISABLED	0
-#define CM_GCR_BASE_CMDEFTGT_MEM	1
+#define CM_GCR_BASE_CMDEFTGT_MEM	0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED	1
 #define CM_GCR_BASE_CMDEFTGT_IOCU0	2
 #define CM_GCR_BASE_CMDEFTGT_IOCU1	3
@@ -589,11 +589,11 @@ void __init bmips_cpu_setup(void)
 		/* Flush and enable RAC */
 		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-		__raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+		__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
 		__raw_readl(cbr + BMIPS_RAC_CONFIG);
 
 		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-		__raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+		__raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
 		__raw_readl(cbr + BMIPS_RAC_CONFIG);
 
 		cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
@@ -19,7 +19,7 @@
 #undef DEBUG
 
 #include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
@@ -66,6 +66,7 @@ EXPORT_SYMBOL(cpu_sibling_map);
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
+static DECLARE_COMPLETION(cpu_starting);
 static DECLARE_COMPLETION(cpu_running);
 
 /*
@@ -376,6 +377,12 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
+	/* Notify boot CPU that we're starting & ready to sync counters */
+	complete(&cpu_starting);
+
+	synchronise_count_slave(cpu);
+
+	/* The CPU is running and counters synchronised, now mark it online */
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -383,8 +390,11 @@ asmlinkage void start_secondary(void)
 	calculate_cpu_foreign_map();
 
+	/*
+	 * Notify boot CPU that we're up & online and it can safely return
+	 * from __cpu_up
+	 */
 	complete(&cpu_running);
-	synchronise_count_slave(cpu);
 
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -443,17 +453,17 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	mp_ops->boot_secondary(cpu, tidle);
 
-	/*
-	 * We must check for timeout here, as the CPU will not be marked
-	 * online until the counters are synchronised.
-	 */
-	if (!wait_for_completion_timeout(&cpu_running,
+	/* Wait for CPU to start and be ready to sync counters */
+	if (!wait_for_completion_timeout(&cpu_starting,
 					 msecs_to_jiffies(1000))) {
 		pr_crit("CPU%u: failed to start\n", cpu);
 		return -EIO;
 	}
 
 	synchronise_count_master(cpu);
+
+	/* Wait for CPU to finish startup & mark itself online before return */
+	wait_for_completion(&cpu_running);
 	return 0;
 }
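
The hunks above split the old single cpu_running completion into a two-stage
handshake: the boot CPU may time out while waiting for the secondary to start,
but once the secondary has started, the wait for full readiness is unbounded.
A minimal standalone sketch of the same pattern (invented names and a generic
kthread context, assumed for illustration; this is not the kernel's SMP code):

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static DECLARE_COMPLETION(worker_starting);
	static DECLARE_COMPLETION(worker_running);

	/* Secondary side: announce early so the waiter can bound its wait,
	 * do the slow work, then announce full readiness. */
	static int worker_entry(void *unused)
	{
		complete(&worker_starting);
		/* ... slow, potentially unbounded initialisation ... */
		complete(&worker_running);
		return 0;
	}

	/* Boot side: only the first wait may time out; after the worker has
	 * signalled that it started, completion is expected eventually. */
	static int boot_side_wait(void)
	{
		if (!wait_for_completion_timeout(&worker_starting,
						 msecs_to_jiffies(1000)))
			return -EIO;	/* worker never started */
		wait_for_completion(&worker_running);
		return 0;
	}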
@@ -80,7 +80,7 @@ static const struct insn const insn_table_MM[insn_invalid] = {
 	[insn_jr]	= {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
 	[insn_lb]	= {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 	[insn_ld]	= {0, 0},
-	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM},
+	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 	[insn_ll]	= {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
 	[insn_lld]	= {0, 0},
 	[insn_lui]	= {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
@@ -1485,7 +1485,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	}
 	src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
 	if (src < 0)
-		return dst;
+		return src;
 	if (BPF_MODE(insn->code) == BPF_XADD) {
 		switch (BPF_SIZE(insn->code)) {
 		case BPF_W: