Commit cc02f499 authored by Lorenzo "Palinuro" Faletra

Import Upstream version 5.4.13

parent 086e5551
......@@ -62,6 +62,7 @@ config PARISC
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_COPY_THREAD_TLS
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
......
......@@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
__ret; \
})
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
......
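For reference: the rewritten xchg() wraps __xchg() in a GNU C statement expression, so the macro yields a value of the pointee's type and evaluates its x argument exactly once (via the _x_ temporary). A minimal userspace sketch of the same single-evaluation pattern; swap_once and the surrounding names are illustrative, not kernel code:

#include <stdio.h>

/* Statement-expression wrapper: the result takes the pointee's type
 * and the value argument is evaluated exactly once -- the same shape
 * as the new xchg() above. */
#define swap_once(p, v)                                         \
({                                                              \
        __typeof__(*(p)) __old = *(p);                          \
        __typeof__(*(p)) __new = (v);                           \
        *(p) = __new;                                           \
        __old;                                                  \
})

int main(void)
{
        int calls = 0;
        int slot = 1;

        /* calls++ runs exactly once; the statement expression also
         * gives the macro a well-typed result usable in expressions. */
        int old = swap_once(&slot, calls++);

        printf("old=%d slot=%d calls=%d\n", old, slot, calls); /* 1 0 1 */
        return 0;
}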
......@@ -2,8 +2,6 @@
#ifndef _ASM_PARISC_KEXEC_H
#define _ASM_PARISC_KEXEC_H
#ifdef CONFIG_KEXEC
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
......@@ -32,6 +30,4 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
#endif /* _ASM_PARISC_KEXEC_H */
......@@ -37,5 +37,5 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
......@@ -810,7 +810,7 @@ EXPORT_SYMBOL(device_to_hwpath);
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
static void walk_lower_bus(struct parisc_device *dev)
static void __init walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;
......
......@@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init);
* Copy architecture-specific thread state
*/
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p)
copy_thread_tls(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
......@@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &child_return;
/* Setup thread TLS area from the 4th parameter in clone */
/* Setup thread TLS area */
if (clone_flags & CLONE_SETTLS)
cregs->cr27 = cregs->gr[23];
cregs->cr27 = tls;
}
return 0;
......
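With HAVE_COPY_THREAD_TLS selected (per the Kconfig hunk above), the TLS pointer arrives in copy_thread_tls() as an explicit parameter instead of being read back out of the saved gr[23] clone-argument register. From userspace the value originates as the tls argument of clone(2); a rough sketch using glibc's clone() wrapper, where tls_block and child_fn are illustrative stand-ins:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static int child_fn(void *arg)
{
        (void)arg;
        return 0;
}

int main(void)
{
        /* Illustrative only: a real threading library would set up a
         * proper TLS area here. */
        static char tls_block[4096];
        char *stack = malloc(64 * 1024);

        if (!stack)
                return 1;

        /* With CLONE_SETTLS the kernel receives tls as an explicit
         * syscall argument -- the same value copy_thread_tls() now
         * takes as a parameter rather than reading saved registers. */
        int pid = clone(child_fn, stack + 64 * 1024,
                        CLONE_VM | CLONE_SETTLS | SIGCHLD,
                        NULL, NULL, tls_block, NULL);

        printf("child pid=%d\n", pid);
        return 0;
}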
......@@ -15,6 +15,7 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
......@@ -36,10 +37,12 @@
#endif
#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
if (!static_branch_unlikely(&shared_processor))
return false;
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
......
......@@ -104,6 +104,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
#define FLUSH_CHUNK_SIZE SZ_1G
/**
* flush_dcache_range_chunked(): Write any modified data cache blocks out to
* memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
* Does not invalidate the corresponding instruction cache blocks.
*
* @start: the start address
* @stop: the stop address (exclusive)
* @chunk: the max size of the chunks
*/
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
unsigned long chunk)
{
unsigned long i;
for (i = start; i < stop; i += chunk) {
flush_dcache_range(i, min(stop, i + chunk));
cond_resched();
}
}
int __ref arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions)
{
......@@ -120,7 +141,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
start, start + size, rc);
return -EFAULT;
}
flush_dcache_range(start, start + size);
flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
......@@ -130,14 +152,14 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
int ret;
__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
flush_dcache_range(start, start + size);
flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
ret = remove_section_mapping(start, start + size);
WARN_ON_ONCE(ret);
......@@ -260,6 +282,14 @@ void __init mem_init(void)
BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
#ifdef CONFIG_SWIOTLB
/*
* Some platforms (e.g. 85xx) limit DMA-able memory way below
* 4G. We force memblock to bottom-up mode to ensure that the
* memory allocated in swiotlb_init() is DMA-able.
* As it's the last memblock allocation, no need to reset it
* back to top-down.
*/
memblock_set_bottom_up(true);
swiotlb_init(0);
#endif
......
......@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
#endif
static inline bool slice_addr_is_low(unsigned long addr)
static inline notrace bool slice_addr_is_low(unsigned long addr)
{
u64 tmp = (u64)addr;
......@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
mm_ctx_user_psize(&current->mm->context), 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *psizes;
int index, mask_index;
......
......@@ -945,6 +945,23 @@ void __init pnv_pci_init(void)
if (!firmware_has_feature(FW_FEATURE_OPAL))
return;
#ifdef CONFIG_PCIEPORTBUS
/*
* On PowerNV, PCIe devices are (currently) managed in cooperation
* with firmware. This isn't *strictly* required, but there's enough
* assumptions baked into both firmware and the platform code that
* it's unwise to allow the portbus services to be used.
*
* We need to fix this eventually, but for now set this flag to disable
* the portbus driver. The AER service isn't required since AER
* events are handled via EEH. The pciehp hotplug driver can't work
* without kernel changes (and portbus binding breaks pnv_php). The
* other services also require some thinking about how we're going
* to integrate them.
*/
pcie_ports_disabled = true;
#endif
/* Look for IODA IO-Hubs. */
for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
pnv_pci_init_ioda_hub(np);
......
......@@ -74,6 +74,9 @@
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL_GPL(shared_processor);
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
......@@ -758,6 +761,10 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
if (lppaca_shared_proc(get_lppaca()))
static_branch_enable(&shared_processor);
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV
......
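Together these two hunks replace a firmware_has_feature() lookup on every vcpu_is_preempted() call with a static key: shared_processor is flipped once at boot, and the branch sites are patched in place, so non-shared-processor systems pay only a nop on the hot path. A minimal kernel-side sketch of the same pattern; my_feature_key and the helpers are illustrative:

#include <linux/jump_label.h>

/* Illustrative pattern only; my_feature_key mirrors shared_processor. */
DEFINE_STATIC_KEY_FALSE(my_feature_key);

static bool my_hot_path_check(void)
{
        /* Compiles to a single nop until the key is enabled -- no
         * memory load or compare on the hot path. */
        if (!static_branch_unlikely(&my_feature_key))
                return false;
        return true;
}

static void my_detect_feature(bool present)
{
        /* One-time slow path: patches every branch site in place. */
        if (present)
                static_branch_enable(&my_feature_key);
}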
......@@ -61,6 +61,7 @@ config RISCV
select SPARSEMEM_STATIC if 32BIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_COPY_THREAD_TLS
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
......
......@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
*/
old = *parent;
if (function_graph_enter(old, self_addr, frame_pointer, parent))
if (!function_graph_enter(old, self_addr, frame_pointer, parent))
*parent = return_hooker;
}
......
......@@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
return 0;
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
......@@ -120,7 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
if (usp) /* User fork */
childregs->sp = usp;
if (clone_flags & CLONE_SETTLS)
childregs->tp = childregs->a5;
childregs->tp = tls;
childregs->a0 = 0; /* Return value of fork() */
p->thread.ra = (unsigned long)ret_from_fork;
}
......
......@@ -14,6 +14,7 @@ void flush_icache_all(void)
{
sbi_remote_fence_i(NULL);
}
EXPORT_SYMBOL(flush_icache_all);
/*
* Performs an icache flush for the given MM context. RISC-V has no direct
......
......@@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
return -1;
emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
/* if (--TCC < 0)
/* if (TCC-- < 0)
* goto out;
*/
emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
if (is_13b_check(off, insn))
return -1;
emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
/* prog = array->ptrs[index];
* if (!prog)
......
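The comment and branch fix agree on post-decrement semantics: the counter is tested before it is decremented, which permits one more tail call than the pre-decrement form. A small userspace sketch of the difference; the loop harness is illustrative, and MAX_TAIL_CALL_CNT is 32 in this kernel:

#include <stdio.h>

#define MAX_TAIL_CALL_CNT 32

/* How many tail calls the post-decrement check permits. */
static int allowed_postdec(void)
{
        int tcc = MAX_TAIL_CALL_CNT, n = 0;

        while (!(tcc-- < 0))    /* pattern the JIT emits after the fix */
                n++;
        return n;
}

/* How many tail calls the pre-decrement check would permit. */
static int allowed_predec(void)
{
        int tcc = MAX_TAIL_CALL_CNT, n = 0;

        while (!(--tcc < 0))    /* pattern the old comment described */
                n++;
        return n;
}

int main(void)
{
        printf("TCC-- < 0 allows %d calls\n", allowed_postdec()); /* 33 */
        printf("--TCC < 0 allows %d calls\n", allowed_predec());  /* 32 */
        return 0;
}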
......@@ -1313,18 +1313,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
if (flush_all && done)
break;
/* If an event overflow happened, discard samples by
* processing any remaining sample-data-blocks.
*/
if (event_overflow)
flush_all = 1;
}
/* Account sample overflows in the event hardware structure */
if (sampl_overflow)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
/* perf_event_overflow() and perf_event_account_interrupt() limit
 * the interrupt rate to an upper limit, roughly 1000 samples per
 * task tick. Hitting this limit results in a large number of
 * throttled REF_REPORT_THROTTLE entries and the samples are
 * dropped. Slightly increase the interval to avoid hitting this
 * limit.
 */
if (event_overflow) {
SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
__func__,
DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
}
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
"overflow stats: sample=%llu event=%llu\n",
......
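The adjustment grows the sampling interval by ceil(10%) via DIV_ROUND_UP, so even a tiny interval increases by at least 1 and repeated overflows back the rate off geometrically. Worked numbers using the kernel's DIV_ROUND_UP definition; the harness and sample values are illustrative:

#include <stdio.h>

/* Same rounding the kernel's DIV_ROUND_UP() macro performs. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Illustrative starting intervals (cycles between samples). */
        unsigned long rates[] = { 7, 3333, 10000 };

        for (int i = 0; i < 3; i++) {
                unsigned long r = rates[i];

                /* The adjustment above: grow by ceil(10%), so even a
                 * tiny interval increases by at least 1. */
                r += DIV_ROUND_UP(r, 10);
                printf("%lu -> %lu\n", rates[i], r); /* 7->8, 3333->3667, 10000->11000 */
        }
        return 0;
}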
......@@ -727,39 +727,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
static int smp_add_present_cpu(int cpu);
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
bool configured, bool early)
{
struct pcpu *pcpu;
cpumask_t avail;
int cpu, nr, i, j;
int cpu, nr, i;
u16 address;
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
cpu = cpumask_first(&avail);
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
if (sclp.has_core_type && info->core[i].type != boot_core_type)
if (sclp.has_core_type && core->type != boot_core_type)
return nr;
cpu = cpumask_first(avail);
address = core->core_id << smp_cpu_mt_shift;
for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
if (pcpu_find_address(cpu_present_mask, address + i))
continue;
address = info->core[i].core_id << smp_cpu_mt_shift;
for (j = 0; j <= smp_cpu_mtid; j++) {
if (pcpu_find_address(cpu_present_mask, address + j))
continue;
pcpu = pcpu_devices + cpu;
pcpu->address = address + j;
pcpu->state =
(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
set_cpu_present(cpu, true);
if (sysfs_add && smp_add_present_cpu(cpu) != 0)
set_cpu_present(cpu, false);
else
nr++;
cpu = cpumask_next(cpu, &avail);
if (cpu >= nr_cpu_ids)
pcpu = pcpu_devices + cpu;
pcpu->address = address + i;
if (configured)
pcpu->state = CPU_STATE_CONFIGURED;
else
pcpu->state = CPU_STATE_STANDBY;
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
set_cpu_present(cpu, true);
if (!early && smp_add_present_cpu(cpu) != 0)
set_cpu_present(cpu, false);
else
nr++;
cpumask_clear_cpu(cpu, avail);
cpu = cpumask_next(cpu, avail);
}
return nr;
}
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
struct sclp_core_entry *core;
cpumask_t avail;
bool configured;
u16 core_id;
int nr, i;
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
/*
* Add IPL core first (which got logical CPU number 0) to make sure
* that all SMT threads get subsequent logical CPU numbers.
*/
if (early) {
core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
for (i = 0; i < info->configured; i++) {
core = &info->core[i];
if (core->core_id == core_id) {
nr += smp_add_core(core, &avail, true, early);
break;
}
}
}
for (i = 0; i < info->combined; i++) {
configured = i < info->configured;
nr += smp_add_core(&info->core[i], &avail, configured, early);
}
return nr;
}
......@@ -808,7 +836,7 @@ void __init smp_detect_cpus(void)
/* Add CPUs present at boot */
get_online_cpus();
__smp_rescan_cpus(info, 0);
__smp_rescan_cpus(info, true);
put_online_cpus();
memblock_free_early((unsigned long)info, sizeof(*info));
}
......@@ -1153,7 +1181,7 @@ int __ref smp_rescan_cpus(void)
smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
nr = __smp_rescan_cpus(info, 1);
nr = __smp_rescan_cpus(info, false);
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
kfree(info);
......
......@@ -291,10 +291,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
__remove_pages(zone, start_pfn, nr_pages, altmap);
__remove_pages(start_pfn, nr_pages, altmap);
vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
......@@ -15,8 +15,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(call if_changed_rule,as_o_S)
$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
......