Commit cf0fc2a
Merge tag 'v4.14.133' into mptcp_v0.94
This is the 4.14.133 stable release

Signed-off-by: Matthieu Baerts <[email protected]>
matttbe committed Jul 12, 2019
2 parents 7501eda + aea8526 commit cf0fc2a
Showing 268 changed files with 1,780 additions and 742 deletions.
3 changes: 1 addition & 2 deletions Documentation/robust-futexes.txt
@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have
the new syscalls yet.

Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
inline function before writing up the syscalls (that function returns
-ENOSYS right now).
inline function before writing up the syscalls.
4 changes: 2 additions & 2 deletions Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 127
SUBLEVEL = 133
EXTRAVERSION =
NAME = Petit Gorille

@@ -650,6 +650,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)

ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
@@ -716,7 +717,6 @@ ifeq ($(cc-name),clang)
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
# Quiet clang warning: comparison of unsigned expression < 0 is always false
KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
4 changes: 4 additions & 0 deletions arch/arc/boot/dts/hsdk.dts
@@ -163,12 +163,16 @@
interrupt-names = "macirq";
phy-mode = "rgmii";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
clock-names = "stmmaceth";
phy-handle = <&phy0>;
resets = <&cgu_rst HSDK_ETH_RESET>;
reset-names = "stmmaceth";

tx-fifo-depth = <4096>;
rx-fifo-depth = <4096>;

mdio {
#address-cells = <1>;
#size-cells = <0>;
14 changes: 10 additions & 4 deletions arch/arc/include/asm/cmpxchg.h
@@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n)))
#define cmpxchg(ptr, o, n) ({ \
(typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n)); \
})

/*
* atomic_cmpxchg is same as cmpxchg
@@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return __xchg_bad_pointer();
}

#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
sizeof(*(ptr))))
#define xchg(ptr, with) ({ \
(typeof(*(ptr)))__xchg((unsigned long)(with), \
(ptr), \
sizeof(*(ptr))); \
})

#endif /* CONFIG_ARC_PLAT_EZNPS */

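The cmpxchg.h hunk above rewrites the cmpxchg() and xchg() wrappers as GNU C statement expressions, ({ ... }). A rough standalone sketch of that macro pattern, compilable with GCC or Clang (the __my_xchg/my_xchg names are invented for the illustration and are not part of the patch):

#include <stdio.h>

static unsigned long __my_xchg(unsigned long with, volatile void *ptr)
{
	volatile unsigned long *p = ptr;
	unsigned long old = *p;

	*p = with;
	return old;
}

/*
 * A statement expression evaluates to its last statement, so the macro
 * still yields a value of the pointed-to type while leaving room for the
 * body to grow into multiple statements.
 */
#define my_xchg(ptr, with) ({						\
	(typeof(*(ptr)))__my_xchg((unsigned long)(with), (ptr));	\
})

int main(void)
{
	unsigned long v = 1;
	unsigned long old = my_xchg(&v, 2UL);

	printf("old=%lu new=%lu\n", old, v);	/* prints old=1 new=2 */
	return 0;
}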
8 changes: 8 additions & 0 deletions arch/arc/kernel/traps.c
@@ -155,3 +155,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)

insterror_is_error(address, regs);
}

/*
* abort() call generated by older gcc for __builtin_trap()
*/
void abort(void)
{
__asm__ __volatile__("trap_s 5\n");
}
13 changes: 8 additions & 5 deletions arch/arc/mm/tlb.c
@@ -902,19 +902,22 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
unsigned int pd0[mmu->ways];
unsigned long flags;
int set;
int set, n_ways = mmu->ways;

n_ways = min(n_ways, 4);
BUG_ON(mmu->ways > 4);

local_irq_save(flags);

/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {

int is_valid, way;
unsigned int pd0[4];

/* read out all the ways of current set */
for (way = 0, is_valid = 0; way < mmu->ways; way++) {
for (way = 0, is_valid = 0; way < n_ways; way++) {
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
@@ -928,14 +931,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
continue;

/* Scan the set for duplicate ways: needs a nested loop */
for (way = 0; way < mmu->ways - 1; way++) {
for (way = 0; way < n_ways - 1; way++) {

int n;

if (!pd0[way])
continue;

for (n = way + 1; n < mmu->ways; n++) {
for (n = way + 1; n < n_ways; n++) {
if (pd0[way] != pd0[n])
continue;

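The tlb.c hunk above replaces a variable-length array on the stack (pd0[mmu->ways]) with a fixed-size array, a clamped loop bound and a BUG_ON() guard. A minimal sketch of that VLA-removal pattern in plain C, with invented names and MAX_WAYS standing in for the hardware limit of 4:

#include <assert.h>

#define MAX_WAYS 4

/* Stand-in for reading one TLB way; the real code reads aux registers. */
static unsigned int read_way(int set, int way)
{
	return (unsigned int)(set * MAX_WAYS + way);
}

static int count_duplicates(int set, int n_ways)
{
	/*
	 * Worst-case sized array instead of "unsigned int pd0[n_ways]",
	 * whose stack footprint would be unknown at compile time; the
	 * assert() plays the role of the kernel's BUG_ON().
	 */
	unsigned int pd0[MAX_WAYS];
	int way, n, dups = 0;

	assert(n_ways <= MAX_WAYS);

	for (way = 0; way < n_ways; way++)
		pd0[way] = read_way(set, way);

	/* Same nested scan shape as the hunk, but it only counts matches. */
	for (way = 0; way < n_ways - 1; way++)
		for (n = way + 1; n < n_ways; n++)
			if (pd0[way] == pd0[n])
				dups++;

	return dups;
}

int main(void)
{
	return count_duplicates(0, MAX_WAYS) ? 1 : 0;
}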
1 change: 1 addition & 0 deletions arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -405,6 +405,7 @@
vqmmc-supply = <&ldo1_reg>;
bus-width = <4>;
cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
no-1-8-v;
};

&mmc2 {
3 changes: 2 additions & 1 deletion arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -15,6 +15,7 @@

#include "common.h"
#include "cpuidle.h"
#include "hardware.h"

static int imx6sx_idle_finish(unsigned long val)
{
@@ -108,7 +109,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
imx_gpc_set_arm_power_up_timing(0xf, 1);
imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
imx_gpc_set_arm_power_down_timing(1, 1);

return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
4 changes: 3 additions & 1 deletion arch/arm64/include/asm/futex.h
@@ -134,7 +134,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
: "memory");
uaccess_disable();

*uval = val;
if (!ret)
*uval = val;

return ret;
}

8 changes: 8 additions & 0 deletions arch/arm64/include/asm/insn.h
@@ -271,6 +271,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@@ -383,6 +384,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register state,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
40 changes: 40 additions & 0 deletions arch/arm64/kernel/insn.c
@@ -793,6 +793,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size)
{
u32 insn = aarch64_insn_get_ldadd_value();

switch (size) {
case AARCH64_INSN_SIZE_32:
case AARCH64_INSN_SIZE_64:
break;
default:
pr_err("%s: unimplemented size encoding %d\n", __func__, size);
return AARCH64_BREAK_FAULT;
}

insn = aarch64_insn_encode_ldst_size(size, insn);

insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
result);

insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
address);

return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size)
{
/*
* STADD is simply encoded as an alias for LDADD with XZR as
* the destination register.
*/
return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
value, size);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy,
8 changes: 6 additions & 2 deletions arch/arm64/kernel/module.c
@@ -32,16 +32,20 @@

void *module_alloc(unsigned long size)
{
u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
gfp_t gfp_mask = GFP_KERNEL;
void *p;

/* Silence the initial allocation */
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
gfp_mask |= __GFP_NOWARN;

if (IS_ENABLED(CONFIG_KASAN))
/* don't exceed the static module region - see below */
module_alloc_end = MODULES_END;

p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
gfp_mask, PAGE_KERNEL_EXEC, 0,
module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));

if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
11 changes: 8 additions & 3 deletions arch/arm64/mm/mmu.c
@@ -899,13 +899,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)

int __init arch_ioremap_pud_supported(void)
{
/* only 4k granule supports level 1 block mappings */
return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
/*
* Only 4k granule supports level 1 block mappings.
* SW table walks can't handle removal of intermediate entries.
*/
return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
!IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
return 1;
/* See arch_ioremap_pud_supported() */
return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
4 changes: 4 additions & 0 deletions arch/arm64/net/bpf_jit.h
@@ -100,6 +100,10 @@
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)

/* LSE atomics */
#define A64_STADD(sf, Rn, Rs) \
aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
28 changes: 19 additions & 9 deletions arch/arm64/net/bpf_jit_comp.c
@@ -330,7 +330,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64;
const bool isdw = BPF_SIZE(code) == BPF_DW;
u8 jmp_cond;
u8 jmp_cond, reg;
s32 jmp_offset;

#define check_imm(bits, imm) do { \
@@ -706,18 +706,28 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
}
break;

/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
/* STX XADD: lock *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
emit(A64_LDXR(isdw, tmp2, tmp), ctx);
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
if (!off) {
reg = dst;
} else {
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
reg = tmp;
}
if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
emit(A64_STADD(isdw, reg, src), ctx);
} else {
emit(A64_LDXR(isdw, tmp2, reg), ctx);
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
}
break;

/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
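The bpf_jit_comp.c hunk above emits a single STADD for BPF_XADD when the CPU advertises LSE atomics, and keeps the LDXR/ADD/STXR/CBNZ retry loop as the fallback. The same trade-off can be seen from C; the toy function below (an analogy, not the JIT code) typically compiles to the exclusive-monitor loop with -march=armv8-a and to a single STADD with -march=armv8-a+lse:

#include <stdint.h>

/* lock *ptr += val with the result discarded - the shape of BPF_STX | BPF_XADD */
void atomic_add_discard(uint64_t *ptr, uint64_t val)
{
	__atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}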
1 change: 1 addition & 0 deletions arch/ia64/mm/numa.c
@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)

return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
EXPORT_SYMBOL(paddr_to_nid);

#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
2 changes: 0 additions & 2 deletions arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status0();
if ((status & 0x2) == 1)
pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
3 changes: 0 additions & 3 deletions arch/mips/kernel/uprobes.c
@@ -112,9 +112,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
*/
aup->resume_epc = regs->cp0_epc + 4;
if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
unsigned long epc;

epc = regs->cp0_epc;
__compute_return_epc_for_insn(regs,
(union mips_instruction) aup->insn[0]);
aup->resume_epc = regs->cp0_epc;
2 changes: 1 addition & 1 deletion arch/mips/mm/mmap.c
@@ -203,7 +203,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)

int __virt_addr_valid(const volatile void *kaddr)
{
unsigned long vaddr = (unsigned long)vaddr;
unsigned long vaddr = (unsigned long)kaddr;

if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
return 0;
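The mmap.c hunk above fixes a self-initialization bug: vaddr was initialized from itself rather than from the kaddr argument, so the range checks below it operated on an indeterminate value. A minimal reproduction of the bug class (PAGE_OFFSET and MAP_BASE are placeholder constants, not the MIPS values):

#include <stdio.h>

#define PAGE_OFFSET 0x80000000UL
#define MAP_BASE    0xc0000000UL

static int virt_addr_valid_buggy(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)vaddr;	/* bug: reads its own uninitialized value */

	return vaddr >= PAGE_OFFSET && vaddr < MAP_BASE;
}

static int virt_addr_valid_fixed(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;	/* use the argument */

	return vaddr >= PAGE_OFFSET && vaddr < MAP_BASE;
}

int main(void)
{
	/* GCC's -Winit-self and Clang's -Wuninitialized both flag the buggy form. */
	const void *p = (const void *)0x90000000UL;

	printf("buggy=%d fixed=%d\n", virt_addr_valid_buggy(p), virt_addr_valid_fixed(p));
	return 0;
}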