
Commit

Changes representative of linux-4.18.0-193.13.2.el8_2.tar.xz
da-x committed Jul 13, 2020
1 parent 3ecb02e commit ffe78a3
Showing 67 changed files with 2,127 additions and 714 deletions.
2 changes: 1 addition & 1 deletion Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 2
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 193.6.3
RHEL_RELEASE = 193.13.2

#
# Early y+1 numbering
9 changes: 9 additions & 0 deletions arch/powerpc/platforms/pseries/iommu.c
@@ -943,6 +943,15 @@ static phys_addr_t ddw_memory_hotplug_max(void)
phys_addr_t max_addr = memory_hotplug_max();
struct device_node *memory;

/*
* The "ibm,pmemory" can appear anywhere in the address space.
* Assuming it is still backed by page structs, set the upper limit
* for the huge DMA window as MAX_PHYSMEM_BITS.
*/
if (of_find_node_by_type(NULL, "ibm,pmemory"))
return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);

for_each_node_by_type(memory, "memory") {
unsigned long start, size;
int ranges, n_mem_addr_cells, n_mem_size_cells, len;
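The new branch caps the dynamic DMA window at MAX_PHYSMEM_BITS when an "ibm,pmemory" node is present, and the ternary avoids shifting by the full width of phys_addr_t, which is undefined behaviour in C. A standalone sketch of the same guard, using hypothetical addr_t/LIMIT_BITS stand-ins rather than the kernel's types:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for phys_addr_t and MAX_PHYSMEM_BITS. */
typedef uint64_t addr_t;
#define LIMIT_BITS 46

static addr_t upper_limit(void)
{
	/*
	 * Shifting a value by >= its bit width is undefined, and a type
	 * no wider than the limit cannot exceed it anyway, so return
	 * all ones in that case instead of computing 1 << LIMIT_BITS.
	 */
	if (sizeof(addr_t) * 8 <= LIMIT_BITS)
		return (addr_t)-1;
	return (addr_t)1 << LIMIT_BITS;
}

int main(void)
{
	printf("DMA window limit: %#llx\n", (unsigned long long)upper_limit());
	return 0;
}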
2 changes: 1 addition & 1 deletion arch/s390/pci/pci_dma.c
@@ -404,7 +404,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t map;

size = PAGE_ALIGN(size);
page = alloc_pages(flag, get_order(size));
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;

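The one-flag change above ORs __GFP_ZERO into the allocation so the coherent buffer handed to the device never carries stale kernel data. A kernel-style sketch of the two equivalent patterns; these are illustrative helpers, not functions from the tree, and they assume lowmem pages so page_address() is valid:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Preferred: let the page allocator hand back already-zeroed pages. */
static struct page *alloc_zeroed(gfp_t flags, unsigned int order)
{
	return alloc_pages(flags | __GFP_ZERO, order);
}

/* Equivalent but easier to get wrong: clear the pages after the fact. */
static struct page *alloc_then_clear(gfp_t flags, unsigned int order)
{
	struct page *page = alloc_pages(flags, order);

	if (page)
		memset(page_address(page), 0, PAGE_SIZE << order);
	return page;
}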
64 changes: 41 additions & 23 deletions arch/x86/boot/compressed/eboot.c
@@ -1,3 +1,4 @@

/* -----------------------------------------------------------------------
*
* Copyright 2011 Intel Corporation; author Matt Fleming
@@ -814,38 +815,55 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status;
}

static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
unsigned long map_size, desc_size, buff_size;
struct efi_boot_memmap boot_map;
efi_memory_desc_t *map;
efi_status_t status;
__u32 nr_desc;

boot_map.map = &map;
boot_map.map_size = &map_size;
boot_map.desc_size = &desc_size;
boot_map.desc_ver = NULL;
boot_map.key_ptr = NULL;
boot_map.buff_size = &buff_size;

status = efi_get_memory_map(sys_table, &boot_map);
if (status != EFI_SUCCESS)
return status;

nr_desc = buff_size / desc_size;

if (nr_desc > ARRAY_SIZE(params->e820_table)) {
u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);

status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
if (status != EFI_SUCCESS)
return status;
}

return EFI_SUCCESS;
}

struct exit_boot_struct {
struct boot_params *boot_params;
struct efi_info *efi;
struct setup_data *e820ext;
__u32 e820ext_size;
bool is64;
};

static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
static bool first = true;
const char *signature;
__u32 nr_desc;
efi_status_t status;
struct exit_boot_struct *p = priv;

if (first) {
nr_desc = *map->buff_size / *map->desc_size;
if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
u32 nr_e820ext = nr_desc -
ARRAY_SIZE(p->boot_params->e820_table);

status = alloc_e820ext(nr_e820ext, &p->e820ext,
&p->e820ext_size);
if (status != EFI_SUCCESS)
return status;
}
first = false;
}

signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));

@@ -868,8 +886,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
{
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
struct setup_data *e820ext;
__u32 e820ext_size;
struct setup_data *e820ext = NULL;
__u32 e820ext_size = 0;
efi_status_t status;
__u32 desc_version;
struct efi_boot_memmap map;
@@ -883,18 +901,18 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
map.buff_size = &buff_size;
priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
priv.e820ext = NULL;
priv.e820ext_size = 0;
priv.is64 = is64;

status = allocate_e820(boot_params, &e820ext, &e820ext_size);
if (status != EFI_SUCCESS)
return status;

/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status != EFI_SUCCESS)
return status;

e820ext = priv.e820ext;
e820ext_size = priv.e820ext_size;
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;

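The new allocate_e820() helper sizes the e820 extension from a preliminary memory map and allocates it before efi_exit_boot_services(); allocating from inside the exit-boot callback would change the memory map and could invalidate the map key. The sizing itself is plain arithmetic, sketched below with made-up firmware numbers: E820_MAX_ENTRIES_ZEROPAGE is the kernel constant behind ARRAY_SIZE(params->e820_table), everything else here is illustrative.

#include <stdio.h>

#define E820_MAX_ENTRIES_ZEROPAGE 128	/* entries in boot_params.e820_table */

int main(void)
{
	unsigned long buff_size = 24576;	/* bytes reported by GetMemoryMap() */
	unsigned long desc_size = 48;		/* firmware's descriptor stride */
	unsigned long nr_desc = buff_size / desc_size;	/* 512 descriptors */

	if (nr_desc > E820_MAX_ENTRIES_ZEROPAGE)
		printf("need an e820ext setup_data for %lu extra entries\n",
		       nr_desc - E820_MAX_ENTRIES_ZEROPAGE);
	else
		printf("everything fits in the built-in e820 table\n");
	return 0;
}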
4 changes: 2 additions & 2 deletions arch/x86/include/asm/kvm_host.h
@@ -670,10 +670,10 @@ struct kvm_vcpu_arch {
bool pvclock_set_guest_stopped_request;

struct {
u8 preempted;
u64 msr_val;
u64 last_steal;
struct gfn_to_hva_cache stime;
struct kvm_steal_time steal;
struct gfn_to_pfn_cache cache;
} st;

u64 tsc_offset;
1 change: 1 addition & 0 deletions arch/x86/include/asm/nospec-branch.h
@@ -235,6 +235,7 @@ enum spectre_v2_mitigation {
enum spectre_v2_user_mitigation {
SPECTRE_V2_USER_NONE,
SPECTRE_V2_USER_STRICT,
SPECTRE_V2_USER_STRICT_PREFERRED,
SPECTRE_V2_USER_PRCTL,
SPECTRE_V2_USER_SECCOMP,
};
1 change: 1 addition & 0 deletions arch/x86/include/asm/pgtable.h
@@ -251,6 +251,7 @@ static inline int pmd_large(pmd_t pte)
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
static inline int pmd_trans_huge(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
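The added NOTE reminds callers that a device-backed (DAX) huge mapping sets _PAGE_DEVMAP as well, so pmd_trans_huge() alone will not report it, while pmd_large() checks only _PAGE_PSE and catches both. A hypothetical combined predicate in the spirit of the comment; it is not a helper from this header and assumes pmd_devmap() is available in the same CONFIG_TRANSPARENT_HUGEPAGE context:

/*
 * Hypothetical helper, not part of pgtable.h: treat both a transparent
 * huge page and a devmap (DAX) mapping as one leaf PMD, as the NOTE
 * above suggests. pmd_large() answers the same question by checking
 * _PAGE_PSE alone.
 */
static inline int pmd_is_leaf(pmd_t pmd)
{
	return pmd_trans_huge(pmd) || pmd_devmap(pmd);
}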
14 changes: 8 additions & 6 deletions arch/x86/kernel/apic/vector.c
@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
bool managed = apicd->is_managed;

/*
* This should never happen. Managed interrupts are not
* migrated except on CPU down, which does not involve the
* cleanup vector. But try to keep the accounting correct
* nevertheless.
* Managed interrupts are usually not migrated away
* from an online CPU, but CPU isolation 'managed_irq'
* can make that happen.
* 1) Activation does not take the isolation into account
* to keep the code simple
* 2) Migration away from an isolated CPU can happen when
* a non-isolated CPU which is in the calculated
* affinity mask comes online.
*/
WARN_ON_ONCE(managed);

trace_vector_free_moved(apicd->irq, cpu, vector, managed);
irq_matrix_free(vector_matrix, cpu, vector, managed);
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
94 changes: 63 additions & 31 deletions arch/x86/kernel/cpu/bugs.c
@@ -62,7 +62,7 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIPB in switch_to() */
/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
@@ -586,7 +586,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
@@ -641,10 +643,11 @@ enum spectre_v2_user_cmd {
};

static const char * const spectre_v2_user_strings[] = {
[SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
[SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
[SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
[SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
[SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
[SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
[SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
[SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
[SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
@@ -756,23 +759,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");

spectre_v2_user_ibpb = mode;
}

/* If enhanced IBRS is enabled no STIPB required */
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
/*
* If enhanced IBRS is enabled or SMT impossible, STIBP is not
* required.
*/
if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;

/*
* If SMT is not possible or STIBP is not available clear the STIPB
* mode.
* At this point, an STIBP mode other than "off" has been set.
* If STIBP support is not being forced, check if STIBP always-on
* is preferred.
*/
if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
if (mode != SPECTRE_V2_USER_STRICT &&
boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
mode = SPECTRE_V2_USER_STRICT_PREFERRED;

/*
* If STIBP is not available, clear the STIBP mode.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;

spectre_v2_user_stibp = mode;

set_mode:
spectre_v2_user = mode;
/* Only print the STIBP mode when SMT possible */
if (smt_possible)
pr_info("%s\n", spectre_v2_user_strings[mode]);
pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
@@ -1049,10 +1065,11 @@ void arch_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);

switch (spectre_v2_user) {
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
case SPECTRE_V2_USER_STRICT_PREFERRED:
update_stibp_strict();
break;
case SPECTRE_V2_USER_PRCTL:
@@ -1291,13 +1308,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
if (spectre_v2_user == SPECTRE_V2_USER_NONE)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
/*
* Indirect branch speculation is always disabled in strict
* mode.
* mode. It can neither be enabled if it was force-disabled
* by a previous prctl call.
*/
if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
task_spec_ib_force_disable(task))
return -EPERM;
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
@@ -1308,9 +1331,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
if (spectre_v2_user == SPECTRE_V2_USER_NONE)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return 0;
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
@@ -1341,7 +1367,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -1372,21 +1399,24 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;

switch (spectre_v2_user) {
case SPECTRE_V2_USER_NONE:
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return PR_SPEC_DISABLE;
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
case SPECTRE_V2_USER_STRICT:
return PR_SPEC_DISABLE;
default:
} else
return PR_SPEC_NOT_AFFECTED;
}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -1626,11 +1656,13 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";

switch (spectre_v2_user) {
switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
return ", STIBP: forced";
case SPECTRE_V2_USER_STRICT_PREFERRED:
return ", STIBP: always-on";
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
if (static_key_enabled(&switch_to_cond_stibp))
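With spectre_v2_user split into separate IBPB and STIBP variables, the per-task interface is unchanged: tasks still query and set indirect-branch speculation through prctl(). A minimal userspace sketch, assuming the uapi headers provide the PR_*_SPECULATION_CTRL and PR_SPEC_* constants:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Returns a PR_SPEC_* bitmask, e.g. PR_SPEC_PRCTL | PR_SPEC_ENABLE. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  0, 0, 0);
	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");
		return 1;
	}
	printf("indirect branch speculation state: %#x\n", (unsigned)state);

	/*
	 * Opt this task out permanently. In the PRCTL/SECCOMP modes this
	 * sticks (task_spec_ib_force_disable); in the STRICT modes it is
	 * already the global policy and the call simply returns 0.
	 */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_FORCE_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}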
