Commit
Merge tag 'v4.9.209'
This is the 4.9.209 stable release
negrroo1 committed Jan 19, 2020
2 parents 3ebd778 + 753a4bc commit 32ebde7
Showing 99 changed files with 655 additions and 354 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 208
+SUBLEVEL = 209
 EXTRAVERSION =
 NAME = Roaring Lionus

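Note: VERSION, PATCHLEVEL and SUBLEVEL combine into the kernel release string, so this one-line bump is the whole version change from 4.9.208 to 4.9.209. Out-of-tree code that must gate on a stable point release usually tests the generated version code; a minimal sketch (the error message is illustrative):

#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 209)
#error "this code assumes fixes that first shipped in 4.9.209"
#endif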
2 changes: 1 addition & 1 deletion arch/arm/boot/dts/am437x-gp-evm.dts
@@ -79,7 +79,7 @@
 	};
 
 	lcd0: display {
-		compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+		compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
 		label = "lcd";
 
 		panel-timing {
2 changes: 1 addition & 1 deletion arch/arm/boot/dts/am43x-epos-evm.dts
@@ -41,7 +41,7 @@
 	};
 
 	lcd0: display {
-		compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+		compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
 		label = "lcd";
 
 		panel-timing {
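Note: both AM437x boards swap lcd0 to the corrected OSD panel model; the trailing "panel-dpi" compatible is the generic fallback that drivers bind against. For reference, a hedged sketch of how such compatible strings are consumed through an of_device_id table (the table name is illustrative, not the real panel driver):

#include <linux/mod_devicetable.h>

/* Matching walks the node's compatible list, most specific entry first,
 * so "osddisplays,osd070t1718-19ts" wins over the "panel-dpi" fallback. */
static const struct of_device_id example_panel_of_match[] = {
	{ .compatible = "osddisplays,osd070t1718-19ts" },
	{ .compatible = "panel-dpi" },
	{ /* sentinel */ }
};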
12 changes: 11 additions & 1 deletion arch/arm/mach-vexpress/spc.c
@@ -555,8 +555,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
 static int __init ve_spc_clk_init(void)
 {
-	int cpu;
+	int cpu, cluster;
 	struct clk *clk;
+	bool init_opp_table[MAX_CLUSTERS] = { false };
 
 	if (!info)
 		return 0; /* Continue only if SPC is initialised */
@@ -582,8 +583,17 @@ static int __init ve_spc_clk_init(void)
 			continue;
 		}
 
+		cluster = topology_physical_package_id(cpu_dev->id);
+		if (init_opp_table[cluster])
+			continue;
+
 		if (ve_init_opp_table(cpu_dev))
 			pr_warn("failed to initialise cpu%d opp table\n", cpu);
+		else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
+			 topology_core_cpumask(cpu_dev->id)))
+			pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
+		else
+			init_opp_table[cluster] = true;
 	}
 
 	platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
5 changes: 2 additions & 3 deletions arch/arm64/include/asm/pgtable-prot.h
@@ -77,13 +77,12 @@
 #define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
 
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_COPY
 #define __P011  PAGE_COPY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_COPY_EXEC
 #define __P111  PAGE_COPY_EXEC
@@ -92,7 +91,7 @@
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
10 changes: 3 additions & 7 deletions arch/arm64/include/asm/pgtable.h
@@ -84,12 +84,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \
@@ -105,8 +101,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
 	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
2 changes: 1 addition & 1 deletion arch/arm64/mm/fault.c
@@ -327,7 +327,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	struct vm_area_struct *vma = NULL;
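Note: the three arm64 hunks above belong together: they revert execute-only user mappings. PAGE_EXECONLY is gone, the __P100/__S100 protection slots fall back to PAGE_READONLY_EXEC, and instruction aborts on VM_EXEC VMAs count as permitted accesses again. A userspace sketch of the visible effect (illustrative, error handling omitted):

#include <sys/mman.h>

int main(void)
{
	/* After the revert, a PROT_EXEC-only anonymous mapping on arm64
	 * is effectively readable as well, because __P100/__S100 now
	 * select PAGE_READONLY_EXEC rather than an execute-only
	 * protection. */
	void *p = mmap(NULL, 4096, PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED;
}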
20 changes: 19 additions & 1 deletion arch/mips/include/asm/thread_info.h
@@ -52,8 +52,26 @@ struct thread_info {
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
 register struct thread_info *__current_thread_info __asm__("$28");
+#endif
 
 static inline struct thread_info *current_thread_info(void)
 {
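Note: the construct being guarded here is a GCC/Clang global register variable, which pins a file-scope identifier to a hardware register so that every read compiles to a plain register move. A minimal MIPS-targeted sketch of the pattern (it mirrors the declaration above rather than adding anything new):

struct thread_info;

/* Binds the name to $28 ($gp). Under __VDSO__ the header instead
 * declares it extern, so any stray reference fails at link time. */
register struct thread_info *__current_thread_info __asm__("$28");

static inline struct thread_info *current_thread_info(void)
{
	return __current_thread_info;
}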
10 changes: 8 additions & 2 deletions arch/parisc/include/asm/cmpxchg.h
@@ -43,8 +43,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
 **		if (((unsigned long)p & 0xf) == 0)
 **			return __ldcw(p);
 */
-#define xchg(ptr, x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, x)							\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	__typeof__(*(ptr)) _x_ = (x);					\
+	__ret = (__typeof__(*(ptr)))					\
+		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
+	__ret;								\
+})
 
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
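Note: the reworked xchg() wraps the cast in a GNU C statement expression so the x argument is evaluated exactly once and the result keeps the pointed-to type. A standalone userspace illustration of the single-evaluation idiom (the ONCE() macro is hypothetical):

#include <stdio.h>

/* Same shape as the patched xchg(): copy the argument into a local
 * inside ({ ... }) so any side effects happen exactly once. */
#define ONCE(x)				\
({					\
	__typeof__(x) _x_ = (x);	\
	_x_;				\
})

int main(void)
{
	int i = 0;
	int v = ONCE(i++);		/* i++ is evaluated a single time */

	printf("v=%d i=%d\n", v, i);	/* prints v=0 i=1 */
	return 0;
}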
8 changes: 8 additions & 0 deletions arch/powerpc/mm/mem.c
@@ -345,6 +345,14 @@ void __init mem_init(void)
 	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
 
 #ifdef CONFIG_SWIOTLB
+	/*
+	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
+	 * 4G. We force memblock to bottom-up mode to ensure that the
+	 * memory allocated in swiotlb_init() is DMA-able.
+	 * As it's the last memblock allocation, no need to reset it
+	 * back to top-down.
+	 */
+	memblock_set_bottom_up(true);
 	swiotlb_init(0);
 #endif
 
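Note: memblock normally serves allocations top-down, from the highest usable addresses, so the direction flag has to be flipped before the allocation it is meant to steer. A kernel-side sketch of the ordering (illustrative only):

memblock_set_bottom_up(true);	/* subsequent allocations come from low RAM */
swiotlb_init(0);		/* bounce buffer lands below the DMA limit */
/* memblock_set_bottom_up(false) would restore top-down mode, but as the
 * comment notes this is the last memblock allocation of boot. */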
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/pseries/hvconsole.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(hvc_get_chars);
  * @vtermno: The vtermno or unit_address of the adapter from which the data
  *	originated.
  * @buf: The character buffer that contains the character data to send to
- *	firmware.
+ *	firmware. Must be at least 16 bytes, even if count is less than 16.
  * @count: Send this number of characters.
  */
 int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
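Note: the new sentence on @buf matters to callers because the hypervisor call behind hvc_put_chars() consumes the buffer as two 64-bit words regardless of count (an assumption worth checking against the hcall in use). A hedged caller-side sketch:

static void send_short_string(uint32_t vtermno)
{
	char lbuf[16] = { 0 };	/* 16 bytes even for a 3-byte write */

	memcpy(lbuf, "ok\n", 3);
	hvc_put_chars(vtermno, lbuf, 3);
}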
22 changes: 16 additions & 6 deletions arch/s390/kernel/perf_cpum_sf.c
@@ -1295,18 +1295,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
 		 */
 		if (flush_all && done)
 			break;
 
-		/* If an event overflow happened, discard samples by
-		 * processing any remaining sample-data-blocks.
-		 */
-		if (event_overflow)
-			flush_all = 1;
 	}
 
 	/* Account sample overflows in the event hardware structure */
 	if (sampl_overflow)
 		OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
 						 sampl_overflow, 1 + num_sdb);
 
+	/* Perf_event_overflow() and perf_event_account_interrupt() limit
+	 * the interrupt rate to an upper limit. Roughly 1000 samples per
+	 * task tick.
+	 * Hitting this limit results in a large number
+	 * of throttled REF_REPORT_THROTTLE entries and the samples
+	 * are dropped.
+	 * Slightly increase the interval to avoid hitting this limit.
+	 */
+	if (event_overflow) {
+		SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+		debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+				    __func__,
+				    DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+	}
+
 	if (sampl_overflow || event_overflow)
 		debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
 				    "overflow stats: sample=%llu event=%llu\n",
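Note: the 10% back-off leans on the kernel's round-up division helper. A worked instance of the arithmetic (values illustrative):

/* From include/linux/kernel.h: */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* With SAMPL_RATE(hwc) == 12345, the adjustment is
 * DIV_ROUND_UP(12345, 10) == 1235, giving a new interval of 13580:
 * roughly 10% longer, and always at least 1 even for tiny rates. */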
80 changes: 54 additions & 26 deletions arch/s390/kernel/smp.c
@@ -691,39 +691,67 @@ static struct sclp_core_info *smp_get_core_info(void)
 
 static int smp_add_present_cpu(int cpu);
 
-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+			bool configured, bool early)
 {
 	struct pcpu *pcpu;
-	cpumask_t avail;
-	int cpu, nr, i, j;
+	int cpu, nr, i;
 	u16 address;
 
 	nr = 0;
-	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
-	cpu = cpumask_first(&avail);
-	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
-		if (sclp.has_core_type && info->core[i].type != boot_core_type)
+	if (sclp.has_core_type && core->type != boot_core_type)
+		return nr;
+	cpu = cpumask_first(avail);
+	address = core->core_id << smp_cpu_mt_shift;
+	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+		if (pcpu_find_address(cpu_present_mask, address + i))
 			continue;
-		address = info->core[i].core_id << smp_cpu_mt_shift;
-		for (j = 0; j <= smp_cpu_mtid; j++) {
-			if (pcpu_find_address(cpu_present_mask, address + j))
-				continue;
-			pcpu = pcpu_devices + cpu;
-			pcpu->address = address + j;
-			pcpu->state =
-				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
-				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
-			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
-			set_cpu_present(cpu, true);
-			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
-				set_cpu_present(cpu, false);
-			else
-				nr++;
-			cpu = cpumask_next(cpu, &avail);
-			if (cpu >= nr_cpu_ids)
-				break;
-		}
+		pcpu = pcpu_devices + cpu;
+		pcpu->address = address + i;
+		if (configured)
+			pcpu->state = CPU_STATE_CONFIGURED;
+		else
+			pcpu->state = CPU_STATE_STANDBY;
+		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+		set_cpu_present(cpu, true);
+		if (!early && smp_add_present_cpu(cpu) != 0)
+			set_cpu_present(cpu, false);
+		else
+			nr++;
+		cpumask_clear_cpu(cpu, avail);
+		cpu = cpumask_next(cpu, avail);
+	}
+	return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+	struct sclp_core_entry *core;
+	cpumask_t avail;
+	bool configured;
+	u16 core_id;
+	int nr, i;
+
+	nr = 0;
+	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+	/*
+	 * Add IPL core first (which got logical CPU number 0) to make sure
+	 * that all SMT threads get subsequent logical CPU numbers.
+	 */
+	if (early) {
+		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+		for (i = 0; i < info->configured; i++) {
+			core = &info->core[i];
+			if (core->core_id == core_id) {
+				nr += smp_add_core(core, &avail, true, early);
+				break;
+			}
+		}
+	}
+	for (i = 0; i < info->combined; i++) {
+		configured = i < info->configured;
+		nr += smp_add_core(&info->core[i], &avail, configured, early);
+	}
 	return nr;
 }

@@ -771,7 +799,7 @@ static void __init smp_detect_cpus(void)
 
 	/* Add CPUs present at boot */
 	get_online_cpus();
-	__smp_rescan_cpus(info, 0);
+	__smp_rescan_cpus(info, true);
 	put_online_cpus();
 	kfree(info);
 }
@@ -1127,7 +1155,7 @@ int __ref smp_rescan_cpus(void)
 		return -ENOMEM;
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
-	nr = __smp_rescan_cpus(info, 1);
+	nr = __smp_rescan_cpus(info, false);
 	mutex_unlock(&smp_cpu_state_mutex);
 	put_online_cpus();
 	kfree(info);
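Note: logical CPU numbers are handed out in scan order, which is why the IPL core has to be added first. A small userspace model of the core-to-address mapping used above (assuming 2-way SMT, i.e. smp_cpu_mt_shift == 1):

#include <stdio.h>

int main(void)
{
	unsigned int mt_shift = 1;	/* 2 SMT threads per core */
	unsigned int core_id;

	/* Core c owns addresses (c << mt_shift) .. (c << mt_shift) + 1;
	 * scanning the IPL core first keeps logical CPU 0 and its SMT
	 * sibling on consecutive logical numbers. */
	for (core_id = 0; core_id < 3; core_id++)
		printf("core %u -> addresses %u, %u\n", core_id,
		       core_id << mt_shift, (core_id << mt_shift) + 1);
	return 0;
}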
3 changes: 1 addition & 2 deletions arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
  * has an opportunity to return -EFAULT to the user if needed.
  * The 64-bit routines just return a "long long" with the value,
  * since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
  *
  * Note that the caller is advised to issue a suitable L1 or L2
  * prefetch on the address being manipulated to avoid extra stalls.
9 changes: 7 additions & 2 deletions arch/x86/events/core.c
@@ -374,7 +374,7 @@ int x86_add_exclusive(unsigned int what)
 	 * LBR and BTS are still mutually exclusive.
 	 */
 	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
-		return 0;
+		goto out;
 
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
 		mutex_lock(&pmc_reserve_mutex);
@@ -386,6 +386,7 @@ int x86_add_exclusive(unsigned int what)
 		mutex_unlock(&pmc_reserve_mutex);
 	}
 
+out:
 	atomic_inc(&active_events);
 	return 0;
 
@@ -396,11 +397,15 @@ int x86_add_exclusive(unsigned int what)
 
 void x86_del_exclusive(unsigned int what)
 {
+	atomic_dec(&active_events);
+
+	/*
+	 * See the comment in x86_add_exclusive().
+	 */
 	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
 		return;
 
 	atomic_dec(&x86_pmu.lbr_exclusive[what]);
-	atomic_dec(&active_events);
 }
 
 int x86_setup_perfctr(struct perf_event *event)
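Note: the three hunks restore a simple invariant: every x86_add_exclusive() path increments active_events exactly once (hence goto out instead of the early return 0), and x86_del_exclusive() drops it before its own early exit. The pairing in miniature (hypothetical names, standing in for the real PMU bookkeeping):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int active_events;

static int add(bool bypass)
{
	if (!bypass) {
		/* ... take the exclusive slot ... */
	}
	atomic_fetch_add(&active_events, 1);	/* on every add path */
	return 0;
}

static void del(bool bypass)
{
	atomic_fetch_sub(&active_events, 1);	/* mirrors add() exactly */
	if (bypass)
		return;
	/* ... release the exclusive slot ... */
}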
13 changes: 0 additions & 13 deletions arch/x86/include/asm/atomic.h
@@ -249,19 +249,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int atomic_inc_short(short int *v)
-{
-	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
-	return *v;
-}
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
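Note: atomic_inc_short() is removed rather than fixed (its kernel-doc did not even match the signature, and it appears to have had no in-tree users). If portable 16-bit atomic increments are ever needed, C11 covers them; a userspace sketch:

#include <stdatomic.h>

static _Atomic short counter;

static short inc_short(void)
{
	/* atomic 16-bit increment, returning the new value as the old
	 * helper claimed to do */
	return atomic_fetch_add(&counter, 1) + 1;
}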
2 changes: 1 addition & 1 deletion block/blk-map.c
@@ -142,7 +142,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return 0;
 
 unmap_rq:
-	__blk_rq_unmap_user(bio);
+	blk_rq_unmap_user(bio);
 fail:
 	rq->bio = NULL;
 	return ret;
(Diffs for the remaining changed files were not loaded on this page.)
