Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  hfsplus: stop workqueue when fill_super() failed
  mm: don't allow deferred pages with NEED_PER_CPU_KM
  MAINTAINERS: add Q: entry to kselftest for patchwork project
  radix tree: fix multi-order iteration race
  radix tree test suite: multi-order iteration race
  radix tree test suite: add item_delete_rcu()
  radix tree test suite: fix compilation issue
  radix tree test suite: fix mapshift build target
  include/linux/mm.h: add new inline function vmf_error()
  lib/test_bitmap.c: fix bitmap optimisation tests to report errors correctly
commit 73fcb1a370
@@ -7698,6 +7698,7 @@ KERNEL SELFTEST FRAMEWORK
 M:	Shuah Khan <shuah@kernel.org>
 L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
+Q:	https://patchwork.kernel.org/project/linux-kselftest/list/
 S:	Maintained
 F:	tools/testing/selftests/
 F:	Documentation/dev-tools/kselftest*

@@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 	return 0;
 
 out_put_hidden_dir:
+	cancel_delayed_work_sync(&sbi->sync_work);
 	iput(sbi->hidden_dir);
 out_put_root:
 	dput(sb->s_root);

@@ -2466,6 +2466,13 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+static inline vm_fault_t vmf_error(int err)
+{
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	return VM_FAULT_SIGBUS;
+}
+
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			unsigned long address, unsigned int foll_flags,
 			unsigned int *page_mask);

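For context on the new helper: vmf_error() maps a negative errno from an internal helper onto a vm_fault_t, so fault handlers no longer need to open-code the -ENOMEM/VM_FAULT_OOM special case. A minimal sketch of the intended use; the my_fault() and my_fetch_page() names are hypothetical and not part of this patch:

#include <linux/mm.h>

/* Hypothetical ->fault handler: my_fetch_page() stands in for any helper
 * that returns 0 on success or a negative errno such as -ENOMEM or -EIO. */
static vm_fault_t my_fault(struct vm_fault *vmf)
{
	int err = my_fetch_page(vmf);

	if (err)
		return vmf_error(err);	/* -ENOMEM becomes VM_FAULT_OOM, anything else VM_FAULT_SIGBUS */
	return VM_FAULT_NOPAGE;		/* the handler installed the PTE itself */
}
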
@@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
 static void __rcu **skip_siblings(struct radix_tree_node **nodep,
 			void __rcu **slot, struct radix_tree_iter *iter)
 {
-	void *sib = node_to_entry(slot - 1);
-
 	while (iter->index < iter->next_index) {
 		*nodep = rcu_dereference_raw(*slot);
-		if (*nodep && *nodep != sib)
+		if (*nodep && !is_sibling_entry(iter->node, *nodep))
 			return slot;
 		slot++;
 		iter->index = __radix_tree_iter_add(iter, 1);
@@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
 				struct radix_tree_iter *iter, unsigned flags)
 {
 	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
-	struct radix_tree_node *node = rcu_dereference_raw(*slot);
+	struct radix_tree_node *node;
 
 	slot = skip_siblings(&node, slot, iter);
 

@@ -331,23 +331,32 @@ static void noinline __init test_mem_optimisations(void)
 	unsigned int start, nbits;
 
 	for (start = 0; start < 1024; start += 8) {
-		memset(bmap1, 0x5a, sizeof(bmap1));
-		memset(bmap2, 0x5a, sizeof(bmap2));
 		for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+			memset(bmap1, 0x5a, sizeof(bmap1));
+			memset(bmap2, 0x5a, sizeof(bmap2));
+
 			bitmap_set(bmap1, start, nbits);
 			__bitmap_set(bmap2, start, nbits);
-			if (!bitmap_equal(bmap1, bmap2, 1024))
+			if (!bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("set not equal %d %d\n", start, nbits);
-			if (!__bitmap_equal(bmap1, bmap2, 1024))
+				failed_tests++;
+			}
+			if (!__bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("set not __equal %d %d\n", start, nbits);
+				failed_tests++;
+			}
 
 			bitmap_clear(bmap1, start, nbits);
 			__bitmap_clear(bmap2, start, nbits);
-			if (!bitmap_equal(bmap1, bmap2, 1024))
+			if (!bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("clear not equal %d %d\n", start, nbits);
-			if (!__bitmap_equal(bmap1, bmap2, 1024))
+				failed_tests++;
+			}
+			if (!__bitmap_equal(bmap1, bmap2, 1024)) {
 				printk("clear not __equal %d %d\n", start,
 									nbits);
+				failed_tests++;
+			}
 		}
 	}
 }

@@ -636,6 +636,7 @@ config DEFERRED_STRUCT_PAGE_INIT
 	default n
 	depends on NO_BOOTMEM
 	depends on !FLATMEM
+	depends on !NEED_PER_CPU_KM
 	help
 	  Ordinarily all struct pages are initialised during early boot in a
 	  single thread. On very large machines this can take a considerable

@@ -6,8 +6,9 @@
 #include <stdbool.h>
 
 #define spinlock_t		pthread_mutex_t
-#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
+#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER
+#define __SPIN_LOCK_UNLOCKED(x)	(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER
 #define spin_lock_init(x)	pthread_mutex_init(x, NULL)
 
 #define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
 #define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)

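On the DEFINE_SPINLOCK() change: kernel-style callers write the terminating semicolon at the use site, so the old macro body made DEFINE_SPINLOCK(x); expand to a declaration followed by a stray empty statement (...PTHREAD_MUTEX_INITIALIZER;;); dropping the semicolon keeps the expansion to a single declaration, and __SPIN_LOCK_UNLOCKED() gives the userspace shim an initializer matching the kernel macro of the same name. A small compile-check sketch, using a made-up demo_lock that is not in the test suite:

#include <pthread.h>

#define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER

/* Expands to exactly one declaration: pthread_mutex_t demo_lock = ...; */
static DEFINE_SPINLOCK(demo_lock);

int main(void)
{
	pthread_mutex_lock(&demo_lock);
	pthread_mutex_unlock(&demo_lock);
	return 0;
}
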
@@ -17,7 +17,7 @@ ifeq ($(BUILD), 32)
 LDFLAGS += -m32
 endif
 
-targets: mapshift $(TARGETS)
+targets: generated/map-shift.h $(TARGETS)
 
 main:	$(OFILES)
 
@@ -42,9 +42,7 @@ radix-tree.c: ../../../lib/radix-tree.c
 idr.c: ../../../lib/idr.c
 	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
 
-.PHONY: mapshift
-
-mapshift:
+generated/map-shift.h:
 	@if ! grep -qws $(SHIFT) generated/map-shift.h; then \
 		echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
 				generated/map-shift.h; \

@@ -16,6 +16,7 @@
 #include <linux/radix-tree.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <pthread.h>
 
 #include "test.h"
 
@@ -624,6 +625,67 @@ static void multiorder_account(void)
 	item_kill_tree(&tree);
 }
 
+bool stop_iteration = false;
+
+static void *creator_func(void *ptr)
+{
+	/* 'order' is set up to ensure we have sibling entries */
+	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
+	struct radix_tree_root *tree = ptr;
+	int i;
+
+	for (i = 0; i < 10000; i++) {
+		item_insert_order(tree, 0, order);
+		item_delete_rcu(tree, 0);
+	}
+
+	stop_iteration = true;
+	return NULL;
+}
+
+static void *iterator_func(void *ptr)
+{
+	struct radix_tree_root *tree = ptr;
+	struct radix_tree_iter iter;
+	struct item *item;
+	void **slot;
+
+	while (!stop_iteration) {
+		rcu_read_lock();
+		radix_tree_for_each_slot(slot, tree, &iter, 0) {
+			item = radix_tree_deref_slot(slot);
+
+			if (!item)
+				continue;
+			if (radix_tree_deref_retry(item)) {
+				slot = radix_tree_iter_retry(&iter);
+				continue;
+			}
+
+			item_sanity(item, iter.index);
+		}
+		rcu_read_unlock();
+	}
+	return NULL;
+}
+
+static void multiorder_iteration_race(void)
+{
+	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
+	pthread_t worker_thread[num_threads];
+	RADIX_TREE(tree, GFP_KERNEL);
+	int i;
+
+	pthread_create(&worker_thread[0], NULL, &creator_func, &tree);
+	for (i = 1; i < num_threads; i++)
+		pthread_create(&worker_thread[i], NULL, &iterator_func, &tree);
+
+	for (i = 0; i < num_threads; i++)
+		pthread_join(worker_thread[i], NULL);
+
+	item_kill_tree(&tree);
+}
+
 void multiorder_checks(void)
 {
 	int i;
@@ -644,6 +706,7 @@ void multiorder_checks(void)
 	multiorder_join();
 	multiorder_split();
 	multiorder_account();
+	multiorder_iteration_race();
 
 	radix_tree_cpu_dead(0);
 }

@@ -75,6 +75,25 @@ int item_delete(struct radix_tree_root *root, unsigned long index)
 	return 0;
 }
 
+static void item_free_rcu(struct rcu_head *head)
+{
+	struct item *item = container_of(head, struct item, rcu_head);
+
+	free(item);
+}
+
+int item_delete_rcu(struct radix_tree_root *root, unsigned long index)
+{
+	struct item *item = radix_tree_delete(root, index);
+
+	if (item) {
+		item_sanity(item, index);
+		call_rcu(&item->rcu_head, item_free_rcu);
+		return 1;
+	}
+	return 0;
+}
+
 void item_check_present(struct radix_tree_root *root, unsigned long index)
 {
 	struct item *item;

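item_delete_rcu() follows the usual RCU deletion pattern: unlink the item from the tree first, then defer the free with call_rcu() so lockless readers that still hold a pointer finish their read-side critical sections before the memory is reused. A rough usage sketch in the test-suite environment; the example_tree name and the reader/updater pairing are illustrative only, not part of this patch:

#include <linux/radix-tree.h>
#include "test.h"

RADIX_TREE(example_tree, GFP_KERNEL);

void reader(void)
{
	struct item *item;

	rcu_read_lock();
	item = radix_tree_lookup(&example_tree, 0);
	if (item)
		item_sanity(item, 0);	/* cannot be freed before rcu_read_unlock() */
	rcu_read_unlock();
}

void updater(void)
{
	item_insert(&example_tree, 0);
	item_delete_rcu(&example_tree, 0);	/* unlink now, free after a grace period */
}
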
@@ -5,6 +5,7 @@
 #include <linux/rcupdate.h>
 
 struct item {
+	struct rcu_head rcu_head;
 	unsigned long index;
 	unsigned int order;
 };
@@ -12,9 +13,11 @@ struct item {
 struct item *item_create(unsigned long index, unsigned int order);
 int __item_insert(struct radix_tree_root *root, struct item *item);
 int item_insert(struct radix_tree_root *root, unsigned long index);
+void item_sanity(struct item *item, unsigned long index);
 int item_insert_order(struct radix_tree_root *root, unsigned long index,
 			unsigned order);
 int item_delete(struct radix_tree_root *root, unsigned long index);
+int item_delete_rcu(struct radix_tree_root *root, unsigned long index);
 struct item *item_lookup(struct radix_tree_root *root, unsigned long index);
 
 void item_check_present(struct radix_tree_root *root, unsigned long index);