forked from luck/tmp_suning_uos_patched
Merge tag 'for-5.3/dm-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull more device mapper updates from Mike Snitzer:

- Fix a zone state management race in the DM zoned target by
  eliminating the unnecessary DMZ_ACTIVE state.

- A couple of fixes for issues that the DM snapshot target's optional
  discard support added during the first week of the 5.3 merge window.

- Increase the default amount of outstanding IO allowed for each
  dm-kcopyd client and introduce a tunable so users can adjust it.

- Update DM core to use the printk ratelimiting functions rather than
  duplicating them, and in doing so fix an issue where rate-limited
  DMDEBUG_LIMIT() KERN_DEBUG messages generated excessive "callbacks
  suppressed" noise.

* tag 'for-5.3/dm-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: use printk ratelimiting functions
  dm kcopyd: Increase default sub-job size to 512KB
  dm snapshot: fix oversights in optional discard support
  dm zoned: fix zone state management race
commit 3bfe1fc467
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -28,10 +28,27 @@
 
 #include "dm-core.h"
 
-#define SUB_JOB_SIZE	128
 #define SPLIT_COUNT	8
 #define MIN_JOBS	8
-#define RESERVE_PAGES	(DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
+#define DEFAULT_SUB_JOB_SIZE_KB 512
+#define MAX_SUB_JOB_SIZE_KB 1024
+
+static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+
+module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
+
+static unsigned dm_get_kcopyd_subjob_size(void)
+{
+	unsigned sub_job_size_kb;
+
+	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
+						DEFAULT_SUB_JOB_SIZE_KB,
+						MAX_SUB_JOB_SIZE_KB);
+
+	return sub_job_size_kb << 1;
+}
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
@@ -41,6 +58,7 @@ struct dm_kcopyd_client {
 	struct page_list *pages;
 	unsigned nr_reserved_pages;
 	unsigned nr_free_pages;
+	unsigned sub_job_size;
 
 	struct dm_io_client *io_client;
 
@@ -693,8 +711,8 @@ static void segment_complete(int read_err, unsigned long write_err,
 	progress = job->progress;
 	count = job->source.count - progress;
 	if (count) {
-		if (count > SUB_JOB_SIZE)
-			count = SUB_JOB_SIZE;
+		if (count > kc->sub_job_size)
+			count = kc->sub_job_size;
 
 		job->progress += count;
 	}
@@ -821,7 +839,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 	job->master_job = job;
 	job->write_offset = 0;
 
-	if (job->source.count <= SUB_JOB_SIZE)
+	if (job->source.count <= kc->sub_job_size)
 		dispatch_job(job);
 	else {
 		job->progress = 0;
@@ -888,6 +906,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
 {
 	int r;
+	unsigned reserve_pages;
 	struct dm_kcopyd_client *kc;
 
 	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
@@ -912,9 +931,12 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
 		goto bad_workqueue;
 	}
 
+	kc->sub_job_size = dm_get_kcopyd_subjob_size();
+	reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);
+
 	kc->pages = NULL;
 	kc->nr_reserved_pages = kc->nr_free_pages = 0;
-	r = client_reserve_pages(kc, RESERVE_PAGES);
+	r = client_reserve_pages(kc, reserve_pages);
 	if (r)
 		goto bad_client_pages;
 
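The new helper returns the sub-job size in 512-byte sectors: the << 1 shift converts kilobytes to sectors (1 KB = 2 sectors), so the 512 KB default yields 1024-sector sub-jobs where the old hard-coded SUB_JOB_SIZE allowed only 128 sectors (64 KB). Below is a minimal sketch of the clamp-to-range contract this code assumes of __dm_get_module_param(); the sketch is illustrative, not the dm-core implementation:

/*
 * Illustrative only: the clamp-to-range behaviour assumed of
 * __dm_get_module_param(), not the actual dm.c source.
 */
static unsigned sketch_get_module_param(unsigned *module_param,
					unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);

	if (!param)		/* 0 means "use the built-in default" */
		param = def;
	if (param > max)	/* cap at the compiled-in ceiling */
		param = max;

	return param;
}

Since dm-kcopyd is built into dm-mod, the tunable should surface as /sys/module/dm_mod/parameters/kcopyd_subjob_size_kb, and the S_IRUGO | S_IWUSR mode makes it readable by everyone and writable by root at runtime.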
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2072,6 +2072,12 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+		/* Once merging, discards no longer effect change */
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
+	}
+
 	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
 	down_write(&s->lock);
@@ -2331,6 +2337,8 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	if (snap->discard_zeroes_cow) {
 		struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
+		down_read(&_origins_lock);
+
 		(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
 		if (snap_src && snap_dest)
 			snap = snap_src;
@@ -2338,6 +2346,8 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
 		/* All discards are split on chunk_size boundary */
 		limits->discard_granularity = snap->store->chunk_size;
 		limits->max_discard_sectors = snap->store->chunk_size;
+
+		up_read(&_origins_lock);
 	}
 }
 
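The snapshot_merge_map() hunk makes the target complete discards itself once a merge is in progress, using the standard pattern for a DM target map method that finishes a bio rather than remapping it. The sketch below illustrates that pattern with a hypothetical target; only bio_endio() and the DM_MAPIO_* return codes come from the patch:

/*
 * Hypothetical target: the early-completion pattern for a DM ->map()
 * method that ends a bio itself instead of remapping it.
 */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		bio_endio(bio);			/* complete the bio here */
		return DM_MAPIO_SUBMITTED;	/* nothing more for DM core to do */
	}

	return DM_MAPIO_REMAPPED;		/* normal path: remap and submit */
}

The two snapshot_io_hints() hunks wrap the __find_snapshots_sharing_cow() lookup in down_read()/up_read() on _origins_lock, locking that the original discard support had omitted.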
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1601,30 +1601,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
 	return zone;
 }
 
-/*
- * Activate a zone (increment its reference count).
- */
-void dmz_activate_zone(struct dm_zone *zone)
-{
-	set_bit(DMZ_ACTIVE, &zone->flags);
-	atomic_inc(&zone->refcount);
-}
-
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * and clears the active state of the zone once the count reaches 0,
- * indicating that all BIOs to the zone have completed. Returns
- * true if the zone was deactivated.
- */
-void dmz_deactivate_zone(struct dm_zone *zone)
-{
-	if (atomic_dec_and_test(&zone->refcount)) {
-		WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
-		clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
-		smp_mb__after_atomic();
-	}
-}
-
 /*
  * Get the zone mapping a chunk, if the chunk is mapped already.
  * If no mapping exist and the operation is WRITE, a zone is
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -115,7 +115,6 @@ enum {
 	DMZ_BUF,
 
 	/* Zone internal state */
-	DMZ_ACTIVE,
 	DMZ_RECLAIM,
 	DMZ_SEQ_WRITE_ERR,
 };
@@ -128,7 +127,6 @@ enum {
 #define dmz_is_empty(z)		((z)->wp_block == 0)
 #define dmz_is_offline(z)	test_bit(DMZ_OFFLINE, &(z)->flags)
 #define dmz_is_readonly(z)	test_bit(DMZ_READ_ONLY, &(z)->flags)
-#define dmz_is_active(z)	test_bit(DMZ_ACTIVE, &(z)->flags)
 #define dmz_in_reclaim(z)	test_bit(DMZ_RECLAIM, &(z)->flags)
 #define dmz_seq_write_err(z)	test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
 
@@ -188,8 +186,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
 
-void dmz_activate_zone(struct dm_zone *zone);
-void dmz_deactivate_zone(struct dm_zone *zone);
+/*
+ * Activate a zone (increment its reference count).
+ */
+static inline void dmz_activate_zone(struct dm_zone *zone)
+{
+	atomic_inc(&zone->refcount);
+}
+
+/*
+ * Deactivate a zone. This decrements the zone reference counter,
+ * indicating that all BIOs to the zone have completed when the count is 0.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+	atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+	return atomic_read(&zone->refcount);
+}
+
 int dmz_lock_zone_reclaim(struct dm_zone *zone);
 void dmz_unlock_zone_reclaim(struct dm_zone *zone);
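Together, the two dm-zoned hunks collapse a flag-plus-refcount scheme into the refcount alone. The race came from updating DMZ_ACTIVE and zone->refcount as two separate steps, leaving a window in which a concurrent observer could see them disagree; with the refcount as the single source of truth, activation, deactivation, and the activity test can never be inconsistent. A rough before/after sketch (function names are illustrative, not the patched source):

/* Old scheme: two updates with a window between them in which another
 * CPU could observe the flag and the count out of sync. */
static void old_activate_zone(struct dm_zone *zone)
{
	set_bit(DMZ_ACTIVE, &zone->flags);	/* step 1 */
	atomic_inc(&zone->refcount);		/* step 2: racy window */
}

/* New scheme: one atomic counter; "active" simply means refcount > 0,
 * so there is no second piece of state to fall out of sync. */
static bool sketch_zone_is_active(struct dm_zone *zone)
{
	return atomic_read(&zone->refcount) > 0;
}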
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -529,29 +529,20 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
  *---------------------------------------------------------------*/
 #define DM_NAME "device-mapper"
 
-#define DM_RATELIMIT(pr_func, fmt, ...)					\
-do {									\
-	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-									\
-	if (__ratelimit(&rs))						\
-		pr_func(DM_FMT(fmt), ##__VA_ARGS__);			\
-} while (0)
-
 #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
 
 #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
 
 #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
+#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
 #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
+#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
 #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
+#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
 
 #ifdef CONFIG_DM_DEBUG
 #define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
+#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
 #else
 #define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 #define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
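For callers nothing changes: DMERR_LIMIT() and friends keep the same arguments, they just expand to the kernel's pr_*_ratelimited() helpers instead of the home-grown DM_RATELIMIT(). That also addresses the DMDEBUG_LIMIT() problem noted in the pull summary; presumably the old macro charged its ratelimit state even when the KERN_DEBUG message itself was not being emitted, producing the excessive "callbacks suppressed" log lines that the dedicated pr_debug_ratelimited() helper avoids. A minimal caller-side sketch, with a hypothetical DM_MSG_PREFIX and function:

/* Hypothetical caller; only DMERR_LIMIT() itself comes from the patch. */
#define DM_MSG_PREFIX "example"

#include <linux/device-mapper.h>

static void example_report_error(int err)
{
	/* Expands to pr_err_ratelimited("device-mapper: example: I/O error %d\n", err) */
	DMERR_LIMIT("I/O error %d", err);
}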