libnvdimm: rework region badblocks clearing
Toshi noticed that the new support for region-level badblocks missed the case where errors are cleared due to BTT I/O.

An initial attempt to fix this ran into a "sleeping while atomic" warning due to taking the nvdimm_bus_lock() in the BTT I/O path to satisfy the locking requirements of __nvdimm_bus_badblocks_clear(). However, that lock is not needed since we are not acting on any data that is subject to change under that lock. The badblocks instance has its own internal lock to handle mutations of the error list.

So, to make it clear that we are only acting on region devices, rename __nvdimm_bus_badblocks_clear() to nvdimm_clear_badblocks_regions(). Eliminate the lock and consolidate all support routines for the new nvdimm_account_cleared_poison() in drivers/nvdimm/bus.c. Finally, take the opportunity to clean up some unnecessary casts, make the calling convention of nvdimm_clear_badblocks_regions() clearer by replacing struct resource with the minimal struct clear_badblocks_context, and use the DEVICE_ATTR macro.

Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Reported-by: Toshi Kani <toshi.kani@hpe.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit 23f4984483 (parent 7699a6a36b)
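As a quick illustration of the calling-convention change described above, here is a minimal, self-contained userspace sketch (not kernel code; the struct region type, the clear_region() helper, and the sample values are made up for this example). It mirrors the shape of nvdimm_clear_badblocks_region(): the callback receives a small {phys, cleared} context instead of a struct resource, skips regions that do not contain the cleared range, and converts the cleared byte count into 512-byte sectors.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the kernel types used in the patch. */
struct region {
	uint64_t start;	/* like nd_region->ndr_start */
	uint64_t size;	/* like nd_region->ndr_size */
};

struct clear_badblocks_context {
	uint64_t phys, cleared;	/* cleared range, in bytes */
};

/* Same pattern as nvdimm_clear_badblocks_region(): filter on the
 * region's span, then clear whole 512-byte sectors. */
static int clear_region(const struct region *r,
			const struct clear_badblocks_context *ctx)
{
	uint64_t end = r->start + r->size - 1;

	if (ctx->phys < r->start || ctx->phys + ctx->cleared > end)
		return 0;	/* cleared range is not in this region */

	uint64_t sector = (ctx->phys - r->start) / 512;
	uint64_t count = ctx->cleared / 512;

	printf("clear %llu sectors at sector %llu\n",
	       (unsigned long long)count, (unsigned long long)sector);
	return 0;
}

int main(void)
{
	struct region r = { .start = 0x100000000ULL, .size = 1ULL << 30 };
	struct clear_badblocks_context ctx = {
		.phys = 0x100000200ULL,	/* 512 bytes into the region */
		.cleared = 1024,	/* two 512-byte sectors */
	};

	clear_region(&r, &ctx);	/* prints: clear 2 sectors at sector 1 */
	return 0;
}

Compiled as ordinary C, this prints "clear 2 sectors at sector 1" for the sample values, matching the byte-to-sector arithmetic used in the patch below.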
@@ -172,6 +172,57 @@ void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
 }
 EXPORT_SYMBOL_GPL(nvdimm_region_notify);
 
+struct clear_badblocks_context {
+	resource_size_t phys, cleared;
+};
+
+static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
+{
+	struct clear_badblocks_context *ctx = data;
+	struct nd_region *nd_region;
+	resource_size_t ndr_end;
+	sector_t sector;
+
+	/* make sure device is a region */
+	if (!is_nd_pmem(dev))
+		return 0;
+
+	nd_region = to_nd_region(dev);
+	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
+
+	/* make sure we are in the region */
+	if (ctx->phys < nd_region->ndr_start
+			|| (ctx->phys + ctx->cleared) > ndr_end)
+		return 0;
+
+	sector = (ctx->phys - nd_region->ndr_start) / 512;
+	badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
+
+	return 0;
+}
+
+static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
+		phys_addr_t phys, u64 cleared)
+{
+	struct clear_badblocks_context ctx = {
+		.phys = phys,
+		.cleared = cleared,
+	};
+
+	device_for_each_child(&nvdimm_bus->dev, &ctx,
+			nvdimm_clear_badblocks_region);
+}
+
+static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
+		phys_addr_t phys, u64 cleared)
+{
+	if (cleared > 0)
+		nvdimm_forget_poison(nvdimm_bus, phys, cleared);
+
+	if (cleared > 0 && cleared / 512)
+		nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
+}
+
 long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
 		unsigned int len)
 {
@@ -219,22 +270,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
 	if (cmd_rc < 0)
 		return cmd_rc;
 
-	if (clear_err.cleared > 0)
-		nvdimm_forget_poison(nvdimm_bus, phys, clear_err.cleared);
+	nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
 
 	return clear_err.cleared;
 }
 EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
 
-void __nvdimm_bus_badblocks_clear(struct nvdimm_bus *nvdimm_bus,
-		struct resource *res)
-{
-	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
-	device_for_each_child(&nvdimm_bus->dev, (void *)res,
-			nvdimm_region_badblocks_clear);
-}
-EXPORT_SYMBOL_GPL(__nvdimm_bus_badblocks_clear);
-
 static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);
 
 static struct bus_type nvdimm_bus_type = {
@@ -989,18 +1030,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 
 	if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
 		struct nd_cmd_clear_error *clear_err = buf;
-		struct resource res;
-
-		if (clear_err->cleared) {
-			/* clearing the poison list we keep track of */
-			nvdimm_forget_poison(nvdimm_bus, clear_err->address,
-					clear_err->cleared);
-
-			/* now sync the badblocks lists */
-			res.start = clear_err->address;
-			res.end = clear_err->address + clear_err->cleared - 1;
-			__nvdimm_bus_badblocks_clear(nvdimm_bus, &res);
-		}
+
+		nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
+				clear_err->cleared);
 	}
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
 
@@ -131,31 +131,6 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
 	device_for_each_child(dev, &event, child_notify);
 }
 
-int nvdimm_region_badblocks_clear(struct device *dev, void *data)
-{
-	struct resource *res = (struct resource *)data;
-	struct nd_region *nd_region;
-	resource_size_t ndr_end;
-	sector_t sector;
-
-	/* make sure device is a region */
-	if (!is_nd_pmem(dev))
-		return 0;
-
-	nd_region = to_nd_region(dev);
-	ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
-
-	/* make sure we are in the region */
-	if (res->start < nd_region->ndr_start || res->end > ndr_end)
-		return 0;
-
-	sector = (res->start - nd_region->ndr_start) >> 9;
-	badblocks_clear(&nd_region->bb, sector, resource_size(res) >> 9);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(nvdimm_region_badblocks_clear);
-
 static struct nd_device_driver nd_region_driver = {
 	.probe = nd_region_probe,
 	.remove = nd_region_remove,
@@ -477,20 +477,15 @@ static ssize_t read_only_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(read_only);
 
-static ssize_t nd_badblocks_show(struct device *dev,
+static ssize_t region_badblocks_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct nd_region *nd_region = to_nd_region(dev);
 
 	return badblocks_show(&nd_region->bb, buf, 0);
 }
-static struct device_attribute dev_attr_nd_badblocks = {
-	.attr = {
-		.name = "badblocks",
-		.mode = S_IRUGO
-	},
-	.show = nd_badblocks_show,
-};
+
+static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
 
 static ssize_t resource_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -514,7 +509,7 @@ static struct attribute *nd_region_attributes[] = {
 	&dev_attr_available_size.attr,
 	&dev_attr_namespace_seed.attr,
 	&dev_attr_init_namespaces.attr,
-	&dev_attr_nd_badblocks.attr,
+	&dev_attr_badblocks.attr,
 	&dev_attr_resource.attr,
 	NULL,
 };
@@ -532,7 +527,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
 		return 0;
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_nd_badblocks.attr)
+	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
 		return 0;
 
 	if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
@@ -162,7 +162,4 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
 u64 nd_fletcher64(void *addr, size_t len, bool le);
 void nvdimm_flush(struct nd_region *nd_region);
 int nvdimm_has_flush(struct nd_region *nd_region);
-int nvdimm_region_badblocks_clear(struct device *dev, void *data);
-void __nvdimm_bus_badblocks_clear(struct nvdimm_bus *nvdimm_bus,
-		struct resource *res);
 #endif /* __LIBNVDIMM_H__ */