lightnvm: remove hybrid ocssd 1.2 support
Now that rrpc has been removed, also remove the hybrid 1.2 support from the core.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 26f76dce60
commit e3e13bcc14
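Background: in the OCSSD 1.2 hybrid mode the device kept the logical-to-physical (L2P) table while the host issued hybrid read/write commands (NVM_OP_HBREAD/NVM_OP_HBWRITE) carrying a logical slba next to the physical address; rrpc was the only in-tree user. The hunks below therefore drop the L2P table plumbing (get_l2p_tbl), the hybrid command structures, the area allocator rrpc used, and the hybrid opcodes.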
@@ -45,12 +45,6 @@ struct nvm_dev_map {
         int nr_chnls;
 };
 
-struct nvm_area {
-        struct list_head list;
-        sector_t begin;
-        sector_t end;   /* end is excluded */
-};
-
 static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
 {
         struct nvm_target *tgt;
@@ -524,35 +518,6 @@ static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
         nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
 }
 
-void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
-                     int len)
-{
-        struct nvm_geo *geo = &dev->geo;
-        struct nvm_dev_map *dev_rmap = dev->rmap;
-        u64 i;
-
-        for (i = 0; i < len; i++) {
-                struct nvm_ch_map *ch_rmap;
-                int *lun_roffs;
-                struct ppa_addr gaddr;
-                u64 pba = le64_to_cpu(entries[i]);
-                u64 diff;
-
-                if (!pba)
-                        continue;
-
-                gaddr = linear_to_generic_addr(geo, pba);
-                ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
-                lun_roffs = ch_rmap->lun_offs;
-
-                diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
-                                (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
-                entries[i] -= cpu_to_le64(diff);
-        }
-}
-EXPORT_SYMBOL(nvm_part_to_tgt);
-
 int nvm_register_tgt_type(struct nvm_tgt_type *tt)
 {
         int ret = 0;
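For reference, nvm_part_to_tgt (removed above) rebased device-global L2P entries into a target's own sector space by subtracting the sector offset that the reverse map records for the entry's channel and LUN. A minimal standalone sketch of the same arithmetic; the geometry and offset values here are hypothetical, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical geometry; real values come from struct nvm_geo */
        uint64_t luns_per_chnl = 4, sec_per_lun = 4096;
        /* reverse-map offsets for this entry's channel and LUN */
        uint64_t ch_off = 2, lun_off = 1;

        /* sector distance between device space and target space */
        uint64_t diff = (ch_off * luns_per_chnl + lun_off) * sec_per_lun;

        uint64_t dev_pba = 40000;       /* device-global sector from the L2P table */
        printf("target-relative sector: %llu\n",
               (unsigned long long)(dev_pba - diff));
        return 0;
}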
@@ -726,112 +691,6 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_submit_io_sync);
 
-int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-                   int nr_ppas)
-{
-        struct nvm_geo *geo = &tgt_dev->geo;
-        struct nvm_rq rqd;
-        int ret;
-
-        memset(&rqd, 0, sizeof(struct nvm_rq));
-
-        rqd.opcode = NVM_OP_ERASE;
-        rqd.flags = geo->plane_mode >> 1;
-
-        ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-        if (ret)
-                return ret;
-
-        ret = nvm_submit_io_sync(tgt_dev, &rqd);
-        if (ret) {
-                pr_err("rrpr: erase I/O submission failed: %d\n", ret);
-                goto free_ppa_list;
-        }
-
-free_ppa_list:
-        nvm_free_rqd_ppalist(tgt_dev, &rqd);
-
-        return ret;
-}
-EXPORT_SYMBOL(nvm_erase_sync);
-
-int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
-                    nvm_l2p_update_fn *update_l2p, void *priv)
-{
-        struct nvm_dev *dev = tgt_dev->parent;
-
-        if (!dev->ops->get_l2p_tbl)
-                return 0;
-
-        return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
-}
-EXPORT_SYMBOL(nvm_get_l2p_tbl);
-
-int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
-{
-        struct nvm_dev *dev = tgt_dev->parent;
-        struct nvm_geo *geo = &dev->geo;
-        struct nvm_area *area, *prev, *next;
-        sector_t begin = 0;
-        sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
-        if (len > max_sectors)
-                return -EINVAL;
-
-        area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
-        if (!area)
-                return -ENOMEM;
-
-        prev = NULL;
-
-        spin_lock(&dev->lock);
-        list_for_each_entry(next, &dev->area_list, list) {
-                if (begin + len > next->begin) {
-                        begin = next->end;
-                        prev = next;
-                        continue;
-                }
-                break;
-        }
-
-        if ((begin + len) > max_sectors) {
-                spin_unlock(&dev->lock);
-                kfree(area);
-                return -EINVAL;
-        }
-
-        area->begin = *lba = begin;
-        area->end = begin + len;
-
-        if (prev) /* insert into sorted order */
-                list_add(&area->list, &prev->list);
-        else
-                list_add(&area->list, &dev->area_list);
-        spin_unlock(&dev->lock);
-
-        return 0;
-}
-EXPORT_SYMBOL(nvm_get_area);
-
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
-{
-        struct nvm_dev *dev = tgt_dev->parent;
-        struct nvm_area *area;
-
-        spin_lock(&dev->lock);
-        list_for_each_entry(area, &dev->area_list, list) {
-                if (area->begin != begin)
-                        continue;
-
-                list_del(&area->list);
-                spin_unlock(&dev->lock);
-                kfree(area);
-                return;
-        }
-        spin_unlock(&dev->lock);
-}
-EXPORT_SYMBOL(nvm_put_area);
-
 void nvm_end_io(struct nvm_rq *rqd)
 {
         struct nvm_tgt_dev *tgt_dev = rqd->dev;
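The removed nvm_get_area implemented a first-fit scan over dev->area_list, a list of [begin, end) areas kept sorted by begin: walk past every area the candidate range would overlap, then stop at the first gap that fits. A minimal sketch of the same scan, assuming a plain sorted array in place of the kernel list (first_fit and its inputs are illustrative, not kernel API):

#include <stdio.h>

struct area { unsigned long begin, end; /* end excluded */ };

/* first fit over areas sorted by begin; returns start sector or -1 */
static long first_fit(struct area *a, int n, unsigned long len,
                      unsigned long max_sectors)
{
        unsigned long begin = 0;
        for (int i = 0; i < n; i++) {
                if (begin + len > a[i].begin) { /* would overlap: skip past */
                        begin = a[i].end;
                        continue;
                }
                break;                          /* gap found before a[i] */
        }
        return (begin + len > max_sectors) ? -1 : (long)begin;
}

int main(void)
{
        struct area used[] = { {0, 100}, {150, 300} };
        printf("%ld\n", first_fit(used, 2, 50, 1000)); /* prints 100 */
        return 0;
}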
@@ -31,27 +31,10 @@
 
 enum nvme_nvm_admin_opcode {
         nvme_nvm_admin_identity = 0xe2,
-        nvme_nvm_admin_get_l2p_tbl = 0xea,
         nvme_nvm_admin_get_bb_tbl = 0xf2,
         nvme_nvm_admin_set_bb_tbl = 0xf1,
 };
 
-struct nvme_nvm_hb_rw {
-        __u8 opcode;
-        __u8 flags;
-        __u16 command_id;
-        __le32 nsid;
-        __u64 rsvd2;
-        __le64 metadata;
-        __le64 prp1;
-        __le64 prp2;
-        __le64 spba;
-        __le16 length;
-        __le16 control;
-        __le32 dsmgmt;
-        __le64 slba;
-};
-
 struct nvme_nvm_ph_rw {
         __u8 opcode;
         __u8 flags;
@@ -80,19 +63,6 @@ struct nvme_nvm_identity {
         __u32 rsvd11[5];
 };
 
-struct nvme_nvm_l2ptbl {
-        __u8 opcode;
-        __u8 flags;
-        __u16 command_id;
-        __le32 nsid;
-        __le32 cdw2[4];
-        __le64 prp1;
-        __le64 prp2;
-        __le64 slba;
-        __le32 nlb;
-        __le16 cdw14[6];
-};
-
 struct nvme_nvm_getbbtbl {
         __u8 opcode;
         __u8 flags;
@@ -139,9 +109,7 @@ struct nvme_nvm_command {
         union {
                 struct nvme_common_command common;
                 struct nvme_nvm_identity identity;
-                struct nvme_nvm_hb_rw hb_rw;
                 struct nvme_nvm_ph_rw ph_rw;
-                struct nvme_nvm_l2ptbl l2p;
                 struct nvme_nvm_getbbtbl get_bb;
                 struct nvme_nvm_setbbtbl set_bb;
                 struct nvme_nvm_erase_blk erase;
@@ -234,11 +202,9 @@ struct nvme_nvm_bb_tbl {
 static inline void _nvme_nvm_check_size(void)
 {
         BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
         BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
         BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
         BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
         BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
         BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
         BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
@@ -332,62 +298,6 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
         return ret;
 }
 
-static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
-                                nvm_l2p_update_fn *update_l2p, void *priv)
-{
-        struct nvme_ns *ns = nvmdev->q->queuedata;
-        struct nvme_nvm_command c = {};
-        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
-        u32 nlb_pr_rq = len / sizeof(u64);
-        u64 cmd_slba = slba;
-        void *entries;
-        int ret = 0;
-
-        c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
-        c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
-        entries = kmalloc(len, GFP_KERNEL);
-        if (!entries)
-                return -ENOMEM;
-
-        while (nlb) {
-                u32 cmd_nlb = min(nlb_pr_rq, nlb);
-                u64 elba = slba + cmd_nlb;
-
-                c.l2p.slba = cpu_to_le64(cmd_slba);
-                c.l2p.nlb = cpu_to_le32(cmd_nlb);
-
-                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
-                                (struct nvme_command *)&c, entries, len);
-                if (ret) {
-                        dev_err(ns->ctrl->device,
-                                "L2P table transfer failed (%d)\n", ret);
-                        ret = -EIO;
-                        goto out;
-                }
-
-                if (unlikely(elba > nvmdev->total_secs)) {
-                        pr_err("nvm: L2P data from device is out of bounds!\n");
-                        ret = -EINVAL;
-                        goto out;
-                }
-
-                /* Transform physical address to target address space */
-                nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
-
-                if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
-                        ret = -EINTR;
-                        goto out;
-                }
-
-                cmd_slba += cmd_nlb;
-                nlb -= cmd_nlb;
-        }
-
-out:
-        kfree(entries);
-        return ret;
-}
-
 static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                 u8 *blks)
 {
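The removed nvme_nvm_get_l2p_tbl paged the table from the device in chunks bounded by the admin queue's max_hw_sectors, advancing slba and decrementing nlb until the whole range was transferred. The loop shape as a minimal standalone sketch; fetch_chunk stands in for the admin command and is hypothetical:

#include <stdio.h>
#include <stdint.h>

/* hypothetical transfer callback standing in for the sync admin command */
static int fetch_chunk(uint64_t slba, uint32_t nlb, uint64_t *out)
{
        for (uint32_t i = 0; i < nlb; i++)
                out[i] = slba + i;      /* dummy identity mapping */
        return 0;
}

int main(void)
{
        uint64_t buf[256];
        uint32_t per_rq = 256;  /* entries per request: len / sizeof(u64) */
        uint64_t slba = 0;
        uint32_t nlb = 1000;    /* total entries wanted */

        while (nlb) {
                uint32_t chunk = nlb < per_rq ? nlb : per_rq;
                if (fetch_chunk(slba, chunk, buf))
                        return 1;
                /* ... update_l2p(slba, chunk, buf, priv) would run here ... */
                slba += chunk;
                nlb -= chunk;
        }
        printf("done\n");
        return 0;
}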
@@ -474,10 +384,6 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
         c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
         c->ph_rw.control = cpu_to_le16(rqd->flags);
         c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-
-        if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
-                c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
-                                        rqd->bio->bi_iter.bi_sector));
 }
 
 static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
@@ -597,8 +503,6 @@ static void nvme_nvm_dev_dma_free(void *pool, void *addr,
 static struct nvm_dev_ops nvme_nvm_dev_ops = {
         .identity = nvme_nvm_identity,
 
-        .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
-
         .get_bb_tbl = nvme_nvm_get_bb_tbl,
         .set_bb_tbl = nvme_nvm_set_bb_tbl,
 
@@ -50,10 +50,7 @@ struct nvm_id;
 struct nvm_dev;
 struct nvm_tgt_dev;
 
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
 typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
-                                nvm_l2p_update_fn *, void *);
 typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
 typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
 typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
 
 struct nvm_dev_ops {
         nvm_id_fn *identity;
-        nvm_get_l2p_tbl_fn *get_l2p_tbl;
         nvm_op_bb_tbl_fn *get_bb_tbl;
         nvm_op_set_bb_fn *set_bb_tbl;
 
@@ -112,8 +108,6 @@ enum {
         NVM_RSP_WARN_HIGHECC = 0x4700,
 
         /* Device opcodes */
-        NVM_OP_HBREAD = 0x02,
-        NVM_OP_HBWRITE = 0x81,
         NVM_OP_PWRITE = 0x91,
         NVM_OP_PREAD = 0x92,
         NVM_OP_ERASE = 0x90,
@@ -346,36 +340,6 @@ struct nvm_dev {
         struct list_head targets;
 };
 
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
-                                                     u64 pba)
-{
-        struct ppa_addr l;
-        int secs, pgs, blks, luns;
-        sector_t ppa = pba;
-
-        l.ppa = 0;
-
-        div_u64_rem(ppa, geo->sec_per_pg, &secs);
-        l.g.sec = secs;
-
-        sector_div(ppa, geo->sec_per_pg);
-        div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
-        l.g.pg = pgs;
-
-        sector_div(ppa, geo->pgs_per_blk);
-        div_u64_rem(ppa, geo->blks_per_lun, &blks);
-        l.g.blk = blks;
-
-        sector_div(ppa, geo->blks_per_lun);
-        div_u64_rem(ppa, geo->luns_per_chnl, &luns);
-        l.g.lun = luns;
-
-        sector_div(ppa, geo->luns_per_chnl);
-        l.g.ch = ppa;
-
-        return l;
-}
-
 static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
                                                   struct ppa_addr r)
 {
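The removed linear_to_generic_addr unpacked a linear PBA into (ch, lun, blk, pg, sec) with a chain of div/mod by the geometry strides, least-significant field first. The same decomposition as a standalone sketch, with hypothetical geometry values in place of struct nvm_geo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical 1.2 geometry, not from the patch */
        uint64_t sec_per_pg = 4, pgs_per_blk = 256,
                 blks_per_lun = 1024, luns_per_chnl = 4;

        uint64_t pba = 123456789, ppa = pba;
        uint64_t sec = ppa % sec_per_pg;    ppa /= sec_per_pg;
        uint64_t pg  = ppa % pgs_per_blk;   ppa /= pgs_per_blk;
        uint64_t blk = ppa % blks_per_lun;  ppa /= blks_per_lun;
        uint64_t lun = ppa % luns_per_chnl; ppa /= luns_per_chnl;

        /* whatever remains is the channel */
        printf("ch %llu lun %llu blk %llu pg %llu sec %llu\n",
               (unsigned long long)ppa, (unsigned long long)lun,
               (unsigned long long)blk, (unsigned long long)pg,
               (unsigned long long)sec);
        return 0;
}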
@@ -462,17 +426,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
 extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
 extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
 extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
-                           void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
 extern void nvm_end_io(struct nvm_rq *);
 extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
 extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
 
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;
 