scsi: bsg: refactor bsg_ioctl
Move all actual functionality into helpers, just leaving the dispatch in
this function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Benjamin Block <bblock@linux.ibm.com>
Tested-by: Benjamin Block <bblock@linux.ibm.com>
Tested-by: Avri Altman <avri.altman@wdc.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
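The shape of the result is easiest to see in the dispatcher itself. The sketch below is condensed from the diff that follows (the SCSI/sg pass-through cases are elided and the comments are added here for orientation); it is not a verbatim copy of the resulting block/bsg.c:

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	void __user *uarg = (void __user *) arg;

	switch (cmd) {
	case SG_GET_COMMAND_Q:		/* was an open-coded put_user() */
		return bsg_get_command_q(bd, uarg);
	case SG_SET_COMMAND_Q:		/* was an open-coded get_user() plus locked update */
		return bsg_set_command_q(bd, uarg);
	/* ... SCSI/sg cases forwarded to scsi_cmd_ioctl() elided ... */
	case SG_IO:			/* was bsg_map_hdr() + blk_complete_sgv4_hdr_rq() inline */
		return bsg_sg_io(bd->queue, file->f_mode, uarg);
	default:
		return -ENOTTY;
	}
}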
parent c6ded86a16
commit ccf3209f00

block/bsg.c | 162
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -138,32 +138,35 @@ static const struct bsg_ops bsg_scsi_ops = {
 	.free_rq		= bsg_scsi_free_rq,
 };
 
-static struct request *
-bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
+static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
 {
 	struct request *rq, *next_rq = NULL;
+	struct bio *bio, *bidi_bio = NULL;
+	struct sg_io_v4 hdr;
 	int ret;
 
+	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+		return -EFAULT;
 	if (!q->bsg_dev.class_dev)
-		return ERR_PTR(-ENXIO);
+		return -ENXIO;
 
-	if (hdr->guard != 'Q')
-		return ERR_PTR(-EINVAL);
+	if (hdr.guard != 'Q')
+		return -EINVAL;
 
-	ret = q->bsg_dev.ops->check_proto(hdr);
+	ret = q->bsg_dev.ops->check_proto(&hdr);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
-	rq = blk_get_request(q, hdr->dout_xfer_len ?
+	rq = blk_get_request(q, hdr.dout_xfer_len ?
 			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
 	if (IS_ERR(rq))
-		return rq;
+		return PTR_ERR(rq);
 
-	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
+	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
 	if (ret)
 		goto out;
 
-	rq->timeout = msecs_to_jiffies(hdr->timeout);
+	rq->timeout = msecs_to_jiffies(hdr.timeout);
 	if (!rq->timeout)
 		rq->timeout = q->sg_timeout;
 	if (!rq->timeout)
@@ -171,7 +174,7 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
 	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
 		rq->timeout = BLK_MIN_SG_TIMEOUT;
 
-	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+	if (hdr.dout_xfer_len && hdr.din_xfer_len) {
 		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
 			ret = -EOPNOTSUPP;
 			goto out;
@@ -188,23 +191,42 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
 		}
 
 		rq->next_rq = next_rq;
-		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
-				      hdr->din_xfer_len, GFP_KERNEL);
+		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr.din_xferp),
+				      hdr.din_xfer_len, GFP_KERNEL);
 		if (ret)
 			goto out_free_nextrq;
 	}
 
-	if (hdr->dout_xfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
-				hdr->dout_xfer_len, GFP_KERNEL);
-	} else if (hdr->din_xfer_len) {
-		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
-				hdr->din_xfer_len, GFP_KERNEL);
+	if (hdr.dout_xfer_len) {
+		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
+				hdr.dout_xfer_len, GFP_KERNEL);
+	} else if (hdr.din_xfer_len) {
+		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
+				hdr.din_xfer_len, GFP_KERNEL);
 	}
 
 	if (ret)
 		goto out_unmap_nextrq;
-	return rq;
+
+	bio = rq->bio;
+	if (rq->next_rq)
+		bidi_bio = rq->next_rq->bio;
+
+	blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
+	ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
+
+	if (rq->next_rq) {
+		blk_rq_unmap_user(bidi_bio);
+		blk_put_request(rq->next_rq);
+	}
+
+	blk_rq_unmap_user(bio);
+	rq->q->bsg_dev.ops->free_rq(rq);
+	blk_put_request(rq);
+
+	if (copy_to_user(uarg, &hdr, sizeof(hdr)))
+		return -EFAULT;
+	return ret;
 
 out_unmap_nextrq:
 	if (rq->next_rq)
@@ -215,24 +237,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
 out:
 	q->bsg_dev.ops->free_rq(rq);
 	blk_put_request(rq);
-	return ERR_PTR(ret);
-}
-
-static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
-				    struct bio *bio, struct bio *bidi_bio)
-{
-	int ret;
-
-	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);
-
-	if (rq->next_rq) {
-		blk_rq_unmap_user(bidi_bio);
-		blk_put_request(rq->next_rq);
-	}
-
-	blk_rq_unmap_user(bio);
-	rq->q->bsg_dev.ops->free_rq(rq);
-	blk_put_request(rq);
 	return ret;
 }
 
@@ -367,31 +371,39 @@ static int bsg_release(struct inode *inode, struct file *file)
 	return bsg_put_device(bd);
 }
 
+static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
+{
+	return put_user(bd->max_queue, uarg);
+}
+
+static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
+{
+	int queue;
+
+	if (get_user(queue, uarg))
+		return -EFAULT;
+	if (queue < 1)
+		return -EINVAL;
+
+	spin_lock_irq(&bd->lock);
+	bd->max_queue = queue;
+	spin_unlock_irq(&bd->lock);
+	return 0;
+}
+
 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct bsg_device *bd = file->private_data;
-	int __user *uarg = (int __user *) arg;
-	int ret;
+	void __user *uarg = (void __user *) arg;
 
 	switch (cmd) {
 	/*
-	 * our own ioctls
+	 * Our own ioctls
	 */
 	case SG_GET_COMMAND_Q:
-		return put_user(bd->max_queue, uarg);
-	case SG_SET_COMMAND_Q: {
-		int queue;
-
-		if (get_user(queue, uarg))
-			return -EFAULT;
-		if (queue < 1)
-			return -EINVAL;
-
-		spin_lock_irq(&bd->lock);
-		bd->max_queue = queue;
-		spin_unlock_irq(&bd->lock);
-		return 0;
-	}
+		return bsg_get_command_q(bd, uarg);
+	case SG_SET_COMMAND_Q:
+		return bsg_set_command_q(bd, uarg);
 
 	/*
 	 * SCSI/sg ioctls
@@ -404,36 +416,10 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case SG_GET_RESERVED_SIZE:
 	case SG_SET_RESERVED_SIZE:
 	case SG_EMULATED_HOST:
-	case SCSI_IOCTL_SEND_COMMAND: {
-		void __user *uarg = (void __user *) arg;
+	case SCSI_IOCTL_SEND_COMMAND:
 		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
-	}
-	case SG_IO: {
-		struct request *rq;
-		struct bio *bio, *bidi_bio = NULL;
-		struct sg_io_v4 hdr;
-		int at_head;
-
-		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
-			return -EFAULT;
-
-		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
-
-		bio = rq->bio;
-		if (rq->next_rq)
-			bidi_bio = rq->next_rq->bio;
-
-		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
-		blk_execute_rq(bd->queue, NULL, rq, at_head);
-		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
-
-		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
-			return -EFAULT;
-
-		return ret;
-	}
+	case SG_IO:
+		return bsg_sg_io(bd->queue, file->f_mode, uarg);
 	default:
 		return -ENOTTY;
 	}