47ec7f09bc
The cookie tracking in dmaengine expects all submissions to be completed in order. Some DMA devices, like Intel DSA, can complete submissions out of order, especially if configured with a work queue sharing multiple DMA engines. Add a status DMA_OUT_OF_ORDER that tx_status can return for those DMA devices. The user should use callbacks to track completion rather than the DMA cookie. This addresses the issue of dmatest complaining that descriptors are "busy" when the cookie count goes backwards due to out-of-order completion.

Add a DMA_COMPLETION_NO_ORDER DMA capability to allow the driver to flag the device's ability to complete operations out of order.

Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Link: https://lore.kernel.org/r/158939557151.20335.12404113976045569870.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
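On a channel whose device sets DMA_COMPLETION_NO_ORDER, polling the cookie (for example via dma_async_is_tx_complete()) is meaningless: the driver's tx_status callback simply returns DMA_OUT_OF_ORDER, as idxd_dma_tx_status() does in the file below. A client therefore has to track completion through the descriptor callback. The following is a minimal sketch of such a client, not part of this patch; the names my_copy_ctx, my_copy_cb and my_issue_copy are made up for illustration.

#include <linux/completion.h>
#include <linux/dmaengine.h>

/* Illustrative only: per-transfer context handed to the callback. */
struct my_copy_ctx {
        struct completion done;
        enum dmaengine_tx_result result;
};

/* Completion callback; runs when the device reports the descriptor done. */
static void my_copy_cb(void *param, const struct dmaengine_result *res)
{
        struct my_copy_ctx *ctx = param;

        ctx->result = res->result;
        complete(&ctx->done);
}

/* Submit one memcpy and wait on the callback instead of the cookie. */
static int my_issue_copy(struct dma_chan *chan, dma_addr_t dst,
                         dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        struct my_copy_ctx ctx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                       DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        init_completion(&ctx.done);
        tx->callback_result = my_copy_cb;
        tx->callback_param = &ctx;

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);

        /*
         * The cookie cannot be polled on a DMA_COMPLETION_NO_ORDER
         * channel; wait for the callback to fire instead.
         */
        wait_for_completion(&ctx.done);

        return ctx.result == DMA_TRANS_NOERROR ? 0 : -EIO;
}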
219 lines · 5.3 KiB · C
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
        return container_of(c, struct idxd_wq, dma_chan);
}

void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type)
{
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_result res;
        int complete = 1;

        if (desc->completion->status == DSA_COMP_SUCCESS)
                res.result = DMA_TRANS_NOERROR;
        else if (desc->completion->status)
                res.result = DMA_TRANS_WRITE_FAILED;
        else if (comp_type == IDXD_COMPLETE_ABORT)
                res.result = DMA_TRANS_ABORTED;
        else
                complete = 0;

        tx = &desc->txd;
        if (complete && tx->cookie) {
                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, &res);
                tx->callback = NULL;
                tx->callback_result = NULL;
        }
}

static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
        *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
        if (flags & DMA_PREP_INTERRUPT)
                *desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
                                          u64 *compl_addr)
{
        *compl_addr = desc->compl_dma;
}

static inline void idxd_prep_desc_common(struct idxd_wq *wq,
                                         struct dsa_hw_desc *hw, char opcode,
                                         u64 addr_f1, u64 addr_f2, u64 len,
                                         u64 compl, u32 flags)
{
        struct idxd_device *idxd = wq->idxd;

        hw->flags = flags;
        hw->opcode = opcode;
        hw->src_addr = addr_f1;
        hw->dst_addr = addr_f2;
        hw->xfer_size = len;
        hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
        hw->completion_addr = compl;

        /*
         * Descriptor completion vectors are 1-8 for MSIX. We will round
         * robin through the 8 vectors.
         */
        wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
        hw->int_handle = wq->vec_ptr;
}

static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                       dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct idxd_wq *wq = to_idxd_wq(c);
        u32 desc_flags;
        struct idxd_device *idxd = wq->idxd;
        struct idxd_desc *desc;

        if (wq->state != IDXD_WQ_ENABLED)
                return NULL;

        if (len > idxd->max_xfer_bytes)
                return NULL;

        op_flag_setup(flags, &desc_flags);
        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
        if (IS_ERR(desc))
                return NULL;

        idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
                              dma_src, dma_dest, len, desc->compl_dma,
                              desc_flags);

        desc->txd.flags = flags;

        return &desc->txd;
}

static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_get(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
        return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_put(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
}

static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct idxd_wq *wq = to_idxd_wq(c);
        dma_cookie_t cookie;
        int rc;
        struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

        cookie = dma_cookie_assign(tx);

        rc = idxd_submit_desc(wq, desc);
        if (rc < 0) {
                idxd_free_desc(wq, desc);
                return rc;
        }

        return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
}

int idxd_register_dma_device(struct idxd_device *idxd)
{
        struct dma_device *dma = &idxd->dma_dev;

        INIT_LIST_HEAD(&dma->channels);
        dma->dev = &idxd->pdev->dev;

        dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
        dma->device_release = idxd_dma_release;

        if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
                dma_cap_set(DMA_MEMCPY, dma->cap_mask);
                dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
        }

        dma->device_tx_status = idxd_dma_tx_status;
        dma->device_issue_pending = idxd_dma_issue_pending;
        dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
        dma->device_free_chan_resources = idxd_dma_free_chan_resources;

        return dma_async_device_register(&idxd->dma_dev);
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
        dma_async_device_unregister(&idxd->dma_dev);
}

int idxd_register_dma_channel(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct dma_device *dma = &idxd->dma_dev;
        struct dma_chan *chan = &wq->dma_chan;
        int rc;

        memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
        chan->device = dma;
        list_add_tail(&chan->device_node, &dma->channels);
        rc = dma_async_device_channel_register(dma, chan);
        if (rc < 0)
                return rc;

        return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
        dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
}