drivers/spi/spi-s3c64xx.c
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/spi-s3c64xx.h>
#ifdef CONFIG_S3C_DMA
#include <mach/dma.h>
#endif
#define MAX_SPI_PORTS 3
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
/* Registers and bit-fields */
#define S3C64XX_SPI_CH_CFG 0x00
#define S3C64XX_SPI_CLK_CFG 0x04
#define S3C64XX_SPI_MODE_CFG 0x08
#define S3C64XX_SPI_SLAVE_SEL 0x0C
#define S3C64XX_SPI_INT_EN 0x10
#define S3C64XX_SPI_STATUS 0x14
#define S3C64XX_SPI_TX_DATA 0x18
#define S3C64XX_SPI_RX_DATA 0x1C
#define S3C64XX_SPI_PACKET_CNT 0x20
#define S3C64XX_SPI_PENDING_CLR 0x24
#define S3C64XX_SPI_SWAP_CFG 0x28
#define S3C64XX_SPI_FB_CLK 0x2C
#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST (1<<5)
#define S3C64XX_SPI_CH_SLAVE (1<<4)
#define S3C64XX_SPI_CPOL_L (1<<3)
#define S3C64XX_SPI_CPHA_B (1<<2)
#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
#define S3C64XX_SPI_MODE_4BURST (1<<0)
#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
#define S3C64XX_SPI_FBCLK_MSK (3<<0)
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
FIFO_LVL_MASK(i))
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
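/*
 * Approximate number of busy-wait loop iterations covering 't' msecs:
 * loops_per_jiffy * HZ iterations take roughly one second.
 */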
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
struct s3c64xx_spi_dma_data {
struct dma_chan *ch;
enum dma_transfer_direction direction;
unsigned int dmach;
};
/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of known quirks (e.g. S3C64XX_SPI_QUIRK_POLL).
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 * prescaler unit.
 *
 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
 * differ in some aspects, such as the size of the FIFO and the SPI bus clock
 * setup. Such differences are specified to the driver using this structure,
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
int tx_st_done;
int quirks;
bool high_speed;
bool clk_from_cmu;
};
/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @regs: Pointer to ioremap'ed controller registers.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @pdev: Pointer to the underlying platform device.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 * @lock: Controller specific lock.
 * @sfr_start: BUS address of SPI controller regs.
 * @xfer_completion: To indicate completion of xfer task.
 * @state: Set of FLAGS to indicate status.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Stores the active xfer clock speed.
 * @rx_dma: DMA channel info for Rx.
 * @tx_dma: DMA channel info for Tx.
 * @port_conf: Hardware info matching the SPI port variant in use.
 * @port_id: Index of this port, used to pick the FIFO level mask.
 * @cs_gpio: True if the chip select line is driven by a GPIO pin.
 */
struct s3c64xx_spi_driver_data {
void __iomem *regs;
struct clk *clk;
struct clk *src_clk;
struct platform_device *pdev;
struct spi_master *master;
struct s3c64xx_spi_info *cntrlr_info;
struct spi_device *tgl_spi;
spinlock_t lock;
unsigned long sfr_start;
struct completion xfer_completion;
unsigned state;
unsigned cur_mode, cur_bpw;
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
#ifdef CONFIG_S3C_DMA
struct samsung_dma_ops *ops;
#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
bool cs_gpio;
};
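/*
 * Stop the Rx/Tx channels, soft-reset the SPI channel and drain whatever
 * is left in the Tx and Rx FIFOs, then clear the DMA mode bits.
 */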
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
unsigned long loops;
u32 val;
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
writel(val, regs + S3C64XX_SPI_CH_CFG);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val |= S3C64XX_SPI_CH_SW_RST;
val &= ~S3C64XX_SPI_CH_HS_EN;
writel(val, regs + S3C64XX_SPI_CH_CFG);
/* Flush TxFIFO*/
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
} while (TX_FIFO_LVL(val, sdd) && loops--);
if (loops == 0)
dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
/* Flush RxFIFO*/
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
if (RX_FIFO_LVL(val, sdd))
readl(regs + S3C64XX_SPI_RX_DATA);
else
break;
} while (loops--);
if (loops == 0)
dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~S3C64XX_SPI_CH_SW_RST;
writel(val, regs + S3C64XX_SPI_CH_CFG);
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
}
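/*
 * DMA completion callback: clears the BUSY flag of the direction that just
 * finished and completes the xfer only once both Rx and Tx are idle.
 */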
static void s3c64xx_spi_dmacb(void *data)
{
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_dma_data *dma = data;
unsigned long flags;
if (dma->direction == DMA_DEV_TO_MEM)
sdd = container_of(data,
struct s3c64xx_spi_driver_data, rx_dma);
else
sdd = container_of(data,
struct s3c64xx_spi_driver_data, tx_dma);
spin_lock_irqsave(&sdd->lock, flags);
if (dma->direction == DMA_DEV_TO_MEM) {
sdd->state &= ~RXBUSY;
if (!(sdd->state & TXBUSY))
complete(&sdd->xfer_completion);
} else {
sdd->state &= ~TXBUSY;
if (!(sdd->state & RXBUSY))
complete(&sdd->xfer_completion);
}
spin_unlock_irqrestore(&sdd->lock, flags);
}
#ifdef CONFIG_S3C_DMA
/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
.name = "samsung-spi-dma",
};
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
unsigned len, dma_addr_t buf)
{
struct s3c64xx_spi_driver_data *sdd;
struct samsung_dma_prep info;
struct samsung_dma_config config;
if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
config.direction = sdd->rx_dma.direction;
config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
config.width = sdd->cur_bpw / 8;
sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
} else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
config.direction = sdd->tx_dma.direction;
config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
config.width = sdd->cur_bpw / 8;
sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
}
info.cap = DMA_SLAVE;
info.len = len;
info.fp = s3c64xx_spi_dmacb;
info.fp_param = dma;
info.direction = dma->direction;
info.buf = buf;
sdd->ops->prepare((enum dma_ch)dma->ch, &info);
sdd->ops->trigger((enum dma_ch)dma->ch);
}
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
struct samsung_dma_req req;
struct device *dev = &sdd->pdev->dev;
sdd->ops = samsung_dma_get_ops();
req.cap = DMA_SLAVE;
req.client = &s3c64xx_spi_dma_client;
sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
sdd->rx_dma.dmach, &req, dev, "rx");
sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
sdd->tx_dma.dmach, &req, dev, "tx");
return 1;
}
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/*
 * If DMA resources were not available during probe there is no need
 * to request channels; otherwise keep trying to acquire the DMA
 * channels until they become available.
 */
while (!is_polling(sdd) && !acquire_dma(sdd))
usleep_range(10000, 11000);
return 0;
}
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Free DMA channels */
if (!is_polling(sdd)) {
sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
&s3c64xx_spi_dma_client);
sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
&s3c64xx_spi_dma_client);
}
return 0;
}
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
struct s3c64xx_spi_dma_data *dma)
{
sdd->ops->stop((enum dma_ch)dma->ch);
}
#else
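/*
 * dmaengine-based variants of the DMA helpers: point the slave channel at
 * the Tx/Rx FIFO register, submit a single-buffer descriptor with
 * s3c64xx_spi_dmacb as its completion callback and issue it.
 */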
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
unsigned len, dma_addr_t buf)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
struct dma_async_tx_descriptor *desc;
memset(&config, 0, sizeof(config));
if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
config.direction = dma->direction;
config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
config.src_addr_width = sdd->cur_bpw / 8;
config.src_maxburst = 1;
dmaengine_slave_config(dma->ch, &config);
} else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
config.direction = dma->direction;
config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
config.dst_addr_width = sdd->cur_bpw / 8;
config.dst_maxburst = 1;
dmaengine_slave_config(dma->ch, &config);
}
desc = dmaengine_prep_slave_single(dma->ch, buf, len,
dma->direction, DMA_PREP_INTERRUPT);
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
dmaengine_submit(desc);
dma_async_issue_pending(dma->ch);
}
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
dma_filter_fn filter = sdd->cntrlr_info->filter;
struct device *dev = &sdd->pdev->dev;
dma_cap_mask_t mask;
int ret;
if (!is_polling(sdd)) {
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* Acquire DMA channels */
sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
(void *)sdd->rx_dma.dmach, dev, "rx");
if (!sdd->rx_dma.ch) {
dev_err(dev, "Failed to get RX DMA channel\n");
ret = -EBUSY;
goto out;
}
sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
(void *)sdd->tx_dma.dmach, dev, "tx");
if (!sdd->tx_dma.ch) {
dev_err(dev, "Failed to get TX DMA channel\n");
ret = -EBUSY;
goto out_rx;
}
}
ret = pm_runtime_get_sync(&sdd->pdev->dev);
if (ret < 0) {
dev_err(dev, "Failed to enable device: %d\n", ret);
goto out_tx;
}
return 0;
out_tx:
dma_release_channel(sdd->tx_dma.ch);
out_rx:
dma_release_channel(sdd->rx_dma.ch);
out:
return ret;
}
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Free DMA channels */
if (!is_polling(sdd)) {
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
}
pm_runtime_put(&sdd->pdev->dev);
return 0;
}
static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
struct s3c64xx_spi_dma_data *dma)
{
dmaengine_terminate_all(dma->ch);
}
#endif
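/*
 * Configure MODE_CFG/CH_CFG for this transfer: enable PIO or DMA per
 * direction, preload the Tx FIFO or kick the DMA descriptors, and program
 * PACKET_CNT so the controller generates exactly the required clocks.
 */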
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
{
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
} else {
/* Always shift data into the Rx FIFO, even if the xfer is Tx only;
 * this helps set the PCKT_CNT value so that exactly the needed
 * number of clocks is generated.
 */
chcfg |= S3C64XX_SPI_CH_RXCH_ON;
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
}
if (xfer->tx_buf != NULL) {
sdd->state |= TXBUSY;
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
} else {
switch (sdd->cur_bpw) {
case 32:
iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len / 4);
break;
case 16:
iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len / 2);
break;
default:
iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
xfer->tx_buf, xfer->len);
break;
}
}
}
if (xfer->rx_buf != NULL) {
sdd->state |= RXBUSY;
if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
&& !(sdd->cur_mode & SPI_CPHA))
chcfg |= S3C64XX_SPI_CH_HS_EN;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
chcfg |= S3C64XX_SPI_CH_RXCH_ON;
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
}
}
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio,
spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
}
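/*
 * Poll (for at most timeout_ms, or a single read if zero) until the Rx
 * FIFO reaches its maximum depth; returns the Rx FIFO level reached.
 */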
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
void __iomem *regs = sdd->regs;
unsigned long val = 1;
u32 status;
/* max fifo depth available */
u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
if (timeout_ms)
val = msecs_to_loops(timeout_ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
/* return the actual received data length */
return RX_FIFO_LVL(status, sdd);
}
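/*
 * Wait for the current transfer to finish: block on the DMA completion in
 * DMA mode, or poll the Rx FIFO level and drain it chunk by chunk in PIO
 * mode.
 */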
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
void __iomem *regs = sdd->regs;
unsigned long val;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */
if (dma_mode) {
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
} else {
u32 status;
val = msecs_to_loops(ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
}
if (dma_mode) {
u32 status;
/*
 * If the previous xfer completed within the timeout, proceed
 * further; else return -EIO.
 * DmaTx returns after simply writing data into the FIFO,
 * without waiting for the real transmission on the bus to finish.
 * DmaRx returns only after the DMA has read the data from the
 * FIFO, which requires the bus transmission to finish, so we
 * need not worry if the xfer involved Rx (with or without Tx).
 */
if (val && !xfer->rx_buf) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
while ((TX_FIFO_LVL(status, sdd)
|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
&& --val) {
cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
}
}
/* If timed out while checking rx/tx status return error */
if (!val)
return -EIO;
} else {
int loops;
u32 cpy_len;
u8 *buf;
/* If it was only Tx */
if (!xfer->rx_buf) {
sdd->state &= ~TXBUSY;
return 0;
}
/*
 * If the receive length is bigger than the controller fifo
 * size, calculate the loops and read the fifo as many times.
 * loops = length / max fifo size (calculated by using the
 * fifo mask).
 * For any size less than the fifo size the below code is
 * executed at least once.
 */
loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
buf = xfer->rx_buf;
do {
/* wait for data to be received in the fifo */
cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
(loops ? ms : 0));
switch (sdd->cur_bpw) {
case 32:
ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len / 4);
break;
case 16:
ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len / 2);
break;
default:
ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len);
break;
}
buf = buf + cpy_len;
} while (loops--);
sdd->state &= ~RXBUSY;
}
return 0;
}
static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
}
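/*
 * Push the cached cur_mode/cur_bpw/cur_speed settings into the hardware:
 * CPOL/CPHA, channel/bus transfer sizes and the clock rate or prescaler,
 * with the SPI clock disabled while reprogramming.
 */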
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
u32 val;
/* Disable Clock */
if (sdd->port_conf->clk_from_cmu) {
clk_disable_unprepare(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
/* Set Polarity and Phase */
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~(S3C64XX_SPI_CH_SLAVE |
S3C64XX_SPI_CPOL_L |
S3C64XX_SPI_CPHA_B);
if (sdd->cur_mode & SPI_CPOL)
val |= S3C64XX_SPI_CPOL_L;
if (sdd->cur_mode & SPI_CPHA)
val |= S3C64XX_SPI_CPHA_B;
writel(val, regs + S3C64XX_SPI_CH_CFG);
/* Set Channel & DMA Mode */
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
| S3C64XX_SPI_MODE_CH_TSZ_MASK);
switch (sdd->cur_bpw) {
case 32:
val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
break;
case 16:
val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
break;
default:
val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
break;
}
writel(val, regs + S3C64XX_SPI_MODE_CFG);
if (sdd->port_conf->clk_from_cmu) {
/* Configure Clock */
/* There is a half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
/* Enable Clock */
clk_prepare_enable(sdd->src_clk);
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_PSR_MASK;
val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
& S3C64XX_SPI_PSR_MASK);
writel(val, regs + S3C64XX_SPI_CLK_CFG);
/* Enable Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
}
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
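/*
 * DMA-map the buffers of every transfer in the message that is larger than
 * the FIFO (smaller ones go via PIO); addresses left as
 * XFER_DMAADDR_INVALID are skipped by s3c64xx_spi_unmap_mssg().
 */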
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
if (is_polling(sdd) || msg->is_dma_mapped)
return 0;
/* First mark all xfer unmapped */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
xfer->rx_dma = XFER_DMAADDR_INVALID;
xfer->tx_dma = XFER_DMAADDR_INVALID;
}
/* Map until end or first fail */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
continue;
if (xfer->tx_buf != NULL) {
xfer->tx_dma = dma_map_single(dev,
(void *)xfer->tx_buf, xfer->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
dev_err(dev, "dma_map_single Tx failed\n");
xfer->tx_dma = XFER_DMAADDR_INVALID;
return -ENOMEM;
}
}
if (xfer->rx_buf != NULL) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
xfer->len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma)) {
dev_err(dev, "dma_map_single Rx failed\n");
dma_unmap_single(dev, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
xfer->tx_dma = XFER_DMAADDR_INVALID;
xfer->rx_dma = XFER_DMAADDR_INVALID;
return -ENOMEM;
}
}
}
return 0;
}
static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
if (is_polling(sdd) || msg->is_dma_mapped)
return;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
continue;
if (xfer->rx_buf != NULL
&& xfer->rx_dma != XFER_DMAADDR_INVALID)
dma_unmap_single(dev, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
if (xfer->tx_buf != NULL
&& xfer->tx_dma != XFER_DMAADDR_INVALID)
dma_unmap_single(dev, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
}
}
static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
/* If the Master's (controller) state differs from that needed by the Slave */
if (sdd->cur_speed != spi->max_speed_hz
|| sdd->cur_mode != spi->mode
|| sdd->cur_bpw != spi->bits_per_word) {
sdd->cur_bpw = spi->bits_per_word;
sdd->cur_speed = spi->max_speed_hz;
sdd->cur_mode = spi->mode;
s3c64xx_spi_config(sdd);
}
/* Map all the transfers if needed */
if (s3c64xx_spi_map_mssg(sdd, msg)) {
dev_err(&spi->dev,
"Xfer: Unable to map message buffers!\n");
return -ENOMEM;
}
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
return 0;
}
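/*
 * Perform one transfer: reprogram the controller if BPW or speed changed,
 * use DMA only for transfers larger than the FIFO, start the slave-select
 * signals and wait for completion, stopping any stuck DMA on error.
 */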
static int s3c64xx_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
int status;
u32 speed;
u8 bpw;
unsigned long flags;
int use_dma;
reinit_completion(&sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
if (xfer->len % (bpw / 8)) {
dev_err(&spi->dev,
"Xfer length(%u) not a multiple of word size(%u)\n",
xfer->len, bpw / 8);
return -EIO;
}
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
s3c64xx_spi_config(sdd);
}
/* Polling method for xfers not bigger than FIFO capacity */
use_dma = 0;
if (!is_polling(sdd) &&
(sdd->rx_dma.ch && sdd->tx_dma.ch &&
(xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
use_dma = 1;
spin_lock_irqsave(&sdd->lock, flags);
/* Only the work to be done now should be marked as pending */
sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY;
enable_datapath(sdd, spi, xfer, use_dma);
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);
if (status) {
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
xfer->len);
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
}
} else {
flush_fifo(sdd);
}
return status;
}
static int s3c64xx_spi_unprepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
s3c64xx_spi_unmap_mssg(sdd, msg);
return 0;
}
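/*
 * Fetch per-slave settings from the "controller-data" child of the slave's
 * device tree node. An illustrative (not normative) slave entry:
 *
 *	spi-flash@0 {
 *		compatible = "...";
 *		reg = <0>;
 *		spi-max-frequency = <10000000>;
 *		controller-data {
 *			cs-gpio = <&gpa2 5 0>;
 *			samsung,spi-feedback-delay = <0>;
 *		};
 *	};
 */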
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
struct device_node *slave_np, *data_np = NULL;
struct s3c64xx_spi_driver_data *sdd;
u32 fb_delay = 0;
sdd = spi_master_get_devdata(spi->master);
slave_np = spi->dev.of_node;
if (!slave_np) {
dev_err(&spi->dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
data_np = of_get_child_by_name(slave_np, "controller-data");
if (!data_np) {
dev_err(&spi->dev, "child node 'controller-data' not found\n");
return ERR_PTR(-EINVAL);
}
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs) {
dev_err(&spi->dev, "could not allocate memory for controller data\n");
of_node_put(data_np);
return ERR_PTR(-ENOMEM);
}
/* The CS line is asserted/deasserted by the gpio pin */
if (sdd->cs_gpio)
cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
if (!gpio_is_valid(cs->line)) {
dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
kfree(cs);
of_node_put(data_np);
return ERR_PTR(-EINVAL);
}
of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
cs->fb_delay = fb_delay;
of_node_put(data_np);
return cs;
}
/*
* Here we only check the validity of requested configuration
* and save the configuration in a local data-structure.
* The controller is actually configured only just before we
* get a message to transfer.
*/
static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
int err;
sdd = spi_master_get_devdata(spi->master);
if (!cs && spi->dev.of_node) {
cs = s3c64xx_get_slave_ctrldata(spi);
spi->controller_data = cs;
}
if (IS_ERR_OR_NULL(cs)) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
return -ENODEV;
}
if (!spi_get_ctldata(spi)) {
/* Request gpio only if cs line is asserted by gpio pins */
if (sdd->cs_gpio) {
err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
dev_name(&spi->dev));
if (err) {
dev_err(&spi->dev,
"Failed to get /CS gpio [%d]: %d\n",
cs->line, err);
goto err_gpio_req;
}
spi->cs_gpio = cs->line;
}
spi_set_ctldata(spi, cs);
}
sci = sdd->cntrlr_info;
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
if (!sdd->port_conf->clk_from_cmu) {
u32 psr, speed;
/* Max possible */
speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
if (spi->max_speed_hz > speed)
spi->max_speed_hz = speed;
psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
psr &= S3C64XX_SPI_PSR_MASK;
if (psr == S3C64XX_SPI_PSR_MASK)
psr--;
speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
if (spi->max_speed_hz < speed) {
if (psr+1 < S3C64XX_SPI_PSR_MASK) {
psr++;
} else {
err = -EINVAL;
goto setup_exit;
}
}
speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
} else {
dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
spi->max_speed_hz);
err = -EINVAL;
goto setup_exit;
}
}
pm_runtime_put(&sdd->pdev->dev);
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
disable_cs(sdd, spi);
return 0;
setup_exit:
pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
disable_cs(sdd, spi);
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
err_gpio_req:
if (spi->dev.of_node)
kfree(cs);
return err;
}
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
struct s3c64xx_spi_driver_data *sdd;
sdd = spi_master_get_devdata(spi->master);
if (spi->cs_gpio >= 0) {
gpio_free(spi->cs_gpio);
if (spi->dev.of_node)
kfree(cs);
}
spi_set_ctldata(spi, NULL);
}
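/*
 * Error interrupt handler: report Rx/Tx overrun/underrun conditions and
 * acknowledge them by writing the clear bits and then zero to PENDING_CLR.
 */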
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
struct spi_master *spi = sdd->master;
unsigned int val, clr = 0;
val = readl(sdd->regs + S3C64XX_SPI_STATUS);
if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
dev_err(&spi->dev, "RX overrun\n");
}
if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
dev_err(&spi->dev, "RX underrun\n");
}
if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
dev_err(&spi->dev, "TX overrun\n");
}
if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
dev_err(&spi->dev, "TX underrun\n");
}
/* Clear the pending irq by setting and then clearing it */
writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
return IRQ_HANDLED;
}
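/*
 * Bring the controller to a known state: deassert the slave select, mask
 * all interrupts, clear pending status, program the trailing-byte count
 * and flush both FIFOs.
 */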
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned int val;
sdd->cur_speed = 0;
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
if (!sdd->port_conf->clk_from_cmu)
writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
regs + S3C64XX_SPI_CLK_CFG);
writel(0, regs + S3C64XX_SPI_MODE_CFG);
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
/* Clear any pending irq bits; the bits must be set and then cleared */
val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
S3C64XX_SPI_PND_TX_OVERRUN_CLR |
S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
writel(val, regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_SWAP_CFG);
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~S3C64XX_SPI_MODE_4BURST;
val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
flush_fifo(sdd);
}
#ifdef CONFIG_OF
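/*
 * Parse controller properties from the device tree. An illustrative node
 * (names and values are examples only):
 *
 *	spi_0: spi@13920000 {
 *		compatible = "samsung,exynos4210-spi";
 *		samsung,spi-src-clk = <0>;
 *		num-cs = <1>;
 *	};
 */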
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
struct s3c64xx_spi_info *sci;
u32 temp;
sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
if (!sci) {
dev_err(dev, "memory allocation for spi_info failed\n");
return ERR_PTR(-ENOMEM);
}
if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
sci->src_clk_nr = 0;
} else {
sci->src_clk_nr = temp;
}
if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
sci->num_cs = 1;
} else {
sci->num_cs = temp;
}
return sci;
}
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
return dev_get_platdata(dev);
}
#endif
static const struct of_device_id s3c64xx_spi_dt_match[];
static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
struct platform_device *pdev)
{
#ifdef CONFIG_OF
if (pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
return (struct s3c64xx_spi_port_config *)match->data;
}
#endif
return (struct s3c64xx_spi_port_config *)
platform_get_device_id(pdev)->driver_data;
}
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res;
struct resource *res;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
struct spi_master *master;
int ret, irq;
char clk_name[16];
if (!sci && pdev->dev.of_node) {
sci = s3c64xx_spi_parse_dt(&pdev->dev);
if (IS_ERR(sci))
return PTR_ERR(sci);
}
if (!sci) {
dev_err(&pdev->dev, "platform_data missing!\n");
return -ENODEV;
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem_res == NULL) {
dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
return -ENXIO;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
return irq;
}
master = spi_alloc_master(&pdev->dev,
sizeof(struct s3c64xx_spi_driver_data));
if (master == NULL) {
dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
sdd->master = master;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
sdd->cs_gpio = true;
if (pdev->dev.of_node) {
if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
sdd->cs_gpio = false;
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
ret);
goto err0;
}
sdd->port_id = ret;
} else {
sdd->port_id = pdev->id;
}
sdd->cur_bpw = 8;
if (!sdd->pdev->dev.of_node) {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
} else
sdd->tx_dma.dmach = res->start;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!res) {
dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
} else
sdd->rx_dma.dmach = res->start;
}
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
sdd->rx_dma.direction = DMA_DEV_TO_MEM;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = sdd->port_id;
master->setup = s3c64xx_spi_setup;
master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
master->unprepare_message = s3c64xx_spi_unprepare_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->auto_runtime_pm = true;
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sdd->regs)) {
ret = PTR_ERR(sdd->regs);
goto err0;
}
if (sci->cfg_gpio && sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
goto err0;
}
/* Setup clocks */
sdd->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
goto err0;
}
if (clk_prepare_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
goto err0;
}
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
goto err2;
}
if (clk_prepare_enable(sdd->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
goto err2;
}
/* Setup Default Mode */
s3c64xx_spi_hwinit(sdd, sdd->port_id);
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
"spi-s3c64xx", sdd);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
goto err3;
}
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret != 0) {
dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
goto err3;
}
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
mem_res,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
return 0;
err3:
clk_disable_unprepare(sdd->src_clk);
err2:
clk_disable_unprepare(sdd->clk);
err0:
spi_master_put(master);
return ret;
}
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
pm_runtime_disable(&pdev->dev);
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
clk_disable_unprepare(sdd->src_clk);
clk_disable_unprepare(sdd->clk);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
int ret = spi_master_suspend(master);
if (ret)
return ret;
if (!pm_runtime_suspended(dev)) {
clk_disable_unprepare(sdd->clk);
clk_disable_unprepare(sdd->src_clk);
}
sdd->cur_speed = 0; /* Output Clock is stopped */
return 0;
}
static int s3c64xx_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
if (sci->cfg_gpio)
sci->cfg_gpio();
if (!pm_runtime_suspended(dev)) {
clk_prepare_enable(sdd->src_clk);
clk_prepare_enable(sdd->clk);
}
s3c64xx_spi_hwinit(sdd, sdd->port_id);
return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
clk_disable_unprepare(sdd->clk);
clk_disable_unprepare(sdd->src_clk);
return 0;
}
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(sdd->src_clk);
if (ret != 0)
return ret;
ret = clk_prepare_enable(sdd->clk);
if (ret != 0) {
clk_disable_unprepare(sdd->src_clk);
return ret;
}
return 0;
}
#endif /* CONFIG_PM_RUNTIME */
static const struct dev_pm_ops s3c64xx_spi_pm = {
SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
s3c64xx_spi_runtime_resume, NULL)
};
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
.fifo_lvl_mask = { 0x7f },
.rx_lvl_offset = 13,
.tx_st_done = 21,
.high_speed = true,
};
static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7F },
.rx_lvl_offset = 13,
.tx_st_done = 21,
};
static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
};
static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7F },
.rx_lvl_offset = 13,
.tx_st_done = 21,
.high_speed = true,
};
static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.high_speed = true,
};
static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.high_speed = true,
.clk_from_cmu = true,
};
static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
.fifo_lvl_mask = { 0x1ff },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.high_speed = true,
.clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_POLL,
};
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
.driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
}, {
.name = "s3c6410-spi",
.driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
}, {
.name = "s5p64x0-spi",
.driver_data = (kernel_ulong_t)&s5p64x0_spi_port_config,
}, {
.name = "s5pc100-spi",
.driver_data = (kernel_ulong_t)&s5pc100_spi_port_config,
}, {
.name = "s5pv210-spi",
.driver_data = (kernel_ulong_t)&s5pv210_spi_port_config,
}, {
.name = "exynos4210-spi",
.driver_data = (kernel_ulong_t)&exynos4_spi_port_config,
},
{ },
};
static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,s3c2443-spi",
.data = (void *)&s3c2443_spi_port_config,
},
{ .compatible = "samsung,s3c6410-spi",
.data = (void *)&s3c6410_spi_port_config,
},
{ .compatible = "samsung,s5pc100-spi",
.data = (void *)&s5pc100_spi_port_config,
},
{ .compatible = "samsung,s5pv210-spi",
.data = (void *)&s5pv210_spi_port_config,
},
{ .compatible = "samsung,exynos4210-spi",
.data = (void *)&exynos4_spi_port_config,
},
{ .compatible = "samsung,exynos5440-spi",
.data = (void *)&exynos5440_spi_port_config,
},
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
.owner = THIS_MODULE,
.pm = &s3c64xx_spi_pm,
.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
.probe = s3c64xx_spi_probe,
.remove = s3c64xx_spi_remove,
.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
module_platform_driver(s3c64xx_spi_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");