soc: ti: add Keystone Navigator QMSS driver
The QMSS (Queue Manager Sub System) found on Keystone SOCs is one of
the main hardware sub systems that form the backbone of the Keystone
Multi-core Navigator. QMSS consists of queue managers, packed-data
structure processors (PDSPs), linking RAM, descriptor pools and
infrastructure Packet DMA.

The Queue Manager is a hardware module that is responsible for
accelerating management of the packet queues. Packets are queued and
de-queued by writing or reading a descriptor address to or from a
particular memory-mapped location. The PDSPs perform QMSS-related
functions like accumulation, QoS, or event management. Linking RAM
registers are used to link the descriptors which are stored in
descriptor RAM. Descriptor RAM is configurable as internal or external
memory.

The QMSS driver manages the PDSP setups, linking RAM regions, queue
pool management (allocation, push, pop and notify) and descriptor pool
management. The specifics of the device tree bindings for QMSS can be
found in:

        Documentation/devicetree/bindings/soc/keystone-navigator-qmss.txt

Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Kumar Gala <galak@codeaurora.org>
Cc: Olof Johansson <olof@lixom.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Sandeep Nair <sandeep_n@ti.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
commit 41f93af900 (parent a4dfb8c410)
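As a quick orientation before the diff: a minimal, hypothetical sketch of how a Keystone client driver might use the queue and descriptor pool API this patch exports (declared in include/linux/soc/ti/knav_qmss.h below). The pool/queue names, region id 12, the 64-byte descriptor size and the IS_ERR_OR_NULL() error checks are illustrative assumptions, not taken from this patch.

/* Illustrative client-side usage only; not part of this patch. */
#include <linux/soc/ti/knav_qmss.h>

static int example_qmss_client(void)
{
        void *pool, *queue, *desc;
        dma_addr_t dma;
        unsigned dma_sz, size;
        int ret;

        /* Carve 32 descriptors out of a (hypothetical) descriptor region 12 */
        pool = knav_pool_create("example-pool", 32, 12);
        if (IS_ERR_OR_NULL(pool))
                return -ENODEV;

        /* Open a general purpose queue */
        queue = knav_queue_open("example-queue", KNAV_QUEUE_GP, 0);
        if (IS_ERR_OR_NULL(queue)) {
                knav_pool_destroy(pool);
                return -ENODEV;
        }

        /* Take a descriptor, map it, and push its dma address to the queue */
        desc = knav_pool_desc_get(pool);
        if (IS_ERR_OR_NULL(desc)) {
                ret = -ENOMEM;
                goto out;
        }
        ret = knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
        if (!ret)
                ret = knav_queue_push(queue, dma, dma_sz, 0);

        /* Pop it back (normally done from a notifier) and return it */
        dma = knav_queue_pop(queue, &size);
        if (dma)
                knav_pool_desc_put(pool, knav_pool_desc_unmap(pool, dma, size));

out:
        knav_queue_close(queue);
        knav_pool_destroy(pool);
        return ret;
}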
drivers/Kconfig
@@ -148,6 +148,8 @@ source "drivers/remoteproc/Kconfig"
 
 source "drivers/rpmsg/Kconfig"
 
+source "drivers/soc/Kconfig"
+
 source "drivers/devfreq/Kconfig"
 
 source "drivers/extcon/Kconfig"
drivers/soc/Kconfig
@@ -1,5 +1,6 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/ti/Kconfig"
 
 endmenu
drivers/soc/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_ARCH_QCOM) += qcom/
 obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_SOC_TI) += ti/
drivers/soc/ti/Kconfig (new file, 21 lines)
#
# TI SOC drivers
#
menuconfig SOC_TI
        bool "TI SOC drivers support"

if SOC_TI

config KEYSTONE_NAVIGATOR_QMSS
        tristate "Keystone Queue Manager Sub System"
        depends on ARCH_KEYSTONE
        help
          Say y here to support the Keystone multicore Navigator Queue
          Manager. The Queue Manager is a hardware module that is
          responsible for accelerating management of the packet queues.
          Packets are queued/de-queued by writing/reading descriptor address
          to a particular memory mapped location in the Queue Manager module.

          If unsure, say N.

endif # SOC_TI
drivers/soc/ti/Makefile (new file, 4 lines)
#
# TI Keystone SOC drivers
#
obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss_queue.o knav_qmss_acc.o
drivers/soc/ti/knav_qmss.h (new file, 386 lines)
/*
 * Keystone Navigator QMSS driver internal header
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:      Sandeep Nair <sandeep_n@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *              Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#ifndef __KNAV_QMSS_H__
#define __KNAV_QMSS_H__

#define THRESH_GTE                      BIT(7)
#define THRESH_LT                       0

#define PDSP_CTRL_PC_MASK               0xffff0000
#define PDSP_CTRL_SOFT_RESET            BIT(0)
#define PDSP_CTRL_ENABLE                BIT(1)
#define PDSP_CTRL_RUNNING               BIT(15)

#define ACC_MAX_CHANNEL                 48
#define ACC_DEFAULT_PERIOD              25 /* usecs */

#define ACC_CHANNEL_INT_BASE            2

#define ACC_LIST_ENTRY_TYPE             1
#define ACC_LIST_ENTRY_WORDS            (1 << ACC_LIST_ENTRY_TYPE)
#define ACC_LIST_ENTRY_QUEUE_IDX        0
#define ACC_LIST_ENTRY_DESC_IDX         (ACC_LIST_ENTRY_WORDS - 1)

#define ACC_CMD_DISABLE_CHANNEL         0x80
#define ACC_CMD_ENABLE_CHANNEL          0x81
#define ACC_CFG_MULTI_QUEUE             BIT(21)

#define ACC_INTD_OFFSET_EOI             (0x0010)
#define ACC_INTD_OFFSET_COUNT(ch)       (0x0300 + 4 * (ch))
#define ACC_INTD_OFFSET_STATUS(ch)      (0x0200 + 4 * ((ch) / 32))

#define RANGE_MAX_IRQS                  64

#define ACC_DESCS_MAX                   SZ_1K
#define ACC_DESCS_MASK                  (ACC_DESCS_MAX - 1)
#define DESC_SIZE_MASK                  0xful
#define DESC_PTR_MASK                   (~DESC_SIZE_MASK)

#define KNAV_NAME_SIZE                  32

enum knav_acc_result {
        ACC_RET_IDLE,
        ACC_RET_SUCCESS,
        ACC_RET_INVALID_COMMAND,
        ACC_RET_INVALID_CHANNEL,
        ACC_RET_INACTIVE_CHANNEL,
        ACC_RET_ACTIVE_CHANNEL,
        ACC_RET_INVALID_QUEUE,
        ACC_RET_INVALID_RET,
};

struct knav_reg_config {
        u32 revision;
        u32 __pad1;
        u32 divert;
        u32 link_ram_base0;
        u32 link_ram_size0;
        u32 link_ram_base1;
        u32 __pad2[2];
        u32 starvation[0];
};

struct knav_reg_region {
        u32 base;
        u32 start_index;
        u32 size_count;
        u32 __pad;
};

struct knav_reg_pdsp_regs {
        u32 control;
        u32 status;
        u32 cycle_count;
        u32 stall_count;
};

struct knav_reg_acc_command {
        u32 command;
        u32 queue_mask;
        u32 list_phys;
        u32 queue_num;
        u32 timer_config;
};

struct knav_link_ram_block {
        dma_addr_t phys;
        void *virt;
        size_t size;
};

struct knav_acc_info {
        u32 pdsp_id;
        u32 start_channel;
        u32 list_entries;
        u32 pacing_mode;
        u32 timer_count;
        int mem_size;
        int list_size;
        struct knav_pdsp_info *pdsp;
};

struct knav_acc_channel {
        u32 channel;
        u32 list_index;
        u32 open_mask;
        u32 *list_cpu[2];
        dma_addr_t list_dma[2];
        char name[KNAV_NAME_SIZE];
        atomic_t retrigger_count;
};

struct knav_pdsp_info {
        const char *name;
        struct knav_reg_pdsp_regs __iomem *regs;
        union {
                void __iomem *command;
                struct knav_reg_acc_command __iomem *acc_command;
                u32 __iomem *qos_command;
        };
        void __iomem *intd;
        u32 __iomem *iram;
        const char *firmware;
        u32 id;
        struct list_head list;
};

struct knav_qmgr_info {
        unsigned start_queue;
        unsigned num_queues;
        struct knav_reg_config __iomem *reg_config;
        struct knav_reg_region __iomem *reg_region;
        struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
        void __iomem *reg_status;
        struct list_head list;
};

#define KNAV_NUM_LINKRAM        2

/**
 * struct knav_queue_stats:     queue statistics
 * pushes:                      number of push operations
 * pops:                        number of pop operations
 * push_errors:                 number of push errors
 * pop_errors:                  number of pop errors
 * notifies:                    notifier counts
 */
struct knav_queue_stats {
        atomic_t pushes;
        atomic_t pops;
        atomic_t push_errors;
        atomic_t pop_errors;
        atomic_t notifies;
};

/**
 * struct knav_reg_queue:       queue registers
 * @entry_count:                valid entries in the queue
 * @byte_count:                 total byte count in the queue
 * @packet_size:                packet size for the queue
 * @ptr_size_thresh:            packet pointer size threshold
 */
struct knav_reg_queue {
        u32 entry_count;
        u32 byte_count;
        u32 packet_size;
        u32 ptr_size_thresh;
};

/**
 * struct knav_region:          qmss region info
 * @dma_start, dma_end:         start and end dma address
 * @virt_start, virt_end:       start and end virtual address
 * @desc_size:                  descriptor size
 * @used_desc:                  consumed descriptors
 * @id:                         region number
 * @num_desc:                   total descriptors
 * @link_index:                 index of the first descriptor
 * @name:                       region name
 * @list:                       instance in the device's region list
 * @pools:                      list of descriptor pools in the region
 */
struct knav_region {
        dma_addr_t dma_start, dma_end;
        void *virt_start, *virt_end;
        unsigned desc_size;
        unsigned used_desc;
        unsigned id;
        unsigned num_desc;
        unsigned link_index;
        const char *name;
        struct list_head list;
        struct list_head pools;
};

/**
 * struct knav_pool:            qmss pools
 * @dev:                        device pointer
 * @region:                     qmss region info
 * @queue:                      queue registers
 * @kdev:                       qmss device pointer
 * @region_offset:              offset from the base
 * @num_desc:                   total descriptors
 * @desc_size:                  descriptor size
 * @region_id:                  region number
 * @name:                       pool name
 * @list:                       list head
 * @region_inst:                instance in the region's pool list
 */
struct knav_pool {
        struct device *dev;
        struct knav_region *region;
        struct knav_queue *queue;
        struct knav_device *kdev;
        int region_offset;
        int num_desc;
        int desc_size;
        int region_id;
        const char *name;
        struct list_head list;
        struct list_head region_inst;
};

/**
 * struct knav_queue_inst:              qmss queue instance properties
 * @descs:                              descriptor pointer
 * @desc_head, desc_tail, desc_count:   descriptor counters
 * @acc:                                accumulator channel pointer
 * @kdev:                               qmss device pointer
 * @range:                              range info
 * @qmgr:                               queue manager info
 * @id:                                 queue instance id
 * @irq_num:                            irq line number
 * @notify_needed:                      notifier needed based on queue type
 * @num_notifiers:                      total notifiers
 * @handles:                            list head
 * @name:                               queue instance name
 * @irq_name:                           irq line name
 */
struct knav_queue_inst {
        u32 *descs;
        atomic_t desc_head, desc_tail, desc_count;
        struct knav_acc_channel *acc;
        struct knav_device *kdev;
        struct knav_range_info *range;
        struct knav_qmgr_info *qmgr;
        u32 id;
        int irq_num;
        int notify_needed;
        atomic_t num_notifiers;
        struct list_head handles;
        const char *name;
        const char *irq_name;
};

/**
 * struct knav_queue:                   qmss queue properties
 * @reg_push, reg_pop, reg_peek:        push, pop queue registers
 * @inst:                               qmss queue instance properties
 * @notifier_fn:                        notifier function
 * @notifier_fn_arg:                    notifier function argument
 * @notifier_enabled:                   notifier enabled for a given queue
 * @rcu:                                rcu head
 * @flags:                              queue flags
 * @list:                               list head
 */
struct knav_queue {
        struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
        struct knav_queue_inst *inst;
        struct knav_queue_stats stats;
        knav_queue_notify_fn notifier_fn;
        void *notifier_fn_arg;
        atomic_t notifier_enabled;
        struct rcu_head rcu;
        unsigned flags;
        struct list_head list;
};

struct knav_device {
        struct device *dev;
        unsigned base_id;
        unsigned num_queues;
        unsigned num_queues_in_use;
        unsigned inst_shift;
        struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM];
        void *instances;
        struct list_head regions;
        struct list_head queue_ranges;
        struct list_head pools;
        struct list_head pdsps;
        struct list_head qmgrs;
};

struct knav_range_ops {
        int (*init_range)(struct knav_range_info *range);
        int (*free_range)(struct knav_range_info *range);
        int (*init_queue)(struct knav_range_info *range,
                          struct knav_queue_inst *inst);
        int (*open_queue)(struct knav_range_info *range,
                          struct knav_queue_inst *inst, unsigned flags);
        int (*close_queue)(struct knav_range_info *range,
                           struct knav_queue_inst *inst);
        int (*set_notify)(struct knav_range_info *range,
                          struct knav_queue_inst *inst, bool enabled);
};

struct knav_irq_info {
        int irq;
        u32 cpu_map;
};

struct knav_range_info {
        const char *name;
        struct knav_device *kdev;
        unsigned queue_base;
        unsigned num_queues;
        void *queue_base_inst;
        unsigned flags;
        struct list_head list;
        struct knav_range_ops *ops;
        struct knav_acc_info acc_info;
        struct knav_acc_channel *acc;
        unsigned num_irqs;
        struct knav_irq_info irqs[RANGE_MAX_IRQS];
};

#define RANGE_RESERVED          BIT(0)
#define RANGE_HAS_IRQ           BIT(1)
#define RANGE_HAS_ACCUMULATOR   BIT(2)
#define RANGE_MULTI_QUEUE       BIT(3)

#define for_each_region(kdev, region)                           \
        list_for_each_entry(region, &kdev->regions, list)

#define first_region(kdev)                                      \
        list_first_entry(&kdev->regions,                        \
                         struct knav_region, list)

#define for_each_queue_range(kdev, range)                       \
        list_for_each_entry(range, &kdev->queue_ranges, list)

#define first_queue_range(kdev)                                 \
        list_first_entry(&kdev->queue_ranges,                   \
                         struct knav_range_info, list)

#define for_each_pool(kdev, pool)                               \
        list_for_each_entry(pool, &kdev->pools, list)

#define for_each_pdsp(kdev, pdsp)                               \
        list_for_each_entry(pdsp, &kdev->pdsps, list)

#define for_each_qmgr(kdev, qmgr)                               \
        list_for_each_entry(qmgr, &kdev->qmgrs, list)

static inline struct knav_pdsp_info *
knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id)
{
        struct knav_pdsp_info *pdsp;

        for_each_pdsp(kdev, pdsp)
                if (pdsp_id == pdsp->id)
                        return pdsp;
        return NULL;
}

extern int knav_init_acc_range(struct knav_device *kdev,
                               struct device_node *node,
                               struct knav_range_info *range);
extern void knav_queue_notify(struct knav_queue_inst *inst);

#endif /* __KNAV_QMSS_H__ */
drivers/soc/ti/knav_qmss_acc.c (new file, 591 lines)
/*
 * Keystone accumulator queue manager
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:      Sandeep Nair <sandeep_n@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *              Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/firmware.h>

#include "knav_qmss.h"

#define knav_range_offset_to_inst(kdev, range, q)       \
        (range->queue_base_inst + (q << kdev->inst_shift))

static void __knav_acc_notify(struct knav_range_info *range,
                              struct knav_acc_channel *acc)
{
        struct knav_device *kdev = range->kdev;
        struct knav_queue_inst *inst;
        int range_base, queue;

        range_base = kdev->base_id + range->queue_base;

        if (range->flags & RANGE_MULTI_QUEUE) {
                for (queue = 0; queue < range->num_queues; queue++) {
                        inst = knav_range_offset_to_inst(kdev, range,
                                                         queue);
                        if (inst->notify_needed) {
                                inst->notify_needed = 0;
                                dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
                                        range_base + queue);
                                knav_queue_notify(inst);
                        }
                }
        } else {
                queue = acc->channel - range->acc_info.start_channel;
                inst = knav_range_offset_to_inst(kdev, range, queue);
                dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
                        range_base + queue);
                knav_queue_notify(inst);
        }
}

static int knav_acc_set_notify(struct knav_range_info *range,
                               struct knav_queue_inst *kq,
                               bool enabled)
{
        struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
        struct knav_device *kdev = range->kdev;
        u32 mask, offset;

        /*
         * when enabling, we need to re-trigger an interrupt if we
         * have descriptors pending
         */
        if (!enabled || atomic_read(&kq->desc_count) <= 0)
                return 0;

        kq->notify_needed = 1;
        atomic_inc(&kq->acc->retrigger_count);
        mask = BIT(kq->acc->channel % 32);
        offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
        dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
                kq->acc->name);
        writel_relaxed(mask, pdsp->intd + offset);
        return 0;
}

static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
{
        struct knav_acc_channel *acc;
        struct knav_queue_inst *kq = NULL;
        struct knav_range_info *range;
        struct knav_pdsp_info *pdsp;
        struct knav_acc_info *info;
        struct knav_device *kdev;

        u32 *list, *list_cpu, val, idx, notifies;
        int range_base, channel, queue = 0;
        dma_addr_t list_dma;

        range = _instdata;
        info = &range->acc_info;
        kdev = range->kdev;
        pdsp = range->acc_info.pdsp;
        acc = range->acc;

        range_base = kdev->base_id + range->queue_base;
        if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
                for (queue = 0; queue < range->num_irqs; queue++)
                        if (range->irqs[queue].irq == irq)
                                break;
                kq = knav_range_offset_to_inst(kdev, range, queue);
                acc += queue;
        }

        channel = acc->channel;
        list_dma = acc->list_dma[acc->list_index];
        list_cpu = acc->list_cpu[acc->list_index];
        dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n",
                channel, acc->list_index, list_cpu, list_dma);
        if (atomic_read(&acc->retrigger_count)) {
                atomic_dec(&acc->retrigger_count);
                __knav_acc_notify(range, acc);
                writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
                /* ack the interrupt */
                writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
                               pdsp->intd + ACC_INTD_OFFSET_EOI);

                return IRQ_HANDLED;
        }

        notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
        WARN_ON(!notifies);
        dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
                                DMA_FROM_DEVICE);

        for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
             list += ACC_LIST_ENTRY_WORDS) {
                if (ACC_LIST_ENTRY_WORDS == 1) {
                        dev_dbg(kdev->dev,
                                "acc-irq: list %d, entry @%p, %08x\n",
                                acc->list_index, list, list[0]);
                } else if (ACC_LIST_ENTRY_WORDS == 2) {
                        dev_dbg(kdev->dev,
                                "acc-irq: list %d, entry @%p, %08x %08x\n",
                                acc->list_index, list, list[0], list[1]);
                } else if (ACC_LIST_ENTRY_WORDS == 4) {
                        dev_dbg(kdev->dev,
                                "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
                                acc->list_index, list, list[0], list[1],
                                list[2], list[3]);
                }

                val = list[ACC_LIST_ENTRY_DESC_IDX];
                if (!val)
                        break;

                if (range->flags & RANGE_MULTI_QUEUE) {
                        queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
                        if (queue < range_base ||
                            queue >= range_base + range->num_queues) {
                                dev_err(kdev->dev,
                                        "bad queue %d, expecting %d-%d\n",
                                        queue, range_base,
                                        range_base + range->num_queues);
                                break;
                        }
                        queue -= range_base;
                        kq = knav_range_offset_to_inst(kdev, range,
                                                       queue);
                }

                if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
                        atomic_dec(&kq->desc_count);
                        dev_err(kdev->dev,
                                "acc-irq: queue %d full, entry dropped\n",
                                queue + range_base);
                        continue;
                }

                idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
                kq->descs[idx] = val;
                kq->notify_needed = 1;
                dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
                        val, idx, queue + range_base);
        }

        __knav_acc_notify(range, acc);
        memset(list_cpu, 0, info->list_size);
        dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
                                   DMA_TO_DEVICE);

        /* flip to the other list */
        acc->list_index ^= 1;

        /* reset the interrupt counter */
        writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));

        /* ack the interrupt */
        writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
                       pdsp->intd + ACC_INTD_OFFSET_EOI);

        return IRQ_HANDLED;
}

int knav_range_setup_acc_irq(struct knav_range_info *range,
                             int queue, bool enabled)
{
        struct knav_device *kdev = range->kdev;
        struct knav_acc_channel *acc;
        unsigned long cpu_map;
        int ret = 0, irq;
        u32 old, new;

        if (range->flags & RANGE_MULTI_QUEUE) {
                acc = range->acc;
                irq = range->irqs[0].irq;
                cpu_map = range->irqs[0].cpu_map;
        } else {
                acc = range->acc + queue;
                irq = range->irqs[queue].irq;
                cpu_map = range->irqs[queue].cpu_map;
        }

        old = acc->open_mask;
        if (enabled)
                new = old | BIT(queue);
        else
                new = old & ~BIT(queue);
        acc->open_mask = new;

        dev_dbg(kdev->dev,
                "setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
                old, new, acc->name);

        if (likely(new == old))
                return 0;

        if (new && !old) {
                dev_dbg(kdev->dev,
                        "setup-acc-irq: requesting %s for channel %s\n",
                        acc->name, acc->name);
                ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
                                  range);
                if (!ret && cpu_map) {
                        ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
                        if (ret) {
                                dev_warn(range->kdev->dev,
                                         "Failed to set IRQ affinity\n");
                                return ret;
                        }
                }
        }

        if (old && !new) {
                dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
                        acc->name, acc->name);
                free_irq(irq, range);
        }

        return ret;
}

static const char *knav_acc_result_str(enum knav_acc_result result)
{
        static const char * const result_str[] = {
                [ACC_RET_IDLE]                  = "idle",
                [ACC_RET_SUCCESS]               = "success",
                [ACC_RET_INVALID_COMMAND]       = "invalid command",
                [ACC_RET_INVALID_CHANNEL]       = "invalid channel",
                [ACC_RET_INACTIVE_CHANNEL]      = "inactive channel",
                [ACC_RET_ACTIVE_CHANNEL]        = "active channel",
                [ACC_RET_INVALID_QUEUE]         = "invalid queue",
                [ACC_RET_INVALID_RET]           = "invalid return code",
        };

        if (result >= ARRAY_SIZE(result_str))
                return result_str[ACC_RET_INVALID_RET];
        else
                return result_str[result];
}

static enum knav_acc_result
knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
               struct knav_reg_acc_command *cmd)
{
        u32 result;

        dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
                cmd->command, cmd->queue_mask, cmd->list_phys,
                cmd->queue_num, cmd->timer_config);

        writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
        writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
        writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys);
        writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
        writel_relaxed(cmd->command, &pdsp->acc_command->command);

        /* wait for the command to clear */
        do {
                result = readl_relaxed(&pdsp->acc_command->command);
        } while ((result >> 8) & 0xff);

        return (result >> 24) & 0xff;
}

static void knav_acc_setup_cmd(struct knav_device *kdev,
                               struct knav_range_info *range,
                               struct knav_reg_acc_command *cmd,
                               int queue)
{
        struct knav_acc_info *info = &range->acc_info;
        struct knav_acc_channel *acc;
        int queue_base;
        u32 queue_mask;

        if (range->flags & RANGE_MULTI_QUEUE) {
                acc = range->acc;
                queue_base = range->queue_base;
                queue_mask = BIT(range->num_queues) - 1;
        } else {
                acc = range->acc + queue;
                queue_base = range->queue_base + queue;
                queue_mask = 0;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->command = acc->channel;
        cmd->queue_mask = queue_mask;
        cmd->list_phys = acc->list_dma[0];
        cmd->queue_num = info->list_entries << 16;
        cmd->queue_num |= queue_base;

        cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
        if (range->flags & RANGE_MULTI_QUEUE)
                cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
        cmd->timer_config |= info->pacing_mode << 16;
        cmd->timer_config |= info->timer_count;
}

static void knav_acc_stop(struct knav_device *kdev,
                          struct knav_range_info *range,
                          int queue)
{
        struct knav_reg_acc_command cmd;
        struct knav_acc_channel *acc;
        enum knav_acc_result result;

        acc = range->acc + queue;

        knav_acc_setup_cmd(kdev, range, &cmd, queue);
        cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
        result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

        dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
                acc->name, knav_acc_result_str(result));
}

static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
                                           struct knav_range_info *range,
                                           int queue)
{
        struct knav_reg_acc_command cmd;
        struct knav_acc_channel *acc;
        enum knav_acc_result result;

        acc = range->acc + queue;

        knav_acc_setup_cmd(kdev, range, &cmd, queue);
        cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
        result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

        dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
                acc->name, knav_acc_result_str(result));

        return result;
}

static int knav_acc_init_range(struct knav_range_info *range)
{
        struct knav_device *kdev = range->kdev;
        struct knav_acc_channel *acc;
        enum knav_acc_result result;
        int queue;

        for (queue = 0; queue < range->num_queues; queue++) {
                acc = range->acc + queue;

                knav_acc_stop(kdev, range, queue);
                acc->list_index = 0;
                result = knav_acc_start(kdev, range, queue);

                if (result != ACC_RET_SUCCESS)
                        return -EIO;

                if (range->flags & RANGE_MULTI_QUEUE)
                        return 0;
        }
        return 0;
}

static int knav_acc_init_queue(struct knav_range_info *range,
                               struct knav_queue_inst *kq)
{
        unsigned id = kq->id - range->queue_base;

        kq->descs = devm_kzalloc(range->kdev->dev,
                                 ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL);
        if (!kq->descs)
                return -ENOMEM;

        kq->acc = range->acc;
        if ((range->flags & RANGE_MULTI_QUEUE) == 0)
                kq->acc += id;
        return 0;
}

static int knav_acc_open_queue(struct knav_range_info *range,
                               struct knav_queue_inst *inst, unsigned flags)
{
        unsigned id = inst->id - range->queue_base;

        return knav_range_setup_acc_irq(range, id, true);
}

static int knav_acc_close_queue(struct knav_range_info *range,
                                struct knav_queue_inst *inst)
{
        unsigned id = inst->id - range->queue_base;

        return knav_range_setup_acc_irq(range, id, false);
}

static int knav_acc_free_range(struct knav_range_info *range)
{
        struct knav_device *kdev = range->kdev;
        struct knav_acc_channel *acc;
        struct knav_acc_info *info;
        int channel, channels;

        info = &range->acc_info;

        if (range->flags & RANGE_MULTI_QUEUE)
                channels = 1;
        else
                channels = range->num_queues;

        for (channel = 0; channel < channels; channel++) {
                acc = range->acc + channel;
                if (!acc->list_cpu[0])
                        continue;
                dma_unmap_single(kdev->dev, acc->list_dma[0],
                                 info->mem_size, DMA_BIDIRECTIONAL);
                free_pages_exact(acc->list_cpu[0], info->mem_size);
        }
        devm_kfree(range->kdev->dev, range->acc);
        return 0;
}

struct knav_range_ops knav_acc_range_ops = {
        .set_notify     = knav_acc_set_notify,
        .init_queue     = knav_acc_init_queue,
        .open_queue     = knav_acc_open_queue,
        .close_queue    = knav_acc_close_queue,
        .init_range     = knav_acc_init_range,
        .free_range     = knav_acc_free_range,
};

/**
 * knav_init_acc_range: Initialise accumulator ranges
 *
 * @kdev:       qmss device
 * @node:       device node
 * @range:      qmss range information
 *
 * Return 0 on success or error
 */
int knav_init_acc_range(struct knav_device *kdev,
                        struct device_node *node,
                        struct knav_range_info *range)
{
        struct knav_acc_channel *acc;
        struct knav_pdsp_info *pdsp;
        struct knav_acc_info *info;
        int ret, channel, channels;
        int list_size, mem_size;
        dma_addr_t list_dma;
        void *list_mem;
        u32 config[5];

        range->flags |= RANGE_HAS_ACCUMULATOR;
        info = &range->acc_info;

        ret = of_property_read_u32_array(node, "accumulator", config, 5);
        if (ret)
                return ret;

        info->pdsp_id = config[0];
        info->start_channel = config[1];
        info->list_entries = config[2];
        info->pacing_mode = config[3];
        info->timer_count = config[4] / ACC_DEFAULT_PERIOD;

        if (info->start_channel > ACC_MAX_CHANNEL) {
                dev_err(kdev->dev, "channel %d invalid for range %s\n",
                        info->start_channel, range->name);
                return -EINVAL;
        }

        if (info->pacing_mode > 3) {
                dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
                        info->pacing_mode, range->name);
                return -EINVAL;
        }

        pdsp = knav_find_pdsp(kdev, info->pdsp_id);
        if (!pdsp) {
                dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
                        info->pdsp_id, range->name);
                return -EINVAL;
        }

        info->pdsp = pdsp;
        channels = range->num_queues;
        if (of_get_property(node, "multi-queue", NULL)) {
                range->flags |= RANGE_MULTI_QUEUE;
                channels = 1;
                if (range->queue_base & (32 - 1)) {
                        dev_err(kdev->dev,
                                "misaligned multi-queue accumulator range %s\n",
                                range->name);
                        return -EINVAL;
                }
                if (range->num_queues > 32) {
                        dev_err(kdev->dev,
                                "too many queues in accumulator range %s\n",
                                range->name);
                        return -EINVAL;
                }
        }

        /* figure out list size */
        list_size = info->list_entries;
        list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
        info->list_size = list_size;
        mem_size = PAGE_ALIGN(list_size * 2);
        info->mem_size = mem_size;
        range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc),
                                  GFP_KERNEL);
        if (!range->acc)
                return -ENOMEM;

        for (channel = 0; channel < channels; channel++) {
                acc = range->acc + channel;
                acc->channel = info->start_channel + channel;

                /* allocate memory for the two lists */
                list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
                if (!list_mem)
                        return -ENOMEM;

                list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
                                          DMA_BIDIRECTIONAL);
                if (dma_mapping_error(kdev->dev, list_dma)) {
                        free_pages_exact(list_mem, mem_size);
                        return -ENOMEM;
                }

                memset(list_mem, 0, mem_size);
                dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
                                           DMA_TO_DEVICE);
                scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
                          acc->channel);
                acc->list_cpu[0] = list_mem;
                acc->list_cpu[1] = list_mem + list_size;
                acc->list_dma[0] = list_dma;
                acc->list_dma[1] = list_dma + list_size;
                dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n",
                        acc->name, acc->channel, list_dma, list_mem);
        }

        range->ops = &knav_acc_range_ops;
        return 0;
}
EXPORT_SYMBOL_GPL(knav_init_acc_range);
drivers/soc/ti/knav_qmss_queue.c (new file, 1816 lines)
(diff not shown here: the file is too large for this view)
include/linux/soc/ti/knav_qmss.h (new file, 90 lines)
/*
 * Keystone Navigator Queue Management Sub-System header
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:      Sandeep Nair <sandeep_n@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *              Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __SOC_TI_KNAV_QMSS_H__
#define __SOC_TI_KNAV_QMSS_H__

#include <linux/err.h>
#include <linux/time.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/dma-mapping.h>

/* queue types */
#define KNAV_QUEUE_QPEND        ((unsigned)-2)  /* interruptible qpend queue */
#define KNAV_QUEUE_ACC          ((unsigned)-3)  /* Accumulated queue */
#define KNAV_QUEUE_GP           ((unsigned)-4)  /* General purpose queue */

/* queue flags */
#define KNAV_QUEUE_SHARED       0x0001          /* Queue can be shared */

/**
 * enum knav_queue_ctrl_cmd -   queue operations.
 * @KNAV_QUEUE_GET_ID:          Get the ID number for an open queue
 * @KNAV_QUEUE_FLUSH:           forcibly empty a queue if possible
 * @KNAV_QUEUE_SET_NOTIFIER:    Set a notifier callback to a queue handle.
 * @KNAV_QUEUE_ENABLE_NOTIFY:   Enable notifier callback for a queue handle.
 * @KNAV_QUEUE_DISABLE_NOTIFY:  Disable notifier callback for a queue handle.
 * @KNAV_QUEUE_GET_COUNT:       Get number of queues.
 */
enum knav_queue_ctrl_cmd {
        KNAV_QUEUE_GET_ID,
        KNAV_QUEUE_FLUSH,
        KNAV_QUEUE_SET_NOTIFIER,
        KNAV_QUEUE_ENABLE_NOTIFY,
        KNAV_QUEUE_DISABLE_NOTIFY,
        KNAV_QUEUE_GET_COUNT
};

/* Queue notifier callback prototype */
typedef void (*knav_queue_notify_fn)(void *arg);

/**
 * struct knav_queue_notify_config:     Notifier configuration
 * @fn:                                 Notifier function
 * @fn_arg:                             Notifier function arguments
 */
struct knav_queue_notify_config {
        knav_queue_notify_fn fn;
        void *fn_arg;
};

void *knav_queue_open(const char *name, unsigned id,
                      unsigned flags);
void knav_queue_close(void *qhandle);
int knav_queue_device_control(void *qhandle,
                              enum knav_queue_ctrl_cmd cmd,
                              unsigned long arg);
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size);
int knav_queue_push(void *qhandle, dma_addr_t dma,
                    unsigned size, unsigned flags);

void *knav_pool_create(const char *name,
                       int num_desc, int region_id);
void knav_pool_destroy(void *ph);
int knav_pool_count(void *ph);
void *knav_pool_desc_get(void *ph);
void knav_pool_desc_put(void *ph, void *desc);
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
                       dma_addr_t *dma, unsigned *dma_sz);
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);

#endif /* __SOC_TI_KNAV_QMSS_H__ */
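The notifier declarations above suggest the following registration flow. This is a hedged sketch: it assumes KNAV_QUEUE_SET_NOTIFIER takes a pointer to struct knav_queue_notify_config passed through the unsigned long argument of knav_queue_device_control(), a convention inferred from the header rather than spelled out in this patch, and example_queue_notify()/example_setup_notifier() are hypothetical names.

/* Illustrative only: notifier registration as inferred from the header above. */
#include <linux/printk.h>
#include <linux/soc/ti/knav_qmss.h>

static void example_queue_notify(void *arg)
{
        /* Runs when the queue has pending descriptors; arg is fn_arg below */
        pr_debug("knav queue notification, arg %p\n", arg);
}

static int example_setup_notifier(void *qhandle, void *ctx)
{
        struct knav_queue_notify_config cfg = {
                .fn     = example_queue_notify,
                .fn_arg = ctx,
        };
        int ret;

        /* Register the callback, then enable notifications on this handle */
        ret = knav_queue_device_control(qhandle, KNAV_QUEUE_SET_NOTIFIER,
                                        (unsigned long)&cfg);
        if (ret)
                return ret;

        return knav_queue_device_control(qhandle, KNAV_QUEUE_ENABLE_NOTIFY, 0);
}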