/*
 * amdtp-tascam.c - a part of driver for TASCAM FireWire series
 *
 * Copyright (c) 2015 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <sound/pcm.h>
#include "tascam.h"

#define AMDTP_FMT_TSCM_TX	0x1e
#define AMDTP_FMT_TSCM_RX	0x3e
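
/*
 * Protocol-specific data attached to each AMDTP stream; the only parameter
 * is the number of data channels used for PCM samples.
 */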
struct amdtp_tscm {
	unsigned int pcm_channels;
};

int amdtp_tscm_set_parameters(struct amdtp_stream *s, unsigned int rate)
{
	struct amdtp_tscm *p = s->protocol;
	unsigned int data_channels;

	if (amdtp_stream_running(s))
		return -EBUSY;

	data_channels = p->pcm_channels;

	/* Packets in an in-stream have two extra data channels. */
	if (s->direction == AMDTP_IN_STREAM)
		data_channels += 2;

	return amdtp_stream_set_parameters(s, rate, data_channels);
}

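/*
 * Copy PCM frames from the ALSA runtime buffer into the isochronous payload.
 * Each sample occupies one big-endian quadlet of its data channel; only
 * 24 bits are significant (see amdtp_tscm_add_pcm_hw_constraints()).
 */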
static void write_pcm_s32(struct amdtp_stream *s,
			  struct snd_pcm_substream *pcm,
			  __be32 *buffer, unsigned int frames)
{
	struct amdtp_tscm *p = s->protocol;
	struct snd_pcm_runtime *runtime = pcm->runtime;
	unsigned int channels, remaining_frames, i, c;
	const u32 *src;

	channels = p->pcm_channels;
	src = (void *)runtime->dma_area +
			frames_to_bytes(runtime, s->pcm_buffer_pointer);
	remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;

	for (i = 0; i < frames; ++i) {
		for (c = 0; c < channels; ++c) {
			buffer[c] = cpu_to_be32(*src);
			src++;
		}
		buffer += s->data_block_quadlets;
		if (--remaining_frames == 0)
			src = (void *)runtime->dma_area;
	}
}

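/*
 * Copy PCM frames from the isochronous payload into the ALSA runtime buffer.
 * The first data channel of each data block is skipped; it carries the event
 * counter rather than a PCM sample.
 */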
static void read_pcm_s32(struct amdtp_stream *s,
			 struct snd_pcm_substream *pcm,
			 __be32 *buffer, unsigned int frames)
{
	struct amdtp_tscm *p = s->protocol;
	struct snd_pcm_runtime *runtime = pcm->runtime;
	unsigned int channels, remaining_frames, i, c;
	u32 *dst;

	channels = p->pcm_channels;
	dst = (void *)runtime->dma_area +
			frames_to_bytes(runtime, s->pcm_buffer_pointer);
	remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;

	/* The first data channel is for the event counter. */
	buffer += 1;

	for (i = 0; i < frames; ++i) {
		for (c = 0; c < channels; ++c) {
			*dst = be32_to_cpu(buffer[c]);
			dst++;
		}
		buffer += s->data_block_quadlets;
		if (--remaining_frames == 0)
			dst = (void *)runtime->dma_area;
	}
}

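/* Fill the PCM data channels with zero samples when no substream runs. */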
static void write_pcm_silence(struct amdtp_stream *s, __be32 *buffer,
			      unsigned int data_blocks)
{
	struct amdtp_tscm *p = s->protocol;
	unsigned int channels, i, c;

	channels = p->pcm_channels;

	for (i = 0; i < data_blocks; ++i) {
		for (c = 0; c < channels; ++c)
			buffer[c] = 0x00000000;
		buffer += s->data_block_quadlets;
	}
}

int amdtp_tscm_add_pcm_hw_constraints(struct amdtp_stream *s,
				      struct snd_pcm_runtime *runtime)
{
	int err;

	/*
	 * Our implementation allows this protocol to deliver 24-bit samples
	 * in 32-bit data channels.
	 */
	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
	if (err < 0)
		return err;

	return amdtp_stream_add_pcm_hw_constraints(s, runtime);
}

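/*
 * Each tx data block carries device status as well as PCM samples. The
 * first quadlet is an event counter, used (modulo the state count) as an
 * index into a cached state image, and the last quadlet is stored as the
 * state for that index. When the hwdep interface is in use, changes on
 * selected indices are pushed to a queue and waiters are woken up.
 */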
static void read_status_messages(struct amdtp_stream *s,
				 __be32 *buffer, unsigned int data_blocks)
{
	struct snd_tscm *tscm = container_of(s, struct snd_tscm, tx_stream);
	bool used = READ_ONCE(tscm->hwdep->used);
	int i;

	for (i = 0; i < data_blocks; i++) {
		unsigned int index;
		__be32 before;
		__be32 after;

		index = be32_to_cpu(buffer[0]) % SNDRV_FIREWIRE_TASCAM_STATE_COUNT;
		before = tscm->state[index];
		after = buffer[s->data_block_quadlets - 1];

		if (used && index > 4 && index < 16) {
			__be32 mask;

			if (index == 5)
				mask = cpu_to_be32(~0x0000ffff);
			else if (index == 6)
				mask = cpu_to_be32(~0x0000ffff);
			else if (index == 8)
				mask = cpu_to_be32(~0x000f0f00);
			else
				mask = cpu_to_be32(~0x00000000);

			if ((before ^ after) & mask) {
				struct snd_firewire_tascam_change *entry =
						&tscm->queue[tscm->push_pos];

				spin_lock_irq(&tscm->lock);
				entry->index = index;
				entry->before = before;
				entry->after = after;
				if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT)
					tscm->push_pos = 0;
				spin_unlock_irq(&tscm->lock);

				wake_up(&tscm->hwdep_wait);
			}
		}

		tscm->state[index] = after;

		buffer += s->data_block_quadlets;
	}
}

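/*
 * Handler for data blocks in tx packets (AMDTP_IN_STREAM): deliver PCM
 * samples to the PCM substream, if any, then parse the status messages.
 */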
static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
					   __be32 *buffer,
					   unsigned int data_blocks,
					   unsigned int *syt)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (data_blocks > 0 && pcm)
		read_pcm_s32(s, pcm, buffer, data_blocks);

	read_status_messages(s, buffer, data_blocks);

	return data_blocks;
}

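/*
 * Handler for data blocks in rx packets (AMDTP_OUT_STREAM): fill the payload
 * with PCM samples from the PCM substream, or with silence when no substream
 * is running.
 */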
static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
					   __be32 *buffer,
					   unsigned int data_blocks,
					   unsigned int *syt)
{
	struct snd_pcm_substream *pcm;

	/* This field is not used. */
	*syt = 0x0000;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		write_pcm_s32(s, pcm, buffer, data_blocks);
	else
		write_pcm_silence(s, buffer, data_blocks);

	return data_blocks;
}

int amdtp_tscm_init(struct amdtp_stream *s, struct fw_unit *unit,
		    enum amdtp_stream_direction dir, unsigned int pcm_channels)
{
	amdtp_stream_process_data_blocks_t process_data_blocks;
	struct amdtp_tscm *p;
	unsigned int fmt;
	int err;

	if (dir == AMDTP_IN_STREAM) {
		fmt = AMDTP_FMT_TSCM_TX;
		process_data_blocks = process_tx_data_blocks;
	} else {
		fmt = AMDTP_FMT_TSCM_RX;
		process_data_blocks = process_rx_data_blocks;
	}

	err = amdtp_stream_init(s, unit, dir,
				CIP_NONBLOCKING | CIP_SKIP_DBC_ZERO_CHECK, fmt,
				process_data_blocks, sizeof(struct amdtp_tscm));
	if (err < 0)
		return err;

	/* Use a fixed value for the FDF field. */
	s->ctx_data.rx.fdf = 0x00;

	/* This protocol uses a fixed number of data channels for PCM samples. */
	p = s->protocol;
	p->pcm_channels = pcm_channels;

	return 0;
}