ALSA: ua101: Use guard() for spin locks

Clean up the code using guard() for spin locks. Merely code refactoring;
no behavior change.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Link: https://patch.msgid.link/20250829150724.6886-3-tiwai@suse.de
parent 9b99749f2a
commit f9435abeb3
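For readers unfamiliar with the kernel's scope-based cleanup helpers: guard() and scoped_guard() (built on <linux/cleanup.h>, with the spinlock guard classes defined in <linux/spinlock.h>) take the lock when declared and release it automatically when the enclosing scope ends. A minimal sketch of the two forms this patch uses — the struct foo type and functions below are hypothetical illustrations, not ua101 code:

#include <linux/spinlock.h>	/* spinlock guard classes, via <linux/cleanup.h> */

struct foo {
	spinlock_t lock;
	unsigned int count;
};

/* guard(): the lock is held from this statement to the end of the function */
static unsigned int foo_read_count(struct foo *f)
{
	guard(spinlock_irqsave)(&f->lock);
	return f->count;	/* unlocked automatically on return */
}

/* scoped_guard(): the lock is held only for the braced block */
static void foo_inc(struct foo *f)
{
	scoped_guard(spinlock_irq, &f->lock) {
		f->count++;
	}	/* unlocked here */
	/* code that must run without the lock goes here */
}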
sound/usb/misc/ua101.c

@@ -171,7 +171,6 @@ static void playback_urb_complete(struct urb *usb_urb)
 {
 	struct ua101_urb *urb = (struct ua101_urb *)usb_urb;
 	struct ua101 *ua = urb->urb.context;
-	unsigned long flags;
 
 	if (unlikely(urb->urb.status == -ENOENT ||	/* unlinked */
 		     urb->urb.status == -ENODEV ||	/* device removed */
@@ -184,14 +183,13 @@ static void playback_urb_complete(struct urb *usb_urb)
 
 	if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) {
 		/* append URB to FIFO */
-		spin_lock_irqsave(&ua->lock, flags);
+		guard(spinlock_irqsave)(&ua->lock);
 		list_add_tail(&urb->ready_list, &ua->ready_playback_urbs);
 		if (ua->rate_feedback_count > 0)
 			queue_work(system_highpri_wq, &ua->playback_work);
 		ua->playback.substream->runtime->delay -=
 				urb->urb.iso_frame_desc[0].length /
 					ua->playback.frame_bytes;
-		spin_unlock_irqrestore(&ua->lock, flags);
 	}
 }
 
@@ -249,7 +247,6 @@ static inline void add_with_wraparound(struct ua101 *ua,
 static void playback_work(struct work_struct *work)
 {
 	struct ua101 *ua = container_of(work, struct ua101, playback_work);
-	unsigned long flags;
 	unsigned int frames;
 	struct ua101_urb *urb;
 	bool do_period_elapsed = false;
@@ -269,43 +266,43 @@ static void playback_work(struct work_struct *work)
 	 * submitting playback URBs is possible as long as both FIFOs are
 	 * nonempty.
 	 */
-	spin_lock_irqsave(&ua->lock, flags);
-	while (ua->rate_feedback_count > 0 &&
-	       !list_empty(&ua->ready_playback_urbs)) {
-		/* take packet size out of FIFO */
-		frames = ua->rate_feedback[ua->rate_feedback_start];
-		add_with_wraparound(ua, &ua->rate_feedback_start, 1);
-		ua->rate_feedback_count--;
-
-		/* take URB out of FIFO */
-		urb = list_first_entry(&ua->ready_playback_urbs,
-				       struct ua101_urb, ready_list);
-		list_del(&urb->ready_list);
-
-		/* fill packet with data or silence */
-		urb->urb.iso_frame_desc[0].length =
-			frames * ua->playback.frame_bytes;
-		if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
-			do_period_elapsed |= copy_playback_data(&ua->playback,
-								&urb->urb,
-								frames);
-		else
-			memset(urb->urb.transfer_buffer, 0,
-			       urb->urb.iso_frame_desc[0].length);
-
-		/* and off you go ... */
-		err = usb_submit_urb(&urb->urb, GFP_ATOMIC);
-		if (unlikely(err < 0)) {
-			spin_unlock_irqrestore(&ua->lock, flags);
-			abort_usb_playback(ua);
-			abort_alsa_playback(ua);
-			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
-				err, usb_error_string(err));
-			return;
-		}
-		ua->playback.substream->runtime->delay += frames;
-	}
-	spin_unlock_irqrestore(&ua->lock, flags);
+	scoped_guard(spinlock_irqsave, &ua->lock) {
+		while (ua->rate_feedback_count > 0 &&
+		       !list_empty(&ua->ready_playback_urbs)) {
+			/* take packet size out of FIFO */
+			frames = ua->rate_feedback[ua->rate_feedback_start];
+			add_with_wraparound(ua, &ua->rate_feedback_start, 1);
+			ua->rate_feedback_count--;
+
+			/* take URB out of FIFO */
+			urb = list_first_entry(&ua->ready_playback_urbs,
+					       struct ua101_urb, ready_list);
+			list_del(&urb->ready_list);
+
+			/* fill packet with data or silence */
+			urb->urb.iso_frame_desc[0].length =
+				frames * ua->playback.frame_bytes;
+			if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
+				do_period_elapsed |= copy_playback_data(&ua->playback,
+									&urb->urb,
+									frames);
+			else
+				memset(urb->urb.transfer_buffer, 0,
+				       urb->urb.iso_frame_desc[0].length);
+
+			/* and off you go ... */
+			err = usb_submit_urb(&urb->urb, GFP_ATOMIC);
+			if (unlikely(err < 0)) {
+				abort_usb_playback(ua);
+				abort_alsa_playback(ua);
+				dev_err(&ua->dev->dev, "USB request error %d: %s\n",
+					err, usb_error_string(err));
+				return;
+			}
+			ua->playback.substream->runtime->delay += frames;
+		}
+	}
+
 	if (do_period_elapsed)
 		snd_pcm_period_elapsed(ua->playback.substream);
 }
@@ -347,7 +344,6 @@ static void capture_urb_complete(struct urb *urb)
 {
 	struct ua101 *ua = urb->context;
 	struct ua101_stream *stream = &ua->capture;
-	unsigned long flags;
 	unsigned int frames, write_ptr;
 	bool do_period_elapsed;
 	int err;
@@ -364,47 +360,45 @@ static void capture_urb_complete(struct urb *urb)
 	else
 		frames = 0;
 
-	spin_lock_irqsave(&ua->lock, flags);
-
-	if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
-		do_period_elapsed = copy_capture_data(stream, urb, frames);
-	else
-		do_period_elapsed = false;
-
-	if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
-		err = usb_submit_urb(urb, GFP_ATOMIC);
-		if (unlikely(err < 0)) {
-			spin_unlock_irqrestore(&ua->lock, flags);
-			dev_err(&ua->dev->dev, "USB request error %d: %s\n",
-				err, usb_error_string(err));
-			goto stream_stopped;
-		}
-
-		/* append packet size to FIFO */
-		write_ptr = ua->rate_feedback_start;
-		add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count);
-		ua->rate_feedback[write_ptr] = frames;
-		if (ua->rate_feedback_count < ua->playback.queue_length) {
-			ua->rate_feedback_count++;
-			if (ua->rate_feedback_count ==
-						ua->playback.queue_length)
-				wake_up(&ua->rate_feedback_wait);
-		} else {
-			/*
-			 * Ring buffer overflow; this happens when the playback
-			 * stream is not running.  Throw away the oldest entry,
-			 * so that the playback stream, when it starts, sees
-			 * the most recent packet sizes.
-			 */
-			add_with_wraparound(ua, &ua->rate_feedback_start, 1);
-		}
-		if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) &&
-		    !list_empty(&ua->ready_playback_urbs))
-			queue_work(system_highpri_wq, &ua->playback_work);
-	}
-	spin_unlock_irqrestore(&ua->lock, flags);
+	scoped_guard(spinlock_irqsave, &ua->lock) {
+		if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
+			do_period_elapsed = copy_capture_data(stream, urb, frames);
+		else
+			do_period_elapsed = false;
+
+		if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
+			err = usb_submit_urb(urb, GFP_ATOMIC);
+			if (unlikely(err < 0)) {
+				dev_err(&ua->dev->dev, "USB request error %d: %s\n",
+					err, usb_error_string(err));
+				goto stream_stopped;
+			}
+
+			/* append packet size to FIFO */
+			write_ptr = ua->rate_feedback_start;
+			add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count);
+			ua->rate_feedback[write_ptr] = frames;
+			if (ua->rate_feedback_count < ua->playback.queue_length) {
+				ua->rate_feedback_count++;
+				if (ua->rate_feedback_count ==
+							ua->playback.queue_length)
+					wake_up(&ua->rate_feedback_wait);
+			} else {
+				/*
+				 * Ring buffer overflow; this happens when the playback
+				 * stream is not running.  Throw away the oldest entry,
+				 * so that the playback stream, when it starts, sees
+				 * the most recent packet sizes.
+				 */
+				add_with_wraparound(ua, &ua->rate_feedback_start, 1);
+			}
+			if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) &&
+			    !list_empty(&ua->ready_playback_urbs))
+				queue_work(system_highpri_wq, &ua->playback_work);
+		}
+	}
 
 	if (do_period_elapsed)
 		snd_pcm_period_elapsed(stream->substream);
 
 	return;
@@ -558,9 +552,9 @@ static int start_usb_playback(struct ua101 *ua)
 	clear_bit(PLAYBACK_URB_COMPLETED, &ua->states);
 	ua->playback.urbs[0]->urb.complete =
 		first_playback_urb_complete;
-	spin_lock_irq(&ua->lock);
-	INIT_LIST_HEAD(&ua->ready_playback_urbs);
-	spin_unlock_irq(&ua->lock);
+	scoped_guard(spinlock_irq, &ua->lock) {
+		INIT_LIST_HEAD(&ua->ready_playback_urbs);
+	}
 
 	/*
 	 * We submit the initial URBs all at once, so we have to wait for the
@@ -581,11 +575,11 @@ static int start_usb_playback(struct ua101 *ua)
 
 	for (i = 0; i < ua->playback.queue_length; ++i) {
 		/* all initial URBs contain silence */
-		spin_lock_irq(&ua->lock);
-		frames = ua->rate_feedback[ua->rate_feedback_start];
-		add_with_wraparound(ua, &ua->rate_feedback_start, 1);
-		ua->rate_feedback_count--;
-		spin_unlock_irq(&ua->lock);
+		scoped_guard(spinlock_irq, &ua->lock) {
+			frames = ua->rate_feedback[ua->rate_feedback_start];
+			add_with_wraparound(ua, &ua->rate_feedback_start, 1);
+			ua->rate_feedback_count--;
+		}
 		urb = &ua->playback.urbs[i]->urb;
 		urb->iso_frame_desc[0].length =
 			frames * ua->playback.frame_bytes;
@@ -834,13 +828,8 @@ static int playback_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 static inline snd_pcm_uframes_t ua101_pcm_pointer(struct ua101 *ua,
 						  struct ua101_stream *stream)
 {
-	unsigned long flags;
-	unsigned int pos;
-
-	spin_lock_irqsave(&ua->lock, flags);
-	pos = stream->buffer_pos;
-	spin_unlock_irqrestore(&ua->lock, flags);
-	return pos;
+	guard(spinlock_irqsave)(&ua->lock);
+	return stream->buffer_pos;
 }
 
 static snd_pcm_uframes_t capture_pcm_pointer(struct snd_pcm_substream *subs)
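A note on the error paths above: leaving a scoped_guard() block via return or goto runs the guard's cleanup, so the lock is released without an explicit unlock — which is why the spin_unlock_irqrestore() calls that used to precede the abort calls and the goto stream_stopped are simply dropped. A minimal sketch of that early-exit behavior, with a hypothetical struct foo and foo_try() rather than driver code:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	spinlock_t lock;
	bool busy;
};

static int foo_try(struct foo *f)
{
	scoped_guard(spinlock_irqsave, &f->lock) {
		if (f->busy)
			return -EBUSY;	/* guard cleanup unlocks before returning */
		f->busy = true;
	}
	/* lock already dropped here on the success path as well */
	return 0;
}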