forked from luck/tmp_suning_uos_patched
ad9064d5e2
Current implementation of spin_event_timeout can be interrupted by an IRQ or context switch after testing the condition, but before checking the timeout. This can cause the loop to report a timeout when the condition actually became true in the middle. This patch adds one final check of the condition upon exit of the loop if the last test of the condition was still false. Signed-off-by: Grant Likely <grant.likely@secretlab.ca> Acked-by: Timur Tabi <timur@freescale.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
73 lines
2.9 KiB
C
73 lines
2.9 KiB
C
#ifndef _ASM_POWERPC_DELAY_H
|
|
#define _ASM_POWERPC_DELAY_H
|
|
#ifdef __KERNEL__
|
|
|
|
#include <asm/time.h>
|
|
|
|
/*
|
|
* Copyright 1996, Paul Mackerras.
|
|
* Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*
|
|
* PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
|
|
* Anton Blanchard.
|
|
*/
|
|
|
|
extern void __delay(unsigned long loops);
|
|
extern void udelay(unsigned long usecs);
|
|
|
|
/*
|
|
* On shared processor machines the generic implementation of mdelay can
|
|
* result in large errors. While each iteration of the loop inside mdelay
|
|
* is supposed to take 1ms, the hypervisor could sleep our partition for
|
|
* longer (eg 10ms). With the right timing these errors can add up.
|
|
*
|
|
* Since there is no 32bit overflow issue on 64bit kernels, just call
|
|
* udelay directly.
|
|
*/
|
|
#ifdef CONFIG_PPC64
/* 64-bit: no 32-bit multiply overflow concern, so delegate straight to udelay. */
#define mdelay(n)	udelay((n) * 1000)
#endif
|
|
|
|
/**
|
|
* spin_event_timeout - spin until a condition gets true or a timeout elapses
|
|
 * @condition: a C expression to evaluate
|
|
* @timeout: timeout, in microseconds
|
|
* @delay: the number of microseconds to delay between each evaluation of
|
|
* @condition
|
|
*
|
|
* The process spins until the condition evaluates to true (non-zero) or the
|
|
* timeout elapses. The return value of this macro is the value of
|
|
* @condition when the loop terminates. This allows you to determine the cause
|
|
 * of loop termination. If the return value is zero, then you know a
|
|
* timeout has occurred.
|
|
*
|
|
 * The primary purpose of this macro is to poll on a hardware register
|
|
* until a status bit changes. The timeout ensures that the loop still
|
|
* terminates even if the bit never changes. The delay is for devices that
|
|
* need a delay in between successive reads.
|
|
*
|
|
* gcc will optimize out the if-statement if @delay is a constant.
|
|
*/
|
|
/*
 * Implementation notes:
 *  - (timeout) and (delay) are parenthesized in the expansion so that a
 *    caller may pass an expression (e.g. "base + slack") without the
 *    surrounding '*' / 'if' binding to only part of it.
 *  - The loop can be preempted (IRQ, context switch) between testing
 *    @condition and testing the timeout, so @condition is re-checked one
 *    final time after the loop to avoid falsely reporting a timeout when
 *    the condition actually became true during the window.
 */
#define spin_event_timeout(condition, timeout, delay)                          \
({                                                                             \
	typeof(condition) __ret;                                               \
	unsigned long __loops = tb_ticks_per_usec * (timeout);                 \
	unsigned long __start = get_tbl();                                     \
	while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \
		if ((delay))                                                   \
			udelay(delay);                                         \
		else                                                           \
			cpu_relax();                                           \
	if (!__ret)                                                            \
		__ret = (condition);                                           \
	__ret;                                                                 \
})
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_DELAY_H */
|